Commit 4420471f authored by Ingo Molnar

Merge branch 'x86/apic' into irq/numa

Conflicts:
	arch/x86/kernel/apic/io_apic.c

Merge reason: non-trivial interaction between ongoing work in io_apic.c
              and the NUMA migration feature in the irq tree.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
@@ -1565,6 +1565,9 @@ and is between 256 and 4096 characters. It is defined in the file
noinitrd [RAM] Tells the kernel not to load any configured
initial RAM disk.
nointremap [X86-64, Intel-IOMMU] Do not enable interrupt
remapping.
nointroute [IA-64]
nojitter [IA64] Disables jitter checking for ITC timers.
......
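For context: the "nointremap" entry above is backed by the
early_param("nointremap", setup_nointremap) handler added in the
intr_remapping changes later in this diff, and the related "nox2apic"
switch by setup_nox2apic() in apic.c. An illustrative boot line (paths
hypothetical) that keeps interrupt remapping off:

	linux /boot/vmlinuz root=/dev/sda1 nointremap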
@@ -349,7 +349,7 @@ config X86_UV
depends on X86_64
depends on X86_EXTENDED_PLATFORM
depends on NUMA
select X86_X2APIC
depends on X86_X2APIC
---help---
This option is needed in order to support SGI Ultraviolet systems.
If you don't have one of these, you should say N here.
......
@@ -107,8 +107,7 @@ extern u32 native_safe_apic_wait_icr_idle(void);
extern void native_apic_icr_write(u32 low, u32 id);
extern u64 native_apic_icr_read(void);
#define EIM_8BIT_APIC_ID 0
#define EIM_32BIT_APIC_ID 1
extern int x2apic_mode;
#ifdef CONFIG_X86_X2APIC
/*
@@ -166,10 +165,9 @@ static inline u64 native_x2apic_icr_read(void)
return val;
}
extern int x2apic, x2apic_phys;
extern int x2apic_phys;
extern void check_x2apic(void);
extern void enable_x2apic(void);
extern void enable_IR_x2apic(void);
extern void x2apic_icr_write(u32 low, u32 id);
static inline int x2apic_enabled(void)
{
@@ -183,6 +181,8 @@ static inline int x2apic_enabled(void)
return 1;
return 0;
}
#define x2apic_supported() (cpu_has_x2apic)
#else
static inline void check_x2apic(void)
{
@@ -190,28 +190,20 @@ static inline void check_x2apic(void)
static inline void enable_x2apic(void)
{
}
static inline void enable_IR_x2apic(void)
{
}
static inline int x2apic_enabled(void)
{
return 0;
}
#define x2apic 0
#define x2apic_preenabled 0
#define x2apic_supported() 0
#endif
extern int get_physical_broadcast(void);
extern void enable_IR_x2apic(void);
#ifdef CONFIG_X86_X2APIC
static inline void ack_x2APIC_irq(void)
{
/* Docs say use 0 for future compatibility */
native_apic_msr_write(APIC_EOI, 0);
}
#endif
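For context: in x2APIC mode each xAPIC MMIO register at offset R is
exposed as MSR 0x800 + (R >> 4), so the APIC_EOI write above (offset
0xB0) lands on MSR 0x80B. A minimal sketch of the same EOI; the function
name is illustrative, not from this patch:

static inline void x2apic_eoi_sketch(void)
{
	/* APIC_EOI is MMIO offset 0xB0, i.e. MSR 0x80B in x2APIC mode */
	wrmsr(0x800 + (0xB0 >> 4), 0, 0);
}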
extern int get_physical_broadcast(void);
extern void apic_disable(void);
extern int lapic_get_maxlvt(void);
extern void clear_local_APIC(void);
extern void connect_bsp_APIC(void);
@@ -252,7 +244,7 @@ static inline void lapic_shutdown(void) { }
#define local_apic_timer_c2_ok 1
static inline void init_apic_mappings(void) { }
static inline void disable_local_APIC(void) { }
static inline void apic_disable(void) { }
#endif /* !CONFIG_X86_LOCAL_APIC */
#ifdef CONFIG_X86_64
......
@@ -60,8 +60,4 @@ extern struct irq_chip i8259A_chip;
extern void mask_8259A(void);
extern void unmask_8259A(void);
#ifdef CONFIG_X86_32
extern void init_ISA_irqs(void);
#endif
#endif /* _ASM_X86_I8259_H */
@@ -161,15 +161,11 @@ extern int io_apic_set_pci_routing(struct device *dev, int ioapic, int pin,
extern int (*ioapic_renumber_irq)(int ioapic, int irq);
extern void ioapic_init_mappings(void);
#ifdef CONFIG_X86_64
extern struct IO_APIC_route_entry **alloc_ioapic_entries(void);
extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries);
extern int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
extern void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
extern int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
extern void reinit_intr_remapped_IO_APIC(int intr_remapping,
struct IO_APIC_route_entry **ioapic_entries);
#endif
extern void probe_nr_irqs_gsi(void);
......
#ifndef _ASM_X86_IRQ_REMAPPING_H
#define _ASM_X86_IRQ_REMAPPING_H
#define IRTE_DEST(dest) ((x2apic) ? dest : dest << 8)
#define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8)
#endif /* _ASM_X86_IRQ_REMAPPING_H */
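For context: IRTE_DEST() packs the destination APIC ID into the 32-bit
destination field of an interrupt-remapping table entry. A legacy xAPIC
ID is 8 bits wide and must sit in bits 8-15 of that field, while x2APIC
uses the full 32-bit ID verbatim. A standalone sketch, with hypothetical
values:

#include <stdint.h>

/* mirrors IRTE_DEST(): shift an 8-bit xAPIC ID into bits 8-15 */
static uint32_t irte_dest(int x2apic_mode, uint32_t dest)
{
	return x2apic_mode ? dest : dest << 8;
}

/* irte_dest(0, 0x23) == 0x2300 (xAPIC); irte_dest(1, 0x23) == 0x23 (x2APIC) */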
@@ -34,6 +34,7 @@
#ifdef CONFIG_X86_32
# define SYSCALL_VECTOR 0x80
# define IA32_SYSCALL_VECTOR 0x80
#else
# define IA32_SYSCALL_VECTOR 0x80
#endif
......
@@ -33,7 +33,6 @@ struct x86_quirks {
int (*setup_ioapic_ids)(void);
};
extern void x86_quirk_pre_intr_init(void);
extern void x86_quirk_intr_init(void);
extern void x86_quirk_trap_init(void);
......
@@ -28,7 +28,7 @@ CFLAGS_paravirt.o := $(nostackp)
obj-y := process_$(BITS).o signal.o entry_$(BITS).o
obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
obj-y += time_$(BITS).o ioport.o ldt.o dumpstack.o
obj-y += setup.o i8259.o irqinit_$(BITS).o
obj-y += setup.o i8259.o irqinit.o
obj-$(CONFIG_X86_VISWS) += visws_quirks.o
obj-$(CONFIG_X86_32) += probe_roms_32.o
obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
......
@@ -98,6 +98,29 @@ early_param("lapic", parse_lapic);
/* Local APIC was disabled by the BIOS and enabled by the kernel */
static int enabled_via_apicbase;
/*
* Handle interrupt mode configuration register (IMCR).
* This register controls whether the interrupt signals
* that reach the BSP come from the master PIC or from the
* local APIC. Before entering Symmetric I/O Mode, either
* the BIOS or the operating system must switch out of
* PIC Mode by changing the IMCR.
*/
static inline void imcr_pic_to_apic(void)
{
/* select IMCR register */
outb(0x70, 0x22);
/* NMI and 8259 INTR go through APIC */
outb(0x01, 0x23);
}
static inline void imcr_apic_to_pic(void)
{
/* select IMCR register */
outb(0x70, 0x22);
/* NMI and 8259 INTR go directly to BSP */
outb(0x00, 0x23);
}
#endif
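These helpers replace the open-coded IMCR pokes in connect_bsp_APIC()
and disconnect_bsp_APIC() further down in this diff:

	imcr_pic_to_apic();	/* was: outb(0x70, 0x22); outb(0x01, 0x23); */
	imcr_apic_to_pic();	/* was: outb(0x70, 0x22); outb(0x00, 0x23); */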
#ifdef CONFIG_X86_64
@@ -111,13 +134,19 @@ static __init int setup_apicpmtimer(char *s)
__setup("apicpmtimer", setup_apicpmtimer);
#endif
int x2apic_mode;
#ifdef CONFIG_X86_X2APIC
int x2apic;
/* x2apic enabled before OS handover */
static int x2apic_preenabled;
static int disable_x2apic;
static __init int setup_nox2apic(char *str)
{
if (x2apic_enabled()) {
pr_warning("Bios already enabled x2apic, "
"can't enforce nox2apic");
return 0;
}
disable_x2apic = 1;
setup_clear_cpu_cap(X86_FEATURE_X2APIC);
return 0;
@@ -209,6 +238,24 @@ static int modern_apic(void)
return lapic_get_version() >= 0x14;
}
/*
* A bare function to substitute for the write operation,
* and it's _that_ fast :)
*/
void native_apic_write_dummy(u32 reg, u32 v)
{
WARN_ON_ONCE((cpu_has_apic || !disable_apic));
}
/*
* Right after this call apic->write doesn't do anything.
* Note that there is no restore operation: it works one way only.
*/
void apic_disable(void)
{
apic->write = native_apic_write_dummy;
}
void native_apic_wait_icr_idle(void)
{
while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
@@ -815,7 +862,7 @@ void clear_local_APIC(void)
u32 v;
/* APIC hasn't been mapped yet */
if (!x2apic && !apic_phys)
if (!x2apic_mode && !apic_phys)
return;
maxlvt = lapic_get_maxlvt();
@@ -1287,7 +1334,7 @@ void check_x2apic(void)
{
if (x2apic_enabled()) {
pr_info("x2apic enabled by BIOS, switching to x2apic ops\n");
x2apic_preenabled = x2apic = 1;
x2apic_preenabled = x2apic_mode = 1;
}
}
@@ -1295,7 +1342,7 @@ void enable_x2apic(void)
{
int msr, msr2;
if (!x2apic)
if (!x2apic_mode)
return;
rdmsr(MSR_IA32_APICBASE, msr, msr2);
@@ -1304,6 +1351,7 @@
wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0);
}
}
#endif /* CONFIG_X86_X2APIC */
void __init enable_IR_x2apic(void)
{
@@ -1312,32 +1360,21 @@
unsigned long flags;
struct IO_APIC_route_entry **ioapic_entries = NULL;
if (!cpu_has_x2apic)
return;
if (!x2apic_preenabled && disable_x2apic) {
pr_info("Skipped enabling x2apic and Interrupt-remapping "
"because of nox2apic\n");
return;
ret = dmar_table_init();
if (ret) {
pr_debug("dmar_table_init() failed with %d:\n", ret);
goto ir_failed;
}
if (x2apic_preenabled && disable_x2apic)
panic("Bios already enabled x2apic, can't enforce nox2apic");
if (!x2apic_preenabled && skip_ioapic_setup) {
pr_info("Skipped enabling x2apic and Interrupt-remapping "
"because of skipping io-apic setup\n");
return;
if (!intr_remapping_supported()) {
pr_debug("intr-remapping not supported\n");
goto ir_failed;
}
ret = dmar_table_init();
if (ret) {
pr_info("dmar_table_init() failed with %d:\n", ret);
if (x2apic_preenabled)
panic("x2apic enabled by bios. But IR enabling failed");
else
pr_info("Not enabling x2apic,Intr-remapping\n");
if (!x2apic_preenabled && skip_ioapic_setup) {
pr_info("Skipped enabling intr-remap because of skipping "
"io-apic setup\n");
return;
}
@@ -1357,19 +1394,16 @@
mask_IO_APIC_setup(ioapic_entries);
mask_8259A();
ret = enable_intr_remapping(EIM_32BIT_APIC_ID);
if (ret && x2apic_preenabled) {
local_irq_restore(flags);
panic("x2apic enabled by bios. But IR enabling failed");
}
ret = enable_intr_remapping(x2apic_supported());
if (ret)
goto end_restore;
if (!x2apic) {
x2apic = 1;
pr_info("Enabled Interrupt-remapping\n");
if (x2apic_supported() && !x2apic_mode) {
x2apic_mode = 1;
enable_x2apic();
pr_info("Enabled x2apic\n");
}
end_restore:
@@ -1378,37 +1412,34 @@
* IR enabling failed
*/
restore_IO_APIC_setup(ioapic_entries);
else
reinit_intr_remapped_IO_APIC(x2apic_preenabled, ioapic_entries);
unmask_8259A();
local_irq_restore(flags);
end:
if (!ret) {
if (!x2apic_preenabled)
pr_info("Enabled x2apic and interrupt-remapping\n");
else
pr_info("Enabled Interrupt-remapping\n");
} else
pr_err("Failed to enable Interrupt-remapping and x2apic\n");
if (ioapic_entries)
free_ioapic_entries(ioapic_entries);
if (!ret)
return;
ir_failed:
if (x2apic_preenabled)
panic("x2apic enabled by bios. But IR enabling failed");
else if (cpu_has_x2apic)
pr_info("Not enabling x2apic,Intr-remapping\n");
#else
if (!cpu_has_x2apic)
return;
if (x2apic_preenabled)
panic("x2apic enabled prior OS handover,"
" enable CONFIG_INTR_REMAP");
pr_info("Enable CONFIG_INTR_REMAP for enabling intr-remapping "
" and x2apic\n");
" enable CONFIG_X86_X2APIC, CONFIG_INTR_REMAP");
#endif
return;
}
#endif /* CONFIG_X86_X2APIC */
#ifdef CONFIG_X86_64
/*
@@ -1539,7 +1570,7 @@ void __init early_init_lapic_mapping(void)
*/
void __init init_apic_mappings(void)
{
if (x2apic) {
if (x2apic_mode) {
boot_cpu_physical_apicid = read_apic_id();
return;
}
@@ -1565,6 +1596,12 @@ void __init init_apic_mappings(void)
*/
if (boot_cpu_physical_apicid == -1U)
boot_cpu_physical_apicid = read_apic_id();
/* let's check whether we need to NOP'ify apic operations */
if (!cpu_has_apic) {
pr_info("APIC: disable apic facility\n");
apic_disable();
}
}
/*
@@ -1733,8 +1770,7 @@ void __init connect_bsp_APIC(void)
*/
apic_printk(APIC_VERBOSE, "leaving PIC mode, "
"enabling APIC mode.\n");
outb(0x70, 0x22);
outb(0x01, 0x23);
imcr_pic_to_apic();
}
#endif
if (apic->enable_apic_mode)
@@ -1762,8 +1798,7 @@ void disconnect_bsp_APIC(int virt_wire_setup)
*/
apic_printk(APIC_VERBOSE, "disabling APIC mode, "
"entering PIC mode.\n");
outb(0x70, 0x22);
outb(0x00, 0x23);
imcr_apic_to_pic();
return;
}
#endif
@@ -1969,10 +2004,10 @@ static int lapic_suspend(struct sys_device *dev, pm_message_t state)
local_irq_save(flags);
disable_local_APIC();
#ifdef CONFIG_INTR_REMAP
if (intr_remapping_enabled)
disable_intr_remapping();
#endif
local_irq_restore(flags);
return 0;
}
@@ -1982,8 +2017,6 @@ static int lapic_resume(struct sys_device *dev)
unsigned int l, h;
unsigned long flags;
int maxlvt;
#ifdef CONFIG_INTR_REMAP
int ret;
struct IO_APIC_route_entry **ioapic_entries = NULL;
@@ -1991,7 +2024,7 @@ static int lapic_resume(struct sys_device *dev)
return 0;
local_irq_save(flags);
if (x2apic) {
if (intr_remapping_enabled) {
ioapic_entries = alloc_ioapic_entries();
if (!ioapic_entries) {
WARN(1, "Alloc ioapic_entries in lapic resume failed.");
@@ -2007,17 +2040,10 @@ static int lapic_resume(struct sys_device *dev)
mask_IO_APIC_setup(ioapic_entries);
mask_8259A();
enable_x2apic();
}
#else
if (!apic_pm_state.active)
return 0;
local_irq_save(flags);
if (x2apic)
if (x2apic_mode)
enable_x2apic();
#endif
else {
/*
* Make sure the APICBASE points to the right address
@@ -2055,20 +2081,15 @@ static int lapic_resume(struct sys_device *dev)
apic_write(APIC_ESR, 0);
apic_read(APIC_ESR);
#ifdef CONFIG_INTR_REMAP
if (intr_remapping_enabled)
reenable_intr_remapping(EIM_32BIT_APIC_ID);
if (x2apic) {
if (intr_remapping_enabled) {
reenable_intr_remapping(x2apic_mode);
unmask_8259A();
restore_IO_APIC_setup(ioapic_entries);
free_ioapic_entries(ioapic_entries);
}
#endif
local_irq_restore(flags);
return 0;
}
@@ -2117,31 +2138,14 @@ static void apic_pm_activate(void) { }
#endif /* CONFIG_PM */
#ifdef CONFIG_X86_64
/*
* apic_is_clustered_box() -- Check if we can expect good TSC
*
* Thus far, the major user of this is IBM's Summit2 series:
*
* Clustered boxes may have unsynced TSC problems if they are
* multi-chassis. Use available data to take a good guess.
* If in doubt, go HPET.
*/
__cpuinit int apic_is_clustered_box(void)
static int __cpuinit apic_cluster_num(void)
{
int i, clusters, zeros;
unsigned id;
u16 *bios_cpu_apicid;
DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
/*
* There is no box of this kind with AMD CPUs yet.
* Some AMD boxes with quad-core CPUs and 8 sockets have APIC IDs
* of [4, 0x23] or [8, 0x27] and could be mistaken for vSMP
* boxes; this still needs checking...
*/
if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box())
return 0;
bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
@@ -2177,18 +2181,67 @@ __cpuinit int apic_is_clustered_box(void)
++zeros;
}
/* ScaleMP vSMPowered boxes have one cluster per board and TSCs are
* not guaranteed to be synced between boards
*/
if (is_vsmp_box() && clusters > 1)
return clusters;
}
static int __cpuinitdata multi_checked;
static int __cpuinitdata multi;
static int __cpuinit set_multi(const struct dmi_system_id *d)
{
if (multi)
return 0;
printk(KERN_INFO "APIC: %s detected, Multi Chassis\n", d->ident);
multi = 1;
return 0;
}
static const __cpuinitconst struct dmi_system_id multi_dmi_table[] = {
{
.callback = set_multi,
.ident = "IBM System Summit2",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
DMI_MATCH(DMI_PRODUCT_NAME, "Summit2"),
},
},
{}
};
static void __cpuinit dmi_check_multi(void)
{
if (multi_checked)
return;
dmi_check_system(multi_dmi_table);
multi_checked = 1;
}
/*
* apic_is_clustered_box() -- Check if we can expect good TSC
*
* Thus far, the major user of this is IBM's Summit2 series:
* Clustered boxes may have unsynced TSC problems if they are
* multi-chassis.
* Use DMI to detect them.
*/
__cpuinit int apic_is_clustered_box(void)
{
dmi_check_multi();
if (multi)
return 1;
if (!is_vsmp_box())
return 0;
/*
* If clusters > 2, then should be multi-chassis.
* May have to revisit this when multi-core + hyperthreaded CPUs come
* out, but AFAIK this will work even for them.
* ScaleMP vSMPowered boxes have one cluster per board and TSCs are
* not guaranteed to be synced between boards
*/
return (clusters > 2);
if (apic_cluster_num() > 1)
return 1;
return 0;
}
#endif
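For context: apic_is_clustered_box() feeds the TSC validation path; a
sketch of the typical call site (an assumption for illustration, not
taken from this diff):

	if (apic_is_clustered_box())
		return 1;	/* clustered multi-chassis box: don't trust the TSC */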
......
@@ -145,7 +145,7 @@ es7000_rename_gsi(int ioapic, int gsi)
return gsi;
}
static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
static int __cpuinit wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
{
unsigned long vect = 0, psaival = 0;
......
@@ -489,121 +489,6 @@ static void ioapic_mask_entry(int apic, int pin)
spin_unlock_irqrestore(&ioapic_lock, flags);
}
#ifdef CONFIG_SMP
static void send_cleanup_vector(struct irq_cfg *cfg)
{
cpumask_var_t cleanup_mask;
if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
unsigned int i;
cfg->move_cleanup_count = 0;
for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
cfg->move_cleanup_count++;
for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
} else {
cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
cfg->move_cleanup_count = cpumask_weight(cleanup_mask);
apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
free_cpumask_var(cleanup_mask);
}
cfg->move_in_progress = 0;
}
static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
{
int apic, pin;
struct irq_pin_list *entry;
u8 vector = cfg->vector;
entry = cfg->irq_2_pin;
for (;;) {
unsigned int reg;
if (!entry)
break;
apic = entry->apic;
pin = entry->pin;
/*
* With interrupt-remapping, destination information comes
* from interrupt-remapping table entry.
*/
if (!irq_remapped(irq))
io_apic_write(apic, 0x11 + pin*2, dest);
reg = io_apic_read(apic, 0x10 + pin*2);
reg &= ~IO_APIC_REDIR_VECTOR_MASK;
reg |= vector;
io_apic_modify(apic, 0x10 + pin*2, reg);
if (!entry->next)
break;
entry = entry->next;
}
}
static int
assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
/*
* Either sets desc->affinity to a valid value, and returns
* ->cpu_mask_to_apicid of that, or returns BAD_APICID and
* leaves desc->affinity untouched.
*/
static unsigned int
set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
{
struct irq_cfg *cfg;
unsigned int irq;
if (!cpumask_intersects(mask, cpu_online_mask))
return BAD_APICID;
irq = desc->irq;
cfg = desc->chip_data;
if (assign_irq_vector(irq, cfg, mask))
return BAD_APICID;
cpumask_copy(desc->affinity, mask);
return apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain);
}
static int
set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
{
struct irq_cfg *cfg;
unsigned long flags;
unsigned int dest;
unsigned int irq;
int ret = -1;
irq = desc->irq;
cfg = desc->chip_data;
spin_lock_irqsave(&ioapic_lock, flags);
dest = set_desc_affinity(desc, mask);
if (dest != BAD_APICID) {
/* Only the high 8 bits are valid. */
dest = SET_APIC_LOGICAL_ID(dest);
__target_IO_APIC_irq(irq, dest, cfg);
ret = 0;
}
spin_unlock_irqrestore(&ioapic_lock, flags);
return ret;
}
static int
set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
{
struct irq_desc *desc;
desc = irq_to_desc(irq);
return set_ioapic_affinity_irq_desc(desc, mask);
}
#endif /* CONFIG_SMP */
/*
* The common case is 1:1 IRQ<->pin mappings. Sometimes there are
* shared ISA-space IRQs, so we have to support them. We are super
@@ -822,7 +707,6 @@ static int __init ioapic_pirq_setup(char *str)
__setup("pirq=", ioapic_pirq_setup);
#endif /* CONFIG_X86_32 */
#ifdef CONFIG_INTR_REMAP
struct IO_APIC_route_entry **alloc_ioapic_entries(void)
{
int apic;
@@ -920,20 +804,6 @@ int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
return 0;
}
void reinit_intr_remapped_IO_APIC(int intr_remapping,
struct IO_APIC_route_entry **ioapic_entries)
{
/*
* For now, a plain restore of the previous settings.
* TBD: when the OS enables interrupt-remapping, the IO-APIC
* RTEs need to be set up to point to interrupt-remapping
* table entries. For now, do a plain restore and wait for
* setup_IO_APIC_irqs() to do the proper initialization.
*/
restore_IO_APIC_setup(ioapic_entries);
}
void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries)
{
int apic;
@@ -943,7 +813,6 @@ void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries)
kfree(ioapic_entries);
}
#endif
/*
* Find the IRQ entry number of a certain pin.
@@ -2332,6 +2201,118 @@ static int ioapic_retrigger_irq(unsigned int irq)
*/
#ifdef CONFIG_SMP
static void send_cleanup_vector(struct irq_cfg *cfg)
{
cpumask_var_t cleanup_mask;
if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
unsigned int i;
cfg->move_cleanup_count = 0;
for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
cfg->move_cleanup_count++;
for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
} else {
cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
cfg->move_cleanup_count = cpumask_weight(cleanup_mask);
apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
free_cpumask_var(cleanup_mask);
}
cfg->move_in_progress = 0;
}
static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
{
int apic, pin;
struct irq_pin_list *entry;
u8 vector = cfg->vector;
entry = cfg->irq_2_pin;
for (;;) {
unsigned int reg;
if (!entry)
break;
apic = entry->apic;
pin = entry->pin;
/*
* With interrupt-remapping, destination information comes
* from interrupt-remapping table entry.
*/
if (!irq_remapped(irq))
io_apic_write(apic, 0x11 + pin*2, dest);
reg = io_apic_read(apic, 0x10 + pin*2);
reg &= ~IO_APIC_REDIR_VECTOR_MASK;
reg |= vector;
io_apic_modify(apic, 0x10 + pin*2, reg);
if (!entry->next)
break;
entry = entry->next;
}
}
static int
assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
/*
* Either sets desc->affinity to a valid value, and returns
* ->cpu_mask_to_apicid of that, or returns BAD_APICID and
* leaves desc->affinity untouched.
*/
static unsigned int
set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
{
struct irq_cfg *cfg;
unsigned int irq;
if (!cpumask_intersects(mask, cpu_online_mask))
return BAD_APICID;
irq = desc->irq;
cfg = desc->chip_data;
if (assign_irq_vector(irq, cfg, mask))
return BAD_APICID;
cpumask_copy(desc->affinity, mask);
return apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain);
}
static int
set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
{
struct irq_cfg *cfg;
unsigned long flags;
unsigned int dest;
unsigned int irq;
int ret = -1;
irq = desc->irq;
cfg = desc->chip_data;
spin_lock_irqsave(&ioapic_lock, flags);
dest = set_desc_affinity(desc, mask);
if (dest != BAD_APICID) {
/* Only the high 8 bits are valid. */
dest = SET_APIC_LOGICAL_ID(dest);
__target_IO_APIC_irq(irq, dest, cfg);
ret = 0;
}
spin_unlock_irqrestore(&ioapic_lock, flags);
return ret;
}
static int
set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
{
struct irq_desc *desc;
desc = irq_to_desc(irq);
return set_ioapic_affinity_irq_desc(desc, mask);
}
#ifdef CONFIG_INTR_REMAP
@@ -2478,53 +2459,6 @@ static void irq_complete_move(struct irq_desc **descp)
static inline void irq_complete_move(struct irq_desc **descp) {}
#endif
static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
{
int apic, pin;
struct irq_pin_list *entry;
entry = cfg->irq_2_pin;
for (;;) {
if (!entry)
break;
apic = entry->apic;
pin = entry->pin;
io_apic_eoi(apic, pin);
entry = entry->next;
}
}
static void
eoi_ioapic_irq(struct irq_desc *desc)
{
struct irq_cfg *cfg;
unsigned long flags;
unsigned int irq;
irq = desc->irq;
cfg = desc->chip_data;
spin_lock_irqsave(&ioapic_lock, flags);
__eoi_ioapic_irq(irq, cfg);
spin_unlock_irqrestore(&ioapic_lock, flags);
}
#ifdef CONFIG_X86_X2APIC
static void ack_x2apic_level(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
ack_x2APIC_irq();
eoi_ioapic_irq(desc);
}
static void ack_x2apic_edge(unsigned int irq)
{
ack_x2APIC_irq();
}
#endif
static void ack_apic_edge(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
@@ -2588,9 +2522,6 @@ static void ack_apic_level(unsigned int irq)
*/
ack_APIC_irq();
if (irq_remapped(irq))
eoi_ioapic_irq(desc);
/* Now we can move and re-enable the irq */
if (unlikely(do_unmask_irq)) {
/* Only migrate the irq if the ack has been received.
@@ -2637,22 +2568,50 @@ static void ack_apic_level(unsigned int irq)
}
#ifdef CONFIG_INTR_REMAP
static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
{
int apic, pin;
struct irq_pin_list *entry;
entry = cfg->irq_2_pin;
for (;;) {
if (!entry)
break;
apic = entry->apic;
pin = entry->pin;
io_apic_eoi(apic, pin);
entry = entry->next;
}
}
static void
eoi_ioapic_irq(struct irq_desc *desc)
{
struct irq_cfg *cfg;
unsigned long flags;
unsigned int irq;
irq = desc->irq;
cfg = desc->chip_data;
spin_lock_irqsave(&ioapic_lock, flags);
__eoi_ioapic_irq(irq, cfg);
spin_unlock_irqrestore(&ioapic_lock, flags);
}
static void ir_ack_apic_edge(unsigned int irq)
{
#ifdef CONFIG_X86_X2APIC
if (x2apic_enabled())
return ack_x2apic_edge(irq);
#endif
return ack_apic_edge(irq);
ack_APIC_irq();
}
static void ir_ack_apic_level(unsigned int irq)
{
#ifdef CONFIG_X86_X2APIC
if (x2apic_enabled())
return ack_x2apic_level(irq);
#endif
return ack_apic_level(irq);
struct irq_desc *desc = irq_to_desc(irq);
ack_APIC_irq();
eoi_ioapic_irq(desc);
}
#endif /* CONFIG_INTR_REMAP */
......
@@ -50,7 +50,7 @@ static struct apic *apic_probe[] __initdata = {
void __init default_setup_apic_routing(void)
{
#ifdef CONFIG_X86_X2APIC
if (x2apic && (apic != &apic_x2apic_phys &&
if (x2apic_mode && (apic != &apic_x2apic_phys &&
#ifdef CONFIG_X86_UV
apic != &apic_x2apic_uv_x &&
#endif
......
@@ -173,13 +173,6 @@ static inline int is_WPEG(struct rio_detail *rio){
rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
}
/* In clustered mode, the high nibble of APIC ID is a cluster number.
* The low nibble is a 4-bit bitmap. */
#define XAPIC_DEST_CPUS_SHIFT 4
#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
#define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER)
static const struct cpumask *summit_target_cpus(void)
......
@@ -105,7 +105,7 @@ static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
cpumask_set_cpu(cpu, retmask);
}
static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
{
#ifdef CONFIG_SMP
unsigned long val;
......
@@ -24,9 +24,9 @@ void (*generic_interrupt_extension)(void) = NULL;
*/
void ack_bad_irq(unsigned int irq)
{
printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
if (printk_ratelimit())
pr_err("unexpected IRQ trap at vector %02x\n", irq);
#ifdef CONFIG_X86_LOCAL_APIC
/*
* Currently unexpected vectors happen only on SMP and APIC.
* We _must_ ack these because every local APIC has only N
@@ -36,9 +36,7 @@ void ack_bad_irq(unsigned int irq)
* completely.
* But only ack when the APIC is enabled -AK
*/
if (cpu_has_apic)
ack_APIC_irq();
#endif
ack_APIC_irq();
}
#define irq_stats(x) (&per_cpu(irq_stat, x))
@@ -178,7 +176,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
sum += irq_stats(cpu)->irq_thermal_count;
# ifdef CONFIG_X86_64
sum += irq_stats(cpu)->irq_threshold_count;
#endif
# endif
#endif
return sum;
}
@@ -213,14 +211,11 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
irq = __get_cpu_var(vector_irq)[vector];
if (!handle_irq(irq, regs)) {
#ifdef CONFIG_X86_64
if (!disable_apic)
ack_APIC_irq();
#endif
ack_APIC_irq();
if (printk_ratelimit())
printk(KERN_EMERG "%s: %d.%d No irq handler for vector (irq %d)\n",
__func__, smp_processor_id(), vector, irq);
pr_emerg("%s: %d.%d No irq handler for vector (irq %d)\n",
__func__, smp_processor_id(), vector, irq);
}
irq_exit();
......
#include <linux/linkage.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/kprobes.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/sysdev.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/timer.h>
#include <asm/hw_irq.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/apic.h>
@@ -22,7 +27,23 @@
#include <asm/i8259.h>
#include <asm/traps.h>
/*
* ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
* (these are usually mapped to vectors 0x30-0x3f)
*/
/*
* The IO-APIC gives us many more interrupt sources. Most of these
* are unused but an SMP system is supposed to have enough memory ...
* sometimes (mostly wrt. hw bugs) we get corrupted vectors all
* across the spectrum, so we really want to be prepared to get all
* of these. Plus, more powerful systems might have more than 64
* IO-APIC registers.
*
* (these are usually mapped into the 0x30-0xff vector range)
*/
#ifdef CONFIG_X86_32
/*
* Note that on a 486, we don't want to do a SIGFPE on an irq13
* as the irq is unreliable, and exception 16 works correctly
@@ -52,30 +73,7 @@ static struct irqaction fpu_irq = {
.handler = math_error_irq,
.name = "fpu",
};
void __init init_ISA_irqs(void)
{
int i;
#ifdef CONFIG_X86_LOCAL_APIC
init_bsp_APIC();
#endif
init_8259A(0);
/*
* 16 old-style INTA-cycle interrupts:
*/
for (i = 0; i < NR_IRQS_LEGACY; i++) {
struct irq_desc *desc = irq_to_desc(i);
desc->status = IRQ_DISABLED;
desc->action = NULL;
desc->depth = 1;
set_irq_chip_and_handler_name(i, &i8259A_chip,
handle_level_irq, "XT");
}
}
/*
* IRQ2 is cascade interrupt to second interrupt controller
@@ -118,29 +116,37 @@ int vector_used_by_percpu_irq(unsigned int vector)
return 0;
}
/* Overridden in paravirt.c */
void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
void __init native_init_IRQ(void)
static void __init init_ISA_irqs(void)
{
int i;
/* Execute any quirks before the call gates are initialised: */
x86_quirk_pre_intr_init();
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
init_bsp_APIC();
#endif
init_8259A(0);
/*
* Cover the whole vector space, no vector can escape
* us. (some of these will be overridden and become
* 'special' SMP interrupts)
* 16 old-style INTA-cycle interrupts:
*/
for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
/* SYSCALL_VECTOR was reserved in trap_init. */
if (i != SYSCALL_VECTOR)
set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]);
for (i = 0; i < NR_IRQS_LEGACY; i++) {
struct irq_desc *desc = irq_to_desc(i);
desc->status = IRQ_DISABLED;
desc->action = NULL;
desc->depth = 1;
set_irq_chip_and_handler_name(i, &i8259A_chip,
handle_level_irq, "XT");
}
}
/* Overridden in paravirt.c */
void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_SMP)
static void __init smp_intr_init(void)
{
#ifdef CONFIG_SMP
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
/*
* The reschedule interrupt is a CPU-to-CPU reschedule-helper
* IPI, driven by wakeup.
@@ -160,16 +166,27 @@ void __init native_init_IRQ(void)
/* IPI for generic function call */
alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
/* IPI for single call function */
/* IPI for generic single function call */
alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
call_function_single_interrupt);
call_function_single_interrupt);
/* Low priority IPI to cleanup after moving an irq */
set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);
#endif
#endif /* CONFIG_SMP */
}
static void __init apic_intr_init(void)
{
smp_intr_init();
#ifdef CONFIG_X86_64
alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
#endif
#ifdef CONFIG_X86_LOCAL_APIC
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
/* self generated IPI for local APIC timer */
alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
@@ -179,16 +196,67 @@ void __init native_init_IRQ(void)
/* IPI vectors for APIC spurious and error interrupts */
alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
/* Performance monitoring interrupts: */
# ifdef CONFIG_PERF_COUNTERS
alloc_intr_gate(LOCAL_PERF_VECTOR, perf_counter_interrupt);
alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt);
# endif
#endif
#ifdef CONFIG_X86_32
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_MCE_P4THERMAL)
/* thermal monitor LVT interrupt */
alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
#endif
#endif
}
/**
* x86_quirk_pre_intr_init - initialisation prior to setting up interrupt vectors
*
* Description:
* Perform any necessary interrupt initialisation prior to setting up
* the "ordinary" interrupt call gates. For legacy reasons, the ISA
* interrupts should be initialised here if the machine emulates a PC
* in any way.
**/
static void __init x86_quirk_pre_intr_init(void)
{
#ifdef CONFIG_X86_32
if (x86_quirks->arch_pre_intr_init) {
if (x86_quirks->arch_pre_intr_init())
return;
}
#endif
init_ISA_irqs();
}
void __init native_init_IRQ(void)
{
int i;
/* Execute any quirks before the call gates are initialised: */
x86_quirk_pre_intr_init();
apic_intr_init();
/*
* Cover the whole vector space, no vector can escape
* us. (some of these will be overridden and become
* 'special' SMP interrupts)
*/
for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
/* IA32_SYSCALL_VECTOR could be used in trap_init already. */
if (!test_bit(i, used_vectors))
set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]);
}
if (!acpi_ioapic)
setup_irq(2, &irq2);
#ifdef CONFIG_X86_32
/*
* Call quirks after call gates are initialised (usually add in
* the architecture specific gates):
@@ -203,4 +271,5 @@ void __init native_init_IRQ(void)
setup_irq(FPU_IRQ, &fpu_irq);
irq_ctx_init(smp_processor_id());
#endif
}
#include <linux/linkage.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/sysdev.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/hw_irq.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/apic.h>
#include <asm/i8259.h>
/*
* ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
* (these are usually mapped to vectors 0x30-0x3f)
*/
/*
* The IO-APIC gives us many more interrupt sources. Most of these
* are unused but an SMP system is supposed to have enough memory ...
* sometimes (mostly wrt. hw bugs) we get corrupted vectors all
* across the spectrum, so we really want to be prepared to get all
* of these. Plus, more powerful systems might have more than 64
* IO-APIC registers.
*
* (these are usually mapped into the 0x30-0xff vector range)
*/
/*
* IRQ2 is cascade interrupt to second interrupt controller
*/
static struct irqaction irq2 = {
.handler = no_action,
.name = "cascade",
};
DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
[0 ... IRQ0_VECTOR - 1] = -1,
[IRQ0_VECTOR] = 0,
[IRQ1_VECTOR] = 1,
[IRQ2_VECTOR] = 2,
[IRQ3_VECTOR] = 3,
[IRQ4_VECTOR] = 4,
[IRQ5_VECTOR] = 5,
[IRQ6_VECTOR] = 6,
[IRQ7_VECTOR] = 7,
[IRQ8_VECTOR] = 8,
[IRQ9_VECTOR] = 9,
[IRQ10_VECTOR] = 10,
[IRQ11_VECTOR] = 11,
[IRQ12_VECTOR] = 12,
[IRQ13_VECTOR] = 13,
[IRQ14_VECTOR] = 14,
[IRQ15_VECTOR] = 15,
[IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
};
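For orientation: FIRST_EXTERNAL_VECTOR is 0x20 and IRQ0_VECTOR is 0x30
in this series (matching the "vectors 0x30-0x3f" comment above), so the
table reads "vector 0x30+n services legacy IRQ n" and every other slot
starts out unused:

	/* e.g. vector_irq[0x36] == 6; vector_irq[0x20] == -1 */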
int vector_used_by_percpu_irq(unsigned int vector)
{
int cpu;
for_each_online_cpu(cpu) {
if (per_cpu(vector_irq, cpu)[vector] != -1)
return 1;
}
return 0;
}
static void __init init_ISA_irqs(void)
{
int i;
init_bsp_APIC();
init_8259A(0);
for (i = 0; i < NR_IRQS_LEGACY; i++) {
struct irq_desc *desc = irq_to_desc(i);
desc->status = IRQ_DISABLED;
desc->action = NULL;
desc->depth = 1;
/*
* 16 old-style INTA-cycle interrupts:
*/
set_irq_chip_and_handler_name(i, &i8259A_chip,
handle_level_irq, "XT");
}
}
void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
static void __init smp_intr_init(void)
{
#ifdef CONFIG_SMP
/*
* The reschedule interrupt is a CPU-to-CPU reschedule-helper
* IPI, driven by wakeup.
*/
alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
/* IPIs for invalidation */
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);
/* IPI for generic function call */
alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
/* IPI for generic single function call */
alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
call_function_single_interrupt);
/* Low priority IPI to cleanup after moving an irq */
set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);
#endif
}
static void __init apic_intr_init(void)
{
smp_intr_init();
alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
/* self generated IPI for local APIC timer */
alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
/* generic IPI for platform specific use */
alloc_intr_gate(GENERIC_INTERRUPT_VECTOR, generic_interrupt);
/* IPI vectors for APIC spurious and error interrupts */
alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
}
void __init native_init_IRQ(void)
{
int i;
init_ISA_irqs();
/*
* Cover the whole vector space, no vector can escape
* us. (some of these will be overridden and become
* 'special' SMP interrupts)
*/
for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) {
int vector = FIRST_EXTERNAL_VECTOR + i;
if (vector != IA32_SYSCALL_VECTOR)
set_intr_gate(vector, interrupt[i]);
}
apic_intr_init();
if (!acpi_ioapic)
setup_irq(2, &irq2);
}
@@ -996,24 +996,6 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_X86_32
/**
* x86_quirk_pre_intr_init - initialisation prior to setting up interrupt vectors
*
* Description:
* Perform any necessary interrupt initialisation prior to setting up
* the "ordinary" interrupt call gates. For legacy reasons, the ISA
* interrupts should be initialised here if the machine emulates a PC
* in any way.
**/
void __init x86_quirk_pre_intr_init(void)
{
if (x86_quirks->arch_pre_intr_init) {
if (x86_quirks->arch_pre_intr_init())
return;
}
init_ISA_irqs();
}
/**
* x86_quirk_intr_init - post gate setup interrupt initialisation
*
......
@@ -193,19 +193,19 @@ void smp_call_function_single_interrupt(struct pt_regs *regs)
}
struct smp_ops smp_ops = {
.smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
.smp_prepare_cpus = native_smp_prepare_cpus,
.smp_cpus_done = native_smp_cpus_done,
.smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
.smp_prepare_cpus = native_smp_prepare_cpus,
.smp_cpus_done = native_smp_cpus_done,
.smp_send_stop = native_smp_send_stop,
.smp_send_reschedule = native_smp_send_reschedule,
.smp_send_stop = native_smp_send_stop,
.smp_send_reschedule = native_smp_send_reschedule,
.cpu_up = native_cpu_up,
.cpu_die = native_cpu_die,
.cpu_disable = native_cpu_disable,
.play_dead = native_play_dead,
.cpu_up = native_cpu_up,
.cpu_die = native_cpu_die,
.cpu_disable = native_cpu_disable,
.play_dead = native_play_dead,
.send_call_func_ipi = native_send_call_func_ipi,
.send_call_func_ipi = native_send_call_func_ipi,
.send_call_func_single_ipi = native_send_call_func_single_ipi,
};
EXPORT_SYMBOL_GPL(smp_ops);
@@ -504,7 +504,7 @@ void __inquire_remote_apic(int apicid)
* INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
* won't ... remember to clear down the APIC, etc later.
*/
int __devinit
int __cpuinit
wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
{
unsigned long send_status, accept_status = 0;
@@ -538,7 +538,7 @@ wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
return (send_status | accept_status);
}
int __devinit
static int __cpuinit
wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
{
unsigned long send_status, accept_status = 0;
@@ -822,10 +822,12 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
/* mark "stuck" area as not stuck */
*((volatile unsigned long *)trampoline_base) = 0;
/*
* Cleanup possible dangling ends...
*/
smpboot_restore_warm_reset_vector();
if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
/*
* Cleanup possible dangling ends...
*/
smpboot_restore_warm_reset_vector();
}
return boot_error;
}
......
@@ -969,11 +969,8 @@ void __init trap_init(void)
for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
set_bit(i, used_vectors);
#ifdef CONFIG_X86_64
set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#else
set_bit(SYSCALL_VECTOR, used_vectors);
#endif
/*
* Should be a barrier for any external CPU state:
*/
......
@@ -1968,15 +1968,6 @@ static int __init init_dmars(void)
}
}
#ifdef CONFIG_INTR_REMAP
if (!intr_remapping_enabled) {
ret = enable_intr_remapping(0);
if (ret)
printk(KERN_ERR
"IOMMU: enable interrupt remapping failed\n");
}
#endif
/*
* For each rmrr
* for each dev attached to rmrr
......
@@ -15,6 +15,14 @@ static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
int intr_remapping_enabled;
static int disable_intremap;
static __init int setup_nointremap(char *str)
{
disable_intremap = 1;
return 0;
}
early_param("nointremap", setup_nointremap);
struct irq_2_iommu {
struct intel_iommu *iommu;
u16 irte_index;
@@ -420,20 +428,6 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
readl, (sts & DMA_GSTS_IRTPS), sts);
spin_unlock_irqrestore(&iommu->register_lock, flags);
if (mode == 0) {
spin_lock_irqsave(&iommu->register_lock, flags);
/* enable compatibility format interrupt pass-through */
cmd = iommu->gcmd | DMA_GCMD_CFI;
iommu->gcmd |= DMA_GCMD_CFI;
writel(cmd, iommu->reg + DMAR_GCMD_REG);
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_CFIS), sts);
spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
* global invalidation of interrupt entry cache before enabling
* interrupt-remapping.
@@ -513,6 +507,23 @@ static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
spin_unlock_irqrestore(&iommu->register_lock, flags);
}
int __init intr_remapping_supported(void)
{
struct dmar_drhd_unit *drhd;
if (disable_intremap)
return 0;
for_each_drhd_unit(drhd) {
struct intel_iommu *iommu = drhd->iommu;
if (!ecap_ir_support(iommu->ecap))
return 0;
}
return 1;
}
int __init enable_intr_remapping(int eim)
{
struct dmar_drhd_unit *drhd;
......
@@ -108,6 +108,7 @@ struct irte {
};
#ifdef CONFIG_INTR_REMAP
extern int intr_remapping_enabled;
extern int intr_remapping_supported(void);
extern int enable_intr_remapping(int);
extern void disable_intr_remapping(void);
extern int reenable_intr_remapping(int);
@@ -157,6 +158,8 @@ static inline struct intel_iommu *map_ioapic_to_ir(int apic)
}
#define irq_remapped(irq) (0)
#define enable_intr_remapping(mode) (-1)
#define disable_intr_remapping() (0)
#define reenable_intr_remapping(mode) (0)
#define intr_remapping_enabled (0)
#endif
......