diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index c6c90f39f4d9e35381cedf30cbd700fd4d7c84cb..7b897b7b0ae6222ca5b5e9ef5d6039f122c87814 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -477,6 +477,12 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
 	if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
 		return;
 
+	if (srat_num_cpus >= ARRAY_SIZE(node_cpuid)) {
+		printk_once(KERN_WARNING
+			    "node_cpuid[%ld] is too small, may not be able to use all cpus\n",
+			    ARRAY_SIZE(node_cpuid));
+		return;
+	}
 	pxm = get_processor_proximity_domain(pa);
 
 	/* record this node in proximity bitmap */
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 9a26015c3e50027d7cb88981881971feb3d88bc0..38c07b8669011dad6e5637c4049d9d959e61bf5c 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -633,7 +633,7 @@ ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action)
 	BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
 	desc = irq_desc + irq;
 	desc->status |= IRQ_PER_CPU;
-	desc->chip = &irq_type_ia64_lsapic;
+	set_irq_chip(irq, &irq_type_ia64_lsapic);
 	if (action)
 		setup_irq(irq, action);
 	set_irq_handler(irq, handle_percpu_irq);
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index f099b82703d8f4ff6d23292fcc18251691a741a7..d92d5b5161fc3f01c952113cc52c14362abbb058 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -829,10 +829,9 @@ pfm_rvmalloc(unsigned long size)
 	unsigned long addr;
 
 	size = PAGE_ALIGN(size);
-	mem = vmalloc(size);
+	mem = vzalloc(size);
 	if (mem) {
 		//printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem);
-		memset(mem, 0, size);
 		addr = (unsigned long)mem;
 		while (size > 0) {
 			pfm_reserve_page(addr);
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index dabeefe211347f4d0e3c55d046c411f4060fb488..be450a3e9871673e22d56445dbe69f80522baac4 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -293,6 +293,7 @@ smp_flush_tlb_all (void)
 void
 smp_flush_tlb_mm (struct mm_struct *mm)
 {
+	cpumask_var_t cpus;
 	preempt_disable();
 	/* this happens for the common case of a single-threaded fork(): */
 	if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
@@ -301,9 +302,15 @@ smp_flush_tlb_mm (struct mm_struct *mm)
 		preempt_enable();
 		return;
 	}
-
-	smp_call_function_many(mm_cpumask(mm),
-		(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
+	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) {
+		smp_call_function((void (*)(void *))local_finish_flush_tlb_mm,
+			mm, 1);
+	} else {
+		cpumask_copy(cpus, mm_cpumask(mm));
+		smp_call_function_many(cpus,
+			(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
+		free_cpumask_var(cpus);
+	}
 	local_irq_disable();
 	local_finish_flush_tlb_mm(mm);
 	local_irq_enable();
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index ed6f22eb5b12adcd647abb384a44ff55e4e14604..9702fa92489edb3f7aa04d21ad1a801afc8a1807 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -168,7 +168,7 @@ timer_interrupt (int irq, void *dev_id)
 {
 	unsigned long new_itm;
 
-	if (unlikely(cpu_is_offline(smp_processor_id()))) {
+	if (cpu_is_offline(smp_processor_id())) {
 		return IRQ_HANDLED;
 	}
 
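
For illustration only, not part of the patch: the acpi.c hunk guards a fixed-size table with ARRAY_SIZE() and warns exactly once instead of writing past the end of node_cpuid[]. Below is a minimal user-space sketch of that same pattern; the names (node_ids, nr_entries, record_entry) are purely illustrative and do not exist in the kernel.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static int node_ids[16];		/* stand-in for node_cpuid[] */
static unsigned int nr_entries;		/* stand-in for srat_num_cpus */

/* Record one entry, warning once when the fixed-size table is full. */
static int record_entry(int id)
{
	static int warned;

	if (nr_entries >= ARRAY_SIZE(node_ids)) {
		if (!warned) {		/* mimics printk_once() */
			fprintf(stderr,
				"node_ids[%zu] is too small, dropping further entries\n",
				ARRAY_SIZE(node_ids));
			warned = 1;
		}
		return -1;
	}
	node_ids[nr_entries++] = id;
	return 0;
}

int main(void)
{
	for (int i = 0; i < 20; i++)
		record_entry(i);
	printf("stored %u entries\n", nr_entries);
	return 0;
}

The format specifier matters here because ARRAY_SIZE() evaluates to a size_t (an unsigned long on ia64), so it is printed with %ld/%zu rather than %d.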