diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index c24b4224d439267051d85af645a026a54fd67f84..1fde8d580a5bac371c7a56cf60e688c49d2299f6 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -319,12 +319,6 @@ static inline void reset_lazy_tlbstate(void)
 
 #endif	/* SMP */
 
-/* Not inlined due to inc_irq_stat not being defined yet */
-#define flush_tlb_local() {		\
-	inc_irq_stat(irq_tlb_count);	\
-	local_flush_tlb();		\
-}
-
 #ifndef CONFIG_PARAVIRT
 #define flush_tlb_others(mask, mm, start, end)	\
 	native_flush_tlb_others(mask, mm, start, end)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 5fb6adaaa7964d68c1bd7cbc81d6c2ed6bd44bcd..fe9b9f77636168752f989b9d007634396864bf13 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -104,7 +104,7 @@ static void flush_tlb_func(void *info)
 
 	inc_irq_stat(irq_tlb_count);
 
-	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
+	if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
 		return;
 
 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
diff --git a/mm/rmap.c b/mm/rmap.c
index c399a0d41b3132f8322a1f7f76eb00231d20c52f..395e314b79962696d9be729861bd28300fd7e68b 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -569,19 +569,6 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
 }
 
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
-static void percpu_flush_tlb_batch_pages(void *data)
-{
-	/*
-	 * All TLB entries are flushed on the assumption that it is
-	 * cheaper to flush all TLBs and let them be refilled than
-	 * flushing individual PFNs. Note that we do not track mm's
-	 * to flush as that might simply be multiple full TLB flushes
-	 * for no gain.
-	 */
-	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
-	flush_tlb_local();
-}
-
 /*
  * Flush TLB entries for recently unmapped pages from remote CPUs. It is
  * important if a PTE was dirty when it was unmapped that it's flushed
@@ -598,15 +585,14 @@ void try_to_unmap_flush(void)
 
 	cpu = get_cpu();
 
-	trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, -1UL);
-
-	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask))
-		percpu_flush_tlb_batch_pages(&tlb_ubc->cpumask);
-
-	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids) {
-		smp_call_function_many(&tlb_ubc->cpumask,
-			percpu_flush_tlb_batch_pages, (void *)tlb_ubc, true);
+	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) {
+		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+		local_flush_tlb();
+		trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
 	}
+
+	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids)
+		flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL);
 	cpumask_clear(&tlb_ubc->cpumask);
 	tlb_ubc->flush_required = false;
 	tlb_ubc->writable = false;
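
For reference, a sketch of how try_to_unmap_flush() reads once this patch is applied. Only the lines shown in the hunk above come from the patch; the function prologue (the current->tlb_ubc pointer and the flush_required early return) and the closing put_cpu() are assumptions based on the usual shape of this function in the batched-unmap series, not part of this diff:

/* Sketch only, not the literal post-patch mm/rmap.c. */
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;	/* assumed */
	int cpu;

	if (!tlb_ubc->flush_required)		/* assumed early return */
		return;

	cpu = get_cpu();

	/* Flush the local TLB directly rather than sending an IPI to ourselves. */
	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) {
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		local_flush_tlb();
		trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
	}

	/* Hand any remote CPUs in the batch mask to the arch flush path. */
	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids)
		flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL);

	cpumask_clear(&tlb_ubc->cpumask);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
	put_cpu();	/* assumed to pair with get_cpu() above */
}

The arch/x86/mm/tlb.c hunk is what makes the NULL mm argument legal here: flush_tlb_func() now treats a NULL flush_mm as "flush regardless of the active mm", so the existing flush_tlb_others() IPI path can be reused for this mm-agnostic batched flush, and the open-coded flush_tlb_local() wrapper removed from tlbflush.h is no longer needed.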