From effee4b9b3b0aa5770bcd98de5f672b05b27703c Mon Sep 17 00:00:00 2001
From: Alex Shi
Date: Thu, 28 Jun 2012 09:02:24 +0800
Subject: [PATCH] x86/tlb: do flush_tlb_kernel_range by 'invlpg'

This patch makes flush_tlb_kernel_range() flush with 'invlpg'. The
performance cost and gain were analyzed in the previous patch
(x86/flush_tlb: try flush_tlb_single one by one in flush_tlb_range).

In the testing: http://lkml.org/lkml/2012/6/21/10

The cost is mostly hidden by the long kernel path, but the gain is
still quite clear: memory access throughput in the user application
can increase by 30+% while the kernel executes this function.

Signed-off-by: Alex Shi
Link: http://lkml.kernel.org/r/1340845344-27557-10-git-send-email-alex.shi@intel.com
Signed-off-by: H. Peter Anvin
---
 arch/x86/include/asm/tlbflush.h | 13 +++++++------
 arch/x86/mm/tlb.c               | 30 ++++++++++++++++++++++++++++++
 2 files changed, 37 insertions(+), 6 deletions(-)

diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 621b959e1dbf..b5a27bd77669 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -123,6 +123,12 @@ static inline void reset_lazy_tlbstate(void)
 {
 }
 
+static inline void flush_tlb_kernel_range(unsigned long start,
+					  unsigned long end)
+{
+	flush_tlb_all();
+}
+
 #else	/* SMP */
 
 #include <asm/smp.h>
@@ -139,6 +145,7 @@ extern void flush_tlb_current_task(void);
 extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 				unsigned long end, unsigned long vmflag);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #define flush_tlb()	flush_tlb_current_task()
 
@@ -168,10 +175,4 @@ static inline void reset_lazy_tlbstate(void)
 	native_flush_tlb_others(mask, mm, start, end)
 #endif
 
-static inline void flush_tlb_kernel_range(unsigned long start,
-					  unsigned long end)
-{
-	flush_tlb_all();
-}
-
 #endif /* _ASM_X86_TLBFLUSH_H */
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 2b5f506a7655..613cd83e8c0c 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -264,6 +264,36 @@ void flush_tlb_all(void)
 	on_each_cpu(do_flush_tlb_all, NULL, 1);
 }
 
+static void do_kernel_range_flush(void *info)
+{
+	struct flush_tlb_info *f = info;
+	unsigned long addr;
+
+	/* Flush the range one page at a time with 'invlpg' */
+	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
+		__flush_tlb_single(addr);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	unsigned act_entries;
+	struct flush_tlb_info info;
+
+	/* On modern CPUs the last level TLB holds both data/instructions */
+	act_entries = tlb_lld_4k[ENTRIES];
+
+	/* Balance against a user space task's flush; a bit conservative */
+	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1 ||
+	    (end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
+
+		on_each_cpu(do_flush_tlb_all, NULL, 1);
+	else {
+		info.flush_start = start;
+		info.flush_end = end;
+		on_each_cpu(do_kernel_range_flush, &info, 1);
+	}
+}
+
 #ifdef CONFIG_DEBUG_TLBFLUSH
 static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
 				  size_t count, loff_t *ppos)
-- 
GitLab
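
For reference, here is a minimal stand-alone user-space sketch of the
balance heuristic used in flush_tlb_kernel_range() above. It only mimics
the decision between one global flush and a per-page 'invlpg' loop; the
act_entries and tlb_flushall_shift values below are made-up placeholders
for illustration, not the values the kernel detects from the hardware.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define TLB_FLUSH_ALL	(~0UL)

/* Placeholder values; the kernel derives these from the CPU at boot. */
static unsigned long act_entries = 512;	/* assumed last level 4K TLB entries */
static int tlb_flushall_shift = 5;	/* assumed balance point, -1 disables */

/* Return 1 when flushing everything is expected to be cheaper. */
static int want_full_flush(unsigned long start, unsigned long end)
{
	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1)
		return 1;
	return ((end - start) >> PAGE_SHIFT) > (act_entries >> tlb_flushall_shift);
}

int main(void)
{
	/* 8 pages is under 512 >> 5 = 16, so flush page by page */
	printf("8 pages  -> %s\n",
	       want_full_flush(0, 8 * PAGE_SIZE) ? "flush all" : "invlpg loop");
	/* 64 pages exceeds the threshold, so one global flush wins */
	printf("64 pages -> %s\n",
	       want_full_flush(0, 64 * PAGE_SIZE) ? "flush all" : "invlpg loop");
	return 0;
}

With these placeholder numbers the crossover sits at 16 pages; in the
kernel, act_entries comes from the detected TLB geometry, and the
CONFIG_DEBUG_TLBFLUSH file visible in the tlb.c hunk context exposes
tlb_flushall_shift for tuning.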