From 9b5757bdc7635ee22d7442f156f15d67210386d7 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 16 Nov 2020 14:17:38 +0800
Subject: [PATCH] asm-generic/tlb: Remove tlb_flush_mmu_free()

mainline inclusion
from mainline-v5.2-rc1
commit fa0aafb8acb684e68231ff0a547ed249f8dc31a5
category: feature
bugzilla: NA
CVE: NA

-------------------

As the comment notes; it is a potentially dangerous operation. Just use
tlb_flush_mmu(), that will skip the (double) TLB invalidate if it really
isn't needed anyway.

No change in behavior intended.

Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Will Deacon
Cc: Andrew Morton
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Dave Hansen
Cc: H. Peter Anvin
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Rik van Riel
Cc: Thomas Gleixner
Signed-off-by: Ingo Molnar
Signed-off-by: Chen Jun
Reviewed-by: Hanjun Guo
Signed-off-by: Yang Yingliang
---
 include/asm-generic/tlb.h | 10 +++-------
 mm/memory.c               |  2 +-
 mm/mmu_gather.c           |  2 +-
 3 files changed, 5 insertions(+), 9 deletions(-)

diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index ef1c077dd07e..6998e6d7c728 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -66,16 +66,13 @@
  *    call before __tlb_remove_page*() to set the current page-size; implies a
  *    possible tlb_flush_mmu() call.
  *
- *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly() / tlb_flush_mmu_free()
+ *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
  *
  *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
  *                              related state, like the range)
  *
- *    tlb_flush_mmu_free() - frees the queued pages; make absolutely
- *                           sure no additional tlb_remove_page()
- *                           calls happen between _tlbonly() and this.
- *
- *    tlb_flush_mmu() - the above two calls.
+ *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
+ *                      whatever pages are still batched.
  *
  *  - mmu_gather::fullmm
  *
@@ -266,7 +263,6 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb,
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void arch_tlb_finish_mmu(struct mmu_gather *tlb,
 			 unsigned long start, unsigned long end, bool force);
-void tlb_flush_mmu_free(struct mmu_gather *tlb);
 
 static inline void __tlb_adjust_range(struct mmu_gather *tlb,
 				      unsigned long address,
diff --git a/mm/memory.c b/mm/memory.c
index 85dd6fb68be1..7503203c8436 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1168,7 +1168,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	 */
 	if (force_flush) {
 		force_flush = 0;
-		tlb_flush_mmu_free(tlb);
+		tlb_flush_mmu(tlb);
 		if (addr != end)
 			goto again;
 	}
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index cac2eb492ee9..b5d53f170a00 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -91,7 +91,7 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
 
 #endif /* HAVE_MMU_GATHER_NO_GATHER */
 
-void tlb_flush_mmu_free(struct mmu_gather *tlb)
+static void tlb_flush_mmu_free(struct mmu_gather *tlb)
 {
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb_table_flush(tlb);
-- 
GitLab
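
For context, a minimal sketch of how the surviving entry points relate,
condensed from mm/mmu_gather.c and include/asm-generic/tlb.h as they look
after this patch (abbreviated, not verbatim source): tlb_flush_mmu() is the
composition of the invalidate step and the free step, and the invalidate
step returns early when no range is pending.

/*
 * Sketch only -- condensed from the post-patch sources; the body of
 * tlb_flush_mmu_tlbonly() is abbreviated.
 */
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	if (!tlb->end)			/* nothing queued: skip the invalidate */
		return;
	tlb_flush(tlb);			/* arch-specific TLB invalidate */
	__tlb_reset_range(tlb);		/* reset range, so a repeat call no-ops */
}

void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);	/* invalidate only if something is pending */
	tlb_flush_mmu_free(tlb);	/* then free whatever pages are still batched */
}

This is also why the substitution in zap_pte_range() cannot double-flush:
the force_flush path has already invalidated (and reset the range) under
the page-table lock via tlb_flush_mmu_tlbonly(), so the invalidate inside
tlb_flush_mmu() sees tlb->end == 0 and returns immediately -- the "skip
the (double) TLB invalidate" mentioned in the commit message.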