diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 75e888b3cfd23fe06d90e7ad23b2046c3df30259..ed6642ad03e073fcd5a4d35d02ba9f6bcb5221b0 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -86,6 +86,8 @@ struct mmu_gather {
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	struct mmu_table_batch	*batch;
 #endif
+	unsigned long		start;
+	unsigned long		end;
 	unsigned int		need_flush : 1,	/* Did free PTEs */
 				fast_mode  : 1; /* No batching   */
 
diff --git a/mm/memory.c b/mm/memory.c
index 1b7dc662bf9f229063cb3e7b97e8e4c22147b92b..32c99433cfdf7e9c2c5eb9d6ec9ac27350470e16 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -206,6 +206,8 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
 	tlb->mm = mm;
 
 	tlb->fullmm     = fullmm;
+	tlb->start	= -1UL;
+	tlb->end	= 0;
 	tlb->need_flush = 0;
 	tlb->fast_mode  = (num_possible_cpus() == 1);
 	tlb->local.next = NULL;
@@ -248,6 +250,8 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
 {
 	struct mmu_gather_batch *batch, *next;
 
+	tlb->start = start;
+	tlb->end   = end;
 	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */
@@ -1204,6 +1208,11 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	 */
 	if (force_flush) {
 		force_flush = 0;
+
+#ifdef HAVE_GENERIC_MMU_GATHER
+		tlb->start = addr;
+		tlb->end = end;
+#endif
 		tlb_flush_mmu(tlb);
 		if (addr != end)
 			goto again;
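
The new start/end fields record the virtual address range actually covered by the gathered pages, so an architecture's tlb_flush() can flush just that range instead of the whole mm. A minimal sketch of how such a range-aware flush might consume these fields, assuming a flush_tlb_mm_range() helper for illustration (the real per-architecture interface differs):

	/*
	 * Sketch only, not part of this patch: a range-aware tlb_flush()
	 * built on the new tlb->start/tlb->end fields.  flush_tlb_mm_range()
	 * is assumed here for illustration; real architectures differ.
	 */
	static inline void tlb_flush(struct mmu_gather *tlb)
	{
		if (tlb->fullmm || tlb->start > tlb->end) {
			/* Full address-space teardown, or no range recorded. */
			flush_tlb_mm(tlb->mm);
		} else {
			/* Only the gathered [start, end) range needs flushing. */
			flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
		}
	}

Because tlb_gather_mmu() initializes start to -1UL and end to 0, a start greater than end means no range was recorded and the sketch falls back to a conservative whole-mm flush.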