diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index edeb2adaf31f2d2554871c61c71c4bcdd964d411..255fc631b042d7ec335bfeec77045c2593a38915 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -214,26 +214,6 @@ void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
         }
 }
 
-static void vmalloc_sync(void)
-{
-        unsigned long address;
-
-        if (SHARED_KERNEL_PMD)
-                return;
-
-        arch_sync_kernel_mappings(VMALLOC_START, VMALLOC_END);
-}
-
-void vmalloc_sync_mappings(void)
-{
-        vmalloc_sync();
-}
-
-void vmalloc_sync_unmappings(void)
-{
-        vmalloc_sync();
-}
-
 /*
  * 32-bit:
  *
@@ -336,23 +316,6 @@ static void dump_pagetable(unsigned long address)
 
 #else /* CONFIG_X86_64: */
 
-void vmalloc_sync_mappings(void)
-{
-        /*
-         * 64-bit mappings might allocate new p4d/pud pages
-         * that need to be propagated to all tasks' PGDs.
-         */
-        sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
-}
-
-void vmalloc_sync_unmappings(void)
-{
-        /*
-         * Unmappings never allocate or free p4d/pud pages.
-         * No work is required here.
-         */
-}
-
 /*
  * 64-bit:
  *
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 24c9642e8fc70d272f71b083f703eb1a1095e564..aabe9c5ee5154e6fd6da10ba6d6daccf608b9e68 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -167,12 +167,6 @@ int ghes_estatus_pool_init(int num_ghes)
         if (!addr)
                 goto err_pool_alloc;
 
-        /*
-         * New allocation must be visible in all pgd before it can be found by
-         * an NMI allocating from the pool.
-         */
-        vmalloc_sync_mappings();
-
         rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
         if (rc)
                 goto err_pool_add;
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 0efc35dc5b258d80f79e3d258be0b93046428395..48bb681e6c2aeda4ef78b7c263cded21217d52bb 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -130,8 +130,6 @@ extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
 extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
                                                         unsigned long pgoff);
 
-void vmalloc_sync_mappings(void);
-void vmalloc_sync_unmappings(void);
 
 /*
  * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
diff --git a/kernel/notifier.c b/kernel/notifier.c
index 5989bbb93039e61da64a4b260c10b86f4e7349fd..84c987dfbe0360a406d79a0036c1d82dc35f5d53 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -519,7 +519,6 @@ NOKPROBE_SYMBOL(notify_die);
 
 int register_die_notifier(struct notifier_block *nb)
 {
-        vmalloc_sync_mappings();
         return atomic_notifier_chain_register(&die_chain, nb);
 }
 EXPORT_SYMBOL_GPL(register_die_notifier);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 29615f15a820b2f67d66af8084bfcd13936ed9c3..f12e99b387b254e5e154d133fd5b350c47f2fd10 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -8527,18 +8527,6 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
         allocate_snapshot = false;
 #endif
 
-        /*
-         * Because of some magic with the way alloc_percpu() works on
-         * x86_64, we need to synchronize the pgd of all the tables,
-         * otherwise the trace events that happen in x86_64 page fault
-         * handlers can't cope with accessing the chance that a
-         * alloc_percpu()'d memory might be touched in the page fault trace
-         * event. Oh, and we need to audit all other alloc_percpu() and vmalloc()
-         * calls in tracing, because something might get triggered within a
-         * page fault trace event!
-         */
-        vmalloc_sync_mappings();
-
         return 0;
 }
 
diff --git a/mm/nommu.c b/mm/nommu.c
index 371697bf372daaebc3b65506eaedd565d0e3971c..dfae55f41901374d3aa1db9a70ba415e237d1293 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -371,18 +371,6 @@ void vm_unmap_aliases(void)
 }
 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
 
-/*
- * Implement a stub for vmalloc_sync_[un]mapping() if the architecture
- * chose not to have one.
- */
-void __weak vmalloc_sync_mappings(void)
-{
-}
-
-void __weak vmalloc_sync_unmappings(void)
-{
-}
-
 struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
 {
         BUG();
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 154e3396154cdb3a9dd35d6bf07267ee27e98983..1e94497b738853a08d383878fa5e8261dba18a26 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1353,12 +1353,6 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
         if (unlikely(valist == NULL))
                 return false;
 
-        /*
-         * First make sure the mappings are removed from all page-tables
-         * before they are freed.
-         */
-        vmalloc_sync_unmappings();
-
         /*
          * TODO: to calculate a flush range without looping.
          * The list can be up to lazy_max_pages() elements.
@@ -3089,21 +3083,6 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 }
 EXPORT_SYMBOL(remap_vmalloc_range);
 
-/*
- * Implement stubs for vmalloc_sync_[un]mappings () if the architecture chose
- * not to have one.
- *
- * The purpose of this function is to make sure the vmalloc area
- * mappings are identical in all page-tables in the system.
- */
-void __weak vmalloc_sync_mappings(void)
-{
-}
-
-void __weak vmalloc_sync_unmappings(void)
-{
-}
-
 static int f(pte_t *pte, unsigned long addr, void *data)
 {
         pte_t ***p = data;
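
Note (not part of the patch): with vmalloc_sync_mappings()/vmalloc_sync_unmappings() removed, kernel page-table synchronization is driven by the core vmalloc/ioremap code through the arch_sync_kernel_mappings() hook and the ARCH_PAGE_TABLE_SYNC_MASK of PGTBL_P?D_MODIFIED bits referenced in the hunks above. A minimal sketch of how an architecture opts in is shown below; the architecture "foo" and its foo_sync_one_pgd() helper are hypothetical placeholders, not code from this series.

/* arch/foo/include/asm/pgtable.h (hypothetical) */
#define ARCH_PAGE_TABLE_SYNC_MASK	(PGTBL_PMD_MODIFIED | PGTBL_PUD_MODIFIED)

/* arch/foo/mm/fault.c (hypothetical) */
#include <linux/vmalloc.h>

/*
 * Called by the core mm code after it modified a kernel page-table level
 * named in ARCH_PAGE_TABLE_SYNC_MASK, so that the change can be copied
 * into the kernel portion of every process's PGD.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
        unsigned long addr;

        for (addr = start; addr < end; addr += PMD_SIZE)
                foo_sync_one_pgd(addr);	/* hypothetical per-arch helper */
}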