diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 2f1b7217c25cf638ffbaee6e14117b1c614889e5..0e64f08d3d694516fd7664a7c61933949222a476 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -43,13 +43,13 @@ static int machine_kdump_pm_cb(struct notifier_block *nb, unsigned long action,
 	switch (action) {
 	case PM_SUSPEND_PREPARE:
 	case PM_HIBERNATION_PREPARE:
-		if (crashk_res.start)
-			crash_map_reserved_pages();
+		if (kexec_crash_image)
+			arch_kexec_unprotect_crashkres();
 		break;
 	case PM_POST_SUSPEND:
 	case PM_POST_HIBERNATION:
-		if (crashk_res.start)
-			crash_unmap_reserved_pages();
+		if (kexec_crash_image)
+			arch_kexec_protect_crashkres();
 		break;
 	default:
 		return NOTIFY_DONE;
@@ -60,6 +60,8 @@ static int machine_kdump_pm_cb(struct notifier_block *nb, unsigned long action,
 static int __init machine_kdump_pm_init(void)
 {
 	pm_notifier(machine_kdump_pm_cb, 0);
+	/* Create initial mapping for crashkernel memory */
+	arch_kexec_unprotect_crashkres();
 	return 0;
 }
 arch_initcall(machine_kdump_pm_init);
@@ -146,6 +148,8 @@ static int kdump_csum_valid(struct kimage *image)
 #endif
 }
 
+#ifdef CONFIG_CRASH_DUMP
+
 /*
  * Map or unmap crashkernel memory
  */
@@ -167,21 +171,25 @@ static void crash_map_pages(int enable)
 }
 
 /*
- * Map crashkernel memory
+ * Unmap crashkernel memory
  */
-void crash_map_reserved_pages(void)
+void arch_kexec_protect_crashkres(void)
 {
-	crash_map_pages(1);
+	if (crashk_res.end)
+		crash_map_pages(0);
 }
 
 /*
- * Unmap crashkernel memory
+ * Map crashkernel memory
  */
-void crash_unmap_reserved_pages(void)
+void arch_kexec_unprotect_crashkres(void)
 {
-	crash_map_pages(0);
+	if (crashk_res.end)
+		crash_map_pages(1);
 }
 
+#endif
+
 /*
  * Give back memory to hypervisor before new kdump is loaded
  */
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 643ff4a3fbf6b3535d20bd86b43850fefa091e02..e8acb2b43dd918f1ee55f6c612e9af425c9b84d4 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -230,8 +230,6 @@ extern void crash_kexec(struct pt_regs *);
 int kexec_should_crash(struct task_struct *);
 void crash_save_cpu(struct pt_regs *regs, int cpu);
 void crash_save_vmcoreinfo(void);
-void crash_map_reserved_pages(void);
-void crash_unmap_reserved_pages(void);
 void arch_crash_save_vmcoreinfo(void);
 __printf(1, 2)
 void vmcoreinfo_append_str(const char *fmt, ...);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index b73dc211fcfdb72c9e4c6c9b6f5648f09701d1f4..4384672d324516128557e4f7b29637a4cca1df30 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -136,9 +136,6 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
 	if (ret)
 		return ret;
 
-	if (flags & KEXEC_ON_CRASH)
-		crash_map_reserved_pages();
-
 	if (flags & KEXEC_PRESERVE_CONTEXT)
 		image->preserve_context = 1;
 
@@ -161,12 +158,6 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
 	if ((flags & KEXEC_ON_CRASH) && kexec_crash_image)
 		arch_kexec_protect_crashkres();
 
-	/*
-	 * Once the reserved memory is mapped, we should unmap this memory
-	 * before returning
-	 */
-	if (flags & KEXEC_ON_CRASH)
-		crash_unmap_reserved_pages();
 	kimage_free(image);
 	return ret;
 }
@@ -232,9 +223,6 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
 
 	result = do_kexec_load(entry, nr_segments, segments, flags);
 
-	if ((flags & KEXEC_ON_CRASH) && kexec_crash_image)
-		arch_kexec_protect_crashkres();
-
 	mutex_unlock(&kexec_mutex);
 
 	return result;
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 48b73cc8e42508933dc4cb813c3eb77f87098a8f..56b3ed0927b0cd9ec7d91045069f94076cdfc1be 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -954,7 +954,6 @@ int crash_shrink_memory(unsigned long new_size)
 	start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
 	end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);
 
-	crash_map_reserved_pages();
 	crash_free_reserved_phys_range(end, crashk_res.end);
 
 	if ((start == end) && (crashk_res.parent != NULL))
@@ -968,7 +967,6 @@ int crash_shrink_memory(unsigned long new_size)
 	crashk_res.end = end - 1;
 
 	insert_resource(&iomem_resource, ram_res);
-	crash_unmap_reserved_pages();
 
 unlock:
 	mutex_unlock(&kexec_mutex);
@@ -1553,17 +1551,12 @@ int kernel_kexec(void)
 }
 
 /*
- * Add and remove page tables for crashkernel memory
+ * Protection mechanism for crashkernel reserved memory after
+ * the kdump kernel is loaded.
  *
  * Provide an empty default implementation here -- architecture
  * code may override this
  */
-void __weak crash_map_reserved_pages(void)
-{}
-
-void __weak crash_unmap_reserved_pages(void)
-{}
-
 void __weak arch_kexec_protect_crashkres(void)
 {}
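
The weak arch_kexec_protect_crashkres()/arch_kexec_unprotect_crashkres() hooks kept at the end of kexec_core.c are per-architecture override points; the s390 hunks above implement them by mapping and unmapping the crashkernel region through crash_map_pages(). As a rough illustration of what an override on another architecture could look like (a hypothetical sketch, not part of this patch; it assumes set_memory_ro()/set_memory_rw() can be applied to the crashkernel range), the hooks might simply toggle write access:

/*
 * Hypothetical sketch, not part of this patch: an architecture whose
 * linear mapping supports permission changes could override the weak
 * hooks by keeping the loaded crash kernel read-only while it is armed.
 * Assumes set_memory_ro()/set_memory_rw() work on the crashkernel range.
 */
#include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/set_memory.h>

void arch_kexec_protect_crashkres(void)
{
	int nr_pages = resource_size(&crashk_res) >> PAGE_SHIFT;

	/* Write-protect the crashkernel region once a kdump kernel is loaded. */
	set_memory_ro((unsigned long)__va(crashk_res.start), nr_pages);
}

void arch_kexec_unprotect_crashkres(void)
{
	int nr_pages = resource_size(&crashk_res) >> PAGE_SHIFT;

	/* Re-enable writes so a new kdump kernel can be loaded or the region shrunk. */
	set_memory_rw((unsigned long)__va(crashk_res.start), nr_pages);
}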