Unverified commit c098198d, authored by openeuler-ci-bot, committed by Gitee

!766 LoongArch: add kexec&kdump support

Merge Pull Request from: @Hongchen_Zhang 
 


```
This series of patches adds kexec/kdump support for the LoongArch
architecture.

The production kernel and the capture kernel can be the same binary.
This relies on kernel relocation, so the kernel relocation and KASLR
features are also added in this series.

To keep the feature usable on machines that follow the old interface
specification, compatibility with that specification has been added
as well.

Manual command line test:
kexec:
 $ sudo kexec -l vmlinuz --reuse-cmdline --initrd=initrd
 $ sudo kexec -e

kdump:
Add the crashkernel=512M parameter to the kernel command line in grub.cfg, then:
 $ sudo kexec -p vmlinuz --reuse-cmdline --initrd=initrd
 # echo c > /proc/sysrq-trigger


kdump service mode test:
kexec:
 $ sudo kexec -l vmlinuz --reuse-cmdline --initrd=initrd
 $ sudo kexec -e

kdump:
Add the crashkernel=512M parameter to the kernel command line in grub.cfg, then:
 $ sudo systemctl enable kdump
 $ sudo systemctl restart kdump
 # echo c > /proc/sysrq-trigger

```
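For reference, the commands below sketch how the crashkernel reservation and the resulting dump can be checked once the capture kernel is in place; the grub.cfg kernel line, image names and paths are illustrative and not taken from this patch set.

```
# Illustrative grub.cfg kernel line with the reservation appended
linux /vmlinuz root=/dev/vda2 ro crashkernel=512M

# Confirm the region was reserved by the running production kernel
$ grep -i "crash kernel" /proc/iomem

# After a crash, the capture kernel exposes the old kernel's memory as /proc/vmcore;
# kdump tooling normally saves it under a path such as /var/crash/
$ sudo cp /proc/vmcore /var/crash/vmcore
```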
 
 
Link: https://gitee.com/openeuler/kernel/pulls/766

Reviewed-by: Guo Dongtai <guodongtai@kylinos.cn> 
Reviewed-by: Jialin Zhang <zhangjialin11@huawei.com> 
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com> 
...@@ -463,6 +463,62 @@ config ARCH_IOREMAP
protection support. However, you can enable LoongArch DMW-based
ioremap() for better performance.
config KEXEC
bool "Kexec system call"
select KEXEC_CORE
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
but it is independent of the system firmware. And like a reboot
you can start any kernel with it, not just Linux.
The name comes from the similarity to the exec system call.
config CRASH_DUMP
bool "Build kdump crash kernel"
select RELOCATABLE
help
Generate crash dump after being started by kexec. This should
be normally only set in special crash dump kernels which are
loaded in the main kernel with kexec-tools into a specially
reserved region and then later executed after a crash by
kdump/kexec.
For more details see Documentation/admin-guide/kdump/kdump.rst
config RELOCATABLE
bool "Relocatable kernel"
help
This builds the kernel as a Position Independent Executable (PIE),
which retains all relocation metadata required, so as to relocate
the kernel binary at runtime to a different virtual address from
its link address.
config RANDOMIZE_BASE
bool "Randomize the address of the kernel (KASLR)"
depends on RELOCATABLE
help
Randomizes the physical and virtual address at which the
kernel image is loaded, as a security feature that
deters exploit attempts relying on knowledge of the location
of kernel internals.
The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET.
If unsure, say N.
config RANDOMIZE_BASE_MAX_OFFSET
hex "Maximum KASLR offset" if EXPERT
depends on RANDOMIZE_BASE
range 0x0 0x10000000
default "0x01000000"
help
When KASLR is active, this provides the maximum offset that will
be applied to the kernel image. It should be set according to the
amount of physical RAM available in the target system.
This is limited by the size of the lower address memory, 256MB.
config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
depends on PROC_FS
......
...@@ -63,6 +63,11 @@ KBUILD_AFLAGS_MODULE += -Wa,-mla-global-with-abs
KBUILD_CFLAGS_MODULE += -fplt -Wa,-mla-global-with-abs,-mla-local-with-abs
endif
ifeq ($(CONFIG_RELOCATABLE),y)
KBUILD_CFLAGS_KERNEL += -fPIE
LDFLAGS_vmlinux += -static -pie --no-dynamic-linker -z notext
endif
cflags-y += -ffreestanding
cflags-y += $(call cc-option, -mno-check-zero-division)
......
...@@ -38,6 +38,8 @@ CONFIG_PERF_EVENTS=y
CONFIG_CPU_HAS_LSX=y
CONFIG_CPU_HAS_LASX=y
CONFIG_NUMA=y
CONFIG_KEXEC=y
CONFIG_CRASH_DUMP=y
CONFIG_HIBERNATION=y
CONFIG_ACPI_SPCR_TABLE=y
CONFIG_ACPI_DOCK=y
......
...@@ -126,4 +126,6 @@ extern unsigned long vm_map_base;
#define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
#define ISA_PHY_IOBASE LOONGSON_LIO_BASE
#define PHYS_LINK_KADDR PHYSADDR(VMLINUX_LOAD_ADDRESS)
#endif /* _ASM_ADDRSPACE_H */
...@@ -898,4 +898,21 @@
nor \dst, \src, zero
.endm
.macro la_abs reg, sym
#ifndef CONFIG_RELOCATABLE
la.abs \reg, \sym
#else
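/* Placeholder immediates; patched at boot by relocate_la_abs() using the .la_abs entry emitted below. */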
766:
lu12i.w \reg, 0
ori \reg, \reg, 0
lu32i.d \reg, 0
lu52i.d \reg, \reg, 0
.pushsection ".la_abs", "aw", %progbits
768:
.dword 768b-766b
.dword \sym
.popsection
#endif
.endm
#endif /* _ASM_ASMMACRO_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* kexec.h for kexec
*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_KEXEC_H
#define _ASM_KEXEC_H
#include <asm/stacktrace.h>
#include <asm/page.h>
/* Maximum physical address we can use pages from */
#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
/* Reserve a page for the control code buffer */
#define KEXEC_CONTROL_PAGE_SIZE PAGE_SIZE
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_LOONGARCH
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs)
{
if (oldregs)
memcpy(newregs, oldregs, sizeof(*newregs));
else
prepare_frametrace(newregs);
}
#define ARCH_HAS_KIMAGE_ARCH
struct kimage_arch {
unsigned long efi_boot;
unsigned long cmdline_ptr;
unsigned long systable_ptr;
};
typedef void (*do_kexec_t)(unsigned long efi_boot,
unsigned long cmdline_ptr,
unsigned long systable_ptr,
unsigned long start_addr,
unsigned long first_ind_entry);
struct kimage;
extern const unsigned char relocate_new_kernel[];
extern const size_t relocate_new_kernel_size;
extern void kexec_reboot(void);
#ifdef CONFIG_SMP
extern atomic_t kexec_ready_to_reboot;
extern const unsigned char kexec_smp_wait[];
#endif
#endif /* !_ASM_KEXEC_H */
...@@ -18,4 +18,20 @@ extern void per_cpu_trap_init(int cpu);
extern void set_handler(unsigned long offset, void *addr, unsigned long len);
extern void set_merr_handler(unsigned long offset, void *addr, unsigned long len);
#ifdef CONFIG_RELOCATABLE
struct rela_la_abs {
long offset;
long symvalue;
};
extern long __la_abs_begin;
extern long __la_abs_end;
extern long __rela_dyn_begin;
extern long __rela_dyn_end;
extern void * __init relocate_kernel(void);
#endif
#endif /* __SETUP_H */
...@@ -10,6 +10,7 @@
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/asm-offsets.h>
#include <asm/addrspace.h>
#include <asm/loongarch.h>
#include <asm/thread_info.h>
...@@ -36,6 +37,14 @@
cfi_restore \reg \offset \docfi
.endm
/* Jump to the runtime virtual address. */
.macro JUMP_VIRT_ADDR temp1 temp2
li.d \temp1, CACHE_BASE
pcaddi \temp2, 0
or \temp1, \temp1, \temp2
jirl zero, \temp1, 0xc
.endm
.macro BACKUP_T0T1
csrwr t0, EXCEPTION_KS0
csrwr t1, EXCEPTION_KS1
...@@ -77,7 +86,7 @@
* new value in sp.
*/
.macro get_saved_sp docfi=0
la.abs t1, kernelsp la_abs t1, kernelsp
#ifdef CONFIG_SMP
csrrd t0, PERCPU_BASE_KS
LONG_ADD t1, t1, t0
...@@ -90,7 +99,7 @@
.endm
.macro set_saved_sp stackp temp temp2
la.abs \temp, kernelsp la.pcrel \temp, kernelsp
#ifdef CONFIG_SMP
LONG_ADD \temp, \temp, u0
#endif
......
...@@ -21,7 +21,6 @@
extern u64 __ua_limit;
#define __UA_ADDR ".dword"
#define __UA_LA "la.abs"
#define __UA_LIMIT __ua_limit
/*
......
...@@ -26,6 +26,11 @@ obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
obj-$(CONFIG_RELOCATABLE) += relocate.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o
obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o
......
// SPDX-License-Identifier: GPL-2.0
#include <linux/crash_dump.h>
#include <linux/io.h>
#include <linux/uio.h>
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
size_t csize, unsigned long offset, int userbuf)
{
void *vaddr;
if (!csize)
return 0;
vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB);
if (!vaddr)
return -ENOMEM;
if (!userbuf) {
memcpy(buf, vaddr + offset, csize);
} else {
if (copy_to_user(buf, vaddr + offset, csize)) {
memunmap(vaddr);
csize = -EFAULT;
}
}
memunmap(vaddr);
return csize;
}
...@@ -34,7 +34,7 @@ SYM_FUNC_END(__arch_cpu_idle)
SYM_FUNC_START(handle_vint)
BACKUP_T0T1
SAVE_ALL
la.abs t1, __arch_cpu_idle la_abs t1, __arch_cpu_idle
LONG_L t0, sp, PT_ERA
/* 32 byte rollback region */
ori t0, t0, 0x1f
...@@ -43,7 +43,7 @@ SYM_FUNC_START(handle_vint)
LONG_S t0, sp, PT_ERA
1: move a0, sp
move a1, sp
la.abs t0, do_vint la_abs t0, do_vint
jirl ra, t0, 0
RESTORE_ALL_AND_RET
SYM_FUNC_END(handle_vint)
...@@ -71,7 +71,7 @@ SYM_FUNC_END(except_vec_cex)
SAVE_ALL
build_prep_\prep
move a0, sp
la.abs t0, do_\handler la_abs t0, do_\handler
jirl ra, t0, 0
RESTORE_ALL_AND_RET
SYM_FUNC_END(handle_\exception)
...@@ -90,6 +90,6 @@ SYM_FUNC_END(except_vec_cex)
BUILD_HANDLER reserved reserved none /* others */
SYM_FUNC_START(handle_sys)
la.abs t0, handle_syscall la_abs t0, handle_syscall
jr t0
SYM_FUNC_END(handle_sys)
...@@ -20,12 +20,17 @@
_head:
.word MZ_MAGIC /* "MZ", MS-DOS header */
.org 0x3c /* 0x04 ~ 0x3b reserved */ .org 0x8
.dword kernel_entry /* Kernel entry point */
.dword _end - _text /* Kernel image effective size */
.quad PHYS_LINK_KADDR /* Kernel image load offset from start of RAM */
.org 0x3c /* 0x20 ~ 0x3b reserved */
.long pe_header - _head /* Offset to the PE header */
pe_header:
__EFI_PE_HEADER
SYM_DATA(kernel_entry_rel, .quad kernel_entry);
SYM_DATA(kernel_asize, .long _end - _text);
SYM_DATA(kernel_fsize, .long _edata - _text);
SYM_DATA(kernel_offset, .long kernel_offset - _text);
...@@ -43,11 +48,8 @@ SYM_CODE_START(kernel_entry) # kernel entry point
li.d t0, CSR_DMW1_INIT # CA, PLV0, 0x9000 xxxx xxxx xxxx
csrwr t0, LOONGARCH_CSR_DMWIN1
/* We might not get launched at the address the kernel is linked to, JUMP_VIRT_ADDR t0, t1
so we jump there. */
la.abs t0, 0f
jr t0
0:
/* Enable PG */
li.w t0, 0xb0 # PLV=0, IE=0, PG=1
csrwr t0, LOONGARCH_CSR_CRMD
...@@ -83,6 +85,22 @@ SYM_CODE_START(kernel_entry) # kernel entry point
set_saved_sp sp, t0, t1
PTR_ADDI sp, sp, -4 * SZREG # init stack pointer
#ifdef CONFIG_RELOCATABLE
bl relocate_kernel
#ifdef CONFIG_RANDOMIZE_BASE
/* Repoint the sp into the new kernel */
PTR_LI sp, (_THREAD_SIZE - 32 - PT_SIZE)
PTR_ADD sp, sp, tp
set_saved_sp sp, t0, t1
#endif
/* relocate_kernel() returns the new kernel entry point */
jr a0
#endif
bl start_kernel
SYM_CODE_END(kernel_entry)
...@@ -99,9 +117,8 @@ SYM_CODE_START(smpboot_entry)
li.d t0, CSR_DMW1_INIT # CA, PLV0
csrwr t0, LOONGARCH_CSR_DMWIN1
la.abs t0, 0f JUMP_VIRT_ADDR t0, t1
jr t0
0:
/* Enable PG */
li.w t0, 0xb0 # PLV=0, IE=0, PG=1
csrwr t0, LOONGARCH_CSR_CRMD
...@@ -110,7 +127,7 @@ SYM_CODE_START(smpboot_entry)
li.w t0, 0x00 # FPE=0, SXE=0, ASXE=0, BTE=0
csrwr t0, LOONGARCH_CSR_EUEN
la.abs t0, cpuboot_data la.pcrel t0, cpuboot_data
ld.d sp, t0, CPU_BOOT_STACK
ld.d tp, t0, CPU_BOOT_TINFO
......
...@@ -16,7 +16,7 @@ __efistub_strncat = strncat;
__efistub_strnstr = strnstr;
__efistub_strnlen = strnlen;
__efistub_strrchr = strrchr;
__efistub_kernel_entry = kernel_entry; __efistub_kernel_entry = kernel_entry_rel;
__efistub_kernel_asize = kernel_asize;
__efistub_kernel_fsize = kernel_fsize;
__efistub_kernel_offset = kernel_offset;
......
...@@ -303,79 +303,9 @@ int setup_legacy_IRQ(void)
* Manage initrd
*/
#ifdef CONFIG_BLK_DEV_INITRD
static unsigned long init_initrd(unsigned long ps, unsigned long z)
{
static int initalized;
if (!ps || !z)
return 0;
initrd_start = (unsigned long)__va(ps);
initrd_end = initrd_start + z;
/*
* Board specific code or command line parser should have
* already set up initrd_start and initrd_end. In these cases
* perfom sanity checks and use them if all looks good.
*/
if (initrd_start < PAGE_OFFSET || initrd_end <= initrd_start) {
pr_err("initrd start load address error!");
goto disable;
}
if (initrd_start & ~PAGE_MASK) {
pr_err("initrd start must be page aligned\n");
goto disable;
}
memblock_reserve(__pa(initrd_start), z);
initrd_below_start_ok = 1;
if (!initalized)
pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
initrd_start, z);
initalized = 1;
return 0;
disable:
printk(KERN_CONT " - disabling initrd\n");
initrd_start = 0;
initrd_end = 0;
return 0;
}
static int early_initrd(char *p)
{
unsigned long start, size;
char *endp;
if (!efi_bp)
return 0;
start = memparse(p, &endp);
if (*endp == ',')
size = memparse(endp + 1, NULL);
if (start + size > PFN_PHYS(max_low_pfn)) {
pr_err(KERN_INFO "Initrd physical address is out of memory!");
return 0;
}
init_initrd(start, size);
return 0;
}
early_param("initrd", early_initrd);
static int rd_start_early(char *p)
{
unsigned long start; phys_initrd_start = __pa(memparse(p, &p));
if (!efi_bp)
return 0;
start = memparse(p, &p);
initrd_start = start;
initrd_end += start;
init_initrd(__pa(start), initrd_end - start);
return 0;
}
...@@ -383,24 +313,21 @@ early_param("rd_start", rd_start_early);
static int rd_size_early(char *p)
{
unsigned long size; phys_initrd_size = memparse(p, &p);
if (!efi_bp)
return 0;
size = memparse(p, &p);
initrd_end += size;
init_initrd(__pa(initrd_start), size);
return 0;
}
early_param("rd_size", rd_size_early);
#endif
#else /* !CONFIG_BLK_DEV_INITRD */ void __init loongarch_reserve_initrd_mem(void)
static unsigned long init_initrd(void)
{
return 0; /* The small fdt method should be skipped directly to avoid two reserved operations. */
if (!fw_arg2)
return;
reserve_initrd_mem();
}
#endif
void fw_init_cmdline(unsigned long argc, unsigned long cmdp)
{
...@@ -417,6 +344,7 @@ void fw_init_cmdline(unsigned long argc, unsigned long cmdp)
}
strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
}
EXPORT_SYMBOL_GPL(fw_init_cmdline);
static u8 ext_listhdr_checksum(u8 *buffer, u32 length)
{
......
...@@ -87,4 +87,6 @@ extern int __init
pch_msi_parse_madt(union acpi_subtable_headers *header,
const unsigned long end);
extern struct irq_domain *get_pchpic_irq_domain(void);
extern void __init loongarch_reserve_initrd_mem(void);
#endif
// SPDX-License-Identifier: GPL-2.0-only
/*
* machine_kexec.c for kexec
*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/libfdt.h>
#include <linux/mm.h>
#include <linux/of_fdt.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <asm/bootinfo.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
/* 0x100000 ~ 0x200000 is safe */
#define KEXEC_CONTROL_CODE TO_CACHE(0x100000UL)
#define KEXEC_CMDLINE_ADDR TO_CACHE(0x108000UL)
static unsigned long reboot_code_buffer;
static cpumask_t cpus_in_crash = CPU_MASK_NONE;
#ifdef CONFIG_SMP
static void (*relocated_kexec_smp_wait)(void *);
atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
#endif
static unsigned long efi_boot;
static unsigned long cmdline_ptr;
static unsigned long systable_ptr;
static unsigned long start_addr;
static unsigned long first_ind_entry;
static void kexec_image_info(const struct kimage *kimage)
{
unsigned long i;
pr_debug("kexec kimage info:\n");
pr_debug("\ttype: %d\n", kimage->type);
pr_debug("\tstart: %lx\n", kimage->start);
pr_debug("\thead: %lx\n", kimage->head);
pr_debug("\tnr_segments: %lu\n", kimage->nr_segments);
for (i = 0; i < kimage->nr_segments; i++) {
pr_debug("\t segment[%lu]: %016lx - %016lx", i,
kimage->segment[i].mem,
kimage->segment[i].mem + kimage->segment[i].memsz);
pr_debug("\t\t0x%lx bytes, %lu pages\n",
(unsigned long)kimage->segment[i].memsz,
(unsigned long)kimage->segment[i].memsz / PAGE_SIZE);
}
}
#define MAX_ARGS 64
#define KEXEC_CMDLINE_SIZE (COMMAND_LINE_SIZE * 2)
int machine_kexec_prepare(struct kimage *kimage)
{
int i;
char *bootloader = "kexec";
void *cmdline_ptr = (void *)KEXEC_CMDLINE_ADDR;
kexec_image_info(kimage);
kimage->arch.efi_boot = fw_arg0;
kimage->arch.systable_ptr = fw_arg2;
if (!fw_arg2)
pr_err("Small fdt mode is not supported!\n");
/* Find the command line */
for (i = 0; i < kimage->nr_segments; i++) {
if (!strncmp(bootloader, (char __user *)kimage->segment[i].buf, strlen(bootloader))) {
if (fw_arg0 < 2) {
/* New firmware */
if (!copy_from_user(cmdline_ptr, kimage->segment[i].buf, COMMAND_LINE_SIZE))
kimage->arch.cmdline_ptr = (unsigned long)cmdline_ptr;
} else {
/* Old firmware */
int argc = 0;
long offt;
char *ptr, *str;
unsigned long *argv;
/*
* convert command line string to array
* of parameters (as bootloader does).
*/
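/* e.g. "vmlinuz console=ttyS0 root=/dev/sda" becomes argc = 3 with argv[] pointing at the NUL-split tokens (values are illustrative only) */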
argv = (unsigned long *)kmalloc(KEXEC_CMDLINE_SIZE, GFP_KERNEL);
argv[argc++] = (unsigned long)(KEXEC_CMDLINE_ADDR + KEXEC_CMDLINE_SIZE/2);
str = (char *)argv + KEXEC_CMDLINE_SIZE/2;
if (copy_from_user(str, kimage->segment[i].buf, KEXEC_CMDLINE_SIZE/2))
return -EINVAL;
ptr = strchr(str, ' ');
while (ptr && (argc < MAX_ARGS)) {
*ptr = '\0';
if (ptr[1] != ' ') {
offt = (long)(ptr - str + 1);
argv[argc++] = (unsigned long)argv + KEXEC_CMDLINE_SIZE/2 + offt;
}
ptr = strchr(ptr + 1, ' ');
}
kimage->arch.efi_boot = argc;
kimage->arch.cmdline_ptr = (unsigned long)argv;
break;
}
break;
}
}
if (!kimage->arch.cmdline_ptr) {
pr_err("Command line not included in the provided image\n");
return -EINVAL;
}
/* kexec/kdump need a safe page to save reboot_code_buffer */
kimage->control_code_page = virt_to_page((void *)KEXEC_CONTROL_CODE);
reboot_code_buffer = (unsigned long)page_address(kimage->control_code_page);
memcpy((void *)reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size);
#ifdef CONFIG_SMP
/* All secondary cpus now may jump to kexec_smp_wait cycle */
relocated_kexec_smp_wait = reboot_code_buffer + (void *)(kexec_smp_wait - relocate_new_kernel);
#endif
return 0;
}
void machine_kexec_cleanup(struct kimage *kimage)
{
}
void kexec_reboot(void)
{
do_kexec_t do_kexec = NULL;
/*
* We know we were online, and there will be no incoming IPIs at
* this point. Mark online again before rebooting so that the crash
* analysis tool will see us correctly.
*/
set_cpu_online(smp_processor_id(), true);
/* Ensure remote CPUs observe that we're online before rebooting. */
smp_mb__after_atomic();
/*
* Make sure we get correct instructions written by the
* machine_kexec_prepare() CPU.
*/
__asm__ __volatile__ ("\tibar 0\n"::);
#ifdef CONFIG_SMP
/* All secondary cpus go to kexec_smp_wait */
if (smp_processor_id() > 0) {
relocated_kexec_smp_wait(NULL);
unreachable();
}
#endif
do_kexec = (void *)reboot_code_buffer;
do_kexec(efi_boot, cmdline_ptr, systable_ptr, start_addr, first_ind_entry);
unreachable();
}
#ifdef CONFIG_SMP
static void kexec_shutdown_secondary(void *regs)
{
int cpu = smp_processor_id();
if (!cpu_online(cpu))
return;
/* We won't be sent IPIs any more. */
set_cpu_online(cpu, false);
local_irq_disable();
while (!atomic_read(&kexec_ready_to_reboot))
cpu_relax();
kexec_reboot();
}
static void crash_shutdown_secondary(void *passed_regs)
{
int cpu = smp_processor_id();
struct pt_regs *regs = passed_regs;
/*
* If we are passed registers, use those. Otherwise get the
* regs from the last interrupt, which should be correct, as
* we are in an interrupt. But if the regs are not there,
* pull them from the top of the stack. They are probably
* wrong, but we need something to keep from crashing again.
*/
if (!regs)
regs = get_irq_regs();
if (!regs)
regs = task_pt_regs(current);
if (!cpu_online(cpu))
return;
/* We won't be sent IPIs any more. */
set_cpu_online(cpu, false);
local_irq_disable();
if (!cpumask_test_cpu(cpu, &cpus_in_crash))
crash_save_cpu(regs, cpu);
cpumask_set_cpu(cpu, &cpus_in_crash);
while (!atomic_read(&kexec_ready_to_reboot))
cpu_relax();
kexec_reboot();
}
void crash_smp_send_stop(void)
{
unsigned int ncpus;
unsigned long timeout;
static int cpus_stopped;
/*
* This function can be called twice in panic path, but obviously
* we should execute this only once.
*/
if (cpus_stopped)
return;
cpus_stopped = 1;
/* Excluding the panic cpu */
ncpus = num_online_cpus() - 1;
smp_call_function(crash_shutdown_secondary, NULL, 0);
smp_wmb();
/*
* The crash CPU sends an IPI and wait for other CPUs to
* respond. Delay of at least 10 seconds.
*/
timeout = MSEC_PER_SEC * 10;
pr_emerg("Sending IPI to other cpus...\n");
while ((cpumask_weight(&cpus_in_crash) < ncpus) && timeout--) {
mdelay(1);
cpu_relax();
}
}
#endif /* defined(CONFIG_SMP) */
void machine_shutdown(void)
{
int cpu;
/* All CPUs go to reboot_code_buffer */
for_each_possible_cpu(cpu)
if (!cpu_online(cpu))
cpu_device_up(get_cpu_device(cpu));
#ifdef CONFIG_SMP
smp_call_function(kexec_shutdown_secondary, NULL, 0);
#endif
}
void machine_crash_shutdown(struct pt_regs *regs)
{
int crashing_cpu;
local_irq_disable();
crashing_cpu = smp_processor_id();
crash_save_cpu(regs, crashing_cpu);
#ifdef CONFIG_SMP
crash_smp_send_stop();
#endif
cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
pr_info("Starting crashdump kernel...\n");
}
void machine_kexec(struct kimage *image)
{
unsigned long entry, *ptr;
struct kimage_arch *internal = &image->arch;
efi_boot = internal->efi_boot;
cmdline_ptr = internal->cmdline_ptr;
systable_ptr = internal->systable_ptr;
start_addr = (unsigned long)phys_to_virt(image->start);
first_ind_entry = (image->type == KEXEC_TYPE_DEFAULT) ?
(unsigned long)phys_to_virt(image->head & PAGE_MASK) : 0;
/*
* The generic kexec code builds a page list with physical
* addresses. they are directly accessible through XKPRANGE
* hence the phys_to_virt() call.
*/
for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
ptr = (entry & IND_INDIRECTION) ?
phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
*ptr & IND_DESTINATION)
*ptr = (unsigned long) phys_to_virt(*ptr);
}
/* Mark offline before disabling local irq. */
set_cpu_online(smp_processor_id(), false);
/* We do not want to be bothered. */
local_irq_disable();
pr_notice("EFI boot flag 0x%lx\n", efi_boot);
pr_notice("Command line at 0x%lx\n", cmdline_ptr);
pr_notice("System table at 0x%lx\n", systable_ptr);
pr_notice("We will call new kernel at 0x%lx\n", start_addr);
pr_notice("Bye ...\n");
/* Make reboot code buffer available to the boot CPU. */
flush_cache_all();
#ifdef CONFIG_SMP
atomic_set(&kexec_ready_to_reboot, 1);
#endif
kexec_reboot();
}
...@@ -80,7 +80,4 @@ void __init memblock_init(void)
/* Reserve the kernel text/data/bss */
memblock_reserve(__pa_symbol(&_text),
__pa_symbol(&_end) - __pa_symbol(&_text));
/* Reserve the initrd */
reserve_initrd_mem();
}
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Kernel relocation at boot time
*
* Copyright (C) 2023 Loongson Technology Corporation Limited
*/
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/start_kernel.h>
#include <asm/bootinfo.h>
#include <asm/early_ioremap.h>
#include <asm/inst.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/of_fdt.h>
#define RELOCATED(x) ((void *)((long)x + reloc_offset))
#define RELOCATED_KASLR(x) ((void *)((long)x + random_offset))
extern void fw_init_cmdline(unsigned long argc, unsigned long cmdp);
static unsigned long reloc_offset;
static inline __init void relocate_relative(void)
{
Elf64_Rela *rela, *rela_end;
rela = (Elf64_Rela *)&__rela_dyn_begin;
rela_end = (Elf64_Rela *)&__rela_dyn_end;
for ( ; rela < rela_end; rela++) {
Elf64_Addr addr = rela->r_offset;
Elf64_Addr relocated_addr = rela->r_addend;
if (rela->r_info != R_LARCH_RELATIVE)
continue;
if (relocated_addr >= VMLINUX_LOAD_ADDRESS)
relocated_addr = (Elf64_Addr)RELOCATED(relocated_addr);
*(Elf64_Addr *)RELOCATED(addr) = relocated_addr;
}
}
static inline void __init relocate_la_abs(long random_offset)
{
void *begin, *end;
struct rela_la_abs *p;
begin = RELOCATED_KASLR(&__la_abs_begin);
end = RELOCATED_KASLR(&__la_abs_end);
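/* Each entry comes from the la_abs macro in asmmacro.h: 'offset' locates the four-instruction sequence to patch and 'symvalue' holds the symbol address to encode into its immediates */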
for (p = begin; (void *)p < end; p++) {
long v = p->symvalue;
uint32_t lu12iw, ori, lu32id, lu52id;
union loongarch_instruction *insn = (void *)p - p->offset;
lu12iw = (v >> 12) & 0xfffff;
ori = v & 0xfff;
lu32id = (v >> 32) & 0xfffff;
lu52id = v >> 52;
insn[0].reg1i20_format.immediate = lu12iw;
insn[1].reg2i12_format.immediate = ori;
insn[2].reg1i20_format.immediate = lu32id;
insn[3].reg2i12_format.immediate = lu52id;
}
}
#ifdef CONFIG_RANDOMIZE_BASE
static inline __init unsigned long rotate_xor(unsigned long hash,
const void *area, size_t size)
{
size_t i, diff;
const typeof(hash) *ptr = PTR_ALIGN(area, sizeof(hash));
diff = (void *)ptr - area;
if (size < diff + sizeof(hash))
return hash;
size = ALIGN_DOWN(size - diff, sizeof(hash));
for (i = 0; i < size / sizeof(hash); i++) {
/* Rotate by odd number of bits and XOR. */
hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
hash ^= ptr[i];
}
return hash;
}
static inline __init unsigned long get_random_boot(void)
{
unsigned long hash = 0;
unsigned long entropy = random_get_entropy();
/* Attempt to create a simple but unpredictable starting entropy. */
hash = rotate_xor(hash, linux_banner, strlen(linux_banner));
/* Add in any runtime entropy we can get */
hash = rotate_xor(hash, &entropy, sizeof(entropy));
return hash;
}
static inline __init bool kaslr_disabled(void)
{
char *str;
const char *builtin_cmdline = CONFIG_CMDLINE;
str = strstr(builtin_cmdline, "nokaslr");
if (str == builtin_cmdline || (str > builtin_cmdline && *(str - 1) == ' '))
return true;
str = strstr(boot_command_line, "nokaslr");
if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' '))
return true;
return false;
}
/* Choose a new address for the kernel */
static inline void __init *determine_relocation_address(void)
{
unsigned long kernel_length;
unsigned long random_offset;
void *destination = _text;
if (kaslr_disabled())
return destination;
kernel_length = (long)_end - (long)_text;
random_offset = get_random_boot() << 16;
random_offset &= (CONFIG_RANDOMIZE_BASE_MAX_OFFSET - 1);
if (random_offset < kernel_length)
random_offset += ALIGN(kernel_length, 0xffff);
return RELOCATED_KASLR(destination);
}
static inline int __init relocation_addr_valid(void *location_new)
{
if ((unsigned long)location_new & 0x00000ffff)
return 0; /* Inappropriately aligned new location */
if ((unsigned long)location_new < (unsigned long)_end)
return 0; /* New location overlaps original kernel */
return 1;
}
#endif
static inline void __init update_reloc_offset(unsigned long *addr, long random_offset)
{
unsigned long *new_addr = (unsigned long *)RELOCATED_KASLR(addr);
*new_addr = (unsigned long)reloc_offset;
}
void * __init relocate_kernel(void)
{
unsigned long kernel_length;
unsigned long random_offset = 0;
void *location_new = _text; /* Default to original kernel start */
void *kernel_entry = start_kernel; /* Default to original kernel entry point */
char *cmdline;
/* Get the command line */
if (!fw_arg2) {
/* a0 = efi flag, a1 = small fdt */
early_init_dt_scan(early_ioremap(fw_arg1, SZ_64K));
} else if (fw_arg0 == 1 || fw_arg0 == 0) {
/* a0 = efi flag, a1 = cmdline, a2 = systemtab */
cmdline = early_ioremap(fw_arg1, COMMAND_LINE_SIZE);
strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE);
early_iounmap(cmdline, COMMAND_LINE_SIZE);
} else {
/* a0 = argc, a1 = argv, a3 = envp */
fw_init_cmdline(fw_arg0, fw_arg1);
}
#ifdef CONFIG_RANDOMIZE_BASE
location_new = determine_relocation_address();
/* Sanity check relocation address */
if (relocation_addr_valid(location_new))
random_offset = (unsigned long)location_new - (unsigned long)(_text);
#endif
reloc_offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS;
/* Reset the command line now so we don't end up with a duplicate */
boot_command_line[0] = '\0';
if (random_offset) {
kernel_length = (long)(_end) - (long)(_text);
/* Copy the kernel to its new location */
memcpy(location_new, _text, kernel_length);
/* Sync the caches ready for execution of new kernel */
__asm__ __volatile__ (
"ibar 0 \t\n"
"dbar 0 \t\n"
::: "memory");
reloc_offset += random_offset;
/* Return the new kernel's entry point */
kernel_entry = RELOCATED_KASLR(start_kernel);
/* The current thread is now within the relocated kernel */
__asm__ __volatile__ (
"move $t0, %0\t\n"
"add.d $tp, $tp, $t0\t\n"
::"r" (random_offset)
:);
update_reloc_offset(&reloc_offset, random_offset);
}
if (reloc_offset)
relocate_relative();
relocate_la_abs(random_offset);
return kernel_entry;
}
/*
* Show relocation information on panic.
*/
static void show_kernel_relocation(const char *level)
{
if (reloc_offset > 0) {
printk(level);
pr_cont("Kernel relocated by 0x%lx\n", reloc_offset);
pr_cont(" .text @ 0x%px\n", _text);
pr_cont(" .data @ 0x%px\n", _sdata);
pr_cont(" .bss @ 0x%px\n", __bss_start);
}
}
static int kernel_location_notifier_fn(struct notifier_block *self,
unsigned long v, void *p)
{
show_kernel_relocation(KERN_EMERG);
return NOTIFY_DONE;
}
static struct notifier_block kernel_location_notifier = {
.notifier_call = kernel_location_notifier_fn
};
static int __init register_kernel_offset_dumper(void)
{
atomic_notifier_chain_register(&panic_notifier_list,
&kernel_location_notifier);
return 0;
}
arch_initcall(register_kernel_offset_dumper);
/* SPDX-License-Identifier: GPL-2.0 */
/*
* relocate_kernel.S for kexec
*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#include <linux/kexec.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/regdef.h>
#include <asm/loongarch.h>
#include <asm/stackframe.h>
#include <asm/addrspace.h>
SYM_CODE_START(relocate_new_kernel)
/*
* a0: EFI boot flag for the new kernel
* a1: Command line pointer for the new kernel
* a2: System table pointer for the new kernel
* a3: Start address to jump to after relocation
* a4: Pointer to the current indirection page entry
*/
move s0, a4
/*
* In case of a kdump/crash kernel, the indirection page is not
* populated as the kernel is directly copied to a reserved location
*/
beqz s0, done
process_entry:
PTR_L s1, s0, 0
PTR_ADDI s0, s0, SZREG
/* destination page */
andi s2, s1, IND_DESTINATION
beqz s2, 1f
li.w t0, ~0x1
and s3, s1, t0 /* store destination addr in s3 */
b process_entry
1:
/* indirection page, update s0 */
andi s2, s1, IND_INDIRECTION
beqz s2, 1f
li.w t0, ~0x2
and s0, s1, t0
b process_entry
1:
/* done page */
andi s2, s1, IND_DONE
beqz s2, 1f
b done
1:
/* source page */
andi s2, s1, IND_SOURCE
beqz s2, process_entry
li.w t0, ~0x8
and s1, s1, t0
li.w s5, (1 << _PAGE_SHIFT) / SZREG
copy_word:
/* copy page word by word */
REG_L s4, s1, 0
REG_S s4, s3, 0
PTR_ADDI s3, s3, SZREG
PTR_ADDI s1, s1, SZREG
LONG_ADDI s5, s5, -1
beqz s5, process_entry
b copy_word
b process_entry
done:
ibar 0
dbar 0
/*
* Jump to the new kernel,
* make sure the values of a0, a1, a2 and a3 are not changed.
*/
jr a3
SYM_CODE_END(relocate_new_kernel)
#ifdef CONFIG_SMP
/*
* Other CPUs should wait until code is relocated and
* then start at the entry point from LOONGARCH_IOCSR_MBUF0.
*/
SYM_CODE_START(kexec_smp_wait)
1: li.w t0, 0x100 /* wait for init loop */
2: addi.w t0, t0, -1 /* limit mailbox access */
bnez t0, 2b
li.w t1, LOONGARCH_IOCSR_MBUF0
iocsrrd.w s0, t1 /* check PC as an indicator */
beqz s0, 1b
iocsrrd.d s0, t1 /* get PC via mailbox */
li.d t0, CACHE_BASE
or s0, s0, t0 /* s0 = TO_CACHE(s0) */
jr s0 /* jump to initial PC */
SYM_CODE_END(kexec_smp_wait)
#endif
relocate_new_kernel_end:
SYM_DATA_START(relocate_new_kernel_size)
PTR relocate_new_kernel_end - relocate_new_kernel
SYM_DATA_END(relocate_new_kernel_size)
...@@ -19,6 +19,8 @@
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/pfn.h>
...@@ -63,6 +65,8 @@ EXPORT_SYMBOL(cpu_data);
struct loongson_board_info b_info;
static const char dmi_empty_string[] = " ";
static phys_addr_t crashmem_start, crashmem_size;
/*
* Setup information
*
...@@ -178,16 +182,6 @@ static int __init early_parse_mem(char *p)
return -EINVAL;
}
/*
* If a user specifies memory size, we
* blow away any automatically generated
* size.
*/
if (usermem == 0) {
usermem = 1;
memblock_remove(memblock_start_of_DRAM(),
memblock_end_of_DRAM() - memblock_start_of_DRAM());
}
start = 0;
size = memparse(p, &p);
if (*p == '@')
...@@ -197,6 +191,23 @@ static int __init early_parse_mem(char *p)
return -EINVAL;
}
/*
* If a user specifies memory size, we
* blow away any automatically generated
* size.
*/
if (usermem == 0) {
usermem = 1;
if (!strstr(boot_command_line, "elfcorehdr")) {
memblock_remove(memblock_start_of_DRAM(),
memblock_end_of_DRAM() - memblock_start_of_DRAM());
} else {
crashmem_start = start;
crashmem_size = size;
return 0;
}
}
if (!IS_ENABLED(CONFIG_NUMA))
memblock_add(start, size);
else
...@@ -221,6 +232,88 @@ static void __init set_pcie_wakeup(void)
}
static void __init arch_reserve_vmcore(void)
{
#ifdef CONFIG_PROC_VMCORE
u64 i;
phys_addr_t start, end;
if (!is_kdump_kernel())
return;
if (!elfcorehdr_size) {
for_each_mem_range(i, &start, &end) {
if (elfcorehdr_addr >= start && elfcorehdr_addr < end) {
/*
* Reserve from the elf core header to the end of
* the memory segment, that should all be kdump
* reserved memory.
*/
elfcorehdr_size = end - elfcorehdr_addr;
break;
}
}
}
if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
pr_warn("elfcorehdr is overlapped\n");
return;
}
memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
pr_info("Reserving %llu KiB of memory at 0x%llx for elfcorehdr\n",
elfcorehdr_size >> 10, elfcorehdr_addr);
#endif
}
/* 2MB alignment for crash kernel regions */
#define CRASH_ALIGN SZ_2M
#define CRASH_ADDR_MAX SZ_4G
static void __init arch_parse_crashkernel(void)
{
#ifdef CONFIG_KEXEC
int ret;
unsigned long long total_mem;
unsigned long long crash_base, crash_size;
total_mem = memblock_phys_mem_size();
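/* parse_crashkernel() accepts forms such as "crashkernel=512M" or "crashkernel=512M@2G" (a size with an optional base address) */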
ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base);
if (ret < 0 || crash_size <= 0)
return;
if (crash_base <= 0) {
crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN, CRASH_ALIGN, CRASH_ADDR_MAX);
if (!crash_base) {
pr_warn("crashkernel reservation failed - No suitable area found.\n");
return;
}
} else if (!memblock_phys_alloc_range(crash_size, CRASH_ALIGN, crash_base, crash_base + crash_size)) {
pr_warn("Invalid memory region reserved for crash kernel\n");
return;
}
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
#endif
}
/*
* After the kdump operation switches to the capture kernel, the memory area
* used by the previous production kernel should be reserved so that the
* captured data is not destroyed.
*/
static void reserve_oldmem_region(void)
{
#ifdef CONFIG_CRASH_DUMP
if (!is_kdump_kernel())
return;
memblock_cap_memory_range(crashmem_start, crashmem_size);
#endif
}
void __init platform_init(void)
{
#ifdef CONFIG_ACPI_TABLE_UPGRADE
...@@ -259,6 +352,10 @@ static void __init check_kernel_sections_mem(void)
*/
static void __init arch_mem_init(char **cmdline_p)
{
arch_reserve_vmcore();
arch_parse_crashkernel();
reserve_oldmem_region();
if (usermem)
pr_info("User-defined physical RAM map overwrite\n");
...@@ -326,6 +423,15 @@ static void __init resource_init(void)
request_resource(res, &data_resource);
request_resource(res, &bss_resource);
}
#ifdef CONFIG_KEXEC
if (crashk_res.start < crashk_res.end) {
insert_resource(&iomem_resource, &crashk_res);
pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
(unsigned long)((crashk_res.end - crashk_res.start + 1) >> 20),
(unsigned long)(crashk_res.start >> 20));
}
#endif
}
static int __init reserve_memblock_reserved_regions(void)
...@@ -388,6 +494,7 @@ void __init setup_arch(char **cmdline_p)
memblock_init();
pagetable_init();
parse_early_param();
loongarch_reserve_initrd_mem();
platform_init();
arch_mem_init(cmdline_p);
......
...@@ -10,6 +10,7 @@
#include <linux/entry-common.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/extable.h>
#include <linux/mm.h>
...@@ -246,6 +247,9 @@ void __noreturn die(const char *str, struct pt_regs *regs)
oops_exit();
if (regs && kexec_should_crash(current))
crash_kexec(regs);
if (in_interrupt())
panic("Fatal exception in interrupt");
......
...@@ -66,6 +66,17 @@ SECTIONS
__alt_instructions_end = .;
}
#ifdef CONFIG_RELOCATABLE
. = ALIGN(8);
.la_abs : AT(ADDR(.la_abs) - LOAD_OFFSET) {
__la_abs_begin = .;
*(.la_abs)
__la_abs_end = .;
}
#endif
.data.rel : { *(.data.rel*) }
. = ALIGN(PECOFF_SEGMENT_ALIGN);
__init_begin = .;
__inittext_begin = .;
...@@ -101,6 +112,12 @@ SECTIONS
RO_DATA(4096)
RW_DATA(1 << CONFIG_L1_CACHE_SHIFT, PAGE_SIZE, THREAD_SIZE)
.rela.dyn : ALIGN(8) {
__rela_dyn_begin = .;
*(.rela.dyn) *(.rela*)
__rela_dyn_end = .;
}
.sdata : {
*(.sdata)
}
...@@ -127,6 +144,7 @@ SECTIONS
DISCARDS
/DISCARD/ : {
*(.dynamic .dynsym .dynstr .hash .gnu.hash)
*(.gnu.attributes)
*(.options)
*(.eh_frame)
......
...@@ -258,6 +258,7 @@ extern long exception_handlers[VECSIZE * 128 / sizeof(long)];
void setup_tlb_handler(int cpu)
{
setup_ptwalker();
local_flush_tlb_all();
output_pgtable_bits_defines();
/* The tlb handlers are generated only once */
...@@ -302,5 +303,4 @@ void tlb_init(int cpu)
write_csr_stlbpgsize(PS_DEFAULT_SIZE);
write_csr_tlbrefill_pagesize(PS_DEFAULT_SIZE);
setup_tlb_handler(cpu);
local_flush_tlb_all();
}
...@@ -17,8 +17,7 @@
move a0, sp
REG_S a2, sp, PT_BVADDR
li.w a1, \write
la.abs t0, do_page_fault bl do_page_fault
jirl ra, t0, 0
RESTORE_ALL_AND_RET
SYM_FUNC_END(tlb_do_page_fault_\write)
.endm
...@@ -33,7 +32,7 @@ SYM_FUNC_START(handle_tlb_protect)
move a1, zero
csrrd a2, LOONGARCH_CSR_BADV
REG_S a2, sp, PT_BVADDR
la.abs t0, do_page_fault la_abs t0, do_page_fault
jirl ra, t0, 0
RESTORE_ALL_AND_RET
SYM_FUNC_END(handle_tlb_protect)
...@@ -123,7 +122,7 @@ leave_load:
ertn
#ifdef CONFIG_64BIT
vmalloc_load:
la.abs t1, swapper_pg_dir la_abs t1, swapper_pg_dir
b vmalloc_done_load
#endif
...@@ -197,7 +196,7 @@ tlb_huge_update_load:
nopage_tlb_load:
dbar 0
csrrd ra, EXCEPTION_KS2
la.abs t0, tlb_do_page_fault_0 la_abs t0, tlb_do_page_fault_0
jr t0
SYM_FUNC_END(handle_tlb_load)
...@@ -289,7 +288,7 @@ leave_store:
ertn
#ifdef CONFIG_64BIT
vmalloc_store:
la.abs t1, swapper_pg_dir la_abs t1, swapper_pg_dir
b vmalloc_done_store
#endif
...@@ -365,7 +364,7 @@ tlb_huge_update_store:
nopage_tlb_store:
dbar 0
csrrd ra, EXCEPTION_KS2
la.abs t0, tlb_do_page_fault_1 la_abs t0, tlb_do_page_fault_1
jr t0
SYM_FUNC_END(handle_tlb_store)
...@@ -454,7 +453,7 @@ leave_modify:
ertn
#ifdef CONFIG_64BIT
vmalloc_modify:
la.abs t1, swapper_pg_dir la_abs t1, swapper_pg_dir
b vmalloc_done_modify
#endif
...@@ -524,7 +523,7 @@ tlb_huge_update_modify:
nopage_tlb_modify:
dbar 0
csrrd ra, EXCEPTION_KS2
la.abs t0, tlb_do_page_fault_1 la_abs t0, tlb_do_page_fault_1
jr t0
SYM_FUNC_END(handle_tlb_modify)
......
...@@ -16,7 +16,7 @@
.text
SYM_FUNC_START(swsusp_arch_save)
la.abs t0, saved_regs la.pcrel t0, saved_regs
PTR_S ra, t0, PT_R1
PTR_S sp, t0, PT_R3
PTR_S fp, t0, PT_R22
......
...@@ -113,9 +113,8 @@ SYM_CODE_START(loongarch_wakeup_start)
li.d t0, CSR_DMW1_INIT # CA, PLV0
csrwr t0, LOONGARCH_CSR_DMWIN1
la.abs t0, 0f JUMP_VIRT_ADDR t0, t1
jirl zero, t0, 0
0:
la.pcrel t0, acpi_saved_sp
ld.d sp, t0, 0
SETUP_WAKEUP
......
...@@ -49,8 +49,7 @@ void __noreturn efi_enter_kernel(unsigned long entrypoint, unsigned long fdt, un
csr_write64(CSR_DMW0_INIT, LOONGARCH_CSR_DMWIN0);
csr_write64(CSR_DMW1_INIT, LOONGARCH_CSR_DMWIN1);
real_kernel_entry = (kernel_entry_t) real_kernel_entry = (kernel_entry_t) kernel_entry;
((unsigned long)&kernel_entry - entrypoint + VMLINUX_LOAD_ADDRESS);
real_kernel_entry(true, fdt, 0);
}