提交 03367495 编写于 作者: S Sang Yan 提交者: Yang Yingliang

kexec: Add quick kexec support for kernel

hulk inclusion
category: feature
bugzilla: 48159
CVE: N/A

------------------------------

In normal kexec, relocating the kernel may cost 5 to 10 seconds, because
all segments have to be copied from vmalloc'ed memory to kernel boot
memory with the MMU disabled.

We introduce quick kexec to save the memory-copying time described
above, similar to kdump (kexec on crash), by using a reserved memory
region named "Quick Kexec".

The quick kimage is constructed in the same way as the crash kernel
image; all of its segments are then simply copied into the reserved memory.

We also add support for this in the kexec_load syscall via the
KEXEC_QUICK flag.
Signed-off-by: Sang Yan <sangyan@huawei.com>
Reviewed-by: Wang Xiongfeng <wangxiongfeng2@huawei.com>
Acked-by: Hanjun Guo <guohanjun@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
上级 c6fa39d7
......@@ -18,6 +18,15 @@ config KEXEC_CORE
select CRASH_CORE
bool
config QUICK_KEXEC
bool "Support for quick kexec"
depends on KEXEC_CORE
help
It uses pre-reserved memory to accelerate kexec, just like
crash kexec, loads new kernel and initrd to reserved memory,
and boots new kernel on that memory. It will save the time
of relocating kernel.
config HAVE_IMA_KEXEC
bool
......
......@@ -282,6 +282,13 @@ static void __init request_standard_resources(void)
request_resource(res, &pin_memory_resource);
#endif
#ifdef CONFIG_QUICK_KEXEC
if (quick_kexec_res.end &&
quick_kexec_res.start >= res->start &&
quick_kexec_res.end <= res->end)
request_resource(res, &quick_kexec_res);
#endif
for (j = 0; j < res_mem_count; j++) {
if (res_resources[j].start >= res->start &&
res_resources[j].end <= res->end)
......
......@@ -243,6 +243,45 @@ static void __init kexec_reserve_crashkres_pages(void)
}
#endif /* CONFIG_KEXEC_CORE */
#ifdef CONFIG_QUICK_KEXEC
/*
 * Parse the "quickkexec=" early parameter.
 *
 * The requested reservation size is page-aligned and temporarily
 * stashed in quick_kexec_res.end; reserve_quick_kexec() later turns
 * it into a real [start, end] physical range.
 */
static int __init parse_quick_kexec(char *p)
{
	if (p)
		quick_kexec_res.end = PAGE_ALIGN(memparse(p, NULL));

	return 0;
}
early_param("quickkexec", parse_quick_kexec);
/*
 * Reserve the "Quick kexec" region from memblock.
 *
 * parse_quick_kexec() left the requested size in quick_kexec_res.end.
 * On success the resource is rewritten as an inclusive [start, end]
 * physical range; on failure .end is cleared so later checks see an
 * empty resource.
 */
static void __init reserve_quick_kexec(void)
{
	unsigned long long base;
	unsigned long long size = quick_kexec_res.end;

	if (!size)
		return;

	/* Current arm64 boot protocol requires 2MB alignment */
	base = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT,
				      size, CRASH_ALIGN);
	if (!base) {
		pr_warn("cannot allocate quick kexec mem (size:0x%llx)\n",
			size);
		quick_kexec_res.end = 0;
		return;
	}

	memblock_reserve(base, size);
	pr_info("quick kexec mem reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
		base, base + size, size >> 20);

	quick_kexec_res.start = base;
	quick_kexec_res.end = base + size - 1;
}
#endif
#ifdef CONFIG_CRASH_DUMP
static int __init early_init_dt_scan_elfcorehdr(unsigned long node,
const char *uname, int depth, void *data)
......@@ -699,6 +738,10 @@ void __init arm64_memblock_init(void)
reserve_crashkernel();
#ifdef CONFIG_QUICK_KEXEC
reserve_quick_kexec();
#endif
reserve_elfcorehdr();
high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
......
......@@ -139,6 +139,7 @@ enum {
IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5,
IORES_DESC_DEVICE_PRIVATE_MEMORY = 6,
IORES_DESC_DEVICE_PUBLIC_MEMORY = 7,
IORES_DESC_QUICK_KEXEC = 8,
};
/* helpers to define resources */
......
......@@ -233,9 +233,10 @@ struct kimage {
unsigned long control_page;
/* Flags to indicate special processing */
unsigned int type : 1;
unsigned int type : 2;
#define KEXEC_TYPE_DEFAULT 0
#define KEXEC_TYPE_CRASH 1
#define KEXEC_TYPE_QUICK 2
unsigned int preserve_context : 1;
/* If set, we are using file mode kexec syscall */
unsigned int file_mode:1;
......@@ -309,6 +310,11 @@ extern int kexec_sysctl_handler(struct ctl_table *table, int write,
#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT)
#endif
#ifdef CONFIG_QUICK_KEXEC
#undef KEXEC_FLAGS
#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_QUICK)
#endif
/* List of defined/legal kexec file flags */
#define KEXEC_FILE_FLAGS (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH | \
KEXEC_FILE_NO_INITRAMFS)
......@@ -318,6 +324,9 @@ extern int kexec_sysctl_handler(struct ctl_table *table, int write,
extern struct resource crashk_res;
extern struct resource crashk_low_res;
extern note_buf_t __percpu *crash_notes;
#ifdef CONFIG_QUICK_KEXEC
extern struct resource quick_kexec_res;
#endif
/* flag to track if kexec reboot is in progress */
extern bool kexec_in_progress;
......
......@@ -12,6 +12,7 @@
/* kexec flags for different usage scenarios */
#define KEXEC_ON_CRASH 0x00000001
#define KEXEC_PRESERVE_CONTEXT 0x00000002
#define KEXEC_QUICK 0x00000004
#define KEXEC_ARCH_MASK 0xffff0000
/*
......
......@@ -46,6 +46,9 @@ static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
int ret;
struct kimage *image;
bool kexec_on_panic = flags & KEXEC_ON_CRASH;
#ifdef CONFIG_QUICK_KEXEC
bool kexec_on_quick = flags & KEXEC_QUICK;
#endif
if (kexec_on_panic) {
/* Verify we have a valid entry point */
......@@ -71,6 +74,13 @@ static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
image->type = KEXEC_TYPE_CRASH;
}
#ifdef CONFIG_QUICK_KEXEC
if (kexec_on_quick) {
image->control_page = quick_kexec_res.start;
image->type = KEXEC_TYPE_QUICK;
}
#endif
ret = sanity_check_segment_list(image);
if (ret)
goto out_free_image;
......
......@@ -74,6 +74,16 @@ struct resource crashk_low_res = {
.desc = IORES_DESC_CRASH_KERNEL
};
#ifdef CONFIG_QUICK_KEXEC
/*
 * Reserved "Quick kexec" memory region.
 *
 * Before reserve_quick_kexec() runs, .end temporarily holds the size
 * requested by the "quickkexec=" command-line parameter; afterwards
 * the resource describes the inclusive physical [start, end] range
 * that control pages and segments of a KEXEC_TYPE_QUICK image are
 * placed in.
 */
struct resource quick_kexec_res = {
.name = "Quick kexec",
.start = 0,
.end = 0,
.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
.desc = IORES_DESC_QUICK_KEXEC
};
#endif
int kexec_should_crash(struct task_struct *p)
{
/*
......@@ -470,8 +480,10 @@ static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
return pages;
}
static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
unsigned int order)
static struct page *kimage_alloc_special_control_pages(struct kimage *image,
unsigned int order,
unsigned long end)
{
/* Control pages are special, they are the intermediaries
* that are needed while we copy the rest of the pages
......@@ -501,7 +513,7 @@ static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
size = (1 << order) << PAGE_SHIFT;
hole_start = (image->control_page + (size - 1)) & ~(size - 1);
hole_end = hole_start + size - 1;
while (hole_end <= crashk_res.end) {
while (hole_end <= end) {
unsigned long i;
cond_resched();
......@@ -536,7 +548,6 @@ static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
return pages;
}
struct page *kimage_alloc_control_pages(struct kimage *image,
unsigned int order)
{
......@@ -547,8 +558,15 @@ struct page *kimage_alloc_control_pages(struct kimage *image,
pages = kimage_alloc_normal_control_pages(image, order);
break;
case KEXEC_TYPE_CRASH:
pages = kimage_alloc_crash_control_pages(image, order);
pages = kimage_alloc_special_control_pages(image, order,
crashk_res.end);
break;
#ifdef CONFIG_QUICK_KEXEC
case KEXEC_TYPE_QUICK:
pages = kimage_alloc_special_control_pages(image, order,
quick_kexec_res.end);
break;
#endif
}
return pages;
......@@ -898,11 +916,11 @@ static int kimage_load_normal_segment(struct kimage *image,
return result;
}
static int kimage_load_crash_segment(struct kimage *image,
static int kimage_load_special_segment(struct kimage *image,
struct kexec_segment *segment)
{
/* For crash dumps kernels we simply copy the data from
* user space to it's destination.
/* For crash dumps kernels and quick kexec kernels
* we simply copy the data from user space to it's destination.
* We do things a page at a time for the sake of kmap.
*/
unsigned long maddr;
......@@ -976,8 +994,13 @@ int kimage_load_segment(struct kimage *image,
result = kimage_load_normal_segment(image, segment);
break;
case KEXEC_TYPE_CRASH:
result = kimage_load_crash_segment(image, segment);
result = kimage_load_special_segment(image, segment);
break;
#ifdef CONFIG_QUICK_KEXEC
case KEXEC_TYPE_QUICK:
result = kimage_load_special_segment(image, segment);
break;
#endif
}
return result;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册