Commit 04340249 authored by Linus Torvalds

Merge tag 'integrity-v6.0' of git://git.kernel.org/pub/scm/linux/kernel/git/zohar/linux-integrity

Pull integrity updates from Mimi Zohar:
 "Aside from the one EVM cleanup patch, all the other changes are kexec
  related.

  On different architectures different keyrings are used to verify the
  kexec'ed kernel image signature. Here are a number of preparatory
  cleanup patches and the patches themselves for making the keyrings -
  builtin_trusted_keyring, .machine, .secondary_trusted_keyring, and
  .platform - consistent across the different architectures"

* tag 'integrity-v6.0' of git://git.kernel.org/pub/scm/linux/kernel/git/zohar/linux-integrity:
  kexec, KEYS, s390: Make use of built-in and secondary keyring for signature verification
  arm64: kexec_file: use more system keyrings to verify kernel image signature
  kexec, KEYS: make the code in bzImage64_verify_sig generic
  kexec: clean up arch_kexec_kernel_verify_sig
  kexec: drop weak attribute from functions
  kexec_file: drop weak attribute from functions
  evm: Use IS_ENABLED to initialize .enabled
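
For reference, the common verification order the series converges on for PE-signed kernels (x86 bzImage and arm64 Image) is sketched below. It mirrors the kexec_kernel_verify_pe_sig() helper added to kernel/kexec_file.c further down in this diff: first try keys reachable through the secondary trusted keyring (which links the built-in and, when configured, .machine keyrings), then fall back to the firmware-provided .platform keyring.

#include <linux/kexec.h>
#include <linux/verification.h>

/* Sketch of the shared PE verification helper introduced by this series. */
int kexec_kernel_verify_pe_sig(const char *kernel, unsigned long kernel_len)
{
	int ret;

	/* Keys linked to .secondary_trusted_keys (includes built-in and .machine). */
	ret = verify_pefile_signature(kernel, kernel_len,
				      VERIFY_USE_SECONDARY_KEYRING,
				      VERIFYING_KEXEC_PE_SIGNATURE);
	if (ret == -ENOKEY && IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING)) {
		/* Fall back to firmware-provided keys in the .platform keyring. */
		ret = verify_pefile_signature(kernel, kernel_len,
					      VERIFY_USE_PLATFORM_KEYRING,
					      VERIFYING_KEXEC_PE_SIGNATURE);
	}
	return ret;
}
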
--- a/arch/arm64/include/asm/kexec.h
+++ b/arch/arm64/include/asm/kexec.h
@@ -84,16 +84,30 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
 extern bool crash_is_nosave(unsigned long pfn);
 extern void crash_prepare_suspend(void);
 extern void crash_post_resume(void);
+
+void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
+#define crash_free_reserved_phys_range crash_free_reserved_phys_range
 #else
 static inline bool crash_is_nosave(unsigned long pfn) {return false; }
 static inline void crash_prepare_suspend(void) {}
 static inline void crash_post_resume(void) {}
 #endif
 
+struct kimage;
+
 #if defined(CONFIG_KEXEC_CORE)
 void cpu_soft_restart(unsigned long el2_switch, unsigned long entry,
 		unsigned long arg0, unsigned long arg1,
 		unsigned long arg2);
+
+int machine_kexec_post_load(struct kimage *image);
+#define machine_kexec_post_load machine_kexec_post_load
+
+void arch_kexec_protect_crashkres(void);
+#define arch_kexec_protect_crashkres arch_kexec_protect_crashkres
+
+void arch_kexec_unprotect_crashkres(void);
+#define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres
 #endif
 
 #define ARCH_HAS_KIMAGE_ARCH
 
@@ -113,9 +127,9 @@ struct kimage_arch {
 #ifdef CONFIG_KEXEC_FILE
 extern const struct kexec_file_ops kexec_image_ops;
 
-struct kimage;
-
-extern int arch_kimage_file_post_load_cleanup(struct kimage *image);
+int arch_kimage_file_post_load_cleanup(struct kimage *image);
+#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
+
 extern int load_other_segments(struct kimage *image,
 			unsigned long kernel_load_addr, unsigned long kernel_size,
 			char *initrd, unsigned long initrd_len,
--- a/arch/arm64/kernel/kexec_image.c
+++ b/arch/arm64/kernel/kexec_image.c
@@ -14,7 +14,6 @@
 #include <linux/kexec.h>
 #include <linux/pe.h>
 #include <linux/string.h>
-#include <linux/verification.h>
 #include <asm/byteorder.h>
 #include <asm/cpufeature.h>
 #include <asm/image.h>
@@ -130,18 +129,10 @@ static void *image_load(struct kimage *image,
 	return NULL;
 }
 
-#ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG
-static int image_verify_sig(const char *kernel, unsigned long kernel_len)
-{
-	return verify_pefile_signature(kernel, kernel_len, NULL,
-				       VERIFYING_KEXEC_PE_SIGNATURE);
-}
-#endif
-
 const struct kexec_file_ops kexec_image_ops = {
 	.probe = image_probe,
 	.load = image_load,
 #ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG
-	.verify_sig = image_verify_sig,
+	.verify_sig = kexec_kernel_verify_pe_sig,
 #endif
 };
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -98,6 +98,11 @@ void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_co
 void kexec_copy_flush(struct kimage *image);
 
+#if defined(CONFIG_CRASH_DUMP) && defined(CONFIG_PPC_RTAS)
+void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
+#define crash_free_reserved_phys_range crash_free_reserved_phys_range
+#endif
+
 #ifdef CONFIG_KEXEC_FILE
 extern const struct kexec_file_ops kexec_elf64_ops;
 
@@ -120,6 +125,15 @@ int setup_purgatory(struct kimage *image, const void *slave_code,
 #ifdef CONFIG_PPC64
 struct kexec_buf;
 
+int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, unsigned long buf_len);
+#define arch_kexec_kernel_image_probe arch_kexec_kernel_image_probe
+
+int arch_kimage_file_post_load_cleanup(struct kimage *image);
+#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
+
+int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf);
+#define arch_kexec_locate_mem_hole arch_kexec_locate_mem_hole
+
 int load_crashdump_segments_ppc64(struct kimage *image,
 				  struct kexec_buf *kbuf);
 int setup_purgatory_ppc64(struct kimage *image, const void *slave_code,
--- a/arch/s390/include/asm/kexec.h
+++ b/arch/s390/include/asm/kexec.h
@@ -85,6 +85,17 @@ struct kimage_arch {
 extern const struct kexec_file_ops s390_kexec_image_ops;
 extern const struct kexec_file_ops s390_kexec_elf_ops;
 
+#ifdef CONFIG_CRASH_DUMP
+void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
+#define crash_free_reserved_phys_range crash_free_reserved_phys_range
+
+void arch_kexec_protect_crashkres(void);
+#define arch_kexec_protect_crashkres arch_kexec_protect_crashkres
+
+void arch_kexec_unprotect_crashkres(void);
+#define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres
+#endif
+
 #ifdef CONFIG_KEXEC_FILE
 struct purgatory_info;
 int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
@@ -92,5 +103,8 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
 				     const Elf_Shdr *relsec,
 				     const Elf_Shdr *symtab);
 #define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
+
+int arch_kimage_file_post_load_cleanup(struct kimage *image);
+#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
 #endif
 #endif /*_S390_KEXEC_H */
--- a/arch/s390/kernel/machine_kexec_file.c
+++ b/arch/s390/kernel/machine_kexec_file.c
@@ -31,6 +31,7 @@ int s390_verify_sig(const char *kernel, unsigned long kernel_len)
 	const unsigned long marker_len = sizeof(MODULE_SIG_STRING) - 1;
 	struct module_signature *ms;
 	unsigned long sig_len;
+	int ret;
 
 	/* Skip signature verification when not secure IPLed. */
 	if (!ipl_secure_flag)
@@ -65,11 +66,18 @@ int s390_verify_sig(const char *kernel, unsigned long kernel_len)
 		return -EBADMSG;
 	}
 
-	return verify_pkcs7_signature(kernel, kernel_len,
+	ret = verify_pkcs7_signature(kernel, kernel_len,
 				      kernel + kernel_len, sig_len,
-				      VERIFY_USE_PLATFORM_KEYRING,
+				      VERIFY_USE_SECONDARY_KEYRING,
 				      VERIFYING_MODULE_SIGNATURE,
 				      NULL, NULL);
+	if (ret == -ENOKEY && IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING))
+		ret = verify_pkcs7_signature(kernel, kernel_len,
+					     kernel + kernel_len, sig_len,
+					     VERIFY_USE_PLATFORM_KEYRING,
+					     VERIFYING_MODULE_SIGNATURE,
+					     NULL, NULL);
+	return ret;
 }
 #endif /* CONFIG_KEXEC_SIG */
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -186,6 +186,12 @@ extern int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages,
 extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages);
 #define arch_kexec_pre_free_pages arch_kexec_pre_free_pages
 
+void arch_kexec_protect_crashkres(void);
+#define arch_kexec_protect_crashkres arch_kexec_protect_crashkres
+
+void arch_kexec_unprotect_crashkres(void);
+#define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres
+
 #ifdef CONFIG_KEXEC_FILE
 struct purgatory_info;
 int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
@@ -193,6 +199,12 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
 				     const Elf_Shdr *relsec,
 				     const Elf_Shdr *symtab);
 #define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
+
+void *arch_kexec_kernel_image_load(struct kimage *image);
+#define arch_kexec_kernel_image_load arch_kexec_kernel_image_load
+
+int arch_kimage_file_post_load_cleanup(struct kimage *image);
+#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
 #endif
 #endif
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -17,7 +17,6 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/efi.h>
-#include <linux/verification.h>
 #include <linux/random.h>
 
 #include <asm/bootparam.h>
@@ -596,28 +595,11 @@ static int bzImage64_cleanup(void *loader_data)
 	return 0;
 }
 
-#ifdef CONFIG_KEXEC_BZIMAGE_VERIFY_SIG
-static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len)
-{
-	int ret;
-
-	ret = verify_pefile_signature(kernel, kernel_len,
-				      VERIFY_USE_SECONDARY_KEYRING,
-				      VERIFYING_KEXEC_PE_SIGNATURE);
-	if (ret == -ENOKEY && IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING)) {
-		ret = verify_pefile_signature(kernel, kernel_len,
-					      VERIFY_USE_PLATFORM_KEYRING,
-					      VERIFYING_KEXEC_PE_SIGNATURE);
-	}
-	return ret;
-}
-#endif
-
 const struct kexec_file_ops kexec_bzImage64_ops = {
 	.probe = bzImage64_probe,
 	.load = bzImage64_load,
 	.cleanup = bzImage64_cleanup,
 #ifdef CONFIG_KEXEC_BZIMAGE_VERIFY_SIG
-	.verify_sig = bzImage64_verify_sig,
+	.verify_sig = kexec_kernel_verify_pe_sig,
 #endif
 };
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -19,6 +19,7 @@
 #include <asm/io.h>
 
 #include <uapi/linux/kexec.h>
+#include <linux/verification.h>
 
 /* Location of a reserved region to hold the crash kernel.
  */
@@ -188,21 +189,54 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
 				   void *buf, unsigned int size,
 				   bool get_value);
 void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name);
+void *kexec_image_load_default(struct kimage *image);
+
+#ifndef arch_kexec_kernel_image_probe
+static inline int
+arch_kexec_kernel_image_probe(struct kimage *image, void *buf, unsigned long buf_len)
+{
+	return kexec_image_probe_default(image, buf, buf_len);
+}
+#endif
+
+#ifndef arch_kimage_file_post_load_cleanup
+static inline int arch_kimage_file_post_load_cleanup(struct kimage *image)
+{
+	return kexec_image_post_load_cleanup_default(image);
+}
+#endif
+
+#ifndef arch_kexec_kernel_image_load
+static inline void *arch_kexec_kernel_image_load(struct kimage *image)
+{
+	return kexec_image_load_default(image);
+}
+#endif
 
-/* Architectures may override the below functions */
-int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
-				  unsigned long buf_len);
-void *arch_kexec_kernel_image_load(struct kimage *image);
-int arch_kimage_file_post_load_cleanup(struct kimage *image);
 #ifdef CONFIG_KEXEC_SIG
-int arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
-				 unsigned long buf_len);
+#ifdef CONFIG_SIGNED_PE_FILE_VERIFICATION
+int kexec_kernel_verify_pe_sig(const char *kernel, unsigned long kernel_len);
+#endif
 #endif
-int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf);
 
 extern int kexec_add_buffer(struct kexec_buf *kbuf);
 int kexec_locate_mem_hole(struct kexec_buf *kbuf);
+
+#ifndef arch_kexec_locate_mem_hole
+/**
+ * arch_kexec_locate_mem_hole - Find free memory to place the segments.
+ * @kbuf:                       Parameters for the memory search.
+ *
+ * On success, kbuf->mem will have the start address of the memory region found.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+static inline int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
+{
+	return kexec_locate_mem_hole(kbuf);
+}
+#endif
 
 /* Alignment required for elf header segment */
 #define ELF_CORE_HEADER_ALIGN 4096
@@ -358,7 +392,10 @@ extern void machine_kexec_cleanup(struct kimage *image);
 extern int kernel_kexec(void);
 extern struct page *kimage_alloc_control_pages(struct kimage *image,
 						unsigned int order);
-int machine_kexec_post_load(struct kimage *image);
+
+#ifndef machine_kexec_post_load
+static inline int machine_kexec_post_load(struct kimage *image) { return 0; }
+#endif
 
 extern void __crash_kexec(struct pt_regs *);
 extern void crash_kexec(struct pt_regs *);
@@ -391,10 +428,21 @@ extern bool kexec_in_progress;
 int crash_shrink_memory(unsigned long new_size);
 size_t crash_get_memory_size(void);
-void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
-void arch_kexec_protect_crashkres(void);
-void arch_kexec_unprotect_crashkres(void);
+
+#ifndef arch_kexec_protect_crashkres
+/*
+ * Protection mechanism for crashkernel reserved memory after
+ * the kdump kernel is loaded.
+ *
+ * Provide an empty default implementation here -- architecture
+ * code may override this
+ */
+static inline void arch_kexec_protect_crashkres(void) { }
+#endif
+
+#ifndef arch_kexec_unprotect_crashkres
+static inline void arch_kexec_unprotect_crashkres(void) { }
+#endif
 
 #ifndef page_to_boot_pfn
 static inline unsigned long page_to_boot_pfn(struct page *page)
@@ -424,6 +472,16 @@ static inline phys_addr_t boot_phys_to_phys(unsigned long boot_phys)
 }
 #endif
 
+#ifndef crash_free_reserved_phys_range
+static inline void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
+{
+	unsigned long addr;
+
+	for (addr = begin; addr < end; addr += PAGE_SIZE)
+		free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT));
+}
+#endif
+
 static inline unsigned long virt_to_boot_phys(void *addr)
 {
 	return phys_to_boot_phys(__pa((unsigned long)addr));
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -591,11 +591,6 @@ static void kimage_free_extra_pages(struct kimage *image)
 }
 
-int __weak machine_kexec_post_load(struct kimage *image)
-{
-	return 0;
-}
-
 void kimage_terminate(struct kimage *image)
 {
 	if (*image->entry != 0)
@@ -1020,15 +1015,6 @@ size_t crash_get_memory_size(void)
 	return size;
 }
 
-void __weak crash_free_reserved_phys_range(unsigned long begin,
-					   unsigned long end)
-{
-	unsigned long addr;
-
-	for (addr = begin; addr < end; addr += PAGE_SIZE)
-		free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT));
-}
-
 int crash_shrink_memory(unsigned long new_size)
 {
 	int ret = 0;
@@ -1225,16 +1211,3 @@ int kernel_kexec(void)
 	mutex_unlock(&kexec_mutex);
 	return error;
 }
-
-/*
- * Protection mechanism for crashkernel reserved memory after
- * the kdump kernel is loaded.
- *
- * Provide an empty default implementation here -- architecture
- * code may override this
- */
-void __weak arch_kexec_protect_crashkres(void)
-{}
-
-void __weak arch_kexec_unprotect_crashkres(void)
-{}
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -62,14 +62,7 @@ int kexec_image_probe_default(struct kimage *image, void *buf,
 	return ret;
 }
 
-/* Architectures can provide this probe function */
-int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
-					 unsigned long buf_len)
-{
-	return kexec_image_probe_default(image, buf, buf_len);
-}
-
-static void *kexec_image_load_default(struct kimage *image)
+void *kexec_image_load_default(struct kimage *image)
 {
 	if (!image->fops || !image->fops->load)
 		return ERR_PTR(-ENOEXEC);
@@ -80,11 +73,6 @@ static void *kexec_image_load_default(struct kimage *image)
 			       image->cmdline_buf_len);
 }
 
-void * __weak arch_kexec_kernel_image_load(struct kimage *image)
-{
-	return kexec_image_load_default(image);
-}
-
 int kexec_image_post_load_cleanup_default(struct kimage *image)
 {
 	if (!image->fops || !image->fops->cleanup)
@@ -93,30 +81,6 @@ int kexec_image_post_load_cleanup_default(struct kimage *image)
 	return image->fops->cleanup(image->image_loader_data);
 }
 
-int __weak arch_kimage_file_post_load_cleanup(struct kimage *image)
-{
-	return kexec_image_post_load_cleanup_default(image);
-}
-
-#ifdef CONFIG_KEXEC_SIG
-static int kexec_image_verify_sig_default(struct kimage *image, void *buf,
-					  unsigned long buf_len)
-{
-	if (!image->fops || !image->fops->verify_sig) {
-		pr_debug("kernel loader does not support signature verification.\n");
-		return -EKEYREJECTED;
-	}
-
-	return image->fops->verify_sig(buf, buf_len);
-}
-
-int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
-					unsigned long buf_len)
-{
-	return kexec_image_verify_sig_default(image, buf, buf_len);
-}
-#endif
-
 /*
  * Free up memory used by kernel, initrd, and command line. This is temporary
  * memory allocation which is not needed any more after these buffers have
@@ -159,13 +123,41 @@ void kimage_file_post_load_cleanup(struct kimage *image)
 }
 
 #ifdef CONFIG_KEXEC_SIG
+#ifdef CONFIG_SIGNED_PE_FILE_VERIFICATION
+int kexec_kernel_verify_pe_sig(const char *kernel, unsigned long kernel_len)
+{
+	int ret;
+
+	ret = verify_pefile_signature(kernel, kernel_len,
+				      VERIFY_USE_SECONDARY_KEYRING,
+				      VERIFYING_KEXEC_PE_SIGNATURE);
+	if (ret == -ENOKEY && IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING)) {
+		ret = verify_pefile_signature(kernel, kernel_len,
+					      VERIFY_USE_PLATFORM_KEYRING,
+					      VERIFYING_KEXEC_PE_SIGNATURE);
+	}
+	return ret;
+}
+#endif
+
+static int kexec_image_verify_sig(struct kimage *image, void *buf,
+				  unsigned long buf_len)
+{
+	if (!image->fops || !image->fops->verify_sig) {
+		pr_debug("kernel loader does not support signature verification.\n");
+		return -EKEYREJECTED;
+	}
+
+	return image->fops->verify_sig(buf, buf_len);
+}
+
 static int
 kimage_validate_signature(struct kimage *image)
 {
 	int ret;
 
-	ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf,
+	ret = kexec_image_verify_sig(image, image->kernel_buf,
 					   image->kernel_buf_len);
 	if (ret) {
 		if (sig_enforce) {
@@ -621,19 +613,6 @@ int kexec_locate_mem_hole(struct kexec_buf *kbuf)
 	return ret == 1 ? 0 : -EADDRNOTAVAIL;
 }
 
-/**
- * arch_kexec_locate_mem_hole - Find free memory to place the segments.
- * @kbuf:                       Parameters for the memory search.
- *
- * On success, kbuf->mem will have the start address of the memory region found.
- *
- * Return: 0 on success, negative errno on error.
- */
-int __weak arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
-{
-	return kexec_locate_mem_hole(kbuf);
-}
-
 /**
  * kexec_add_buffer - place a buffer in a kexec segment
  * @kbuf: Buffer contents and memory parameters.
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -36,42 +36,36 @@ static const char * const integrity_status_msg[] = {
 int evm_hmac_attrs;
 
 static struct xattr_list evm_config_default_xattrnames[] = {
-	{.name = XATTR_NAME_SELINUX,
-#ifdef CONFIG_SECURITY_SELINUX
-	 .enabled = true
-#endif
+	{
+	 .name = XATTR_NAME_SELINUX,
+	 .enabled = IS_ENABLED(CONFIG_SECURITY_SELINUX)
 	},
-	{.name = XATTR_NAME_SMACK,
-#ifdef CONFIG_SECURITY_SMACK
-	 .enabled = true
-#endif
+	{
+	 .name = XATTR_NAME_SMACK,
+	 .enabled = IS_ENABLED(CONFIG_SECURITY_SMACK)
 	},
-	{.name = XATTR_NAME_SMACKEXEC,
-#ifdef CONFIG_EVM_EXTRA_SMACK_XATTRS
-	 .enabled = true
-#endif
+	{
+	 .name = XATTR_NAME_SMACKEXEC,
+	 .enabled = IS_ENABLED(CONFIG_EVM_EXTRA_SMACK_XATTRS)
 	},
-	{.name = XATTR_NAME_SMACKTRANSMUTE,
-#ifdef CONFIG_EVM_EXTRA_SMACK_XATTRS
-	 .enabled = true
-#endif
+	{
+	 .name = XATTR_NAME_SMACKTRANSMUTE,
+	 .enabled = IS_ENABLED(CONFIG_EVM_EXTRA_SMACK_XATTRS)
 	},
-	{.name = XATTR_NAME_SMACKMMAP,
-#ifdef CONFIG_EVM_EXTRA_SMACK_XATTRS
-	 .enabled = true
-#endif
+	{
+	 .name = XATTR_NAME_SMACKMMAP,
+	 .enabled = IS_ENABLED(CONFIG_EVM_EXTRA_SMACK_XATTRS)
 	},
-	{.name = XATTR_NAME_APPARMOR,
-#ifdef CONFIG_SECURITY_APPARMOR
-	 .enabled = true
-#endif
+	{
+	 .name = XATTR_NAME_APPARMOR,
+	 .enabled = IS_ENABLED(CONFIG_SECURITY_APPARMOR)
 	},
-	{.name = XATTR_NAME_IMA,
-#ifdef CONFIG_IMA_APPRAISE
-	 .enabled = true
-#endif
+	{
+	 .name = XATTR_NAME_IMA,
+	 .enabled = IS_ENABLED(CONFIG_IMA_APPRAISE)
 	},
-	{.name = XATTR_NAME_CAPS,
+	{
+	 .name = XATTR_NAME_CAPS,
 	 .enabled = true
 	},
 };