Commit 0faef837 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching

Pull livepatching fixes from Jiri Kosina:

 - symbol lookup locking fix, from Miroslav Benes

 - error handling improvements in case of failure of the module coming
   notifier, from Minfei Huang

 - we were too pessimistic when kASLR is enabled on x86 and were dropping
   address hints on the floor unnecessarily in that case (see the sketch
   after this list). Fix from Jiri Kosina

 - a few other small fixes and cleanups
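
For reference, the kASLR handling mentioned above boils down to shifting a
patch author's address hint by the randomization offset instead of throwing
it away. A minimal sketch, assuming the kaslr_enabled()/kaslr_offset()
helpers introduced by this series; klp_adjust_old_addr() is a hypothetical
wrapper used only for illustration and is not part of the patches:

#include <asm/setup.h>		/* kaslr_enabled(), kaslr_offset() */

/* Hypothetical helper, for illustration only: translate an address hint
 * recorded against the non-randomized vmlinux into the address of the
 * running, relocated kernel text. */
static unsigned long klp_adjust_old_addr(unsigned long old_addr)
{
#if defined(CONFIG_RANDOMIZE_BASE)
	/* If KASLR has been enabled, shift the hint by the same offset. */
	if (kaslr_enabled() && old_addr)
		old_addr += kaslr_offset();
#endif
	return old_addr;
}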

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching:
  livepatch: add module locking around kallsyms calls
  livepatch: annotate klp_init() with __init
  livepatch: introduce patch/func-walking helpers
  livepatch: make kobject in klp_object statically allocated
  livepatch: Prevent patch inconsistencies if the coming module notifier fails
  livepatch: match return value to function signature
  x86: kaslr: fix build due to missing ALIGN definition
  livepatch: x86: make kASLR logic more accurate
  x86: introduce kaslr_offset()
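
While reading the diff below, note that the new patch/func-walking helpers
are plain for-loop wrappers over the NULL-terminated arrays hanging off
struct klp_patch and struct klp_object. A minimal usage sketch; the
klp_count_funcs() helper is hypothetical and exists only for illustration:

#include <linux/livepatch.h>

/* Count every function covered by a patch using the new iterators;
 * equivalent to the open-coded for-loops they replace in the livepatch core. */
static int klp_count_funcs(struct klp_patch *patch)
{
	struct klp_object *obj;
	struct klp_func *func;
	int count = 0;

	klp_for_each_object(patch, obj)		/* stops when obj->funcs is NULL */
		klp_for_each_func(obj, func)	/* stops when func->old_name is NULL */
			count++;

	return count;
}
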
@@ -21,6 +21,7 @@
#ifndef _ASM_X86_LIVEPATCH_H
#define _ASM_X86_LIVEPATCH_H
#include <asm/setup.h>
#include <linux/module.h>
#include <linux/ftrace.h>
......
@@ -60,17 +60,24 @@ static inline void x86_ce4100_early_setup(void) { }
#ifndef _SETUP
#include <asm/espfix.h>
#include <linux/kernel.h>
/*
* This is set up by the setup-routine at boot-time
*/
extern struct boot_params boot_params;
extern char _text[];
static inline bool kaslr_enabled(void)
{
return !!(boot_params.hdr.loadflags & KASLR_FLAG);
}
static inline unsigned long kaslr_offset(void)
{
return (unsigned long)&_text - __START_KERNEL;
}
/*
* Do NOT EVER look at the BIOS memory size location.
* It does not work on many machines.
......
@@ -26,6 +26,7 @@
#include <asm/io_apic.h>
#include <asm/debugreg.h>
#include <asm/kexec-bzimage64.h>
#include <asm/setup.h>
#ifdef CONFIG_KEXEC_FILE
static struct kexec_file_ops *kexec_file_loaders[] = {
@@ -335,7 +336,7 @@ void arch_crash_save_vmcoreinfo(void)
VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
vmcoreinfo_append_str("KERNELOFFSET=%lx\n",
(unsigned long)&_text - __START_KERNEL);
kaslr_offset());
}
/* arch-dependent functionality related to kexec file-based syscall */
......
@@ -836,7 +836,7 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
{
if (kaslr_enabled()) {
pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
(unsigned long)&_text - __START_KERNEL,
kaslr_offset(),
__START_KERNEL,
__START_KERNEL_map,
MODULES_VADDR-1);
......
@@ -99,7 +99,7 @@ struct klp_object {
struct klp_func *funcs;
/* internal */
struct kobject *kobj;
struct kobject kobj;
struct module *mod;
enum klp_state state;
};
@@ -123,6 +123,12 @@ struct klp_patch {
enum klp_state state;
};
#define klp_for_each_object(patch, obj) \
for (obj = patch->objs; obj->funcs; obj++)
#define klp_for_each_func(obj, func) \
for (func = obj->funcs; func->old_name; func++)
int klp_register_patch(struct klp_patch *);
int klp_unregister_patch(struct klp_patch *);
int klp_enable_patch(struct klp_patch *);
......
@@ -128,7 +128,7 @@ static bool klp_is_patch_registered(struct klp_patch *patch)
static bool klp_initialized(void)
{
return klp_root_kobj;
return !!klp_root_kobj;
}
struct klp_find_arg {
@@ -179,7 +179,9 @@ static int klp_find_object_symbol(const char *objname, const char *name,
.count = 0
};
mutex_lock(&module_mutex);
kallsyms_on_each_symbol(klp_find_callback, &args);
mutex_unlock(&module_mutex);
if (args.count == 0)
pr_err("symbol '%s' not found in symbol table\n", name);
@@ -219,13 +221,19 @@ static int klp_verify_vmlinux_symbol(const char *name, unsigned long addr)
.name = name,
.addr = addr,
};
int ret;
if (kallsyms_on_each_symbol(klp_verify_callback, &args))
return 0;
mutex_lock(&module_mutex);
ret = kallsyms_on_each_symbol(klp_verify_callback, &args);
mutex_unlock(&module_mutex);
pr_err("symbol '%s' not found at specified address 0x%016lx, kernel mismatch?\n",
name, addr);
return -EINVAL;
if (!ret) {
pr_err("symbol '%s' not found at specified address 0x%016lx, kernel mismatch?\n",
name, addr);
return -EINVAL;
}
return 0;
}
static int klp_find_verify_func_addr(struct klp_object *obj,
@@ -234,8 +242,9 @@ static int klp_find_verify_func_addr(struct klp_object *obj,
int ret;
#if defined(CONFIG_RANDOMIZE_BASE)
/* KASLR is enabled, disregard old_addr from user */
func->old_addr = 0;
/* If KASLR has been enabled, adjust old_addr accordingly */
if (kaslr_enabled() && func->old_addr)
func->old_addr += kaslr_offset();
#endif
if (!func->old_addr || klp_is_module(obj))
@@ -422,7 +431,7 @@ static void klp_disable_object(struct klp_object *obj)
{
struct klp_func *func;
for (func = obj->funcs; func->old_name; func++)
klp_for_each_func(obj, func)
if (func->state == KLP_ENABLED)
klp_disable_func(func);
@@ -440,7 +449,7 @@ static int klp_enable_object(struct klp_object *obj)
if (WARN_ON(!klp_is_object_loaded(obj)))
return -EINVAL;
for (func = obj->funcs; func->old_name; func++) {
klp_for_each_func(obj, func) {
ret = klp_enable_func(func);
if (ret) {
klp_disable_object(obj);
@@ -463,7 +472,7 @@ static int __klp_disable_patch(struct klp_patch *patch)
pr_notice("disabling patch '%s'\n", patch->mod->name);
for (obj = patch->objs; obj->funcs; obj++) {
klp_for_each_object(patch, obj) {
if (obj->state == KLP_ENABLED)
klp_disable_object(obj);
}
@@ -523,7 +532,7 @@ static int __klp_enable_patch(struct klp_patch *patch)
pr_notice("enabling patch '%s'\n", patch->mod->name);
for (obj = patch->objs; obj->funcs; obj++) {
klp_for_each_object(patch, obj) {
if (!klp_is_object_loaded(obj))
continue;
@@ -651,6 +660,15 @@ static struct kobj_type klp_ktype_patch = {
.default_attrs = klp_patch_attrs,
};
static void klp_kobj_release_object(struct kobject *kobj)
{
}
static struct kobj_type klp_ktype_object = {
.release = klp_kobj_release_object,
.sysfs_ops = &kobj_sysfs_ops,
};
static void klp_kobj_release_func(struct kobject *kobj)
{
}
@@ -680,7 +698,7 @@ static void klp_free_object_loaded(struct klp_object *obj)
obj->mod = NULL;
for (func = obj->funcs; func->old_name; func++)
klp_for_each_func(obj, func)
func->old_addr = 0;
}
@@ -695,7 +713,7 @@ static void klp_free_objects_limited(struct klp_patch *patch,
for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
klp_free_funcs_limited(obj, NULL);
kobject_put(obj->kobj);
kobject_put(&obj->kobj);
}
}
@@ -713,7 +731,7 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
func->state = KLP_DISABLED;
return kobject_init_and_add(&func->kobj, &klp_ktype_func,
obj->kobj, "%s", func->old_name);
&obj->kobj, "%s", func->old_name);
}
/* parts of the initialization that is done only when the object is loaded */
@@ -729,7 +747,7 @@ static int klp_init_object_loaded(struct klp_patch *patch,
return ret;
}
for (func = obj->funcs; func->old_name; func++) {
klp_for_each_func(obj, func) {
ret = klp_find_verify_func_addr(obj, func);
if (ret)
return ret;
@@ -753,11 +771,12 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
klp_find_object_module(obj);
name = klp_is_module(obj) ? obj->name : "vmlinux";
obj->kobj = kobject_create_and_add(name, &patch->kobj);
if (!obj->kobj)
return -ENOMEM;
ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
&patch->kobj, "%s", name);
if (ret)
return ret;
for (func = obj->funcs; func->old_name; func++) {
klp_for_each_func(obj, func) {
ret = klp_init_func(obj, func);
if (ret)
goto free;
@@ -773,7 +792,7 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
free:
klp_free_funcs_limited(obj, func);
kobject_put(obj->kobj);
kobject_put(&obj->kobj);
return ret;
}
@@ -794,7 +813,7 @@ static int klp_init_patch(struct klp_patch *patch)
if (ret)
goto unlock;
for (obj = patch->objs; obj->funcs; obj++) {
klp_for_each_object(patch, obj) {
ret = klp_init_object(patch, obj);
if (ret)
goto free;
@@ -883,7 +902,7 @@ int klp_register_patch(struct klp_patch *patch)
}
EXPORT_SYMBOL_GPL(klp_register_patch);
static void klp_module_notify_coming(struct klp_patch *patch,
static int klp_module_notify_coming(struct klp_patch *patch,
struct klp_object *obj)
{
struct module *pmod = patch->mod;
@@ -891,22 +910,23 @@ static void klp_module_notify_coming(struct klp_patch *patch,
int ret;
ret = klp_init_object_loaded(patch, obj);
if (ret)
goto err;
if (ret) {
pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
pmod->name, mod->name, ret);
return ret;
}
if (patch->state == KLP_DISABLED)
return;
return 0;
pr_notice("applying patch '%s' to loading module '%s'\n",
pmod->name, mod->name);
ret = klp_enable_object(obj);
if (!ret)
return;
err:
pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
pmod->name, mod->name, ret);
if (ret)
pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
pmod->name, mod->name, ret);
return ret;
}
static void klp_module_notify_going(struct klp_patch *patch,
@@ -930,6 +950,7 @@ static void klp_module_notify_going(struct klp_patch *patch,
static int klp_module_notify(struct notifier_block *nb, unsigned long action,
void *data)
{
int ret;
struct module *mod = data;
struct klp_patch *patch;
struct klp_object *obj;
@@ -949,13 +970,18 @@ static int klp_module_notify(struct notifier_block *nb, unsigned long action,
mod->klp_alive = false;
list_for_each_entry(patch, &klp_patches, list) {
for (obj = patch->objs; obj->funcs; obj++) {
klp_for_each_object(patch, obj) {
if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
continue;
if (action == MODULE_STATE_COMING) {
obj->mod = mod;
klp_module_notify_coming(patch, obj);
ret = klp_module_notify_coming(patch, obj);
if (ret) {
obj->mod = NULL;
pr_warn("patch '%s' is in an inconsistent state!\n",
patch->mod->name);
}
} else /* MODULE_STATE_GOING */
klp_module_notify_going(patch, obj);
@@ -973,7 +999,7 @@ static struct notifier_block klp_module_nb = {
.priority = INT_MIN+1, /* called late but before ftrace notifier */
};
static int klp_init(void)
static int __init klp_init(void)
{
int ret;
......