Commit c33e4283 authored by Cheng Jian, committed by Zheng Zengkai

livepatch/core: Allow implementation without ftrace

euler inclusion
category: feature
bugzilla: 51921
CVE: NA

----------------------------------------

Add support for a livepatch mode that does not rely on ftrace.

New config options for the WO_FTRACE mode:
	CONFIG_LIVEPATCH_WO_FTRACE=y
	CONFIG_LIVEPATCH_STACK=y

Implement livepatch without ftrace by direct jump: under
stop_machine, we rewrite the first few instructions (usually
one, but four for long jumps on ARM64) of the old function
into jump instructions, so execution jumps to the first
address of the new function once the livepatch is enabled.

KERNEL/MODULE
call/bl A---------------old_A------------
                        | jump new_A----+--------|
                        |               |        |
                        |               |        |
                        -----------------        |
                                                 |
                                                 |
                                                 |
livepatch_module-------------                    |
|                           |                    |
|new_A <--------------------+--------------------|
|                           |
|                           |
|---------------------------|
| .plt                      |
| ......PLTS for livepatch  |
-----------------------------

Things we need to consider for different architectures:

1. the jump instruction itself
2. partial relocations in the new function required by the livepatch
3. long jumps, needed when the target address exceeds the reach of a
   single jump instruction; this applies both to livepatch relocations
   and to enabling the livepatch (see the sketch below)
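
As a concrete illustration of item 3, here is a minimal user-space
sketch (not the committed implementation; the helper name is made up)
of the four-word ARM64 long-jump stub described above: ldr x16 loads a
64-bit literal placed immediately after the two instructions, and
br x16 branches to it.

#include <stdint.h>
#include <string.h>

/* A64 encodings for the long-jump stub:
 *   ldr x16, .+8  -> 0x58000050  (LDR literal, imm19 = 2, Rt = x16)
 *   br  x16       -> 0xd61f0200  (BR, Rn = x16)
 */
#define A64_LDR_X16_LIT8	0x58000050u
#define A64_BR_X16		0xd61f0200u

/* Hypothetical helper: build the 4 * 4-byte words that overwrite the
 * start of old_func so it tail-jumps to new_addr. The 64-bit target
 * lives little-endian in words 2 and 3, where the ldr picks it up. */
static void klp_emit_long_jump(uint32_t insns[4], uint64_t new_addr)
{
	insns[0] = A64_LDR_X16_LIT8;
	insns[1] = A64_BR_X16;
	memcpy(&insns[2], &new_addr, sizeof(new_addr));
}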
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
Reviewed-by: Li Bin <huawei.libin@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Wang ShaoBo <bobo.shaobowang@huawei.com>
Signed-off-by: Dong Kai <dongkai11@huawei.com>
Signed-off-by: Ye Weihua <yeweihua4@huawei.com>
Reviewed-by: Yang Jihong <yangjihong1@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 81aafb2b
......@@ -219,7 +219,7 @@ config PPC
select HAVE_KPROBES_ON_FTRACE
select HAVE_KRETPROBES
select HAVE_LD_DEAD_CODE_DATA_ELIMINATION
select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_LIVEPATCH_FTRACE if HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
select HAVE_HARDLOCKUP_DETECTOR_ARCH if PPC64 && PPC_BOOK3S && SMP
......
......@@ -165,7 +165,7 @@ config S390
select HAVE_KPROBES_ON_FTRACE
select HAVE_KRETPROBES
select HAVE_KVM
select HAVE_LIVEPATCH
select HAVE_LIVEPATCH_FTRACE
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_MEMBLOCK_PHYS_MAP
......
......@@ -198,7 +198,7 @@ config X86
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_KRETPROBES
select HAVE_KVM
select HAVE_LIVEPATCH if X86_64
select HAVE_LIVEPATCH_FTRACE if X86_64
select HAVE_MIXED_BREAKPOINTS_REGS
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_MOVE_PMD
......
......@@ -75,7 +75,9 @@ struct klp_func {
unsigned long old_size, new_size;
bool nop;
bool patched;
#ifdef CONFIG_LIVEPATCH_FTRACE
bool transition;
#endif
};
struct klp_object;
......@@ -195,6 +197,12 @@ struct klp_patch {
int klp_enable_patch(struct klp_patch *);
int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
const char *shstrtab, const char *strtab,
unsigned int symindex, unsigned int secindex,
const char *objname);
#ifdef CONFIG_LIVEPATCH_FTRACE
/* Called from the module loader during module coming/going states */
int klp_module_coming(struct module *mod);
void klp_module_going(struct module *mod);
......@@ -231,10 +239,20 @@ void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor);
struct klp_state *klp_get_state(struct klp_patch *patch, unsigned long id);
struct klp_state *klp_get_prev_state(unsigned long id);
int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
const char *shstrtab, const char *strtab,
unsigned int symindex, unsigned int secindex,
const char *objname);
#else /* !CONFIG_LIVEPATCH_FTRACE */
static inline int klp_module_coming(struct module *mod) { return 0; }
static inline void klp_module_going(struct module *mod) {}
static inline bool klp_patch_pending(struct task_struct *task) { return false; }
static inline void klp_update_patch_state(struct task_struct *task) {}
static inline void klp_copy_process(struct task_struct *child) {}
static inline bool klp_have_reliable_stack(void) { return true; }
#ifndef klp_smp_isb
#define klp_smp_isb()
#endif
#endif /* CONFIG_LIVEPATCH_FTRACE */
#else /* !CONFIG_LIVEPATCH */
......
# SPDX-License-Identifier: GPL-2.0-only
config HAVE_LIVEPATCH
config HAVE_LIVEPATCH_FTRACE
bool
help
Arch supports kernel live patching
Arch supports kernel live patching based on ftrace
config HAVE_LIVEPATCH_WO_FTRACE
bool
help
Arch supports kernel live patching without ftrace
if HAVE_LIVEPATCH_FTRACE || HAVE_LIVEPATCH_WO_FTRACE
menu "Enable Livepatch"
config LIVEPATCH
bool "Kernel Live Patching"
depends on DYNAMIC_FTRACE_WITH_REGS
depends on MODULES
depends on SYSFS
depends on KALLSYMS_ALL
depends on HAVE_LIVEPATCH
depends on HAVE_LIVEPATCH_FTRACE || HAVE_LIVEPATCH_WO_FTRACE
depends on !TRIM_UNUSED_KSYMS
depends on DEBUG_INFO
default n
help
Say Y here if you want to support kernel live patching.
This option has no runtime impact until a kernel "patch"
module uses the interface provided by this option to register
a patch, causing calls to patched functions to be redirected
to new function code contained in the patch module.
choice
prompt "live patching method"
depends on LIVEPATCH
help
Live patching implementation method configuration.
config LIVEPATCH_FTRACE
bool "based on ftrace"
depends on HAVE_LIVEPATCH_FTRACE
depends on DYNAMIC_FTRACE_WITH_REGS
help
Supports kernel live patching based on ftrace
config LIVEPATCH_WO_FTRACE
bool "without ftrace"
depends on HAVE_LIVEPATCH_WO_FTRACE
help
Supports kernel live patching without ftrace
endchoice
config LIVEPATCH_STACK
bool "Enforcing the patch stacking principle"
depends on LIVEPATCH_FTRACE || LIVEPATCH_WO_FTRACE
default y
help
Say N here if you want to remove the patch stacking principle.
endmenu
endif
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_LIVEPATCH) += livepatch.o
obj-$(CONFIG_LIVEPATCH_FTRACE) += transition.o
obj-$(CONFIG_LIVEPATCH_FTRACE) += shadow.o
obj-$(CONFIG_LIVEPATCH_FTRACE) += state.o
livepatch-objs := core.o patch.o shadow.o state.o transition.o
livepatch-objs := core.o patch.o
......@@ -22,8 +22,15 @@
#include <asm/cacheflush.h>
#include "core.h"
#include "patch.h"
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#ifdef CONFIG_LIVEPATCH_FTRACE
#include "state.h"
#include "transition.h"
#endif
#ifdef CONFIG_LIVEPATCH_WO_FTRACE
#include <linux/stop_machine.h>
#endif
/*
* klp_mutex is a coarse lock which serializes access to klp data. All
......@@ -44,18 +51,25 @@ LIST_HEAD(klp_patches);
static struct kobject *klp_root_kobj;
#ifdef CONFIG_LIVEPATCH_WO_FTRACE
struct patch_data {
struct klp_patch *patch;
atomic_t cpu_count;
};
#endif
static bool klp_is_module(struct klp_object *obj)
{
return obj->name;
}
/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
static int klp_find_object_module(struct klp_object *obj)
{
struct module *mod;
if (!klp_is_module(obj))
return;
return 0;
mutex_lock(&module_mutex);
/*
......@@ -63,6 +77,7 @@ static void klp_find_object_module(struct klp_object *obj)
* we do not take a reference here. The patches are removed by
* klp_module_going() instead.
*/
mod = find_module(obj->name);
/*
* Do not mess work of klp_module_coming() and klp_module_going().
......@@ -71,10 +86,26 @@ static void klp_find_object_module(struct klp_object *obj)
* until mod->exit() finishes. This is especially important for
* patches that modify semantic of the functions.
*/
#ifdef CONFIG_LIVEPATCH_FTRACE
if (mod && mod->klp_alive)
obj->mod = mod;
#else
if (!mod) {
pr_err("module '%s' not loaded\n", obj->name);
mutex_unlock(&module_mutex);
return -ENOPKG; /* the depended-on module is not loaded */
}
if (mod->state == MODULE_STATE_COMING || !try_module_get(mod)) {
mutex_unlock(&module_mutex);
return -EINVAL;
}
obj->mod = mod;
#endif
mutex_unlock(&module_mutex);
return 0;
}
static bool klp_initialized(void)
......@@ -351,6 +382,7 @@ static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
goto out;
}
#ifdef CONFIG_LIVEPATCH_FTRACE
/*
* Allow to reverse a pending transition in both ways. It might be
* necessary to complete the transition without forcing and breaking
......@@ -364,6 +396,15 @@ static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
ret = __klp_disable_patch(patch);
else
ret = -EINVAL;
#else /* ifdef CONFIG_LIVEPATCH_WO_FTRACE */
if (enabled) {
ret = -EINVAL;
} else {
ret = __klp_disable_patch(patch);
if (ret)
goto out;
}
#endif
out:
mutex_unlock(&klp_mutex);
......@@ -382,6 +423,7 @@ static ssize_t enabled_show(struct kobject *kobj,
return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
}
#ifdef CONFIG_LIVEPATCH_FTRACE
static ssize_t transition_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
......@@ -420,18 +462,60 @@ static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
return count;
}
#endif /* #ifdef CONFIG_LIVEPATCH_FTRACE */
static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
#ifdef CONFIG_LIVEPATCH_FTRACE
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
#endif /* #ifdef CONFIG_LIVEPATCH_FTRACE */
static struct attribute *klp_patch_attrs[] = {
&enabled_kobj_attr.attr,
#ifdef CONFIG_LIVEPATCH_FTRACE
&transition_kobj_attr.attr,
&force_kobj_attr.attr,
#endif /* #ifdef CONFIG_LIVEPATCH_FTRACE */
NULL
};
ATTRIBUTE_GROUPS(klp_patch);
static int state_show(struct seq_file *m, void *v)
{
struct klp_patch *patch;
char *state;
int index = 0;
seq_printf(m, "%-5s\t%-26s\t%-8s\n", "Index", "Patch", "State");
seq_puts(m, "-----------------------------------------------\n");
mutex_lock(&klp_mutex);
list_for_each_entry(patch, &klp_patches, list) {
if (patch->enabled)
state = "enabled";
else
state = "disabled";
seq_printf(m, "%-5d\t%-26s\t%-8s\n", ++index,
patch->mod->name, state);
}
mutex_unlock(&klp_mutex);
seq_puts(m, "-----------------------------------------------\n");
return 0;
}
static int klp_state_open(struct inode *inode, struct file *filp)
{
return single_open(filp, state_show, NULL);
}
static const struct proc_ops proc_klpstate_operations = {
.proc_open = klp_state_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_release = single_release,
};
static void klp_free_object_dynamic(struct klp_object *obj)
{
kfree(obj->name);
......@@ -607,6 +691,7 @@ static void __klp_free_funcs(struct klp_object *obj, bool nops_only)
}
}
#ifdef CONFIG_LIVEPATCH_FTRACE
/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
......@@ -621,12 +706,17 @@ static void klp_free_object_loaded(struct klp_object *obj)
func->new_func = NULL;
}
}
#endif /* #ifdef CONFIG_LIVEPATCH_FTRACE */
static void __klp_free_objects(struct klp_patch *patch, bool nops_only)
{
struct klp_object *obj, *tmp_obj;
klp_for_each_object_safe(patch, obj, tmp_obj) {
#ifdef CONFIG_LIVEPATCH_WO_FTRACE
if (klp_is_module(obj))
module_put(obj->mod);
#endif
__klp_free_funcs(obj, nops_only);
if (nops_only && !obj->dynamic)
......@@ -642,10 +732,12 @@ static void klp_free_objects(struct klp_patch *patch)
__klp_free_objects(patch, false);
}
#ifdef CONFIG_LIVEPATCH_FTRACE
static void klp_free_objects_dynamic(struct klp_patch *patch)
{
__klp_free_objects(patch, true);
}
#endif
/*
* This function implements the free operations that can be called safely
......@@ -733,7 +825,9 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
INIT_LIST_HEAD(&func->stack_node);
func->patched = false;
#ifdef CONFIG_LIVEPATCH_FTRACE
func->transition = false;
#endif
/* The format for the sysfs directory is <function,sympos> where sympos
* is the nth occurrence of this symbol in kallsyms for the patched
......@@ -829,22 +923,34 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
obj->patched = false;
obj->mod = NULL;
klp_find_object_module(obj);
ret = klp_find_object_module(obj);
if (ret)
return ret;
name = klp_is_module(obj) ? obj->name : "vmlinux";
ret = kobject_add(&obj->kobj, &patch->kobj, "%s", name);
if (ret)
return ret;
goto out;
klp_for_each_func(obj, func) {
ret = klp_init_func(obj, func);
if (ret)
return ret;
goto out;
}
if (klp_is_object_loaded(obj))
if (klp_is_object_loaded(obj)) {
ret = klp_init_object_loaded(patch, obj);
if (ret)
goto out;
}
return 0;
out:
#ifdef CONFIG_LIVEPATCH_WO_FTRACE
if (klp_is_module(obj))
module_put(obj->mod);
#endif
return ret;
}
......@@ -896,6 +1002,21 @@ static int klp_init_patch_early(struct klp_patch *patch)
return 0;
}
#ifdef CONFIG_LIVEPATCH_WO_FTRACE
static void klp_free_objects_mod_limited(struct klp_patch *patch,
struct klp_object *limit)
{
struct klp_object *obj, *tmp_obj;
klp_for_each_object_safe(patch, obj, tmp_obj) {
if (limit == obj)
break;
if (klp_is_module(obj))
module_put(obj->mod);
}
}
#endif
static int klp_init_patch(struct klp_patch *patch)
{
struct klp_object *obj;
......@@ -914,14 +1035,20 @@ static int klp_init_patch(struct klp_patch *patch)
klp_for_each_object(patch, obj) {
ret = klp_init_object(patch, obj);
if (ret)
return ret;
goto out;
}
list_add_tail(&patch->list, &klp_patches);
return 0;
out:
#ifdef CONFIG_LIVEPATCH_WO_FTRACE
klp_free_objects_mod_limited(patch, obj);
#endif
return ret;
}
#ifdef CONFIG_LIVEPATCH_FTRACE
static int __klp_disable_patch(struct klp_patch *patch)
{
struct klp_object *obj;
......@@ -953,7 +1080,84 @@ static int __klp_disable_patch(struct klp_patch *patch)
return 0;
}
#else /* ifdef CONFIG_LIVEPATCH_WO_FTRACE */
int __weak klp_check_calltrace(struct klp_patch *patch, int enable)
{
return 0;
}
/*
* This function is called from stop_machine() context.
*/
static int disable_patch(struct klp_patch *patch)
{
pr_notice("disabling patch '%s'\n", patch->mod->name);
klp_unpatch_objects(patch);
patch->enabled = false;
module_put(patch->mod);
return 0;
}
int klp_try_disable_patch(void *data)
{
int ret = 0;
struct patch_data *pd = (struct patch_data *)data;
if (atomic_inc_return(&pd->cpu_count) == 1) {
struct klp_patch *patch = pd->patch;
ret = klp_check_calltrace(patch, 0);
if (ret) {
atomic_inc(&pd->cpu_count);
return ret;
}
ret = disable_patch(patch);
if (ret) {
atomic_inc(&pd->cpu_count);
return ret;
}
atomic_inc(&pd->cpu_count);
} else {
while (atomic_read(&pd->cpu_count) <= num_online_cpus())
cpu_relax();
klp_smp_isb();
}
return ret;
}
static int __klp_disable_patch(struct klp_patch *patch)
{
int ret;
struct patch_data patch_data = {
.patch = patch,
.cpu_count = ATOMIC_INIT(0),
};
if (WARN_ON(!patch->enabled))
return -EINVAL;
#ifdef CONFIG_LIVEPATCH_STACK
/* enforce stacking: only the last enabled patch can be disabled */
if (!list_is_last(&patch->list, &klp_patches) &&
list_next_entry(patch, list)->enabled) {
pr_err("only the last enabled patch can be disabled\n");
return -EBUSY;
}
#endif
ret = stop_machine(klp_try_disable_patch, &patch_data, cpu_online_mask);
if (ret)
return ret;
klp_free_patch_async(patch);
return 0;
}
#endif /* ifdef CONFIG_LIVEPATCH_FTRACE */
#ifdef CONFIG_LIVEPATCH_FTRACE
static int __klp_enable_patch(struct klp_patch *patch)
{
struct klp_object *obj;
......@@ -1008,6 +1212,106 @@ static int __klp_enable_patch(struct klp_patch *patch)
klp_cancel_transition();
return ret;
}
#else /* ifdef CONFIG_LIVEPATCH_WO_FTRACE */
/*
* This function is called from stop_machine() context.
*/
static int enable_patch(struct klp_patch *patch)
{
struct klp_object *obj;
int ret;
pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
if (!try_module_get(patch->mod))
return -ENODEV;
patch->enabled = true;
pr_notice("enabling patch '%s'\n", patch->mod->name);
klp_for_each_object(patch, obj) {
if (!klp_is_object_loaded(obj))
continue;
ret = klp_patch_object(obj);
if (ret) {
pr_warn("failed to patch object '%s'\n",
klp_is_module(obj) ? obj->name : "vmlinux");
goto disable;
}
}
return 0;
disable:
disable_patch(patch);
return ret;
}
int klp_try_enable_patch(void *data)
{
int ret = 0;
struct patch_data *pd = (struct patch_data *)data;
if (atomic_inc_return(&pd->cpu_count) == 1) {
struct klp_patch *patch = pd->patch;
ret = klp_check_calltrace(patch, 1);
if (ret) {
atomic_inc(&pd->cpu_count);
return ret;
}
ret = enable_patch(patch);
if (ret) {
atomic_inc(&pd->cpu_count);
return ret;
}
atomic_inc(&pd->cpu_count);
} else {
while (atomic_read(&pd->cpu_count) <= num_online_cpus())
cpu_relax();
klp_smp_isb();
}
return ret;
}
static int __klp_enable_patch(struct klp_patch *patch)
{
int ret;
struct patch_data patch_data = {
.patch = patch,
.cpu_count = ATOMIC_INIT(0),
};
if (WARN_ON(patch->enabled))
return -EINVAL;
#ifdef CONFIG_LIVEPATCH_STACK
/* enforce stacking: only the first disabled patch can be enabled */
if (patch->list.prev != &klp_patches &&
!list_prev_entry(patch, list)->enabled) {
pr_err("only the first disabled patch can be enabled\n");
return -EBUSY;
}
#endif
ret = stop_machine(klp_try_enable_patch, &patch_data, cpu_online_mask);
if (ret)
return ret;
#ifndef CONFIG_LIVEPATCH_STACK
/* move the enabled patch to the list tail */
list_del(&patch->list);
list_add_tail(&patch->list, &klp_patches);
#endif
return 0;
}
#endif /* #ifdef CONFIG_LIVEPATCH_FTRACE */
/**
* klp_enable_patch() - enable the livepatch
......@@ -1045,12 +1349,14 @@ int klp_enable_patch(struct klp_patch *patch)
mutex_lock(&klp_mutex);
#ifdef CONFIG_LIVEPATCH_FTRACE
if (!klp_is_patch_compatible(patch)) {
pr_err("Livepatch patch (%s) is not compatible with the already installed livepatches.\n",
patch->mod->name);
mutex_unlock(&klp_mutex);
return -EINVAL;
}
#endif
ret = klp_init_patch_early(patch);
if (ret) {
......@@ -1109,6 +1415,7 @@ void klp_unpatch_replaced_patches(struct klp_patch *new_patch)
}
}
#ifdef CONFIG_LIVEPATCH_FTRACE
/*
* This function removes the dynamically allocated 'nop' functions.
*
......@@ -1264,14 +1571,31 @@ void klp_module_going(struct module *mod)
mutex_unlock(&klp_mutex);
}
#endif /* ifdef CONFIG_LIVEPATCH_FTRACE */
static int __init klp_init(void)
{
struct proc_dir_entry *root_klp_dir, *res;
root_klp_dir = proc_mkdir("livepatch", NULL);
if (!root_klp_dir)
goto error_out;
res = proc_create("livepatch/state", 0, NULL,
&proc_klpstate_operations);
if (!res)
goto error_remove;
klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
if (!klp_root_kobj)
return -ENOMEM;
goto error_remove;
return 0;
error_remove:
remove_proc_entry("livepatch", NULL);
error_out:
return -ENOMEM;
}
module_init(klp_init);
......@@ -20,6 +20,7 @@
#include "patch.h"
#include "transition.h"
#ifdef CONFIG_LIVEPATCH_FTRACE
static LIST_HEAD(klp_ops);
struct klp_ops *klp_find_ops(void *old_func)
......@@ -235,6 +236,47 @@ static int klp_patch_func(struct klp_func *func)
return ret;
}
#else /* #ifdef CONFIG_LIVEPATCH_WO_FTRACE */
void __weak arch_klp_unpatch_func(struct klp_func *func)
{
}
int __weak arch_klp_patch_func(struct klp_func *func)
{
return -ENOSYS;
}
static void klp_unpatch_func(struct klp_func *func)
{
if (WARN_ON(!func->patched))
return;
if (WARN_ON(!func->old_func))
return;
arch_klp_unpatch_func(func);
func->patched = false;
}
static inline int klp_patch_func(struct klp_func *func)
{
int ret = 0;
if (WARN_ON(!func->old_func))
return -EINVAL;
if (WARN_ON(func->patched))
return -EINVAL;
ret = arch_klp_patch_func(func);
if (!ret)
func->patched = true;
return ret;
}
#endif
static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
struct klp_func *func;
......
......@@ -22,7 +22,9 @@
struct klp_ops {
struct list_head node;
struct list_head func_stack;
#ifdef CONFIG_LIVEPATCH_FTRACE
struct ftrace_ops fops;
#endif
};
struct klp_ops *klp_find_ops(void *old_func);
......
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-sample.o
ifeq ($(CONFIG_LIVEPATCH_FTRACE), y)
obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-shadow-mod.o
obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-shadow-fix1.o
obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-shadow-fix2.o
obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-callbacks-demo.o
obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-callbacks-mod.o
obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-callbacks-busymod.o
endif
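
For context on how these samples exercise the API: a livepatch module
declares klp_func/klp_object/klp_patch tables and calls
klp_enable_patch() from its init function. The following is abridged
from the in-tree samples/livepatch/livepatch-sample.c (which
live-patches cmdline_proc_show); under CONFIG_LIVEPATCH_WO_FTRACE the
enable step runs through the stop_machine path added in this commit.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/livepatch.h>
#include <linux/seq_file.h>

/* Replacement for vmlinux's cmdline_proc_show() */
static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", "this has been live patched");
	return 0;
}

static struct klp_func funcs[] = {
	{
		.old_name = "cmdline_proc_show",
		.new_func = livepatch_cmdline_proc_show,
	}, { }
};

static struct klp_object objs[] = {
	{
		/* NULL name means the patched object is vmlinux */
		.funcs = funcs,
	}, { }
};

static struct klp_patch patch = {
	.mod = THIS_MODULE,
	.objs = objs,
};

static int livepatch_init(void)
{
	return klp_enable_patch(&patch);
}

static void livepatch_exit(void)
{
}

module_init(livepatch_init);
module_exit(livepatch_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");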