Unverified commit 6468c85c, authored by openeuler-ci-bot, committed via Gitee

!123 Implement KProbe jump optimization for RISC-V

Merge Pull Request from: @chenguokai 
 
This is the output of the OSPP 2022 [project](https://summer-ospp.ac.cn/#/org/prodetail/22b970495).

Tested on a QEMU RISC-V 32-bit virt machine with SMP enabled.
 
Link: https://gitee.com/openeuler/kernel/pulls/123
Reviewed-by: Zheng Zengkai <zhengzengkai@huawei.com> 
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com> 
@@ -362,14 +362,11 @@ register_kprobe
#include <linux/kprobes.h>
int register_kprobe(struct kprobe *kp);
-Sets a breakpoint at the address kp->addr. When the breakpoint is
-hit, Kprobes calls kp->pre_handler. After the probed instruction
-is single-stepped, Kprobe calls kp->post_handler. If a fault
-occurs during execution of kp->pre_handler or kp->post_handler,
-or during single-stepping of the probed instruction, Kprobes calls
-kp->fault_handler. Any or all handlers can be NULL. If kp->flags
-is set KPROBE_FLAG_DISABLED, that kp will be registered but disabled,
-so, its handlers aren't hit until calling enable_kprobe(kp).
+Sets a breakpoint at the address kp->addr. When the breakpoint is hit, Kprobes
+calls kp->pre_handler. After the probed instruction is single-stepped, Kprobe
+calls kp->post_handler. Any or all handlers can be NULL. If kp->flags is set
+KPROBE_FLAG_DISABLED, that kp will be registered but disabled, so, its handlers
+aren't hit until calling enable_kprobe(kp).
.. note::
@@ -415,17 +412,6 @@ User's post-handler (kp->post_handler)::
p and regs are as described for the pre_handler. flags always seems
to be zero.
User's fault-handler (kp->fault_handler)::
#include <linux/kprobes.h>
#include <linux/ptrace.h>
int fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr);
p and regs are as described for the pre_handler. trapnr is the
architecture-specific trap number associated with the fault (e.g.,
on i386, 13 for a general protection fault or 14 for a page fault).
Returns 1 if it successfully handled the exception.
register_kretprobe
------------------
......
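For context, here is a minimal sketch of the register_kprobe() API documented above. The probed symbol kernel_clone is illustrative, and note that after this change a struct kprobe no longer has a fault_handler field:

#include <linux/module.h>
#include <linux/kprobes.h>

/* runs before the probed instruction is single-stepped */
static int demo_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("pre: about to execute %p\n", p->addr);
	return 0;	/* 0 means: continue with normal single-stepping */
}

/* runs after the probed instruction has been single-stepped */
static void demo_post(struct kprobe *p, struct pt_regs *regs,
		      unsigned long flags)
{
	pr_info("post: done at %p\n", p->addr);
}

static struct kprobe demo_kp = {
	.symbol_name	= "kernel_clone",	/* illustrative target */
	.pre_handler	= demo_pre,
	.post_handler	= demo_post,
};

static int __init demo_init(void)
{
	return register_kprobe(&demo_kp);
}

static void __exit demo_exit(void)
{
	unregister_kprobe(&demo_kp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");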
@@ -317,22 +317,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned long trapnr)
 * caused the fault.
 */
/* We increment the nmissed count for accounting,
* we can also use npre/npostfault count for accounting
* these specific fault cases.
*/
kprobes_inc_nmissed_count(cur);
/*
* We come here because instructions in the pre/post
* handler caused the page_fault, this could happen
* if handler tries to access user space by
* copy_from_user(), get_user() etc. Let the
* user-specified handler try to fix it first.
*/
if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
return 1;
/*
 * In case the user-specified fault handler returned zero,
 * try to fix up.
......
@@ -348,29 +348,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
reset_current_kprobe();
}
break;
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
* We increment the nmissed count for accounting,
* we can also use npre/npostfault count for accounting
* these specific fault cases.
*/
kprobes_inc_nmissed_count(cur);
/*
* We come here because instructions in the pre/post
* handler caused the page_fault, this could happen
* if handler tries to access user space by
* copy_from_user(), get_user() etc. Let the
* user-specified handler try to fix it.
*/
if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
return 1;
break;
default:
break;
}
return 0;
......
@@ -296,23 +296,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
break;
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
* We increment the nmissed count for accounting,
* we can also use npre/npostfault count for accounting
* these specific fault cases.
*/
kprobes_inc_nmissed_count(cur);
/*
* We come here because instructions in the pre/post
* handler caused the page_fault, this could happen
* if handler tries to access user space by
* copy_from_user(), get_user() etc. Let the
* user-specified handler try to fix it first.
*/
if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
return 1;
/*
 * In case the user-specified fault handler returned
 * zero, try to fix up.
......
@@ -294,23 +294,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
break;
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
* We increment the nmissed count for accounting,
* we can also use npre/npostfault count for accounting
* these specific fault cases.
*/
kprobes_inc_nmissed_count(cur);
/*
* We come here because instructions in the pre/post
* handler caused the page_fault, this could happen
* if handler tries to access user space by
* copy_from_user(), get_user() etc. Let the
* user-specified handler try to fix it first.
*/
if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
return 1;
/*
 * In case the user-specified fault handler returned
 * zero, try to fix up.
......
@@ -844,22 +844,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
break;
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
* We increment the nmissed count for accounting,
* we can also use npre/npostfault count for accounting
* these specific fault cases.
*/
kprobes_inc_nmissed_count(cur);
/*
* We come here because instructions in the pre/post
* handler caused the page_fault, this could happen
* if handler tries to access user space by
* copy_from_user(), get_user() etc. Let the
* user-specified handler try to fix it first.
*/
if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
return 1;
/*
 * In case the user-specified fault handler returned
 * zero, try to fix up.
......
@@ -403,9 +403,6 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
return 1;
if (kcb->kprobe_status & KPROBE_HIT_SS) {
resume_execution(cur, regs, kcb);
regs->cp0_status |= kcb->kprobe_old_SR;
......
@@ -502,23 +502,6 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
break;
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
* We increment the nmissed count for accounting,
* we can also use npre/npostfault count for accounting
* these specific fault cases.
*/
kprobes_inc_nmissed_count(cur);
/*
* We come here because instructions in the pre/post
* handler caused the page_fault, this could happen
* if handler tries to access user space by
* copy_from_user(), get_user() etc. Let the
* user-specified handler try to fix it first.
*/
if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
return 1;
/*
 * In case the user-specified fault handler returned
 * zero, try to fix up.
......
@@ -69,10 +69,15 @@ config RISCV
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_GCC_PLUGINS
select HAVE_GENERIC_VDSO if MMU && 64BIT
select HAVE_KPROBES if !XIP_KERNEL
select HAVE_KPROBES_ON_FTRACE if !XIP_KERNEL
select HAVE_KRETPROBES if !XIP_KERNEL
select HAVE_OPTPROBES if !XIP_KERNEL && !RISCV_ISA_C
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_RUST if 64BIT
......
@@ -35,7 +35,7 @@ struct dyn_arch_ftrace {
};
#endif
-#ifdef CONFIG_DYNAMIC_FTRACE
+#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_OPTPROBES)
/*
 * A general call in RISC-V is a pair of insts:
 * 1) auipc: setting high-20 pc-related bits to ra register
......
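As an aside, the auipc+jalr pair described in this comment reaches a roughly ±2 GiB range by splitting a signed 32-bit PC-relative offset into a high 20-bit part (auipc) and a low 12-bit part (jalr). A user-space sketch of that split (the helper name is made up for illustration; jalr sign-extends its immediate, hence the +0x800 rounding):

#include <stdint.h>
#include <stdio.h>

/*
 * Split a signed 32-bit PC-relative offset into auipc's hi20 and
 * jalr's lo12 immediates. Because jalr sign-extends its 12-bit
 * immediate, hi20 is rounded up by 0x800 when lo12 ends up negative.
 */
static void split_offset(int32_t offset, int32_t *hi20, int32_t *lo12)
{
	*hi20 = (offset + 0x800) >> 12;
	*lo12 = offset - (*hi20 << 12);
}

int main(void)
{
	int32_t hi, lo;

	split_offset(0x12345abc, &hi, &lo);
	/* reconstruct: (hi << 12) + lo == the original offset */
	printf("hi20=0x%x lo12=%d check=0x%x\n", hi, lo, (hi << 12) + lo);
	return 0;
}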
@@ -11,4 +11,68 @@
#include <asm-generic/kprobes.h>
#ifdef CONFIG_KPROBES
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#define __ARCH_WANT_KPROBES_INSN_SLOT
#define MAX_INSN_SIZE 2
#define flush_insn_slot(p) do { } while (0)
#define kretprobe_blacklist_size 0
#include <asm/probes.h>
struct prev_kprobe {
struct kprobe *kp;
unsigned int status;
};
/* per-cpu kprobe control block */
struct kprobe_ctlblk {
unsigned int kprobe_status;
unsigned long saved_status;
struct prev_kprobe prev_kprobe;
};
void arch_remove_kprobe(struct kprobe *p);
int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr);
bool kprobe_breakpoint_handler(struct pt_regs *regs);
bool kprobe_single_step_handler(struct pt_regs *regs);
void kretprobe_trampoline(void);
void __kprobes *trampoline_probe_handler(struct pt_regs *regs);
#ifdef CONFIG_OPTPROBES
/* optinsn template addresses */
extern __visible kprobe_opcode_t optprobe_template_entry[];
extern __visible kprobe_opcode_t optprobe_template_val[];
extern __visible kprobe_opcode_t optprobe_template_call[];
extern __visible kprobe_opcode_t optprobe_template_store_epc[];
extern __visible kprobe_opcode_t optprobe_template_end[];
extern __visible kprobe_opcode_t optprobe_template_sub_sp[];
extern __visible kprobe_opcode_t optprobe_template_add_sp[];
extern __visible kprobe_opcode_t optprobe_template_restore_begin[];
extern __visible kprobe_opcode_t optprobe_template_restore_orig_insn[];
extern __visible kprobe_opcode_t optprobe_template_restore_end[];
#define MAX_OPTINSN_SIZE \
((unsigned long)optprobe_template_end - \
(unsigned long)optprobe_template_entry)
#define MAX_COPIED_INSN 2
#define MAX_OPTIMIZED_LENGTH (MAX_COPIED_INSN * 4)
#define JUMP_SIZE MAX_OPTIMIZED_LENGTH
struct arch_optimized_insn {
kprobe_opcode_t copied_insn[MAX_COPIED_INSN];
/* detour code buffer */
kprobe_opcode_t *insn;
};
#define RVI_INST_SIZE 4
#endif /* CONFIG_OPTPROBES */
#endif /* CONFIG_KPROBES */
#endif /* _ASM_RISCV_KPROBES_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_RISCV_PROBES_H
#define _ASM_RISCV_PROBES_H
typedef u32 probe_opcode_t;
typedef bool (probes_handler_t) (u32 opcode, unsigned long addr, struct pt_regs *);
/* architecture specific copy of original instruction */
struct arch_probe_insn {
probe_opcode_t *insn;
probes_handler_t *handler;
/* restore address after simulation */
unsigned long restore;
};
#ifdef CONFIG_KPROBES
typedef u32 kprobe_opcode_t;
struct arch_specific_insn {
struct arch_probe_insn api;
};
#endif
#endif /* _ASM_RISCV_PROBES_H */
@@ -8,6 +8,7 @@
#include <uapi/asm/ptrace.h>
#include <asm/csr.h>
#include <linux/compiler.h>
#ifndef __ASSEMBLY__
@@ -60,6 +61,7 @@ struct pt_regs {
#define user_mode(regs) (((regs)->status & SR_PP) == 0)
#define MAX_REG_OFFSET offsetof(struct pt_regs, orig_a0)
/* Helpers for working with the instruction pointer */
static inline unsigned long instruction_pointer(struct pt_regs *regs)
@@ -85,6 +87,12 @@ static inline void user_stack_pointer_set(struct pt_regs *regs,
regs->sp = val;
}
/* Valid only for Kernel mode traps. */
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
return regs->sp;
}
/* Helpers for working with the frame pointer */
static inline unsigned long frame_pointer(struct pt_regs *regs)
{
@@ -101,6 +109,27 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
return regs->a0;
}
extern int regs_query_register_offset(const char *name);
extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
unsigned int n);
/**
* regs_get_register() - get register value from its offset
 * @regs: pt_regs from which the register value is retrieved
 * @offset: offset of the register.
 *
 * regs_get_register() returns the value of the register whose offset in
 * struct pt_regs is @offset, read from @regs.
* If @offset is bigger than MAX_REG_OFFSET, this returns 0.
*/
static inline unsigned long regs_get_register(struct pt_regs *regs,
unsigned int offset)
{
if (unlikely(offset > MAX_REG_OFFSET))
return 0;
return *(unsigned long *)((unsigned long)regs + offset);
}
#endif /* __ASSEMBLY__ */
#endif /* _ASM_RISCV_PTRACE_H */
@@ -30,6 +30,7 @@ obj-y += riscv_ksyms.o
obj-y += stacktrace.o
obj-y += cacheinfo.o
obj-y += patch.o
obj-y += probes/
obj-$(CONFIG_MMU) += vdso.o vdso/
obj-$(CONFIG_RISCV_M_MODE) += traps_misaligned.o
......
@@ -20,7 +20,12 @@ struct patch_insn {
};
#ifdef CONFIG_MMU
-static void *patch_map(void *addr, int fixmap)
+/*
+ * fix_to_virt(, idx) needs a compile-time constant index (not a dynamic
+ * value held in a register), otherwise BUILD_BUG_ON fires with
+ * "idx >= __end_of_fixed_addresses". Hence '__always_inline' and
+ * 'const unsigned int fixmap' here.
+ */
+static __always_inline void *patch_map(void *addr, const unsigned int fixmap)
{
uintptr_t uintaddr = (uintptr_t) addr;
struct page *page;
@@ -37,7 +42,6 @@ static void *patch_map(void *addr, int fixmap)
return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
(uintaddr & ~PAGE_MASK));
}
NOKPROBE_SYMBOL(patch_map);
static void patch_unmap(int fixmap)
{
......
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o simulate-insn.o
obj-$(CONFIG_KPROBES) += kprobes_trampoline.o
obj-$(CONFIG_KPROBES_ON_FTRACE) += ftrace.o
obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o simulate-insn.o
obj-$(CONFIG_OPTPROBES) += opt.o opt_trampoline.o
CFLAGS_REMOVE_simulate-insn.o = $(CC_FLAGS_FTRACE)
// SPDX-License-Identifier: GPL-2.0+
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <asm/sections.h>
#include "decode-insn.h"
#include "simulate-insn.h"
/* Return:
 * INSN_REJECTED     if the instruction is not allowed to be kprobed,
 * INSN_GOOD_NO_SLOT if the instruction is simulated and doesn't use a slot,
 * INSN_GOOD         otherwise.
 */
enum probe_insn __kprobes
riscv_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *api)
{
probe_opcode_t insn = *addr;
/*
* Reject instructions list:
*/
RISCV_INSN_REJECTED(system, insn);
RISCV_INSN_REJECTED(fence, insn);
/*
* Simulate instructions list:
* TODO: the REJECTED ones below need to be implemented
*/
#ifdef CONFIG_RISCV_ISA_C
RISCV_INSN_REJECTED(c_j, insn);
RISCV_INSN_REJECTED(c_jr, insn);
RISCV_INSN_REJECTED(c_jal, insn);
RISCV_INSN_REJECTED(c_jalr, insn);
RISCV_INSN_REJECTED(c_beqz, insn);
RISCV_INSN_REJECTED(c_bnez, insn);
RISCV_INSN_REJECTED(c_ebreak, insn);
#endif
RISCV_INSN_SET_SIMULATE(jal, insn);
RISCV_INSN_SET_SIMULATE(jalr, insn);
RISCV_INSN_SET_SIMULATE(auipc, insn);
RISCV_INSN_SET_SIMULATE(branch, insn);
return INSN_GOOD;
}
/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef _RISCV_KERNEL_KPROBES_DECODE_INSN_H
#define _RISCV_KERNEL_KPROBES_DECODE_INSN_H
#include <asm/sections.h>
#include <asm/kprobes.h>
enum probe_insn {
INSN_REJECTED,
INSN_GOOD_NO_SLOT,
INSN_GOOD,
};
enum probe_insn __kprobes
riscv_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *api);
#endif /* _RISCV_KERNEL_KPROBES_DECODE_INSN_H */
// SPDX-License-Identifier: GPL-2.0+
#include <linux/kprobes.h>
#include <linux/extable.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <asm/ptrace.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/bug.h>
#include <asm/patch.h>
#include "decode-insn.h"
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
static void __kprobes
post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *);
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
unsigned long offset = GET_INSN_LENGTH(p->opcode);
p->ainsn.api.restore = (unsigned long)p->addr + offset;
patch_text(p->ainsn.api.insn, p->opcode);
patch_text((void *)((unsigned long)(p->ainsn.api.insn) + offset),
__BUG_INSN_32);
}
static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
p->ainsn.api.restore = 0;
}
static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
if (p->ainsn.api.handler)
p->ainsn.api.handler((u32)p->opcode,
(unsigned long)p->addr, regs);
post_kprobe_handler(p, kcb, regs);
}
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
unsigned long probe_addr = (unsigned long)p->addr;
if (probe_addr & 0x1) {
pr_warn("Address not aligned.\n");
return -EINVAL;
}
/* copy instruction */
p->opcode = *p->addr;
/* decode instruction */
switch (riscv_probe_decode_insn(p->addr, &p->ainsn.api)) {
case INSN_REJECTED: /* insn not supported */
return -EINVAL;
case INSN_GOOD_NO_SLOT: /* insn need simulation */
p->ainsn.api.insn = NULL;
break;
case INSN_GOOD: /* instruction uses slot */
p->ainsn.api.insn = get_insn_slot();
if (!p->ainsn.api.insn)
return -ENOMEM;
break;
}
/* prepare the instruction */
if (p->ainsn.api.insn)
arch_prepare_ss_slot(p);
else
arch_prepare_simulate(p);
return 0;
}
/* install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
if ((p->opcode & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
patch_text(p->addr, __BUG_INSN_32);
else
patch_text(p->addr, __BUG_INSN_16);
}
/* remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
patch_text(p->addr, p->opcode);
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
kcb->prev_kprobe.kp = kprobe_running();
kcb->prev_kprobe.status = kcb->kprobe_status;
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
}
static void __kprobes set_current_kprobe(struct kprobe *p)
{
__this_cpu_write(current_kprobe, p);
}
/*
 * Interrupts need to be disabled before single-step mode is set, and not
 * reenabled until after single-step mode ends.
 * Without disabling interrupts on the local CPU, an interrupt could arrive
 * between the exception return and the start of the out-of-line single-step,
 * resulting in wrongly single-stepping into the interrupt handler.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
struct pt_regs *regs)
{
kcb->saved_status = regs->status;
regs->status &= ~SR_SPIE;
}
static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
struct pt_regs *regs)
{
regs->status = kcb->saved_status;
}
static void __kprobes setup_singlestep(struct kprobe *p,
struct pt_regs *regs,
struct kprobe_ctlblk *kcb, int reenter)
{
unsigned long slot;
if (reenter) {
save_previous_kprobe(kcb);
set_current_kprobe(p);
kcb->kprobe_status = KPROBE_REENTER;
} else {
kcb->kprobe_status = KPROBE_HIT_SS;
}
if (p->ainsn.api.insn) {
/* prepare for single stepping */
slot = (unsigned long)p->ainsn.api.insn;
/* IRQs and single stepping do not mix well. */
kprobes_save_local_irqflag(kcb, regs);
instruction_pointer_set(regs, slot);
} else {
/* insn simulation */
arch_simulate_insn(p, regs);
}
}
static int __kprobes reenter_kprobe(struct kprobe *p,
struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
switch (kcb->kprobe_status) {
case KPROBE_HIT_SSDONE:
case KPROBE_HIT_ACTIVE:
kprobes_inc_nmissed_count(p);
setup_singlestep(p, regs, kcb, 1);
break;
case KPROBE_HIT_SS:
case KPROBE_REENTER:
pr_warn("Unrecoverable kprobe detected.\n");
dump_kprobe(p);
BUG();
break;
default:
WARN_ON(1);
return 0;
}
return 1;
}
static void __kprobes
post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
/* return addr restore if non-branching insn */
if (cur->ainsn.api.restore != 0)
regs->epc = cur->ainsn.api.restore;
/* restore back original saved kprobe variables and continue */
if (kcb->kprobe_status == KPROBE_REENTER) {
restore_previous_kprobe(kcb);
return;
}
/* call post handler */
kcb->kprobe_status = KPROBE_HIT_SSDONE;
if (cur->post_handler) {
/* post_handler may itself hit a breakpoint and be
 * single-stepped again, i.e. a recursive exception.
 */
cur->post_handler(cur, regs, 0);
}
reset_current_kprobe();
}
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
switch (kcb->kprobe_status) {
case KPROBE_HIT_SS:
case KPROBE_REENTER:
/*
 * We are here because the instruction being single-
 * stepped caused a page fault. We reset the current
 * kprobe, point epc back to the probe address, and
 * allow the page fault handler to continue as a
 * normal page fault.
 */
regs->epc = (unsigned long) cur->addr;
BUG_ON(!instruction_pointer(regs));
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
else
reset_current_kprobe();
break;
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
if (fixup_exception(regs))
return 1;
}
return 0;
}
bool __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs)
{
struct kprobe *p, *cur_kprobe;
struct kprobe_ctlblk *kcb;
unsigned long addr = instruction_pointer(regs);
kcb = get_kprobe_ctlblk();
cur_kprobe = kprobe_running();
p = get_kprobe((kprobe_opcode_t *) addr);
if (p) {
if (cur_kprobe) {
if (reenter_kprobe(p, regs, kcb))
return true;
} else {
/* Probe hit */
set_current_kprobe(p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
/*
 * If we have no pre-handler or it returned 0, we
 * continue with normal processing. If we have a
 * pre-handler and it returned non-zero, it will
 * modify the execution path and there is no need
 * for single-stepping; just reset the current
 * kprobe and exit.
 *
 * The pre_handler may itself hit a breakpoint and
 * single-step through before returning.
 */
if (!p->pre_handler || !p->pre_handler(p, regs))
setup_singlestep(p, regs, kcb, 0);
else
reset_current_kprobe();
}
return true;
}
/*
* The breakpoint instruction was removed right
* after we hit it. Another cpu has removed
* either a probepoint or a debugger breakpoint
* at this address. In either case, no further
* handling of this interrupt is appropriate.
* Return back to original instruction, and continue.
*/
return false;
}
bool __kprobes
kprobe_single_step_handler(struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
unsigned long addr = instruction_pointer(regs);
struct kprobe *cur = kprobe_running();
if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
((unsigned long)&cur->ainsn.api.insn[0] + GET_INSN_LENGTH(cur->opcode) == addr)) {
kprobes_restore_local_irqflag(kcb, regs);
post_kprobe_handler(cur, kcb, regs);
return true;
}
/* not ours, kprobes should ignore it */
return false;
}
/*
* Provide a blacklist of symbols identifying ranges which cannot be kprobed.
* This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
*/
int __init arch_populate_kprobe_blacklist(void)
{
int ret;
ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
(unsigned long)__irqentry_text_end);
return ret;
}
void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
{
return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL);
}
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *)regs->ra;
ri->fp = NULL;
regs->ra = (unsigned long) &kretprobe_trampoline;
}
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
return 0;
}
int __init arch_init_kprobes(void)
{
return 0;
}
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Author: Patrick Stählin <me@packi.ch>
*/
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
.text
.altmacro
.macro save_all_base_regs
REG_S x1, PT_RA(sp)
REG_S x3, PT_GP(sp)
REG_S x4, PT_TP(sp)
REG_S x5, PT_T0(sp)
REG_S x6, PT_T1(sp)
REG_S x7, PT_T2(sp)
REG_S x8, PT_S0(sp)
REG_S x9, PT_S1(sp)
REG_S x10, PT_A0(sp)
REG_S x11, PT_A1(sp)
REG_S x12, PT_A2(sp)
REG_S x13, PT_A3(sp)
REG_S x14, PT_A4(sp)
REG_S x15, PT_A5(sp)
REG_S x16, PT_A6(sp)
REG_S x17, PT_A7(sp)
REG_S x18, PT_S2(sp)
REG_S x19, PT_S3(sp)
REG_S x20, PT_S4(sp)
REG_S x21, PT_S5(sp)
REG_S x22, PT_S6(sp)
REG_S x23, PT_S7(sp)
REG_S x24, PT_S8(sp)
REG_S x25, PT_S9(sp)
REG_S x26, PT_S10(sp)
REG_S x27, PT_S11(sp)
REG_S x28, PT_T3(sp)
REG_S x29, PT_T4(sp)
REG_S x30, PT_T5(sp)
REG_S x31, PT_T6(sp)
.endm
.macro restore_all_base_regs
REG_L x3, PT_GP(sp)
REG_L x4, PT_TP(sp)
REG_L x5, PT_T0(sp)
REG_L x6, PT_T1(sp)
REG_L x7, PT_T2(sp)
REG_L x8, PT_S0(sp)
REG_L x9, PT_S1(sp)
REG_L x10, PT_A0(sp)
REG_L x11, PT_A1(sp)
REG_L x12, PT_A2(sp)
REG_L x13, PT_A3(sp)
REG_L x14, PT_A4(sp)
REG_L x15, PT_A5(sp)
REG_L x16, PT_A6(sp)
REG_L x17, PT_A7(sp)
REG_L x18, PT_S2(sp)
REG_L x19, PT_S3(sp)
REG_L x20, PT_S4(sp)
REG_L x21, PT_S5(sp)
REG_L x22, PT_S6(sp)
REG_L x23, PT_S7(sp)
REG_L x24, PT_S8(sp)
REG_L x25, PT_S9(sp)
REG_L x26, PT_S10(sp)
REG_L x27, PT_S11(sp)
REG_L x28, PT_T3(sp)
REG_L x29, PT_T4(sp)
REG_L x30, PT_T5(sp)
REG_L x31, PT_T6(sp)
.endm
ENTRY(kretprobe_trampoline)
addi sp, sp, -(PT_SIZE_ON_STACK)
save_all_base_regs
move a0, sp /* pt_regs */
call trampoline_probe_handler
/* use the result as the return-address */
move ra, a0
restore_all_base_regs
addi sp, sp, PT_SIZE_ON_STACK
ret
ENDPROC(kretprobe_trampoline)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Kernel Probes Jump Optimization (Optprobes)
*
* Copyright (C) IBM Corporation, 2002, 2004
* Copyright (C) Hitachi Ltd., 2012
* Copyright (C) Huawei Inc., 2014
* Copyright (C) Guokai Chen, 2022
 * Author: Guokai Chen <chenguokai17@mails.ucas.ac.cn>
*/
#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <linux/extable.h>
#include <linux/stop_machine.h>
#include <linux/moduleloader.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
/* for patch_text */
#include <linux/ftrace.h>
#include <asm/patch.h>
#include "simulate-insn.h"
#include "decode-insn.h"
/*
 * If the probed instruction doesn't use the PC and is not a system or
 * fence instruction, we can copy it into the template and have it
 * executed directly, without simulation or emulation.
 */
static enum probe_insn __kprobes can_kprobe_direct_exec(kprobe_opcode_t *addr)
{
/*
 * Reject instructions that use the PC, such as branch, jal, jalr and
 * auipc, and instructions that belong to the system or fence classes,
 * such as ebreak, ecall and fence.i.
 */
kprobe_opcode_t inst = *addr;
RISCV_INSN_REJECTED(system, inst);
RISCV_INSN_REJECTED(fence, inst);
RISCV_INSN_REJECTED(branch, inst);
RISCV_INSN_REJECTED(jal, inst);
RISCV_INSN_REJECTED(jalr, inst);
RISCV_INSN_REJECTED(auipc, inst);
return INSN_GOOD;
}
#define TMPL_VAL_IDX \
((kprobe_opcode_t *)optprobe_template_val - \
(kprobe_opcode_t *)optprobe_template_entry)
#define TMPL_CALL_IDX \
((kprobe_opcode_t *)optprobe_template_call - \
(kprobe_opcode_t *)optprobe_template_entry)
#define TMPL_STORE_EPC_IDX \
((kprobe_opcode_t *)optprobe_template_store_epc - \
(kprobe_opcode_t *)optprobe_template_entry)
#define TMPL_END_IDX \
((kprobe_opcode_t *)optprobe_template_end - \
(kprobe_opcode_t *)optprobe_template_entry)
#define TMPL_ADD_SP \
((kprobe_opcode_t *)optprobe_template_add_sp - \
(kprobe_opcode_t *)optprobe_template_entry)
#define TMPL_SUB_SP \
((kprobe_opcode_t *)optprobe_template_sub_sp - \
(kprobe_opcode_t *)optprobe_template_entry)
#define TMPL_RESTORE_BEGIN \
((kprobe_opcode_t *)optprobe_template_restore_begin - \
(kprobe_opcode_t *)optprobe_template_entry)
#define TMPL_RESTORE_ORIGN_INSN \
((kprobe_opcode_t *)optprobe_template_restore_orig_insn - \
(kprobe_opcode_t *)optprobe_template_entry)
#define TMPL_RESTORE_RET \
((kprobe_opcode_t *)optprobe_template_ret - \
(kprobe_opcode_t *)optprobe_template_entry)
#define TMPL_RESTORE_END \
((kprobe_opcode_t *)optprobe_template_restore_end - \
(kprobe_opcode_t *)optprobe_template_entry)
#define FREE_SEARCH_DEPTH 32
/*
 * On RISC-V, an optinsn is prepared whenever its detour buffer is non-NULL
 */
int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
return optinsn->insn != NULL;
}
/*
 * In the RISC-V ISA, jal has a quite limited jump range (±1 MiB). To
 * achieve an adequate range, an auipc+jalr pair is used instead. This
 * requires replacing two instructions, so the next instruction must be
 * examined as well.
 */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
struct kprobe *p;
/* check if the next instruction has a kprobe */
p = get_kprobe(op->kp.addr + 1);
if (p && !kprobe_disabled(p))
return -EEXIST;
return 0;
}
/*
 * In the RISC-V ISA, an auipc+jalr pair requires a free register.
 * Inspired by register renaming in out-of-order processors, we scan the
 * instructions that follow for a register that is never used as a source
 * before being used as a destination, stopping at the first branch or
 * jump instruction.
 */
static int
__arch_find_free_register(kprobe_opcode_t *addr, int use_orig,
kprobe_opcode_t orig)
{
int i, rs1, rs2, rd;
kprobe_opcode_t inst;
int rs_mask = 0;
for (i = 0; i < FREE_SEARCH_DEPTH; i++) {
if (i == 0 && use_orig)
inst = orig;
else
inst = *(kprobe_opcode_t *)(addr + i);
/*
 * Detailed handling:
 * jalr/branch/system: the search must end here, no result
 * jal: if not chosen as the result, the search must end here
 * arithmetic/load/store: record their rs registers
 * jal/arithmetic/load: if a suitable rd is found, return it
 * others (floating point/vector): ignore
 */
if (riscv_insn_is_branch(inst) || riscv_insn_is_jalr(inst) ||
riscv_insn_is_system(inst)) {
return 0;
}
/* instructions that have rs1 */
if (riscv_insn_is_arith_ri(inst) || riscv_insn_is_arith_rr(inst) ||
riscv_insn_is_load(inst) || riscv_insn_is_store(inst) ||
riscv_insn_is_amo(inst)) {
rs1 = (inst & 0xF8000) >> 15;
rs_mask |= 1 << rs1;
}
/* instructions that have rs2 */
if (riscv_insn_is_arith_rr(inst) || riscv_insn_is_store(inst) ||
riscv_insn_is_amo(inst)) {
rs2 = (inst & 0x1F00000) >> 20;
rs_mask |= 1 << rs2;
}
/* instructions that have rd */
if (riscv_insn_is_lui(inst) || riscv_insn_is_jal(inst) ||
riscv_insn_is_load(inst) || riscv_insn_is_arith_ri(inst) ||
riscv_insn_is_arith_rr(inst) || riscv_insn_is_amo(inst)) {
rd = (inst & 0xF80) >> 7;
if (rd != 0 && (rs_mask & (1 << rd)) == 0)
return rd;
if (riscv_insn_is_jal(inst))
return 0;
}
}
return 0;
}
/*
 * A probe point can be optimized only if a free register can be found
 * both at the start and at the end of the replaced code. In-function
 * jumps must also be checked, to make sure that nothing jumps to the
 * second instruction being replaced.
 */
static int can_optimize(unsigned long paddr, kprobe_opcode_t orig)
{
unsigned long addr, size = 0, offset = 0, target;
s32 imm;
kprobe_opcode_t inst;
if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
return 0;
addr = paddr - offset;
/* if there is not enough space for our kprobe, skip */
if (addr + size <= paddr + MAX_OPTIMIZED_LENGTH)
return 0;
while (addr < paddr - offset + size) {
/* Check from the start until the end */
inst = *(kprobe_opcode_t *)addr;
/* branch and jal targets can be determined before execution */
if (riscv_insn_is_branch(inst)) {
imm = branch_offset(inst);
target = addr + imm;
if (target == paddr + RVI_INST_SIZE)
return 0;
} else if (riscv_insn_is_jal(inst)) {
imm = jal_offset(inst);
target = addr + imm;
if (target == paddr + RVI_INST_SIZE)
return 0;
}
/* an RVI instruction is always 4 bytes long */
addr += RVI_INST_SIZE;
}
if (can_kprobe_direct_exec((kprobe_opcode_t *)(paddr + 4)) != INSN_GOOD ||
can_kprobe_direct_exec(&orig) != INSN_GOOD)
return 0;
/*
 * Only valid if we can find two free registers: the first holds the
 * detour buffer entry address, the second holds the return address,
 * which is two instructions past the probe point.
 */
return __arch_find_free_register((kprobe_opcode_t *)paddr, 1, orig) &&
__arch_find_free_register((kprobe_opcode_t *)paddr + MAX_COPIED_INSN, 0, 0);
}
/* Free optimized instruction slot */
static void
__arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
if (op->optinsn.insn) {
free_optinsn_slot(op->optinsn.insn, dirty);
op->optinsn.insn = NULL;
}
}
static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
unsigned long flags;
struct kprobe_ctlblk *kcb;
/* Save skipped registers */
regs->epc = (unsigned long)op->kp.addr;
regs->orig_a0 = ~0UL;
local_irq_save(flags);
kcb = get_kprobe_ctlblk();
if (kprobe_running()) {
kprobes_inc_nmissed_count(&op->kp);
} else {
__this_cpu_write(current_kprobe, &op->kp);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
opt_pre_handler(&op->kp, regs);
__this_cpu_write(current_kprobe, NULL);
}
local_irq_restore(flags);
}
NOKPROBE_SYMBOL(optimized_callback);
/*
* R-type instruction as an example for the following patch functions
 * 31     25 24   20 19   15 14     12 11   7 6      0
* funct7 | rs2 | rs1 | funct3 | rd | opcode
* 7 5 5 3 5 7
*/
#define RISCV_RD_CLEAR 0xfffff07fUL
#define RISCV_RS1_CLEAR 0xfff07fffUL
#define RISCV_RS2_CLEAR 0xfe0fffffUL
#define RISCV_RD_SHIFT 7
#define RISCV_RS1_SHIFT 15
#define RISCV_RS2_SHIFT 20
static inline kprobe_opcode_t
__arch_patch_rd(kprobe_opcode_t inst, unsigned long val)
{
inst &= RISCV_RD_CLEAR;
inst |= val << RISCV_RD_SHIFT;
return inst;
}
static inline kprobe_opcode_t
__arch_patch_rs1(kprobe_opcode_t inst, unsigned long val)
{
inst &= RISCV_RS1_CLEAR;
inst |= val << RISCV_RS1_SHIFT;
return inst;
}
static inline kprobe_opcode_t __arch_patch_rs2(kprobe_opcode_t inst,
unsigned long val)
{
inst &= RISCV_RS2_CLEAR;
inst |= val << RISCV_RS2_SHIFT;
return inst;
}
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig)
{
kprobe_opcode_t *code, *detour_slot, *detour_ret_addr;
long rel_chk;
unsigned long val;
int ret = 0;
if (!can_optimize((unsigned long)orig->addr, orig->opcode))
return -EILSEQ;
code = kzalloc(MAX_OPTINSN_SIZE, GFP_KERNEL);
detour_slot = get_optinsn_slot();
if (!code || !detour_slot) {
ret = -ENOMEM;
goto on_err;
}
/*
 * Verify that the address gap fits in the signed 32-bit offset
 * reachable by an auipc+jalr pair.
 */
rel_chk = (long)detour_slot - (long)orig->addr + 8;
if (abs(rel_chk) > S32_MAX) {
/*
 * Unlike x86, we free the code buffer directly instead of calling
 * __arch_remove_optimized_kprobe(), because no field of op has
 * been filled in yet.
 */
ret = -ERANGE;
goto on_err;
}
/* Copy arch-dep-instance from template. */
memcpy(code, (unsigned long *)optprobe_template_entry,
TMPL_END_IDX * sizeof(kprobe_opcode_t));
/* Set probe information */
*(unsigned long *)(&code[TMPL_VAL_IDX]) = (unsigned long)op;
/* Set probe function call */
*(unsigned long *)(&code[TMPL_CALL_IDX]) = (unsigned long)optimized_callback;
/* The free register in which the EPC (return address) is saved is
 * chosen dynamically during optprobe setup. For each probe address the
 * EPC may live in a different register, so the store instruction has to
 * be patched to use the real source register: rs2 of
 * optprobe_template_store_epc. After patching, it becomes
 * REG_S free_register, PT_EPC(sp)
 */
code[TMPL_STORE_EPC_IDX] =
__arch_patch_rs2(code[TMPL_STORE_EPC_IDX],
__arch_find_free_register(orig->addr, 1, orig->opcode));
/* Pick the free register that will carry the return address */
val = __arch_find_free_register(orig->addr + MAX_COPIED_INSN, 0, 0);
/*
 * Patch optprobe_template_restore_end:
 * patch rd and imm of auipc, and rs1 and imm of jalr.
 * After patching:
 * auipc free_register, %hi(return_address)
 * jalr x0, %lo(return_address)(free_register)
 */
detour_ret_addr = &detour_slot[optprobe_template_restore_end - optprobe_template_entry];
make_call(detour_ret_addr, (orig->addr + MAX_COPIED_INSN),
(code + TMPL_RESTORE_END));
code[TMPL_RESTORE_END] = __arch_patch_rd(code[TMPL_RESTORE_END], val);
code[TMPL_RESTORE_END + 1] =
__arch_patch_rs1(code[TMPL_RESTORE_END + 1], val);
code[TMPL_RESTORE_END + 1] = __arch_patch_rd(code[TMPL_RESTORE_END + 1], 0);
/* Copy insn and have it executed during restore */
code[TMPL_RESTORE_ORIGN_INSN] = orig->opcode;
code[TMPL_RESTORE_ORIGN_INSN + 1] =
*(kprobe_opcode_t *)(orig->addr + 1);
if (patch_text_nosync(detour_slot, code, MAX_OPTINSN_SIZE)) {
ret = -EPERM;
goto on_err;
}
kfree(code);
/* A non-NULL op->optinsn.insn marks this optprobe as prepared. */
op->optinsn.insn = detour_slot;
return ret;
on_err:
kfree(code);
if (detour_slot)
free_optinsn_slot(detour_slot, 0);
return ret;
}
struct patch_probe {
void *addr;
void *insns;
size_t len;
atomic_t cpu_count;
};
static int patch_text_stop_machine(void *data)
{
struct patch_probe *arg = data;
int ret = 0;
if (atomic_inc_return(&arg->cpu_count) == num_online_cpus()) {
ret = patch_text_nosync(arg->addr, arg->insns, arg->len);
atomic_inc(&arg->cpu_count);
} else {
while (atomic_read(&arg->cpu_count) <= num_online_cpus())
cpu_relax();
/* ensure patch visibility */
smp_mb();
}
return ret;
}
void __kprobes arch_optimize_kprobes(struct list_head *oplist)
{
struct optimized_kprobe *op, *tmp;
kprobe_opcode_t val;
struct patch_probe pp;
list_for_each_entry_safe(op, tmp, oplist, list) {
kprobe_opcode_t insn[MAX_COPIED_INSN];
WARN_ON(kprobe_disabled(&op->kp));
/*
 * Back up the instructions that will be replaced
 * by the jump
 */
memcpy(op->optinsn.copied_insn, op->kp.addr, JUMP_SIZE);
op->optinsn.copied_insn[0] = op->kp.opcode;
make_call(op->kp.addr, op->optinsn.insn, insn);
/*
 * Extract the free register from the third instruction of the
 * detour buffer (rs2 of REG_S free_register, PT_EPC(sp))
 * to save another call to __arch_find_free_register
 */
val = (op->optinsn.insn[2] & 0x1F00000) >> 20;
/*
* After patch, it should be:
* auipc free_register, %hi(detour_buffer)
* jalr free_register, free_register, %lo(detour_buffer)
* where free_register will eventually save the return address
*/
insn[0] = __arch_patch_rd(insn[0], val);
insn[1] = __arch_patch_rd(insn[1], val);
insn[1] = __arch_patch_rs1(insn[1], val);
/*
 * As with __arch_disarm_kprobe, operations that remove
 * breakpoints must be wrapped in stop_machine to avoid
 * races.
 */
pp = (struct patch_probe){
.addr = op->kp.addr,
.insns = insn,
.len = JUMP_SIZE,
.cpu_count = ATOMIC_INIT(0),
};
WARN_ON(stop_machine_cpuslocked(patch_text_stop_machine, &pp, cpu_online_mask));
list_del_init(&op->list);
}
}
static int arch_disarm_kprobe_opt(void *vop)
{
struct optimized_kprobe *op = (struct optimized_kprobe *)vop;
struct patch_probe pp = {
.addr = op->kp.addr,
.insns = op->optinsn.copied_insn,
.len = JUMP_SIZE,
.cpu_count = ATOMIC_INIT(0),
};
WARN_ON(stop_machine_cpuslocked(patch_text_stop_machine, &pp, cpu_online_mask));
arch_arm_kprobe(&op->kp);
return 0;
}
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
arch_disarm_kprobe_opt((void *)op);
}
/*
 * Recover original instructions and breakpoints from relative jumps.
 * Caller must hold kprobe_mutex.
 */
void arch_unoptimize_kprobes(struct list_head *oplist,
struct list_head *done_list)
{
struct optimized_kprobe *op, *tmp;
list_for_each_entry_safe(op, tmp, oplist, list) {
arch_unoptimize_kprobe(op);
list_move(&op->list, done_list);
}
}
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
unsigned long addr)
{
return ((unsigned long)op->kp.addr <= addr &&
	(unsigned long)op->kp.addr + JUMP_SIZE > addr);
}
void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
__arch_remove_optimized_kprobe(op, 1);
}
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Regents of the University of California
* Copyright (C) 2017 SiFive
* Copyright (C) 2022 Guokai Chen
*/
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#ifdef CONFIG_OPTPROBES
ENTRY(optprobe_template_entry)
ENTRY(optprobe_template_sub_sp)
REG_S sp, (-(PT_SIZE_ON_STACK) + PT_SP)(sp)
addi sp, sp, -(PT_SIZE_ON_STACK)
ENTRY(optprobe_template_store_epc)
REG_S ra, PT_EPC(sp)
REG_S ra, PT_RA(sp)
REG_S gp, PT_GP(sp)
REG_S tp, PT_TP(sp)
REG_S t0, PT_T0(sp)
REG_S t1, PT_T1(sp)
REG_S t2, PT_T2(sp)
REG_S s0, PT_S0(sp)
REG_S s1, PT_S1(sp)
REG_S a0, PT_A0(sp)
REG_S a1, PT_A1(sp)
REG_S a2, PT_A2(sp)
REG_S a3, PT_A3(sp)
REG_S a4, PT_A4(sp)
REG_S a5, PT_A5(sp)
REG_S a6, PT_A6(sp)
REG_S a7, PT_A7(sp)
REG_S s2, PT_S2(sp)
REG_S s3, PT_S3(sp)
REG_S s4, PT_S4(sp)
REG_S s5, PT_S5(sp)
REG_S s6, PT_S6(sp)
REG_S s7, PT_S7(sp)
REG_S s8, PT_S8(sp)
REG_S s9, PT_S9(sp)
REG_S s10, PT_S10(sp)
REG_S s11, PT_S11(sp)
REG_S t3, PT_T3(sp)
REG_S t4, PT_T4(sp)
REG_S t5, PT_T5(sp)
REG_S t6, PT_T6(sp)
csrr t0, sstatus
csrr t1, stval
csrr t2, scause
REG_S t0, PT_STATUS(sp)
REG_S t1, PT_BADADDR(sp)
REG_S t2, PT_CAUSE(sp)
ENTRY(optprobe_template_add_sp)
move a1, sp
lla a0, 1f
REG_L a0, 0(a0)
REG_L a2, 2f
jalr 0(a2)
ENTRY(optprobe_template_restore_begin)
REG_L t0, PT_STATUS(sp)
REG_L t1, PT_BADADDR(sp)
REG_L t2, PT_CAUSE(sp)
csrw sstatus, t0
csrw stval, t1
csrw scause, t2
REG_L ra, PT_RA(sp)
REG_L gp, PT_GP(sp)
REG_L tp, PT_TP(sp)
REG_L t0, PT_T0(sp)
REG_L t1, PT_T1(sp)
REG_L t2, PT_T2(sp)
REG_L s0, PT_S0(sp)
REG_L s1, PT_S1(sp)
REG_L a0, PT_A0(sp)
REG_L a1, PT_A1(sp)
REG_L a2, PT_A2(sp)
REG_L a3, PT_A3(sp)
REG_L a4, PT_A4(sp)
REG_L a5, PT_A5(sp)
REG_L a6, PT_A6(sp)
REG_L a7, PT_A7(sp)
REG_L s2, PT_S2(sp)
REG_L s3, PT_S3(sp)
REG_L s4, PT_S4(sp)
REG_L s5, PT_S5(sp)
REG_L s6, PT_S6(sp)
REG_L s7, PT_S7(sp)
REG_L s8, PT_S8(sp)
REG_L s9, PT_S9(sp)
REG_L s10, PT_S10(sp)
REG_L s11, PT_S11(sp)
REG_L t3, PT_T3(sp)
REG_L t4, PT_T4(sp)
REG_L t5, PT_T5(sp)
REG_L t6, PT_T6(sp)
addi sp, sp, PT_SIZE_ON_STACK
ENTRY(optprobe_template_restore_orig_insn)
nop
nop
ENTRY(optprobe_template_restore_end)
ret_to_normal:
auipc ra, 0
jalr x0, 0(ra)
ENTRY(optprobe_template_val)
1:
.dword 0
ENTRY(optprobe_template_call)
2:
.dword 0
.dword 0
ENTRY(optprobe_template_end)
END(optprobe_template_end)
END(optprobe_template_call)
END(optprobe_template_val)
END(optprobe_template_restore_end)
END(optprobe_template_restore_orig_insn)
END(optprobe_template_restore_begin)
END(optprobe_template_add_sp)
END(optprobe_template_store_epc)
END(optprobe_template_sub_sp)
END(optprobe_template_entry)
#endif /* CONFIG_OPTPROBES */
// SPDX-License-Identifier: GPL-2.0+
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include "decode-insn.h"
#include "simulate-insn.h"
static inline bool rv_insn_reg_get_val(struct pt_regs *regs, u32 index,
unsigned long *ptr)
{
if (index == 0)
*ptr = 0;
else if (index <= 31)
*ptr = *((unsigned long *)regs + index);
else
return false;
return true;
}
static inline bool rv_insn_reg_set_val(struct pt_regs *regs, u32 index,
unsigned long val)
{
if (index == 0)
return false;
else if (index <= 31)
*((unsigned long *)regs + index) = val;
else
return false;
return true;
}
bool __kprobes simulate_jal(u32 opcode, unsigned long addr, struct pt_regs *regs)
{
/*
 *  31 | 30      21 | 20 | 19       12 | 11  7 | 6      0
 * imm[20] | imm[10:1] | imm[11] | imm[19:12] |  rd | opcode
 *   1         10          1           8          5   JAL/J
 */
bool ret;
u32 imm;
u32 index = (opcode >> 7) & 0x1f;
ret = rv_insn_reg_set_val(regs, index, addr + 4);
if (!ret)
return ret;
imm = ((opcode >> 21) & 0x3ff) << 1;
imm |= ((opcode >> 20) & 0x1) << 11;
imm |= ((opcode >> 12) & 0xff) << 12;
imm |= ((opcode >> 31) & 0x1) << 20;
instruction_pointer_set(regs, addr + sign_extend32((imm), 20));
return ret;
}
bool __kprobes simulate_jalr(u32 opcode, unsigned long addr, struct pt_regs *regs)
{
/*
 * 31         20 | 19 15 | 14 12 | 11  7 | 6      0
 * offset[11:0]  |  rs1  |  010  |   rd  | opcode
 *      12           5       3       5     JALR/JR
 */
bool ret;
unsigned long base_addr;
u32 imm = (opcode >> 20) & 0xfff;
u32 rd_index = (opcode >> 7) & 0x1f;
u32 rs1_index = (opcode >> 15) & 0x1f;
ret = rv_insn_reg_set_val(regs, rd_index, addr + 4);
if (!ret)
return ret;
ret = rv_insn_reg_get_val(regs, rs1_index, &base_addr);
if (!ret)
return ret;
instruction_pointer_set(regs, (base_addr + sign_extend32((imm), 11)) & ~1);
return ret;
}
#define auipc_rd_idx(opcode) \
((opcode >> 7) & 0x1f)
#define auipc_imm(opcode) \
((((opcode) >> 12) & 0xfffff) << 12)
#if __riscv_xlen == 64
#define auipc_offset(opcode) sign_extend64(auipc_imm(opcode), 31)
#elif __riscv_xlen == 32
#define auipc_offset(opcode) auipc_imm(opcode)
#else
#error "Unexpected __riscv_xlen"
#endif
bool __kprobes simulate_auipc(u32 opcode, unsigned long addr, struct pt_regs *regs)
{
/*
* auipc instruction:
* 31 12 11 7 6 0
* | imm[31:12] | rd | opcode |
* 20 5 7
*/
u32 rd_idx = auipc_rd_idx(opcode);
unsigned long rd_val = addr + auipc_offset(opcode);
if (!rv_insn_reg_set_val(regs, rd_idx, rd_val))
return false;
instruction_pointer_set(regs, addr + 4);
return true;
}
#define branch_rs1_idx(opcode) \
(((opcode) >> 15) & 0x1f)
#define branch_rs2_idx(opcode) \
(((opcode) >> 20) & 0x1f)
#define branch_funct3(opcode) \
(((opcode) >> 12) & 0x7)
#define BRANCH_BEQ 0x0
#define BRANCH_BNE 0x1
#define BRANCH_BLT 0x4
#define BRANCH_BGE 0x5
#define BRANCH_BLTU 0x6
#define BRANCH_BGEU 0x7
bool __kprobes simulate_branch(u32 opcode, unsigned long addr, struct pt_regs *regs)
{
/*
* branch instructions:
* 31 30 25 24 20 19 15 14 12 11 8 7 6 0
* | imm[12] | imm[10:5] | rs2 | rs1 | funct3 | imm[4:1] | imm[11] | opcode |
* 1 6 5 5 3 4 1 7
* imm[12|10:5] rs2 rs1 000 imm[4:1|11] 1100011 BEQ
* imm[12|10:5] rs2 rs1 001 imm[4:1|11] 1100011 BNE
* imm[12|10:5] rs2 rs1 100 imm[4:1|11] 1100011 BLT
* imm[12|10:5] rs2 rs1 101 imm[4:1|11] 1100011 BGE
* imm[12|10:5] rs2 rs1 110 imm[4:1|11] 1100011 BLTU
* imm[12|10:5] rs2 rs1 111 imm[4:1|11] 1100011 BGEU
*/
s32 offset;
s32 offset_tmp;
unsigned long rs1_val;
unsigned long rs2_val;
if (!rv_insn_reg_get_val(regs, branch_rs1_idx(opcode), &rs1_val) ||
!rv_insn_reg_get_val(regs, branch_rs2_idx(opcode), &rs2_val))
return false;
offset_tmp = branch_offset(opcode);
switch (branch_funct3(opcode)) {
case BRANCH_BEQ:
offset = (rs1_val == rs2_val) ? offset_tmp : 4;
break;
case BRANCH_BNE:
offset = (rs1_val != rs2_val) ? offset_tmp : 4;
break;
case BRANCH_BLT:
offset = ((long)rs1_val < (long)rs2_val) ? offset_tmp : 4;
break;
case BRANCH_BGE:
offset = ((long)rs1_val >= (long)rs2_val) ? offset_tmp : 4;
break;
case BRANCH_BLTU:
offset = (rs1_val < rs2_val) ? offset_tmp : 4;
break;
case BRANCH_BGEU:
offset = (rs1_val >= rs2_val) ? offset_tmp : 4;
break;
default:
return false;
}
instruction_pointer_set(regs, addr + offset);
return true;
}
/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef _RISCV_KERNEL_PROBES_SIMULATE_INSN_H
#define _RISCV_KERNEL_PROBES_SIMULATE_INSN_H
#define __RISCV_INSN_FUNCS(name, mask, val) \
static __always_inline bool riscv_insn_is_##name(probe_opcode_t code) \
{ \
BUILD_BUG_ON(~(mask) & (val)); \
return (code & (mask)) == (val); \
} \
bool simulate_##name(u32 opcode, unsigned long addr, \
struct pt_regs *regs)
#define RISCV_INSN_REJECTED(name, code) \
do { \
if (riscv_insn_is_##name(code)) { \
return INSN_REJECTED; \
} \
} while (0)
__RISCV_INSN_FUNCS(system, 0x7f, 0x73);
__RISCV_INSN_FUNCS(fence, 0x7f, 0x0f);
#define RISCV_INSN_SET_SIMULATE(name, code) \
do { \
if (riscv_insn_is_##name(code)) { \
api->handler = simulate_##name; \
return INSN_GOOD_NO_SLOT; \
} \
} while (0)
__RISCV_INSN_FUNCS(c_j, 0xe003, 0xa001);
__RISCV_INSN_FUNCS(c_jr, 0xf007, 0x8002);
__RISCV_INSN_FUNCS(c_jal, 0xe003, 0x2001);
__RISCV_INSN_FUNCS(c_jalr, 0xf007, 0x9002);
__RISCV_INSN_FUNCS(c_beqz, 0xe003, 0xc001);
__RISCV_INSN_FUNCS(c_bnez, 0xe003, 0xe001);
__RISCV_INSN_FUNCS(c_ebreak, 0xffff, 0x9002);
__RISCV_INSN_FUNCS(auipc, 0x7f, 0x17);
__RISCV_INSN_FUNCS(branch, 0x7f, 0x63);
__RISCV_INSN_FUNCS(jal, 0x7f, 0x6f);
__RISCV_INSN_FUNCS(jalr, 0x707f, 0x67);
/* matches opcodes 0111011 (OP-32) and 0110011 (OP) */
__RISCV_INSN_FUNCS(arith_rr, 0x77, 0x33);
/* matches opcodes 0011011 (OP-IMM-32) and 0010011 (OP-IMM) */
__RISCV_INSN_FUNCS(arith_ri, 0x77, 0x13);
__RISCV_INSN_FUNCS(lui, 0x7f, 0x37);
__RISCV_INSN_FUNCS(load, 0x7f, 0x03);
__RISCV_INSN_FUNCS(store, 0x7f, 0x23);
__RISCV_INSN_FUNCS(amo, 0x7f, 0x2f);
#define branch_imm(opcode) \
(((((opcode) >> 8) & 0xf) << 1) | \
((((opcode) >> 25) & 0x3f) << 5) | \
((((opcode) >> 7) & 0x1) << 11) | \
((((opcode) >> 31) & 0x1) << 12))
#define branch_offset(opcode) \
sign_extend32((branch_imm(opcode)), 12)
#define jal_imm(opcode) \
(((((opcode) >> 21) & 0x3ff) << 1) | \
((((opcode) >> 20) & 0x1) << 11) | \
((((opcode) >> 31) & 0x1) << 20))
#define jal_offset(opcode) \
sign_extend32(jal_imm(opcode), 20)
#endif /* _RISCV_KERNEL_PROBES_SIMULATE_INSN_H */
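A quick user-space sanity check of the immediate extraction above (a sketch: sign_extend32() is reimplemented locally since it is a kernel helper, and branch_imm() is copied from this header). beq x0, x0, -8 assembles to 0xfe000ce3, and the macro recovers -8:

#include <stdint.h>
#include <stdio.h>

/* local stand-in for the kernel's sign_extend32() */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

/* copied from the header above */
#define branch_imm(opcode) \
	(((((opcode) >> 8) & 0xf) << 1) | \
	 ((((opcode) >> 25) & 0x3f) << 5) | \
	 ((((opcode) >> 7) & 0x1) << 11) | \
	 ((((opcode) >> 31) & 0x1) << 12))

int main(void)
{
	uint32_t beq = 0xfe000ce3;	/* beq x0, x0, -8 */

	printf("offset = %d\n", sign_extend32(branch_imm(beq), 12));
	return 0;
}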
@@ -118,6 +118,105 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
return &riscv_user_native_view;
}
struct pt_regs_offset {
const char *name;
int offset;
};
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
static const struct pt_regs_offset regoffset_table[] = {
REG_OFFSET_NAME(epc),
REG_OFFSET_NAME(ra),
REG_OFFSET_NAME(sp),
REG_OFFSET_NAME(gp),
REG_OFFSET_NAME(tp),
REG_OFFSET_NAME(t0),
REG_OFFSET_NAME(t1),
REG_OFFSET_NAME(t2),
REG_OFFSET_NAME(s0),
REG_OFFSET_NAME(s1),
REG_OFFSET_NAME(a0),
REG_OFFSET_NAME(a1),
REG_OFFSET_NAME(a2),
REG_OFFSET_NAME(a3),
REG_OFFSET_NAME(a4),
REG_OFFSET_NAME(a5),
REG_OFFSET_NAME(a6),
REG_OFFSET_NAME(a7),
REG_OFFSET_NAME(s2),
REG_OFFSET_NAME(s3),
REG_OFFSET_NAME(s4),
REG_OFFSET_NAME(s5),
REG_OFFSET_NAME(s6),
REG_OFFSET_NAME(s7),
REG_OFFSET_NAME(s8),
REG_OFFSET_NAME(s9),
REG_OFFSET_NAME(s10),
REG_OFFSET_NAME(s11),
REG_OFFSET_NAME(t3),
REG_OFFSET_NAME(t4),
REG_OFFSET_NAME(t5),
REG_OFFSET_NAME(t6),
REG_OFFSET_NAME(status),
REG_OFFSET_NAME(badaddr),
REG_OFFSET_NAME(cause),
REG_OFFSET_NAME(orig_a0),
REG_OFFSET_END,
};
/**
* regs_query_register_offset() - query register offset from its name
* @name: the name of a register
*
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
*/
int regs_query_register_offset(const char *name)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (!strcmp(roff->name, name))
return roff->offset;
return -EINVAL;
}
/**
* regs_within_kernel_stack() - check the address in the stack
* @regs: pt_regs which contains kernel stack pointer.
* @addr: address which is checked.
*
 * regs_within_kernel_stack() checks whether @addr is within the kernel stack
 * page(s). It returns true if so, and false otherwise.
*/
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
return (addr & ~(THREAD_SIZE - 1)) ==
(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1));
}
/**
* regs_get_kernel_stack_nth() - get Nth entry of the stack
* @regs: pt_regs which contains kernel stack pointer.
* @n: stack entry number.
*
 * regs_get_kernel_stack_nth() returns the @n-th entry of the kernel stack
 * specified by @regs. If the @n-th entry is NOT within the kernel stack,
 * this returns 0.
*/
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
addr += n;
if (regs_within_kernel_stack(regs, (unsigned long)addr))
return *addr;
else
return 0;
}
void ptrace_disable(struct task_struct *child)
{
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
......
@@ -12,6 +12,7 @@
#include <linux/signal.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/irq.h>
@@ -145,6 +146,14 @@ static inline unsigned long get_break_insn_length(unsigned long pc)
asmlinkage __visible void do_trap_break(struct pt_regs *regs)
{
#ifdef CONFIG_KPROBES
if (kprobe_single_step_handler(regs))
return;
if (kprobe_breakpoint_handler(regs))
return;
#endif
if (user_mode(regs))
force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc);
#ifdef CONFIG_KGDB
......
@@ -13,6 +13,7 @@
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <asm/ptrace.h>
#include <asm/tlbflush.h>
@@ -202,6 +203,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
tsk = current;
mm = tsk->mm;
if (kprobe_page_fault(regs, cause))
return;
/*
 * Fault-in kernel-space virtual memory on-demand.
 * The 'reference' page table is init_mm.pgd.
......
@@ -445,23 +445,6 @@ static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
break;
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
* We increment the nmissed count for accounting,
* we can also use npre/npostfault count for accounting
* these specific fault cases.
*/
kprobes_inc_nmissed_count(p);
/*
* We come here because instructions in the pre/post
* handler caused the page_fault, this could happen
* if handler tries to access user space by
* copy_from_user(), get_user() etc. Let the
* user-specified handler try to fix it first.
*/
if (p->fault_handler && p->fault_handler(p, regs, trapnr))
return 1;
/*
 * In case the user-specified fault handler returned
 * zero, try to fix up.
......
@@ -382,23 +382,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
break;
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
* We increment the nmissed count for accounting,
* we can also use npre/npostfault count for accounting
* these specific fault cases.
*/
kprobes_inc_nmissed_count(cur);
/*
* We come here because instructions in the pre/post
* handler caused the page_fault, this could happen
* if handler tries to access user space by
* copy_from_user(), get_user() etc. Let the
* user-specified handler try to fix it first.
*/
if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
return 1;
/*
 * In case the user-specified fault handler returned
 * zero, try to fix up.
......
@@ -345,23 +345,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
break;
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
* We increment the nmissed count for accounting,
* we can also use npre/npostfault count for accounting
* these specific fault cases.
*/
kprobes_inc_nmissed_count(cur);
/*
* We come here because instructions in the pre/post
* handler caused the page_fault, this could happen
* if handler tries to access user space by
* copy_from_user(), get_user() etc. Let the
* user-specified handler try to fix it first.
*/
if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
return 1;
/*
 * In case the user-specified fault handler returned
 * zero, try to fix up.
......
@@ -960,24 +960,6 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
restore_previous_kprobe(kcb);
else
reset_current_kprobe();
} else if (kcb->kprobe_status == KPROBE_HIT_ACTIVE ||
kcb->kprobe_status == KPROBE_HIT_SSDONE) {
/*
* We increment the nmissed count for accounting,
* we can also use npre/npostfault count for accounting
* these specific fault cases.
*/
kprobes_inc_nmissed_count(cur);
/*
* We come here because instructions in the pre/post
* handler caused the page_fault, this could happen
* if handler tries to access user space by
* copy_from_user(), get_user() etc. Let the
* user-specified handler try to fix it first.
*/
if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
return 1;
}
return 0;
......
@@ -52,8 +52,6 @@ struct kretprobe_instance;
typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *);
typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *,
unsigned long flags);
typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *,
int trapnr);
typedef int (*kretprobe_handler_t) (struct kretprobe_instance *,
struct pt_regs *);
@@ -81,12 +79,6 @@ struct kprobe {
/* Called after addr is executed, unless... */
kprobe_post_handler_t post_handler;
/*
* ... called if executing addr causes a fault (eg. page fault).
* Return 1 if it handled fault, otherwise kernel will see it.
*/
kprobe_fault_handler_t fault_handler;
/* Saved opcode (which has been replaced with breakpoint) */
kprobe_opcode_t opcode;
......
@@ -1193,23 +1193,6 @@ static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
}
NOKPROBE_SYMBOL(aggr_post_handler);
static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
int trapnr)
{
struct kprobe *cur = __this_cpu_read(kprobe_instance);
/*
* if we faulted "during" the execution of a user specified
* probe handler, invoke just that probe's fault handler
*/
if (cur && cur->fault_handler) {
if (cur->fault_handler(cur, regs, trapnr))
return 1;
}
return 0;
}
NOKPROBE_SYMBOL(aggr_fault_handler);
/* Walks the list and increments nmissed count for multiprobe case */
void kprobes_inc_nmissed_count(struct kprobe *p)
{
@@ -1407,7 +1390,6 @@ static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
ap->addr = p->addr;
ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
ap->pre_handler = aggr_pre_handler;
ap->fault_handler = aggr_fault_handler;
/* We don't care the kprobe which has gone. */
if (p->post_handler && !kprobe_gone(p))
ap->post_handler = aggr_post_handler;
@@ -2145,7 +2127,6 @@ int register_kretprobe(struct kretprobe *rp)
rp->kp.pre_handler = pre_handler_kretprobe;
rp->kp.post_handler = NULL;
rp->kp.fault_handler = NULL;
/* Pre-allocate memory for max kretprobe instances */
if (rp->maxactive <= 0) {
......
@@ -79,26 +79,11 @@ static void __kprobes handler_post(struct kprobe *p, struct pt_regs *regs,
#endif
}
/*
* fault_handler: this is called if an exception is generated for any
* instruction within the pre- or post-handler, or when Kprobes
* single-steps the probed instruction.
*/
static int handler_fault(struct kprobe *p, struct pt_regs *regs, int trapnr)
{
pr_info("fault_handler: p->addr = 0x%p, trap #%dn", p->addr, trapnr);
/* Return 0 because we don't handle the fault. */
return 0;
}
/* NOKPROBE_SYMBOL() is also available */
NOKPROBE_SYMBOL(handler_fault);
static int __init kprobe_init(void)
{
int ret;
kp.pre_handler = handler_pre;
kp.post_handler = handler_post;
kp.fault_handler = handler_fault;
ret = register_kprobe(&kp);
if (ret < 0) {
......