diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index e57e07b0edb64cf5b4d82d64adfc61de94c60424..f406bfa9a8cddc8a5ffeb8f3885f8aacaebca382 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -46,8 +46,8 @@ unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
 		/* This function only handles jump-optimized kprobe */
 		if (kp && kprobe_optimized(kp)) {
 			op = container_of(kp, struct optimized_kprobe, kp);
-			/* If op->list is not empty, op is under optimizing */
-			if (list_empty(&op->list))
+			/* If op is optimized or under unoptimizing */
+			if (list_empty(&op->list) || optprobe_queued_unopt(op))
 				goto found;
 		}
 	}
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index a0b92be98984ea58b3608b664d8f45d4af60ac32..ab39285f71a6c82c90248bf5af4e9f5e5b24cf51 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -378,6 +378,7 @@ extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
 DEFINE_INSN_CACHE_OPS(optinsn);
 
 extern void wait_for_kprobe_optimizer(void);
+bool optprobe_queued_unopt(struct optimized_kprobe *op);
 #else /* !CONFIG_OPTPROBES */
 static inline void wait_for_kprobe_optimizer(void) { }
 #endif /* CONFIG_OPTPROBES */
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 6b6aff00b3b6f9f65aa110da6d228f5400a8eaa4..55e1807ca0541a030497cbce42ffe1986a978001 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -660,7 +660,7 @@ void wait_for_kprobe_optimizer(void)
 	mutex_unlock(&kprobe_mutex);
 }
 
-static bool optprobe_queued_unopt(struct optimized_kprobe *op)
+bool optprobe_queued_unopt(struct optimized_kprobe *op)
 {
 	struct optimized_kprobe *_op;
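For readers following the logic of the opt.c hunk: when an optimized kprobe is queued on unoptimizing_list, op->list is non-empty, yet the jump instruction is still installed until the optimizer work actually runs, so __recover_optprobed_insn() must keep recovering from the saved copy of the original instruction. Below is a minimal standalone userspace model of that three-state check, an illustration under assumed semantics rather than kernel code; the model_* names and the enum are hypothetical stand-ins for the kernel's list membership tests.

/*
 * Standalone model of the check this patch fixes (illustration only,
 * not kernel code).  An optimized kprobe is in one of three states
 * relevant to __recover_optprobed_insn():
 *
 *   1. fully optimized         -> op->list is empty
 *   2. queued for optimizing   -> op->list is on optimizing_list
 *   3. queued for unoptimizing -> op->list is on unoptimizing_list
 *
 * In states 1 and 3 the jump is (still) installed, so recovery must
 * use the saved original instruction.  The old check accepted only
 * state 1.
 */
#include <stdbool.h>
#include <stdio.h>

enum op_state { OP_OPTIMIZED, OP_QUEUED_OPT, OP_QUEUED_UNOPT };

struct optimized_kprobe_model {
	enum op_state state;
};

/* Models list_empty(&op->list): true only when not queued anywhere. */
static bool model_list_empty(const struct optimized_kprobe_model *op)
{
	return op->state == OP_OPTIMIZED;
}

/* Models optprobe_queued_unopt(): true when queued for unoptimizing. */
static bool model_queued_unopt(const struct optimized_kprobe_model *op)
{
	return op->state == OP_QUEUED_UNOPT;
}

/* The fixed condition: recover from the saved copy in states 1 and 3. */
static bool must_use_saved_insn(const struct optimized_kprobe_model *op)
{
	return model_list_empty(op) || model_queued_unopt(op);
}

int main(void)
{
	struct optimized_kprobe_model op = { .state = OP_QUEUED_UNOPT };

	/* The old list_empty()-only check would print 0 here, even
	 * though the jump is still live at the probed address. */
	printf("use saved insn: %d\n", must_use_saved_insn(&op));
	return 0;
}

Exporting optprobe_queued_unopt() (the kprobes.h and kernel/kprobes.c hunks above, which drop its static qualifier and add a declaration) is what lets the arch code in opt.c perform that third-state test directly.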