/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> adapted for x86_64 from i386.
 * 2005-Mar	Roland McGrath <roland@redhat.com>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
 *		Added function return probes functionality
 * 2006-Feb	Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
 *		kprobe-booster and kretprobe-booster for i386.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
 *		and kretprobe-booster for x86-64
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
 *		<arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
 *		unified x86 kprobes code.
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>
#include <linux/frame.h>

#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>

#include "common.h"

void jprobe_return_end(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

#define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs))

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
	/*
	 * Undefined/reserved opcodes, conditional jumps, Opcode Extension
	 * Groups, and some special opcodes cannot be boosted.
	 * This is non-const and volatile to keep gcc from statically
	 * optimizing it out, as variable_test_bit makes gcc think only
	 * *(unsigned long*) is used.
	 */
static volatile u32 twobyte_is_boostable[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      ----------------------------------------------          */
	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
#undef W
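
/*
 * Illustrative example (not used by the code): the second opcode byte
 * indexes this bitmap directly, so checking whether "0f 44" (cmove)
 * can be boosted amounts to
 *
 *	test_bit(0x44, (unsigned long *)twobyte_is_boostable)
 *
 * which is 1 because the 0x40 row (cmov) above is all ones.
 */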

struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", }, /* This function switches only current task, but
			      doesn't switch kernel stack. */
	{NULL, NULL}	/* Terminator */
};

const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

static nokprobe_inline void
__synthesize_relative_insn(void *from, void *to, u8 op)
{
	struct __arch_relative_insn {
		u8 op;
		s32 raddr;
	} __packed *insn;

	insn = (struct __arch_relative_insn *)from;
	insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
	insn->op = op;
}

/* Insert a jump instruction at address 'from', which jumps to address 'to'. */
void synthesize_reljump(void *from, void *to)
{
	__synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_reljump);

/* Insert a call instruction at address 'from', which calls address 'to'. */
void synthesize_relcall(void *from, void *to)
{
	__synthesize_relative_insn(from, to, RELATIVECALL_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_relcall);
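
/*
 * Worked example (illustrative addresses): RELATIVEJUMP_OPCODE is 0xe9
 * (jmp rel32) and RELATIVECALL_OPCODE is 0xe8 (call rel32), so
 *
 *	synthesize_reljump((void *)0x1000, (void *)0x2000);
 *
 * writes the 5-byte sequence "e9 fb 0f 00 00" at 0x1000: the opcode plus
 * the s32 displacement 0x2000 - (0x1000 + 5) = 0xffb, counted from the
 * end of the 5-byte instruction.
 */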

/*
 * Skip the prefixes of the instruction.
 */
static kprobe_opcode_t *skip_prefixes(kprobe_opcode_t *insn)
{
	insn_attr_t attr;

	attr = inat_get_opcode_attribute((insn_byte_t)*insn);
	while (inat_is_legacy_prefix(attr)) {
		insn++;
		attr = inat_get_opcode_attribute((insn_byte_t)*insn);
	}
#ifdef CONFIG_X86_64
	if (inat_is_rex_prefix(attr))
		insn++;
#endif
	return insn;
}
NOKPROBE_SYMBOL(skip_prefixes);

/*
 * Returns non-zero if the opcode is boostable.
 * RIP-relative instructions are adjusted at copying time in 64-bit mode.
 */
int can_boost(kprobe_opcode_t *opcodes)
{
	kprobe_opcode_t opcode;
	kprobe_opcode_t *orig_opcodes = opcodes;

	if (search_exception_tables((unsigned long)opcodes))
		return 0;	/* Page fault may occur on this address. */

retry:
	if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
		return 0;
	opcode = *(opcodes++);

	/* 2nd-byte opcode */
	if (opcode == 0x0f) {
		if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
			return 0;
		return test_bit(*opcodes,
				(unsigned long *)twobyte_is_boostable);
	}

	switch (opcode & 0xf0) {
#ifdef CONFIG_X86_64
	case 0x40:
		goto retry; /* REX prefix is boostable */
#endif
	case 0x60:
		if (0x63 < opcode && opcode < 0x67)
			goto retry; /* prefixes */
		/* can't boost Address-size override and bound */
		return (opcode != 0x62 && opcode != 0x67);
	case 0x70:
		return 0; /* can't boost conditional jump */
	case 0xc0:
		/* can't boost software-interruptions */
		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and absolute jmps */
		return ((opcode & 0x04) || opcode == 0xea);
	case 0xf0:
		if ((opcode & 0x0c) == 0 && opcode != 0xf1)
			goto retry; /* lock/rep(ne) prefix */
		/* clear and set flags are boostable */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		/* segment override prefixes are boostable */
		if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
			goto retry; /* prefixes */
		/* CS override prefix and call are not boostable */
		return (opcode != 0x2e && opcode != 0x9a);
	}
}
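
/*
 * Examples (illustrative): "push %rbp" (0x55) falls through to the default
 * case above and is boostable, while any Jcc short jump (0x70-0x7f) hits
 * the 0x70 case and is rejected: if taken, it would branch relative to the
 * copied location instead of the original one.
 */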

static unsigned long
__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct kprobe *kp;
	unsigned long faddr;

	kp = get_kprobe((void *)addr);
	faddr = ftrace_location(addr);
	/*
	 * Addresses inside the ftrace location are refused by
	 * arch_check_ftrace_location(). Something went terribly wrong
	 * if such an address is checked here.
	 */
	if (WARN_ON(faddr && faddr != addr))
		return 0UL;
	/*
	 * Use the current code if it is not modified by Kprobe
	 * and it cannot be modified by ftrace.
	 */
	if (!kp && !faddr)
		return addr;

	/*
	 * Basically, kp->ainsn.insn holds the original instruction.
	 * However, a RIP-relative instruction cannot be single-stepped at a
	 * different place, so __copy_instruction() tweaks its displacement.
	 * In that case, we can't recover the instruction from
	 * kp->ainsn.insn.
	 *
	 * On the other hand, for a normal kprobe, kp->opcode has a copy
	 * of the first byte of the probed instruction, which was overwritten
	 * by int3. Since the instruction at kp->addr is not modified by
	 * kprobes except for that first byte, we can recover the original
	 * instruction from it and kp->opcode.
	 *
	 * In the case of kprobes using ftrace, we do not have a copy of
	 * the original instruction. In fact, the ftrace location might
	 * be modified at any time and could even be in an inconsistent
	 * state. Fortunately, we know that the original code is the ideal
	 * 5-byte long NOP.
	 */
	memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	if (faddr)
		memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
	else
		buf[0] = kp->opcode;
	return (unsigned long)buf;
}

/*
 * Recover the probed instruction at addr for further analysis.
 * The caller must hold kprobe_mutex, or disable preemption to prevent
 * the referenced kprobes from being released.
 * Returns zero if the instruction cannot be recovered.
 */
unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
{
	unsigned long __addr;

	__addr = __recover_optprobed_insn(buf, addr);
	if (__addr != addr)
		return __addr;

	return __recover_probed_insn(buf, addr);
}
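
/*
 * Example (illustrative): if a regular kprobe is active at addr, the byte
 * at addr is the int3 (0xcc) that arch_arm_kprobe() poked in.
 * __recover_probed_insn() copies MAX_INSN_SIZE bytes into buf and then
 * restores buf[0] from kp->opcode, so callers such as can_probe() and
 * __copy_instruction() decode the original instruction stream.
 */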

/* Check if paddr is at an instruction boundary */
static int can_probe(unsigned long paddr)
{
	unsigned long addr, __addr, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr) {
		/*
		 * Check if the instruction has been modified by another
		 * kprobe, in which case we replace the breakpoint by the
		 * original instruction in our buffer.
		 * Also, jump optimization will change the breakpoint to
		 * relative-jump. Since the relative-jump itself is
		 * normally used, we just go through if there is no kprobe.
		 */
		__addr = recover_probed_instruction(buf, addr);
		if (!__addr)
			return 0;
		kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE);
		insn_get_length(&insn);

		/*
		 * Another debugging subsystem might insert this breakpoint.
		 * In that case, we can't recover it.
		 */
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
			return 0;
		addr += insn.length;
	}

	return (addr == paddr);
}
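
/*
 * Illustrative note: because can_probe() decodes forward from the start of
 * the containing symbol, a paddr pointing into the middle of a multi-byte
 * instruction is rejected -- the decode loop steps past it and the final
 * (addr == paddr) check fails.
 */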

/*
 * Returns non-zero if opcode modifies the interrupt flag.
 */
static int is_IF_modifier(kprobe_opcode_t *insn)
{
	/* Skip prefixes */
	insn = skip_prefixes(insn);

	switch (*insn) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}

	return 0;
}

/*
 * Copy an instruction and adjust the displacement if the instruction
 * uses the %rip-relative addressing mode (the adjustment is only
 * applicable to 64-bit x86).
 * Returns the length of the copied instruction, or 0 on failure.
 */
int __copy_instruction(u8 *dest, u8 *src)
{
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	int length;
	unsigned long recovered_insn =
		recover_probed_instruction(buf, (unsigned long)src);

	if (!recovered_insn)
		return 0;
	kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
	insn_get_length(&insn);
	length = insn.length;

	/* Another subsystem put a breakpoint here and we failed to recover it */
	if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
		return 0;
	memcpy(dest, insn.kaddr, length);

#ifdef CONFIG_X86_64
	if (insn_rip_relative(&insn)) {
		s64 newdisp;
		u8 *disp;
		kernel_insn_init(&insn, dest, length);
		insn_get_displacement(&insn);
		/*
		 * The copied instruction uses the %rip-relative addressing
		 * mode.  Adjust the displacement for the difference between
		 * the original location of this instruction and the location
		 * of the copy that will actually be run.  The tricky bit here
		 * is making sure that the sign extension happens correctly in
		 * this calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the %rip
		 * value and yield the same 64-bit result that the sign-
		 * extension of the original signed 32-bit displacement would
		 * have given.
		 */
		newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
		if ((s64) (s32) newdisp != newdisp) {
			pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
			pr_err("\tSrc: %p, Dest: %p, old disp: %x\n", src, dest, insn.displacement.value);
			return 0;
		}
		disp = (u8 *) dest + insn_offset_displacement(&insn);
		*(s32 *) disp = (s32) newdisp;
	}
#endif
	return length;
}
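
/*
 * Worked example (illustrative addresses): copying the 7-byte instruction
 * "mov 0x200(%rip),%rax" from src = 0x1000 to dest = 0x5000 yields
 *
 *	newdisp = 0x1000 + 0x200 - 0x5000 = -0x3e00
 *
 * The original referenced 0x1000 + 7 + 0x200 and the copy references
 * 0x5000 + 7 - 0x3e00, which is the same address; the instruction lengths
 * cancel, so they never appear in the calculation.
 */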

static int arch_copy_kprobe(struct kprobe *p)
{
	int ret;

	/* Copy the instruction, recovering it if another optprobe modified it */
	ret = __copy_instruction(p->ainsn.insn, p->addr);
	if (!ret)
		return -EINVAL;

	/*
	 * __copy_instruction can modify the displacement of the instruction,
	 * but that doesn't affect the boostability check.
	 */
	if (can_boost(p->ainsn.insn))
		p->ainsn.boostable = 0;
	else
		p->ainsn.boostable = -1;

	/* Check whether the instruction modifies the Interrupt Flag or not */
	p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn);

	/* Also, displacement change doesn't affect the first byte */
	p->opcode = p->ainsn.insn[0];

	return 0;
}

int arch_prepare_kprobe(struct kprobe *p)
{
	if (alternatives_text_reserved(p->addr, p->addr))
		return -EINVAL;

	if (!can_probe((unsigned long)p->addr))
		return -EILSEQ;
	/* insn: must be on special executable page on x86. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	return arch_copy_kprobe(p);
}
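
/*
 * Usage sketch (illustrative; this lives in a client module, not here):
 * the arch hooks above are driven by the generic register_kprobe() path.
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("hit %p, ip=%lx\n", p->addr, regs->ip);
 *		return 0;	// continue with single-step/boost
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_fork",	// hypothetical probe target
 *		.pre_handler	= handler_pre,
 *	};
 *	register_kprobe(&kp);
 */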

void arch_arm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
}

void arch_disarm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, &p->opcode, 1);
}

void arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
		p->ainsn.insn = NULL;
	}
}

static nokprobe_inline void
save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
}

static nokprobe_inline void
restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
}

static nokprobe_inline void
set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
		   struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
		= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	if (p->ainsn.if_modifier)
		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
}

static nokprobe_inline void clear_btf(void)
{
	if (test_thread_flag(TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		update_debugctlmsr(debugctl);
	}
}

static nokprobe_inline void restore_btf(void)
{
	if (test_thread_flag(TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl |= DEBUGCTLMSR_BTF;
		update_debugctlmsr(debugctl);
	}
}

void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	unsigned long *sara = stack_addr(regs);

	ri->ret_addr = (kprobe_opcode_t *) *sara;

	/* Replace the return addr with trampoline addr */
	*sara = (unsigned long) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);

static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb, int reenter)
{
	if (setup_detour_execution(p, regs, reenter))
		return;

#if !defined(CONFIG_PREEMPT)
	if (p->ainsn.boostable == 1 && !p->post_handler) {
		/* Boost up -- we can execute copied instructions directly */
		if (!reenter)
			reset_current_kprobe();
		/*
		 * Reentering boosted probe doesn't reset current_kprobe,
		 * nor set current_kprobe, because it doesn't use single
		 * stepping.
		 */
		regs->ip = (unsigned long)p->ainsn.insn;
		preempt_enable_no_resched();
		return;
	}
#endif
	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p, regs, kcb);
		kcb->kprobe_status = KPROBE_REENTER;
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;
	/* Prepare real single stepping */
	clear_btf();
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;
	/* single step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->ip = (unsigned long)p->addr;
	else
		regs->ip = (unsigned long)p->ainsn.insn;
}
NOKPROBE_SYMBOL(setup_singlestep);
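
/*
 * Note: in the non-boosted path above, setting X86_EFLAGS_TF arms the
 * CPU's single-step trap, so exactly one instruction executes at
 * p->ainsn.insn before the resulting #DB lands in kprobe_debug_handler(),
 * which calls resume_execution() to fix up ip, stack and flags.
 */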

/*
 * We have reentered the kprobe_handler(), since another probe was hit while
 * within the handler. We save the original kprobes variables and just single
 * step on the instruction of the new probe without calling any user handlers.
 */
static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
			  struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SS:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_REENTER:
		/* A probe has been hit in the codepath leading up to, or just
		 * after, single-stepping of a probed instruction. This entire
		 * codepath should strictly reside in .kprobes.text section.
		 * Raise a BUG or we'll continue in an endless reentering loop
		 * and eventually a stack overflow.
		 */
		printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
		       p->addr);
		dump_kprobe(p);
		BUG();
	default:
		/* impossible cases */
		WARN_ON(1);
		return 0;
	}

	return 1;
}
NOKPROBE_SYMBOL(reenter_kprobe);

/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
int kprobe_int3_handler(struct pt_regs *regs)
{
	kprobe_opcode_t *addr;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	if (user_mode(regs))
		return 0;

	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing. We conditionally
	 * re-enable preemption at the end of this function,
	 * and also in reenter_kprobe() and setup_singlestep().
	 */
	preempt_disable();

	kcb = get_kprobe_ctlblk();
	p = get_kprobe(addr);

	if (p) {
		if (kprobe_running()) {
			if (reenter_kprobe(p, regs, kcb))
				return 1;
		} else {
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry
			 * for jprobe processing, so get out doing nothing
			 * more here.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			return 1;
		}
	} else if (*addr != BREAKPOINT_INSTRUCTION) {
		/*
		 * The breakpoint instruction was removed right
		 * after we hit it.  Another cpu has removed
		 * either a probepoint or a debugger breakpoint
		 * at this address.  In either case, no further
		 * handling of this interrupt is appropriate.
		 * Back up over the (now missing) int3 and run
		 * the original instruction.
		 */
		regs->ip = (unsigned long)addr;
		preempt_enable_no_resched();
		return 1;
	} else if (kprobe_running()) {
		p = __this_cpu_read(current_kprobe);
		if (p->break_handler && p->break_handler(p, regs)) {
			if (!skip_singlestep(p, regs, kcb))
				setup_singlestep(p, regs, kcb, 0);
			return 1;
		}
	} /* else: not a kprobe fault; let the kernel handle it */

	preempt_enable_no_resched();
	return 0;
}
NOKPROBE_SYMBOL(kprobe_int3_handler);

/*
 * When a retprobed function returns, this code saves registers and
 * calls trampoline_handler(), which in turn calls the kretprobe's handler.
 */
asm(
	".global kretprobe_trampoline\n"
	".type kretprobe_trampoline, @function\n"
	"kretprobe_trampoline:\n"
#ifdef CONFIG_X86_64
	/* We don't bother saving the ss register */
	"	pushq %rsp\n"
	"	pushfq\n"
	SAVE_REGS_STRING
	"	movq %rsp, %rdi\n"
	"	call trampoline_handler\n"
	/* Replace saved sp with true return address. */
	"	movq %rax, 152(%rsp)\n"
	RESTORE_REGS_STRING
	"	popfq\n"
#else
	"	pushf\n"
	SAVE_REGS_STRING
	"	movl %esp, %eax\n"
	"	call trampoline_handler\n"
	/* Move flags to cs */
	"	movl 56(%esp), %edx\n"
	"	movl %edx, 52(%esp)\n"
	/* Replace saved flags with true return address. */
	"	movl %eax, 56(%esp)\n"
	RESTORE_REGS_STRING
	"	popf\n"
#endif
	"	ret\n"
	".size kretprobe_trampoline, .-kretprobe_trampoline\n"
);
NOKPROBE_SYMBOL(kretprobe_trampoline);
STACK_FRAME_NON_STANDARD(kretprobe_trampoline);
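
/*
 * Layout note (x86-64, illustrative): the pushes above build a struct
 * pt_regs with no ss slot, so 152(%rsp) -- offsetof(struct pt_regs, sp) --
 * is the slot filled by the initial "pushq %rsp".  trampoline_handler()
 * returns the real return address in %rax; writing it over that slot
 * leaves it as the only thing on the stack after RESTORE_REGS_STRING and
 * popfq, and the final "ret" pops it to resume in the original caller.
 */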

/*
 * Called from kretprobe_trampoline
 */
__visible __used void *trampoline_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
	kprobe_opcode_t *correct_ret_addr = NULL;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);
	/* fixup registers */
#ifdef CONFIG_X86_64
	regs->cs = __KERNEL_CS;
#else
	regs->cs = __KERNEL_CS | get_kernel_rpl();
	regs->gs = 0;
#endif
	regs->ip = trampoline_address;
	regs->orig_ax = ~0UL;

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always pushed into the head of the list
	 *     - when multiple return probes are registered for the same
	 *	 function, the (chronologically) first instance's ret_addr
	 *	 will be the real return address, and all the rest will
	 *	 point to kretprobe_trampoline.
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (ri->rp && ri->rp->handler) {
			__this_cpu_write(current_kprobe, &ri->rp->kp);
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, NULL);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	return (void *)orig_ret_address;
}
NOKPROBE_SYMBOL(trampoline_handler);

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new ip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed flags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * If this is the first time we've single-stepped the instruction at
 * this probepoint, and the instruction is boostable, boost it: add a
 * jump instruction after the copied instruction, that jumps to the next
 * instruction after the probepoint.
 */
static void resume_execution(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = stack_addr(regs);
	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
	unsigned long orig_ip = (unsigned long)p->addr;
	kprobe_opcode_t *insn = p->ainsn.insn;

	/* Skip prefixes */
	insn = skip_prefixes(insn);

	regs->flags &= ~X86_EFLAGS_TF;
	switch (*insn) {
	case 0x9c:	/* pushfl */
		*tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF);
		*tos |= kcb->kprobe_old_flags;
		break;
	case 0xc2:	/* iret/ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
	case 0xcf:
	case 0xea:	/* jmp absolute -- ip is correct */
		/* ip is already adjusted, no more changes required */
		p->ainsn.boostable = 1;
		goto no_change;
	case 0xe8:	/* call relative - Fix return addr */
		*tos = orig_ip + (*tos - copy_ip);
		break;
#ifdef CONFIG_X86_32
	case 0x9a:	/* call absolute -- same as call absolute, indirect */
		*tos = orig_ip + (*tos - copy_ip);
		goto no_change;
#endif
	case 0xff:
		if ((insn[1] & 0x30) == 0x10) {
			/*
			 * call absolute, indirect
			 * Fix return addr; ip is correct.
			 * But this is not boostable
			 */
			*tos = orig_ip + (*tos - copy_ip);
			goto no_change;
		} else if (((insn[1] & 0x31) == 0x20) ||
			   ((insn[1] & 0x31) == 0x21)) {
			/*
			 * jmp near and far, absolute indirect
			 * ip is correct. And this is boostable
			 */
			p->ainsn.boostable = 1;
			goto no_change;
		}
	default:
		break;
	}

	if (p->ainsn.boostable == 0) {
		if ((regs->ip > copy_ip) &&
		    (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) {
			/*
			 * This instruction can be executed directly if it
			 * jumps back to the correct address.
			 */
			synthesize_reljump((void *)regs->ip,
				(void *)orig_ip + (regs->ip - copy_ip));
			p->ainsn.boostable = 1;
		} else {
			p->ainsn.boostable = -1;
		}
	}

	regs->ip += orig_ip - copy_ip;

no_change:
	restore_btf();
}
NOKPROBE_SYMBOL(resume_execution);

/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.
 */
int kprobe_debug_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	resume_execution(cur, regs, kcb);
	regs->flags |= kcb->kprobe_saved_flags;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->flags & X86_EFLAGS_TF)
		return 0;

	return 1;
}
NOKPROBE_SYMBOL(kprobe_debug_handler);

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (unlikely(regs->ip == (unsigned long)cur->ainsn.insn)) {
		/* This must happen on single-stepping */
		WARN_ON(kcb->kprobe_status != KPROBE_HIT_SS &&
			kcb->kprobe_status != KPROBE_REENTER);
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the ip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->ip = (unsigned long)cur->addr;
		regs->flags |= kcb->kprobe_old_flags;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
	} else if (kcb->kprobe_status == KPROBE_HIT_ACTIVE ||
		   kcb->kprobe_status == KPROBE_HIT_SSDONE) {
		/*
		 * We increment the nmissed count for accounting; the
		 * npre/npostfault counts could also be used to account
		 * for these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault, this could happen
		 * if handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs, trapnr))
			return 1;

		/*
		 * The fixup routine could not handle it;
		 * let do_page_fault() fix it.
		 */
	}

	return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

/*
 * Wrapper routine for handling exceptions.
 */
int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
			     void *data)
{
	struct die_args *args = data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode(args->regs))
		return ret;

	if (val == DIE_GPF) {
		/*
		 * To be potentially processing a kprobe fault and to
		 * trust the result from kprobe_running(), we have to
		 * be non-preemptible.
		 */
		if (!preemptible() && kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
	}
	return ret;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);

int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_sp = stack_addr(regs);
	addr = (unsigned long)(kcb->jprobe_saved_sp);

	/*
	 * As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization. So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
	       MIN_STACK_SIZE(addr));
	regs->flags &= ~X86_EFLAGS_IF;
	trace_hardirqs_off();
	regs->ip = (unsigned long)(jp->entry);

	/*
	 * jprobes use jprobe_return() which skips the normal return
	 * path of the function, and this messes up the accounting of
	 * the function graph tracer.
	 *
	 * Pause function graph tracing while performing the jprobe function.
	 */
	pause_graph_tracing();
	return 1;
}
NOKPROBE_SYMBOL(setjmp_pre_handler);

void jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	asm volatile (
#ifdef CONFIG_X86_64
			"       xchg   %%rbx,%%rsp	\n"
#else
			"       xchgl   %%ebx,%%esp	\n"
#endif
			"       int3			\n"
			"       .globl jprobe_return_end\n"
			"       jprobe_return_end:	\n"
			"       nop			\n"::"b"
			(kcb->jprobe_saved_sp):"memory");
}
NOKPROBE_SYMBOL(jprobe_return);
NOKPROBE_SYMBOL(jprobe_return_end);
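
/*
 * Mechanism note: the xchg above switches back to the stack pointer that
 * setjmp_pre_handler() recorded in kcb->jprobe_saved_sp, and the int3
 * re-enters kprobe_int3_handler(), which dispatches to
 * longjmp_break_handler() through the break_handler path to restore the
 * saved registers and stack.
 */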

int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->ip - 1);
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	void *saved_sp = kcb->jprobe_saved_sp;

	if ((addr > (u8 *) jprobe_return) &&
	    (addr < (u8 *) jprobe_return_end)) {
		if (stack_addr(regs) != saved_sp) {
			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
			printk(KERN_ERR
			       "current sp %p does not match saved sp %p\n",
			       stack_addr(regs), saved_sp);
			printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
			show_regs(saved_regs);
			printk(KERN_ERR "Current registers\n");
			show_regs(regs);
			BUG();
		}
		/* It's OK to start function graph tracing again */
		unpause_graph_tracing();
		*regs = kcb->jprobe_saved_regs;
		memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(longjmp_break_handler);

bool arch_within_kprobe_blacklist(unsigned long addr)
{
	return  (addr >= (unsigned long)__kprobes_text_start &&
		 addr < (unsigned long)__kprobes_text_end) ||
		(addr >= (unsigned long)__entry_text_start &&
		 addr < (unsigned long)__entry_text_end);
}

int __init arch_init_kprobes(void)
{
	return 0;
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}