/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/jump_label.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
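/* 1 << 6 == 64 hash buckets; kprobe_table and kretprobe_inst_table below both use this size. */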


/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

/* This protects kprobe_table and optimizing_list */
static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	raw_spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}
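
/*
 * kretprobe instances are hashed by task (see pre_handler_kretprobe() and
 * kprobe_flush_task() below), so flushing a dying task only needs to scan
 * a single bucket under its per-bucket lock.
 */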

/* Blacklist -- list of struct kprobe_blacklist_entry */
static LIST_HEAD(kprobe_blacklist);

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	struct kprobe_insn_cache *cache;
	int nused;
	int ngarbage;
	char slot_used[];
};

#define KPROBE_INSN_PAGE_SIZE(slots)			\
	(offsetof(struct kprobe_insn_page, slot_used) +	\
	 (sizeof(char) * (slots)))

static int slots_per_page(struct kprobe_insn_cache *c)
{
	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
}

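/*
 * Slot lifecycle: a slot starts SLOT_CLEAN, becomes SLOT_USED when handed
 * out by __get_insn_slot(), is marked SLOT_DIRTY by __free_insn_slot(), and
 * returns to SLOT_CLEAN once collect_garbage_slots() reclaims it.
 */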
enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static void *alloc_insn_page(void)
{
	return module_alloc(PAGE_SIZE);
}

static void free_insn_page(void *page)
{
	module_free(NULL, page);
}

struct kprobe_insn_cache kprobe_insn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
	.alloc = alloc_insn_page,
	.free = free_insn_page,
	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
	.nr_garbage = 0,
};
static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip;
	kprobe_opcode_t *slot = NULL;

	mutex_lock(&c->mutex);
 retry:
	list_for_each_entry(kip, &c->pages, list) {
		if (kip->nused < slots_per_page(c)) {
			int i;
			for (i = 0; i < slots_per_page(c); i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					slot = kip->insns + (i * c->insn_size);
					goto out;
				}
			}
			/* kip->nused is broken. Fix it. */
			kip->nused = slots_per_page(c);
			WARN_ON(1);
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (c->nr_garbage && collect_garbage_slots(c) == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page. */
	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
	if (!kip)
		goto out;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = c->alloc();
	if (!kip->insns) {
		kfree(kip);
		goto out;
	}
	INIT_LIST_HEAD(&kip->list);
	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	kip->cache = c;
	list_add(&kip->list, &c->pages);
	slot = kip->insns;
out:
	mutex_unlock(&c->mutex);
	return slot;
}

/* Return 1 if all garbage slots are collected, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kip->list)) {
			list_del(&kip->list);
			kip->cache->free(kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no-one is interrupted on the garbage slots */
	synchronize_sched();

	list_for_each_entry_safe(kip, next, &c->pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < slots_per_page(c); i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	c->nr_garbage = 0;
	return 0;
}

void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
				kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;

	mutex_lock(&c->mutex);
	list_for_each_entry(kip, &c->pages, list) {
		long idx = ((long)slot - (long)kip->insns) /
				(c->insn_size * sizeof(kprobe_opcode_t));
		if (idx >= 0 && idx < slots_per_page(c)) {
			WARN_ON(kip->slot_used[idx] != SLOT_USED);
			if (dirty) {
				kip->slot_used[idx] = SLOT_DIRTY;
				kip->ngarbage++;
				if (++c->nr_garbage > slots_per_page(c))
					collect_garbage_slots(c);
			} else
				collect_one_slot(kip, idx);
			goto out;
		}
	}
	/* Could not free this slot. */
	WARN_ON(1);
out:
	mutex_unlock(&c->mutex);
}

#ifdef CONFIG_OPTPROBES
/* For optimized_kprobe buffer */
struct kprobe_insn_cache kprobe_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
	.alloc = alloc_insn_page,
	.free = free_insn_page,
	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
	/* .insn_size is initialized later */
	.nr_garbage = 0,
};
#endif
#endif

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__this_cpu_write(kprobe_instance, kp);
}

static inline void reset_kprobe_instance(void)
{
	__this_cpu_write(kprobe_instance, NULL);
}

/*
 * This routine is called either:
 * 	- under the kprobe_mutex - during kprobe_[un]register()
 * 				OR
 * 	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, head, hlist) {
		if (p->addr == addr)
			return p;
	}

	return NULL;
}

static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);

/* Return true if the kprobe is an aggregator */
static inline int kprobe_aggrprobe(struct kprobe *p)
{
	return p->pre_handler == aggr_pre_handler;
}

/* Return true(!0) if the kprobe is unused */
static inline int kprobe_unused(struct kprobe *p)
{
	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
	       list_empty(&p->list);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
{
	memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
}

#ifdef CONFIG_OPTPROBES
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_allow_optimization;

/*
 * Call all pre_handler on the list, but ignore their return values.
 * This must be called from arch-dep optimized caller.
 */
void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->pre_handler(kp, regs);
		}
		reset_kprobe_instance();
	}
}

/* Free optimized instructions and optimized_kprobe */
static __kprobes void free_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_remove_optimized_kprobe(op);
	arch_remove_kprobe(p);
	kfree(op);
}

/* Return true(!0) if the kprobe is ready for optimization. */
static inline int kprobe_optready(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		return arch_prepared_optinsn(&op->optinsn);
	}

	return 0;
}

/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
static inline int kprobe_disarmed(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* If kprobe is not aggr/opt probe, just return kprobe is disabled */
	if (!kprobe_aggrprobe(p))
		return kprobe_disabled(p);

	op = container_of(p, struct optimized_kprobe, kp);

	return kprobe_disabled(p) && list_empty(&op->list);
}

/* Return true(!0) if the probe is queued on (un)optimizing lists */
static int __kprobes kprobe_queued(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (!list_empty(&op->list))
			return 1;
	}
	return 0;
}

/*
 * Return an optimized kprobe whose optimizing code replaces
 * instructions including addr (exclude breakpoint).
 */
static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
{
	int i;
	struct kprobe *p = NULL;
	struct optimized_kprobe *op;

	/* Don't check i == 0, since that is a breakpoint case. */
	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
		p = get_kprobe((void *)(addr - i));

	if (p && kprobe_optready(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (arch_within_optimized_kprobe(op, addr))
			return p;
	}

	return NULL;
}

/* Optimization staging list, protected by kprobe_mutex */
static LIST_HEAD(optimizing_list);
static LIST_HEAD(unoptimizing_list);
static LIST_HEAD(freeing_list);

static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
#define OPTIMIZE_DELAY 5
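/* The delay is in jiffies, as it is handed straight to schedule_delayed_work(). */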

/*
 * Optimize (replace a breakpoint with a jump) kprobes listed on
 * optimizing_list.
 */
static __kprobes void do_optimize_kprobes(void)
{
	/* Optimization is never done when disarmed */
	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
	    list_empty(&optimizing_list))
		return;

	/*
	 * The optimization/unoptimization refers to online_cpus via
	 * stop_machine() and cpu-hotplug modifies online_cpus. At the
	 * same time, text_mutex will be held in cpu-hotplug and here.
	 * This combination can cause a deadlock (cpu-hotplug tries to lock
	 * text_mutex but stop_machine can not be done because online_cpus
	 * has been changed).
	 * To avoid this deadlock, we need to call get_online_cpus()
	 * for preventing cpu-hotplug outside of text_mutex locking.
	 */
	get_online_cpus();
	mutex_lock(&text_mutex);
	arch_optimize_kprobes(&optimizing_list);
	mutex_unlock(&text_mutex);
	put_online_cpus();
}

/*
 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
 * if needed) kprobes listed on unoptimizing_list.
 */
static __kprobes void do_unoptimize_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	/* Unoptimization must be done anytime */
	if (list_empty(&unoptimizing_list))
		return;

	/* Ditto to do_optimize_kprobes */
	get_online_cpus();
	mutex_lock(&text_mutex);
	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
	/* Loop on freeing_list for disarming */
	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		/* Disarm probes if marked disabled */
		if (kprobe_disabled(&op->kp))
			arch_disarm_kprobe(&op->kp);
		if (kprobe_unused(&op->kp)) {
			/*
			 * Remove unused probes from hash list. After waiting
			 * for synchronization, these probes are reclaimed.
			 * (reclaiming is done by do_free_cleaned_kprobes.)
			 */
			hlist_del_rcu(&op->kp.hlist);
		} else
			list_del_init(&op->list);
	}
	mutex_unlock(&text_mutex);
	put_online_cpus();
}

/* Reclaim all kprobes on the freeing_list */
static __kprobes void do_free_cleaned_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		BUG_ON(!kprobe_unused(&op->kp));
		list_del_init(&op->list);
		free_aggr_kprobe(&op->kp);
	}
}

/* Start optimizer after OPTIMIZE_DELAY passed */
static __kprobes void kick_kprobe_optimizer(void)
{
	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}

/* Kprobe jump optimizer */
static __kprobes void kprobe_optimizer(struct work_struct *work)
{
	mutex_lock(&kprobe_mutex);
	/* Lock modules while optimizing kprobes */
	mutex_lock(&module_mutex);

	/*
	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
	 * kprobes before waiting for quiescence period.
	 */
	do_unoptimize_kprobes();

	/*
	 * Step 2: Wait for quiescence period to ensure all running interrupts
	 * are done. Because optprobe may modify multiple instructions
	 * there is a chance that Nth instruction is interrupted. In that
	 * case, running interrupt can return to 2nd-Nth byte of jump
	 * instruction. This wait is for avoiding it.
	 */
	synchronize_sched();

	/* Step 3: Optimize kprobes after quiescence period */
	do_optimize_kprobes();

	/* Step 4: Free cleaned kprobes after quiescence period */
	do_free_cleaned_kprobes();

	mutex_unlock(&module_mutex);
	mutex_unlock(&kprobe_mutex);

	/* Step 5: Kick optimizer again if needed */
	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
		kick_kprobe_optimizer();
}

/* Wait for completing optimization and unoptimization */
static __kprobes void wait_for_kprobe_optimizer(void)
{
	mutex_lock(&kprobe_mutex);

	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
		mutex_unlock(&kprobe_mutex);

		/* this will also make optimizing_work execute immediately */
		flush_delayed_work(&optimizing_work);
		/* @optimizing_work might not have been queued yet, relax */
		cpu_relax();

		mutex_lock(&kprobe_mutex);
	}

	mutex_unlock(&kprobe_mutex);
}

/* Optimize kprobe if p is ready to be optimized */
static __kprobes void optimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* Check if the kprobe is disabled or not ready for optimization. */
	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
	    (kprobe_disabled(p) || kprobes_all_disarmed))
		return;

	/* Both of break_handler and post_handler are not supported. */
	if (p->break_handler || p->post_handler)
		return;

	op = container_of(p, struct optimized_kprobe, kp);

	/* Check there is no other kprobes at the optimized instructions */
	if (arch_check_optimized_kprobe(op) < 0)
		return;

	/* Check if it is already optimized. */
	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
		return;
	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;

	if (!list_empty(&op->list))
		/* This is under unoptimizing. Just dequeue the probe */
		list_del_init(&op->list);
	else {
		list_add(&op->list, &optimizing_list);
		kick_kprobe_optimizer();
	}
}

/* Short cut to direct unoptimizing */
static __kprobes void force_unoptimize_kprobe(struct optimized_kprobe *op)
{
	get_online_cpus();
	arch_unoptimize_kprobe(op);
	put_online_cpus();
	if (kprobe_disabled(&op->kp))
		arch_disarm_kprobe(&op->kp);
}

/* Unoptimize a kprobe if p is optimized */
static __kprobes void unoptimize_kprobe(struct kprobe *p, bool force)
{
	struct optimized_kprobe *op;

	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
		return; /* This is not an optprobe nor optimized */

	op = container_of(p, struct optimized_kprobe, kp);
	if (!kprobe_optimized(p)) {
		/* Unoptimized or unoptimizing case */
		if (force && !list_empty(&op->list)) {
			/*
			 * Only if this is unoptimizing kprobe and forced,
			 * forcibly unoptimize it. (No need to unoptimize
			 * unoptimized kprobe again :)
			 */
			list_del_init(&op->list);
			force_unoptimize_kprobe(op);
		}
		return;
	}

	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
	if (!list_empty(&op->list)) {
		/* Dequeue from the optimization queue */
		list_del_init(&op->list);
		return;
	}
	/* Optimized kprobe case */
	if (force)
		/* Forcibly update the code: this is a special case */
		force_unoptimize_kprobe(op);
	else {
		list_add(&op->list, &unoptimizing_list);
		kick_kprobe_optimizer();
	}
}

/* Cancel unoptimizing for reusing */
static void reuse_unused_kprobe(struct kprobe *ap)
{
	struct optimized_kprobe *op;

	BUG_ON(!kprobe_unused(ap));
	/*
	 * Unused kprobe MUST be on the way of delayed unoptimizing (means
	 * there is still a relative jump) and disabled.
	 */
	op = container_of(ap, struct optimized_kprobe, kp);
	if (unlikely(list_empty(&op->list)))
		printk(KERN_WARNING "Warning: found a stray unused "
			"aggrprobe@%p\n", ap->addr);
	/* Enable the probe again */
	ap->flags &= ~KPROBE_FLAG_DISABLED;
	/* Optimize it again (remove from op->list) */
	BUG_ON(!kprobe_optready(ap));
	optimize_kprobe(ap);
}

/* Remove optimized instructions */
static void __kprobes kill_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	if (!list_empty(&op->list))
		/* Dequeue from the (un)optimization queue */
		list_del_init(&op->list);
	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;

	if (kprobe_unused(p)) {
		/* Enqueue if it is unused */
		list_add(&op->list, &freeing_list);
		/*
		 * Remove unused probes from the hash list. After waiting
		 * for synchronization, this probe is reclaimed.
		 * (reclaiming is done by do_free_cleaned_kprobes().)
		 */
		hlist_del_rcu(&op->kp.hlist);
	}

	/* Don't touch the code, because it is already freed. */
	arch_remove_optimized_kprobe(op);
}

/* Try to prepare optimized instructions */
static __kprobes void prepare_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_prepare_optimized_kprobe(op);
}

/* Allocate new optimized_kprobe and try to prepare optimized instructions */
static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
	if (!op)
		return NULL;

	INIT_LIST_HEAD(&op->list);
	op->kp.addr = p->addr;
	arch_prepare_optimized_kprobe(op);

	return &op->kp;
}

static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);

/*
 * Prepare an optimized_kprobe and optimize it
 * NOTE: p must be a normal registered kprobe
 */
static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
{
	struct kprobe *ap;
	struct optimized_kprobe *op;

	/* Impossible to optimize ftrace-based kprobe */
	if (kprobe_ftrace(p))
		return;

	/* For preparing optimization, jump_label_text_reserved() is called */
	jump_label_lock();
	mutex_lock(&text_mutex);

	ap = alloc_aggr_kprobe(p);
	if (!ap)
		goto out;

	op = container_of(ap, struct optimized_kprobe, kp);
	if (!arch_prepared_optinsn(&op->optinsn)) {
		/* If failed to setup optimizing, fallback to kprobe */
		arch_remove_optimized_kprobe(op);
		kfree(op);
		goto out;
	}

	init_aggr_kprobe(ap, p);
	optimize_kprobe(ap);	/* This just kicks optimizer thread */

out:
	mutex_unlock(&text_mutex);
	jump_label_unlock();
}

#ifdef CONFIG_SYSCTL
static void __kprobes optimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already allowed, just return */
	if (kprobes_allow_optimization)
		goto out;

	kprobes_allow_optimization = true;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (!kprobe_disabled(p))
				optimize_kprobe(p);
	}
	printk(KERN_INFO "Kprobes globally optimized\n");
out:
	mutex_unlock(&kprobe_mutex);
}

static void __kprobes unoptimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already prohibited, just return */
	if (!kprobes_allow_optimization) {
		mutex_unlock(&kprobe_mutex);
		return;
	}

	kprobes_allow_optimization = false;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist) {
			if (!kprobe_disabled(p))
				unoptimize_kprobe(p, false);
		}
	}
	mutex_unlock(&kprobe_mutex);

	/* Wait for unoptimizing completion */
	wait_for_kprobe_optimizer();
	printk(KERN_INFO "Kprobes globally unoptimized\n");
}

static DEFINE_MUTEX(kprobe_sysctl_mutex);
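/*
 * Handler for the "debug.kprobes-optimization" sysctl (typically exposed as
 * /proc/sys/debug/kprobes-optimization); writing 1 enables and 0 disables
 * jump optimization globally.
 */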
int sysctl_kprobes_optimization;
int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
				      void __user *buffer, size_t *length,
				      loff_t *ppos)
{
	int ret;

	mutex_lock(&kprobe_sysctl_mutex);
	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (sysctl_kprobes_optimization)
		optimize_all_kprobes();
	else
		unoptimize_all_kprobes();
	mutex_unlock(&kprobe_sysctl_mutex);

	return ret;
}
#endif /* CONFIG_SYSCTL */

/* Put a breakpoint for a probe. Must be called with text_mutex locked */
static void __kprobes __arm_kprobe(struct kprobe *p)
{
	struct kprobe *_p;

	/* Check collision with other optimized kprobes */
	_p = get_optimized_kprobe((unsigned long)p->addr);
	if (unlikely(_p))
		/* Fallback to unoptimized kprobe */
		unoptimize_kprobe(_p, true);

	arch_arm_kprobe(p);
	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
}

/* Remove the breakpoint of a probe. Must be called with text_mutex locked */
static void __kprobes __disarm_kprobe(struct kprobe *p, bool reopt)
{
	struct kprobe *_p;

	unoptimize_kprobe(p, false);	/* Try to unoptimize */

	if (!kprobe_queued(p)) {
		arch_disarm_kprobe(p);
		/* If another kprobe was blocked, optimize it. */
		_p = get_optimized_kprobe((unsigned long)p->addr);
		if (unlikely(_p) && reopt)
			optimize_kprobe(_p);
	}
	/* TODO: reoptimize others after unoptimizing this probe */
}

#else /* !CONFIG_OPTPROBES */

#define optimize_kprobe(p)			do {} while (0)
#define unoptimize_kprobe(p, f)			do {} while (0)
#define kill_optimized_kprobe(p)		do {} while (0)
#define prepare_optimized_kprobe(p)		do {} while (0)
#define try_to_optimize_kprobe(p)		do {} while (0)
#define __arm_kprobe(p)				arch_arm_kprobe(p)
#define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
#define kprobe_disarmed(p)			kprobe_disabled(p)
#define wait_for_kprobe_optimizer()		do {} while (0)

/* There should be no unused kprobes that can be reused without optimization */
static void reuse_unused_kprobe(struct kprobe *ap)
{
	printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
	BUG_ON(kprobe_unused(ap));
}

static __kprobes void free_aggr_kprobe(struct kprobe *p)
{
	arch_remove_kprobe(p);
	kfree(p);
}

static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
}
#endif /* CONFIG_OPTPROBES */

#ifdef CONFIG_KPROBES_ON_FTRACE
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
	.func = kprobe_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS,
};
static int kprobe_ftrace_enabled;

/* Must ensure p->addr is really on ftrace */
static int __kprobes prepare_kprobe(struct kprobe *p)
{
	if (!kprobe_ftrace(p))
		return arch_prepare_kprobe(p);

	return arch_prepare_kprobe_ftrace(p);
}

/* Caller must lock kprobe_mutex */
static void __kprobes arm_kprobe_ftrace(struct kprobe *p)
{
	int ret;

	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
				   (unsigned long)p->addr, 0, 0);
	WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
	kprobe_ftrace_enabled++;
	if (kprobe_ftrace_enabled == 1) {
		ret = register_ftrace_function(&kprobe_ftrace_ops);
		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
	}
}

/* Caller must lock kprobe_mutex */
static void __kprobes disarm_kprobe_ftrace(struct kprobe *p)
{
	int ret;

	kprobe_ftrace_enabled--;
	if (kprobe_ftrace_enabled == 0) {
		ret = unregister_ftrace_function(&kprobe_ftrace_ops);
		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
	}
	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
			   (unsigned long)p->addr, 1, 0);
	WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
}
#else	/* !CONFIG_KPROBES_ON_FTRACE */
#define prepare_kprobe(p)	arch_prepare_kprobe(p)
#define arm_kprobe_ftrace(p)	do {} while (0)
#define disarm_kprobe_ftrace(p)	do {} while (0)
#endif

/* Arm a kprobe with text_mutex */
static void __kprobes arm_kprobe(struct kprobe *kp)
{
	if (unlikely(kprobe_ftrace(kp))) {
		arm_kprobe_ftrace(kp);
		return;
	}
	/*
	 * Here, since __arm_kprobe() doesn't use stop_machine(),
	 * this doesn't cause deadlock on text_mutex. So, we don't
	 * need get_online_cpus().
	 */
	mutex_lock(&text_mutex);
	__arm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/* Disarm a kprobe with text_mutex */
static void __kprobes disarm_kprobe(struct kprobe *kp, bool reopt)
{
	if (unlikely(kprobe_ftrace(kp))) {
		disarm_kprobe_ftrace(kp);
		return;
	}
	/* Ditto */
	mutex_lock(&text_mutex);
	__disarm_kprobe(kp, reopt);
	mutex_unlock(&text_mutex);
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __this_cpu_read(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __this_cpu_read(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (!kprobe_aggrprobe(p)) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
				struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst off the rprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		raw_spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		raw_spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
__acquires(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	raw_spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_lock_irqsave(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_lock(unsigned long hash,
	unsigned long *flags)
__acquires(hlist_lock)
{
	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_lock_irqsave(hlist_lock, *flags);
}

void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
	unsigned long *flags)
__releases(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	raw_spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_unlock_irqrestore(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_unlock(unsigned long hash,
       unsigned long *flags)
__releases(hlist_lock)
{
	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_unlock_irqrestore(hlist_lock, *flags);
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot.  kretprobe_table_locks not yet initialized. */
		return;

	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *next;

	hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}

/*
* Add the new probe to ap->list. Fail if this is the
* second jprobe at the address - two jprobes can't coexist
*/
static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));

	if (p->break_handler || p->post_handler)
		unoptimize_kprobe(ap, true);	/* Fall back to normal kprobe */

	if (p->break_handler) {
		if (ap->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &ap->list);
		ap->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	/* Copy p's insn slot to ap */
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care about the kprobe which has gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;
	if (p->break_handler && !kprobe_gone(p))
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	INIT_HLIST_NODE(&ap->hlist);

	list_add_rcu(&p->list, &ap->list);
	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = orig_p;

	/* For preparing optimization, jump_label_text_reserved() is called */
	jump_label_lock();
	/*
	 * Get online CPUs to avoid text_mutex deadlock with stop machine,
	 * which is invoked by unoptimize_kprobe() in add_new_kprobe()
	 */
	get_online_cpus();
	mutex_lock(&text_mutex);

	if (!kprobe_aggrprobe(orig_p)) {
		/* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
		ap = alloc_aggr_kprobe(orig_p);
		if (!ap) {
			ret = -ENOMEM;
			goto out;
		}
		init_aggr_kprobe(ap, orig_p);
	} else if (kprobe_unused(ap))
		/* This probe is going to die. Rescue it */
		reuse_unused_kprobe(ap);

	if (kprobe_gone(ap)) {
		/*
		 * Attempting to insert new probe at the same location that
		 * had a probe in the module vaddr area which already
		 * freed. So, the instruction slot has already been
		 * released. We need a new slot for the new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if fail to allocate new slot, don't need to
			 * free aggr_probe. It will be used next time, or
			 * freed by unregister_kprobe.
			 */
			goto out;

		/* Prepare optimized instructions if possible. */
		prepare_optimized_kprobe(ap);

		/*
		 * Clear gone flag to prevent allocating new slot again, and
		 * set disabled flag because it is not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	/* Copy ap's insn slot to p */
	copy_kprobe(ap, p);
	ret = add_new_kprobe(ap, p);

out:
	mutex_unlock(&text_mutex);
	put_online_cpus();
	jump_label_unlock();

	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed)
			/* Arm the breakpoint again. */
			arm_kprobe(ap);
	}
	return ret;
}

bool __weak arch_within_kprobe_blacklist(unsigned long addr)
{
	/* The __kprobes marked functions and entry code must not be probed */
	return addr >= (unsigned long)__kprobes_text_start &&
	       addr < (unsigned long)__kprobes_text_end;
}

static bool __kprobes within_kprobe_blacklist(unsigned long addr)
{
	struct kprobe_blacklist_entry *ent;

	if (arch_within_kprobe_blacklist(addr))
		return true;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area
	 */
	list_for_each_entry(ent, &kprobe_blacklist, list) {
		if (addr >= ent->start_addr && addr < ent->end_addr)
			return true;
	}

	return false;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 * This returns encoded errors if it fails to look up symbol or invalid
 * combination of parameters.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;

	if ((p->symbol_name && p->addr) ||
	    (!p->symbol_name && !p->addr))
		goto invalid;

	if (p->symbol_name) {
		kprobe_lookup_name(p->symbol_name, addr);
		if (!addr)
			return ERR_PTR(-ENOENT);
	}

	addr = (kprobe_opcode_t *)(((char *)addr) + p->offset);
	if (addr)
		return addr;

invalid:
	return ERR_PTR(-EINVAL);
}

/* Check passed kprobe is valid and return kprobe in kprobe_table. */
static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	ap = get_kprobe(p->addr);
	if (unlikely(!ap))
		return NULL;

	if (p != ap) {
		list_for_each_entry_rcu(list_p, &ap->list, list)
			if (list_p == p)
			/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return ap;
}

/* Return error if the kprobe is being re-registered */
static inline int check_kprobe_rereg(struct kprobe *p)
{
	int ret = 0;

	mutex_lock(&kprobe_mutex);
	if (__get_valid_kprobe(p))
		ret = -EINVAL;
	mutex_unlock(&kprobe_mutex);

	return ret;
}

static __kprobes int check_kprobe_address_safe(struct kprobe *p,
					       struct module **probed_mod)
{
	int ret = 0;
	unsigned long ftrace_addr;

	/*
	 * If the address is located on a ftrace nop, set the
	 * breakpoint to the following instruction.
	 */
	ftrace_addr = ftrace_location((unsigned long)p->addr);
	if (ftrace_addr) {
#ifdef CONFIG_KPROBES_ON_FTRACE
		/* Given address is not on the instruction boundary */
		if ((unsigned long)p->addr != ftrace_addr)
			return -EILSEQ;
		p->flags |= KPROBE_FLAG_FTRACE;
#else	/* !CONFIG_KPROBES_ON_FTRACE */
		return -EINVAL;
#endif
	}

	jump_label_lock();
	preempt_disable();

	/* Ensure it is not in reserved area nor out of text */
	if (!kernel_text_address((unsigned long) p->addr) ||
	    within_kprobe_blacklist((unsigned long) p->addr) ||
	    jump_label_text_reserved(p->addr, p->addr)) {
		ret = -EINVAL;
		goto out;
	}

	/* Check if we are probing a module */
	*probed_mod = __module_text_address((unsigned long) p->addr);
	if (*probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(*probed_mod))) {
			ret = -ENOENT;
			goto out;
		}

		/*
		 * If the module freed .init.text, we couldn't insert
		 * kprobes in there.
		 */
		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
		    (*probed_mod)->state != MODULE_STATE_COMING) {
			module_put(*probed_mod);
			*probed_mod = NULL;
			ret = -ENOENT;
		}
	}
out:
	preempt_enable();
	jump_label_unlock();

	return ret;
}
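
/*
 * Typical usage (illustrative sketch only; the symbol and handler names
 * below are hypothetical and not part of this file):
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("pre_handler: addr = %p\n", p->addr);
 *		return 0;
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= handler_pre,
 *	};
 *
 *	ret = register_kprobe(&kp);	// 0 on success, negative errno on failure
 *	...
 *	unregister_kprobe(&kp);
 */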

int __kprobes register_kprobe(struct kprobe *p)
{
	int ret;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	/* Adjust probe address from symbol */
	addr = kprobe_addr(p);
	if (IS_ERR(addr))
		return PTR_ERR(addr);
	p->addr = addr;

	ret = check_kprobe_rereg(p);
	if (ret)
		return ret;

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;
	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);

	ret = check_kprobe_address_safe(p, &probed_mod);
	if (ret)
		return ret;

	mutex_lock(&kprobe_mutex);

	old_p = get_kprobe(p->addr);
	if (old_p) {
		/* Since this may unoptimize old_p, locking text_mutex. */
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	mutex_lock(&text_mutex);	/* Avoiding text modification */
	ret = prepare_kprobe(p);
	mutex_unlock(&text_mutex);
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p))
		arm_kprobe(p);

	/* Try to optimize kprobe */
	try_to_optimize_kprobe(p);

out:
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);

/* Check if all probes on the aggrprobe are disabled */
static int __kprobes aggr_kprobe_disabled(struct kprobe *ap)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &ap->list, list)
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable this ap.
			 */
			return 0;

	return 1;
}

/* Disable one kprobe: Make sure kprobe_mutex is held when calling this */
static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p)
{
	struct kprobe *orig_p;

	/* Get an original kprobe for return */
	orig_p = __get_valid_kprobe(p);
	if (unlikely(orig_p == NULL))
		return NULL;

	if (!kprobe_disabled(p)) {
		/* Disable probe if it is a child probe */
		if (p != orig_p)
			p->flags |= KPROBE_FLAG_DISABLED;

		/* Try to disarm and disable this/parent probe */
		if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
			disarm_kprobe(orig_p, true);
			orig_p->flags |= KPROBE_FLAG_DISABLED;
		}
	}

	return orig_p;
}

/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	/* Disable kprobe. This will disarm it if needed. */
	ap = __disable_kprobe(p);
	if (ap == NULL)
		return -EINVAL;

	if (ap == p)
		/*
		 * This probe is an independent (and non-optimized) kprobe
		 * (not an aggrprobe). Remove from the hash list.
		 */
		goto disarmed;

	/* Following process expects this probe is an aggrprobe */
	WARN_ON(!kprobe_aggrprobe(ap));

	if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
		/*
		 * !disarmed could happen if the probe is under delayed
		 * unoptimizing.
		 */
		goto disarmed;
	else {
		/* If disabling probe has special handlers, update aggrprobe */
		if (p->break_handler && !kprobe_gone(p))
			ap->break_handler = NULL;
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &ap->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			ap->post_handler = NULL;
		}
noclean:
		/*
		 * Remove from the aggrprobe: this path will do nothing in
		 * __unregister_kprobe_bottom().
		 */
		list_del_rcu(&p->list);
		if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
			/*
			 * Try to optimize this probe again, because post
			 * handler may have been changed.
			 */
			optimize_kprobe(ap);
	}
	return 0;

disarmed:
	BUG_ON(!kprobe_disarmed(ap));
	hlist_del_rcu(&ap->hlist);
	return 0;
}

static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *ap;

	if (list_empty(&p->list))
		/* This is an independent kprobe */
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* This is the last child of an aggrprobe */
		ap = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		free_aggr_kprobe(ap);
	}
	/* Otherwise, do nothing. */
}

int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);

void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

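/*
 * Illustrative jprobe usage (sketch only; the names are hypothetical):
 * jp->entry points at a handler with the same prototype as the probed
 * function, and the handler must finish by calling jprobe_return():
 *
 *	static struct jprobe my_jprobe = {
 *		.entry		= my_do_fork_handler,
 *		.kp.symbol_name	= "do_fork",
 *	};
 *	ret = register_jprobe(&my_jprobe);
 */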
int __kprobes register_jprobes(struct jprobe **jps, int num)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr, offset;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		/* Verify probepoint is a function entry point */
		if (kallsyms_lookup_size_offset(addr, NULL, &offset) &&
		    offset == 0) {
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = register_kprobe(&jp->kp);
		} else
			ret = -EINVAL;

		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);

int __kprobes register_jprobe(struct jprobe *jp)
{
	return register_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(register_jprobe);

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);

void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
EXPORT_SYMBOL_GPL(unregister_jprobes);

#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When probe
 * hits it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/*TODO: consider to only swap the RA after the last pre_handler fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	raw_spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		raw_spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
			raw_spin_lock_irqsave(&rp->lock, flags);
			hlist_add_head(&ri->hlist, &rp->free_instances);
			raw_spin_unlock_irqrestore(&rp->lock, flags);
			return 0;
		}

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		raw_spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}

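/*
 * Illustrative usage (sketch only; the symbol and handler names are
 * hypothetical):
 *
 *	static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		pr_info("probed function returned\n");
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.handler	= ret_handler,
 *		.kp.symbol_name	= "do_fork",
 *		.maxactive	= 20,	// limit on concurrently tracked instances
 *	};
 *	ret = register_kretprobe(&my_kretprobe);
 */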
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (IS_ERR(addr))
			return PTR_ERR(addr);

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}
1819 1820

	rp->kp.pre_handler = pre_handler_kretprobe;
1821 1822 1823
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;
1824 1825 1826 1827

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
1828
		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
1829
#else
1830
		rp->maxactive = num_possible_cpus();
1831 1832
#endif
	}
1833
	raw_spin_lock_init(&rp->lock);
1834 1835
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
1836 1837
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
1838 1839 1840 1841
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
1842 1843
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
1844 1845 1846 1847
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
1848
	ret = register_kprobe(&rp->kp);
1849
	if (ret != 0)
1850 1851 1852
		free_rp_inst(rp);
	return ret;
}
1853
EXPORT_SYMBOL_GPL(register_kretprobe);
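
/*
 * Illustrative sketch, not part of this file: a minimal kretprobe.  The
 * probed symbol "do_fork", the handler, and the maxactive value below are
 * hypothetical examples.
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		pr_info("retval = %lx\n", regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.handler	= my_ret_handler,
 *		.kp.symbol_name	= "do_fork",
 *		.maxactive	= 20,
 *	};
 *
 * A module would call register_kretprobe(&my_kretprobe) in its init path and
 * unregister_kretprobe(&my_kretprobe) on exit.  An optional .entry_handler
 * can veto per-instance tracking by returning non-zero, as handled in
 * pre_handler_kretprobe() above.
 */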

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);
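
/*
 * Illustrative sketch, not part of this file: the array-based variants
 * register or remove several kretprobes at once.  "rp_a" and "rp_b" are
 * hypothetical, fully initialized kretprobes.
 *
 *	struct kretprobe *my_rps[] = { &rp_a, &rp_b };
 *
 *	ret = register_kretprobes(my_rps, ARRAY_SIZE(my_rps));
 *	if (ret < 0)
 *		return ret;	(probes registered so far were rolled back)
 *	...
 *	unregister_kretprobes(my_rps, ARRAY_SIZE(my_rps));
 */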

#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_KRETPROBES */

/* Set the kprobe gone and remove its instruction buffer. */
static void __kprobes kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (kprobe_aggrprobe(p)) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		p->break_handler = NULL;
		kill_optimized_kprobe(p);
	}
	/*
	 * Here, we can remove insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);
}

/* Disable one kprobe */
int __kprobes disable_kprobe(struct kprobe *kp)
{
	int ret = 0;

	mutex_lock(&kprobe_mutex);

	/* Disable this kprobe */
	if (__disable_kprobe(kp) == NULL)
		ret = -EINVAL;

	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int __kprobes enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone, we can't enable it. */
		ret = -EINVAL;
		goto out;
	}

	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;

	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
		p->flags &= ~KPROBE_FLAG_DISABLED;
		arm_kprobe(p);
	}
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);
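
/*
 * Illustrative sketch, not part of this file: a registered probe can be
 * switched off and back on without unregistering it.  "my_kprobe" is a
 * hypothetical, already-registered struct kprobe.
 *
 *	disable_kprobe(&my_kprobe);	(handlers stop firing)
 *	...
 *	enable_kprobe(&my_kprobe);	(the probe is re-armed)
 *
 * Both calls return -EINVAL if the probe is unknown or has gone away.
 */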

void __kprobes dump_kprobe(struct kprobe *kp)
{
	printk(KERN_WARNING "Dumping kprobe:\n");
	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
	       kp->symbol_name, kp->addr, kp->offset);
}

/*
 * Lookup and populate the kprobe_blacklist.
 *
 * Unlike the kretprobe blacklist, we'll need to determine
 * the range of addresses that belong to the said functions,
 * since a kprobe need not necessarily be at the beginning
 * of a function.
 */
static int __init populate_kprobe_blacklist(unsigned long *start,
					     unsigned long *end)
{
	unsigned long *iter;
	struct kprobe_blacklist_entry *ent;
	unsigned long offset = 0, size = 0;

	for (iter = start; iter < end; iter++) {
		if (!kallsyms_lookup_size_offset(*iter, &size, &offset)) {
			pr_err("Failed to find blacklist %p\n", (void *)*iter);
			continue;
		}

		ent = kmalloc(sizeof(*ent), GFP_KERNEL);
		if (!ent)
			return -ENOMEM;
		ent->start_addr = *iter;
		ent->end_addr = *iter + size;
		INIT_LIST_HEAD(&ent->list);
		list_add_tail(&ent->list, &kprobe_blacklist);
	}
	return 0;
}
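
/*
 * Illustrative sketch, not part of this file: once populated, the blacklist
 * is a plain list of address ranges, so checking an address is a linear walk.
 * The helper name below is hypothetical; the real check lives elsewhere in
 * this file.
 *
 *	static bool example_within_blacklist(unsigned long addr)
 *	{
 *		struct kprobe_blacklist_entry *ent;
 *
 *		list_for_each_entry(ent, &kprobe_blacklist, list)
 *			if (addr >= ent->start_addr && addr < ent->end_addr)
 *				return true;
 *		return false;
 *	}
 */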

/* Module notifier callback, checking kprobes on the module */
static int __kprobes kprobes_module_callback(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING is notified, both the module's .text and
	 * .init.text sections will be freed. When MODULE_STATE_LIVE is
	 * notified, only the .init.text section will be freed. We need to
	 * disable the kprobes that have been inserted in those sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed at will
				 * soon be vfreed but is not synced to disk.
				 * Hence, disarming the breakpoint isn't
				 * needed.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};

/* Markers of _kprobe_blacklist section */
extern unsigned long __start_kprobe_blacklist[];
extern unsigned long __stop_kprobe_blacklist[];

static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	err = populate_kprobe_blacklist(__start_kprobe_blacklist,
					__stop_kprobe_blacklist);
	if (err) {
		pr_err("kprobes: failed to populate blacklist: %d\n", err);
		pr_err("Please take care when using kprobes.\n");
	}

	if (kretprobe_blacklist_size) {
		/* look up the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

#if defined(CONFIG_OPTPROBES)
#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
	/* Init kprobe_optinsn_slots */
	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
#endif
	/* By default, kprobes can be optimized */
	kprobes_allow_optimization = true;
#endif

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname, struct kprobe *pp)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";

	if (sym)
		seq_printf(pi, "%p  %s  %s+0x%x  %s ",
			p->addr, kprobe_type, sym, offset,
			(modname ? modname : " "));
	else
		seq_printf(pi, "%p  %s  %p ",
			p->addr, kprobe_type, p->addr);

	if (!pp)
		pp = p;
	seq_printf(pi, "%s%s%s%s\n",
		(kprobe_gone(p) ? "[GONE]" : ""),
		((kprobe_disabled(p) && !kprobe_gone(p)) ?  "[DISABLED]" : ""),
		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
		(kprobe_ftrace(pp) ? "[FTRACE]" : ""));
}
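
/*
 * Illustrative output of the debugfs "list" file built from report_probe();
 * format only, the addresses, symbols, and flags below are made-up examples:
 *
 *	ffffffff8106f9f0  k  do_sys_open+0x0    [DISABLED]
 *	ffffffff81060b60  r  do_fork+0x0    [OPTIMIZED]
 *	ffffffffa0001020  j  example_fn+0x0  example_mod
 */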

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[KSYM_NAME_LEN];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (kprobe_aggrprobe(p)) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname, p);
		} else
			report_probe(pi, p, sym, offset, modname, NULL);
	}
	preempt_enable();
	return 0;
}

static const struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static const struct file_operations debugfs_kprobes_operations = {
	.open           = kprobes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

static void __kprobes arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	/* Arming kprobes doesn't optimize the kprobe itself */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (!kprobe_disabled(p))
				arm_kprobe(p);
	}

	kprobes_all_disarmed = false;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void __kprobes disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed) {
		mutex_unlock(&kprobe_mutex);
		return;
	}

	kprobes_all_disarmed = true;
	printk(KERN_INFO "Kprobes globally disabled\n");

	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
				disarm_kprobe(p, false);
		}
	}
	mutex_unlock(&kprobe_mutex);

	/* Wait for the optimizer to finish disarming all kprobes */
	wait_for_kprobe_optimizer();
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disarm_all_kprobes();
		break;
	default:
		return -EINVAL;
	}

	return count;
}
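
/*
 * Illustrative usage, not part of this file: with debugfs mounted at
 * /sys/kernel/debug, all kprobes can be armed or disarmed from user space
 * through the "enabled" file created in debugfs_kprobe_init() below:
 *
 *	echo 0 > /sys/kernel/debug/kprobes/enabled	(disarm all kprobes)
 *	echo 1 > /sys/kernel/debug/kprobes/enabled	(arm them again)
 */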

static const struct file_operations fops_kp = {
	.read =         read_enabled_file_bool,
	.write =        write_enabled_file_bool,
	.llseek =	default_llseek,
};

static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);