/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

/*
 * Some oddball architectures like 64-bit powerpc have function descriptors,
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif
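
/*
 * Usage sketch (illustrative only; "do_fork" is just a convenient symbol),
 * mirroring how kprobe_addr() uses this macro further down:
 *
 *	kprobe_opcode_t *addr;
 *	kprobe_lookup_name("do_fork", addr);
 *	if (!addr)
 *		...the symbol was not found...
 */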

static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

/*
 * Normally, functions that we'd want to prohibit kprobes in are marked
 * __kprobes. But there are cases where such functions already belong to
 * a different section (__sched for preempt_schedule).
 *
 * For such cases, we now have a blacklist.
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
	{"preempt_schedule",},
	{"native_get_debugreg",},
	{"irq_entries_start",},
	{"common_interrupt",},
	{"mcount",},	/* mcount can be called from everywhere */
	{NULL}    /* Terminator */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	int nused;
	int ngarbage;
	char slot_used[];
};

#define KPROBE_INSN_PAGE_SIZE(slots)			\
	(offsetof(struct kprobe_insn_page, slot_used) +	\
	 (sizeof(char) * (slots)))

struct kprobe_insn_cache {
	struct list_head pages;	/* list of kprobe_insn_page */
	size_t insn_size;	/* size of instruction slot */
	int nr_garbage;
};

static int slots_per_page(struct kprobe_insn_cache *c)
{
	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
}
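
/*
 * Worked example (illustrative; assuming an arch like x86 where
 * kprobe_opcode_t is one byte, MAX_INSN_SIZE is 16 and PAGE_SIZE is
 * 4096): slots_per_page() yields 4096 / (16 * 1) = 256 slots per page,
 * and KPROBE_INSN_PAGE_SIZE(256) sizes struct kprobe_insn_page so that
 * slot_used[] carries one state byte per slot.
 */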

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_slots */
static struct kprobe_insn_cache kprobe_insn_slots = {
	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
	.nr_garbage = 0,
};
static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip;

 retry:
	list_for_each_entry(kip, &c->pages, list) {
		if (kip->nused < slots_per_page(c)) {
			int i;
			for (i = 0; i < slots_per_page(c); i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * c->insn_size);
				}
			}
			/* kip->nused is broken. Fix it. */
			kip->nused = slots_per_page(c);
			WARN_ON(1);
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (c->nr_garbage && collect_garbage_slots(c) == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page. */
	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_LIST_HEAD(&kip->list);
	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	list_add(&kip->list, &c->pages);
	return kip->insns;
}

kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	kprobe_opcode_t *ret = NULL;

	mutex_lock(&kprobe_insn_mutex);
	ret = __get_insn_slot(&kprobe_insn_slots);
	mutex_unlock(&kprobe_insn_mutex);

	return ret;
}

/* Return 1 if all garbage slots are collected, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kip->list)) {
			list_del(&kip->list);
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no one is interrupted on the garbage slots */
	synchronize_sched();

	list_for_each_entry_safe(kip, next, &c->pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < slots_per_page(c); i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	c->nr_garbage = 0;
	return 0;
}

static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
				       kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;

	list_for_each_entry(kip, &c->pages, list) {
		long idx = ((long)slot - (long)kip->insns) / c->insn_size;
		if (idx >= 0 && idx < slots_per_page(c)) {
			WARN_ON(kip->slot_used[idx] != SLOT_USED);
			if (dirty) {
				kip->slot_used[idx] = SLOT_DIRTY;
				kip->ngarbage++;
				if (++c->nr_garbage > slots_per_page(c))
					collect_garbage_slots(c);
			} else
				collect_one_slot(kip, idx);
			return;
		}
	}
	/* Could not free this slot. */
	WARN_ON(1);
}

void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
{
	mutex_lock(&kprobe_insn_mutex);
	__free_insn_slot(&kprobe_insn_slots, slot, dirty);
	mutex_unlock(&kprobe_insn_mutex);
}
#ifdef CONFIG_OPTPROBES
/* For optimized_kprobe buffer */
static DEFINE_MUTEX(kprobe_optinsn_mutex); /* Protects kprobe_optinsn_slots */
static struct kprobe_insn_cache kprobe_optinsn_slots = {
	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
	/* .insn_size is initialized later */
	.nr_garbage = 0,
};
/* Get a slot for optimized_kprobe buffer */
kprobe_opcode_t __kprobes *get_optinsn_slot(void)
{
	kprobe_opcode_t *ret = NULL;

	mutex_lock(&kprobe_optinsn_mutex);
	ret = __get_insn_slot(&kprobe_optinsn_slots);
	mutex_unlock(&kprobe_optinsn_mutex);

	return ret;
}

void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
{
	mutex_lock(&kprobe_optinsn_mutex);
	__free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
	mutex_unlock(&kprobe_optinsn_mutex);
}
#endif
#endif

/* We have preemption disabled, so it is safe to use the __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 * 	- under the kprobe_mutex - during kprobe_[un]register()
 * 				OR
 * 	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);

/* Return true if the kprobe is an aggregator */
static inline int kprobe_aggrprobe(struct kprobe *p)
{
	return p->pre_handler == aggr_pre_handler;
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

#ifdef CONFIG_OPTPROBES
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_allow_optimization;

/*
 * Call all pre_handlers on the list, but ignore their return values.
 * This must be called from the arch-dependent optimized caller.
 */
void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->pre_handler(kp, regs);
		}
		reset_kprobe_instance();
	}
}

/* Return true(!0) if the kprobe is ready for optimization. */
static inline int kprobe_optready(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		return arch_prepared_optinsn(&op->optinsn);
	}

	return 0;
}

/*
 * Return an optimized kprobe whose optimizing code replaces
 * instructions including addr (exclude breakpoint).
 */
struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
{
	int i;
	struct kprobe *p = NULL;
	struct optimized_kprobe *op;

	/* Don't check i == 0, since that is a breakpoint case. */
	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
		p = get_kprobe((void *)(addr - i));

	if (p && kprobe_optready(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (arch_within_optimized_kprobe(op, addr))
			return p;
	}

	return NULL;
}

/* Optimization staging list, protected by kprobe_mutex */
static LIST_HEAD(optimizing_list);

static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
#define OPTIMIZE_DELAY 5

/* Kprobe jump optimizer */
static __kprobes void kprobe_optimizer(struct work_struct *work)
{
	struct optimized_kprobe *op, *tmp;

	/* Lock modules while optimizing kprobes */
	mutex_lock(&module_mutex);
	mutex_lock(&kprobe_mutex);
	if (kprobes_all_disarmed || !kprobes_allow_optimization)
		goto end;

	/*
	 * Wait for the quiescence period to ensure all running interrupts
	 * are done. Because an optprobe may modify multiple instructions,
	 * there is a chance that the Nth instruction is interrupted. In that
	 * case, the running interrupt can return to the 2nd-Nth byte of the
	 * jump instruction. This wait is for avoiding it.
	 */
	synchronize_sched();

	/*
	 * Optimizing/unoptimizing refers to online_cpus via stop_machine(),
	 * while cpu-hotplug modifies online_cpus. At the same time,
	 * text_mutex is held both by cpu-hotplug and here. This combination
	 * can cause a deadlock (cpu-hotplug tries to lock text_mutex but
	 * stop_machine() cannot proceed because online_cpus has changed).
	 * To avoid this deadlock, we call get_online_cpus() to prevent
	 * cpu-hotplug from running outside of the text_mutex critical
	 * section.
	 */
	get_online_cpus();
	mutex_lock(&text_mutex);
	list_for_each_entry_safe(op, tmp, &optimizing_list, list) {
		WARN_ON(kprobe_disabled(&op->kp));
		if (arch_optimize_kprobe(op) < 0)
			op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
		list_del_init(&op->list);
	}
	mutex_unlock(&text_mutex);
	put_online_cpus();
end:
	mutex_unlock(&kprobe_mutex);
	mutex_unlock(&module_mutex);
}

/* Optimize kprobe if p is ready to be optimized */
static __kprobes void optimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* Check if the kprobe is disabled or not ready for optimization. */
	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
	    (kprobe_disabled(p) || kprobes_all_disarmed))
		return;

	/* Neither break_handler nor post_handler is supported. */
	if (p->break_handler || p->post_handler)
		return;

	op = container_of(p, struct optimized_kprobe, kp);

	/* Check that there are no other kprobes at the optimized instructions */
	if (arch_check_optimized_kprobe(op) < 0)
		return;

	/* Check if it is already optimized. */
	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
		return;

	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
	list_add(&op->list, &optimizing_list);
	if (!delayed_work_pending(&optimizing_work))
		schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}

/* Unoptimize a kprobe if p is optimized */
static __kprobes void unoptimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if ((p->flags & KPROBE_FLAG_OPTIMIZED) && kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (!list_empty(&op->list))
			/* Dequeue from the optimization queue */
			list_del_init(&op->list);
		else
			/* Replace jump with break */
			arch_unoptimize_kprobe(op);
		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
	}
}

/* Remove optimized instructions */
static void __kprobes kill_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	if (!list_empty(&op->list)) {
		/* Dequeue from the optimization queue */
		list_del_init(&op->list);
		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
	}
	/* Don't unoptimize, because the target code will be freed. */
	arch_remove_optimized_kprobe(op);
}

/* Try to prepare optimized instructions */
static __kprobes void prepare_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_prepare_optimized_kprobe(op);
}

/* Free optimized instructions and optimized_kprobe */
static __kprobes void free_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_remove_optimized_kprobe(op);
	kfree(op);
}

/* Allocate new optimized_kprobe and try to prepare optimized instructions */
static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
	if (!op)
		return NULL;

	INIT_LIST_HEAD(&op->list);
	op->kp.addr = p->addr;
	arch_prepare_optimized_kprobe(op);

	return &op->kp;
}

static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);

/*
 * Prepare an optimized_kprobe and optimize it
 * NOTE: p must be a normal registered kprobe
 */
static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
{
	struct kprobe *ap;
	struct optimized_kprobe *op;

	ap = alloc_aggr_kprobe(p);
	if (!ap)
		return;

	op = container_of(ap, struct optimized_kprobe, kp);
	if (!arch_prepared_optinsn(&op->optinsn)) {
		/* If failed to setup optimizing, fallback to kprobe */
		free_aggr_kprobe(ap);
		return;
	}

	init_aggr_kprobe(ap, p);
	optimize_kprobe(ap);
}

#ifdef CONFIG_SYSCTL
static void __kprobes optimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	/* If optimization is already allowed, just return */
	if (kprobes_allow_optimization)
		return;

	kprobes_allow_optimization = true;
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (!kprobe_disabled(p))
				optimize_kprobe(p);
	}
	mutex_unlock(&text_mutex);
	printk(KERN_INFO "Kprobes globally optimized\n");
}

static void __kprobes unoptimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	/* If optimization is already prohibited, just return */
	if (!kprobes_allow_optimization)
		return;

	kprobes_allow_optimization = false;
	printk(KERN_INFO "Kprobes globally unoptimized\n");
	get_online_cpus();	/* For avoiding text_mutex deadlock */
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!kprobe_disabled(p))
				unoptimize_kprobe(p);
		}
	}

	mutex_unlock(&text_mutex);
	put_online_cpus();
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
}

int sysctl_kprobes_optimization;
int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
				      void __user *buffer, size_t *length,
				      loff_t *ppos)
{
	int ret;

	mutex_lock(&kprobe_mutex);
	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (sysctl_kprobes_optimization)
		optimize_all_kprobes();
	else
		unoptimize_all_kprobes();
	mutex_unlock(&kprobe_mutex);

	return ret;
}
#endif /* CONFIG_SYSCTL */
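
/*
 * The handler above backs the debug.kprobes-optimization sysctl (the
 * table entry itself lives in kernel/sysctl.c), so from a shell
 * (illustrative):
 *
 *	echo 0 > /proc/sys/debug/kprobes-optimization	(unoptimize all)
 *	echo 1 > /proc/sys/debug/kprobes-optimization	(re-allow optimization)
 */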

static void __kprobes __arm_kprobe(struct kprobe *p)
{
	struct kprobe *old_p;

	/* Check collision with other optimized kprobes */
	old_p = get_optimized_kprobe((unsigned long)p->addr);
	if (unlikely(old_p))
		unoptimize_kprobe(old_p); /* Fallback to unoptimized kprobe */

	arch_arm_kprobe(p);
	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
}

static void __kprobes __disarm_kprobe(struct kprobe *p)
{
	struct kprobe *old_p;

	unoptimize_kprobe(p);	/* Try to unoptimize */
	arch_disarm_kprobe(p);

	/* If another kprobe was blocked, optimize it. */
	old_p = get_optimized_kprobe((unsigned long)p->addr);
	if (unlikely(old_p))
		optimize_kprobe(old_p);
}

#else /* !CONFIG_OPTPROBES */

#define optimize_kprobe(p)			do {} while (0)
#define unoptimize_kprobe(p)			do {} while (0)
#define kill_optimized_kprobe(p)		do {} while (0)
#define prepare_optimized_kprobe(p)		do {} while (0)
#define try_to_optimize_kprobe(p)		do {} while (0)
#define __arm_kprobe(p)				arch_arm_kprobe(p)
#define __disarm_kprobe(p)			arch_disarm_kprobe(p)

static __kprobes void free_aggr_kprobe(struct kprobe *p)
{
	kfree(p);
}

static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
}
#endif /* CONFIG_OPTPROBES */

/* Arm a kprobe with text_mutex */
static void __kprobes arm_kprobe(struct kprobe *kp)
{
	/*
	 * Here, since __arm_kprobe() doesn't use stop_machine(),
	 * this doesn't cause deadlock on text_mutex. So, we don't
	 * need get_online_cpus().
	 */
	mutex_lock(&text_mutex);
	__arm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/* Disarm a kprobe with text_mutex */
static void __kprobes disarm_kprobe(struct kprobe *kp)
{
	get_online_cpus();	/* For avoiding text_mutex deadlock */
	mutex_lock(&text_mutex);
	__disarm_kprobe(kp);
	mutex_unlock(&text_mutex);
	put_online_cpus();
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (!kprobe_aggrprobe(p)) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
				struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_lock(unsigned long hash,
	unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
	unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot.  kretprobe_table_locks not yet initialized. */
		return;

	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}

/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist.
 */
static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));

	if (p->break_handler || p->post_handler)
		unoptimize_kprobe(ap);	/* Fall back to normal kprobe */

	if (p->break_handler) {
		if (ap->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &ap->list);
		ap->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed)
			/* Arm the breakpoint again. */
			__arm_kprobe(ap);
	}
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	/* Copy p's insn slot to ap */
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care about a kprobe which has gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;
	if (p->break_handler && !kprobe_gone(p))
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	INIT_HLIST_NODE(&ap->hlist);

	list_add_rcu(&p->list, &ap->list);
	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = old_p;

	if (!kprobe_aggrprobe(old_p)) {
		/* If old_p is not an aggr_kprobe, create new aggr_kprobe. */
		ap = alloc_aggr_kprobe(old_p);
		if (!ap)
			return -ENOMEM;
		init_aggr_kprobe(ap, old_p);
	}

	if (kprobe_gone(ap)) {
		/*
		 * Attempting to insert a new probe at the same location
		 * that had a probe in a module vaddr area which has already
		 * been freed. The instruction slot has therefore already
		 * been released; we need a new slot for the new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if we fail to allocate a new slot, we don't
			 * need to free the aggr_kprobe. It will be used next
			 * time, or freed by unregister_kprobe().
			 */
			return ret;

		/* Prepare optimized instructions if possible. */
		prepare_optimized_kprobe(ap);

		/*
		 * Clear the gone flag to prevent allocating a new slot again,
		 * and set the disabled flag because it is not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	/* Copy ap's insn slot to p */
	copy_kprobe(ap, p);
	return add_new_kprobe(ap, p);
}

/* Try to disable aggr_kprobe, and return 1 if succeeded.*/
static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable aggr_kprobe.
			 */
			return 0;
	}
	p->flags |= KPROBE_FLAG_DISABLED;
	return 1;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	struct kprobe_blackpoint *kb;

	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		if (kb->start_addr) {
			if (addr >= kb->start_addr &&
			    addr < (kb->start_addr + kb->range))
				return -EINVAL;
		}
	}
	return 0;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;
	if (p->symbol_name) {
		if (addr)
			return NULL;
		kprobe_lookup_name(p->symbol_name, addr);
	}

	if (!addr)
		return NULL;
	return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}
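
/*
 * Example (illustrative): probing a few bytes past a symbol's entry -
 *
 *	struct kprobe kp = {
 *		.symbol_name	= "do_fork",
 *		.offset		= 4,	(kprobe_addr() -> do_fork + 4)
 *	};
 *
 * Setting both ->addr and ->symbol_name is ambiguous, so kprobe_addr()
 * returns NULL in that case.
 */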

/* Check passed kprobe is valid and return kprobe in kprobe_table. */
static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p))
		return NULL;

	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
			/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return old_p;
}

/* Return error if the kprobe is being re-registered */
static inline int check_kprobe_rereg(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;

	mutex_lock(&kprobe_mutex);
	old_p = __get_valid_kprobe(p);
	if (old_p)
		ret = -EINVAL;
	mutex_unlock(&kprobe_mutex);
	return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	addr = kprobe_addr(p);
	if (!addr)
		return -EINVAL;
	p->addr = addr;

	ret = check_kprobe_rereg(p);
	if (ret)
		return ret;

	preempt_disable();
	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr) ||
	    ftrace_text_reserved(p->addr, p->addr)) {
		preempt_enable();
		return -EINVAL;
	}

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;

	/*
	 * Check if we are probing a module.
	 */
	probed_mod = __module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(probed_mod))) {
			preempt_enable();
			return -EINVAL;
		}
		/*
		 * If the module freed .init.text, we couldn't insert
		 * kprobes in there.
		 */
		if (within_module_init((unsigned long)p->addr, probed_mod) &&
		    probed_mod->state != MODULE_STATE_COMING) {
			module_put(probed_mod);
			preempt_enable();
			return -EINVAL;
		}
	}
	preempt_enable();

	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);
	mutex_lock(&kprobe_mutex);

	get_online_cpus();	/* For avoiding text_mutex deadlock. */
	mutex_lock(&text_mutex);

	old_p = get_kprobe(p->addr);
	if (old_p) {
		/* Since this may unoptimize old_p, locking text_mutex. */
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p))
		__arm_kprobe(p);

	/* Try to optimize kprobe */
	try_to_optimize_kprobe(p);

out:
	mutex_unlock(&text_mutex);
	put_online_cpus();
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
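
/*
 * Example (illustrative module-side sketch, in the spirit of
 * samples/kprobes/kprobe_example.c; "do_fork" is just a convenient
 * target):
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "pre_handler: p->addr = %p\n", p->addr);
 *		return 0;
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= handler_pre,
 *	};
 *
 *	ret = register_kprobe(&kp);	(arms a breakpoint at do_fork)
 *	...
 *	unregister_kprobe(&kp);
 */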
/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = __get_valid_kprobe(p);
	if (old_p == NULL)
		return -EINVAL;

	if (old_p == p ||
	    (kprobe_aggrprobe(old_p) &&
	     list_is_singular(&old_p->list))) {
		/*
		 * Only probe on the hash list. Disarm only if kprobes are
		 * enabled and not gone - otherwise, the breakpoint would
		 * already have been removed. We save on flushing icache.
		 */
		if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
			disarm_kprobe(old_p);
		hlist_del_rcu(&old_p->hlist);
	} else {
		if (p->break_handler && !kprobe_gone(p))
			old_p->break_handler = NULL;
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			old_p->post_handler = NULL;
		}
noclean:
		list_del_rcu(&p->list);
		if (!kprobe_disabled(old_p)) {
			try_to_disable_aggr_kprobe(old_p);
			if (!kprobes_all_disarmed) {
				if (kprobe_disabled(old_p))
					disarm_kprobe(old_p);
				else
					/* Try to optimize this probe again */
					optimize_kprobe(old_p);
			}
		}
	}
	return 0;
}

static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *old_p;

	if (list_empty(&p->list))
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* "p" is the last child of an aggr_kprobe */
		old_p = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		arch_remove_kprobe(old_p);
		free_aggr_kprobe(old_p);
	}
}

int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);

void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}
int __kprobes register_jprobes(struct jprobe **jps, int num)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		if (!kernel_text_address(addr))
			ret = -EINVAL;
		else {
			/* Todo: Verify probepoint is a function entry point */
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = register_kprobe(&jp->kp);
		}
		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);

int __kprobes register_jprobe(struct jprobe *jp)
{
	return register_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(register_jprobe);
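
/*
 * Example (illustrative jprobe sketch, in the spirit of
 * samples/kprobes/jprobe_example.c): the handler must mirror the probed
 * function's signature and must end with jprobe_return() -
 *
 *	static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
 *			     struct pt_regs *regs, unsigned long stack_size,
 *			     int __user *parent_tidptr, int __user *child_tidptr)
 *	{
 *		printk(KERN_INFO "jprobe: clone_flags = 0x%lx\n", clone_flags);
 *		jprobe_return();	(never falls through)
 *		return 0;
 *	}
 *
 *	static struct jprobe my_jprobe = {
 *		.entry		= jdo_fork,
 *		.kp.symbol_name	= "do_fork",
 *	};
 *
 *	ret = register_jprobe(&my_jprobe);
 */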
void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);

void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
EXPORT_SYMBOL_GPL(unregister_jprobes);
#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/* TODO: consider swapping the RA only after the last pre_handler has fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs))
			return 0;

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (!addr)
			return -EINVAL;

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
#else
		rp->maxactive = num_possible_cpus();
#endif
	}
	spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);
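
/*
 * Example (illustrative kretprobe sketch, in the spirit of
 * samples/kprobes/kretprobe_example.c): reporting a function's return
 * value -
 *
 *	static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "do_fork returned %lu\n",
 *		       regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.handler	= ret_handler,
 *		.kp.symbol_name	= "do_fork",
 *		.maxactive	= 20,
 *	};
 *
 *	ret = register_kretprobe(&my_kretprobe);
 */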

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_KRETPROBES */

/* Set the kprobe gone and remove its instruction buffer. */
static void __kprobes kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (kprobe_aggrprobe(p)) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		p->break_handler = NULL;
		kill_optimized_kprobe(p);
	}
	/*
	 * Here, we can remove insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);
}

void __kprobes dump_kprobe(struct kprobe *kp)
{
	printk(KERN_WARNING "Dumping kprobe:\n");
	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
	       kp->symbol_name, kp->addr, kp->offset);
}

/* Module notifier call back, checking kprobes on the module */
static int __kprobes kprobes_module_callback(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING is notified, both the module's .text and
	 * .init.text sections will be freed. When MODULE_STATE_LIVE is
	 * notified, only the .init.text section will be freed. We need to
	 * disable kprobes which have been inserted in those sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed at will
				 * soon be vfreed but not synced to disk.
				 * Hence, disarming the breakpoint isn't needed.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};

static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	/*
	 * Lookup and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we'll need to determine
	 * the range of addresses that belong to the said functions,
	 * since a kprobe need not necessarily be at the beginning
	 * of a function.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		kprobe_lookup_name(kb->name, addr);
		if (!addr)
			continue;

		kb->start_addr = (unsigned long)addr;
		symbol_name = kallsyms_lookup(kb->start_addr,
				&size, &offset, &modname, namebuf);
		if (!symbol_name)
			kb->range = 0;
		else
			kb->range = size;
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

#if defined(CONFIG_OPTPROBES)
#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
	/* Init kprobe_optinsn_slots */
	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
#endif
1696 1697 1698
	/* By default, kprobes can be optimized */
	kprobes_allow_optimization = true;
#endif
1699

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname, struct kprobe *pp)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";

	if (sym)
		seq_printf(pi, "%p  %s  %s+0x%x  %s ",
			p->addr, kprobe_type, sym, offset,
			(modname ? modname : " "));
	else
		seq_printf(pi, "%p  %s  %p ",
			p->addr, kprobe_type, p->addr);

	if (!pp)
		pp = p;
	seq_printf(pi, "%s%s%s\n",
		(kprobe_gone(p) ? "[GONE]" : ""),
		((kprobe_disabled(p) && !kprobe_gone(p)) ?  "[DISABLED]" : ""),
		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""));
}

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (kprobe_aggrprobe(p)) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname, p);
		} else
			report_probe(pi, p, sym, offset, modname, NULL);
	}
	preempt_enable();
	return 0;
}

static const struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static const struct file_operations debugfs_kprobes_operations = {
	.open           = kprobes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

/* Disable one kprobe */
int __kprobes disable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	/* If the probe is already disabled (or gone), just return */
	if (kprobe_disabled(kp))
		goto out;

	kp->flags |= KPROBE_FLAG_DISABLED;
	if (p != kp)
		/* When kp != p, p is always enabled. */
		try_to_disable_aggr_kprobe(p);

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		disarm_kprobe(p);
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int __kprobes enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone, we couldn't enable it. */
		ret = -EINVAL;
		goto out;
	}

	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;

	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
		p->flags &= ~KPROBE_FLAG_DISABLED;
		arm_kprobe(p);
	}
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);
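
/*
 * Example (illustrative): muting a registered probe without tearing it
 * down -
 *
 *	disable_kprobe(&kp);	(breakpoint disarmed, kp stays registered)
 *	...
 *	enable_kprobe(&kp);	(re-armed; -EINVAL if kp is invalid or gone)
 */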

static void __kprobes arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	/* Arming kprobes doesn't optimize kprobe itself */
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (!kprobe_disabled(p))
				__arm_kprobe(p);
	}
	mutex_unlock(&text_mutex);

	kprobes_all_disarmed = false;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void __kprobes disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed)
		goto already_disabled;

	kprobes_all_disarmed = true;
	printk(KERN_INFO "Kprobes globally disabled\n");

	/*
	 * Here we call get_online_cpus() for avoiding text_mutex deadlock,
	 * because disarming may also unoptimize kprobes.
	 */
	get_online_cpus();
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
				__disarm_kprobe(p);
		}
	}

	mutex_unlock(&text_mutex);
	put_online_cpus();
	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
	return;

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disarm_all_kprobes();
		break;
	}

	return count;
}

static const struct file_operations fops_kp = {
	.read =         read_enabled_file_bool,
	.write =        write_enabled_file_bool,
};
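
/*
 * The "enabled" file created below toggles every registered kprobe at
 * once; with debugfs mounted at /sys/kernel/debug:
 *
 *	echo 0 > /sys/kernel/debug/kprobes/enabled	(disarm all kprobes)
 *	echo 1 > /sys/kernel/debug/kprobes/enabled	(arm them again)
 */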

static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);
/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);