/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)


/*
 * Some oddball architectures like 64-bit powerpc have function descriptors,
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif
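/*
 * For example (illustrative only),
 *	kprobe_lookup_name("do_fork", addr);
 * leaves the kallsyms address of do_fork (or NULL) in addr; an
 * architecture such as powerpc64 may override this to look through the
 * function descriptor first.
 */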

static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

/*
 * Normally, functions that we'd want to prohibit kprobes in are marked
 * __kprobes. But there are cases where such functions already belong to
 * a different section (__sched for preempt_schedule).
 *
 * For such cases, we now have a blacklist.
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
	{"preempt_schedule",},
	{"native_get_debugreg",},
	{"irq_entries_start",},
	{"common_interrupt",},
	{"mcount",},	/* mcount can be called from everywhere */
	{NULL}    /* Terminator */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	int nused;
	int ngarbage;
	char slot_used[];
};

#define KPROBE_INSN_PAGE_SIZE(slots)			\
	(offsetof(struct kprobe_insn_page, slot_used) +	\
	 (sizeof(char) * (slots)))

struct kprobe_insn_cache {
	struct list_head pages;	/* list of kprobe_insn_page */
	size_t insn_size;	/* size of instruction slot */
	int nr_garbage;
};

static int slots_per_page(struct kprobe_insn_cache *c)
{
	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
}
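/*
 * A worked example (figures assumed for illustration): with 4KB pages,
 * a one-byte kprobe_opcode_t and MAX_INSN_SIZE == 16, as on x86,
 * slots_per_page() yields 4096 / (16 * 1) = 256 slots per page.
 */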

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_slots */
static struct kprobe_insn_cache kprobe_insn_slots = {
	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
	.nr_garbage = 0,
};
static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip;

 retry:
	list_for_each_entry(kip, &c->pages, list) {
		if (kip->nused < slots_per_page(c)) {
			int i;
			for (i = 0; i < slots_per_page(c); i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * c->insn_size);
				}
			}
			/* kip->nused is broken. Fix it. */
			kip->nused = slots_per_page(c);
			WARN_ON(1);
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (c->nr_garbage && collect_garbage_slots(c) == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page. */
	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_LIST_HEAD(&kip->list);
	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	list_add(&kip->list, &c->pages);
	return kip->insns;
}

kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	kprobe_opcode_t *ret = NULL;

	mutex_lock(&kprobe_insn_mutex);
	ret = __get_insn_slot(&kprobe_insn_slots);
	mutex_unlock(&kprobe_insn_mutex);

	return ret;
}

/* Return 1 if the page became fully unused (it is freed unless it is the last one), otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kip->list)) {
			list_del(&kip->list);
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no one is still running on the garbage slots */
	synchronize_sched();

	list_for_each_entry_safe(kip, next, &c->pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < slots_per_page(c); i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	c->nr_garbage = 0;
	return 0;
}

static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
				       kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;

	list_for_each_entry(kip, &c->pages, list) {
		long idx = ((long)slot - (long)kip->insns) /
				(c->insn_size * sizeof(kprobe_opcode_t));
		if (idx >= 0 && idx < slots_per_page(c)) {
			WARN_ON(kip->slot_used[idx] != SLOT_USED);
			if (dirty) {
				kip->slot_used[idx] = SLOT_DIRTY;
				kip->ngarbage++;
				if (++c->nr_garbage > slots_per_page(c))
					collect_garbage_slots(c);
			} else
				collect_one_slot(kip, idx);
			return;
		}
	}
	/* Could not free this slot. */
	WARN_ON(1);
}

void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
{
	mutex_lock(&kprobe_insn_mutex);
	__free_insn_slot(&kprobe_insn_slots, slot, dirty);
	mutex_unlock(&kprobe_insn_mutex);
}
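/*
 * Illustrative sketch of an arch-side caller (modeled on the x86 port;
 * the "boostable" field is an assumption of that port, not defined here):
 *
 *	free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
 *
 * A non-zero "dirty" marks the slot SLOT_DIRTY rather than SLOT_CLEAN,
 * so it is only reused after collect_garbage_slots() has waited for a
 * quiescent period via synchronize_sched().
 */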
#ifdef CONFIG_OPTPROBES
/* For optimized_kprobe buffer */
static DEFINE_MUTEX(kprobe_optinsn_mutex); /* Protects kprobe_optinsn_slots */
static struct kprobe_insn_cache kprobe_optinsn_slots = {
	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
	/* .insn_size is initialized later */
	.nr_garbage = 0,
};
/* Get a slot for optimized_kprobe buffer */
kprobe_opcode_t __kprobes *get_optinsn_slot(void)
{
	kprobe_opcode_t *ret = NULL;

	mutex_lock(&kprobe_optinsn_mutex);
	ret = __get_insn_slot(&kprobe_optinsn_slots);
	mutex_unlock(&kprobe_optinsn_mutex);

	return ret;
}

void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
{
	mutex_lock(&kprobe_optinsn_mutex);
	__free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
	mutex_unlock(&kprobe_optinsn_mutex);
}
#endif
#endif

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}

	return NULL;
}
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);

/* Return true if the kprobe is an aggregator */
static inline int kprobe_aggrprobe(struct kprobe *p)
{
	return p->pre_handler == aggr_pre_handler;
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

#ifdef CONFIG_OPTPROBES
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_allow_optimization;

/*
 * Call all the pre_handlers on the list, but ignore their return values.
 * This must be called from the arch-dependent optimized caller.
 */
void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->pre_handler(kp, regs);
		}
		reset_kprobe_instance();
	}
}

/* Return true(!0) if the kprobe is ready for optimization. */
static inline int kprobe_optready(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		return arch_prepared_optinsn(&op->optinsn);
	}

	return 0;
}

/*
 * Return an optimized kprobe whose optimizing code replaces
 * instructions including addr (excluding the breakpoint itself).
 */
struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
{
	int i;
	struct kprobe *p = NULL;
	struct optimized_kprobe *op;

	/* Don't check i == 0, since that is a breakpoint case. */
	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
		p = get_kprobe((void *)(addr - i));

	if (p && kprobe_optready(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (arch_within_optimized_kprobe(op, addr))
			return p;
	}

	return NULL;
}

/* Optimization staging list, protected by kprobe_mutex */
static LIST_HEAD(optimizing_list);

static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
#define OPTIMIZE_DELAY 5

/* Kprobe jump optimizer */
static __kprobes void kprobe_optimizer(struct work_struct *work)
{
	struct optimized_kprobe *op, *tmp;

	/* Lock modules while optimizing kprobes */
	mutex_lock(&module_mutex);
	mutex_lock(&kprobe_mutex);
	if (kprobes_all_disarmed || !kprobes_allow_optimization)
		goto end;

	/*
	 * Wait for a quiescence period to ensure all running interrupts
	 * are done. Because an optprobe may modify multiple instructions,
	 * there is a chance that the Nth instruction is interrupted. In that
	 * case, a running interrupt can return into the 2nd-Nth byte of the
	 * jump instruction. This wait is to avoid that.
	 */
	synchronize_sched();

	/*
	 * The optimization/unoptimization refers to online_cpus via
	 * stop_machine(), while cpu-hotplug modifies online_cpus. At the
	 * same time, text_mutex is held both in cpu-hotplug and here.
	 * This combination can cause a deadlock (cpu-hotplug tries to lock
	 * text_mutex, but stop_machine() cannot proceed because online_cpus
	 * has been changed).
	 * To avoid this deadlock, we call get_online_cpus() to prevent
	 * cpu-hotplug from running outside of the text_mutex critical section.
	 */
	get_online_cpus();
	mutex_lock(&text_mutex);
	list_for_each_entry_safe(op, tmp, &optimizing_list, list) {
		WARN_ON(kprobe_disabled(&op->kp));
		if (arch_optimize_kprobe(op) < 0)
			op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
		list_del_init(&op->list);
	}
	mutex_unlock(&text_mutex);
	put_online_cpus();
end:
	mutex_unlock(&kprobe_mutex);
	mutex_unlock(&module_mutex);
}

/* Optimize kprobe if p is ready to be optimized */
static __kprobes void optimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* Check if the kprobe is disabled or not ready for optimization. */
	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
	    (kprobe_disabled(p) || kprobes_all_disarmed))
		return;

	/* Neither break_handler nor post_handler is supported by optprobes. */
	if (p->break_handler || p->post_handler)
		return;

	op = container_of(p, struct optimized_kprobe, kp);

	/* Check that there are no other kprobes at the instructions to be optimized */
	if (arch_check_optimized_kprobe(op) < 0)
		return;

	/* Check if it is already optimized. */
	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
		return;

	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
	list_add(&op->list, &optimizing_list);
	if (!delayed_work_pending(&optimizing_work))
		schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}

/* Unoptimize a kprobe if p is optimized */
static __kprobes void unoptimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if ((p->flags & KPROBE_FLAG_OPTIMIZED) && kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (!list_empty(&op->list))
			/* Dequeue from the optimization queue */
			list_del_init(&op->list);
		else
			/* Replace jump with break */
			arch_unoptimize_kprobe(op);
		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
	}
}

/* Remove optimized instructions */
static void __kprobes kill_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	if (!list_empty(&op->list)) {
		/* Dequeue from the optimization queue */
		list_del_init(&op->list);
		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
	}
	/* Don't unoptimize, because the target code will be freed. */
	arch_remove_optimized_kprobe(op);
}

/* Try to prepare optimized instructions */
static __kprobes void prepare_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_prepare_optimized_kprobe(op);
}

/* Free optimized instructions and optimized_kprobe */
static __kprobes void free_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_remove_optimized_kprobe(op);
	kfree(op);
}

/* Allocate new optimized_kprobe and try to prepare optimized instructions */
static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
	if (!op)
		return NULL;

	INIT_LIST_HEAD(&op->list);
	op->kp.addr = p->addr;
	arch_prepare_optimized_kprobe(op);

	return &op->kp;
}

static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);

/*
 * Prepare an optimized_kprobe and optimize it
 * NOTE: p must be a normal registered kprobe
 */
static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
{
	struct kprobe *ap;
	struct optimized_kprobe *op;

	ap = alloc_aggr_kprobe(p);
	if (!ap)
		return;

	op = container_of(ap, struct optimized_kprobe, kp);
	if (!arch_prepared_optinsn(&op->optinsn)) {
		/* If preparing the optimized instructions failed, fall back to a plain kprobe */
		free_aggr_kprobe(ap);
		return;
	}

	init_aggr_kprobe(ap, p);
	optimize_kprobe(ap);
}

#ifdef CONFIG_SYSCTL
static void __kprobes optimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	/* If optimization is already allowed, just return */
	if (kprobes_allow_optimization)
		return;

	kprobes_allow_optimization = true;
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (!kprobe_disabled(p))
				optimize_kprobe(p);
	}
	mutex_unlock(&text_mutex);
	printk(KERN_INFO "Kprobes globally optimized\n");
}

static void __kprobes unoptimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	/* If optimization is already prohibited, just return */
	if (!kprobes_allow_optimization)
		return;

	kprobes_allow_optimization = false;
	printk(KERN_INFO "Kprobes globally unoptimized\n");
	get_online_cpus();	/* For avoiding text_mutex deadlock */
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!kprobe_disabled(p))
				unoptimize_kprobe(p);
		}
	}

	mutex_unlock(&text_mutex);
	put_online_cpus();
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
}

int sysctl_kprobes_optimization;
int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
				      void __user *buffer, size_t *length,
				      loff_t *ppos)
{
	int ret;

	mutex_lock(&kprobe_mutex);
	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (sysctl_kprobes_optimization)
		optimize_all_kprobes();
	else
		unoptimize_all_kprobes();
	mutex_unlock(&kprobe_mutex);

	return ret;
}
#endif /* CONFIG_SYSCTL */
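/*
 * The handler above backs a sysctl registered elsewhere (under
 * CONFIG_SYSCTL). Assuming the usual debug.kprobes-optimization name,
 * optimization can then be toggled at run time with, e.g.:
 *
 *	sysctl -w debug.kprobes-optimization=0
 */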

static void __kprobes __arm_kprobe(struct kprobe *p)
{
	struct kprobe *old_p;

	/* Check collision with other optimized kprobes */
	old_p = get_optimized_kprobe((unsigned long)p->addr);
	if (unlikely(old_p))
		unoptimize_kprobe(old_p); /* Fallback to unoptimized kprobe */

	arch_arm_kprobe(p);
	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
}

static void __kprobes __disarm_kprobe(struct kprobe *p)
{
	struct kprobe *old_p;

	unoptimize_kprobe(p);	/* Try to unoptimize */
	arch_disarm_kprobe(p);

	/* If another kprobe was blocked, optimize it. */
	old_p = get_optimized_kprobe((unsigned long)p->addr);
	if (unlikely(old_p))
		optimize_kprobe(old_p);
}

#else /* !CONFIG_OPTPROBES */

#define optimize_kprobe(p)			do {} while (0)
#define unoptimize_kprobe(p)			do {} while (0)
#define kill_optimized_kprobe(p)		do {} while (0)
#define prepare_optimized_kprobe(p)		do {} while (0)
#define try_to_optimize_kprobe(p)		do {} while (0)
#define __arm_kprobe(p)				arch_arm_kprobe(p)
#define __disarm_kprobe(p)			arch_disarm_kprobe(p)

static __kprobes void free_aggr_kprobe(struct kprobe *p)
{
	kfree(p);
}

static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
}
#endif /* CONFIG_OPTPROBES */

/* Arm a kprobe with text_mutex */
static void __kprobes arm_kprobe(struct kprobe *kp)
{
	/*
	 * Here, since __arm_kprobe() doesn't use stop_machine(),
	 * this doesn't cause deadlock on text_mutex. So, we don't
	 * need get_online_cpus().
	 */
	mutex_lock(&text_mutex);
	__arm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/* Disarm a kprobe with text_mutex */
static void __kprobes disarm_kprobe(struct kprobe *kp)
{
	get_online_cpus();	/* For avoiding text_mutex deadlock */
	mutex_lock(&text_mutex);
	__disarm_kprobe(kp);
	mutex_unlock(&text_mutex);
	put_online_cpus();
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (!kprobe_aggrprobe(p)) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
				struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_lock(unsigned long hash,
	unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
	unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}
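/*
 * Sketch of how an arch trampoline handler is expected to pair these
 * helpers (surrounding logic elided; see arch/xxx/kernel/kprobes.c):
 *
 *	kretprobe_hash_lock(current, &head, &flags);
 *	... walk head and consume this task's instances ...
 *	kretprobe_hash_unlock(current, &flags);
 */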

/*
 * This function is called from finish_task_switch() when task tk becomes
 * dead, so that we can recycle any function-return probe instances
 * associated with this task. These left-over instances represent probed
 * functions that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot.  kretprobe_table_locks not yet initialized. */
		return;

	INIT_HLIST_HEAD(&empty_rp);	/* must be initialized before recycle_rp_inst() adds to it */
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}

/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist.
 */
static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));

	if (p->break_handler || p->post_handler)
		unoptimize_kprobe(ap);	/* Fall back to normal kprobe */

	if (p->break_handler) {
		if (ap->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &ap->list);
		ap->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed)
			/* Arm the breakpoint again. */
			__arm_kprobe(ap);
	}
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe.
 */
static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	/* Copy p's insn slot to ap */
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care about a kprobe which has already gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;
	if (p->break_handler && !kprobe_gone(p))
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	INIT_HLIST_NODE(&ap->hlist);

	list_add_rcu(&p->list, &ap->list);
	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = old_p;

	if (!kprobe_aggrprobe(old_p)) {
		/* If old_p is not an aggr_kprobe, create a new aggr_kprobe. */
		ap = alloc_aggr_kprobe(old_p);
		if (!ap)
			return -ENOMEM;
		init_aggr_kprobe(ap, old_p);
	}

	if (kprobe_gone(ap)) {
		/*
		 * We are attempting to insert a new probe at the same
		 * location as a probe in a module vaddr area that has
		 * already been freed, so its instruction slot has already
		 * been released. We need a new slot for the new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if we fail to allocate a new slot, we don't
			 * need to free the aggr_kprobe; it will be reused
			 * next time, or freed by unregister_kprobe().
			 */
			return ret;

		/* Prepare optimized instructions if possible. */
		prepare_optimized_kprobe(ap);

		/*
		 * Clear the gone flag to prevent allocating a new slot again,
		 * and set the disabled flag because it is not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	/* Copy ap's insn slot to p */
	copy_kprobe(ap, p);
	return add_new_kprobe(ap, p);
}

/* Try to disable aggr_kprobe, and return 1 if succeeded.*/
static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable aggr_kprobe.
			 */
			return 0;
	}
	p->flags |= KPROBE_FLAG_DISABLED;
	return 1;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	struct kprobe_blackpoint *kb;

	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	/*
	 * If there is a kprobe_blacklist, verify the address and fail
	 * any probe registration that falls in a prohibited area.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		if (kb->start_addr) {
			if (addr >= kb->start_addr &&
			    addr < (kb->start_addr + kb->range))
				return -EINVAL;
		}
	}
	return 0;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify an address relative to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;
	if (p->symbol_name) {
		if (addr)
			return NULL;
		kprobe_lookup_name(p->symbol_name, addr);
	}

	if (!addr)
		return NULL;
	return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}
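/*
 * For instance (hypothetical values), a caller sets either
 *	kp.addr        = (kprobe_opcode_t *)0xc0123456;
 * or
 *	kp.symbol_name = "do_fork";
 *	kp.offset      = 0x10;
 * but not both; the latter resolves to the address of do_fork plus 0x10.
 */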

/* Check passed kprobe is valid and return kprobe in kprobe_table. */
static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p))
		return NULL;

	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
			/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return old_p;
}

/* Return error if the kprobe is being re-registered */
static inline int check_kprobe_rereg(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;

	mutex_lock(&kprobe_mutex);
	old_p = __get_valid_kprobe(p);
	if (old_p)
		ret = -EINVAL;
	mutex_unlock(&kprobe_mutex);
	return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	addr = kprobe_addr(p);
	if (!addr)
		return -EINVAL;
	p->addr = addr;

	ret = check_kprobe_rereg(p);
	if (ret)
		return ret;

	preempt_disable();
	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr) ||
	    ftrace_text_reserved(p->addr, p->addr)) {
		preempt_enable();
		return -EINVAL;
	}

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;

	/*
	 * Check whether we are probing a module.
	 */
	probed_mod = __module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		/*
		 * We must hold a reference to the probed module while
		 * updating its code, to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(probed_mod))) {
			preempt_enable();
			return -EINVAL;
		}
		/*
		 * If the module has already freed .init.text, we can't
		 * insert kprobes there.
		 */
		if (within_module_init((unsigned long)p->addr, probed_mod) &&
		    probed_mod->state != MODULE_STATE_COMING) {
			module_put(probed_mod);
			preempt_enable();
			return -EINVAL;
		}
	}
	preempt_enable();

	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);
	mutex_lock(&kprobe_mutex);

	get_online_cpus();	/* For avoiding text_mutex deadlock. */
	mutex_lock(&text_mutex);

	old_p = get_kprobe(p->addr);
	if (old_p) {
		/* This may unoptimize old_p, hence we hold text_mutex. */
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p))
		__arm_kprobe(p);

	/* Try to optimize kprobe */
	try_to_optimize_kprobe(p);

out:
	mutex_unlock(&text_mutex);
	put_online_cpus();
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
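/*
 * Illustrative sketch (not part of this file): minimal use of
 * register_kprobe() from a module. The probed symbol "do_fork" and the
 * handler body are assumptions for the example only.
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "pre_handler: p->addr = %p\n", p->addr);
 *		return 0;
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= handler_pre,
 *	};
 *
 *	static int __init kprobe_example_init(void)
 *	{
 *		return register_kprobe(&kp);
 *	}
 */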

/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = __get_valid_kprobe(p);
	if (old_p == NULL)
		return -EINVAL;

	if (old_p == p ||
	    (kprobe_aggrprobe(old_p) &&
	     list_is_singular(&old_p->list))) {
		/*
		 * This is the only probe on the hash list. Disarm it only if
		 * kprobes are enabled and it is not gone - otherwise, the
		 * breakpoint would already have been removed. We save on
		 * flushing the icache.
		 */
		if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
			disarm_kprobe(old_p);
		hlist_del_rcu(&old_p->hlist);
	} else {
		if (p->break_handler && !kprobe_gone(p))
			old_p->break_handler = NULL;
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			old_p->post_handler = NULL;
		}
noclean:
		list_del_rcu(&p->list);
		if (!kprobe_disabled(old_p)) {
			try_to_disable_aggr_kprobe(old_p);
			if (!kprobes_all_disarmed) {
				if (kprobe_disabled(old_p))
					disarm_kprobe(old_p);
				else
					/* Try to optimize this probe again */
					optimize_kprobe(old_p);
			}
		}
	}
	return 0;
}

static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *old_p;

	if (list_empty(&p->list))
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* "p" is the last child of an aggr_kprobe */
		old_p = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		arch_remove_kprobe(old_p);
		free_aggr_kprobe(old_p);
	}
}

int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);
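/*
 * Illustrative sketch: registration of an array is all-or-nothing; on
 * the first failure every probe registered so far is rolled back:
 *
 *	struct kprobe *kps[2] = { &kp1, &kp2 };
 *	ret = register_kprobes(kps, 2);
 */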

void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

int __kprobes register_jprobes(struct jprobe **jps, int num)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		if (!kernel_text_address(addr))
			ret = -EINVAL;
		else {
			/* Todo: Verify probepoint is a function entry point */
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = register_kprobe(&jp->kp);
		}
		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);

int __kprobes register_jprobe(struct jprobe *jp)
{
	return register_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(register_jprobe);
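/*
 * Illustrative sketch: a jprobe handler mirrors the probed function's
 * signature and must end in jprobe_return(). The probed function and
 * its prototype are assumptions for the example only.
 *
 *	static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
 *			     struct pt_regs *regs, unsigned long stack_size,
 *			     int __user *parent_tidptr, int __user *child_tidptr)
 *	{
 *		printk(KERN_INFO "jprobe: clone_flags = 0x%lx\n", clone_flags);
 *		jprobe_return();
 *		return 0;
 *	}
 *
 *	static struct jprobe my_jprobe = {
 *		.entry		= jdo_fork,
 *		.kp.symbol_name	= "do_fork",
 *	};
 */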

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);

void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
EXPORT_SYMBOL_GPL(unregister_jprobes);

#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/* TODO: consider swapping the RA only after the last pre_handler has fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs))
			return 0;

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (!addr)
			return -EINVAL;

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
#else
		rp->maxactive = num_possible_cpus();
#endif
	}
	spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);
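/*
 * Illustrative sketch: a kretprobe that reports the return value of the
 * probed function. "do_fork" and the handler are assumptions for the
 * example only.
 *
 *	static int ret_handler(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "returned %lu\n", regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.handler	= ret_handler,
 *		.maxactive	= 20,
 *		.kp.symbol_name	= "do_fork",
 *	};
 *
 *	ret = register_kretprobe(&my_kretprobe);
 */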

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_KRETPROBES */

/* Set the kprobe gone and remove its instruction buffer. */
static void __kprobes kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (kprobe_aggrprobe(p)) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		p->break_handler = NULL;
		kill_optimized_kprobe(p);
	}
	}
	/*
	 * Here, we can remove insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);
}

/* Disable one kprobe */
int __kprobes disable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	/* If the probe is already disabled (or gone), just return */
	if (kprobe_disabled(kp))
		goto out;

	kp->flags |= KPROBE_FLAG_DISABLED;
	if (p != kp)
		/* When kp != p, p is always enabled. */
		try_to_disable_aggr_kprobe(p);

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		disarm_kprobe(p);
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int __kprobes enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone; we can't enable it. */
		ret = -EINVAL;
		goto out;
	}

	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;

	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
		p->flags &= ~KPROBE_FLAG_DISABLED;
		arm_kprobe(p);
	}
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);
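/*
 * Illustrative sketch: a registered probe can be parked and resumed
 * without a full unregister/register cycle:
 *
 *	disable_kprobe(&kp);	(breakpoint removed, kp stays registered)
 *	...
 *	enable_kprobe(&kp);	(breakpoint re-armed)
 */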

void __kprobes dump_kprobe(struct kprobe *kp)
{
	printk(KERN_WARNING "Dumping kprobe:\n");
	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
	       kp->symbol_name, kp->addr, kp->offset);
}

/* Module notifier call back, checking kprobes on the module */
static int __kprobes kprobes_module_callback(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING is notified, both the module's .text and
	 * .init.text sections will be freed. When MODULE_STATE_LIVE is
	 * notified, only the .init.text section will be freed. We need to
	 * kill the kprobes that have been inserted in those sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed at will
				 * soon be vfreed, but not synced to disk.
				 * Hence, disarming the breakpoint isn't needed.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};

static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	/*
	 * Lookup and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we'll need to determine
	 * the range of addresses that belong to the said functions,
	 * since a kprobe need not necessarily be at the beginning
	 * of a function.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		kprobe_lookup_name(kb->name, addr);
		if (!addr)
			continue;

		kb->start_addr = (unsigned long)addr;
		symbol_name = kallsyms_lookup(kb->start_addr,
				&size, &offset, &modname, namebuf);
		if (!symbol_name)
			kb->range = 0;
		else
			kb->range = size;
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

#if defined(CONFIG_OPTPROBES)
#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
	/* Init kprobe_optinsn_slots */
	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
#endif
	/* By default, kprobes can be optimized */
	kprobes_allow_optimization = true;
#endif

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname, struct kprobe *pp)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";

	if (sym)
		seq_printf(pi, "%p  %s  %s+0x%x  %s ",
			p->addr, kprobe_type, sym, offset,
			(modname ? modname : " "));
	else
		seq_printf(pi, "%p  %s  %p ",
			p->addr, kprobe_type, p->addr);

	if (!pp)
		pp = p;
	seq_printf(pi, "%s%s%s\n",
		(kprobe_gone(p) ? "[GONE]" : ""),
		((kprobe_disabled(p) && !kprobe_gone(p)) ?  "[DISABLED]" : ""),
		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""));
}

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (kprobe_aggrprobe(p)) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname, p);
		} else
			report_probe(pi, p, sym, offset, modname, NULL);
	}
	preempt_enable();
	return 0;
}

static const struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static const struct file_operations debugfs_kprobes_operations = {
	.open           = kprobes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

static void __kprobes arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	/* Arming kprobes doesn't optimize kprobe itself */
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (!kprobe_disabled(p))
				__arm_kprobe(p);
	}
	mutex_unlock(&text_mutex);

	kprobes_all_disarmed = false;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void __kprobes disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed)
		goto already_disabled;

	kprobes_all_disarmed = true;
	printk(KERN_INFO "Kprobes globally disabled\n");

	/*
	 * Here we call get_online_cpus() for avoiding text_mutex deadlock,
	 * because disarming may also unoptimize kprobes.
	 */
	get_online_cpus();
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
				__disarm_kprobe(p);
		}
	}

	mutex_unlock(&text_mutex);
	put_online_cpus();
	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
	return;

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disarm_all_kprobes();
		break;
	}

	return count;
}

static const struct file_operations fops_kp = {
	.read =         read_enabled_file_bool,
	.write =        write_enabled_file_bool,
};

static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
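/*
 * With CONFIG_DEBUG_FS, the files created above are used from user space,
 * e.g. (paths assume debugfs is mounted at the conventional /sys/kernel/debug):
 *
 *	cat /sys/kernel/debug/kprobes/list
 *	echo 0 > /sys/kernel/debug/kprobes/enabled	(disarm all kprobes)
 *	echo 1 > /sys/kernel/debug/kprobes/enabled	(re-arm them)
 */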
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);