/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>

/*
 * Constraints data
 */

/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]);

/* Number of pinned task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int *, nr_task_bp_pinned[TYPE_MAX]);

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);

static int nr_slots[TYPE_MAX];

/* Keep track of the breakpoints attached to tasks */
static LIST_HEAD(bp_task_head);

static int constraints_initialized;

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

__weak int hw_breakpoint_weight(struct perf_event *bp)
{
	return 1;
}

static inline enum bp_type_idx find_slot_idx(struct perf_event *bp)
{
	if (bp->attr.bp_type & HW_BREAKPOINT_RW)
		return TYPE_DATA;

	return TYPE_INST;
}

/*
 * Report the maximum number of pinned breakpoints a task
 * has in this cpu
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
	int i;
	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);

	for (i = nr_slots[type] - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)
			return i + 1;
	}

	return 0;
}

/*
 * Count the number of breakpoints of the same type and same task.
 * The given event must not be on the list.
 */
static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type)
{
	struct perf_event_context *ctx = bp->ctx;
	struct perf_event *iter;
	int count = 0;

	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
		if (iter->ctx == ctx && find_slot_idx(iter) == type)
			count += hw_breakpoint_weight(iter);
	}

	return count;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
		    enum bp_type_idx type)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->ctx->task;

	if (cpu >= 0) {
		slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
		if (!tsk)
			slots->pinned += max_task_bp_pinned(cpu, type);
		else
			slots->pinned += task_bp_pinned(bp, type);
		slots->flexible = per_cpu(nr_bp_flexible[type], cpu);

		return;
	}

	for_each_online_cpu(cpu) {
		unsigned int nr;

		nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
		if (!tsk)
			nr += max_task_bp_pinned(cpu, type);
		else
			nr += task_bp_pinned(bp, type);

		if (nr > slots->pinned)
			slots->pinned = nr;

		nr = per_cpu(nr_bp_flexible[type], cpu);

		if (nr > slots->flexible)
			slots->flexible = nr;
	}
}

/*
 * For now, continue to consider flexible as pinned, until we can
 * ensure no flexible event can ever be scheduled before a pinned event
 * in the same cpu.
 */
static void
fetch_this_slot(struct bp_busy_slots *slots, int weight)
{
	slots->pinned += weight;
}

/*
 * Add a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable,
				enum bp_type_idx type, int weight)
{
	unsigned int *tsk_pinned;
	int old_count = 0;
	int old_idx = 0;
	int idx = 0;

	old_count = task_bp_pinned(bp, type);
	old_idx = old_count - 1;
	idx = old_idx + weight;

	/* tsk_pinned[n] is the number of tasks having n breakpoints */
	tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
	if (enable) {
		tsk_pinned[idx]++;
		if (old_count > 0)
			tsk_pinned[old_idx]--;
	} else {
		tsk_pinned[idx]--;
		if (old_count > 0)
			tsk_pinned[old_idx]++;
	}
}

/*
 * Add/remove the given breakpoint in our constraint table
 */
static void
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
	       int weight)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->ctx->task;

	/* Pinned counter cpu profiling */
	if (!tsk) {

		if (enable)
			per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
		else
			per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
		return;
	}

	/* Pinned counter task profiling */

	if (!enable)
		list_del(&bp->hw.bp_list);

	if (cpu >= 0) {
		toggle_bp_task_slot(bp, cpu, enable, type, weight);
	} else {
		for_each_online_cpu(cpu)
			toggle_bp_task_slot(bp, cpu, enable, type, weight);
	}

	if (enable)
		list_add_tail(&bp->hw.bp_list, &bp_task_head);
}

/*
 * Function to perform processor-specific cleanup during unregistration
 */
__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * A weak stub function here for those archs that don't define
	 * it inside arch/.../kernel/hw_breakpoint.c
	 */
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *           + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't cover all registers.
 *
 *   - If attached to all cpus, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible, if any, must keep
 *          at least one register (or they will never be fed).
 *
 *   - If attached to all cpus, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
 */
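
/*
 * Illustrative walk-through (not from the original source), assuming an
 * arch with nr_slots[TYPE_DATA] == 4 (e.g. x86, where HBP_NUM == 4).
 * Suppose cpu0 already has:
 *
 *	nr_cpu_bp_pinned	= 2
 *	max task-pinned		= 1	(from max_task_bp_pinned())
 *	nr_bp_flexible		= 1
 *
 * A new cpu0-bound pinned breakpoint of weight 1 gives
 * slots.pinned = 2 + 1 + 1 = 4, and since the flexible counter must keep
 * one slot, 4 + !!1 > 4 and __reserve_bp_slot() below returns -ENOSPC.
 */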
static int __reserve_bp_slot(struct perf_event *bp)
{
	struct bp_busy_slots slots = {0};
	enum bp_type_idx type;
	int weight;

	/* We couldn't initialize breakpoint constraints on boot */
	if (!constraints_initialized)
		return -ENOMEM;

	/* Basic checks */
	if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
	    bp->attr.bp_type == HW_BREAKPOINT_INVALID)
		return -EINVAL;

	type = find_slot_idx(bp);
	weight = hw_breakpoint_weight(bp);

	fetch_bp_busy_slots(&slots, bp, type);
	/*
	 * Simulate the addition of this breakpoint to the constraints
	 * and see the result.
	 */
	fetch_this_slot(&slots, weight);

	/* Flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) > nr_slots[type])
		return -ENOSPC;

	toggle_bp_slot(bp, true, type, weight);

	return 0;
}

int reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	mutex_lock(&nr_bp_mutex);

	ret = __reserve_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);

	return ret;
}

static void __release_bp_slot(struct perf_event *bp)
{
	enum bp_type_idx type;
	int weight;

	type = find_slot_idx(bp);
	weight = hw_breakpoint_weight(bp);
	toggle_bp_slot(bp, false, type, weight);
}

void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	arch_unregister_hw_breakpoint(bp);
	__release_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);
}

/*
 * Allow the kernel debugger to reserve breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and
 * release breakpoint slot functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	return __reserve_bp_slot(bp);
}

int dbg_release_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	__release_bp_slot(bp);

	return 0;
}
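
/*
 * Illustrative sketch (not part of this file): a debugger-context caller
 * must treat a non-zero return as "constraint bookkeeping is busy" and
 * back off or retry, since no sleeping lock may be taken here. "bp" is
 * the breakpoint event being installed.
 *
 *	if (dbg_reserve_bp_slot(bp))
 *		return -1;
 */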

static int validate_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = arch_validate_hwbkpt_settings(bp);
	if (ret)
		return ret;

	if (arch_check_bp_in_kernelspace(bp)) {
		if (bp->attr.exclude_kernel)
			return -EINVAL;
		/*
		 * Don't let unprivileged users set a breakpoint in the trap
		 * path to avoid trap recursion attacks.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return 0;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = reserve_bp_slot(bp);
	if (ret)
		return ret;

	ret = validate_hw_breakpoint(bp);

	/* if validate_hw_breakpoint() fails then release bp slot */
	if (ret)
		release_bp_slot(bp);

	return ret;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    struct task_struct *tsk)
{
	return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
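
/*
 * Illustrative usage sketch (not part of this file), assuming the
 * hw_breakpoint_init() helper from <linux/hw_breakpoint.h>, a watched
 * address "addr", a target task "tsk" and an assumed
 * perf_overflow_handler_t callback "my_triggered":
 *
 *	struct perf_event_attr attr;
 *	struct perf_event *bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = addr;
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	bp = register_user_hw_breakpoint(&attr, my_triggered, tsk);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 */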

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
	u64 old_addr = bp->attr.bp_addr;
	u64 old_len = bp->attr.bp_len;
	int old_type = bp->attr.bp_type;
	int err = 0;

	perf_event_disable(bp);

	bp->attr.bp_addr = attr->bp_addr;
	bp->attr.bp_type = attr->bp_type;
	bp->attr.bp_len = attr->bp_len;

	if (attr->disabled)
		goto end;

	err = validate_hw_breakpoint(bp);
	if (!err)
		perf_event_enable(bp);

	if (err) {
		bp->attr.bp_addr = old_addr;
		bp->attr.bp_type = old_type;
		bp->attr.bp_len = old_len;
		if (!bp->attr.disabled)
			perf_event_enable(bp);

		return err;
	}

end:
	bp->attr.disabled = attr->disabled;

	return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
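
/*
 * Illustrative sketch (not part of this file): moving an existing
 * breakpoint "bp" to an assumed new address "new_addr" while keeping
 * its type and length:
 *
 *	struct perf_event_attr attr = bp->attr;
 *	int err;
 *
 *	attr.bp_addr = new_addr;
 *	err = modify_user_hw_breakpoint(bp, &attr);
 *	if (err)
 *		return err;
 */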

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (!bp)
		return;
	perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered)
{
	struct perf_event * __percpu *cpu_events, **pevent, *bp;
	long err;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return (void __percpu __force *)ERR_PTR(-ENOMEM);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);

		*pevent = bp;

		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			goto fail;
		}
	}
	put_online_cpus();

	return cpu_events;

fail:
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		if (IS_ERR(*pevent))
			break;
		unregister_hw_breakpoint(*pevent);
	}
	put_online_cpus();

	free_percpu(cpu_events);
	return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
	int cpu;
	struct perf_event **pevent;

	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		unregister_hw_breakpoint(*pevent);
	}
	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);
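
/*
 * Illustrative sketch (not part of this file), modelled on the
 * samples/hw_breakpoint usage: watch writes to a kernel symbol on every
 * online cpu. "my_wide_bp" and "my_wide_handler" are assumptions.
 *
 *	struct perf_event_attr attr;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = kallsyms_lookup_name("pid_max");
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	my_wide_bp = register_wide_hw_breakpoint(&attr, my_wide_handler);
 *	if (IS_ERR((void __force *)my_wide_bp))
 *		return PTR_ERR((void __force *)my_wide_bp);
 *	...
 *	unregister_wide_hw_breakpoint(my_wide_bp);
 */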

static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

static void bp_perf_event_destroy(struct perf_event *event)
{
	release_bp_slot(event);
}

static int hw_breakpoint_event_init(struct perf_event *bp)
{
	int err;

	if (bp->attr.type != PERF_TYPE_BREAKPOINT)
		return -ENOENT;

	err = register_perf_hw_breakpoint(bp);
	if (err)
		return err;

	bp->destroy = bp_perf_event_destroy;

	return 0;
}

static struct pmu perf_breakpoint = {
	.event_init	= hw_breakpoint_event_init,
	.enable		= arch_install_hw_breakpoint,
	.disable	= arch_uninstall_hw_breakpoint,
	.read		= hw_breakpoint_pmu_read,
};

static int __init init_hw_breakpoint(void)
{
	unsigned int **task_bp_pinned;
	int cpu, err_cpu;
	int i;

	for (i = 0; i < TYPE_MAX; i++)
		nr_slots[i] = hw_breakpoint_slots(i);

	for_each_possible_cpu(cpu) {
		for (i = 0; i < TYPE_MAX; i++) {
			task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu);
			*task_bp_pinned = kzalloc(sizeof(int) * nr_slots[i],
						  GFP_KERNEL);
			if (!*task_bp_pinned)
				goto err_alloc;
		}
	}

	constraints_initialized = 1;

	perf_pmu_register(&perf_breakpoint);

	return register_die_notifier(&hw_breakpoint_exceptions_nb);

 err_alloc:
	for_each_possible_cpu(err_cpu) {
		if (err_cpu == cpu)
			break;
		for (i = 0; i < TYPE_MAX; i++)
			kfree(per_cpu(nr_task_bp_pinned[i], err_cpu));
	}

	return -ENOMEM;
}
core_initcall(init_hw_breakpoint);