/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>

/*
 * Constraints data
 */

/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]);

/* Number of pinned task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int *, nr_task_bp_pinned[TYPE_MAX]);

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);

static int nr_slots[TYPE_MAX];

/* Keep track of the breakpoints attached to tasks */
static LIST_HEAD(bp_task_head);

static int constraints_initialized;

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

__weak int hw_breakpoint_weight(struct perf_event *bp)
{
	return 1;
}

static inline enum bp_type_idx find_slot_idx(struct perf_event *bp)
{
	if (bp->attr.bp_type & HW_BREAKPOINT_RW)
		return TYPE_DATA;

	return TYPE_INST;
}

/*
 * Report the maximum number of pinned breakpoints a task
 * has in this cpu
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
	int i;
	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);

	for (i = nr_slots[type] - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)
			return i + 1;
	}

	return 0;
}
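
/*
 * For example (illustrative values): with 4 slots and
 * tsk_pinned = {2, 1, 0, 0}, two tasks own exactly one pinned breakpoint
 * on this cpu and one task owns exactly two, so the highest non-zero
 * bucket is index 1 and the function returns 2.
 */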

/*
 * Count the number of breakpoints of the same type and same task.
 * The given event must not be on the list.
 */
static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
{
	struct task_struct *tsk = bp->hw.bp_target;
	struct perf_event *iter;
	int count = 0;

	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
		if (iter->hw.bp_target == tsk &&
		    find_slot_idx(iter) == type &&
		    (iter->cpu < 0 || cpu == iter->cpu))
			count += hw_breakpoint_weight(iter);
	}

	return count;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
		    enum bp_type_idx type)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->hw.bp_target;

	if (cpu >= 0) {
		slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
		if (!tsk)
			slots->pinned += max_task_bp_pinned(cpu, type);
		else
			slots->pinned += task_bp_pinned(cpu, bp, type);
		slots->flexible = per_cpu(nr_bp_flexible[type], cpu);

		return;
	}

	for_each_possible_cpu(cpu) {
		unsigned int nr;

		nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
		if (!tsk)
			nr += max_task_bp_pinned(cpu, type);
		else
			nr += task_bp_pinned(cpu, bp, type);

		if (nr > slots->pinned)
			slots->pinned = nr;

		nr = per_cpu(nr_bp_flexible[type], cpu);

		if (nr > slots->flexible)
			slots->flexible = nr;
	}
}

/*
 * For now, continue to consider flexible as pinned, until we can
 * ensure no flexible event can ever be scheduled before a pinned event
 * on the same cpu.
 */
static void
fetch_this_slot(struct bp_busy_slots *slots, int weight)
{
	slots->pinned += weight;
}

/*
 * Add a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct perf_event *bp, int cpu,
				enum bp_type_idx type, int weight)
{
	/* tsk_pinned[n-1] is the number of tasks having n>0 breakpoints */
	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
	int old_idx, new_idx;

	old_idx = task_bp_pinned(cpu, bp, type) - 1;
	new_idx = old_idx + weight;

	if (old_idx >= 0)
		tsk_pinned[old_idx]--;
	if (new_idx >= 0)
		tsk_pinned[new_idx]++;
}
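
/*
 * For example (illustrative values): a task that already owns two pinned
 * breakpoints on this cpu and adds one more of weight 1 moves from bucket
 * tsk_pinned[1] to tsk_pinned[2]; the task's entry in the per-cpu
 * histogram is shifted by the breakpoint's weight.
 */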

/*
 * Add/remove the given breakpoint in our constraint table
 */
static void
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
	       int weight)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->hw.bp_target;

	if (!enable)
		weight = -weight;

	/* Pinned counter cpu profiling */
	if (!tsk) {
		per_cpu(nr_cpu_bp_pinned[type], cpu) += weight;
		return;
	}

	/* Pinned counter task profiling */
	if (cpu >= 0) {
		toggle_bp_task_slot(bp, cpu, type, weight);
	} else {
		for_each_possible_cpu(cpu)
			toggle_bp_task_slot(bp, cpu, type, weight);
	}

	if (enable)
		list_add_tail(&bp->hw.bp_list, &bp_task_head);
	else
		list_del(&bp->hw.bp_list);
}

/*
 * Function to perform processor-specific cleanup during unregistration
 */
__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * A weak stub function here for those archs that don't define
	 * it inside arch/.../kernel/hw_breakpoint.c
	 */
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *           + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't cover every register.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible, if any, must
 *          keep at least one register (or they will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
 */
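/*
 * Worked example (illustrative numbers, assuming an arch with 4 data
 * breakpoint slots): a cpu already has 2 pinned cpu-wide breakpoints,
 * its most loaded task pins 1 more, and 1 flexible breakpoint exists.
 * A new cpu-wide pinned request of weight 1 then gives
 * slots.pinned + !!slots.flexible = (2 + 1 + 1) + 1 = 5 > 4, so
 * __reserve_bp_slot() below returns -ENOSPC.
 */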
static int __reserve_bp_slot(struct perf_event *bp)
{
	struct bp_busy_slots slots = {0};
	enum bp_type_idx type;
	int weight;

	/* We couldn't initialize breakpoint constraints on boot */
	if (!constraints_initialized)
		return -ENOMEM;

	/* Basic checks */
	if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
	    bp->attr.bp_type == HW_BREAKPOINT_INVALID)
		return -EINVAL;

	type = find_slot_idx(bp);
	weight = hw_breakpoint_weight(bp);

	fetch_bp_busy_slots(&slots, bp, type);
	/*
	 * Simulate the addition of this breakpoint to the constraints
	 * and see the result.
	 */
	fetch_this_slot(&slots, weight);

	/* Flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) > nr_slots[type])
		return -ENOSPC;

	toggle_bp_slot(bp, true, type, weight);

	return 0;
}

int reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	mutex_lock(&nr_bp_mutex);

	ret = __reserve_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);

	return ret;
}

static void __release_bp_slot(struct perf_event *bp)
{
	enum bp_type_idx type;
	int weight;

	type = find_slot_idx(bp);
	weight = hw_breakpoint_weight(bp);
	toggle_bp_slot(bp, false, type, weight);
}

void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	arch_unregister_hw_breakpoint(bp);
	__release_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);
}

/*
 * Allow the kernel debugger to reserve breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and
 * release breakpoint slot functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	return __reserve_bp_slot(bp);
}

int dbg_release_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	__release_bp_slot(bp);

	return 0;
}

static int validate_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = arch_validate_hwbkpt_settings(bp);
	if (ret)
		return ret;

	if (arch_check_bp_in_kernelspace(bp)) {
		if (bp->attr.exclude_kernel)
			return -EINVAL;
		/*
		 * Don't let unprivileged users set a breakpoint in the trap
		 * path to avoid trap recursion attacks.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return 0;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = reserve_bp_slot(bp);
	if (ret)
		return ret;

	ret = validate_hw_breakpoint(bp);

	/* if validate_hw_breakpoint() fails then release bp slot */
	if (ret)
		release_bp_slot(bp);

	return ret;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data passed to the triggered callback
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context,
			    struct task_struct *tsk)
{
	return perf_event_create_kernel_counter(attr, -1, tsk, triggered,
						context);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
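
/*
 * Usage sketch (illustrative; 'addr', 'my_triggered' and 'tsk' are
 * caller-provided): build the attributes with hw_breakpoint_init() and
 * register the event against the traced task, e.g.
 *
 *	struct perf_event_attr attr;
 *	struct perf_event *bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = addr;
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *	bp = register_user_hw_breakpoint(&attr, my_triggered, NULL, tsk);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 */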

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
	u64 old_addr = bp->attr.bp_addr;
	u64 old_len = bp->attr.bp_len;
	int old_type = bp->attr.bp_type;
	int err = 0;

	/*
	 * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
	 * will not be possible to raise IPIs that invoke __perf_event_disable.
	 * So call the function directly after making sure we are targeting the
	 * current task.
	 */
	if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
		__perf_event_disable(bp);
	else
		perf_event_disable(bp);

	bp->attr.bp_addr = attr->bp_addr;
	bp->attr.bp_type = attr->bp_type;
	bp->attr.bp_len = attr->bp_len;

	if (attr->disabled)
		goto end;

	err = validate_hw_breakpoint(bp);
	if (!err)
		perf_event_enable(bp);

	if (err) {
		bp->attr.bp_addr = old_addr;
		bp->attr.bp_type = old_type;
		bp->attr.bp_len = old_len;
		if (!bp->attr.disabled)
			perf_event_enable(bp);

		return err;
	}

end:
	bp->attr.disabled = attr->disabled;

	return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
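
/*
 * Usage sketch (illustrative; 'new_addr' is caller-provided): callers
 * typically copy the current attributes, adjust them and hand them back:
 *
 *	struct perf_event_attr attr = bp->attr;
 *
 *	attr.bp_addr = new_addr;
 *	err = modify_user_hw_breakpoint(bp, &attr);
 */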

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (!bp)
		return;
	perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data passed to the triggered callback
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context)
{
	struct perf_event * __percpu *cpu_events, **pevent, *bp;
	long err;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return (void __percpu __force *)ERR_PTR(-ENOMEM);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		bp = perf_event_create_kernel_counter(attr, cpu, NULL,
						      triggered, context);

		*pevent = bp;

		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			goto fail;
		}
	}
	put_online_cpus();

	return cpu_events;

fail:
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		if (IS_ERR(*pevent))
			break;
		unregister_hw_breakpoint(*pevent);
	}
	put_online_cpus();

	free_percpu(cpu_events);
	return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
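
/*
 * Usage sketch (illustrative, loosely modelled on
 * samples/hw_breakpoint/data_breakpoint.c; 'wp_handler' is caller-provided):
 * watch a kernel symbol for read/write access on every online cpu:
 *
 *	struct perf_event_attr attr;
 *	struct perf_event * __percpu *wp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = kallsyms_lookup_name("pid_max");
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
 *	wp = register_wide_hw_breakpoint(&attr, wp_handler, NULL);
 *	if (IS_ERR((void __force *)wp))
 *		return PTR_ERR((void __force *)wp);
 */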

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
	int cpu;
	struct perf_event **pevent;

	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		unregister_hw_breakpoint(*pevent);
	}
	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

static void bp_perf_event_destroy(struct perf_event *event)
{
	release_bp_slot(event);
}

static int hw_breakpoint_event_init(struct perf_event *bp)
{
	int err;

	if (bp->attr.type != PERF_TYPE_BREAKPOINT)
		return -ENOENT;

	/*
	 * no branch sampling for breakpoint events
	 */
	if (has_branch_stack(bp))
		return -EOPNOTSUPP;

	err = register_perf_hw_breakpoint(bp);
	if (err)
		return err;

	bp->destroy = bp_perf_event_destroy;

	return 0;
}

static int hw_breakpoint_add(struct perf_event *bp, int flags)
{
	if (!(flags & PERF_EF_START))
		bp->hw.state = PERF_HES_STOPPED;

	if (is_sampling_event(bp)) {
		bp->hw.last_period = bp->hw.sample_period;
		perf_swevent_set_period(bp);
	}

	return arch_install_hw_breakpoint(bp);
}

static void hw_breakpoint_del(struct perf_event *bp, int flags)
{
	arch_uninstall_hw_breakpoint(bp);
}

static void hw_breakpoint_start(struct perf_event *bp, int flags)
{
	bp->hw.state = 0;
}

static void hw_breakpoint_stop(struct perf_event *bp, int flags)
{
	bp->hw.state = PERF_HES_STOPPED;
}

static int hw_breakpoint_event_idx(struct perf_event *bp)
{
	return 0;
}

static struct pmu perf_breakpoint = {
	.task_ctx_nr	= perf_sw_context, /* could eventually get its own */

	.event_init	= hw_breakpoint_event_init,
	.add		= hw_breakpoint_add,
	.del		= hw_breakpoint_del,
	.start		= hw_breakpoint_start,
	.stop		= hw_breakpoint_stop,
	.read		= hw_breakpoint_pmu_read,

	.event_idx	= hw_breakpoint_event_idx,
};

int __init init_hw_breakpoint(void)
{
	unsigned int **task_bp_pinned;
	int cpu, err_cpu;
	int i;

	for (i = 0; i < TYPE_MAX; i++)
		nr_slots[i] = hw_breakpoint_slots(i);

	for_each_possible_cpu(cpu) {
		for (i = 0; i < TYPE_MAX; i++) {
			task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu);
			*task_bp_pinned = kzalloc(sizeof(int) * nr_slots[i],
						  GFP_KERNEL);
			if (!*task_bp_pinned)
				goto err_alloc;
		}
	}

	constraints_initialized = 1;

	perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT);

	return register_die_notifier(&hw_breakpoint_exceptions_nb);

 err_alloc:
	for_each_possible_cpu(err_cpu) {
		for (i = 0; i < TYPE_MAX; i++)
			kfree(per_cpu(nr_task_bp_pinned[i], err_cpu));
		if (err_cpu == cpu)
			break;
	}

	return -ENOMEM;
}