/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/bug.h>

#include <linux/hw_breakpoint.h>

/*
 * Constraints data
 */
struct bp_cpuinfo {
	/* Number of pinned cpu breakpoints in a cpu */
	unsigned int	cpu_pinned;
	/* tsk_pinned[n] is the number of tasks having n+1 breakpoints */
	unsigned int	*tsk_pinned;
	/* Number of non-pinned cpu/task breakpoints in a cpu */
	unsigned int	flexible; /* XXX: placeholder, see fetch_this_slot() */
};

static DEFINE_PER_CPU(struct bp_cpuinfo, bp_cpuinfo[TYPE_MAX]);
static int nr_slots[TYPE_MAX];

static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type)
{
	return per_cpu_ptr(bp_cpuinfo + type, cpu);
}

/* Keep track of the breakpoints attached to tasks */
static LIST_HEAD(bp_task_head);

static int constraints_initialized;

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

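/*
 * Number of debug register slots consumed by one breakpoint event.
 * The default weight is 1; being __weak, an architecture can override it.
 */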
__weak int hw_breakpoint_weight(struct perf_event *bp)
{
	return 1;
}

static inline enum bp_type_idx find_slot_idx(u64 bp_type)
{
	if (bp_type & HW_BREAKPOINT_RW)
		return TYPE_DATA;

	return TYPE_INST;
}

/*
 * Report the maximum number of pinned breakpoints a task
 * has in this cpu
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
	unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
	int i;

	for (i = nr_slots[type] - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)
			return i + 1;
	}

	return 0;
}

/*
 * Count the number of breakpoints of the same type and same task.
 * The given event must not be on the list.
 */
static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
{
	struct task_struct *tsk = bp->hw.target;
	struct perf_event *iter;
	int count = 0;

	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
		if (iter->hw.target == tsk &&
		    find_slot_idx(iter->attr.bp_type) == type &&
		    (iter->cpu < 0 || cpu == iter->cpu))
			count += hw_breakpoint_weight(iter);
	}

	return count;
}

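/*
 * Return the cpus a breakpoint is constrained to: the pinned cpu for
 * cpu-bound events, or every possible cpu for task-bound events.
 */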
static const struct cpumask *cpumask_of_bp(struct perf_event *bp)
{
	if (bp->cpu >= 0)
		return cpumask_of(bp->cpu);
	return cpu_possible_mask;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
		    enum bp_type_idx type)
{
	const struct cpumask *cpumask = cpumask_of_bp(bp);
	int cpu;

	for_each_cpu(cpu, cpumask) {
		struct bp_cpuinfo *info = get_bp_info(cpu, type);
		int nr;

		nr = info->cpu_pinned;
		if (!bp->hw.target)
			nr += max_task_bp_pinned(cpu, type);
		else
			nr += task_bp_pinned(cpu, bp, type);

		if (nr > slots->pinned)
			slots->pinned = nr;

		nr = info->flexible;
		if (nr > slots->flexible)
			slots->flexible = nr;
	}
}

/*
 * For now, continue to consider flexible as pinned, until we can
 * ensure no flexible event can ever be scheduled before a pinned event
 * on the same cpu.
 */
static void
fetch_this_slot(struct bp_busy_slots *slots, int weight)
{
	slots->pinned += weight;
}

/*
 * Add/remove a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct perf_event *bp, int cpu,
				enum bp_type_idx type, int weight)
{
	unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
	int old_idx, new_idx;

	old_idx = task_bp_pinned(cpu, bp, type) - 1;
	new_idx = old_idx + weight;

	if (old_idx >= 0)
		tsk_pinned[old_idx]--;
	if (new_idx >= 0)
		tsk_pinned[new_idx]++;
}

/*
 * Add/remove the given breakpoint in our constraint table
 */
static void
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
	       int weight)
{
	const struct cpumask *cpumask = cpumask_of_bp(bp);
	int cpu;

	if (!enable)
		weight = -weight;

	/* Pinned counter cpu profiling */
	if (!bp->hw.target) {
		get_bp_info(bp->cpu, type)->cpu_pinned += weight;
		return;
	}

	/* Pinned counter task profiling */
	for_each_cpu(cpu, cpumask)
		toggle_bp_task_slot(bp, cpu, type, weight);

	if (enable)
		list_add_tail(&bp->hw.bp_list, &bp_task_head);
	else
		list_del(&bp->hw.bp_list);
}

/*
 * Function to perform processor-specific cleanup during unregistration
 */
__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * A weak stub function here for those archs that don't define
	 * it inside arch/.../kernel/hw_breakpoint.c
	 */
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(info->flexible, cpu) || (per_cpu(info->cpu_pinned, cpu)
 *           + max(per_cpu(info->tsk_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't cover every register.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(info->flexible, *) || (max(per_cpu(info->cpu_pinned, *))
 *           + max(per_cpu(info->tsk_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(info->flexible, cpu) > 1) + per_cpu(info->cpu_pinned, cpu)
 *            + max(per_cpu(info->tsk_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the info->flexible, if any, must keep
 *          at least one register (or they will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(info->flexible, *) > 1) + max(per_cpu(info->cpu_pinned, *))
 *            + max(per_cpu(info->tsk_pinned, *))) < HBP_NUM
 */
static int __reserve_bp_slot(struct perf_event *bp, u64 bp_type)
{
	struct bp_busy_slots slots = {0};
	enum bp_type_idx type;
	int weight;

	/* We couldn't initialize breakpoint constraints on boot */
	if (!constraints_initialized)
		return -ENOMEM;

	/* Basic checks */
	if (bp_type == HW_BREAKPOINT_EMPTY ||
	    bp_type == HW_BREAKPOINT_INVALID)
		return -EINVAL;

	type = find_slot_idx(bp_type);
	weight = hw_breakpoint_weight(bp);

	fetch_bp_busy_slots(&slots, bp, type);
	/*
	 * Simulate the addition of this breakpoint to the constraints
	 * and see the result.
	 */
	fetch_this_slot(&slots, weight);

	/* Flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) > nr_slots[type])
		return -ENOSPC;

	toggle_bp_slot(bp, true, type, weight);

	return 0;
}

int reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	mutex_lock(&nr_bp_mutex);

	ret = __reserve_bp_slot(bp, bp->attr.bp_type);

	mutex_unlock(&nr_bp_mutex);

	return ret;
}

static void __release_bp_slot(struct perf_event *bp, u64 bp_type)
{
	enum bp_type_idx type;
	int weight;

	type = find_slot_idx(bp_type);
	weight = hw_breakpoint_weight(bp);
	toggle_bp_slot(bp, false, type, weight);
}

void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	arch_unregister_hw_breakpoint(bp);
	__release_bp_slot(bp, bp->attr.bp_type);

	mutex_unlock(&nr_bp_mutex);
}

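/*
 * Move the slot accounting of @bp from @old_type to the type currently in
 * bp->attr, re-reserving the old slot if the new type cannot fit.
 */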
static int __modify_bp_slot(struct perf_event *bp, u64 old_type)
{
	int err;

	__release_bp_slot(bp, old_type);

	err = __reserve_bp_slot(bp, bp->attr.bp_type);
	if (err) {
		/*
		 * Reserve the old_type slot back in case
		 * there's no space for the new type.
		 *
		 * This must succeed, because we just released
		 * the old_type slot in the __release_bp_slot
		 * call above. If not, something is broken.
		 */
		WARN_ON(__reserve_bp_slot(bp, old_type));
	}

	return err;
}

static int modify_bp_slot(struct perf_event *bp, u64 old_type)
{
	int ret;

	mutex_lock(&nr_bp_mutex);
	ret = __modify_bp_slot(bp, old_type);
	mutex_unlock(&nr_bp_mutex);
	return ret;
}

/*
 * Allow the kernel debugger to reserve breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and
 * release breakpoint slot functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	return __reserve_bp_slot(bp, bp->attr.bp_type);
}

int dbg_release_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	__release_bp_slot(bp, bp->attr.bp_type);

	return 0;
}

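/*
 * Validate the breakpoint attributes with the arch parser and apply the
 * permission checks for breakpoints placed in kernel space.
 */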
static int hw_breakpoint_parse(struct perf_event *bp,
			       const struct perf_event_attr *attr,
			       struct arch_hw_breakpoint *hw)
{
	int err;

	err = hw_breakpoint_arch_parse(bp, attr, hw);
	if (err)
		return err;

	if (arch_check_bp_in_kernelspace(hw)) {
		if (attr->exclude_kernel)
			return -EINVAL;
		/*
		 * Don't let unprivileged users set a breakpoint in the trap
		 * path to avoid trap recursion attacks.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return 0;
}

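/*
 * Reserve a slot and compute the arch-specific breakpoint info for a
 * perf-created breakpoint event, releasing the slot on failure.
 */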
int register_perf_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint hw;
	int err;

	err = reserve_bp_slot(bp);
	if (err)
		return err;

	err = hw_breakpoint_parse(bp, &bp->attr, &hw);
	if (err) {
		release_bp_slot(bp);
		return err;
	}

	bp->hw.info = hw;

	return 0;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data could be used in the trigger callback
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context,
			    struct task_struct *tsk)
{
	return perf_event_create_kernel_counter(attr, -1, tsk, triggered,
						context);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);

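/*
 * Apply the new attributes to @bp. With @check set, refuse the update if
 * anything other than the address, length or type would change, and roll
 * everything back if the new settings cannot be validated or re-slotted.
 */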
int
modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
				bool check)
{
	u64 old_addr = bp->attr.bp_addr;
	u64 old_len  = bp->attr.bp_len;
	int old_type = bp->attr.bp_type;
	bool modify  = attr->bp_type != old_type;
	struct arch_hw_breakpoint hw;
	int err = 0;

	bp->attr.bp_addr = attr->bp_addr;
	bp->attr.bp_type = attr->bp_type;
	bp->attr.bp_len  = attr->bp_len;

	if (check && memcmp(&bp->attr, attr, sizeof(*attr)))
		return -EINVAL;

	err = hw_breakpoint_parse(bp, attr, &hw);
	if (!err && modify)
		err = modify_bp_slot(bp, old_type);

	if (err) {
		bp->attr.bp_addr = old_addr;
		bp->attr.bp_type = old_type;
		bp->attr.bp_len  = old_len;
		return err;
	}

	bp->hw.info = hw;
	bp->attr.disabled = attr->disabled;

	return 0;
}

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
	/*
	 * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
	 * will not be possible to raise IPIs that invoke __perf_event_disable.
	 * So call the function directly after making sure we are targeting the
	 * current task.
	 */
	if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
		perf_event_disable_local(bp);
	else
		perf_event_disable(bp);

	if (!attr->disabled) {
		int err = modify_user_hw_breakpoint_check(bp, attr, false);

		if (err)
			return err;
		perf_event_enable(bp);
		bp->attr.disabled = 0;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (!bp)
		return;
	perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

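/*
 * A minimal usage sketch (error handling omitted; watched_variable and
 * my_handler are placeholders); see also samples/hw_breakpoint/data_breakpoint.c:
 *
 *	struct perf_event_attr attr;
 *	struct perf_event * __percpu *bps;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = (unsigned long)&watched_variable;
 *	attr.bp_len = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *	bps = register_wide_hw_breakpoint(&attr, my_handler, NULL);
 */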
/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data could be used in the trigger callback
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context)
{
	struct perf_event * __percpu *cpu_events, *bp;
	long err = 0;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return (void __percpu __force *)ERR_PTR(-ENOMEM);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		bp = perf_event_create_kernel_counter(attr, cpu, NULL,
						      triggered, context);
		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			break;
		}

		per_cpu(*cpu_events, cpu) = bp;
	}
	put_online_cpus();

	if (likely(!err))
		return cpu_events;

	unregister_wide_hw_breakpoint(cpu_events);
	return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
	int cpu;

	for_each_possible_cpu(cpu)
		unregister_hw_breakpoint(per_cpu(*cpu_events, cpu));

	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

static void bp_perf_event_destroy(struct perf_event *event)
{
	release_bp_slot(event);
}

static int hw_breakpoint_event_init(struct perf_event *bp)
{
	int err;

	if (bp->attr.type != PERF_TYPE_BREAKPOINT)
		return -ENOENT;

	/*
	 * no branch sampling for breakpoint events
	 */
	if (has_branch_stack(bp))
		return -EOPNOTSUPP;

	err = register_perf_hw_breakpoint(bp);
	if (err)
		return err;

	bp->destroy = bp_perf_event_destroy;

	return 0;
}

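/*
 * PMU callbacks: ->add installs the breakpoint in the cpu's debug registers
 * (setting up the sample period first for sampling events) and ->del removes
 * it again.
 */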
static int hw_breakpoint_add(struct perf_event *bp, int flags)
{
	if (!(flags & PERF_EF_START))
		bp->hw.state = PERF_HES_STOPPED;

	if (is_sampling_event(bp)) {
		bp->hw.last_period = bp->hw.sample_period;
		perf_swevent_set_period(bp);
	}

	return arch_install_hw_breakpoint(bp);
}

static void hw_breakpoint_del(struct perf_event *bp, int flags)
{
	arch_uninstall_hw_breakpoint(bp);
}

static void hw_breakpoint_start(struct perf_event *bp, int flags)
{
	bp->hw.state = 0;
}

static void hw_breakpoint_stop(struct perf_event *bp, int flags)
{
	bp->hw.state = PERF_HES_STOPPED;
}

static struct pmu perf_breakpoint = {
	.task_ctx_nr	= perf_sw_context, /* could eventually get its own */

	.event_init	= hw_breakpoint_event_init,
	.add		= hw_breakpoint_add,
	.del		= hw_breakpoint_del,
	.start		= hw_breakpoint_start,
	.stop		= hw_breakpoint_stop,
	.read		= hw_breakpoint_pmu_read,
};

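/*
 * Allocate the per-cpu task-pinned slot histograms, then register the
 * breakpoint PMU and the exception notifier.
 */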
int __init init_hw_breakpoint(void)
{
	int cpu, err_cpu;
	int i;

	for (i = 0; i < TYPE_MAX; i++)
		nr_slots[i] = hw_breakpoint_slots(i);

	for_each_possible_cpu(cpu) {
		for (i = 0; i < TYPE_MAX; i++) {
			struct bp_cpuinfo *info = get_bp_info(cpu, i);

			info->tsk_pinned = kcalloc(nr_slots[i], sizeof(int),
							GFP_KERNEL);
			if (!info->tsk_pinned)
				goto err_alloc;
		}
	}

	constraints_initialized = 1;

	perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT);

	return register_die_notifier(&hw_breakpoint_exceptions_nb);

 err_alloc:
	for_each_possible_cpu(err_cpu) {
		for (i = 0; i < TYPE_MAX; i++)
			kfree(get_bp_info(err_cpu, i)->tsk_pinned);
		if (err_cpu == cpu)
			break;
	}

	return -ENOMEM;
}