/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>

/*
 * Constraints data
 */

/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);

/*
 * Number of pinned task breakpoints in a cpu
 * (slot n counts the tasks that have n + 1 pinned breakpoints there)
 */
static DEFINE_PER_CPU(unsigned int, nr_task_bp_pinned[HBP_NUM]);

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);

/* Gather the total number of pinned and un-pinned breakpoints in a cpu set */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

/*
 * Report the maximum number of pinned breakpoints a task
 * has in this cpu
 */
static unsigned int max_task_bp_pinned(int cpu)
{
	int i;
	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);

	for (i = HBP_NUM - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)
			return i + 1;
	}

	return 0;
}

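/*
 * Count the number of breakpoint events already attached to the
 * perf event context of the given task.
 */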
static int task_bp_pinned(struct task_struct *tsk)
{
	struct perf_event_context *ctx = tsk->perf_event_ctxp;
	struct list_head *list;
	struct perf_event *bp;
	unsigned long flags;
	int count = 0;

	if (WARN_ONCE(!ctx, "No perf context for this task"))
		return 0;

	list = &ctx->event_list;

	raw_spin_lock_irqsave(&ctx->lock, flags);

	/*
	 * The current breakpoint counter is not included in the list
	 * at the open() callback time
	 */
	list_for_each_entry(bp, list, event_entry) {
		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
			count++;
	}

	raw_spin_unlock_irqrestore(&ctx->lock, flags);

	return count;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->ctx->task;

	if (cpu >= 0) {
		slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
		if (!tsk)
			slots->pinned += max_task_bp_pinned(cpu);
		else
			slots->pinned += task_bp_pinned(tsk);
		slots->flexible = per_cpu(nr_bp_flexible, cpu);

		return;
	}

	for_each_online_cpu(cpu) {
		unsigned int nr;

		nr = per_cpu(nr_cpu_bp_pinned, cpu);
		if (!tsk)
			nr += max_task_bp_pinned(cpu);
		else
			nr += task_bp_pinned(tsk);

		if (nr > slots->pinned)
			slots->pinned = nr;

		nr = per_cpu(nr_bp_flexible, cpu);

		if (nr > slots->flexible)
			slots->flexible = nr;
	}
}

/*
 * Add or remove a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
{
	unsigned int *tsk_pinned;
	int count = 0;

	count = task_bp_pinned(tsk);

	tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
	if (enable) {
		tsk_pinned[count]++;
		if (count > 0)
			tsk_pinned[count-1]--;
	} else {
		tsk_pinned[count]--;
		if (count > 0)
			tsk_pinned[count-1]++;
	}
}

/*
 * Add/remove the given breakpoint in our constraint table
 */
static void toggle_bp_slot(struct perf_event *bp, bool enable)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->ctx->task;

	/* Pinned counter task profiling */
	if (tsk) {
		if (cpu >= 0) {
			toggle_bp_task_slot(tsk, cpu, enable);
			return;
		}

		for_each_online_cpu(cpu)
			toggle_bp_task_slot(tsk, cpu, enable);
		return;
	}

	/* Pinned counter cpu profiling */
	if (enable)
		per_cpu(nr_cpu_bp_pinned, bp->cpu)++;
	else
		per_cpu(nr_cpu_bp_pinned, bp->cpu)--;
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *           + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't cover every register.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible, if any, must
 *          keep at least one register (or they will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
 */
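/*
 * Worked example (assuming HBP_NUM == 4, as on x86): if the target cpu
 * already has 2 pinned per-cpu breakpoints, the target task already has
 * 1 pinned breakpoint there and at least one flexible breakpoint exists,
 * then slots.pinned + !!slots.flexible = (2 + 1) + 1 = HBP_NUM and the
 * new pinned request below is refused, otherwise the flexible counters
 * would never get a debug register to run on.
 */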
static int __reserve_bp_slot(struct perf_event *bp)
{
	struct bp_busy_slots slots = {0};

	fetch_bp_busy_slots(&slots, bp);

	/* Flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) == HBP_NUM)
		return -ENOSPC;

	toggle_bp_slot(bp, true);

	return 0;
}
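
/*
 * Reserve a slot for this breakpoint, serialized against other
 * constraint-table updates by nr_bp_mutex.
 */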

int reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	mutex_lock(&nr_bp_mutex);

	ret = __reserve_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);

	return ret;
}

static void __release_bp_slot(struct perf_event *bp)
{
	toggle_bp_slot(bp, false);
}

void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	__release_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);
}

/*
 * Allow the kernel debugger to reserve breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and
 * release breakpoint slot functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	return __reserve_bp_slot(bp);
}

int dbg_release_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	__release_bp_slot(bp);

	return 0;
}

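/*
 * Check the arch-specific breakpoint settings and forbid unprivileged
 * or self-contradictory breakpoints on kernel-space addresses.
 */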
static int validate_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = arch_validate_hwbkpt_settings(bp);
	if (ret)
		return ret;

	if (arch_check_bp_in_kernelspace(bp)) {
		if (bp->attr.exclude_kernel)
			return -EINVAL;
		/*
		 * Don't let unprivileged users set a breakpoint in the trap
		 * path to avoid trap recursion attacks.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return 0;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = reserve_bp_slot(bp);
	if (ret)
		return ret;

	ret = validate_hw_breakpoint(bp);

	/* if validate_hw_breakpoint() fails then release bp slot */
	if (ret)
		release_bp_slot(bp);

	return ret;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    struct task_struct *tsk)
{
	return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
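
/*
 * Example usage (a minimal sketch; the variable, handler and task names
 * here are hypothetical):
 *
 *	struct perf_event_attr attr;
 *	struct perf_event *bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = (unsigned long)&watched_var;
 *	attr.bp_len = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	bp = register_user_hw_breakpoint(&attr, watch_triggered, tsk);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 */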

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
	u64 old_addr = bp->attr.bp_addr;
	u64 old_len = bp->attr.bp_len;
	int old_type = bp->attr.bp_type;
	int err = 0;

	perf_event_disable(bp);

	bp->attr.bp_addr = attr->bp_addr;
	bp->attr.bp_type = attr->bp_type;
	bp->attr.bp_len = attr->bp_len;

	if (attr->disabled)
		goto end;

	err = validate_hw_breakpoint(bp);
	if (!err)
		perf_event_enable(bp);

	if (err) {
		bp->attr.bp_addr = old_addr;
		bp->attr.bp_type = old_type;
		bp->attr.bp_len = old_len;
		if (!bp->attr.disabled)
			perf_event_enable(bp);

		return err;
	}

end:
	bp->attr.disabled = attr->disabled;

	return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (!bp)
		return;
	perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered)
{
	struct perf_event * __percpu *cpu_events, **pevent, *bp;
	long err;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return (void __percpu __force *)ERR_PTR(-ENOMEM);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);

		*pevent = bp;

		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			goto fail;
		}
	}
	put_online_cpus();

	return cpu_events;

fail:
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		if (IS_ERR(*pevent))
			break;
		unregister_hw_breakpoint(*pevent);
	}
	put_online_cpus();

	free_percpu(cpu_events);
	return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
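
/*
 * Example usage (a minimal sketch; the attr setup is the same as for
 * register_user_hw_breakpoint() and the handler name is hypothetical):
 *
 *	struct perf_event * __percpu *events;
 *
 *	events = register_wide_hw_breakpoint(&attr, wide_triggered);
 *	if (IS_ERR((void __force *)events))
 *		return PTR_ERR((void __force *)events);
 */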

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
	int cpu;
	struct perf_event **pevent;

	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		unregister_hw_breakpoint(*pevent);
	}
	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

static int __init init_hw_breakpoint(void)
{
	return register_die_notifier(&hw_breakpoint_exceptions_nb);
}
core_initcall(init_hw_breakpoint);


struct pmu perf_ops_bp = {
	.enable		= arch_install_hw_breakpoint,
	.disable	= arch_uninstall_hw_breakpoint,
	.read		= hw_breakpoint_pmu_read,
};