/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>

/*
 * Constraints data
 */

/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);

/* Number of pinned task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_task_bp_pinned[HBP_NUM]);
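/*
 * Note on the layout above (descriptive comment, derived from the update
 * logic in toggle_bp_task_slot() below): nr_task_bp_pinned[n] counts the
 * tasks that currently own n + 1 pinned breakpoints on this cpu. For
 * example, two tasks with one breakpoint each plus one task with three
 * breakpoints would give, with HBP_NUM == 4:
 *
 *	nr_task_bp_pinned == { 2, 0, 1, 0 }
 */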

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);

/* Gather the total number of pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

/*
 * Report the maximum number of pinned breakpoints a task
 * has on this cpu
 */
static unsigned int max_task_bp_pinned(int cpu)
{
	int i;
	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);

	for (i = HBP_NUM - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)
			return i + 1;
	}

	return 0;
}

static int task_bp_pinned(struct task_struct *tsk)
{
	struct perf_event_context *ctx = tsk->perf_event_ctxp;
	struct list_head *list;
	struct perf_event *bp;
	unsigned long flags;
	int count = 0;

	if (WARN_ONCE(!ctx, "No perf context for this task"))
		return 0;

	list = &ctx->event_list;

	spin_lock_irqsave(&ctx->lock, flags);

	/*
	 * The current breakpoint counter is not included in the list
	 * at the open() callback time
	 */
	list_for_each_entry(bp, list, event_entry) {
		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
			count++;
	}

	spin_unlock_irqrestore(&ctx->lock, flags);

	return count;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->ctx->task;

	if (cpu >= 0) {
		slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
		if (!tsk)
			slots->pinned += max_task_bp_pinned(cpu);
		else
			slots->pinned += task_bp_pinned(tsk);
		slots->flexible = per_cpu(nr_bp_flexible, cpu);

		return;
	}

	for_each_online_cpu(cpu) {
		unsigned int nr;

		nr = per_cpu(nr_cpu_bp_pinned, cpu);
		if (!tsk)
			nr += max_task_bp_pinned(cpu);
		else
			nr += task_bp_pinned(tsk);

		if (nr > slots->pinned)
			slots->pinned = nr;

		nr = per_cpu(nr_bp_flexible, cpu);

		if (nr > slots->flexible)
			slots->flexible = nr;
	}
}
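
/*
 * Illustration of the cpu == -1 case above (the numbers are made up):
 * with two online cpus where cpu 0 holds one pinned cpu-wide breakpoint
 * and the busiest task on cpu 1 owns two pinned breakpoints, a
 * system-wide request reports slots->pinned == max(1 + 0, 0 + 2) == 2.
 */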

/*
 * Add/remove a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
{
	unsigned int *tsk_pinned;
	int count = 0;

	count = task_bp_pinned(tsk);

	tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
	if (enable) {
		tsk_pinned[count]++;
		if (count > 0)
			tsk_pinned[count-1]--;
	} else {
		tsk_pinned[count]--;
		if (count > 0)
			tsk_pinned[count-1]++;
	}
}
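
/*
 * Example of the update above (illustrative): enabling a third pinned
 * breakpoint for a task finds count == 2, since the new event is not yet
 * linked in the task's event list, so tsk_pinned[2] gains one task owning
 * three breakpoints while tsk_pinned[1] loses one task owning two.
 */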

/*
 * Add/remove the given breakpoint in our constraint table
 */
static void toggle_bp_slot(struct perf_event *bp, bool enable)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->ctx->task;

	/* Pinned counter task profiling */
	if (tsk) {
		if (cpu >= 0) {
			toggle_bp_task_slot(tsk, cpu, enable);
			return;
		}

		for_each_online_cpu(cpu)
			toggle_bp_task_slot(tsk, cpu, enable);
		return;
	}

	/* Pinned counter cpu profiling */
	if (enable)
		per_cpu(nr_cpu_bp_pinned, bp->cpu)++;
	else
		per_cpu(nr_cpu_bp_pinned, bp->cpu)--;
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *           + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters on this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per-task
 *          breakpoints (for this cpu) plus the number of per-cpu breakpoints
 *          (for this cpu) doesn't use up every register.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per-cpu
 *          bp for every cpu and we keep the max one. Same for the per-task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible, if any, must
 *          keep at least one register (or they will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
 */
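
/*
 * Worked example of the pinned check (illustrative numbers, HBP_NUM == 4):
 * a cpu-wide pinned request targeting a cpu that already holds two pinned
 * cpu breakpoints, whose busiest task owns one pinned breakpoint, and that
 * serves at least one flexible counter sees slots.pinned == 3 and
 * slots.flexible >= 1, so 3 + 1 == HBP_NUM and reserve_bp_slot() returns
 * -ENOSPC.
 */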
int reserve_bp_slot(struct perf_event *bp)
{
	struct bp_busy_slots slots = {0};
	int ret = 0;

	mutex_lock(&nr_bp_mutex);

	fetch_bp_busy_slots(&slots, bp);

	/* Flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
		ret = -ENOSPC;
		goto end;
	}

	toggle_bp_slot(bp, true);

end:
	mutex_unlock(&nr_bp_mutex);

	return ret;
}

void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	toggle_bp_slot(bp, false);

	mutex_unlock(&nr_bp_mutex);
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = reserve_bp_slot(bp);
	if (ret)
		return ret;

	/*
	 * Ptrace breakpoints can be temporary perf events only
	 * meant to reserve a slot. In this case, they are created disabled and
	 * we don't want to check the params right now (as we put a null addr).
	 * But perf tools create events as disabled and we want to check
	 * the params for them.
	 * This is a quick hack that will be removed soon, once we remove
	 * the tmp breakpoints from ptrace.
	 */
	if (!bp->attr.disabled || !bp->overflow_handler)
		ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);

	return ret;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    struct task_struct *tsk)
{
	return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
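
/*
 * Typical call sequence (a sketch; the attribute values and the
 * my_triggered callback are illustrative only):
 *
 *	struct perf_event_attr attr;
 *	struct perf_event *bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = addr;
 *	attr.bp_len = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	bp = register_user_hw_breakpoint(&attr, my_triggered, tsk);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 */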

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
	u64 old_addr = bp->attr.bp_addr;
	int old_type = bp->attr.bp_type;
	int old_len = bp->attr.bp_len;
	int err = 0;

	perf_event_disable(bp);

	bp->attr.bp_addr = attr->bp_addr;
	bp->attr.bp_type = attr->bp_type;
	bp->attr.bp_len = attr->bp_len;

	if (attr->disabled)
		goto end;
	err = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
	if (!err)
		perf_event_enable(bp);

	if (err) {
		bp->attr.bp_addr = old_addr;
		bp->attr.bp_type = old_type;
		bp->attr.bp_len = old_len;
		if (!bp->attr.disabled)
			perf_event_enable(bp);

		return err;
	}

end:
	bp->attr.disabled = attr->disabled;

	return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
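
/*
 * For instance, moving an existing breakpoint to a new address while
 * keeping its type and length (a sketch; new_addr is illustrative):
 *
 *	struct perf_event_attr attr = bp->attr;
 *
 *	attr.bp_addr = new_addr;
 *	err = modify_user_hw_breakpoint(bp, &attr);
 */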

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (!bp)
		return;
	perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event **
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered)
{
	struct perf_event **cpu_events, **pevent, *bp;
	long err;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return ERR_PTR(-ENOMEM);

	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);

		*pevent = bp;

		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			goto fail;
		}
	}

	return cpu_events;

fail:
	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		if (IS_ERR(*pevent))
			break;
		unregister_hw_breakpoint(*pevent);
	}
	free_percpu(cpu_events);
	/* return the error if any */
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
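
/*
 * Example modeled on samples/hw_breakpoint/data_breakpoint.c (a sketch;
 * the watched symbol and the wide_handler callback are illustrative):
 *
 *	struct perf_event_attr attr;
 *	struct perf_event **wide_bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = kallsyms_lookup_name("pid_max");
 *	attr.bp_len = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
 *
 *	wide_bp = register_wide_hw_breakpoint(&attr, wide_handler);
 *	if (IS_ERR(wide_bp))
 *		return PTR_ERR(wide_bp);
 */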

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event **cpu_events)
{
	int cpu;
	struct perf_event **pevent;

	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		unregister_hw_breakpoint(*pevent);
	}
	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

static int __init init_hw_breakpoint(void)
{
	return register_die_notifier(&hw_breakpoint_exceptions_nb);
}
core_initcall(init_hw_breakpoint);


struct pmu perf_ops_bp = {
	.enable		= arch_install_hw_breakpoint,
	.disable	= arch_uninstall_hw_breakpoint,
	.read		= hw_breakpoint_pmu_read,
	.unthrottle	= hw_breakpoint_pmu_unthrottle
};