/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers. Derived from
 * "arch/x86/kernel/hw_breakpoint.c"
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright 2010 IBM Corporation
 * Author: K.Prasad <prasad@linux.vnet.ibm.com>
 *
 */

#include <linux/hw_breakpoint.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/hw_breakpoint.h>
#include <asm/processor.h>
#include <asm/sstep.h>
#include <asm/uaccess.h>

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for every cpu
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg);

/*
 * Returns total number of data or instruction breakpoints available.
 */
int hw_breakpoint_slots(int type)
{
	if (type == TYPE_DATA)
		return HBP_NUM;
	return 0;		/* no instruction breakpoints available */
}

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free debug address register and use it for this
 * breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot = this_cpu_ptr(&bp_per_reg);

	*slot = bp;

	/*
	 * Do not install DABR values if the instruction must be single-stepped.
	 * If so, DABR will be populated in single_step_dabr_instruction().
	 */
	if (current->thread.last_hit_ubp != bp)
		__set_breakpoint(info);

	return 0;
}

/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct perf_event **slot = this_cpu_ptr(&bp_per_reg);

	if (*slot != bp) {
		WARN_ONCE(1, "Can't find the breakpoint");
		return;
	}

	*slot = NULL;
	hw_breakpoint_disable();
}

/*
 * Perform cleanup of arch-specific counters during unregistration
 * of the perf-event
 */
void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * If the breakpoint is unregistered between a hw_breakpoint_handler()
	 * and the single_step_dabr_instruction(), then clean up the breakpoint
	 * restoration variables to prevent dangling pointers.
	 */
	if (bp->ctx && bp->ctx->task)
		bp->ctx->task->thread.last_hit_ubp = NULL;
}

/*
 * Check for virtual address in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	return is_kernel_addr(info->address);
}

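/*
 * Translate the arch-specific breakpoint type bits into the generic
 * perf breakpoint flags.
 */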
int arch_bp_generic_fields(int type, int *gen_bp_type)
{
	*gen_bp_type = 0;
	if (type & HW_BRK_TYPE_READ)
		*gen_bp_type |= HW_BREAKPOINT_R;
	if (type & HW_BRK_TYPE_WRITE)
		*gen_bp_type |= HW_BREAKPOINT_W;
	if (*gen_bp_type == 0)
		return -EINVAL;
	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	int ret = -EINVAL, length_max;
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	if (!bp)
		return ret;

	info->type = HW_BRK_TYPE_TRANSLATE;
	if (bp->attr.bp_type & HW_BREAKPOINT_R)
		info->type |= HW_BRK_TYPE_READ;
	if (bp->attr.bp_type & HW_BREAKPOINT_W)
		info->type |= HW_BRK_TYPE_WRITE;
	if (info->type == HW_BRK_TYPE_TRANSLATE)
		/* must set at least read or write */
		return ret;
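	/* Map the exclude_* attributes onto the arch privilege bits. */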
	if (!(bp->attr.exclude_user))
		info->type |= HW_BRK_TYPE_USER;
	if (!(bp->attr.exclude_kernel))
		info->type |= HW_BRK_TYPE_KERNEL;
	if (!(bp->attr.exclude_hv))
		info->type |= HW_BRK_TYPE_HYP;
	info->address = bp->attr.bp_addr;
	info->len = bp->attr.bp_len;

	/*
	 * Since the breakpoint length can be at most HW_BREAKPOINT_LEN (8)
	 * and breakpoint addresses are aligned down to the nearest
	 * doubleword (HW_BREAKPOINT_ALIGN), the 'symbolsize' should
	 * satisfy the check below.
	 */
	length_max = 8; /* DABR */
	if (cpu_has_feature(CPU_FTR_DAWR)) {
		length_max = 512; /* 64 doublewords */
		/* DAWR region can't cross a 512-byte boundary */
		if ((bp->attr.bp_addr >> 10) !=
		    ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 10))
			return -EINVAL;
	}
	if (info->len >
	    (length_max - (info->address & HW_BREAKPOINT_ALIGN)))
		return -EINVAL;
	return 0;
}

/*
 * Restores the breakpoint on the debug registers.
 * Invoke this function if it is known that the execution context is
 * about to change to cause loss of MSR_SE settings.
 */
void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
{
	struct arch_hw_breakpoint *info;

	if (likely(!tsk->thread.last_hit_ubp))
		return;

	info = counter_arch_bp(tsk->thread.last_hit_ubp);
	regs->msr &= ~MSR_SE;
	__set_breakpoint(info);
	tsk->thread.last_hit_ubp = NULL;
}

/*
 * Handle debug exception notifications.
 */
int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int rc = NOTIFY_STOP;
	struct perf_event *bp;
	struct pt_regs *regs = args->regs;
	int stepped = 1;
	struct arch_hw_breakpoint *info;
	unsigned int instr;
	unsigned long dar = regs->dar;

	/* Disable breakpoints during exception handling */
	hw_breakpoint_disable();

	/*
	 * The counter may be concurrently released but that can only
	 * occur from a call_rcu() path. We can then safely fetch
	 * the breakpoint, use its callback, touch its counter
	 * while we are in an rcu_read_lock() path.
	 */
	rcu_read_lock();

	bp = __this_cpu_read(bp_per_reg);
	if (!bp)
		goto out;
	info = counter_arch_bp(bp);

	/*
	 * Return early after invoking the user-callback function without restoring
	 * DABR if the breakpoint is from ptrace which always operates in
	 * one-shot mode. The ptrace-ed process will receive the SIGTRAP signal
	 * generated in do_dabr().
	 */
	if (bp->overflow_handler == ptrace_triggered) {
		perf_bp_event(bp, regs);
		rc = NOTIFY_DONE;
		goto out;
	}

	/*
	 * Verify if dar lies within the address range occupied by the symbol
	 * being watched to filter extraneous exceptions.  If it doesn't,
	 * we still need to single-step the instruction, but we don't
	 * generate an event.
	 */
	info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
	if (!((bp->attr.bp_addr <= dar) &&
	      (dar - bp->attr.bp_addr < bp->attr.bp_len)))
		info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;

	/* Do not emulate user-space instructions, instead single-step them */
	if (user_mode(regs)) {
		current->thread.last_hit_ubp = bp;
		regs->msr |= MSR_SE;
		goto out;
	}

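	/*
	 * Kernel-space breakpoint: emulate the instruction that triggered
	 * it so the breakpoint can be re-installed without single-stepping.
	 */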
	stepped = 0;
	instr = 0;
	if (!__get_user_inatomic(instr, (unsigned int *) regs->nip))
		stepped = emulate_step(regs, instr);

	/*
	 * emulate_step() could not execute it. We've failed in reliably
	 * handling the hw-breakpoint. Unregister it and throw a warning
	 * message to let the user know about it.
	 */
	if (!stepped) {
		WARN(1, "Unable to handle hardware breakpoint. Breakpoint at "
			"0x%lx will be disabled.", info->address);
		perf_event_disable(bp);
		goto out;
	}
	/*
	 * As a policy, the callback is invoked in a 'trigger-after-execute'
	 * fashion
	 */
	if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
		perf_bp_event(bp, regs);

	__set_breakpoint(info);
out:
	rcu_read_unlock();
	return rc;
}

/*
 * Handle single-step exceptions following a DABR hit.
 */
static int __kprobes single_step_dabr_instruction(struct die_args *args)
{
	struct pt_regs *regs = args->regs;
	struct perf_event *bp = NULL;
	struct arch_hw_breakpoint *info;

	bp = current->thread.last_hit_ubp;
	/*
	 * Check if we are single-stepping as a result of a
	 * previous HW Breakpoint exception
	 */
	if (!bp)
		return NOTIFY_DONE;

	info = counter_arch_bp(bp);

	/*
	 * We shall invoke the user-defined callback function in the single
	 * stepping handler to conform to 'trigger-after-execute' semantics
	 */
	if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
		perf_bp_event(bp, regs);

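	/* The single step is complete, so re-install the breakpoint. */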
	__set_breakpoint(info);
	current->thread.last_hit_ubp = NULL;

	/*
	 * If the process was being single-stepped by ptrace, let the
	 * other single-step actions occur (e.g. generate SIGTRAP).
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		return NOTIFY_DONE;

	return NOTIFY_STOP;
}

/*
 * Handle debug exception notifications.
 */
int __kprobes hw_breakpoint_exceptions_notify(
		struct notifier_block *unused, unsigned long val, void *data)
{
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_DABR_MATCH:
		ret = hw_breakpoint_handler(data);
		break;
	case DIE_SSTEP:
		ret = single_step_dabr_instruction(data);
		break;
	}

	return ret;
}

/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	unregister_hw_breakpoint(t->ptrace_bps[0]);
	t->ptrace_bps[0] = NULL;
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}