/*
 * Performance counter callchain support - powerpc architecture code
 *
 * Copyright © 2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
#ifdef CONFIG_PPC64
#include "../kernel/ppc32.h"
#endif


/*
 * Is sp valid as the address of the next kernel stack frame after prev_sp?
 * The next frame may be in a different stack area but should not go
 * back down in the same stack area.
 */
static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
{
	if (sp & 0xf)
		return 0;		/* must be 16-byte aligned */
	if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
		return 0;
	if (sp >= prev_sp + STACK_FRAME_MIN_SIZE)
		return 1;
	/*
	 * sp could decrease when we jump off an interrupt stack
	 * back to the regular process stack.
	 */
	if ((sp & ~(THREAD_SIZE - 1)) != (prev_sp & ~(THREAD_SIZE - 1)))
		return 1;
	return 0;
}

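/*
 * Build the kernel callchain: record the current NIP, then follow the
 * back-chain words starting at regs->gpr[1], restarting from the saved
 * registers whenever a kernel interrupt frame is recognised.
 */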
void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	unsigned long sp, next_sp;
	unsigned long next_ip;
	unsigned long lr;
	long level = 0;
	unsigned long *fp;

	lr = regs->link;
	sp = regs->gpr[1];
	perf_callchain_store(entry, perf_instruction_pointer(regs));

	if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
		return;

	for (;;) {
		fp = (unsigned long *) sp;
		next_sp = fp[0];

		if (next_sp == sp + STACK_INT_FRAME_SIZE &&
		    fp[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			/*
			 * This looks like an interrupt frame for an
			 * interrupt that occurred in the kernel
			 */
			regs = (struct pt_regs *)(sp + STACK_FRAME_OVERHEAD);
			next_ip = regs->nip;
			lr = regs->link;
			level = 0;
			perf_callchain_store(entry, PERF_CONTEXT_KERNEL);

		} else {
			if (level == 0)
				next_ip = lr;
			else
				next_ip = fp[STACK_FRAME_LR_SAVE];

			/*
			 * We can't tell which of the first two addresses
			 * we get are valid, but we can filter out the
			 * obviously bogus ones here.  We replace them
			 * with 0 rather than removing them entirely so
			 * that userspace can tell which is which.
			 */
			if ((level == 1 && next_ip == lr) ||
			    (level <= 1 && !kernel_text_address(next_ip)))
				next_ip = 0;

			++level;
		}

		perf_callchain_store(entry, next_ip);
102 103 104 105 106 107 108 109 110 111 112 113
		if (!valid_next_sp(next_sp, sp))
			return;
		sp = next_sp;
	}
}

#ifdef CONFIG_PPC64
/*
 * On 64-bit we don't want to invoke hash_page on user addresses from
 * interrupt context, so if the access faults, we read the page tables
 * to find which page (if any) is mapped and access it directly.
 */
static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
{
	int ret = -EFAULT;
	pgd_t *pgdir;
	pte_t *ptep, pte;
	unsigned shift;
	unsigned long addr = (unsigned long) ptr;
	unsigned long offset;
	unsigned long pfn, flags;
	void *kaddr;

	pgdir = current->mm->pgd;
	if (!pgdir)
		return -EFAULT;

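	/*
	 * Interrupts stay disabled across the walk so the page tables
	 * and the PTE page can't be freed out from under us.
	 */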
	local_irq_save(flags);
	ptep = find_linux_pte_or_hugepte(pgdir, addr, NULL, &shift);
	if (!ptep)
		goto err_out;
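	/* a zero shift from the lookup denotes a normal (non-huge) page */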
	if (!shift)
		shift = PAGE_SHIFT;

	/* compute the offset of the address within its page */
	offset = addr & ((1UL << shift) - 1);

	pte = READ_ONCE(*ptep);
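	/* the page must be present and user-accessible */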
	if (!pte_present(pte) || !(pte_val(pte) & _PAGE_USER))
		goto err_out;
	pfn = pte_pfn(pte);
	if (!page_is_ram(pfn))
		goto err_out;

	/* no highmem to worry about here */
	kaddr = pfn_to_kaddr(pfn);
	memcpy(buf, kaddr + offset, nb);
	ret = 0;
err_out:
	local_irq_restore(flags);
	return ret;
}

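/*
 * Fast path: try a plain in-atomic user read with page faults disabled;
 * if that faults, fall back to the page-table walk above.
 */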
static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
{
	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) ||
	    ((unsigned long)ptr & 7))
		return -EFAULT;

	pagefault_disable();
	if (!__get_user_inatomic(*ret, ptr)) {
		pagefault_enable();
		return 0;
	}
	pagefault_enable();

	return read_user_stack_slow(ptr, ret, 8);
}

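/* as above, but reading a 32-bit word (e.g. from a compat task's stack) */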
static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
{
	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
	    ((unsigned long)ptr & 3))
		return -EFAULT;

	pagefault_disable();
	if (!__get_user_inatomic(*ret, ptr)) {
		pagefault_enable();
		return 0;
	}
	pagefault_enable();

	return read_user_stack_slow(ptr, ret, 4);
}

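/*
 * A plausible user stack pointer is non-zero, 8-byte aligned, and
 * leaves room for a minimal frame below the address-space limit
 * (TASK_SIZE for 64-bit tasks, 4GB for 32-bit ones).
 */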
static inline int valid_user_sp(unsigned long sp, int is_64)
{
	if (!sp || (sp & 7) || sp > (is_64 ? TASK_SIZE : 0x100000000UL) - 32)
		return 0;
	return 1;
}

/*
 * 64-bit user processes use the same stack frame for RT and non-RT signals.
 */
struct signal_frame_64 {
	char		dummy[__SIGNAL_FRAMESIZE];
	struct ucontext	uc;
	unsigned long	unused[2];
	unsigned int	tramp[6];
	struct siginfo	*pinfo;
	void		*puc;
	struct siginfo	info;
	char		abigap[288];
};

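/*
 * nip is a sigreturn address if it points at the trampoline within the
 * signal frame itself or at the rt_sigreturn entry in the vDSO.
 */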
static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
{
	if (nip == fp + offsetof(struct signal_frame_64, tramp))
		return 1;
	if (vdso64_rt_sigtramp && current->mm->context.vdso_base &&
	    nip == current->mm->context.vdso_base + vdso64_rt_sigtramp)
		return 1;
	return 0;
}

/*
 * Do some sanity checking on the signal frame pointed to by sp.
 * We check the pinfo and puc pointers in the frame.
 */
static int sane_signal_64_frame(unsigned long sp)
{
	struct signal_frame_64 __user *sf;
	unsigned long pinfo, puc;

	sf = (struct signal_frame_64 __user *) sp;
	if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) ||
	    read_user_stack_64((unsigned long __user *) &sf->puc, &puc))
		return 0;
	return pinfo == (unsigned long) &sf->info &&
		puc == (unsigned long) &sf->uc;
}

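/*
 * Walk a 64-bit user stack.  Every load goes through read_user_stack_64(),
 * and the trace restarts from the saved registers whenever a sane signal
 * frame is recognised.
 */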
static void perf_callchain_user_64(struct perf_callchain_entry *entry,
				   struct pt_regs *regs)
{
	unsigned long sp, next_sp;
	unsigned long next_ip;
	unsigned long lr;
	long level = 0;
	struct signal_frame_64 __user *sigframe;
	unsigned long __user *fp, *uregs;

	next_ip = perf_instruction_pointer(regs);
	lr = regs->link;
	sp = regs->gpr[1];
	perf_callchain_store(entry, next_ip);

	while (entry->nr < PERF_MAX_STACK_DEPTH) {
		fp = (unsigned long __user *) sp;
		if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
			return;
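		/* fp[2] is the LR save slot in the ppc64 ELF ABI frame */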
		if (level > 0 && read_user_stack_64(&fp[2], &next_ip))
			return;

		/*
		 * Note: the next_sp - sp >= signal frame size check
		 * is true when next_sp < sp, which can happen when
		 * transitioning from an alternate signal stack to the
		 * normal stack.
		 */
		if (next_sp - sp >= sizeof(struct signal_frame_64) &&
		    (is_sigreturn_64_address(next_ip, sp) ||
		     (level <= 1 && is_sigreturn_64_address(lr, sp))) &&
		    sane_signal_64_frame(sp)) {
			/*
			 * This looks like a signal frame
			 */
			sigframe = (struct signal_frame_64 __user *) sp;
			uregs = sigframe->uc.uc_mcontext.gp_regs;
			if (read_user_stack_64(&uregs[PT_NIP], &next_ip) ||
			    read_user_stack_64(&uregs[PT_LNK], &lr) ||
			    read_user_stack_64(&uregs[PT_R1], &sp))
				return;
			level = 0;
			perf_callchain_store(entry, PERF_CONTEXT_USER);
			perf_callchain_store(entry, next_ip);
			continue;
		}

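		/* at the innermost level the return address is still in LR */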
		if (level == 0)
			next_ip = lr;
		perf_callchain_store(entry, next_ip);
		++level;
		sp = next_sp;
	}
}

static inline int current_is_64bit(void)
{
	/*
	 * We can't use test_thread_flag() here because we may be on an
	 * interrupt stack, and the thread flags don't get copied over
	 * from the thread_info on the main stack to the interrupt stack.
	 */
	return !test_ti_thread_flag(task_thread_info(current), TIF_32BIT);
}

#else  /* CONFIG_PPC64 */
/*
 * On 32-bit we just access the address and let hash_page create a
 * HPTE if necessary, so there is no need to fall back to reading
 * the page tables.  Since this is called at interrupt level,
 * do_page_fault() won't treat a DSI as a page fault.
 */
static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
{
	int rc;

	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
	    ((unsigned long)ptr & 3))
		return -EFAULT;

	pagefault_disable();
	rc = __get_user_inatomic(*ret, ptr);
	pagefault_enable();

	return rc;
}

static inline void perf_callchain_user_64(struct perf_callchain_entry *entry,
					  struct pt_regs *regs)
{
}

static inline int current_is_64bit(void)
{
	return 0;
}

static inline int valid_user_sp(unsigned long sp, int is_64)
{
	if (!sp || (sp & 7) || sp > TASK_SIZE - 32)
		return 0;
	return 1;
}

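/*
 * On a 32-bit kernel the "compat" structures are simply the native
 * ones, so alias the 32-bit names used by the code below.
 */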
#define __SIGNAL_FRAMESIZE32	__SIGNAL_FRAMESIZE
#define sigcontext32		sigcontext
#define mcontext32		mcontext
#define ucontext32		ucontext
#define compat_siginfo_t	struct siginfo

#endif /* CONFIG_PPC64 */

/*
 * Layout for non-RT signal frames
 */
struct signal_frame_32 {
	char			dummy[__SIGNAL_FRAMESIZE32];
	struct sigcontext32	sctx;
	struct mcontext32	mctx;
	int			abigap[56];
};

/*
 * Layout for RT signal frames
 */
struct rt_signal_frame_32 {
	char			dummy[__SIGNAL_FRAMESIZE32 + 16];
	compat_siginfo_t	info;
	struct ucontext32	uc;
	int			abigap[56];
};

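/*
 * nip is a sigreturn address if it points at the trampoline written
 * into the frame's mc_pad or at the sigtramp entry in the vDSO.
 */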
static int is_sigreturn_32_address(unsigned int nip, unsigned int fp)
{
	if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad))
		return 1;
	if (vdso32_sigtramp && current->mm->context.vdso_base &&
	    nip == current->mm->context.vdso_base + vdso32_sigtramp)
		return 1;
	return 0;
}

static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp)
{
	if (nip == fp + offsetof(struct rt_signal_frame_32,
				 uc.uc_mcontext.mc_pad))
		return 1;
	if (vdso32_rt_sigtramp && current->mm->context.vdso_base &&
	    nip == current->mm->context.vdso_base + vdso32_rt_sigtramp)
		return 1;
	return 0;
}

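/*
 * Sanity check on a non-RT signal frame: the regs pointer saved in the
 * sigcontext should point at the mcontext in the same frame.
 */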
static int sane_signal_32_frame(unsigned int sp)
{
	struct signal_frame_32 __user *sf;
	unsigned int regs;

	sf = (struct signal_frame_32 __user *) (unsigned long) sp;
	if (read_user_stack_32((unsigned int __user *) &sf->sctx.regs, &regs))
		return 0;
	return regs == (unsigned long) &sf->mctx;
}

static int sane_rt_signal_32_frame(unsigned int sp)
{
	struct rt_signal_frame_32 __user *sf;
	unsigned int regs;

	sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
	if (read_user_stack_32((unsigned int __user *) &sf->uc.uc_regs, &regs))
		return 0;
	return regs == (unsigned long) &sf->uc.uc_mcontext;
}

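/*
 * If the frame at sp looks like a 32-bit signal frame (RT or non-RT),
 * return a pointer to the saved GP registers inside it, else NULL.
 */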
static unsigned int __user *signal_frame_32_regs(unsigned int sp,
				unsigned int next_sp, unsigned int next_ip)
{
	struct mcontext32 __user *mctx = NULL;
	struct signal_frame_32 __user *sf;
	struct rt_signal_frame_32 __user *rt_sf;

	/*
	 * Note: the next_sp - sp >= signal frame size check
	 * is true when next_sp < sp, for example, when
	 * transitioning from an alternate signal stack to the
	 * normal stack.
	 */
	if (next_sp - sp >= sizeof(struct signal_frame_32) &&
	    is_sigreturn_32_address(next_ip, sp) &&
	    sane_signal_32_frame(sp)) {
		sf = (struct signal_frame_32 __user *) (unsigned long) sp;
		mctx = &sf->mctx;
	}

	if (!mctx && next_sp - sp >= sizeof(struct rt_signal_frame_32) &&
	    is_rt_sigreturn_32_address(next_ip, sp) &&
	    sane_rt_signal_32_frame(sp)) {
		rt_sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
		mctx = &rt_sf->uc.uc_mcontext;
	}

	if (!mctx)
		return NULL;
	return mctx->mc_gregs;
}

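/*
 * Walk a 32-bit user stack, restarting from the saved registers of any
 * signal frame found along the way.
 */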
static void perf_callchain_user_32(struct perf_callchain_entry *entry,
				   struct pt_regs *regs)
{
	unsigned int sp, next_sp;
	unsigned int next_ip;
	unsigned int lr;
	long level = 0;
	unsigned int __user *fp, *uregs;

	next_ip = perf_instruction_pointer(regs);
	lr = regs->link;
	sp = regs->gpr[1];
	perf_callchain_store(entry, next_ip);

	while (entry->nr < PERF_MAX_STACK_DEPTH) {
		fp = (unsigned int __user *) (unsigned long) sp;
		if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp))
			return;
		if (level > 0 && read_user_stack_32(&fp[1], &next_ip))
			return;

		uregs = signal_frame_32_regs(sp, next_sp, next_ip);
		if (!uregs && level <= 1)
			uregs = signal_frame_32_regs(sp, next_sp, lr);
		if (uregs) {
			/*
			 * This looks like a signal frame, so restart
			 * the stack trace with the values in it.
			 */
			if (read_user_stack_32(&uregs[PT_NIP], &next_ip) ||
			    read_user_stack_32(&uregs[PT_LNK], &lr) ||
			    read_user_stack_32(&uregs[PT_R1], &sp))
				return;
			level = 0;
			perf_callchain_store(entry, PERF_CONTEXT_USER);
			perf_callchain_store(entry, next_ip);
			continue;
		}

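		/* at the innermost level the return address is still in LR */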
		if (level == 0)
			next_ip = lr;
		perf_callchain_store(entry, next_ip);
		++level;
		sp = next_sp;
	}
}

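/*
 * Entry point from the generic perf code: pick the stack walker that
 * matches the bitness of the interrupted task.
 */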
void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	if (current_is_64bit())
		perf_callchain_user_64(entry, regs);
	else
		perf_callchain_user_32(entry, regs);
}