// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>

#include <asm/cpu_entry_area.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/insn.h>

#include "../perf_event.h"

/* Waste a full page so it can be mapped into the cpu_entry_area */
DEFINE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

#define PEBS_FIXUP_SIZE		PAGE_SIZE

/*
 * pebs_record_32 for p4 and core not supported

struct pebs_record_32 {
	u32 flags, ip;
	u32 ax, bx, cx, dx;
	u32 si, di, bp, sp;
};

 */

union intel_x86_pebs_dse {
	u64 val;
	struct {
		unsigned int ld_dse:4;
		unsigned int ld_stlb_miss:1;
		unsigned int ld_locked:1;
		unsigned int ld_reserved:26;
	};
	struct {
		unsigned int st_l1d_hit:1;
		unsigned int st_reserved1:3;
		unsigned int st_stlb_miss:1;
		unsigned int st_locked:1;
		unsigned int st_reserved2:26;
	};
};


/*
 * Map PEBS Load Latency Data Source encodings to generic
 * memory data source information
 */
#define P(a, b) PERF_MEM_S(a, b)
#define OP_LH (P(OP, LOAD) | P(LVL, HIT))
#define LEVEL(x) P(LVLNUM, x)
#define REM P(REMOTE, REMOTE)
#define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))

/* Version for Sandy Bridge and later */
static u64 pebs_data_source[] = {
	P(OP, LOAD) | P(LVL, MISS) | LEVEL(L3) | P(SNOOP, NA),/* 0x00:ukn L3 */
	OP_LH | P(LVL, L1)  | LEVEL(L1) | P(SNOOP, NONE),  /* 0x01: L1 local */
	OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE), /* 0x02: LFB hit */
	OP_LH | P(LVL, L2)  | LEVEL(L2) | P(SNOOP, NONE),  /* 0x03: L2 hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3) | P(SNOOP, NONE),  /* 0x04: L3 hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3) | P(SNOOP, MISS),  /* 0x05: L3 hit, snoop miss */
	OP_LH | P(LVL, L3)  | LEVEL(L3) | P(SNOOP, HIT),   /* 0x06: L3 hit, snoop hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3) | P(SNOOP, HITM),  /* 0x07: L3 hit, snoop hitm */
	OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HIT),  /* 0x08: L3 miss snoop hit */
	OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HITM), /* 0x09: L3 miss snoop hitm*/
	OP_LH | P(LVL, LOC_RAM)  | LEVEL(RAM) | P(SNOOP, HIT),       /* 0x0a: L3 miss, shared */
	OP_LH | P(LVL, REM_RAM1) | REM | LEVEL(L3) | P(SNOOP, HIT),  /* 0x0b: L3 miss, shared */
	OP_LH | P(LVL, LOC_RAM)  | LEVEL(RAM) | SNOOP_NONE_MISS,     /* 0x0c: L3 miss, excl */
	OP_LH | P(LVL, REM_RAM1) | LEVEL(RAM) | REM | SNOOP_NONE_MISS, /* 0x0d: L3 miss, excl */
	OP_LH | P(LVL, IO)  | LEVEL(NA) | P(SNOOP, NONE), /* 0x0e: I/O */
	OP_LH | P(LVL, UNC) | LEVEL(NA) | P(SNOOP, NONE), /* 0x0f: uncached */
};

/* Patch up minor differences in the bits */
void __init intel_pmu_pebs_data_source_nhm(void)
{
	pebs_data_source[0x05] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT);
	pebs_data_source[0x06] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
	pebs_data_source[0x07] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
}

void __init intel_pmu_pebs_data_source_skl(bool pmem)
{
	u64 pmem_or_l4 = pmem ? LEVEL(PMEM) : LEVEL(L4);

	pebs_data_source[0x08] = OP_LH | pmem_or_l4 | P(SNOOP, HIT);
	pebs_data_source[0x09] = OP_LH | pmem_or_l4 | REM | P(SNOOP, HIT);
	pebs_data_source[0x0b] = OP_LH | LEVEL(RAM) | REM | P(SNOOP, NONE);
	pebs_data_source[0x0c] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOPX, FWD);
	pebs_data_source[0x0d] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOP, HITM);
}

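/*
 * Decode the PEBS precise-store status word into a generic perf data
 * source: store op, L1 hit/miss, STLB hit/miss and lock prefix.
 */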
static u64 precise_store_data(u64 status)
{
	union intel_x86_pebs_dse dse;
	u64 val = P(OP, STORE) | P(SNOOP, NA) | P(LVL, L1) | P(TLB, L2);

	dse.val = status;

	/*
	 * bit 4: TLB access
	 * 1 = store missed the 2nd level TLB
	 *
	 * so it was either resolved by the page walker or the OS;
	 * otherwise it hit the 2nd level TLB
	 */
	if (dse.st_stlb_miss)
		val |= P(TLB, MISS);
	else
		val |= P(TLB, HIT);

	/*
	 * bit 0: hit L1 data cache
	 * if not set, then all we know is that
	 * it missed L1D
	 */
	if (dse.st_l1d_hit)
		val |= P(LVL, HIT);
	else
		val |= P(LVL, MISS);

	/*
	 * bit 5: Locked prefix
	 */
	if (dse.st_locked)
		val |= P(LOCK, LOCKED);

	return val;
}

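/*
 * Data source for Haswell data-address (DataLA) events: only the
 * load/store direction is known, plus the L1 hit/miss bit for the
 * store events listed below.
 */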
static u64 precise_datala_hsw(struct perf_event *event, u64 status)
{
	union perf_mem_data_src dse;

	dse.val = PERF_MEM_NA;

	if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW)
		dse.mem_op = PERF_MEM_OP_STORE;
	else if (event->hw.flags & PERF_X86_EVENT_PEBS_LD_HSW)
		dse.mem_op = PERF_MEM_OP_LOAD;

	/*
	 * L1 info only valid for following events:
	 *
	 * MEM_UOPS_RETIRED.STLB_MISS_STORES
	 * MEM_UOPS_RETIRED.LOCK_STORES
	 * MEM_UOPS_RETIRED.SPLIT_STORES
	 * MEM_UOPS_RETIRED.ALL_STORES
	 */
	if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) {
		if (status & 1)
			dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
		else
			dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS;
	}
	return dse.val;
}

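/*
 * Decode the PEBS load-latency status word: bits 0-3 index the
 * pebs_data_source[] table, bit 4 is the STLB miss and bit 5 the
 * lock prefix.
 */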
static u64 load_latency_data(u64 status)
{
	union intel_x86_pebs_dse dse;
	u64 val;

	dse.val = status;

	/*
	 * use the mapping table for bit 0-3
	 */
	val = pebs_data_source[dse.ld_dse];

	/*
	 * Nehalem models do not provide TLB or lock information
	 */
	if (x86_pmu.pebs_no_tlb) {
		val |= P(TLB, NA) | P(LOCK, NA);
		return val;
	}
	/*
	 * bit 4: TLB access
	 * 0 = did not miss 2nd level TLB
	 * 1 = missed 2nd level TLB
	 */
	if (dse.ld_stlb_miss)
		val |= P(TLB, MISS) | P(TLB, L2);
	else
		val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2);

	/*
	 * bit 5: locked prefix
	 */
	if (dse.ld_locked)
		val |= P(LOCK, LOCKED);

	return val;
}

struct pebs_record_core {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
};

struct pebs_record_nhm {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
};

/*
 * Same as pebs_record_nhm, with two additional fields.
 */
struct pebs_record_hsw {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
	u64 real_ip, tsx_tuning;
};

union hsw_tsx_tuning {
	struct {
		u32 cycles_last_block     : 32,
		    hle_abort		  : 1,
		    rtm_abort		  : 1,
		    instruction_abort     : 1,
		    non_instruction_abort : 1,
		    retry		  : 1,
		    data_conflict	  : 1,
		    capacity_writes	  : 1,
		    capacity_reads	  : 1;
	};
	u64	    value;
};

#define PEBS_HSW_TSX_FLAGS	0xff00000000ULL

/* Same as HSW, plus TSC */

struct pebs_record_skl {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
	u64 real_ip, tsx_tuning;
	u64 tsc;
};

void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}

void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}

static DEFINE_PER_CPU(void *, insn_buffer);

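/*
 * Map a DS buffer into the cpu_entry_area at @cea, one PTE per page,
 * then flush stale TLB entries for the mapped range.
 */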
static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long)cea;
	phys_addr_t pa;
	size_t msz = 0;

	pa = virt_to_phys(addr);

	preempt_disable();
	for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
		cea_set_pte(cea, pa, prot);

	/*
	 * This is a cross-CPU update of the cpu_entry_area, we must shoot down
	 * all TLB entries for it.
	 */
	flush_tlb_kernel_range(start, start + size);
	preempt_enable();
}

static void ds_clear_cea(void *cea, size_t size)
{
	unsigned long start = (unsigned long)cea;
	size_t msz = 0;

	preempt_disable();
	for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE)
		cea_set_pte(cea, 0, PAGE_NONE);

	flush_tlb_kernel_range(start, start + size);
	preempt_enable();
}

static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)
{
	unsigned int order = get_order(size);
	int node = cpu_to_node(cpu);
	struct page *page;

	page = __alloc_pages_node(node, flags | __GFP_ZERO, order);
	return page ? page_address(page) : NULL;
}

static void dsfree_pages(const void *buffer, size_t size)
{
	if (buffer)
		free_pages((unsigned long)buffer, get_order(size));
}

static int alloc_pebs_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	struct debug_store *ds = hwev->ds;
	size_t bsiz = x86_pmu.pebs_buffer_size;
	int max, node = cpu_to_node(cpu);
	void *buffer, *ibuffer, *cea;

	if (!x86_pmu.pebs)
		return 0;

	buffer = dsalloc_pages(bsiz, GFP_KERNEL, cpu);
	if (unlikely(!buffer))
		return -ENOMEM;

	/*
	 * HSW+ already provides us the eventing IP; no need to allocate the
	 * instruction fixup buffer in that case.
	 */
	if (x86_pmu.intel_cap.pebs_format < 2) {
		ibuffer = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node);
		if (!ibuffer) {
			dsfree_pages(buffer, bsiz);
			return -ENOMEM;
		}
		per_cpu(insn_buffer, cpu) = ibuffer;
	}
	hwev->ds_pebs_vaddr = buffer;
	/* Update the cpu entry area mapping */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
	ds->pebs_buffer_base = (unsigned long) cea;
	ds_update_cea(cea, buffer, bsiz, PAGE_KERNEL);
	ds->pebs_index = ds->pebs_buffer_base;
	max = x86_pmu.pebs_record_size * (bsiz / x86_pmu.pebs_record_size);
	ds->pebs_absolute_maximum = ds->pebs_buffer_base + max;
	return 0;
}

static void release_pebs_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	struct debug_store *ds = hwev->ds;
	void *cea;

	if (!ds || !x86_pmu.pebs)
		return;

	kfree(per_cpu(insn_buffer, cpu));
	per_cpu(insn_buffer, cpu) = NULL;

	/* Clear the fixmap */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
	ds_clear_cea(cea, x86_pmu.pebs_buffer_size);
	ds->pebs_buffer_base = 0;
	dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size);
	hwev->ds_pebs_vaddr = NULL;
}

static int alloc_bts_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	struct debug_store *ds = hwev->ds;
	void *buffer, *cea;
	int max;

	if (!x86_pmu.bts)
		return 0;

	buffer = dsalloc_pages(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, cpu);
	if (unlikely(!buffer)) {
		WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
		return -ENOMEM;
	}
	hwev->ds_bts_vaddr = buffer;
	/* Update the fixmap */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
	ds->bts_buffer_base = (unsigned long) cea;
	ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL);
	ds->bts_index = ds->bts_buffer_base;
	max = BTS_RECORD_SIZE * (BTS_BUFFER_SIZE / BTS_RECORD_SIZE);
	ds->bts_absolute_maximum = ds->bts_buffer_base + max;
	ds->bts_interrupt_threshold = ds->bts_absolute_maximum - (max / 16);
	return 0;
}

static void release_bts_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	struct debug_store *ds = hwev->ds;
	void *cea;

	if (!ds || !x86_pmu.bts)
		return;

	/* Clear the fixmap */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
	ds_clear_cea(cea, BTS_BUFFER_SIZE);
	ds->bts_buffer_base = 0;
	dsfree_pages(hwev->ds_bts_vaddr, BTS_BUFFER_SIZE);
	hwev->ds_bts_vaddr = NULL;
}

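/*
 * The debug_store itself lives in the cpu_entry_area; just clear it
 * and hook it up to this CPU's cpu_hw_events.
 */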
static int alloc_ds_buffer(int cpu)
{
	struct debug_store *ds = &get_cpu_entry_area(cpu)->cpu_debug_store;

	memset(ds, 0, sizeof(*ds));
	per_cpu(cpu_hw_events, cpu).ds = ds;
	return 0;
}

static void release_ds_buffer(int cpu)
{
	per_cpu(cpu_hw_events, cpu).ds = NULL;
}

void release_ds_buffers(void)
{
	int cpu;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	get_online_cpus();
	for_each_online_cpu(cpu)
		fini_debug_store_on_cpu(cpu);

	for_each_possible_cpu(cpu) {
		release_pebs_buffer(cpu);
		release_bts_buffer(cpu);
		release_ds_buffer(cpu);
	}
	put_online_cpus();
}

void reserve_ds_buffers(void)
{
	int bts_err = 0, pebs_err = 0;
	int cpu;

	x86_pmu.bts_active = 0;
	x86_pmu.pebs_active = 0;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	if (!x86_pmu.bts)
		bts_err = 1;

	if (!x86_pmu.pebs)
		pebs_err = 1;

	get_online_cpus();

	for_each_possible_cpu(cpu) {
		if (alloc_ds_buffer(cpu)) {
			bts_err = 1;
			pebs_err = 1;
		}

		if (!bts_err && alloc_bts_buffer(cpu))
			bts_err = 1;

		if (!pebs_err && alloc_pebs_buffer(cpu))
			pebs_err = 1;

		if (bts_err && pebs_err)
			break;
	}

	if (bts_err) {
		for_each_possible_cpu(cpu)
			release_bts_buffer(cpu);
	}

	if (pebs_err) {
		for_each_possible_cpu(cpu)
			release_pebs_buffer(cpu);
	}

	if (bts_err && pebs_err) {
		for_each_possible_cpu(cpu)
			release_ds_buffer(cpu);
	} else {
		if (x86_pmu.bts && !bts_err)
			x86_pmu.bts_active = 1;

		if (x86_pmu.pebs && !pebs_err)
			x86_pmu.pebs_active = 1;

		for_each_online_cpu(cpu)
			init_debug_store_on_cpu(cpu);
	}

	put_online_cpus();
}

/*
 * BTS
 */

struct event_constraint bts_constraint =
	EVENT_CONSTRAINT(0, 1ULL << INTEL_PMC_IDX_FIXED_BTS, 0);

void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= DEBUGCTLMSR_TR;
	debugctlmsr |= DEBUGCTLMSR_BTS;
	if (config & ARCH_PERFMON_EVENTSEL_INT)
		debugctlmsr |= DEBUGCTLMSR_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}

void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
		  DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}

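/*
 * Drain the BTS buffer: emit one perf sample per branch record,
 * skipping records that the exclude_kernel filter rules out.
 */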
int intel_pmu_drain_bts_buffer(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct bts_record {
		u64	from;
		u64	to;
		u64	flags;
	};
	struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *base, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	unsigned long skip = 0;
	struct pt_regs regs;

	if (!event)
		return 0;

	if (!x86_pmu.bts_active)
		return 0;

	base = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top  = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= base)
		return 0;

	memset(&regs, 0, sizeof(regs));

	ds->bts_index = ds->bts_buffer_base;

	perf_sample_data_init(&data, 0, event->hw.last_period);

	/*
	 * BTS leaks kernel addresses in branches across the cpl boundary,
	 * such as traps or system calls, so unless the user is asking for
	 * kernel tracing (and right now it's not possible), we'd need to
	 * filter them out. But first we need to count how many of those we
	 * have in the current batch. This is an extra O(n) pass; however,
	 * it's much faster than the output pass below, especially since
	 * n <= 2560 (BTS_BUFFER_SIZE / BTS_RECORD_SIZE * 15/16; see the
	 * alloc_bts_buffer()).
	 */
	for (at = base; at < top; at++) {
		/*
		 * Note that right now *this* BTS code only works if
		 * attr::exclude_kernel is set, but let's keep this extra
		 * check here in case that changes.
		 */
		if (event->attr.exclude_kernel &&
		    (kernel_ip(at->from) || kernel_ip(at->to)))
			skip++;
	}

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	rcu_read_lock();
	perf_prepare_sample(&header, &data, event, &regs);

	if (perf_output_begin(&handle, event, header.size *
			      (top - base - skip)))
		goto unlock;

	for (at = base; at < top; at++) {
		/* Filter out any records that contain kernel addresses. */
		if (event->attr.exclude_kernel &&
		    (kernel_ip(at->from) || kernel_ip(at->to)))
			continue;

		data.ip		= at->from;
		data.addr	= at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;
unlock:
	rcu_read_unlock();
	return 1;
}

static inline void intel_pmu_drain_pebs_buffer(void)
{
	struct pt_regs regs;

	x86_pmu.drain_pebs(&regs);
}

/*
 * PEBS
 */
struct event_constraint intel_core2_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_atom_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_slm_pebs_event_constraints[] = {
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x1),
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_glm_pebs_event_constraints[] = {
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_glp_pebs_event_constraints[] = {
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_nehalem_pebs_event_constraints[] = {
	INTEL_PLD_CONSTRAINT(0x100b, 0xf),      /* MEM_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf),    /* INST_RETIRED.ANY */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_westmere_pebs_event_constraints[] = {
	INTEL_PLD_CONSTRAINT(0x100b, 0xf),      /* MEM_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf),    /* INSTR_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),    /* UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_snb_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),    /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
	INTEL_PST_CONSTRAINT(0x02cd, 0x8),    /* MEM_TRANS_RETIRED.PRECISE_STORES */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf),    /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_ivb_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),    /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
	INTEL_PST_CONSTRAINT(0x02cd, 0x8),    /* MEM_TRANS_RETIRED.PRECISE_STORES */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf),    /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_hsw_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),    /* MEM_TRANS_RETIRED.* */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd2, 0xf),    /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd3, 0xf),    /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_bdw_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),    /* MEM_TRANS_RETIRED.* */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf),    /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf),    /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};


struct event_constraint intel_skl_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2),	/* INST_RETIRED.PREC_DIST */
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
	/* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
	INTEL_PLD_CONSTRAINT(0x1cd, 0xf),		      /* MEM_TRANS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x22d0, 0xf), /* MEM_INST_RETIRED.LOCK_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf),    /* MEM_LOAD_L3_HIT_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf),    /* MEM_LOAD_L3_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{
	struct event_constraint *c;

	if (!event->attr.precise_ip)
		return NULL;

	if (x86_pmu.pebs_constraints) {
		for_each_event_constraint(c, x86_pmu.pebs_constraints) {
			if ((event->hw.config & c->cmask) == c->code) {
				event->hw.flags |= c->flags;
				return c;
			}
		}
	}

	return &emptyconstraint;
}

/*
 * We need the sched_task callback even for per-cpu events when we use
 * the large interrupt threshold, such that we can provide PID and TID
 * to PEBS samples.
 */
static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
{
	return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs);
}

void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!sched_in && pebs_needs_sched_cb(cpuc))
		intel_pmu_drain_pebs_buffer();
}

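/*
 * With only large-threshold PEBS events scheduled, the interrupt
 * threshold is pushed towards the end of the buffer; otherwise
 * interrupt after every single record.
 */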
static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
{
	struct debug_store *ds = cpuc->ds;
	u64 threshold;

	if (cpuc->n_pebs == cpuc->n_large_pebs) {
		threshold = ds->pebs_absolute_maximum -
			x86_pmu.max_pebs_events * x86_pmu.pebs_record_size;
	} else {
		threshold = ds->pebs_buffer_base + x86_pmu.pebs_record_size;
	}

	ds->pebs_interrupt_threshold = threshold;
}

static void
pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc, struct pmu *pmu)
{
	/*
	 * Make sure we get updated with the first PEBS
	 * event. It will also trigger during removal, but
	 * that does not hurt:
	 */
	bool update = cpuc->n_pebs == 1;

	if (needed_cb != pebs_needs_sched_cb(cpuc)) {
		if (!needed_cb)
			perf_sched_cb_inc(pmu);
		else
			perf_sched_cb_dec(pmu);

		update = true;
	}

	if (update)
		pebs_update_threshold(cpuc);
}

void intel_pmu_pebs_add(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	bool needed_cb = pebs_needs_sched_cb(cpuc);

	cpuc->n_pebs++;
	if (hwc->flags & PERF_X86_EVENT_FREERUNNING)
		cpuc->n_large_pebs++;

	pebs_update_state(needed_cb, cpuc, event->ctx->pmu);
}

void intel_pmu_pebs_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	struct debug_store *ds = cpuc->ds;

	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;

	cpuc->pebs_enabled |= 1ULL << hwc->idx;

	if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT)
		cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32);
	else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
		cpuc->pebs_enabled |= 1ULL << 63;

	/*
	 * Use auto-reload if possible to save an MSR write in the PMI.
	 * This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD.
	 */
	if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
		ds->pebs_event_reset[hwc->idx] =
			(u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
	} else {
		ds->pebs_event_reset[hwc->idx] = 0;
	}
}

void intel_pmu_pebs_del(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	bool needed_cb = pebs_needs_sched_cb(cpuc);

	cpuc->n_pebs--;
	if (hwc->flags & PERF_X86_EVENT_FREERUNNING)
		cpuc->n_large_pebs--;

	pebs_update_state(needed_cb, cpuc, event->ctx->pmu);
}

void intel_pmu_pebs_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (cpuc->n_pebs == cpuc->n_large_pebs)
		intel_pmu_drain_pebs_buffer();

	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);

	if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT)
		cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32));
	else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
		cpuc->pebs_enabled &= ~(1ULL << 63);

	if (cpuc->enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);

	hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
}

void intel_pmu_pebs_enable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
}

void intel_pmu_pebs_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}

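/*
 * PEBS records the address of the instruction after the one that
 * triggered the event. Starting from the last LBR branch target,
 * decode instruction lengths forward to recover the eventing IP.
 */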
static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned long from = cpuc->lbr_entries[0].from;
	unsigned long old_to, to = cpuc->lbr_entries[0].to;
	unsigned long ip = regs->ip;
	int is_64bit = 0;
	void *kaddr;
	int size;

	/*
	 * We don't need to fix up anything if the PEBS assist is fault-like
	 */
	if (!x86_pmu.intel_cap.pebs_trap)
		return 1;

	/*
	 * No LBR entry, no basic block, no rewinding
	 */
	if (!cpuc->lbr_stack.nr || !from || !to)
		return 0;

	/*
	 * Basic blocks should never cross user/kernel boundaries
	 */
	if (kernel_ip(ip) != kernel_ip(to))
		return 0;

	/*
	 * unsigned math, either ip is before the start (impossible) or
	 * the basic block is larger than 1 page (sanity)
	 */
	if ((ip - to) > PEBS_FIXUP_SIZE)
		return 0;

	/*
	 * We sampled a branch insn, rewind using the LBR stack
	 */
	if (ip == to) {
		set_linear_ip(regs, from);
		return 1;
	}

	size = ip - to;
	if (!kernel_ip(ip)) {
		int bytes;
		u8 *buf = this_cpu_read(insn_buffer);

		/* 'size' must fit our buffer, see above */
		bytes = copy_from_user_nmi(buf, (void __user *)to, size);
		if (bytes != 0)
			return 0;

		kaddr = buf;
	} else {
		kaddr = (void *)to;
	}

	do {
		struct insn insn;

		old_to = to;

#ifdef CONFIG_X86_64
		is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
#endif
		insn_init(&insn, kaddr, size, is_64bit);
		insn_get_length(&insn);
		/*
		 * Make sure there was not a problem decoding the
		 * instruction and getting the length.  This is
		 * doubly important because we have an infinite
		 * loop if insn.length=0.
		 */
		if (!insn.length)
			break;

		to += insn.length;
		kaddr += insn.length;
		size -= insn.length;
	} while (to < ip);

	if (to == ip) {
		set_linear_ip(regs, old_to);
		return 1;
	}

	/*
	 * Even though we decoded the basic block, the instruction stream
	 * never matched the given IP, either the TO or the IP got corrupted.
	 */
	return 0;
}

static inline u64 intel_hsw_weight(struct pebs_record_skl *pebs)
{
	if (pebs->tsx_tuning) {
		union hsw_tsx_tuning tsx = { .value = pebs->tsx_tuning };
		return tsx.cycles_last_block;
	}
	return 0;
}

static inline u64 intel_hsw_transaction(struct pebs_record_skl *pebs)
{
	u64 txn = (pebs->tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;

	/* For RTM XABORTs also log the abort code from AX */
	if ((txn & PERF_TXN_TRANSACTION) && (pebs->ax & 1))
		txn |= ((pebs->ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
	return txn;
}

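/*
 * Convert a raw PEBS record into perf_sample_data and a pt_regs set,
 * honouring the sample_type bits requested by the event.
 */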
static void setup_pebs_sample_data(struct perf_event *event,
				   struct pt_regs *iregs, void *__pebs,
				   struct perf_sample_data *data,
				   struct pt_regs *regs)
{
#define PERF_X86_EVENT_PEBS_HSW_PREC \
		(PERF_X86_EVENT_PEBS_ST_HSW | \
		 PERF_X86_EVENT_PEBS_LD_HSW | \
		 PERF_X86_EVENT_PEBS_NA_HSW)
	/*
	 * We cast to the biggest pebs_record but are careful not to
	 * unconditionally access the 'extra' entries.
	 */
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct pebs_record_skl *pebs = __pebs;
	u64 sample_type;
	int fll, fst, dsrc;
	int fl = event->hw.flags;

	if (pebs == NULL)
		return;

	sample_type = event->attr.sample_type;
	dsrc = sample_type & PERF_SAMPLE_DATA_SRC;

	fll = fl & PERF_X86_EVENT_PEBS_LDLAT;
	fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);

	perf_sample_data_init(data, 0, event->hw.last_period);

	data->period = event->hw.last_period;

	/*
	 * Use latency for weight (only available with PEBS-LL)
	 */
	if (fll && (sample_type & PERF_SAMPLE_WEIGHT))
		data->weight = pebs->lat;

	/*
	 * data.data_src encodes the data source
	 */
	if (dsrc) {
		u64 val = PERF_MEM_NA;
		if (fll)
			val = load_latency_data(pebs->dse);
		else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
			val = precise_datala_hsw(event, pebs->dse);
		else if (fst)
			val = precise_store_data(pebs->dse);
		data->data_src.val = val;
	}

	/*
	 * We use the interrupt regs as a base because the PEBS record does not
	 * contain a full regs set, specifically it seems to lack segment
	 * descriptors, which get used by things like user_mode().
	 *
	 * In the simple case fix up only the IP for PERF_SAMPLE_IP.
	 *
	 * We must however always use BP,SP from iregs for the unwinder to stay
	 * sane; the record BP,SP can point into thin air when the record is
	 * from a previous PMI context or an (I)RET happened between the record
	 * and PMI.
	 */
	*regs = *iregs;
	regs->flags = pebs->flags;
	set_linear_ip(regs, pebs->ip);

	if (sample_type & PERF_SAMPLE_REGS_INTR) {
		regs->ax = pebs->ax;
		regs->bx = pebs->bx;
		regs->cx = pebs->cx;
		regs->dx = pebs->dx;
		regs->si = pebs->si;
		regs->di = pebs->di;

		/*
		 * Per the above; only set BP,SP if we don't need callchains.
		 *
		 * XXX: does this make sense?
		 */
		if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
			regs->bp = pebs->bp;
			regs->sp = pebs->sp;
		}

		/*
		 * Preserve PERF_EFLAGS_VM from set_linear_ip().
		 */
		regs->flags = pebs->flags | (regs->flags & PERF_EFLAGS_VM);
#ifndef CONFIG_X86_32
		regs->r8 = pebs->r8;
		regs->r9 = pebs->r9;
		regs->r10 = pebs->r10;
		regs->r11 = pebs->r11;
		regs->r12 = pebs->r12;
		regs->r13 = pebs->r13;
		regs->r14 = pebs->r14;
		regs->r15 = pebs->r15;
#endif
	}

	if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) {
		regs->ip = pebs->real_ip;
		regs->flags |= PERF_EFLAGS_EXACT;
	} else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(regs))
		regs->flags |= PERF_EFLAGS_EXACT;
	else
		regs->flags &= ~PERF_EFLAGS_EXACT;

	if ((sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR)) &&
	    x86_pmu.intel_cap.pebs_format >= 1)
		data->addr = pebs->dla;

	if (x86_pmu.intel_cap.pebs_format >= 2) {
		/* Only set the TSX weight when there is no memory weight. */
		if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll)
			data->weight = intel_hsw_weight(pebs);

		if (sample_type & PERF_SAMPLE_TRANSACTION)
			data->txn = intel_hsw_transaction(pebs);
	}

	/*
	 * v3 supplies an accurate time stamp, so we use that
	 * for the sample's timestamp.
	 *
	 * We can only do this for the default trace clock.
	 */
	if (x86_pmu.intel_cap.pebs_format >= 3 &&
		event->attr.use_clockid == 0)
		data->time = native_sched_clock_from_tsc(pebs->tsc);

	if (has_branch_stack(event))
		data->br_stack = &cpuc->lbr_stack;
}

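/*
 * Find the next record in [base, top) that belongs to counter @bit.
 * Pre-v3 status words may have spurious bits set, so cross-check
 * against the currently enabled PEBS counters.
 */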
static inline void *
get_next_pebs_record_by_bit(void *base, void *top, int bit)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	void *at;
	u64 pebs_status;

	/*
	 * fmt0 does not have a status bitfield (does not use
	 * pebs_record_nhm format)
	 */
	if (x86_pmu.intel_cap.pebs_format < 1)
		return base;

	if (base == NULL)
		return NULL;

	for (at = base; at < top; at += x86_pmu.pebs_record_size) {
		struct pebs_record_nhm *p = at;

		if (test_bit(bit, (unsigned long *)&p->status)) {
			/* PEBS v3 has accurate status bits */
			if (x86_pmu.intel_cap.pebs_format >= 3)
				return at;

			if (p->status == (1 << bit))
				return at;

			/* clear non-PEBS bit and re-check */
			pebs_status = p->status & cpuc->pebs_enabled;
			pebs_status &= PEBS_COUNTER_MASK;
			if (pebs_status == (1 << bit))
				return at;
		}
	}
	return NULL;
}

static void __intel_pmu_pebs_event(struct perf_event *event,
				   struct pt_regs *iregs,
				   void *base, void *top,
				   int bit, int count)
{
	struct perf_sample_data data;
	struct pt_regs regs;
	void *at = get_next_pebs_record_by_bit(base, top, bit);

	if (!intel_pmu_save_and_restart(event) &&
	    !(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD))
		return;

	while (count > 1) {
		setup_pebs_sample_data(event, iregs, at, &data, &regs);
		perf_event_output(event, &data, &regs);
		at += x86_pmu.pebs_record_size;
		at = get_next_pebs_record_by_bit(at, top, bit);
		count--;
	}

	setup_pebs_sample_data(event, iregs, at, &data, &regs);

	/*
	 * All but the last records are processed.
	 * The last one is left to be able to call the overflow handler.
	 */
	if (perf_event_overflow(event, &data, &regs)) {
		x86_pmu_stop(event, 0);
		return;
	}

}

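/*
 * Drain path for PEBS format 0: a single PEBS event on PMC0 and no
 * status word in the records.
 */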
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event = cpuc->events[0]; /* PMC0 only */
	struct pebs_record_core *at, *top;
	int n;

	if (!x86_pmu.pebs_active)
		return;

	at  = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;

	/*
	 * Whatever else happens, drain the thing
	 */
	ds->pebs_index = ds->pebs_buffer_base;

	if (!test_bit(0, cpuc->active_mask))
		return;

	WARN_ON_ONCE(!event);

	if (!event->attr.precise_ip)
		return;

	n = top - at;
	if (n <= 0)
		return;

	__intel_pmu_pebs_event(event, iregs, at, top, 0, n);
}

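/*
 * Drain path for PEBS format 1 and later: attribute each record to
 * its counter via the status word, then emit the samples per event.
 */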
static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event;
	void *base, *at, *top;
	short counts[MAX_PEBS_EVENTS] = {};
	short error[MAX_PEBS_EVENTS] = {};
	int bit, i;

	if (!x86_pmu.pebs_active)
		return;

	base = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;

	ds->pebs_index = ds->pebs_buffer_base;

	if (unlikely(base >= top))
		return;

	for (at = base; at < top; at += x86_pmu.pebs_record_size) {
		struct pebs_record_nhm *p = at;
		u64 pebs_status;

		pebs_status = p->status & cpuc->pebs_enabled;
		pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1;

		/* PEBS v3 has more accurate status bits */
		if (x86_pmu.intel_cap.pebs_format >= 3) {
			for_each_set_bit(bit, (unsigned long *)&pebs_status,
					 x86_pmu.max_pebs_events)
				counts[bit]++;

			continue;
		}

		/*
		 * On some CPUs the PEBS status can be zero when PEBS is
		 * racing with clearing of GLOBAL_STATUS.
		 *
		 * Normally we would drop that record, but in the
		 * case when there is only a single active PEBS event
		 * we can assume it's for that event.
		 */
		if (!pebs_status && cpuc->pebs_enabled &&
			!(cpuc->pebs_enabled & (cpuc->pebs_enabled-1)))
			pebs_status = cpuc->pebs_enabled;

		bit = find_first_bit((unsigned long *)&pebs_status,
					x86_pmu.max_pebs_events);
		if (bit >= x86_pmu.max_pebs_events)
			continue;

		/*
		 * The PEBS hardware does not deal well with events that happen
		 * close together, ending up with multiple status bits set;
		 * this should be rare.
		 *
		 * If those events include one PEBS and several non-PEBS
		 * events, the PEBS record is unaffected and is handled
		 * normally (slow path).
		 *
		 * If they include two or more PEBS events, their records can
		 * be collapsed into a single one, making it impossible to
		 * reconstruct all the events that caused the record. This is
		 * called a collision, and such records are dropped.
		 */
		if (p->status != (1ULL << bit)) {
			for_each_set_bit(i, (unsigned long *)&pebs_status,
					 x86_pmu.max_pebs_events)
				error[i]++;
			continue;
		}

		counts[bit]++;
	}

	for (bit = 0; bit < x86_pmu.max_pebs_events; bit++) {
		if ((counts[bit] == 0) && (error[bit] == 0))
			continue;

		event = cpuc->events[bit];
		if (WARN_ON_ONCE(!event))
			continue;

		if (WARN_ON_ONCE(!event->attr.precise_ip))
			continue;

		/* Log the number of dropped samples. */
		if (error[bit]) {
			perf_log_lost_samples(event, error[bit]);

			if (perf_event_account_interrupt(event))
				x86_pmu_stop(event, 0);
		}

		if (counts[bit]) {
			__intel_pmu_pebs_event(event, iregs, base,
					       top, bit, counts[bit]);
		}
	}
}

/*
 * BTS, PEBS probe and setup
 */

void __init intel_ds_init(void)
{
	/*
	 * No support for 32bit formats
	 */
	if (!boot_cpu_has(X86_FEATURE_DTES64))
		return;

	x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
	x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
	x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
	if (x86_pmu.pebs) {
		char pebs_type = x86_pmu.intel_cap.pebs_trap ?  '+' : '-';
		int format = x86_pmu.intel_cap.pebs_format;

		switch (format) {
		case 0:
			pr_cont("PEBS fmt0%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
			/*
			 * Using >PAGE_SIZE buffers makes the WRMSR to
			 * PERF_GLOBAL_CTRL in intel_pmu_enable_all()
			 * mysteriously hang on Core2.
			 *
			 * As a workaround, we don't do this.
			 */
			x86_pmu.pebs_buffer_size = PAGE_SIZE;
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
			break;

		case 1:
1514
			pr_cont("PEBS fmt1%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			break;

		case 2:
			pr_cont("PEBS fmt2%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_hsw);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			break;

		case 3:
			pr_cont("PEBS fmt3%c, ", pebs_type);
			x86_pmu.pebs_record_size =
						sizeof(struct pebs_record_skl);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			x86_pmu.free_running_flags |= PERF_SAMPLE_TIME;
			break;

		default:
			pr_cont("no PEBS fmt%d%c, ", format, pebs_type);
			x86_pmu.pebs = 0;
		}
	}
}

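/*
 * Re-program MSR_IA32_DS_AREA with this CPU's debug_store pointer after
 * the MSR contents have been lost or clobbered.
 */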
void perf_restore_debug_store(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds);
}