/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/slab.h>
#include <linux/pci.h>
#include <asm/apicdef.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include <linux/perf_event.h>
#include "../perf_event.h"

#define UNCORE_PMU_NAME_LEN		32
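/*
 * Uncore PMUs have no usable overflow interrupt; a per-box hrtimer
 * periodically polls the counters at these intervals so that they
 * cannot wrap unnoticed.
 */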
#define UNCORE_PMU_HRTIMER_INTERVAL	(60LL * NSEC_PER_SEC)
#define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC)

#define UNCORE_FIXED_EVENT		0xff
#define UNCORE_PMC_IDX_MAX_GENERIC	8
#define UNCORE_PMC_IDX_MAX_FIXED	1
#define UNCORE_PMC_IDX_MAX_FREERUNNING	1
#define UNCORE_PMC_IDX_FIXED		UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_FREERUNNING	(UNCORE_PMC_IDX_FIXED + \
					UNCORE_PMC_IDX_MAX_FIXED)
#define UNCORE_PMC_IDX_MAX		(UNCORE_PMC_IDX_FREERUNNING + \
					UNCORE_PMC_IDX_MAX_FREERUNNING)

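/*
 * Pack a PCI uncore device reference into one int:
 * bits 31-24 device, 23-16 function, 15-8 box type, 7-0 box index.
 */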
#define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx)	\
		((dev << 24) | (func << 16) | (type << 8) | idx)
#define UNCORE_PCI_DEV_DATA(type, idx)	((type << 8) | idx)
#define UNCORE_PCI_DEV_DEV(data)	((data >> 24) & 0xff)
#define UNCORE_PCI_DEV_FUNC(data)	((data >> 16) & 0xff)
#define UNCORE_PCI_DEV_TYPE(data)	((data >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data)	(data & 0xff)
#define UNCORE_EXTRA_PCI_DEV		0xff
#define UNCORE_EXTRA_PCI_DEV_MAX	4

#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)

struct pci_extra_dev {
	struct pci_dev *dev[UNCORE_EXTRA_PCI_DEV_MAX];
};

struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;
struct freerunning_counters;

struct intel_uncore_type {
	const char *name;
	int num_counters;		/* generic counters per box */
	int num_boxes;			/* boxes of this type */
	int perf_ctr_bits;		/* generic counter width */
	int fixed_ctr_bits;		/* fixed counter width */
	int num_freerunning_types;
	unsigned perf_ctr;		/* first generic counter reg */
	unsigned event_ctl;		/* first event control reg */
	unsigned event_mask;
	unsigned event_mask_ext;
	unsigned fixed_ctr;
	unsigned fixed_ctl;
	unsigned box_ctl;
	union {
		unsigned msr_offset;	/* reg stride between boxes */
		unsigned mmio_offset;
	};
	unsigned num_shared_regs:8;
	unsigned single_fixed:1;
	unsigned pair_ctr_ctl:1;	/* ctl/ctr regs come in adjacent pairs */
	unsigned *msr_offsets;		/* per-box offsets, override msr_offset */
	struct event_constraint unconstrainted;	/* default constraint if none match */
	struct event_constraint *constraints;
	struct intel_uncore_pmu *pmus;
	struct intel_uncore_ops *ops;
	struct uncore_event_desc *event_descs;
	struct freerunning_counters *freerunning;
	const struct attribute_group *attr_groups[4];
	struct pmu *pmu; /* for custom pmu ops */
};

#define pmu_group attr_groups[0]
#define format_group attr_groups[1]
#define events_group attr_groups[2]

struct intel_uncore_ops {
	void (*init_box)(struct intel_uncore_box *);
	void (*exit_box)(struct intel_uncore_box *);
	void (*disable_box)(struct intel_uncore_box *);
	void (*enable_box)(struct intel_uncore_box *);
	void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
	void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
	u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
	int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
	struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
						   struct perf_event *);
	void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
};

struct intel_uncore_pmu {
	struct pmu			pmu;
	char				name[UNCORE_PMU_NAME_LEN];
	int				pmu_idx;
	int				func_id;
	bool				registered;
	atomic_t			activeboxes;
	struct intel_uncore_type	*type;
	struct intel_uncore_box		**boxes;
};

struct intel_uncore_extra_reg {
	raw_spinlock_t lock;
	u64 config, config1, config2;
	atomic_t ref;
};

struct intel_uncore_box {
	int pci_phys_id;
	int pkgid;	/* Logical package ID */
	int n_active;	/* number of active events */
	int n_events;
	int cpu;	/* cpu to collect events */
	unsigned long flags;
	atomic_t refcnt;
	struct perf_event *events[UNCORE_PMC_IDX_MAX];
	struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
	struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
	unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	u64 tags[UNCORE_PMC_IDX_MAX];
	struct pci_dev *pci_dev;
	struct intel_uncore_pmu *pmu;
	u64 hrtimer_duration; /* hrtimer timeout for this box */
	struct hrtimer hrtimer;
	struct list_head list;
	struct list_head active_list;
	void __iomem *io_addr;
	struct intel_uncore_extra_reg shared_regs[];
};

#define UNCORE_BOX_FLAG_INITIATED	0
#define UNCORE_BOX_FLAG_CTL_OFFS8	1 /* event config registers are 8 bytes apart */

struct uncore_event_desc {
	struct kobj_attribute attr;
	const char *config;
};

struct freerunning_counters {
	unsigned int counter_base;	/* address of the first counter */
	unsigned int counter_offset;	/* stride between counters of a type */
	unsigned int box_offset;	/* fixed stride between boxes */
	unsigned int num_counters;	/* counters of this type per box */
	unsigned int bits;		/* counter width in bits */
	unsigned *box_offsets;		/* per-box offsets, override box_offset */
};

struct pci2phy_map {
	struct list_head list;
	int segment;
	int pbus_to_physid[256];
};

struct pci2phy_map *__find_pci2phy_map(int segment);
int uncore_pcibus_to_physid(struct pci_bus *bus);

ssize_t uncore_event_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf);

#define INTEL_UNCORE_EVENT_DESC(_name, _config)			\
{								\
	.attr	= __ATTR(_name, 0444, uncore_event_show, NULL),	\
	.config	= _config,					\
}

#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)			\
static ssize_t __uncore_##_var##_show(struct kobject *kobj,		\
				struct kobj_attribute *attr,		\
				char *page)				\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct kobj_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
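
/*
 * Illustrative usage (actual definitions live in the uncore .c files), e.g.:
 *
 *	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff");
 *	DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
 *
 * The first exposes a named event string through sysfs; the second
 * defines format_attr_event, whose show() method prints "config:0-7".
 */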

static inline bool uncore_pmc_fixed(int idx)
{
	return idx == UNCORE_PMC_IDX_FIXED;
}

static inline bool uncore_pmc_freerunning(int idx)
{
	return idx == UNCORE_PMC_IDX_FREERUNNING;
}

static inline
unsigned int uncore_mmio_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl +
	       box->pmu->type->mmio_offset * box->pmu->pmu_idx;
}

static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl;
}

static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctl;
}

static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr;
}

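/*
 * PCI event control registers are normally laid out 4 bytes apart; boxes
 * flagged UNCORE_BOX_FLAG_CTL_OFFS8 space them 8 bytes apart instead.
 */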
static inline
unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags))
		return idx * 8 + box->pmu->type->event_ctl;

	return idx * 4 + box->pmu->type->event_ctl;
}

static inline
unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return idx * 8 + box->pmu->type->perf_ctr;
}

static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
{
	struct intel_uncore_pmu *pmu = box->pmu;
	return pmu->type->msr_offsets ?
		pmu->type->msr_offsets[pmu->pmu_idx] :
		pmu->type->msr_offset * pmu->pmu_idx;
}

static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->box_ctl)
		return 0;
	return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->fixed_ctl)
		return 0;
	return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
}


/*
 * The uncore documentation does not assign an event-code to free running
 * counters, so perf defines its own events for them. The events are
 * encoded as event-code + umask-code.
 *
 * The event-code for all free running counters is 0xff, which is the same
 * as the fixed counters.
 *
 * The umask-code distinguishes a fixed counter from a free running
 * counter, and the different types of free running counters.
 * - For fixed counters, the umask-code is 0x0X.
 *   X indicates the index of the fixed counter, which starts from 0.
 * - For free running counters, the umask-code uses the rest of the space.
 *   It has the format 0xXY.
 *   X stands for the type of free running counter, which starts from 1.
 *   Y stands for the index of free running counters of the same type,
 *   which starts from 0.
 *
 * For example, there are three types of IIO free running counters on
 * Skylake server: IO CLOCKS counters, BANDWIDTH counters and UTILIZATION
 * counters.
 * The event-code for all the free running counters is 0xff.
 * 'ioclk' is the first counter of IO CLOCKS. IO CLOCKS is the first type,
 * whose umask-code starts from 0x10.
 * So 'ioclk' is encoded as event=0xff,umask=0x10.
 * 'bw_in_port2' is the third counter of the BANDWIDTH counters. BANDWIDTH
 * is the second type, whose umask-code starts from 0x20.
 * So 'bw_in_port2' is encoded as event=0xff,umask=0x22.
 */
static inline unsigned int uncore_freerunning_idx(u64 config)
{
	return ((config >> 8) & 0xf);
}

#define UNCORE_FREERUNNING_UMASK_START		0x10

static inline unsigned int uncore_freerunning_type(u64 config)
{
	return ((((config >> 8) - UNCORE_FREERUNNING_UMASK_START) >> 4) & 0xf);
}
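
/*
 * Worked example: 'bw_in_port2' above has config 0x22ff (event=0xff,
 * umask=0x22), so uncore_freerunning_idx() returns 0x22 & 0xf = 2 and
 * uncore_freerunning_type() returns ((0x22 - 0x10) >> 4) & 0xf = 1,
 * i.e. the third counter of the second free running type (BANDWIDTH).
 */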

static inline
unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
					struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);
	unsigned int idx = uncore_freerunning_idx(event->hw.config);
	struct intel_uncore_pmu *pmu = box->pmu;

	return pmu->type->freerunning[type].counter_base +
	       pmu->type->freerunning[type].counter_offset * idx +
	       (pmu->type->freerunning[type].box_offsets ?
	        pmu->type->freerunning[type].box_offsets[pmu->pmu_idx] :
	        pmu->type->freerunning[type].box_offset * pmu->pmu_idx);
}

static inline
unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
	return box->pmu->type->event_ctl +
		(box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		uncore_msr_box_offset(box);
}

static inline
unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return box->pmu->type->perf_ctr +
		(box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		uncore_msr_box_offset(box);
}

static inline
unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_fixed_ctl(box);
	else
		return uncore_msr_fixed_ctl(box);
}

static inline
unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_fixed_ctr(box);
	else
		return uncore_msr_fixed_ctr(box);
}

static inline
unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_event_ctl(box, idx);
	else
		return uncore_msr_event_ctl(box, idx);
}

static inline
unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_perf_ctr(box, idx);
	else
		return uncore_msr_perf_ctr(box, idx);
}

static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->perf_ctr_bits;
}

static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr_bits;
}

static inline
unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
				     struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);

	return box->pmu->type->freerunning[type].bits;
}

static inline int uncore_num_freerunning(struct intel_uncore_box *box,
					 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);

	return box->pmu->type->freerunning[type].num_counters;
}

static inline int uncore_num_freerunning_types(struct intel_uncore_box *box,
					       struct perf_event *event)
{
	return box->pmu->type->num_freerunning_types;
}

static inline bool check_valid_freerunning_event(struct intel_uncore_box *box,
						 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);
	unsigned int idx = uncore_freerunning_idx(event->hw.config);

	return (type < uncore_num_freerunning_types(box, event)) &&
	       (idx < uncore_num_freerunning(box, event));
}

static inline int uncore_num_counters(struct intel_uncore_box *box)
{
	return box->pmu->type->num_counters;
}

static inline bool is_freerunning_event(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	return ((cfg & UNCORE_FIXED_EVENT) == UNCORE_FIXED_EVENT) &&
	       (((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START);
}

/* Check and reject invalid config */
static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box,
					       struct perf_event *event)
{
	if (is_freerunning_event(event))
		return 0;

	return -EINVAL;
}

static inline void uncore_disable_event(struct intel_uncore_box *box,
				struct perf_event *event)
{
	box->pmu->type->ops->disable_event(box, event);
}

static inline void uncore_enable_event(struct intel_uncore_box *box,
				struct perf_event *event)
{
	box->pmu->type->ops->enable_event(box, event);
}

static inline u64 uncore_read_counter(struct intel_uncore_box *box,
				struct perf_event *event)
{
	return box->pmu->type->ops->read_counter(box, event);
}

static inline void uncore_box_init(struct intel_uncore_box *box)
{
	if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->init_box)
			box->pmu->type->ops->init_box(box);
	}
}

static inline void uncore_box_exit(struct intel_uncore_box *box)
{
	if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->exit_box)
			box->pmu->type->ops->exit_box(box);
	}
}

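/* A fake box (negative pkgid) is used only to validate event groups. */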
static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{
	return (box->pkgid < 0);
}

static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}

static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
	return event->pmu_private;
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
void uncore_mmio_exit_box(struct intel_uncore_box *box);
u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
			     struct perf_event *event);
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_event_start(struct perf_event *event, int flags);
void uncore_pmu_event_stop(struct perf_event *event, int flags);
int uncore_pmu_event_add(struct perf_event *event, int flags);
void uncore_pmu_event_del(struct perf_event *event, int flags);
void uncore_pmu_event_read(struct perf_event *event);
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);

extern struct intel_uncore_type **uncore_msr_uncores;
extern struct intel_uncore_type **uncore_pci_uncores;
extern struct intel_uncore_type **uncore_mmio_uncores;
extern struct pci_driver *uncore_pci_driver;
extern raw_spinlock_t pci2phy_map_lock;
extern struct list_head pci2phy_map_head;
extern struct pci_extra_dev *uncore_extra_pci_dev;
extern struct event_constraint uncore_constraint_empty;

/* uncore_snb.c */
int snb_uncore_pci_init(void);
int ivb_uncore_pci_init(void);
int hsw_uncore_pci_init(void);
int bdw_uncore_pci_init(void);
int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
void icl_uncore_cpu_init(void);
int snb_pci2phy_map_init(int devid);

/* uncore_snbep.c */
int snbep_uncore_pci_init(void);
void snbep_uncore_cpu_init(void);
int ivbep_uncore_pci_init(void);
void ivbep_uncore_cpu_init(void);
int hswep_uncore_pci_init(void);
void hswep_uncore_cpu_init(void);
int bdx_uncore_pci_init(void);
void bdx_uncore_cpu_init(void);
int knl_uncore_pci_init(void);
void knl_uncore_cpu_init(void);
int skx_uncore_pci_init(void);
void skx_uncore_cpu_init(void);
int snr_uncore_pci_init(void);
void snr_uncore_cpu_init(void);
void snr_uncore_mmio_init(void);
int icx_uncore_pci_init(void);
void icx_uncore_cpu_init(void);
void icx_uncore_mmio_init(void);

/* uncore_nhmex.c */
void nhmex_uncore_cpu_init(void);