/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/tick.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/cgroup.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/compat.h>
#include <linux/bpf.h>
#include <linux/filter.h>

#include "internal.h"

#include <asm/irq_regs.h>

static struct workqueue_struct *perf_wq;

struct remote_function_call {
	struct task_struct	*p;
	int			(*func)(void *info);
	void			*info;
	int			ret;
};

static void remote_function(void *data)
{
	struct remote_function_call *tfc = data;
	struct task_struct *p = tfc->p;

	if (p) {
		tfc->ret = -EAGAIN;
		if (task_cpu(p) != smp_processor_id() || !task_curr(p))
			return;
	}

	tfc->ret = tfc->func(tfc->info);
}

/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:		the task to evaluate
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, which just calls the function directly
 *
 * returns: @func return value, or
 *	    -ESRCH  - when the process isn't running
 *	    -EAGAIN - when the process moved away
 */
static int
task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
{
	struct remote_function_call data = {
		.p	= p,
		.func	= func,
		.info	= info,
		.ret	= -ESRCH, /* No such (running) process */
	};

	if (task_curr(p))
		smp_call_function_single(task_cpu(p), remote_function, &data, 1);

	return data.ret;
}

/**
 * cpu_function_call - call a function on the cpu
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
{
	struct remote_function_call data = {
		.p	= NULL,
		.func	= func,
		.info	= info,
		.ret	= -ENXIO, /* No such CPU */
	};

	smp_call_function_single(cpu, remote_function, &data, 1);

	return data.ret;
}
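
/*
 * Illustrative sketch, not part of the original file: one way a caller
 * could use cpu_function_call().  The helper names below are made up
 * purely for demonstration and the block is compiled out.
 */
#if 0
static int example_read_cpu(void *info)
{
	*(int *)info = smp_processor_id();	/* runs on the target CPU */
	return 0;
}

static void example_cpu_function_call(int cpu)
{
	int id = -1;

	/* -ENXIO is returned if @cpu is offline, else the callback's result. */
	if (!cpu_function_call(cpu, example_read_cpu, &id))
		pr_debug("callback ran on CPU %d\n", id);
}
#endif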

#define EVENT_OWNER_KERNEL ((void *) -1)

static bool is_kernel_event(struct perf_event *event)
{
	return event->owner == EVENT_OWNER_KERNEL;
}

#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
		       PERF_FLAG_FD_OUTPUT  |\
		       PERF_FLAG_PID_CGROUP |\
		       PERF_FLAG_FD_CLOEXEC)

/*
 * branch priv levels that need permission checks
 */
#define PERF_SAMPLE_BRANCH_PERM_PLM \
	(PERF_SAMPLE_BRANCH_KERNEL |\
	 PERF_SAMPLE_BRANCH_HV)

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

/*
 * perf_sched_events : >0 events exist
 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
 */
struct static_key_deferred perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(int, perf_sched_cb_usages);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
static atomic_t nr_freq_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;

/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */

/*
 * max perf event sample rate
 */
#define DEFAULT_MAX_SAMPLE_RATE		100000
#define DEFAULT_SAMPLE_PERIOD_NS	(NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
#define DEFAULT_CPU_TIME_MAX_PERCENT	25

int sysctl_perf_event_sample_rate __read_mostly	= DEFAULT_MAX_SAMPLE_RATE;

static int max_samples_per_tick __read_mostly	= DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
static int perf_sample_period_ns __read_mostly	= DEFAULT_SAMPLE_PERIOD_NS;

static int perf_sample_allowed_ns __read_mostly =
	DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;

void update_perf_cpu_limits(void)
{
	u64 tmp = perf_sample_period_ns;

	tmp *= sysctl_perf_cpu_time_max_percent;
	do_div(tmp, 100);
	ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
}
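
/*
 * Worked example (illustrative, using the defaults above): with
 * DEFAULT_MAX_SAMPLE_RATE = 100000 the sample period is
 * NSEC_PER_SEC / 100000 = 10000 ns, so a 25% CPU budget allows
 * 10000 * 25 / 100 = 2500 ns of handler time per sample.
 */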

static int perf_rotate_context(struct perf_cpu_context *cpuctx);

int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
	update_perf_cpu_limits();

	return 0;
}

int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;

int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	update_perf_cpu_limits();

	return 0;
}

/*
 * perf samples are done in some very critical code paths (NMIs).
 * If they take too much CPU time, the system can lock up and not
 * get any real work done.  This will drop the sample rate when
 * we detect that events are taking too long.
 */
#define NR_ACCUMULATED_SAMPLES 128
static DEFINE_PER_CPU(u64, running_sample_length);

static void perf_duration_warn(struct irq_work *w)
{
	u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
	u64 avg_local_sample_len;
	u64 local_samples_len;

	local_samples_len = __this_cpu_read(running_sample_length);
	avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;

	printk_ratelimited(KERN_WARNING
			"perf interrupt took too long (%lld > %lld), lowering "
			"kernel.perf_event_max_sample_rate to %d\n",
			avg_local_sample_len, allowed_ns >> 1,
			sysctl_perf_event_sample_rate);
}

static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);

void perf_sample_event_took(u64 sample_len_ns)
{
	u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
	u64 avg_local_sample_len;
	u64 local_samples_len;

	if (allowed_ns == 0)
		return;

	/* decay the counter by 1 average sample */
	local_samples_len = __this_cpu_read(running_sample_length);
	local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
	local_samples_len += sample_len_ns;
	__this_cpu_write(running_sample_length, local_samples_len);

	/*
	 * note: this will be biased artificially low until we have
	 * seen NR_ACCUMULATED_SAMPLES.  Doing it this way keeps us
	 * from having to maintain a count.
	 */
	avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;

	if (avg_local_sample_len <= allowed_ns)
		return;

	if (max_samples_per_tick <= 1)
		return;

	max_samples_per_tick = DIV_ROUND_UP(max_samples_per_tick, 2);
	sysctl_perf_event_sample_rate = max_samples_per_tick * HZ;
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;

	update_perf_cpu_limits();

	if (!irq_work_queue(&perf_duration_work)) {
		early_printk("perf interrupt took too long (%lld > %lld), lowering "
			     "kernel.perf_event_max_sample_rate to %d\n",
			     avg_local_sample_len, allowed_ns >> 1,
			     sysctl_perf_event_sample_rate);
	}
}
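
/*
 * Illustrative numbers (not from the original file): with
 * NR_ACCUMULATED_SAMPLES = 128, each call above replaces roughly 1/128th
 * of running_sample_length with the new sample, so the counter tracks
 * about 128 * (average sample length).  Dividing by 128 and comparing
 * against perf_sample_allowed_ns therefore gives a cheap moving average
 * without maintaining a separate sample count.
 */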

static atomic64_t perf_event_id;

static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type);

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type,
			     struct task_struct *task);

static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);

void __weak perf_event_print_debug(void)	{ }

extern __weak const char *perf_pmu_name(void)
{
	return "pmu";
}

static inline u64 perf_clock(void)
{
	return local_clock();
}

static inline u64 perf_event_clock(struct perf_event *event)
{
	return event->clock();
}

static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}

static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
			  struct perf_event_context *ctx)
{
	raw_spin_lock(&cpuctx->ctx.lock);
	if (ctx)
		raw_spin_lock(&ctx->lock);
}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
			    struct perf_event_context *ctx)
{
	if (ctx)
		raw_spin_unlock(&ctx->lock);
	raw_spin_unlock(&cpuctx->ctx.lock);
}

#ifdef CONFIG_CGROUP_PERF

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/* @event doesn't care about cgroup */
	if (!event->cgrp)
		return true;

	/* wants specific cgroup scope but @cpuctx isn't associated with any */
	if (!cpuctx->cgrp)
		return false;

	/*
	 * Cgroup scoping is recursive.  An event enabled for a cgroup is
	 * also enabled for all its descendant cgroups.  If @cpuctx's
	 * cgroup is a descendant of @event's (the test covers identity
	 * case), it's a match.
	 */
	return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
				    event->cgrp->css.cgroup);
}

static inline void perf_detach_cgroup(struct perf_event *event)
{
	css_put(&event->cgrp->css);
	event->cgrp = NULL;
}

static inline int is_cgroup_event(struct perf_event *event)
{
	return event->cgrp != NULL;
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	struct perf_cgroup_info *t;

	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	return t->time;
}

static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
{
	struct perf_cgroup_info *info;
	u64 now;

	now = perf_clock();

	info = this_cpu_ptr(cgrp->info);

	info->time += now - info->timestamp;
	info->timestamp = now;
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
	struct perf_cgroup *cgrp_out = cpuctx->cgrp;
	if (cgrp_out)
		__update_cgrp_time(cgrp_out);
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
	struct perf_cgroup *cgrp;

	/*
	 * ensure we access cgroup data only when needed and
	 * when we know the cgroup is pinned (css_get)
	 */
	if (!is_cgroup_event(event))
		return;

	cgrp = perf_cgroup_from_task(current);
	/*
	 * Do not update time when cgroup is not active
	 */
	if (cgrp == event->cgrp)
		__update_cgrp_time(event->cgrp);
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
	struct perf_cgroup *cgrp;
	struct perf_cgroup_info *info;

	/*
	 * ctx->lock held by caller
	 * ensure we do not access cgroup data
	 * unless we have the cgroup pinned (css_get)
	 */
	if (!task || !ctx->nr_cgroups)
		return;

	cgrp = perf_cgroup_from_task(task);
	info = this_cpu_ptr(cgrp->info);
	info->timestamp = ctx->timestamp;
}

#define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
#define PERF_CGROUP_SWIN	0x2 /* cgroup switch in events based on task */

/*
 * reschedule events based on the cgroup constraint of task.
 *
 * mode SWOUT : schedule out everything
 * mode SWIN : schedule in based on cgroup for next
 */
void perf_cgroup_switch(struct task_struct *task, int mode)
{
	struct perf_cpu_context *cpuctx;
	struct pmu *pmu;
	unsigned long flags;

	/*
	 * disable interrupts to avoid getting nr_cgroup
	 * changes via __perf_event_disable(). Also
	 * avoids preemption.
	 */
	local_irq_save(flags);

	/*
	 * we reschedule only in the presence of cgroup
	 * constrained events.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
		if (cpuctx->unique_pmu != pmu)
			continue; /* ensure we process each cpuctx once */

		/*
		 * perf_cgroup_events says at least one
		 * context on this CPU has cgroup events.
		 *
		 * ctx->nr_cgroups reports the number of cgroup
		 * events for a context.
		 */
		if (cpuctx->ctx.nr_cgroups > 0) {
			perf_ctx_lock(cpuctx, cpuctx->task_ctx);
			perf_pmu_disable(cpuctx->ctx.pmu);

			if (mode & PERF_CGROUP_SWOUT) {
				cpu_ctx_sched_out(cpuctx, EVENT_ALL);
				/*
				 * must not be done before ctxswout due
				 * to event_filter_match() in event_sched_out()
				 */
				cpuctx->cgrp = NULL;
			}

			if (mode & PERF_CGROUP_SWIN) {
				WARN_ON_ONCE(cpuctx->cgrp);
				/*
				 * set cgrp before ctxsw in to allow
				 * event_filter_match() to not have to pass
				 * task around
				 */
				cpuctx->cgrp = perf_cgroup_from_task(task);
				cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
			}
			perf_pmu_enable(cpuctx->ctx.pmu);
			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
		}
	}

	rcu_read_unlock();

	local_irq_restore(flags);
}
536 537
static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
S
Stephane Eranian 已提交
538
{
539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/*
	 * next is NULL when called from perf_event_enable_on_exec()
	 * that will systematically cause a cgroup_switch()
	 */
	if (next)
		cgrp2 = perf_cgroup_from_task(next);

	/*
	 * only schedule out current cgroup events if we know
	 * that we are switching to a different cgroup. Otherwise,
	 * do no touch the cgroup events.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
S
Stephane Eranian 已提交
561 562
}

563 564
static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
S
Stephane Eranian 已提交
565
{
566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/* prev can never be NULL */
	cgrp2 = perf_cgroup_from_task(prev);

	/*
	 * only need to schedule in cgroup events if we are changing
	 * cgroup during ctxsw. Cgroup events were not scheduled
	 * out of ctxsw out if that was not the case.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWIN);
S
Stephane Eranian 已提交
584 585 586 587 588 589 590 591
}

static inline int perf_cgroup_connect(int fd, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	struct perf_cgroup *cgrp;
	struct cgroup_subsys_state *css;
592 593
	struct fd f = fdget(fd);
	int ret = 0;
S
Stephane Eranian 已提交
594

595
	if (!f.file)
S
Stephane Eranian 已提交
596 597
		return -EBADF;

A
Al Viro 已提交
598
	css = css_tryget_online_from_dir(f.file->f_path.dentry,
599
					 &perf_event_cgrp_subsys);
600 601 602 603
	if (IS_ERR(css)) {
		ret = PTR_ERR(css);
		goto out;
	}
S
Stephane Eranian 已提交
604 605 606 607 608 609 610 611 612 613 614 615 616

	cgrp = container_of(css, struct perf_cgroup, css);
	event->cgrp = cgrp;

	/*
	 * all events in a group must monitor
	 * the same cgroup because a task belongs
	 * to only one perf cgroup at a time
	 */
	if (group_leader && group_leader->cgrp != cgrp) {
		perf_detach_cgroup(event);
		ret = -EINVAL;
	}
617
out:
618
	fdput(f);
S
Stephane Eranian 已提交
619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691
	return ret;
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
	struct perf_cgroup_info *t;
	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	event->shadow_ctx_time = now - t->timestamp;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
	/*
	 * when the current task's perf cgroup does not match
	 * the event's, we need to remember to call the
	 * perf_mark_enable() function the first time a task with
	 * a matching perf cgroup is scheduled in.
	 */
	if (is_cgroup_event(event) && !perf_cgroup_match(event))
		event->cgrp_defer_enabled = 1;
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
	struct perf_event *sub;
	u64 tstamp = perf_event_time(event);

	if (!event->cgrp_defer_enabled)
		return;

	event->cgrp_defer_enabled = 0;

	event->tstamp_enabled = tstamp - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
			sub->cgrp_defer_enabled = 0;
		}
	}
}
#else /* !CONFIG_CGROUP_PERF */

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	return true;
}

static inline void perf_detach_cgroup(struct perf_event *event)
{}

static inline int is_cgroup_event(struct perf_event *event)
{
	return 0;
}

static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
{
	return 0;
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
}

692 693
static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
S
Stephane Eranian 已提交
694 695 696
{
}

697 698
static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
S
Stephane Eranian 已提交
699 700 701 702 703 704 705 706 707 708 709
{
}

static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	return -EINVAL;
}

static inline void
710 711
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
S
Stephane Eranian 已提交
712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741
{
}

void
perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	return 0;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
}
#endif

742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804
/*
 * set default to be dependent on timer tick just
 * like original code
 */
#define PERF_CPU_HRTIMER (1000 / HZ)
/*
 * function must be called with interrupts disbled
 */
static enum hrtimer_restart perf_cpu_hrtimer_handler(struct hrtimer *hr)
{
	struct perf_cpu_context *cpuctx;
	enum hrtimer_restart ret = HRTIMER_NORESTART;
	int rotations = 0;

	WARN_ON(!irqs_disabled());

	cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);

	rotations = perf_rotate_context(cpuctx);

	/*
	 * arm timer if needed
	 */
	if (rotations) {
		hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
		ret = HRTIMER_RESTART;
	}

	return ret;
}

/* CPU is going down */
void perf_cpu_hrtimer_cancel(int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct pmu *pmu;
	unsigned long flags;

	if (WARN_ON(cpu != smp_processor_id()))
		return;

	local_irq_save(flags);

	rcu_read_lock();

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

		if (pmu->task_ctx_nr == perf_sw_context)
			continue;

		hrtimer_cancel(&cpuctx->hrtimer);
	}

	rcu_read_unlock();

	local_irq_restore(flags);
}

static void __perf_cpu_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
{
	struct hrtimer *hr = &cpuctx->hrtimer;
	struct pmu *pmu = cpuctx->ctx.pmu;
805
	int timer;
806 807 808 809 810

	/* no multiplexing needed for SW PMU */
	if (pmu->task_ctx_nr == perf_sw_context)
		return;

811 812 813 814 815 816 817 818 819
	/*
	 * check default is sane, if not set then force to
	 * default interval (1/tick)
	 */
	timer = pmu->hrtimer_interval_ms;
	if (timer < 1)
		timer = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;

	cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841

	hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	hr->function = perf_cpu_hrtimer_handler;
}
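
/*
 * Illustrative arithmetic (not from the original file): PERF_CPU_HRTIMER
 * is 1000/HZ milliseconds, i.e. one tick.  With HZ == 250 that is 4 ms,
 * so a PMU that leaves hrtimer_interval_ms unset ends up with
 * cpuctx->hrtimer_interval = ns_to_ktime(4 * NSEC_PER_MSEC).
 */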

static void perf_cpu_hrtimer_restart(struct perf_cpu_context *cpuctx)
{
	struct hrtimer *hr = &cpuctx->hrtimer;
	struct pmu *pmu = cpuctx->ctx.pmu;

	/* not for SW PMU */
	if (pmu->task_ctx_nr == perf_sw_context)
		return;

	if (hrtimer_active(hr))
		return;

	if (!hrtimer_callback_running(hr))
		__hrtimer_start_range_ns(hr, cpuctx->hrtimer_interval,
					 0, HRTIMER_MODE_REL_PINNED, 0);
}

void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!--(*count))
		pmu->pmu_enable(pmu);
}

static DEFINE_PER_CPU(struct list_head, active_ctx_list);

/*
 * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and
 * perf_event_task_tick() are fully serialized because they're strictly cpu
 * affine and perf_event_ctx{activate,deactivate} are called with IRQs
 * disabled, while perf_event_task_tick is called from IRQ context.
 */
static void perf_event_ctx_activate(struct perf_event_context *ctx)
{
	struct list_head *head = this_cpu_ptr(&active_ctx_list);

	WARN_ON(!irqs_disabled());

	WARN_ON(!list_empty(&ctx->active_ctx_list));

	list_add(&ctx->active_ctx_list, head);
}

static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
{
	WARN_ON(!irqs_disabled());

	WARN_ON(list_empty(&ctx->active_ctx_list));

	list_del_init(&ctx->active_ctx_list);
}

static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_event_context *ctx;

	ctx = container_of(head, struct perf_event_context, rcu_head);
	kfree(ctx->task_ctx_data);
	kfree(ctx);
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}

/*
 * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
 * perf_pmu_migrate_context() we need some magic.
 *
 * Those places that change perf_event::ctx will hold both
 * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
 *
 * Lock ordering is by mutex address. There is one other site where
 * perf_event_context::mutex nests and that is put_event(). But remember that
 * that is a parent<->child context relation, and migration does not affect
 * children, therefore these two orderings should not interact.
 *
 * The change in perf_event::ctx does not affect children (as claimed above)
 * because the sys_perf_event_open() case will install a new event and break
 * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
 * concerned with cpuctx and that doesn't have children.
 *
 * The places that change perf_event::ctx will issue:
 *
 *   perf_remove_from_context();
 *   synchronize_rcu();
 *   perf_install_in_context();
 *
 * to affect the change. The remove_from_context() + synchronize_rcu() should
 * quiesce the event, after which we can install it in the new location. This
 * means that only external vectors (perf_fops, prctl) can perturb the event
 * while in transit. Therefore all such accessors should also acquire
 * perf_event_context::mutex to serialize against this.
 *
 * However; because event->ctx can change while we're waiting to acquire
 * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
 * function.
 *
 * Lock order:
 *	task_struct::perf_event_mutex
 *	  perf_event_context::mutex
 *	    perf_event_context::lock
 *	    perf_event::child_mutex;
 *	    perf_event::mmap_mutex
 *	    mmap_sem
 */
static struct perf_event_context *
perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
{
	struct perf_event_context *ctx;

again:
	rcu_read_lock();
	ctx = ACCESS_ONCE(event->ctx);
	if (!atomic_inc_not_zero(&ctx->refcount)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	mutex_lock_nested(&ctx->mutex, nesting);
	if (event->ctx != ctx) {
		mutex_unlock(&ctx->mutex);
		put_ctx(ctx);
		goto again;
	}

	return ctx;
}

static inline struct perf_event_context *
perf_event_ctx_lock(struct perf_event *event)
{
	return perf_event_ctx_lock_nested(event, 0);
}

static void perf_event_ctx_unlock(struct perf_event *event,
				  struct perf_event_context *ctx)
{
	mutex_unlock(&ctx->mutex);
	put_ctx(ctx);
}
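
/*
 * Illustrative sketch (not in the original file) of the accessor pattern
 * the comment above requires; perf_event_disable() further down follows
 * exactly this shape:
 *
 *	struct perf_event_context *ctx = perf_event_ctx_lock(event);
 *
 *	... operate on the event under ctx->mutex ...
 *
 *	perf_event_ctx_unlock(event, ctx);
 */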

/*
 * This must be done under the ctx->lock, such as to serialize against
 * context_equiv(), therefore we cannot call put_ctx() since that might end up
 * calling scheduler related locks and ctx->lock nests inside those.
 */
static __must_check struct perf_event_context *
unclone_ctx(struct perf_event_context *ctx)
{
	struct perf_event_context *parent_ctx = ctx->parent_ctx;

	lockdep_assert_held(&ctx->lock);

	if (parent_ctx)
		ctx->parent_ctx = NULL;
	ctx->generation++;

	return parent_ctx;
}

static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_tgid_nr_ns(p, event->ns);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_pid_nr_ns(p, event->ns);
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}

/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
	struct perf_event_context *ctx;

retry:
	/*
	 * One of the few rules of preemptible RCU is that one cannot do
	 * rcu_read_unlock() while holding a scheduler (or nested) lock when
	 * part of the read side critical section was preemptible -- see
	 * rcu_read_unlock_special().
	 *
	 * Since ctx->lock nests under rq->lock we must ensure the entire read
	 * side critical section is non-preemptible.
	 */
	preempt_disable();
	rcu_read_lock();
	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed.  Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so.  If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			rcu_read_unlock();
			preempt_enable();
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	preempt_enable();
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

static u64 perf_event_time(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	if (is_cgroup_event(event))
		return perf_cgroup_event_time(event);

	return ctx ? ctx->time : 0;
}

/*
 * Update the total_time_enabled and total_time_running fields for an event.
 * The caller of this function needs to hold the ctx->lock.
 */
static void update_event_times(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	u64 run_end;

	if (event->state < PERF_EVENT_STATE_INACTIVE ||
	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
		return;
	/*
	 * in cgroup mode, time_enabled represents
	 * the time the event was enabled AND active
	 * tasks were in the monitored cgroup. This is
	 * independent of the activity of the context as
	 * there may be a mix of cgroup and non-cgroup events.
	 *
	 * That is why we treat cgroup events differently
	 * here.
	 */
	if (is_cgroup_event(event))
		run_end = perf_cgroup_event_time(event);
	else if (ctx->is_active)
		run_end = ctx->time;
	else
		run_end = event->tstamp_stopped;

	event->total_time_enabled = run_end - event->tstamp_enabled;

	if (event->state == PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;
	else
		run_end = perf_event_time(event);

	event->total_time_running = run_end - event->tstamp_running;
}

/*
 * Update total_time_enabled and total_time_running for all events in a group.
 */
static void update_group_times(struct perf_event *leader)
{
	struct perf_event *event;

	update_event_times(leader);
	list_for_each_entry(event, &leader->sibling_list, group_entry)
		update_event_times(event);
}

static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
	if (event->attr.pinned)
		return &ctx->pinned_groups;
	else
		return &ctx->flexible_groups;
}

/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
	event->attach_state |= PERF_ATTACH_CONTEXT;

	/*
	 * If we're a stand alone event or group leader, we go to the context
	 * list, group events are kept attached to the group so that
	 * perf_group_detach can, at all times, locate all siblings.
	 */
	if (event->group_leader == event) {
		struct list_head *list;

		if (is_software_event(event))
			event->group_flags |= PERF_GROUP_SOFTWARE;

		list = ctx_group_list(event, ctx);
		list_add_tail(&event->group_entry, list);
	}

	if (is_cgroup_event(event))
		ctx->nr_cgroups++;

	list_add_rcu(&event->event_entry, &ctx->event_list);
	ctx->nr_events++;
	if (event->attr.inherit_stat)
		ctx->nr_stat++;

	ctx->generation++;
}

/*
 * Initialize event state based on the perf_event_attr::disabled.
 */
static inline void perf_event__state_init(struct perf_event *event)
{
	event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
					      PERF_EVENT_STATE_INACTIVE;
}

/*
 * Called at perf_event creation and when events are attached/detached from a
 * group.
 */
static void perf_event__read_size(struct perf_event *event)
{
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_GROUP) {
		nr += event->group_leader->nr_siblings;
		size += sizeof(u64);
	}

	size += entry * nr;
	event->read_size = size;
}
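
/*
 * Worked example (illustrative): for read_format =
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID | PERF_FORMAT_GROUP
 * on a leader with two siblings, entry = 16 (value + id), nr = 3 and
 * size = 8 (time_enabled) + 8 (nr) + 3 * 16 = 64 bytes.
 */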

static void perf_event__header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	perf_event__read_size(event);

	if (sample_type & PERF_SAMPLE_IP)
		size += sizeof(data->ip);

	if (sample_type & PERF_SAMPLE_ADDR)
		size += sizeof(data->addr);

	if (sample_type & PERF_SAMPLE_PERIOD)
		size += sizeof(data->period);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		size += sizeof(data->weight);

	if (sample_type & PERF_SAMPLE_READ)
		size += event->read_size;

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		size += sizeof(data->data_src.val);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		size += sizeof(data->txn);

	event->header_size = size;
}

static void perf_event__id_header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu_entry);

	event->id_header_size = size;
}

static void perf_group_attach(struct perf_event *event)
{
	struct perf_event *group_leader = event->group_leader, *pos;

	/*
	 * We can have double attach due to group movement in perf_event_open.
	 */
	if (event->attach_state & PERF_ATTACH_GROUP)
		return;

	event->attach_state |= PERF_ATTACH_GROUP;

	if (group_leader == event)
		return;

	WARN_ON_ONCE(group_leader->ctx != event->ctx);

	if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
			!is_software_event(event))
		group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

	list_add_tail(&event->group_entry, &group_leader->sibling_list);
	group_leader->nr_siblings++;

	perf_event__header_size(group_leader);

	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
		perf_event__header_size(pos);
}

/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx;

	WARN_ON_ONCE(event->ctx != ctx);
	lockdep_assert_held(&ctx->lock);

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
		return;

	event->attach_state &= ~PERF_ATTACH_CONTEXT;

	if (is_cgroup_event(event)) {
		ctx->nr_cgroups--;
		cpuctx = __get_cpu_context(ctx);
		/*
		 * if there are no more cgroup events
		 * then clear cgrp to avoid stale pointer
		 * in update_cgrp_time_from_cpuctx()
		 */
		if (!ctx->nr_cgroups)
			cpuctx->cgrp = NULL;
	}

	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_rcu(&event->event_entry);

	if (event->group_leader == event)
		list_del_init(&event->group_entry);

	update_group_times(event);

	/*
	 * If event was in error state, then keep it
	 * that way, otherwise bogus counts will be
	 * returned on read(). The only way to get out
	 * of error state is by explicit re-enabling
	 * of the event
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;

	ctx->generation++;
}

static void perf_group_detach(struct perf_event *event)
{
	struct perf_event *sibling, *tmp;
	struct list_head *list = NULL;

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_GROUP))
		return;

	event->attach_state &= ~PERF_ATTACH_GROUP;

	/*
	 * If this is a sibling, remove it from its group.
	 */
	if (event->group_leader != event) {
		list_del_init(&event->group_entry);
		event->group_leader->nr_siblings--;
		goto out;
	}

	if (!list_empty(&event->group_entry))
		list = &event->group_entry;

	/*
	 * If this was a group event with sibling events then
	 * upgrade the siblings to singleton events by adding them
	 * to whatever list we are on.
	 */
	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
		if (list)
			list_move_tail(&sibling->group_entry, list);
		sibling->group_leader = sibling;

		/* Inherit group flags from the previous leader */
		sibling->group_flags = event->group_flags;

		WARN_ON_ONCE(sibling->ctx != event->ctx);
	}

out:
	perf_event__header_size(event->group_leader);

	list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
		perf_event__header_size(tmp);
}

/*
 * User event without the task.
 */
static bool is_orphaned_event(struct perf_event *event)
{
	return event && !is_kernel_event(event) && !event->owner;
}

/*
 * Event has a parent but parent's task finished and it's
 * alive only because of children holding a reference.
 */
static bool is_orphaned_child(struct perf_event *event)
{
	return is_orphaned_event(event->parent);
}

static void orphans_remove_work(struct work_struct *work);

static void schedule_orphans_remove(struct perf_event_context *ctx)
{
	if (!ctx->task || ctx->orphans_remove_sched || !perf_wq)
		return;

	if (queue_delayed_work(perf_wq, &ctx->orphans_remove, 1)) {
		get_ctx(ctx);
		ctx->orphans_remove_sched = true;
	}
}

static int __init perf_workqueue_init(void)
{
	perf_wq = create_singlethread_workqueue("perf");
	WARN(!perf_wq, "failed to create perf workqueue\n");
	return perf_wq ? 0 : -1;
}

core_initcall(perf_workqueue_init);

static inline int
event_filter_match(struct perf_event *event)
{
	return (event->cpu == -1 || event->cpu == smp_processor_id())
	    && perf_cgroup_match(event);
}

static void
event_sched_out(struct perf_event *event,
		  struct perf_cpu_context *cpuctx,
		  struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);
	u64 delta;

	WARN_ON_ONCE(event->ctx != ctx);
	lockdep_assert_held(&ctx->lock);

	/*
	 * An event which could not be activated because of
	 * filter mismatch still needs to have its timings
	 * maintained, otherwise bogus information is returned
	 * via read() for time_enabled, time_running:
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE
	    && !event_filter_match(event)) {
		delta = tstamp - event->tstamp_stopped;
		event->tstamp_running += delta;
		event->tstamp_stopped = tstamp;
	}

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	perf_pmu_disable(event->pmu);

	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;
	}
	event->tstamp_stopped = tstamp;
	event->pmu->del(event, 0);
	event->oncpu = -1;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;
	if (!--ctx->nr_active)
		perf_event_ctx_deactivate(ctx);
	if (event->attr.freq && event->attr.sample_freq)
		ctx->nr_freq--;
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;

	if (is_orphaned_child(event))
		schedule_orphans_remove(ctx);

	perf_pmu_enable(event->pmu);
}

static void
group_sched_out(struct perf_event *group_event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	struct perf_event *event;
	int state = group_event->state;

	event_sched_out(group_event, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		event_sched_out(event, cpuctx, ctx);

	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
		cpuctx->exclusive = 0;
}

struct remove_event {
	struct perf_event *event;
	bool detach_group;
};

/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static int __perf_remove_from_context(void *info)
{
	struct remove_event *re = info;
	struct perf_event *event = re->event;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	raw_spin_lock(&ctx->lock);
	event_sched_out(event, cpuctx, ctx);
	if (re->detach_group)
		perf_group_detach(event);
	list_del_event(event, ctx);
	if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
		ctx->is_active = 0;
		cpuctx->task_ctx = NULL;
	}
	raw_spin_unlock(&ctx->lock);

	return 0;
}


/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * CPU events are removed with a smp call. For task events we only
 * call when the task is on a CPU.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_remove_from_context(struct perf_event *event, bool detach_group)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;
	struct remove_event re = {
		.event = event,
		.detach_group = detach_group,
	};

	lockdep_assert_held(&ctx->mutex);

	if (!task) {
		/*
		 * Per cpu events are removed via an smp call. The removal can
		 * fail if the CPU is currently offline, but in that case we
		 * already called __perf_remove_from_context from
		 * perf_event_exit_cpu.
		 */
		cpu_function_call(event->cpu, __perf_remove_from_context, &re);
		return;
	}

retry:
	if (!task_function_call(task, __perf_remove_from_context, &re))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If we failed to find a running task, but find the context active now
	 * that we've acquired the ctx->lock, retry.
	 */
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		/*
		 * Reload the task pointer, it might have been changed by
		 * a concurrent perf_event_context_sched_out().
		 */
		task = ctx->task;
		goto retry;
	}

	/*
	 * Since the task isn't running, it's safe to remove the event; we
	 * hold the ctx->lock, so the task won't get scheduled in.
	 */
	if (detach_group)
		perf_group_detach(event);
	list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}

1686
/*
1687
 * Cross CPU call to disable a performance event
1688
 */
1689
int __perf_event_disable(void *info)
1690
{
1691 1692
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
P
Peter Zijlstra 已提交
1693
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1694 1695

	/*
1696 1697
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
1698 1699 1700
	 *
	 * Can trigger due to concurrent perf_event_context_sched_out()
	 * flipping contexts around.
1701
	 */
1702
	if (ctx->task && cpuctx->task_ctx != ctx)
1703
		return -EINVAL;
1704

1705
	raw_spin_lock(&ctx->lock);
1706 1707

	/*
1708
	 * If the event is on, turn it off.
1709 1710
	 * If it is in error state, leave it in error state.
	 */
1711
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
1712
		update_context_time(ctx);
S
Stephane Eranian 已提交
1713
		update_cgrp_time_from_event(event);
1714 1715 1716
		update_group_times(event);
		if (event == event->group_leader)
			group_sched_out(event, cpuctx, ctx);
1717
		else
1718 1719
			event_sched_out(event, cpuctx, ctx);
		event->state = PERF_EVENT_STATE_OFF;
1720 1721
	}

1722
	raw_spin_unlock(&ctx->lock);
1723 1724

	return 0;
1725 1726 1727
}

/*
1728
 * Disable a event.
1729
 *
1730 1731
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
1732
 * remains valid.  This condition is satisifed when called through
1733 1734 1735 1736
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
1737
 * is the current context on this CPU and preemption is disabled,
1738
 * hence we can't get into perf_event_task_sched_out for this context.
1739
 */
P
Peter Zijlstra 已提交
1740
static void _perf_event_disable(struct perf_event *event)
1741
{
1742
	struct perf_event_context *ctx = event->ctx;
1743 1744 1745 1746
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
1747
		 * Disable the event on the cpu that it's on
1748
		 */
1749
		cpu_function_call(event->cpu, __perf_event_disable, event);
1750 1751 1752
		return;
	}

P
Peter Zijlstra 已提交
1753
retry:
1754 1755
	if (!task_function_call(task, __perf_event_disable, event))
		return;
1756

1757
	raw_spin_lock_irq(&ctx->lock);
1758
	/*
1759
	 * If the event is still active, we need to retry the cross-call.
1760
	 */
1761
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
1762
		raw_spin_unlock_irq(&ctx->lock);
1763 1764 1765 1766 1767
		/*
		 * Reload the task pointer, it might have been changed by
		 * a concurrent perf_event_context_sched_out().
		 */
		task = ctx->task;
1768 1769 1770 1771 1772 1773 1774
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
1775 1776 1777
	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_group_times(event);
		event->state = PERF_EVENT_STATE_OFF;
1778
	}
1779
	raw_spin_unlock_irq(&ctx->lock);
1780
}
P
Peter Zijlstra 已提交
1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793

/*
 * Strictly speaking kernel users cannot create groups and therefore this
 * interface does not need the perf_event_ctx_lock() magic.
 */
void perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx;

	ctx = perf_event_ctx_lock(event);
	_perf_event_disable(event);
	perf_event_ctx_unlock(event, ctx);
}
1794
EXPORT_SYMBOL_GPL(perf_event_disable);
1795

S
Stephane Eranian 已提交
1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830
static void perf_set_shadow_time(struct perf_event *event,
				 struct perf_event_context *ctx,
				 u64 tstamp)
{
	/*
	 * use the correct time source for the time snapshot
	 *
	 * We could get by without this by leveraging the
	 * fact that to get to this function, the caller
	 * has most likely already called update_context_time()
	 * and update_cgrp_time_xx() and thus both timestamp
	 * are identical (or very close). Given that tstamp is,
	 * already adjusted for cgroup, we could say that:
	 *    tstamp - ctx->timestamp
	 * is equivalent to
	 *    tstamp - cgrp->timestamp.
	 *
	 * Then, in perf_output_read(), the calculation would
	 * work with no changes because:
	 * - event is guaranteed scheduled in
	 * - no scheduled out in between
	 * - thus the timestamp would be the same
	 *
	 * But this is a bit hairy.
	 *
	 * So instead, we have an explicit cgroup call to remain
	 * within the time time source all along. We believe it
	 * is cleaner and simpler to understand.
	 */
	if (is_cgroup_event(event))
		perf_cgroup_set_shadow_time(event, tstamp);
	else
		event->shadow_ctx_time = tstamp - ctx->timestamp;
}

#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_event *event, int enable);

static int
event_sched_in(struct perf_event *event,
		 struct perf_cpu_context *cpuctx,
		 struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);
	int ret = 0;

	lockdep_assert_held(&ctx->lock);

	if (event->state <= PERF_EVENT_STATE_OFF)
		return 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();

	/*
	 * Unthrottle events, since we scheduled we might have missed several
	 * ticks already, also for a heavily scheduling task there is little
	 * guarantee it'll get a tick in a timely manner.
	 */
	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
		perf_log_throttle(event, 1);
		event->hw.interrupts = 0;
	}

	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	perf_pmu_disable(event->pmu);

	event->tstamp_running += tstamp - event->tstamp_stopped;

	perf_set_shadow_time(event, ctx, tstamp);

	if (event->pmu->add(event, PERF_EF_START)) {
		event->state = PERF_EVENT_STATE_INACTIVE;
		event->oncpu = -1;
		ret = -EAGAIN;
		goto out;
	}

	if (!is_software_event(event))
		cpuctx->active_oncpu++;
	if (!ctx->nr_active++)
		perf_event_ctx_activate(ctx);
	if (event->attr.freq && event->attr.sample_freq)
		ctx->nr_freq++;

	if (event->attr.exclusive)
		cpuctx->exclusive = 1;

	if (is_orphaned_child(event))
		schedule_orphans_remove(ctx);

out:
	perf_pmu_enable(event->pmu);

	return ret;
}

static int
group_sched_in(struct perf_event *group_event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	struct perf_event *event, *partial_group = NULL;
	struct pmu *pmu = ctx->pmu;
	u64 now = ctx->time;
	bool simulate = false;

	if (group_event->state == PERF_EVENT_STATE_OFF)
		return 0;

	pmu->start_txn(pmu);

	if (event_sched_in(group_event, cpuctx, ctx)) {
		pmu->cancel_txn(pmu);
		perf_cpu_hrtimer_restart(cpuctx);
		return -EAGAIN;
	}

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event_sched_in(event, cpuctx, ctx)) {
			partial_group = event;
			goto group_error;
		}
	}

	if (!pmu->commit_txn(pmu))
		return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 * The events up to the failed event are scheduled out normally,
	 * tstamp_stopped will be updated.
	 *
	 * The failed events and the remaining siblings need to have
	 * their timings updated as if they had gone thru event_sched_in()
	 * and event_sched_out(). This is required to get consistent timings
	 * across the group. This also takes care of the case where the group
	 * could never be scheduled by ensuring tstamp_stopped is set to mark
	 * the time the event was actually stopped, such that time delta
	 * calculation in update_event_times() is correct.
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
			simulate = true;

		if (simulate) {
			event->tstamp_running += now - event->tstamp_stopped;
			event->tstamp_stopped = now;
		} else {
			event_sched_out(event, cpuctx, ctx);
		}
	}
	event_sched_out(group_event, cpuctx, ctx);

	pmu->cancel_txn(pmu);

	perf_cpu_hrtimer_restart(cpuctx);

	return -EAGAIN;
}
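
/*
 * Illustrative sketch (not part of this file): the start_txn()/commit_txn()/
 * cancel_txn() callbacks used above let a PMU validate a whole group in one
 * go instead of accepting events one by one.  A PMU without real scheduling
 * constraints could implement the hooks roughly like this (the names are
 * hypothetical, shown only to make the calling convention concrete):
 *
 *	static void example_pmu_start_txn(struct pmu *pmu)
 *	{
 *		perf_pmu_disable(pmu);		// batch the following add()s
 *	}
 *
 *	static int example_pmu_commit_txn(struct pmu *pmu)
 *	{
 *		perf_pmu_enable(pmu);
 *		return 0;			// 0: the whole group is schedulable
 *	}
 *
 *	static void example_pmu_cancel_txn(struct pmu *pmu)
 *	{
 *		perf_pmu_enable(pmu);		// undo start_txn()
 *	}
 */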

/*
 * Work out whether we can put this event group on the CPU now.
 */
static int group_can_go_on(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software events can always go on.
	 */
	if (event->group_flags & PERF_GROUP_SOFTWARE)
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * events can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * events on the CPU, it can't go on.
	 */
	if (event->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

static void add_event_to_ctx(struct perf_event *event,
			       struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);

	list_add_event(event, ctx);
	perf_group_attach(event);
	event->tstamp_enabled = tstamp;
	event->tstamp_running = tstamp;
	event->tstamp_stopped = tstamp;
}

static void task_ctx_sched_out(struct perf_event_context *ctx);
static void
ctx_sched_in(struct perf_event_context *ctx,
	     struct perf_cpu_context *cpuctx,
	     enum event_type_t event_type,
	     struct task_struct *task);

static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
				struct perf_event_context *ctx,
				struct task_struct *task)
{
	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
}

/*
 * Cross CPU call to install and enable a performance event
 *
 * Must be called with ctx->mutex held
 */
static int  __perf_install_in_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	struct perf_event_context *task_ctx = cpuctx->task_ctx;
	struct task_struct *task = current;

	perf_ctx_lock(cpuctx, task_ctx);
	perf_pmu_disable(cpuctx->ctx.pmu);

	/*
	 * If there was an active task_ctx schedule it out.
	 */
	if (task_ctx)
		task_ctx_sched_out(task_ctx);

	/*
	 * If the context we're installing events in is not the
	 * active task_ctx, flip them.
	 */
	if (ctx->task && task_ctx != ctx) {
		if (task_ctx)
			raw_spin_unlock(&task_ctx->lock);
		raw_spin_lock(&ctx->lock);
		task_ctx = ctx;
	}

	if (task_ctx) {
		cpuctx->task_ctx = task_ctx;
		task = task_ctx->task;
	}

	cpu_ctx_sched_out(cpuctx, EVENT_ALL);

	update_context_time(ctx);
	/*
	 * update cgrp time only if current cgrp
	 * matches event->cgrp. Must be done before
	 * calling add_event_to_ctx()
	 */
	update_cgrp_time_from_event(event);

	add_event_to_ctx(event, ctx);

	/*
	 * Schedule everything back in
	 */
	perf_event_sched_in(cpuctx, task_ctx, task);

	perf_pmu_enable(cpuctx->ctx.pmu);
	perf_ctx_unlock(cpuctx, task_ctx);

	return 0;
}

/*
 * Attach a performance event to a context
 *
 * First we add the event to the list with the hardware enable bit
 * in event->hw_config cleared.
 *
 * If the event is attached to a task which is on a CPU we use an smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 */
static void
perf_install_in_context(struct perf_event_context *ctx,
			struct perf_event *event,
			int cpu)
{
	struct task_struct *task = ctx->task;

	lockdep_assert_held(&ctx->mutex);

	event->ctx = ctx;
	if (event->cpu != -1)
		event->cpu = cpu;

	if (!task) {
		/*
		 * Per cpu events are installed via an smp call and
		 * the install is always successful.
		 */
		cpu_function_call(cpu, __perf_install_in_context, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_install_in_context, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If we failed to find a running task, but find the context active now
	 * that we've acquired the ctx->lock, retry.
	 */
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		/*
		 * Reload the task pointer, it might have been changed by
		 * a concurrent perf_event_context_sched_out().
		 */
		task = ctx->task;
		goto retry;
	}

	/*
	 * Since the task isn't running, it's safe to add the event, us holding
	 * the ctx->lock ensures the task won't get scheduled in.
	 */
	add_event_to_ctx(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Put an event into inactive state and update time fields.
 * Enabling the leader of a group effectively enables all
 * the group members that aren't explicitly disabled, so we
 * have to update their ->tstamp_enabled also.
 * Note: this works for group members as well as group leaders
 * since the non-leader members' sibling_lists will be empty.
 */
static void __perf_event_mark_enabled(struct perf_event *event)
{
	struct perf_event *sub;
	u64 tstamp = perf_event_time(event);

	event->state = PERF_EVENT_STATE_INACTIVE;
	event->tstamp_enabled = tstamp - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
	}
}

/*
 * Cross CPU call to enable a performance event
 */
static int __perf_event_enable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	int err;

	/*
	 * There's a time window between 'ctx->is_active' check
	 * in perf_event_enable function and this place having:
	 *   - IRQs on
	 *   - ctx->lock unlocked
	 *
	 * where the task could be killed and 'ctx' deactivated
	 * by perf_event_exit_task.
	 */
	if (!ctx->is_active)
		return -EINVAL;

	raw_spin_lock(&ctx->lock);
	update_context_time(ctx);

	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto unlock;

	/*
	 * set current task's cgroup time reference point
	 */
	perf_cgroup_set_timestamp(current, ctx);

	__perf_event_mark_enabled(event);

	if (!event_filter_match(event)) {
		if (is_cgroup_event(event))
			perf_cgroup_defer_enabled(event);
		goto unlock;
	}

	/*
	 * If the event is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(event, cpuctx, 1)) {
		err = -EEXIST;
	} else {
		if (event == leader)
			err = group_sched_in(event, cpuctx, ctx);
		else
			err = event_sched_in(event, cpuctx, ctx);
	}

	if (err) {
		/*
		 * If this event can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != event) {
			group_sched_out(leader, cpuctx, ctx);
			perf_cpu_hrtimer_restart(cpuctx);
		}
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_EVENT_STATE_ERROR;
		}
	}

unlock:
	raw_spin_unlock(&ctx->lock);

	return 0;
}

/*
 * Enable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each as described
 * for perf_event_disable.
 */
static void _perf_event_enable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the event on the cpu that it's on
		 */
		cpu_function_call(event->cpu, __perf_event_enable, event);
		return;
	}

	raw_spin_lock_irq(&ctx->lock);
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto out;

	/*
	 * If the event is in error state, clear that first.
	 * That way, if we see the event in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		event->state = PERF_EVENT_STATE_OFF;

retry:
	if (!ctx->is_active) {
		__perf_event_mark_enabled(event);
		goto out;
	}

	raw_spin_unlock_irq(&ctx->lock);

	if (!task_function_call(task, __perf_event_enable, event))
		return;

	raw_spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the event is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
		/*
		 * task could have been flipped by a concurrent
		 * perf_event_context_sched_out()
		 */
		task = ctx->task;
		goto retry;
	}

out:
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * See perf_event_disable();
 */
void perf_event_enable(struct perf_event *event)
{
	struct perf_event_context *ctx;

	ctx = perf_event_ctx_lock(event);
	_perf_event_enable(event);
	perf_event_ctx_unlock(event, ctx);
}
EXPORT_SYMBOL_GPL(perf_event_enable);

static int _perf_event_refresh(struct perf_event *event, int refresh)
{
	/*
	 * not supported on inherited events
	 */
	if (event->attr.inherit || !is_sampling_event(event))
		return -EINVAL;

	atomic_add(refresh, &event->event_limit);
	_perf_event_enable(event);

	return 0;
}

/*
 * See perf_event_disable()
 */
int perf_event_refresh(struct perf_event *event, int refresh)
{
	struct perf_event_context *ctx;
	int ret;

	ctx = perf_event_ctx_lock(event);
	ret = _perf_event_refresh(event, refresh);
	perf_event_ctx_unlock(event, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(perf_event_refresh);

static void ctx_sched_out(struct perf_event_context *ctx,
			  struct perf_cpu_context *cpuctx,
			  enum event_type_t event_type)
{
	struct perf_event *event;
	int is_active = ctx->is_active;

	ctx->is_active &= ~event_type;
	if (likely(!ctx->nr_events))
		return;

	update_context_time(ctx);
	update_cgrp_time_from_cpuctx(cpuctx);
	if (!ctx->nr_active)
		return;

	perf_pmu_disable(ctx->pmu);
	if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);
	}

	if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);
	}
	perf_pmu_enable(ctx->pmu);
}

/*
 * Test whether two contexts are equivalent, i.e. whether they have both been
 * cloned from the same version of the same context.
 *
 * Equivalence is measured using a generation number in the context that is
 * incremented on each modification to it; see unclone_ctx(), list_add_event()
 * and list_del_event().
 */
static int context_equiv(struct perf_event_context *ctx1,
			 struct perf_event_context *ctx2)
{
	lockdep_assert_held(&ctx1->lock);
	lockdep_assert_held(&ctx2->lock);

	/* Pinning disables the swap optimization */
	if (ctx1->pin_count || ctx2->pin_count)
		return 0;

	/* If ctx1 is the parent of ctx2 */
	if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen)
		return 1;

	/* If ctx2 is the parent of ctx1 */
	if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation)
		return 1;

	/*
	 * If ctx1 and ctx2 have the same parent; we flatten the parent
	 * hierarchy, see perf_event_init_context().
	 */
	if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
			ctx1->parent_gen == ctx2->parent_gen)
		return 1;

	/* Unmatched */
	return 0;
}

2425 2426
static void __perf_event_sync_stat(struct perf_event *event,
				     struct perf_event *next_event)
2427 2428 2429
{
	u64 value;

2430
	if (!event->attr.inherit_stat)
2431 2432 2433
		return;

	/*
2434
	 * Update the event value, we cannot use perf_event_read()
2435 2436
	 * because we're in the middle of a context switch and have IRQs
	 * disabled, which upsets smp_call_function_single(), however
2437
	 * we know the event must be on the current CPU, therefore we
2438 2439
	 * don't need to use it.
	 */
2440 2441
	switch (event->state) {
	case PERF_EVENT_STATE_ACTIVE:
2442 2443
		event->pmu->read(event);
		/* fall-through */
2444

2445 2446
	case PERF_EVENT_STATE_INACTIVE:
		update_event_times(event);
2447 2448 2449 2450 2451 2452 2453
		break;

	default:
		break;
	}

	/*
2454
	 * In order to keep per-task stats reliable we need to flip the event
2455 2456
	 * values when we flip the contexts.
	 */
2457 2458 2459
	value = local64_read(&next_event->count);
	value = local64_xchg(&event->count, value);
	local64_set(&next_event->count, value);
2460

2461 2462
	swap(event->total_time_enabled, next_event->total_time_enabled);
	swap(event->total_time_running, next_event->total_time_running);
2463

2464
	/*
2465
	 * Since we swizzled the values, update the user visible data too.
2466
	 */
2467 2468
	perf_event_update_userpage(event);
	perf_event_update_userpage(next_event);
2469 2470
}

2471 2472
static void perf_event_sync_stat(struct perf_event_context *ctx,
				   struct perf_event_context *next_ctx)
2473
{
2474
	struct perf_event *event, *next_event;
2475 2476 2477 2478

	if (!ctx->nr_stat)
		return;

2479 2480
	update_context_time(ctx);

2481 2482
	event = list_first_entry(&ctx->event_list,
				   struct perf_event, event_entry);
2483

2484 2485
	next_event = list_first_entry(&next_ctx->event_list,
					struct perf_event, event_entry);
2486

2487 2488
	while (&event->event_entry != &ctx->event_list &&
	       &next_event->event_entry != &next_ctx->event_list) {
2489

2490
		__perf_event_sync_stat(event, next_event);
2491

2492 2493
		event = list_next_entry(event, event_entry);
		next_event = list_next_entry(next_event, event_entry);
2494 2495 2496
	}
}

2497 2498
static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
					 struct task_struct *next)
T
Thomas Gleixner 已提交
2499
{
P
Peter Zijlstra 已提交
2500
	struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
2501
	struct perf_event_context *next_ctx;
2502
	struct perf_event_context *parent, *next_parent;
P
Peter Zijlstra 已提交
2503
	struct perf_cpu_context *cpuctx;
2504
	int do_switch = 1;
T
Thomas Gleixner 已提交
2505

P
Peter Zijlstra 已提交
2506 2507
	if (likely(!ctx))
		return;
2508

P
Peter Zijlstra 已提交
2509 2510
	cpuctx = __get_cpu_context(ctx);
	if (!cpuctx->task_ctx)
T
Thomas Gleixner 已提交
2511 2512
		return;

2513
	rcu_read_lock();
P
Peter Zijlstra 已提交
2514
	next_ctx = next->perf_event_ctxp[ctxn];
2515 2516 2517 2518 2519 2520 2521
	if (!next_ctx)
		goto unlock;

	parent = rcu_dereference(ctx->parent_ctx);
	next_parent = rcu_dereference(next_ctx->parent_ctx);

	/* If neither context have a parent context; they cannot be clones. */
2522
	if (!parent && !next_parent)
2523 2524 2525
		goto unlock;

	if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
2526 2527 2528 2529 2530 2531 2532 2533 2534
		/*
		 * Looks like the two contexts are clones, so we might be
		 * able to optimize the context switch.  We lock both
		 * contexts and check that they are clones under the
		 * lock (including re-checking that neither has been
		 * uncloned in the meantime).  It doesn't matter which
		 * order we take the locks because no other cpu could
		 * be trying to lock both of these tasks.
		 */
2535 2536
		raw_spin_lock(&ctx->lock);
		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
2537
		if (context_equiv(ctx, next_ctx)) {
2538 2539
			/*
			 * XXX do we need a memory barrier of sorts
2540
			 * wrt to rcu_dereference() of perf_event_ctxp
2541
			 */
P
Peter Zijlstra 已提交
2542 2543
			task->perf_event_ctxp[ctxn] = next_ctx;
			next->perf_event_ctxp[ctxn] = ctx;
2544 2545
			ctx->task = next;
			next_ctx->task = task;
2546 2547 2548

			swap(ctx->task_ctx_data, next_ctx->task_ctx_data);

2549
			do_switch = 0;
2550

2551
			perf_event_sync_stat(ctx, next_ctx);
2552
		}
2553 2554
		raw_spin_unlock(&next_ctx->lock);
		raw_spin_unlock(&ctx->lock);
2555
	}
2556
unlock:
2557
	rcu_read_unlock();
2558

2559
	if (do_switch) {
2560
		raw_spin_lock(&ctx->lock);
2561
		ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2562
		cpuctx->task_ctx = NULL;
2563
		raw_spin_unlock(&ctx->lock);
2564
	}
T
Thomas Gleixner 已提交
2565 2566
}

2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616
void perf_sched_cb_dec(struct pmu *pmu)
{
	this_cpu_dec(perf_sched_cb_usages);
}

void perf_sched_cb_inc(struct pmu *pmu)
{
	this_cpu_inc(perf_sched_cb_usages);
}

/*
 * This function provides the context switch callback to the lower code
 * layer. It is invoked ONLY when the context switch callback is enabled.
 */
static void perf_pmu_sched_task(struct task_struct *prev,
				struct task_struct *next,
				bool sched_in)
{
	struct perf_cpu_context *cpuctx;
	struct pmu *pmu;
	unsigned long flags;

	if (prev == next)
		return;

	local_irq_save(flags);

	rcu_read_lock();

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		if (pmu->sched_task) {
			cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

			perf_ctx_lock(cpuctx, cpuctx->task_ctx);

			perf_pmu_disable(pmu);

			pmu->sched_task(cpuctx->task_ctx, sched_in);

			perf_pmu_enable(pmu);

			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
		}
	}

	rcu_read_unlock();

	local_irq_restore(flags);
}
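
/*
 * Illustrative sketch (not part of this file): a PMU that wants the context
 * switch callback above typically bumps the per-cpu counter from the paths
 * that schedule such an event in and out, so perf_pmu_sched_task() only runs
 * while at least one interested event exists (the names are hypothetical):
 *
 *	static int example_pmu_add(struct perf_event *event, int flags)
 *	{
 *		...
 *		perf_sched_cb_inc(event->ctx->pmu);
 *		return 0;
 *	}
 *
 *	static void example_pmu_del(struct perf_event *event, int flags)
 *	{
 *		perf_sched_cb_dec(event->ctx->pmu);
 *		...
 *	}
 */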

#define for_each_task_context_nr(ctxn)					\
	for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)

/*
 * Called from scheduler to remove the events of the current task,
 * with interrupts disabled.
 *
 * We stop each event and update the event value in event->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of event _before_
 * accessing the event control register. If a NMI hits, then it will
 * not restart the event.
 */
2631 2632
void __perf_event_task_sched_out(struct task_struct *task,
				 struct task_struct *next)
P
Peter Zijlstra 已提交
2633 2634 2635
{
	int ctxn;

2636 2637 2638
	if (__this_cpu_read(perf_sched_cb_usages))
		perf_pmu_sched_task(task, next, false);

P
Peter Zijlstra 已提交
2639 2640
	for_each_task_context_nr(ctxn)
		perf_event_context_sched_out(task, ctxn, next);
S
Stephane Eranian 已提交
2641 2642 2643 2644 2645 2646

	/*
	 * if cgroup events exist on this CPU, then we need
	 * to check if we have to switch out PMU state.
	 * cgroup event are system-wide mode only
	 */
2647
	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
2648
		perf_cgroup_sched_out(task, next);
P
Peter Zijlstra 已提交
2649 2650
}

2651
static void task_ctx_sched_out(struct perf_event_context *ctx)
2652
{
P
Peter Zijlstra 已提交
2653
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2654

2655 2656
	if (!cpuctx->task_ctx)
		return;
2657 2658 2659 2660

	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
		return;

2661
	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2662 2663 2664
	cpuctx->task_ctx = NULL;
}

2665 2666 2667 2668 2669 2670 2671
/*
 * Called with IRQs disabled
 */
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type)
{
	ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
2672 2673
}

2674
static void
2675
ctx_pinned_sched_in(struct perf_event_context *ctx,
2676
		    struct perf_cpu_context *cpuctx)
T
Thomas Gleixner 已提交
2677
{
2678
	struct perf_event *event;
T
Thomas Gleixner 已提交
2679

2680 2681
	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
		if (event->state <= PERF_EVENT_STATE_OFF)
2682
			continue;
2683
		if (!event_filter_match(event))
2684 2685
			continue;

S
Stephane Eranian 已提交
2686 2687 2688 2689
		/* may need to reset tstamp_enabled */
		if (is_cgroup_event(event))
			perf_cgroup_mark_enabled(event, ctx);

2690
		if (group_can_go_on(event, cpuctx, 1))
2691
			group_sched_in(event, cpuctx, ctx);
2692 2693 2694 2695 2696

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
2697 2698 2699
		if (event->state == PERF_EVENT_STATE_INACTIVE) {
			update_group_times(event);
			event->state = PERF_EVENT_STATE_ERROR;
2700
		}
2701
	}
2702 2703 2704 2705
}

static void
ctx_flexible_sched_in(struct perf_event_context *ctx,
2706
		      struct perf_cpu_context *cpuctx)
2707 2708 2709
{
	struct perf_event *event;
	int can_add_hw = 1;
2710

2711 2712 2713
	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
		/* Ignore events in OFF or ERROR state */
		if (event->state <= PERF_EVENT_STATE_OFF)
2714
			continue;
2715 2716
		/*
		 * Listen to the 'cpu' scheduling filter constraint
2717
		 * of events:
2718
		 */
2719
		if (!event_filter_match(event))
T
Thomas Gleixner 已提交
2720 2721
			continue;

S
Stephane Eranian 已提交
2722 2723 2724 2725
		/* may need to reset tstamp_enabled */
		if (is_cgroup_event(event))
			perf_cgroup_mark_enabled(event, ctx);

P
Peter Zijlstra 已提交
2726
		if (group_can_go_on(event, cpuctx, can_add_hw)) {
2727
			if (group_sched_in(event, cpuctx, ctx))
2728
				can_add_hw = 0;
P
Peter Zijlstra 已提交
2729
		}
T
Thomas Gleixner 已提交
2730
	}
2731 2732 2733 2734 2735
}

static void
ctx_sched_in(struct perf_event_context *ctx,
	     struct perf_cpu_context *cpuctx,
S
Stephane Eranian 已提交
2736 2737
	     enum event_type_t event_type,
	     struct task_struct *task)
2738
{
S
Stephane Eranian 已提交
2739
	u64 now;
2740
	int is_active = ctx->is_active;
S
Stephane Eranian 已提交
2741

2742
	ctx->is_active |= event_type;
2743
	if (likely(!ctx->nr_events))
2744
		return;
2745

S
Stephane Eranian 已提交
2746 2747
	now = perf_clock();
	ctx->timestamp = now;
2748
	perf_cgroup_set_timestamp(task, ctx);
2749 2750 2751 2752
	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
2753
	if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
2754
		ctx_pinned_sched_in(ctx, cpuctx);
2755 2756

	/* Then walk through the lower prio flexible groups */
2757
	if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
2758
		ctx_flexible_sched_in(ctx, cpuctx);
2759 2760
}

2761
static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
S
Stephane Eranian 已提交
2762 2763
			     enum event_type_t event_type,
			     struct task_struct *task)
2764 2765 2766
{
	struct perf_event_context *ctx = &cpuctx->ctx;

S
Stephane Eranian 已提交
2767
	ctx_sched_in(ctx, cpuctx, event_type, task);
2768 2769
}

S
Stephane Eranian 已提交
2770 2771
static void perf_event_context_sched_in(struct perf_event_context *ctx,
					struct task_struct *task)
2772
{
P
Peter Zijlstra 已提交
2773
	struct perf_cpu_context *cpuctx;
2774

P
Peter Zijlstra 已提交
2775
	cpuctx = __get_cpu_context(ctx);
2776 2777 2778
	if (cpuctx->task_ctx == ctx)
		return;

2779
	perf_ctx_lock(cpuctx, ctx);
P
Peter Zijlstra 已提交
2780
	perf_pmu_disable(ctx->pmu);
2781 2782 2783 2784 2785 2786 2787
	/*
	 * We want to keep the following priority order:
	 * cpu pinned (that don't need to move), task pinned,
	 * cpu flexible, task flexible.
	 */
	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);

2788 2789
	if (ctx->nr_events)
		cpuctx->task_ctx = ctx;
2790

2791 2792
	perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);

2793 2794
	perf_pmu_enable(ctx->pmu);
	perf_ctx_unlock(cpuctx, ctx);
2795 2796
}

P
Peter Zijlstra 已提交
2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807
/*
 * Called from scheduler to add the events of the current task
 * with interrupts disabled.
 *
 * We restore the event value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of event _before_
 * accessing the event control register. If a NMI hits, then it will
 * keep the event running.
 */
2808 2809
void __perf_event_task_sched_in(struct task_struct *prev,
				struct task_struct *task)
P
Peter Zijlstra 已提交
2810 2811 2812 2813 2814 2815 2816 2817 2818
{
	struct perf_event_context *ctx;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (likely(!ctx))
			continue;

S
Stephane Eranian 已提交
2819
		perf_event_context_sched_in(ctx, task);
P
Peter Zijlstra 已提交
2820
	}
S
Stephane Eranian 已提交
2821 2822 2823 2824 2825
	/*
	 * if cgroup events exist on this CPU, then we need
	 * to check if we have to switch in PMU state.
	 * cgroup event are system-wide mode only
	 */
2826
	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
2827
		perf_cgroup_sched_in(prev, task);
2828

2829 2830
	if (__this_cpu_read(perf_sched_cb_usages))
		perf_pmu_sched_task(prev, task, true);
2831 2832
}

2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859
static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
{
	u64 frequency = event->attr.sample_freq;
	u64 sec = NSEC_PER_SEC;
	u64 divisor, dividend;

	int count_fls, nsec_fls, frequency_fls, sec_fls;

	count_fls = fls64(count);
	nsec_fls = fls64(nsec);
	frequency_fls = fls64(frequency);
	sec_fls = 30;

	/*
	 * We got @count in @nsec, with a target of sample_freq HZ
	 * the target period becomes:
	 *
	 *             @count * 10^9
	 * period = -------------------
	 *          @nsec * sample_freq
	 *
	 */

	/*
	 * Reduce accuracy by one bit such that @a and @b converge
	 * to a similar magnitude.
	 */
2860
#define REDUCE_FLS(a, b)		\
2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899
do {					\
	if (a##_fls > b##_fls) {	\
		a >>= 1;		\
		a##_fls--;		\
	} else {			\
		b >>= 1;		\
		b##_fls--;		\
	}				\
} while (0)

	/*
	 * Reduce accuracy until either term fits in a u64, then proceed with
	 * the other, so that finally we can do a u64/u64 division.
	 */
	while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
		REDUCE_FLS(nsec, frequency);
		REDUCE_FLS(sec, count);
	}

	if (count_fls + sec_fls > 64) {
		divisor = nsec * frequency;

		while (count_fls + sec_fls > 64) {
			REDUCE_FLS(count, sec);
			divisor >>= 1;
		}

		dividend = count * sec;
	} else {
		dividend = count * sec;

		while (nsec_fls + frequency_fls > 64) {
			REDUCE_FLS(nsec, frequency);
			dividend >>= 1;
		}

		divisor = nsec * frequency;
	}

2900 2901 2902
	if (!divisor)
		return dividend;

2903 2904 2905
	return div64_u64(dividend, divisor);
}
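
/*
 * Worked example for the formula above (illustrative numbers only): with
 * sample_freq = 1000 Hz, nsec = 10,000,000 ns and count = 2,000,000 events
 * observed in that window,
 *
 *	period = (2,000,000 * 10^9) / (10,000,000 * 1000) = 200,000
 *
 * i.e. an overflow period of 200,000 events, which yields ~1000 samples per
 * second as long as the event keeps firing at ~200 MHz.
 */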

static DEFINE_PER_CPU(int, perf_throttled_count);
static DEFINE_PER_CPU(u64, perf_throttled_seq);

2909
static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
2910
{
2911
	struct hw_perf_event *hwc = &event->hw;
2912
	s64 period, sample_period;
2913 2914
	s64 delta;

2915
	period = perf_calculate_period(event, nsec, count);
2916 2917 2918 2919 2920 2921 2922 2923 2924 2925

	delta = (s64)(period - hwc->sample_period);
	delta = (delta + 7) / 8; /* low pass filter */

	sample_period = hwc->sample_period + delta;

	if (!sample_period)
		sample_period = 1;

	hwc->sample_period = sample_period;
2926

2927
	if (local64_read(&hwc->period_left) > 8*sample_period) {
2928 2929 2930
		if (disable)
			event->pmu->stop(event, PERF_EF_UPDATE);

2931
		local64_set(&hwc->period_left, 0);
2932 2933 2934

		if (disable)
			event->pmu->start(event, PERF_EF_RELOAD);
2935
	}
2936 2937
}

2938 2939 2940 2941 2942 2943 2944
/*
 * combine freq adjustment with unthrottling to avoid two passes over the
 * events. At the same time, make sure, having freq events does not change
 * the rate of unthrottling as that would introduce bias.
 */
static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
					   int needs_unthr)
2945
{
2946 2947
	struct perf_event *event;
	struct hw_perf_event *hwc;
2948
	u64 now, period = TICK_NSEC;
2949
	s64 delta;
2950

2951 2952 2953 2954 2955 2956
	/*
	 * only need to iterate over all events iff:
	 * - context have events in frequency mode (needs freq adjust)
	 * - there are events to unthrottle on this cpu
	 */
	if (!(ctx->nr_freq || needs_unthr))
2957 2958
		return;

2959
	raw_spin_lock(&ctx->lock);
2960
	perf_pmu_disable(ctx->pmu);
2961

2962
	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
2963
		if (event->state != PERF_EVENT_STATE_ACTIVE)
2964 2965
			continue;

2966
		if (!event_filter_match(event))
2967 2968
			continue;

2969 2970
		perf_pmu_disable(event->pmu);

2971
		hwc = &event->hw;
2972

2973
		if (hwc->interrupts == MAX_INTERRUPTS) {
2974
			hwc->interrupts = 0;
2975
			perf_log_throttle(event, 1);
P
Peter Zijlstra 已提交
2976
			event->pmu->start(event, 0);
2977 2978
		}

2979
		if (!event->attr.freq || !event->attr.sample_freq)
2980
			goto next;
2981

2982 2983 2984 2985 2986
		/*
		 * stop the event and update event->count
		 */
		event->pmu->stop(event, PERF_EF_UPDATE);

2987
		now = local64_read(&event->count);
2988 2989
		delta = now - hwc->freq_count_stamp;
		hwc->freq_count_stamp = now;
2990

2991 2992 2993
		/*
		 * restart the event
		 * reload only if value has changed
2994 2995 2996
		 * we have stopped the event so tell that
		 * to perf_adjust_period() to avoid stopping it
		 * twice.
2997
		 */
2998
		if (delta > 0)
2999
			perf_adjust_period(event, period, delta, false);
3000 3001

		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
3002 3003
	next:
		perf_pmu_enable(event->pmu);
3004
	}
3005

3006
	perf_pmu_enable(ctx->pmu);
3007
	raw_spin_unlock(&ctx->lock);
3008 3009
}

3010
/*
3011
 * Round-robin a context's events:
3012
 */
3013
static void rotate_ctx(struct perf_event_context *ctx)
T
Thomas Gleixner 已提交
3014
{
3015 3016 3017 3018 3019 3020
	/*
	 * Rotate the first entry last of non-pinned groups. Rotation might be
	 * disabled by the inheritance code.
	 */
	if (!ctx->rotate_disable)
		list_rotate_left(&ctx->flexible_groups);
3021 3022
}

3023
static int perf_rotate_context(struct perf_cpu_context *cpuctx)
3024
{
P
Peter Zijlstra 已提交
3025
	struct perf_event_context *ctx = NULL;
3026
	int rotate = 0;
3027

3028 3029 3030 3031
	if (cpuctx->ctx.nr_events) {
		if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
			rotate = 1;
	}
3032

P
Peter Zijlstra 已提交
3033
	ctx = cpuctx->task_ctx;
3034 3035 3036 3037
	if (ctx && ctx->nr_events) {
		if (ctx->nr_events != ctx->nr_active)
			rotate = 1;
	}
3038

3039
	if (!rotate)
3040 3041
		goto done;

3042
	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
P
Peter Zijlstra 已提交
3043
	perf_pmu_disable(cpuctx->ctx.pmu);
3044

3045 3046 3047
	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
	if (ctx)
		ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
T
Thomas Gleixner 已提交
3048

3049 3050 3051
	rotate_ctx(&cpuctx->ctx);
	if (ctx)
		rotate_ctx(ctx);
3052

3053
	perf_event_sched_in(cpuctx, ctx, current);
3054

3055 3056
	perf_pmu_enable(cpuctx->ctx.pmu);
	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
3057
done:
3058 3059

	return rotate;
3060 3061
}

3062 3063 3064
#ifdef CONFIG_NO_HZ_FULL
bool perf_event_can_stop_tick(void)
{
3065
	if (atomic_read(&nr_freq_events) ||
3066
	    __this_cpu_read(perf_throttled_count))
3067
		return false;
3068 3069
	else
		return true;
3070 3071 3072
}
#endif

3073 3074
void perf_event_task_tick(void)
{
3075 3076
	struct list_head *head = this_cpu_ptr(&active_ctx_list);
	struct perf_event_context *ctx, *tmp;
3077
	int throttled;
3078

3079 3080
	WARN_ON(!irqs_disabled());

3081 3082 3083
	__this_cpu_inc(perf_throttled_seq);
	throttled = __this_cpu_xchg(perf_throttled_count, 0);

3084
	list_for_each_entry_safe(ctx, tmp, head, active_ctx_list)
3085
		perf_adjust_freq_unthr_context(ctx, throttled);
T
Thomas Gleixner 已提交
3086 3087
}

3088 3089 3090 3091 3092 3093 3094 3095 3096 3097
static int event_enable_on_exec(struct perf_event *event,
				struct perf_event_context *ctx)
{
	if (!event->attr.enable_on_exec)
		return 0;

	event->attr.enable_on_exec = 0;
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		return 0;

3098
	__perf_event_mark_enabled(event);
3099 3100 3101 3102

	return 1;
}

3103
/*
3104
 * Enable all of a task's events that have been marked enable-on-exec.
3105 3106
 * This expects task == current.
 */
P
Peter Zijlstra 已提交
3107
static void perf_event_enable_on_exec(struct perf_event_context *ctx)
3108
{
3109
	struct perf_event_context *clone_ctx = NULL;
3110
	struct perf_event *event;
3111 3112
	unsigned long flags;
	int enabled = 0;
3113
	int ret;
3114 3115

	local_irq_save(flags);
3116
	if (!ctx || !ctx->nr_events)
3117 3118
		goto out;

3119 3120 3121 3122 3123 3124 3125
	/*
	 * We must ctxsw out cgroup events to avoid conflict
	 * when invoking perf_task_event_sched_in() later on
	 * in this function. Otherwise we end up trying to
	 * ctxswin cgroup events which are already scheduled
	 * in.
	 */
3126
	perf_cgroup_sched_out(current, NULL);
3127

3128
	raw_spin_lock(&ctx->lock);
3129
	task_ctx_sched_out(ctx);
3130

3131
	list_for_each_entry(event, &ctx->event_list, event_entry) {
3132 3133 3134
		ret = event_enable_on_exec(event, ctx);
		if (ret)
			enabled = 1;
3135 3136 3137
	}

	/*
3138
	 * Unclone this context if we enabled any event.
3139
	 */
3140
	if (enabled)
3141
		clone_ctx = unclone_ctx(ctx);
3142

3143
	raw_spin_unlock(&ctx->lock);
3144

3145 3146 3147
	/*
	 * Also calls ctxswin for cgroup events, if any:
	 */
S
Stephane Eranian 已提交
3148
	perf_event_context_sched_in(ctx, ctx->task);
P
Peter Zijlstra 已提交
3149
out:
3150
	local_irq_restore(flags);
3151 3152 3153

	if (clone_ctx)
		put_ctx(clone_ctx);
3154 3155
}

3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171
void perf_event_exec(void)
{
	struct perf_event_context *ctx;
	int ctxn;

	rcu_read_lock();
	for_each_task_context_nr(ctxn) {
		ctx = current->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		perf_event_enable_on_exec(ctx);
	}
	rcu_read_unlock();
}
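
/*
 * Illustrative note (userspace pseudo-usage, not kernel code): the exec hook
 * above is what arms counters opened with attr.enable_on_exec, e.g. in a
 * forked child just before it execs the workload to be measured:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_INSTRUCTIONS,
 *		.disabled	= 1,
 *		.enable_on_exec	= 1,
 *	};
 *	fd = perf_event_open(&attr, 0, -1, -1, 0);	// self, any CPU
 *	execvp(argv[0], argv);		// event starts counting here
 */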

/*
3173
 * Cross CPU call to read the hardware event
T
Thomas Gleixner 已提交
3174
 */
3175
static void __perf_event_read(void *info)
T
Thomas Gleixner 已提交
3176
{
3177 3178
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
P
Peter Zijlstra 已提交
3179
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
I
Ingo Molnar 已提交
3180

3181 3182 3183 3184
	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu.  If not it has been
	 * scheduled out before the smp call arrived.  In that case
3185 3186
	 * event->count would have been updated to a recent sample
	 * when the event was scheduled out.
3187 3188 3189 3190
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

3191
	raw_spin_lock(&ctx->lock);
S
Stephane Eranian 已提交
3192
	if (ctx->is_active) {
3193
		update_context_time(ctx);
S
Stephane Eranian 已提交
3194 3195
		update_cgrp_time_from_event(event);
	}
3196
	update_event_times(event);
3197 3198
	if (event->state == PERF_EVENT_STATE_ACTIVE)
		event->pmu->read(event);
3199
	raw_spin_unlock(&ctx->lock);
T
Thomas Gleixner 已提交
3200 3201
}

P
Peter Zijlstra 已提交
3202 3203
static inline u64 perf_event_count(struct perf_event *event)
{
3204 3205 3206 3207
	if (event->pmu->count)
		return event->pmu->count(event);

	return __perf_event_count(event);
P
Peter Zijlstra 已提交
3208 3209
}

3210
static u64 perf_event_read(struct perf_event *event)
T
Thomas Gleixner 已提交
3211 3212
{
	/*
3213 3214
	 * If event is enabled and currently active on a CPU, update the
	 * value in the event structure:
T
Thomas Gleixner 已提交
3215
	 */
3216 3217 3218 3219
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		smp_call_function_single(event->oncpu,
					 __perf_event_read, event, 1);
	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
P
Peter Zijlstra 已提交
3220 3221 3222
		struct perf_event_context *ctx = event->ctx;
		unsigned long flags;

3223
		raw_spin_lock_irqsave(&ctx->lock, flags);
3224 3225 3226 3227 3228
		/*
		 * may read while context is not active
		 * (e.g., thread is blocked), in that case
		 * we cannot update context time
		 */
S
Stephane Eranian 已提交
3229
		if (ctx->is_active) {
3230
			update_context_time(ctx);
S
Stephane Eranian 已提交
3231 3232
			update_cgrp_time_from_event(event);
		}
3233
		update_event_times(event);
3234
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
T
Thomas Gleixner 已提交
3235 3236
	}

P
Peter Zijlstra 已提交
3237
	return perf_event_count(event);
T
Thomas Gleixner 已提交
3238 3239
}

3240
/*
3241
 * Initialize the perf_event context in a task_struct:
3242
 */
3243
static void __perf_event_init_context(struct perf_event_context *ctx)
3244
{
3245
	raw_spin_lock_init(&ctx->lock);
3246
	mutex_init(&ctx->mutex);
3247
	INIT_LIST_HEAD(&ctx->active_ctx_list);
3248 3249
	INIT_LIST_HEAD(&ctx->pinned_groups);
	INIT_LIST_HEAD(&ctx->flexible_groups);
3250 3251
	INIT_LIST_HEAD(&ctx->event_list);
	atomic_set(&ctx->refcount, 1);
3252
	INIT_DELAYED_WORK(&ctx->orphans_remove, orphans_remove_work);
3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267
}

static struct perf_event_context *
alloc_perf_context(struct pmu *pmu, struct task_struct *task)
{
	struct perf_event_context *ctx;

	ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
	if (!ctx)
		return NULL;

	__perf_event_init_context(ctx);
	if (task) {
		ctx->task = task;
		get_task_struct(task);
T
Thomas Gleixner 已提交
3268
	}
3269 3270 3271
	ctx->pmu = pmu;

	return ctx;
3272 3273
}

3274 3275 3276 3277 3278
static struct task_struct *
find_lively_task_by_vpid(pid_t vpid)
{
	struct task_struct *task;
	int err;
T
Thomas Gleixner 已提交
3279 3280

	rcu_read_lock();
3281
	if (!vpid)
T
Thomas Gleixner 已提交
3282 3283
		task = current;
	else
3284
		task = find_task_by_vpid(vpid);
T
Thomas Gleixner 已提交
3285 3286 3287 3288 3289 3290 3291 3292
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	if (!task)
		return ERR_PTR(-ESRCH);

	/* Reuse ptrace permission checks for now. */
3293 3294 3295 3296
	err = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto errout;

3297 3298 3299 3300 3301 3302 3303
	return task;
errout:
	put_task_struct(task);
	return ERR_PTR(err);

}

3304 3305 3306
/*
 * Returns a matching context with refcount and pincount.
 */
P
Peter Zijlstra 已提交
3307
static struct perf_event_context *
3308 3309
find_get_context(struct pmu *pmu, struct task_struct *task,
		struct perf_event *event)
T
Thomas Gleixner 已提交
3310
{
3311
	struct perf_event_context *ctx, *clone_ctx = NULL;
3312
	struct perf_cpu_context *cpuctx;
3313
	void *task_ctx_data = NULL;
3314
	unsigned long flags;
P
Peter Zijlstra 已提交
3315
	int ctxn, err;
3316
	int cpu = event->cpu;
T
Thomas Gleixner 已提交
3317

3318
	if (!task) {
3319
		/* Must be root to operate on a CPU event: */
3320
		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
T
Thomas Gleixner 已提交
3321 3322 3323
			return ERR_PTR(-EACCES);

		/*
3324
		 * We could be clever and allow attaching an event to an
T
Thomas Gleixner 已提交
3325 3326 3327
		 * offline CPU and activate it when the CPU comes up, but
		 * that's for later.
		 */
3328
		if (!cpu_online(cpu))
T
Thomas Gleixner 已提交
3329 3330
			return ERR_PTR(-ENODEV);

P
Peter Zijlstra 已提交
3331
		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
T
Thomas Gleixner 已提交
3332
		ctx = &cpuctx->ctx;
3333
		get_ctx(ctx);
3334
		++ctx->pin_count;
T
Thomas Gleixner 已提交
3335 3336 3337 3338

		return ctx;
	}

P
Peter Zijlstra 已提交
3339 3340 3341 3342 3343
	err = -EINVAL;
	ctxn = pmu->task_ctx_nr;
	if (ctxn < 0)
		goto errout;

3344 3345 3346 3347 3348 3349 3350 3351
	if (event->attach_state & PERF_ATTACH_TASK_DATA) {
		task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL);
		if (!task_ctx_data) {
			err = -ENOMEM;
			goto errout;
		}
	}

P
Peter Zijlstra 已提交
3352
retry:
P
Peter Zijlstra 已提交
3353
	ctx = perf_lock_task_context(task, ctxn, &flags);
3354
	if (ctx) {
3355
		clone_ctx = unclone_ctx(ctx);
3356
		++ctx->pin_count;
3357 3358 3359 3360 3361

		if (task_ctx_data && !ctx->task_ctx_data) {
			ctx->task_ctx_data = task_ctx_data;
			task_ctx_data = NULL;
		}
3362
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
3363 3364 3365

		if (clone_ctx)
			put_ctx(clone_ctx);
3366
	} else {
3367
		ctx = alloc_perf_context(pmu, task);
3368 3369 3370
		err = -ENOMEM;
		if (!ctx)
			goto errout;
3371

3372 3373 3374 3375 3376
		if (task_ctx_data) {
			ctx->task_ctx_data = task_ctx_data;
			task_ctx_data = NULL;
		}

3377 3378 3379 3380 3381 3382 3383 3384 3385 3386
		err = 0;
		mutex_lock(&task->perf_event_mutex);
		/*
		 * If it has already passed perf_event_exit_task().
		 * we must see PF_EXITING, it takes this mutex too.
		 */
		if (task->flags & PF_EXITING)
			err = -ESRCH;
		else if (task->perf_event_ctxp[ctxn])
			err = -EAGAIN;
3387
		else {
3388
			get_ctx(ctx);
3389
			++ctx->pin_count;
3390
			rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
3391
		}
3392 3393 3394
		mutex_unlock(&task->perf_event_mutex);

		if (unlikely(err)) {
3395
			put_ctx(ctx);
3396 3397 3398 3399

			if (err == -EAGAIN)
				goto retry;
			goto errout;
3400 3401 3402
		}
	}

3403
	kfree(task_ctx_data);
T
Thomas Gleixner 已提交
3404
	return ctx;
3405

P
Peter Zijlstra 已提交
3406
errout:
3407
	kfree(task_ctx_data);
3408
	return ERR_PTR(err);
T
Thomas Gleixner 已提交
3409 3410
}

L
Li Zefan 已提交
3411
static void perf_event_free_filter(struct perf_event *event);
3412
static void perf_event_free_bpf_prog(struct perf_event *event);
L
Li Zefan 已提交
3413

3414
static void free_event_rcu(struct rcu_head *head)
P
Peter Zijlstra 已提交
3415
{
3416
	struct perf_event *event;
P
Peter Zijlstra 已提交
3417

3418 3419 3420
	event = container_of(head, struct perf_event, rcu_head);
	if (event->ns)
		put_pid_ns(event->ns);
L
Li Zefan 已提交
3421
	perf_event_free_filter(event);
3422
	perf_event_free_bpf_prog(event);
3423
	kfree(event);
P
Peter Zijlstra 已提交
3424 3425
}

3426
static void ring_buffer_put(struct ring_buffer *rb);
3427 3428
static void ring_buffer_attach(struct perf_event *event,
			       struct ring_buffer *rb);
3429

3430
static void unaccount_event_cpu(struct perf_event *event, int cpu)
3431
{
3432 3433 3434 3435 3436 3437
	if (event->parent)
		return;

	if (is_cgroup_event(event))
		atomic_dec(&per_cpu(perf_cgroup_events, cpu));
}
3438

3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451
static void unaccount_event(struct perf_event *event)
{
	if (event->parent)
		return;

	if (event->attach_state & PERF_ATTACH_TASK)
		static_key_slow_dec_deferred(&perf_sched_events);
	if (event->attr.mmap || event->attr.mmap_data)
		atomic_dec(&nr_mmap_events);
	if (event->attr.comm)
		atomic_dec(&nr_comm_events);
	if (event->attr.task)
		atomic_dec(&nr_task_events);
3452 3453
	if (event->attr.freq)
		atomic_dec(&nr_freq_events);
3454 3455 3456 3457 3458 3459 3460
	if (is_cgroup_event(event))
		static_key_slow_dec_deferred(&perf_sched_events);
	if (has_branch_stack(event))
		static_key_slow_dec_deferred(&perf_sched_events);

	unaccount_event_cpu(event, event->cpu);
}
3461

3462 3463
static void __free_event(struct perf_event *event)
{
3464
	if (!event->parent) {
3465 3466
		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
			put_callchain_buffers();
3467
	}
3468

3469 3470 3471 3472 3473 3474
	if (event->destroy)
		event->destroy(event);

	if (event->ctx)
		put_ctx(event->ctx);

3475 3476 3477
	if (event->pmu)
		module_put(event->pmu->module);

3478 3479
	call_rcu(&event->rcu_head, free_event_rcu);
}
P
Peter Zijlstra 已提交
3480 3481

static void _free_event(struct perf_event *event)
3482
{
3483
	irq_work_sync(&event->pending);
3484

3485
	unaccount_event(event);
3486

3487
	if (event->rb) {
3488 3489 3490 3491 3492 3493 3494
		/*
		 * Can happen when we close an event with re-directed output.
		 *
		 * Since we have a 0 refcount, perf_mmap_close() will skip
		 * over us; possibly making our ring_buffer_put() the last.
		 */
		mutex_lock(&event->mmap_mutex);
3495
		ring_buffer_attach(event, NULL);
3496
		mutex_unlock(&event->mmap_mutex);
3497 3498
	}

S
Stephane Eranian 已提交
3499 3500 3501
	if (is_cgroup_event(event))
		perf_detach_cgroup(event);

3502
	__free_event(event);
3503 3504
}

P
Peter Zijlstra 已提交
3505 3506 3507 3508 3509
/*
 * Used to free events which have a known refcount of 1, such as in error paths
 * where the event isn't exposed yet and inherited events.
 */
static void free_event(struct perf_event *event)
T
Thomas Gleixner 已提交
3510
{
P
Peter Zijlstra 已提交
3511 3512 3513 3514 3515 3516
	if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1,
				"unexpected event refcount: %ld; ptr=%p\n",
				atomic_long_read(&event->refcount), event)) {
		/* leak to avoid use-after-free */
		return;
	}
T
Thomas Gleixner 已提交
3517

P
Peter Zijlstra 已提交
3518
	_free_event(event);
T
Thomas Gleixner 已提交
3519 3520
}

3521
/*
3522
 * Remove user event from the owner task.
3523
 */
3524
static void perf_remove_from_owner(struct perf_event *event)
3525
{
P
Peter Zijlstra 已提交
3526
	struct task_struct *owner;
3527

P
Peter Zijlstra 已提交
3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546 3547
	rcu_read_lock();
	owner = ACCESS_ONCE(event->owner);
	/*
	 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
	 * !owner it means the list deletion is complete and we can indeed
	 * free this event, otherwise we need to serialize on
	 * owner->perf_event_mutex.
	 */
	smp_read_barrier_depends();
	if (owner) {
		/*
		 * Since delayed_put_task_struct() also drops the last
		 * task reference we can safely take a new reference
		 * while holding the rcu_read_lock().
		 */
		get_task_struct(owner);
	}
	rcu_read_unlock();

	if (owner) {
P
Peter Zijlstra 已提交
3548 3549 3550 3551 3552 3553 3554 3555 3556 3557
		/*
		 * If we're here through perf_event_exit_task() we're already
		 * holding ctx->mutex which would be an inversion wrt. the
		 * normal lock order.
		 *
		 * However we can safely take this lock because its the child
		 * ctx->mutex.
		 */
		mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);

P
Peter Zijlstra 已提交
3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568
		/*
		 * We have to re-check the event->owner field, if it is cleared
		 * we raced with perf_event_exit_task(), acquiring the mutex
		 * ensured they're done, and we can proceed with freeing the
		 * event.
		 */
		if (event->owner)
			list_del_init(&event->owner_entry);
		mutex_unlock(&owner->perf_event_mutex);
		put_task_struct(owner);
	}
3569 3570 3571 3572 3573 3574 3575
}

/*
 * Called when the last reference to the file is gone.
 */
static void put_event(struct perf_event *event)
{
P
Peter Zijlstra 已提交
3576
	struct perf_event_context *ctx;
3577 3578 3579 3580 3581 3582

	if (!atomic_long_dec_and_test(&event->refcount))
		return;

	if (!is_kernel_event(event))
		perf_remove_from_owner(event);
P
Peter Zijlstra 已提交
3583

P
Peter Zijlstra 已提交
3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594 3595
	/*
	 * There are two ways this annotation is useful:
	 *
	 *  1) there is a lock recursion from perf_event_exit_task
	 *     see the comment there.
	 *
	 *  2) there is a lock-inversion with mmap_sem through
	 *     perf_event_read_group(), which takes faults while
	 *     holding ctx->mutex, however this is called after
	 *     the last filedesc died, so there is no possibility
	 *     to trigger the AB-BA case.
	 */
P
Peter Zijlstra 已提交
3596 3597
	ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING);
	WARN_ON_ONCE(ctx->parent_ctx);
P
Peter Zijlstra 已提交
3598
	perf_remove_from_context(event, true);
L
Leon Yu 已提交
3599
	perf_event_ctx_unlock(event, ctx);
P
Peter Zijlstra 已提交
3600 3601

	_free_event(event);
3602 3603
}

P
Peter Zijlstra 已提交
3604 3605 3606 3607 3608 3609 3610
int perf_event_release_kernel(struct perf_event *event)
{
	put_event(event);
	return 0;
}
EXPORT_SYMBOL_GPL(perf_event_release_kernel);

3611 3612 3613 3614
static int perf_release(struct inode *inode, struct file *file)
{
	put_event(file->private_data);
	return 0;
3615 3616
}

3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652
/*
 * Remove all orphaned events from the context.
 */
static void orphans_remove_work(struct work_struct *work)
{
	struct perf_event_context *ctx;
	struct perf_event *event, *tmp;

	ctx = container_of(work, struct perf_event_context,
			   orphans_remove.work);

	mutex_lock(&ctx->mutex);
	list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) {
		struct perf_event *parent_event = event->parent;

		if (!is_orphaned_child(event))
			continue;

		perf_remove_from_context(event, true);

		mutex_lock(&parent_event->child_mutex);
		list_del_init(&event->child_list);
		mutex_unlock(&parent_event->child_mutex);

		free_event(event);
		put_event(parent_event);
	}

	raw_spin_lock_irq(&ctx->lock);
	ctx->orphans_remove_sched = false;
	raw_spin_unlock_irq(&ctx->lock);
	mutex_unlock(&ctx->mutex);

	put_ctx(ctx);
}

3653
u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
3654
{
3655
	struct perf_event *child;
3656 3657
	u64 total = 0;

3658 3659 3660
	*enabled = 0;
	*running = 0;

3661
	mutex_lock(&event->child_mutex);
3662
	total += perf_event_read(event);
3663 3664 3665 3666 3667 3668
	*enabled += event->total_time_enabled +
			atomic64_read(&event->child_total_time_enabled);
	*running += event->total_time_running +
			atomic64_read(&event->child_total_time_running);

	list_for_each_entry(child, &event->child_list, child_list) {
3669
		total += perf_event_read(child);
3670 3671 3672
		*enabled += child->total_time_enabled;
		*running += child->total_time_running;
	}
3673
	mutex_unlock(&event->child_mutex);
3674 3675 3676

	return total;
}
EXPORT_SYMBOL_GPL(perf_event_read_value);

static int perf_event_read_group(struct perf_event *event,
				   u64 read_format, char __user *buf)
{
	struct perf_event *leader = event->group_leader, *sub;
	struct perf_event_context *ctx = leader->ctx;
	int n = 0, size = 0, ret;
	u64 count, enabled, running;
	u64 values[5];

	lockdep_assert_held(&ctx->mutex);

	count = perf_event_read_value(leader, &enabled, &running);

	values[n++] = 1 + leader->nr_siblings;
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;
	values[n++] = count;
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(leader);

	size = n * sizeof(u64);

	if (copy_to_user(buf, values, size))
		return -EFAULT;

	ret = size;

	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		n = 0;

		values[n++] = perf_event_read_value(sub, &enabled, &running);
		if (read_format & PERF_FORMAT_ID)
			values[n++] = primary_event_id(sub);

		size = n * sizeof(u64);

		if (copy_to_user(buf + ret, values, size)) {
			return -EFAULT;
		}

		ret += size;
	}

	return ret;
}

static int perf_event_read_one(struct perf_event *event,
				 u64 read_format, char __user *buf)
{
	u64 enabled, running;
	u64 values[4];
	int n = 0;

	values[n++] = perf_event_read_value(event, &enabled, &running);
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(event);

	if (copy_to_user(buf, values, n * sizeof(u64)))
		return -EFAULT;

	return n * sizeof(u64);
}

static bool is_event_hup(struct perf_event *event)
{
	bool no_children;

	if (event->state != PERF_EVENT_STATE_EXIT)
		return false;

	mutex_lock(&event->child_mutex);
	no_children = list_empty(&event->child_list);
	mutex_unlock(&event->child_mutex);
	return no_children;
}
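
/*
 * An event is considered "hung up" once it has reached the EXIT state
 * and no child events remain; perf_poll() below uses this to report
 * POLLHUP to pollers.
 */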

/*
 * Read the performance event - simple non-blocking version for now
 */
static ssize_t
perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
{
	u64 read_format = event->attr.read_format;
	int ret;

	/*
	 * Return end-of-file for a read on an event that is in
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled on to the CPU at some point).
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		return 0;

	if (count < event->read_size)
		return -ENOSPC;

	WARN_ON_ONCE(event->ctx->parent_ctx);
	if (read_format & PERF_FORMAT_GROUP)
		ret = perf_event_read_group(event, read_format, buf);
	else
		ret = perf_event_read_one(event, read_format, buf);

	return ret;
}

static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct perf_event *event = file->private_data;
	struct perf_event_context *ctx;
	int ret;

	ctx = perf_event_ctx_lock(event);
	ret = perf_read_hw(event, buf, count);
	perf_event_ctx_unlock(event, ctx);

	return ret;
}

static unsigned int perf_poll(struct file *file, poll_table *wait)
{
	struct perf_event *event = file->private_data;
	struct ring_buffer *rb;
	unsigned int events = POLLHUP;

	poll_wait(file, &event->waitq, wait);

	if (is_event_hup(event))
		return events;

	/*
	 * Pin the event->rb by taking event->mmap_mutex; otherwise
	 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
	 */
	mutex_lock(&event->mmap_mutex);
	rb = event->rb;
	if (rb)
		events = atomic_xchg(&rb->poll, 0);
	mutex_unlock(&event->mmap_mutex);
	return events;
}

P
Peter Zijlstra 已提交
3827
static void _perf_event_reset(struct perf_event *event)
3828
{
3829
	(void)perf_event_read(event);
3830
	local64_set(&event->count, 0);
3831
	perf_event_update_userpage(event);
P
Peter Zijlstra 已提交
3832 3833
}

3834
/*
3835 3836 3837 3838
 * Holding the top-level event's child_mutex means that any
 * descendant process that has inherited this event will block
 * in sync_child_event if it goes to exit, thus satisfying the
 * task existence requirements of perf_event_enable/disable.
3839
 */
3840 3841
static void perf_event_for_each_child(struct perf_event *event,
					void (*func)(struct perf_event *))
P
Peter Zijlstra 已提交
3842
{
3843
	struct perf_event *child;
P
Peter Zijlstra 已提交
3844

3845
	WARN_ON_ONCE(event->ctx->parent_ctx);
P
Peter Zijlstra 已提交
3846

3847 3848 3849
	mutex_lock(&event->child_mutex);
	func(event);
	list_for_each_entry(child, &event->child_list, child_list)
P
Peter Zijlstra 已提交
3850
		func(child);
3851
	mutex_unlock(&event->child_mutex);
P
Peter Zijlstra 已提交
3852 3853
}

3854 3855
static void perf_event_for_each(struct perf_event *event,
				  void (*func)(struct perf_event *))
P
Peter Zijlstra 已提交
3856
{
3857 3858
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *sibling;
P
Peter Zijlstra 已提交
3859

P
Peter Zijlstra 已提交
3860 3861
	lockdep_assert_held(&ctx->mutex);

3862
	event = event->group_leader;
3863

3864 3865
	perf_event_for_each_child(event, func);
	list_for_each_entry(sibling, &event->sibling_list, group_entry)
3866
		perf_event_for_each_child(sibling, func);
3867 3868
}

3869
static int perf_event_period(struct perf_event *event, u64 __user *arg)
3870
{
3871
	struct perf_event_context *ctx = event->ctx;
3872
	int ret = 0, active;
3873 3874
	u64 value;

3875
	if (!is_sampling_event(event))
3876 3877
		return -EINVAL;

3878
	if (copy_from_user(&value, arg, sizeof(value)))
3879 3880 3881 3882 3883
		return -EFAULT;

	if (!value)
		return -EINVAL;

3884
	raw_spin_lock_irq(&ctx->lock);
3885 3886
	if (event->attr.freq) {
		if (value > sysctl_perf_event_sample_rate) {
3887 3888 3889 3890
			ret = -EINVAL;
			goto unlock;
		}

3891
		event->attr.sample_freq = value;
3892
	} else {
3893 3894
		event->attr.sample_period = value;
		event->hw.sample_period = value;
3895
	}

	active = (event->state == PERF_EVENT_STATE_ACTIVE);
	if (active) {
		perf_pmu_disable(ctx->pmu);
		event->pmu->stop(event, PERF_EF_UPDATE);
	}

	local64_set(&event->hw.period_left, 0);

	if (active) {
		event->pmu->start(event, PERF_EF_RELOAD);
		perf_pmu_enable(ctx->pmu);
	}

unlock:
	raw_spin_unlock_irq(&ctx->lock);

	return ret;
}
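
/*
 * Userspace reaches perf_event_period() through the PERF_EVENT_IOC_PERIOD
 * ioctl; an illustrative sketch of the call:
 *
 *	u64 period = 10000;
 *	ioctl(perf_fd, PERF_EVENT_IOC_PERIOD, &period);
 */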

static const struct file_operations perf_fops;

static inline int perf_fget_light(int fd, struct fd *p)
3919
{
3920 3921 3922
	struct fd f = fdget(fd);
	if (!f.file)
		return -EBADF;
3923

3924 3925 3926
	if (f.file->f_op != &perf_fops) {
		fdput(f);
		return -EBADF;
3927
	}
3928 3929
	*p = f;
	return 0;
3930 3931 3932 3933
}

static int perf_event_set_output(struct perf_event *event,
				 struct perf_event *output_event);
L
Li Zefan 已提交
3934
static int perf_event_set_filter(struct perf_event *event, void __user *arg);
3935
static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
3936

P
Peter Zijlstra 已提交
3937
static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
3938
{
3939
	void (*func)(struct perf_event *);
P
Peter Zijlstra 已提交
3940
	u32 flags = arg;
3941 3942

	switch (cmd) {
3943
	case PERF_EVENT_IOC_ENABLE:
P
Peter Zijlstra 已提交
3944
		func = _perf_event_enable;
3945
		break;
3946
	case PERF_EVENT_IOC_DISABLE:
P
Peter Zijlstra 已提交
3947
		func = _perf_event_disable;
3948
		break;
3949
	case PERF_EVENT_IOC_RESET:
P
Peter Zijlstra 已提交
3950
		func = _perf_event_reset;
3951
		break;
P
Peter Zijlstra 已提交
3952

3953
	case PERF_EVENT_IOC_REFRESH:
P
Peter Zijlstra 已提交
3954
		return _perf_event_refresh(event, arg);
3955

3956 3957
	case PERF_EVENT_IOC_PERIOD:
		return perf_event_period(event, (u64 __user *)arg);
3958

	case PERF_EVENT_IOC_ID:
	{
		u64 id = primary_event_id(event);

		if (copy_to_user((void __user *)arg, &id, sizeof(id)))
			return -EFAULT;
		return 0;
	}

3968
	case PERF_EVENT_IOC_SET_OUTPUT:
3969 3970 3971
	{
		int ret;
		if (arg != -1) {
			struct perf_event *output_event;
			struct fd output;
			ret = perf_fget_light(arg, &output);
			if (ret)
				return ret;
			output_event = output.file->private_data;
			ret = perf_event_set_output(event, output_event);
			fdput(output);
		} else {
			ret = perf_event_set_output(event, NULL);
3982 3983 3984
		}
		return ret;
	}
3985

L
Li Zefan 已提交
3986 3987 3988
	case PERF_EVENT_IOC_SET_FILTER:
		return perf_event_set_filter(event, (void __user *)arg);

3989 3990 3991
	case PERF_EVENT_IOC_SET_BPF:
		return perf_event_set_bpf_prog(event, arg);

3992
	default:
P
Peter Zijlstra 已提交
3993
		return -ENOTTY;
3994
	}
P
Peter Zijlstra 已提交
3995 3996

	if (flags & PERF_IOC_FLAG_GROUP)
3997
		perf_event_for_each(event, func);
P
Peter Zijlstra 已提交
3998
	else
3999
		perf_event_for_each_child(event, func);
P
Peter Zijlstra 已提交
4000 4001

	return 0;
4002 4003
}

static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct perf_event *event = file->private_data;
	struct perf_event_context *ctx;
	long ret;

	ctx = perf_event_ctx_lock(event);
	ret = _perf_ioctl(event, cmd, arg);
	perf_event_ctx_unlock(event, ctx);

	return ret;
}

#ifdef CONFIG_COMPAT
static long perf_compat_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	switch (_IOC_NR(cmd)) {
	case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
	case _IOC_NR(PERF_EVENT_IOC_ID):
		/* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case) */
		if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
			cmd &= ~IOCSIZE_MASK;
			cmd |= sizeof(void *) << IOCSIZE_SHIFT;
		}
		break;
	}
	return perf_ioctl(file, cmd, arg);
}
#else
# define perf_compat_ioctl NULL
#endif

4037
int perf_event_task_enable(void)
4038
{
P
Peter Zijlstra 已提交
4039
	struct perf_event_context *ctx;
4040
	struct perf_event *event;
4041

4042
	mutex_lock(&current->perf_event_mutex);
P
Peter Zijlstra 已提交
4043 4044 4045 4046 4047
	list_for_each_entry(event, &current->perf_event_list, owner_entry) {
		ctx = perf_event_ctx_lock(event);
		perf_event_for_each_child(event, _perf_event_enable);
		perf_event_ctx_unlock(event, ctx);
	}
4048
	mutex_unlock(&current->perf_event_mutex);
4049 4050 4051 4052

	return 0;
}

4053
int perf_event_task_disable(void)
4054
{
P
Peter Zijlstra 已提交
4055
	struct perf_event_context *ctx;
4056
	struct perf_event *event;
4057

4058
	mutex_lock(&current->perf_event_mutex);
P
Peter Zijlstra 已提交
4059 4060 4061 4062 4063
	list_for_each_entry(event, &current->perf_event_list, owner_entry) {
		ctx = perf_event_ctx_lock(event);
		perf_event_for_each_child(event, _perf_event_disable);
		perf_event_ctx_unlock(event, ctx);
	}
4064
	mutex_unlock(&current->perf_event_mutex);
4065 4066 4067 4068

	return 0;
}

4069
static int perf_event_index(struct perf_event *event)
4070
{
P
Peter Zijlstra 已提交
4071 4072 4073
	if (event->hw.state & PERF_HES_STOPPED)
		return 0;

4074
	if (event->state != PERF_EVENT_STATE_ACTIVE)
4075 4076
		return 0;

4077
	return event->pmu->event_idx(event);
4078 4079
}

4080
static void calc_timer_values(struct perf_event *event,
4081
				u64 *now,
4082 4083
				u64 *enabled,
				u64 *running)
4084
{
4085
	u64 ctx_time;
4086

4087 4088
	*now = perf_clock();
	ctx_time = event->shadow_ctx_time + *now;
4089 4090 4091 4092
	*enabled = ctx_time - event->tstamp_enabled;
	*running = ctx_time - event->tstamp_running;
}

static void perf_event_init_userpage(struct perf_event *event)
{
	struct perf_event_mmap_page *userpg;
	struct ring_buffer *rb;

	rcu_read_lock();
	rb = rcu_dereference(event->rb);
	if (!rb)
		goto unlock;

	userpg = rb->user_page;

	/* Allow new userspace to detect that bit 0 is deprecated */
	userpg->cap_bit0_is_deprecated = 1;
	userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
4108 4109
	userpg->data_offset = PAGE_SIZE;
	userpg->data_size = perf_data_size(rb);
4110 4111 4112 4113 4114

unlock:
	rcu_read_unlock();
}

4115 4116
void __weak arch_perf_update_userpage(
	struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
4117 4118 4119
{
}

4120 4121 4122 4123 4124
/*
 * Callers need to ensure there can be no nesting of this function, otherwise
 * the seqlock logic goes bad. We can not serialize this because the arch
 * code calls this from NMI context.
 */
4125
void perf_event_update_userpage(struct perf_event *event)
4126
{
4127
	struct perf_event_mmap_page *userpg;
4128
	struct ring_buffer *rb;
4129
	u64 enabled, running, now;
4130 4131

	rcu_read_lock();
4132 4133 4134 4135
	rb = rcu_dereference(event->rb);
	if (!rb)
		goto unlock;

	/*
	 * compute total_time_enabled, total_time_running
	 * based on snapshot values taken when the event
	 * was last scheduled in.
	 *
	 * we cannot simply call update_context_time()
	 * because of locking issues, as we can be called in
	 * NMI context
	 */
4145
	calc_timer_values(event, &now, &enabled, &running);
4146

4147
	userpg = rb->user_page;
4148 4149 4150 4151 4152
	/*
	 * Disable preemption so as to not let the corresponding user-space
	 * spin too long if we get preempted.
	 */
	preempt_disable();
4153
	++userpg->lock;
4154
	barrier();
4155
	userpg->index = perf_event_index(event);
P
Peter Zijlstra 已提交
4156
	userpg->offset = perf_event_count(event);
4157
	if (userpg->index)
4158
		userpg->offset -= local64_read(&event->hw.prev_count);
4159

4160
	userpg->time_enabled = enabled +
4161
			atomic64_read(&event->child_total_time_enabled);
4162

4163
	userpg->time_running = running +
4164
			atomic64_read(&event->child_total_time_running);
4165

4166
	arch_perf_update_userpage(event, userpg, now);
4167

4168
	barrier();
4169
	++userpg->lock;
4170
	preempt_enable();
unlock:
	rcu_read_unlock();
}
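
/*
 * A user-space reader of the mmap()ed control page pairs with the
 * ->lock increments above using a seqcount-style retry loop; an
 * illustrative sketch:
 *
 *	do {
 *		seq = pg->lock;
 *		barrier();
 *		... read index, offset, time_enabled, time_running ...
 *		barrier();
 *	} while (pg->lock != seq);
 */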

static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct perf_event *event = vma->vm_file->private_data;
4178
	struct ring_buffer *rb;
4179 4180 4181 4182 4183 4184 4185 4186 4187
	int ret = VM_FAULT_SIGBUS;

	if (vmf->flags & FAULT_FLAG_MKWRITE) {
		if (vmf->pgoff == 0)
			ret = 0;
		return ret;
	}

	rcu_read_lock();
4188 4189
	rb = rcu_dereference(event->rb);
	if (!rb)
4190 4191 4192 4193 4194
		goto unlock;

	if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
		goto unlock;

4195
	vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209
	if (!vmf->page)
		goto unlock;

	get_page(vmf->page);
	vmf->page->mapping = vma->vm_file->f_mapping;
	vmf->page->index   = vmf->pgoff;

	ret = 0;
unlock:
	rcu_read_unlock();

	return ret;
}

4210 4211 4212
static void ring_buffer_attach(struct perf_event *event,
			       struct ring_buffer *rb)
{
4213
	struct ring_buffer *old_rb = NULL;
4214 4215
	unsigned long flags;

4216 4217 4218 4219 4220 4221
	if (event->rb) {
		/*
		 * Should be impossible, we set this when removing
		 * event->rb_entry and wait/clear when adding event->rb_entry.
		 */
		WARN_ON_ONCE(event->rcu_pending);
4222

4223 4224 4225
		old_rb = event->rb;
		event->rcu_batches = get_state_synchronize_rcu();
		event->rcu_pending = 1;
4226

4227 4228 4229 4230
		spin_lock_irqsave(&old_rb->event_lock, flags);
		list_del_rcu(&event->rb_entry);
		spin_unlock_irqrestore(&old_rb->event_lock, flags);
	}
4231

4232 4233 4234 4235
	if (event->rcu_pending && rb) {
		cond_synchronize_rcu(event->rcu_batches);
		event->rcu_pending = 0;
	}
4236

	if (rb) {
		spin_lock_irqsave(&rb->event_lock, flags);
		list_add_rcu(&event->rb_entry, &rb->event_list);
		spin_unlock_irqrestore(&rb->event_lock, flags);
	}

	rcu_assign_pointer(event->rb, rb);

	if (old_rb) {
		ring_buffer_put(old_rb);
		/*
		 * Since we detached before setting the new rb (so that we
		 * could attach the new rb), we could have missed a wakeup.
		 * Provide it now.
		 */
		wake_up_all(&event->waitq);
	}
4254 4255 4256 4257 4258 4259 4260 4261
}

static void ring_buffer_wakeup(struct perf_event *event)
{
	struct ring_buffer *rb;

	rcu_read_lock();
	rb = rcu_dereference(event->rb);
4262 4263 4264 4265
	if (rb) {
		list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
			wake_up_all(&event->waitq);
	}
4266 4267 4268
	rcu_read_unlock();
}

4269
static void rb_free_rcu(struct rcu_head *rcu_head)
4270
{
4271
	struct ring_buffer *rb;
4272

4273 4274
	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
	rb_free(rb);
4275 4276
}

4277
static struct ring_buffer *ring_buffer_get(struct perf_event *event)
4278
{
4279
	struct ring_buffer *rb;
4280

4281
	rcu_read_lock();
4282 4283 4284 4285
	rb = rcu_dereference(event->rb);
	if (rb) {
		if (!atomic_inc_not_zero(&rb->refcount))
			rb = NULL;
4286 4287 4288
	}
	rcu_read_unlock();

4289
	return rb;
4290 4291
}

static void ring_buffer_put(struct ring_buffer *rb)
{
	if (!atomic_dec_and_test(&rb->refcount))
		return;

	WARN_ON_ONCE(!list_empty(&rb->event_list));

	call_rcu(&rb->rcu_head, rb_free_rcu);
}
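
/*
 * ring_buffer_get()/ring_buffer_put() pin the buffer across RCU-protected
 * lookups; the final put defers the actual free to an RCU grace period
 * via rb_free_rcu().
 */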

static void perf_mmap_open(struct vm_area_struct *vma)
{
4304
	struct perf_event *event = vma->vm_file->private_data;
4305

4306
	atomic_inc(&event->mmap_count);
4307
	atomic_inc(&event->rb->mmap_count);
4308

4309 4310 4311
	if (vma->vm_pgoff)
		atomic_inc(&event->rb->aux_mmap_count);

4312 4313
	if (event->pmu->event_mapped)
		event->pmu->event_mapped(event);
4314 4315
}

4316 4317 4318 4319 4320 4321 4322 4323
/*
 * A buffer can be mmap()ed multiple times; either directly through the same
 * event, or through other events by use of perf_event_set_output().
 *
 * In order to undo the VM accounting done by perf_mmap() we need to destroy
 * the buffer here, where we still have a VM context. This means we need
 * to detach all events redirecting to us.
 */
4324 4325
static void perf_mmap_close(struct vm_area_struct *vma)
{
4326
	struct perf_event *event = vma->vm_file->private_data;
4327

4328
	struct ring_buffer *rb = ring_buffer_get(event);
4329 4330 4331
	struct user_struct *mmap_user = rb->mmap_user;
	int mmap_locked = rb->mmap_locked;
	unsigned long size = perf_data_size(rb);
4332

4333 4334 4335
	if (event->pmu->event_unmapped)
		event->pmu->event_unmapped(event);

	/*
	 * rb->aux_mmap_count will always drop before rb->mmap_count and
	 * event->mmap_count, so it is ok to use event->mmap_mutex to
	 * serialize with perf_mmap here.
	 */
	if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
	    atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
		atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
		vma->vm_mm->pinned_vm -= rb->aux_mmap_locked;

		rb_free_aux(rb);
		mutex_unlock(&event->mmap_mutex);
	}

4350 4351 4352
	atomic_dec(&rb->mmap_count);

	if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
4353
		goto out_put;
4354

4355
	ring_buffer_attach(event, NULL);
4356 4357 4358
	mutex_unlock(&event->mmap_mutex);

	/* If there's still other mmap()s of this buffer, we're done. */
4359 4360
	if (atomic_read(&rb->mmap_count))
		goto out_put;
4361

	/*
	 * No other mmap()s, detach from all other events that might redirect
	 * into the now unreachable buffer. Somewhat complicated by the
	 * fact that rb::event_lock otherwise nests inside mmap_mutex.
	 */
again:
	rcu_read_lock();
	list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
		if (!atomic_long_inc_not_zero(&event->refcount)) {
			/*
			 * This event is en-route to free_event() which will
			 * detach it and remove it from the list.
			 */
			continue;
		}
		rcu_read_unlock();
4378

		mutex_lock(&event->mmap_mutex);
		/*
		 * Check we didn't race with perf_event_set_output() which can
		 * swizzle the rb from under us while we were waiting to
		 * acquire mmap_mutex.
		 *
		 * If we find a different rb; ignore this event, a next
		 * iteration will no longer find it on the list. We have to
		 * still restart the iteration to make sure we're not now
		 * iterating the wrong list.
		 */
4390 4391 4392
		if (event->rb == rb)
			ring_buffer_attach(event, NULL);

4393
		mutex_unlock(&event->mmap_mutex);
4394
		put_event(event);
4395

4396 4397 4398 4399 4400
		/*
		 * Restart the iteration; either we're on the wrong list or
		 * destroyed its integrity by doing a deletion.
		 */
		goto again;
4401
	}
	rcu_read_unlock();

	/*
	 * It could be there's still a few 0-ref events on the list; they'll
	 * get cleaned up by free_event() -- they'll also still have their
	 * ref on the rb and will free it whenever they are done with it.
	 *
	 * Aside from that, this buffer is 'fully' detached and unmapped,
	 * undo the VM accounting.
	 */

	atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
	vma->vm_mm->pinned_vm -= mmap_locked;
	free_uid(mmap_user);

4417
out_put:
4418
	ring_buffer_put(rb); /* could be last */
4419 4420
}

static const struct vm_operations_struct perf_mmap_vmops = {
	.open		= perf_mmap_open,
	.close		= perf_mmap_close, /* non mergeable */
	.fault		= perf_mmap_fault,
	.page_mkwrite	= perf_mmap_fault,
};
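
/*
 * Userspace maps the buffer by calling mmap() on the event fd with a
 * length of 1 + 2^n pages; an illustrative sketch (not from this file):
 *
 *	len  = (1 + 8) * page_size;
 *	base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, perf_fd, 0);
 *
 * Page 0 is the perf_event_mmap_page control page, the remaining pages
 * hold the data. A second mmap() at rb->user_page->aux_offset sets up
 * the AUX area handled below.
 */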

static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
4430
	struct perf_event *event = file->private_data;
4431
	unsigned long user_locked, user_lock_limit;
4432
	struct user_struct *user = current_user();
4433
	unsigned long locked, lock_limit;
4434
	struct ring_buffer *rb = NULL;
4435 4436
	unsigned long vma_size;
	unsigned long nr_pages;
4437
	long user_extra = 0, extra = 0;
4438
	int ret = 0, flags = 0;
4439

4440 4441 4442
	/*
	 * Don't allow mmap() of inherited per-task counters. This would
	 * create a performance issue due to all children writing to the
4443
	 * same rb.
4444 4445 4446 4447
	 */
	if (event->cpu == -1 && event->attr.inherit)
		return -EINVAL;

4448
	if (!(vma->vm_flags & VM_SHARED))
4449
		return -EINVAL;
4450 4451

	vma_size = vma->vm_end - vma->vm_start;

	if (vma->vm_pgoff == 0) {
		nr_pages = (vma_size / PAGE_SIZE) - 1;
	} else {
		/*
		 * AUX area mapping: if rb->aux_nr_pages != 0, it's already
		 * mapped, all subsequent mappings should have the same size
		 * and offset. Must be above the normal perf buffer.
		 */
		u64 aux_offset, aux_size;

		if (!event->rb)
			return -EINVAL;

		nr_pages = vma_size / PAGE_SIZE;

		mutex_lock(&event->mmap_mutex);
		ret = -EINVAL;

		rb = event->rb;
		if (!rb)
			goto aux_unlock;

		aux_offset = ACCESS_ONCE(rb->user_page->aux_offset);
		aux_size = ACCESS_ONCE(rb->user_page->aux_size);

		if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
			goto aux_unlock;

		if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
			goto aux_unlock;

		/* already mapped with a different offset */
		if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
			goto aux_unlock;

		if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE)
			goto aux_unlock;

		/* already mapped with a different size */
		if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
			goto aux_unlock;

		if (!is_power_of_2(nr_pages))
			goto aux_unlock;

		if (!atomic_inc_not_zero(&rb->mmap_count))
			goto aux_unlock;

		if (rb_has_aux(rb)) {
			atomic_inc(&rb->aux_mmap_count);
			ret = 0;
			goto unlock;
		}

		atomic_set(&rb->aux_mmap_count, 1);
		user_extra = nr_pages;

		goto accounting;
	}
4512

4513
	/*
4514
	 * If we have rb pages ensure they're a power-of-two number, so we
4515 4516
	 * can do bitmasks instead of modulo.
	 */
4517
	if (nr_pages != 0 && !is_power_of_2(nr_pages))
4518 4519
		return -EINVAL;

4520
	if (vma_size != PAGE_SIZE * (1 + nr_pages))
4521 4522
		return -EINVAL;

4523
	WARN_ON_ONCE(event->ctx->parent_ctx);
4524
again:
4525
	mutex_lock(&event->mmap_mutex);
4526
	if (event->rb) {
4527
		if (event->rb->nr_pages != nr_pages) {
4528
			ret = -EINVAL;
			goto unlock;
		}

		if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
			/*
			 * Raced against perf_mmap_close() through
			 * perf_event_set_output(). Try again, hope for better
			 * luck.
			 */
			mutex_unlock(&event->mmap_mutex);
			goto again;
		}

4542 4543 4544
		goto unlock;
	}

4545
	user_extra = nr_pages + 1;
4546 4547

accounting:
4548
	user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
I
Ingo Molnar 已提交
4549 4550 4551 4552 4553 4554

	/*
	 * Increase the limit linearly with more CPUs:
	 */
	user_lock_limit *= num_online_cpus();

4555
	user_locked = atomic_long_read(&user->locked_vm) + user_extra;
4556

4557 4558
	if (user_locked > user_lock_limit)
		extra = user_locked - user_lock_limit;
4559

4560
	lock_limit = rlimit(RLIMIT_MEMLOCK);
4561
	lock_limit >>= PAGE_SHIFT;
4562
	locked = vma->vm_mm->pinned_vm + extra;
4563

4564 4565
	if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
		!capable(CAP_IPC_LOCK)) {
4566 4567 4568
		ret = -EPERM;
		goto unlock;
	}
4569

4570
	WARN_ON(!rb && event->rb);
4571

4572
	if (vma->vm_flags & VM_WRITE)
4573
		flags |= RING_BUFFER_WRITABLE;
4574

4575
	if (!rb) {
4576 4577 4578
		rb = rb_alloc(nr_pages,
			      event->attr.watermark ? event->attr.wakeup_watermark : 0,
			      event->cpu, flags);
P
Peter Zijlstra 已提交
4579

4580 4581 4582 4583
		if (!rb) {
			ret = -ENOMEM;
			goto unlock;
		}
4584

4585 4586 4587
		atomic_set(&rb->mmap_count, 1);
		rb->mmap_user = get_current_user();
		rb->mmap_locked = extra;
P
Peter Zijlstra 已提交
4588

4589
		ring_buffer_attach(event, rb);
4590

4591 4592 4593 4594 4595 4596 4597
		perf_event_init_userpage(event);
		perf_event_update_userpage(event);
	} else {
		ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, flags);
		if (!ret)
			rb->aux_mmap_locked = extra;
	}
4598

4599
unlock:
4600 4601 4602 4603
	if (!ret) {
		atomic_long_add(user_extra, &user->locked_vm);
		vma->vm_mm->pinned_vm += extra;

4604
		atomic_inc(&event->mmap_count);
4605 4606 4607 4608
	} else if (rb) {
		atomic_dec(&rb->mmap_count);
	}
aux_unlock:
4609
	mutex_unlock(&event->mmap_mutex);
4610

4611 4612 4613 4614
	/*
	 * Since pinned accounting is per vm we cannot allow fork() to copy our
	 * vma.
	 */
P
Peter Zijlstra 已提交
4615
	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
4616
	vma->vm_ops = &perf_mmap_vmops;
4617

4618 4619 4620
	if (event->pmu->event_mapped)
		event->pmu->event_mapped(event);

4621
	return ret;
4622 4623
}

P
Peter Zijlstra 已提交
4624 4625
static int perf_fasync(int fd, struct file *filp, int on)
{
A
Al Viro 已提交
4626
	struct inode *inode = file_inode(filp);
4627
	struct perf_event *event = filp->private_data;
P
Peter Zijlstra 已提交
4628 4629 4630
	int retval;

	mutex_lock(&inode->i_mutex);
4631
	retval = fasync_helper(fd, filp, on, &event->fasync);
P
Peter Zijlstra 已提交
4632 4633 4634 4635 4636 4637 4638 4639
	mutex_unlock(&inode->i_mutex);

	if (retval < 0)
		return retval;

	return 0;
}

T
Thomas Gleixner 已提交
4640
static const struct file_operations perf_fops = {
4641
	.llseek			= no_llseek,
T
Thomas Gleixner 已提交
4642 4643 4644
	.release		= perf_release,
	.read			= perf_read,
	.poll			= perf_poll,
4645
	.unlocked_ioctl		= perf_ioctl,
P
Pawel Moll 已提交
4646
	.compat_ioctl		= perf_compat_ioctl,
4647
	.mmap			= perf_mmap,
P
Peter Zijlstra 已提交
4648
	.fasync			= perf_fasync,
T
Thomas Gleixner 已提交
4649 4650
};

4651
/*
4652
 * Perf event wakeup
4653 4654 4655 4656 4657
 *
 * If there's data, ensure we set the poll() state and publish everything
 * to user-space before waking everybody up.
 */

4658
void perf_event_wakeup(struct perf_event *event)
4659
{
4660
	ring_buffer_wakeup(event);
4661

4662 4663 4664
	if (event->pending_kill) {
		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
		event->pending_kill = 0;
4665
	}
4666 4667
}

4668
static void perf_pending_event(struct irq_work *entry)
4669
{
4670 4671
	struct perf_event *event = container_of(entry,
			struct perf_event, pending);
4672 4673 4674 4675 4676 4677 4678
	int rctx;

	rctx = perf_swevent_get_recursion_context();
	/*
	 * If we 'fail' here, that's OK, it means recursion is already disabled
	 * and we won't recurse 'further'.
	 */
4679

4680 4681 4682
	if (event->pending_disable) {
		event->pending_disable = 0;
		__perf_event_disable(event);
4683 4684
	}

4685 4686 4687
	if (event->pending_wakeup) {
		event->pending_wakeup = 0;
		perf_event_wakeup(event);
4688
	}
4689 4690 4691

	if (rctx >= 0)
		perf_swevent_put_recursion_context(rctx);
4692 4693
}

/*
 * We assume there is only KVM supporting the callbacks.
 * Later on, we might change it to a list if there is
 * another virtualization implementation supporting the callbacks.
 */
struct perf_guest_info_callbacks *perf_guest_cbs;

int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
	perf_guest_cbs = cbs;
	return 0;
}
EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);

int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
	perf_guest_cbs = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);

static void
perf_output_sample_regs(struct perf_output_handle *handle,
			struct pt_regs *regs, u64 mask)
{
	int bit;

	for_each_set_bit(bit, (const unsigned long *) &mask,
			 sizeof(mask) * BITS_PER_BYTE) {
		u64 val;

		val = perf_reg_value(regs, bit);
		perf_output_put(handle, val);
	}
}

4730
static void perf_sample_regs_user(struct perf_regs *regs_user,
4731 4732
				  struct pt_regs *regs,
				  struct pt_regs *regs_user_copy)
4733
{
4734 4735
	if (user_mode(regs)) {
		regs_user->abi = perf_reg_abi(current);
4736
		regs_user->regs = regs;
4737 4738
	} else if (current->mm) {
		perf_get_regs_user(regs_user, regs, regs_user_copy);
4739 4740 4741
	} else {
		regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
		regs_user->regs = NULL;
4742 4743 4744
	}
}

4745 4746 4747 4748 4749 4750 4751 4752
static void perf_sample_regs_intr(struct perf_regs *regs_intr,
				  struct pt_regs *regs)
{
	regs_intr->regs = regs;
	regs_intr->abi  = perf_reg_abi(current);
}


/*
 * Get remaining task size from user stack pointer.
 *
 * It'd be better to take stack vma map and limit this more
 * precisely, but there's no way to get it safely under interrupt,
 * so using TASK_SIZE as limit.
 */
static u64 perf_ustack_task_size(struct pt_regs *regs)
{
	unsigned long addr = perf_user_stack_pointer(regs);

	if (!addr || addr >= TASK_SIZE)
		return 0;

	return TASK_SIZE - addr;
}

static u16
perf_sample_ustack_size(u16 stack_size, u16 header_size,
			struct pt_regs *regs)
{
	u64 task_size;

	/* No regs, no stack pointer, no dump. */
	if (!regs)
		return 0;

	/*
	 * Check if we fit in with the requested stack size into the:
	 * - TASK_SIZE
	 *   If we don't, we limit the size to the TASK_SIZE.
	 *
	 * - remaining sample size
	 *   If we don't, we customize the stack size to
	 *   fit in to the remaining sample size.
	 */

	task_size  = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
	stack_size = min(stack_size, (u16) task_size);

	/* Current header size plus static size and dynamic size. */
	header_size += 2 * sizeof(u64);

	/* Do we fit in with the current stack dump size? */
	if ((u16) (header_size + stack_size) < header_size) {
		/*
		 * If we overflow the maximum size for the sample,
		 * we customize the stack dump size to fit in.
		 */
		stack_size = USHRT_MAX - header_size - sizeof(u64);
		stack_size = round_up(stack_size, sizeof(u64));
	}

	return stack_size;
}

static void
perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
			  struct pt_regs *regs)
{
	/* Case of a kernel thread, nothing to dump */
	if (!regs) {
		u64 size = 0;
		perf_output_put(handle, size);
	} else {
		unsigned long sp;
		unsigned int rem;
		u64 dyn_size;

		/*
		 * We dump:
		 * static size
		 *   - the size requested by user or the best one we can fit
		 *     in to the sample max size
		 * data
		 *   - user stack dump data
		 * dynamic size
		 *   - the actual dumped size
		 */

		/* Static size. */
		perf_output_put(handle, dump_size);

		/* Data. */
		sp = perf_user_stack_pointer(regs);
		rem = __output_copy_user(handle, (void *) sp, dump_size);
		dyn_size = dump_size - rem;

		perf_output_skip(handle, rem);

		/* Dynamic size. */
		perf_output_put(handle, dyn_size);
	}
}

4848 4849 4850
static void __perf_event_header__init_id(struct perf_event_header *header,
					 struct perf_sample_data *data,
					 struct perf_event *event)
{
	u64 sample_type = event->attr.sample_type;

	data->type = sample_type;
	header->size += event->id_header_size;

	if (sample_type & PERF_SAMPLE_TID) {
		/* namespace issues */
		data->tid_entry.pid = perf_event_pid(event, current);
		data->tid_entry.tid = perf_event_tid(event, current);
	}

	if (sample_type & PERF_SAMPLE_TIME)
4864
		data->time = perf_event_clock(event);
4865

4866
	if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
		data->id = primary_event_id(event);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		data->stream_id = event->id;

	if (sample_type & PERF_SAMPLE_CPU) {
		data->cpu_entry.cpu	 = raw_smp_processor_id();
		data->cpu_entry.reserved = 0;
	}
}

4878 4879 4880
void perf_event_header__init_id(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event)
{
	if (event->attr.sample_id_all)
		__perf_event_header__init_id(header, data, event);
}

static void __perf_event__output_id_sample(struct perf_output_handle *handle,
					   struct perf_sample_data *data)
{
	u64 sample_type = data->type;

	if (sample_type & PERF_SAMPLE_TID)
		perf_output_put(handle, data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		perf_output_put(handle, data->time);

	if (sample_type & PERF_SAMPLE_ID)
		perf_output_put(handle, data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		perf_output_put(handle, data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		perf_output_put(handle, data->cpu_entry);
4905 4906 4907

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		perf_output_put(handle, data->id);
4908 4909
}

4910 4911 4912
void perf_event__output_id_sample(struct perf_event *event,
				  struct perf_output_handle *handle,
				  struct perf_sample_data *sample)
4913 4914 4915 4916 4917
{
	if (event->attr.sample_id_all)
		__perf_event__output_id_sample(handle, sample);
}

4918
static void perf_output_read_one(struct perf_output_handle *handle,
4919 4920
				 struct perf_event *event,
				 u64 enabled, u64 running)
4921
{
4922
	u64 read_format = event->attr.read_format;
4923 4924 4925
	u64 values[4];
	int n = 0;

P
Peter Zijlstra 已提交
4926
	values[n++] = perf_event_count(event);
4927
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
4928
		values[n++] = enabled +
4929
			atomic64_read(&event->child_total_time_enabled);
4930 4931
	}
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
4932
		values[n++] = running +
4933
			atomic64_read(&event->child_total_time_running);
4934 4935
	}
	if (read_format & PERF_FORMAT_ID)
4936
		values[n++] = primary_event_id(event);
4937

4938
	__output_copy(handle, values, n * sizeof(u64));
4939 4940 4941
}

/*
4942
 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
4943 4944
 */
static void perf_output_read_group(struct perf_output_handle *handle,
4945 4946
			    struct perf_event *event,
			    u64 enabled, u64 running)
4947
{
4948 4949
	struct perf_event *leader = event->group_leader, *sub;
	u64 read_format = event->attr.read_format;
4950 4951 4952 4953 4954 4955
	u64 values[5];
	int n = 0;

	values[n++] = 1 + leader->nr_siblings;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
4956
		values[n++] = enabled;
4957 4958

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
4959
		values[n++] = running;
4960

4961
	if (leader != event)
4962 4963
		leader->pmu->read(leader);

P
Peter Zijlstra 已提交
4964
	values[n++] = perf_event_count(leader);
4965
	if (read_format & PERF_FORMAT_ID)
4966
		values[n++] = primary_event_id(leader);
4967

4968
	__output_copy(handle, values, n * sizeof(u64));
4969

4970
	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
4971 4972
		n = 0;

4973 4974
		if ((sub != event) &&
		    (sub->state == PERF_EVENT_STATE_ACTIVE))
4975 4976
			sub->pmu->read(sub);

P
Peter Zijlstra 已提交
4977
		values[n++] = perf_event_count(sub);
4978
		if (read_format & PERF_FORMAT_ID)
4979
			values[n++] = primary_event_id(sub);
4980

4981
		__output_copy(handle, values, n * sizeof(u64));
4982 4983 4984
	}
}

4985 4986 4987
#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
				 PERF_FORMAT_TOTAL_TIME_RUNNING)

4988
static void perf_output_read(struct perf_output_handle *handle,
4989
			     struct perf_event *event)
4990
{
4991
	u64 enabled = 0, running = 0, now;
	u64 read_format = event->attr.read_format;

	/*
	 * compute total_time_enabled, total_time_running
	 * based on snapshot values taken when the event
	 * was last scheduled in.
	 *
	 * we cannot simply call update_context_time()
	 * because of locking issues, as we are called in
	 * NMI context
	 */
5003
	if (read_format & PERF_FORMAT_TOTAL_TIMES)
5004
		calc_timer_values(event, &now, &enabled, &running);
5005

5006
	if (event->attr.read_format & PERF_FORMAT_GROUP)
5007
		perf_output_read_group(handle, event, enabled, running);
5008
	else
5009
		perf_output_read_one(handle, event, enabled, running);
5010 5011
}

5012 5013 5014
void perf_output_sample(struct perf_output_handle *handle,
			struct perf_event_header *header,
			struct perf_sample_data *data,
5015
			struct perf_event *event)
5016 5017 5018 5019 5020
{
	u64 sample_type = data->type;

	perf_output_put(handle, *header);

5021 5022 5023
	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		perf_output_put(handle, data->id);

	if (sample_type & PERF_SAMPLE_IP)
		perf_output_put(handle, data->ip);

	if (sample_type & PERF_SAMPLE_TID)
		perf_output_put(handle, data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		perf_output_put(handle, data->time);

	if (sample_type & PERF_SAMPLE_ADDR)
		perf_output_put(handle, data->addr);

	if (sample_type & PERF_SAMPLE_ID)
		perf_output_put(handle, data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		perf_output_put(handle, data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		perf_output_put(handle, data->cpu_entry);

	if (sample_type & PERF_SAMPLE_PERIOD)
		perf_output_put(handle, data->period);

	if (sample_type & PERF_SAMPLE_READ)
5049
		perf_output_read(handle, event);
5050 5051 5052 5053 5054 5055 5056 5057 5058 5059

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		if (data->callchain) {
			int size = 1;

			if (data->callchain)
				size += data->callchain->nr;

			size *= sizeof(u64);

5060
			__output_copy(handle, data->callchain, size);
5061 5062 5063 5064 5065 5066 5067 5068 5069
		} else {
			u64 nr = 0;
			perf_output_put(handle, nr);
		}
	}

	if (sample_type & PERF_SAMPLE_RAW) {
		if (data->raw) {
			perf_output_put(handle, data->raw->size);
5070 5071
			__output_copy(handle, data->raw->data,
					   data->raw->size);
5072 5073 5074 5075 5076 5077 5078 5079 5080 5081 5082
		} else {
			struct {
				u32	size;
				u32	data;
			} raw = {
				.size = sizeof(u32),
				.data = 0,
			};
			perf_output_put(handle, raw);
		}
	}
5083

	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
		if (data->br_stack) {
			size_t size;

			size = data->br_stack->nr
			     * sizeof(struct perf_branch_entry);

			perf_output_put(handle, data->br_stack->nr);
			perf_output_copy(handle, data->br_stack->entries, size);
		} else {
			/*
			 * we always store at least the value of nr
			 */
			u64 nr = 0;
			perf_output_put(handle, nr);
		}
	}

	if (sample_type & PERF_SAMPLE_REGS_USER) {
		u64 abi = data->regs_user.abi;

		/*
		 * If there are no regs to dump, notice it through
		 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
		 */
		perf_output_put(handle, abi);

		if (abi) {
			u64 mask = event->attr.sample_regs_user;
			perf_output_sample_regs(handle,
						data->regs_user.regs,
						mask);
		}
	}
5118

5119
	if (sample_type & PERF_SAMPLE_STACK_USER) {
5120 5121 5122
		perf_output_sample_ustack(handle,
					  data->stack_user_size,
					  data->regs_user.regs);
5123
	}
A
Andi Kleen 已提交
5124 5125 5126

	if (sample_type & PERF_SAMPLE_WEIGHT)
		perf_output_put(handle, data->weight);
5127 5128 5129

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		perf_output_put(handle, data->data_src.val);
5130

A
Andi Kleen 已提交
5131 5132 5133
	if (sample_type & PERF_SAMPLE_TRANSACTION)
		perf_output_put(handle, data->txn);

	if (sample_type & PERF_SAMPLE_REGS_INTR) {
		u64 abi = data->regs_intr.abi;
		/*
		 * If there are no regs to dump, notice it through
		 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
		 */
		perf_output_put(handle, abi);

		if (abi) {
			u64 mask = event->attr.sample_regs_intr;

			perf_output_sample_regs(handle,
						data->regs_intr.regs,
						mask);
		}
	}

	if (!event->attr.watermark) {
		int wakeup_events = event->attr.wakeup_events;

		if (wakeup_events) {
			struct ring_buffer *rb = handle->rb;
			int events = local_inc_return(&rb->events);

			if (events >= wakeup_events) {
				local_sub(wakeup_events, &rb->events);
				local_inc(&rb->wakeup);
			}
		}
	}
5164 5165 5166 5167
}

void perf_prepare_sample(struct perf_event_header *header,
			 struct perf_sample_data *data,
5168
			 struct perf_event *event,
5169
			 struct pt_regs *regs)
5170
{
5171
	u64 sample_type = event->attr.sample_type;
5172

5173
	header->type = PERF_RECORD_SAMPLE;
5174
	header->size = sizeof(*header) + event->header_size;
5175 5176 5177

	header->misc = 0;
	header->misc |= perf_misc_flags(regs);
5178

5179
	__perf_event_header__init_id(header, data, event);
5180

5181
	if (sample_type & PERF_SAMPLE_IP)
5182 5183
		data->ip = perf_instruction_pointer(regs);

5184
	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5185
		int size = 1;
5186

5187
		data->callchain = perf_callchain(event, regs);
5188 5189 5190 5191 5192

		if (data->callchain)
			size += data->callchain->nr;

		header->size += size * sizeof(u64);
5193 5194
	}

5195
	if (sample_type & PERF_SAMPLE_RAW) {
5196 5197 5198 5199 5200 5201 5202 5203
		int size = sizeof(u32);

		if (data->raw)
			size += data->raw->size;
		else
			size += sizeof(u32);

		WARN_ON_ONCE(size & (sizeof(u64)-1));
5204
		header->size += size;
5205
	}
5206 5207 5208 5209 5210 5211 5212 5213 5214

	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
		int size = sizeof(u64); /* nr */
		if (data->br_stack) {
			size += data->br_stack->nr
			      * sizeof(struct perf_branch_entry);
		}
		header->size += size;
	}
5215

5216
	if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
5217 5218
		perf_sample_regs_user(&data->regs_user, regs,
				      &data->regs_user_copy);
5219

5220 5221 5222 5223 5224 5225 5226 5227 5228 5229 5230
	if (sample_type & PERF_SAMPLE_REGS_USER) {
		/* regs dump ABI info */
		int size = sizeof(u64);

		if (data->regs_user.regs) {
			u64 mask = event->attr.sample_regs_user;
			size += hweight64(mask) * sizeof(u64);
		}

		header->size += size;
	}
5231 5232 5233 5234 5235 5236 5237 5238 5239 5240 5241 5242

	if (sample_type & PERF_SAMPLE_STACK_USER) {
		/*
		 * Either we need the PERF_SAMPLE_STACK_USER bit to always be
		 * processed as the last one, or an additional check must be
		 * added in case a new sample type is added, because we could
		 * eat up the rest of the sample size.
		 */
		u16 stack_size = event->attr.sample_stack_user;
		u16 size = sizeof(u64);

		stack_size = perf_sample_ustack_size(stack_size, header->size,
5243
						     data->regs_user.regs);

		/*
		 * If there is something to dump, add space for the dump
		 * itself and for the field that tells the dynamic size,
		 * which is how many have been actually dumped.
		 */
		if (stack_size)
			size += sizeof(u64) + stack_size;

		data->stack_user_size = stack_size;
		header->size += size;
	}

	if (sample_type & PERF_SAMPLE_REGS_INTR) {
		/* regs dump ABI info */
		int size = sizeof(u64);

		perf_sample_regs_intr(&data->regs_intr, regs);

		if (data->regs_intr.regs) {
			u64 mask = event->attr.sample_regs_intr;

			size += hweight64(mask) * sizeof(u64);
		}

		header->size += size;
	}
5271
}
5272

5273
static void perf_event_output(struct perf_event *event,
5274 5275 5276 5277 5278
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct perf_output_handle handle;
	struct perf_event_header header;
5279

5280 5281 5282
	/* protect the callchain buffers */
	rcu_read_lock();

5283
	perf_prepare_sample(&header, data, event, regs);
P
Peter Zijlstra 已提交
5284

5285
	if (perf_output_begin(&handle, event, header.size))
5286
		goto exit;
5287

5288
	perf_output_sample(&handle, &header, data, event);
5289

5290
	perf_output_end(&handle);
5291 5292 5293

exit:
	rcu_read_unlock();
5294 5295
}

5296
/*
5297
 * read event_id
5298 5299 5300 5301 5302 5303 5304 5305 5306 5307
 */

struct perf_read_event {
	struct perf_event_header	header;

	u32				pid;
	u32				tid;
};

static void
5308
perf_event_read_event(struct perf_event *event,
5309 5310 5311
			struct task_struct *task)
{
	struct perf_output_handle handle;
5312
	struct perf_sample_data sample;
5313
	struct perf_read_event read_event = {
5314
		.header = {
5315
			.type = PERF_RECORD_READ,
5316
			.misc = 0,
5317
			.size = sizeof(read_event) + event->read_size,
5318
		},
5319 5320
		.pid = perf_event_pid(event, task),
		.tid = perf_event_tid(event, task),
5321
	};
5322
	int ret;
5323

5324
	perf_event_header__init_id(&read_event.header, &sample, event);
5325
	ret = perf_output_begin(&handle, event, read_event.header.size);
5326 5327 5328
	if (ret)
		return;

5329
	perf_output_put(&handle, read_event);
5330
	perf_output_read(&handle, event);
5331
	perf_event__output_id_sample(event, &handle, &sample);
5332

5333 5334 5335
	perf_output_end(&handle);
}

typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);

static void
perf_event_aux_ctx(struct perf_event_context *ctx,
		   perf_event_aux_output_cb output,
		   void *data)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (event->state < PERF_EVENT_STATE_INACTIVE)
			continue;
		if (!event_filter_match(event))
			continue;
5350
		output(event, data);
5351 5352 5353 5354
	}
}

static void
5355
perf_event_aux(perf_event_aux_output_cb output, void *data,
	       struct perf_event_context *task_ctx)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int ctxn;

	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
		if (cpuctx->unique_pmu != pmu)
			goto next;
5368
		perf_event_aux_ctx(&cpuctx->ctx, output, data);
5369 5370 5371 5372 5373 5374 5375
		if (task_ctx)
			goto next;
		ctxn = pmu->task_ctx_nr;
		if (ctxn < 0)
			goto next;
		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
		if (ctx)
5376
			perf_event_aux_ctx(ctx, output, data);
5377 5378 5379 5380 5381 5382
next:
		put_cpu_ptr(pmu->pmu_cpu_context);
	}

	if (task_ctx) {
		preempt_disable();
		perf_event_aux_ctx(task_ctx, output, data);
		preempt_enable();
	}
	rcu_read_unlock();
}
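
/*
 * perf_event_aux() fans a side-band record (task, comm, mmap, ...) out to
 * every matching event: it walks each PMU's CPU context and, unless a
 * task context was passed in explicitly, the current task's contexts.
 */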

P
Peter Zijlstra 已提交
5389
/*
P
Peter Zijlstra 已提交
5390 5391
 * task tracking -- fork/exit
 *
5392
 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
P
Peter Zijlstra 已提交
5393 5394
 */

P
Peter Zijlstra 已提交
5395
struct perf_task_event {
5396
	struct task_struct		*task;
5397
	struct perf_event_context	*task_ctx;
P
Peter Zijlstra 已提交
5398 5399 5400 5401 5402 5403

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				ppid;
P
Peter Zijlstra 已提交
5404 5405
		u32				tid;
		u32				ptid;
5406
		u64				time;
5407
	} event_id;
P
Peter Zijlstra 已提交
5408 5409
};

5410 5411
static int perf_event_task_match(struct perf_event *event)
{
5412 5413 5414
	return event->attr.comm  || event->attr.mmap ||
	       event->attr.mmap2 || event->attr.mmap_data ||
	       event->attr.task;
5415 5416
}

5417
static void perf_event_task_output(struct perf_event *event,
5418
				   void *data)
P
Peter Zijlstra 已提交
5419
{
5420
	struct perf_task_event *task_event = data;
P
Peter Zijlstra 已提交
5421
	struct perf_output_handle handle;
5422
	struct perf_sample_data	sample;
P
Peter Zijlstra 已提交
5423
	struct task_struct *task = task_event->task;
5424
	int ret, size = task_event->event_id.header.size;
5425

5426 5427 5428
	if (!perf_event_task_match(event))
		return;

5429
	perf_event_header__init_id(&task_event->event_id.header, &sample, event);
P
Peter Zijlstra 已提交
5430

5431
	ret = perf_output_begin(&handle, event,
5432
				task_event->event_id.header.size);
5433
	if (ret)
5434
		goto out;
P
Peter Zijlstra 已提交
5435

5436 5437
	task_event->event_id.pid = perf_event_pid(event, task);
	task_event->event_id.ppid = perf_event_pid(event, current);
P
Peter Zijlstra 已提交
5438

5439 5440
	task_event->event_id.tid = perf_event_tid(event, task);
	task_event->event_id.ptid = perf_event_tid(event, current);
P
Peter Zijlstra 已提交
5441

5442 5443
	task_event->event_id.time = perf_event_clock(event);

5444
	perf_output_put(&handle, task_event->event_id);
5445

5446 5447
	perf_event__output_id_sample(event, &handle, &sample);

P
Peter Zijlstra 已提交
5448
	perf_output_end(&handle);
5449 5450
out:
	task_event->event_id.header.size = size;
P
Peter Zijlstra 已提交
5451 5452
}

5453 5454
static void perf_event_task(struct task_struct *task,
			      struct perf_event_context *task_ctx,
5455
			      int new)
P
Peter Zijlstra 已提交
5456
{
P
Peter Zijlstra 已提交
5457
	struct perf_task_event task_event;
P
Peter Zijlstra 已提交
5458

5459 5460 5461
	if (!atomic_read(&nr_comm_events) &&
	    !atomic_read(&nr_mmap_events) &&
	    !atomic_read(&nr_task_events))
P
Peter Zijlstra 已提交
5462 5463
		return;

P
Peter Zijlstra 已提交
5464
	task_event = (struct perf_task_event){
5465 5466
		.task	  = task,
		.task_ctx = task_ctx,
5467
		.event_id    = {
P
Peter Zijlstra 已提交
5468
			.header = {
5469
				.type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
5470
				.misc = 0,
5471
				.size = sizeof(task_event.event_id),
P
Peter Zijlstra 已提交
5472
			},
5473 5474
			/* .pid  */
			/* .ppid */
P
Peter Zijlstra 已提交
5475 5476
			/* .tid  */
			/* .ptid */
5477
			/* .time */
P
Peter Zijlstra 已提交
5478 5479 5480
		},
	};

5481
	perf_event_aux(perf_event_task_output,
5482 5483
		       &task_event,
		       task_ctx);
P
Peter Zijlstra 已提交
5484 5485
}

5486
void perf_event_fork(struct task_struct *task)
P
Peter Zijlstra 已提交
5487
{
5488
	perf_event_task(task, NULL, 1);
P
Peter Zijlstra 已提交
5489 5490
}

5491 5492 5493 5494 5495
/*
 * comm tracking
 */

struct perf_comm_event {
5496 5497
	struct task_struct	*task;
	char			*comm;
5498 5499 5500 5501 5502 5503 5504
	int			comm_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
5505
	} event_id;
5506 5507
};

5508 5509 5510 5511 5512
static int perf_event_comm_match(struct perf_event *event)
{
	return event->attr.comm;
}

5513
static void perf_event_comm_output(struct perf_event *event,
5514
				   void *data)
5515
{
5516
	struct perf_comm_event *comm_event = data;
5517
	struct perf_output_handle handle;
5518
	struct perf_sample_data sample;
5519
	int size = comm_event->event_id.header.size;
5520 5521
	int ret;

5522 5523 5524
	if (!perf_event_comm_match(event))
		return;

5525 5526
	perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
	ret = perf_output_begin(&handle, event,
5527
				comm_event->event_id.header.size);
5528 5529

	if (ret)
5530
		goto out;
5531

5532 5533
	comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
	comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
5534

5535
	perf_output_put(&handle, comm_event->event_id);
5536
	__output_copy(&handle, comm_event->comm,
5537
				   comm_event->comm_size);
5538 5539 5540

	perf_event__output_id_sample(event, &handle, &sample);

5541
	perf_output_end(&handle);
5542 5543
out:
	comm_event->event_id.header.size = size;
5544 5545
}

5546
static void perf_event_comm_event(struct perf_comm_event *comm_event)
5547
{
5548
	char comm[TASK_COMM_LEN];
5549 5550
	unsigned int size;

5551
	memset(comm, 0, sizeof(comm));
5552
	strlcpy(comm, comm_event->task->comm, sizeof(comm));
5553
	size = ALIGN(strlen(comm)+1, sizeof(u64));
5554 5555 5556 5557

	comm_event->comm = comm;
	comm_event->comm_size = size;

5558
	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
P
Peter Zijlstra 已提交
5559

5560
	perf_event_aux(perf_event_comm_output,
5561 5562
		       comm_event,
		       NULL);
5563 5564
}

5565
void perf_event_comm(struct task_struct *task, bool exec)
5566
{
5567 5568
	struct perf_comm_event comm_event;

5569
	if (!atomic_read(&nr_comm_events))
5570
		return;
5571

5572
	comm_event = (struct perf_comm_event){
5573
		.task	= task,
5574 5575
		/* .comm      */
		/* .comm_size */
5576
		.event_id  = {
5577
			.header = {
5578
				.type = PERF_RECORD_COMM,
5579
				.misc = exec ? PERF_RECORD_MISC_COMM_EXEC : 0,
5580 5581 5582 5583
				/* .size */
			},
			/* .pid */
			/* .tid */
5584 5585 5586
		},
	};

5587
	perf_event_comm_event(&comm_event);
5588 5589
}

5590 5591 5592 5593 5594
/*
 * mmap tracking
 */

struct perf_mmap_event {
5595 5596 5597 5598
	struct vm_area_struct	*vma;

	const char		*file_name;
	int			file_size;
5599 5600 5601
	int			maj, min;
	u64			ino;
	u64			ino_generation;
5602
	u32			prot, flags;
5603 5604 5605 5606 5607 5608 5609 5610 5611

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
		u64				start;
		u64				len;
		u64				pgoff;
5612
	} event_id;
5613 5614
};

5615 5616 5617 5618 5619 5620 5621 5622
static int perf_event_mmap_match(struct perf_event *event,
				 void *data)
{
	struct perf_mmap_event *mmap_event = data;
	struct vm_area_struct *vma = mmap_event->vma;
	int executable = vma->vm_flags & VM_EXEC;

	return (!executable && event->attr.mmap_data) ||
5623
	       (executable && (event->attr.mmap || event->attr.mmap2));
5624 5625
}

5626
static void perf_event_mmap_output(struct perf_event *event,
5627
				   void *data)
5628
{
5629
	struct perf_mmap_event *mmap_event = data;
5630
	struct perf_output_handle handle;
5631
	struct perf_sample_data sample;
5632
	int size = mmap_event->event_id.header.size;
5633
	int ret;
5634

5635 5636 5637
	if (!perf_event_mmap_match(event, data))
		return;

5638 5639 5640 5641 5642
	if (event->attr.mmap2) {
		mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
		mmap_event->event_id.header.size += sizeof(mmap_event->maj);
		mmap_event->event_id.header.size += sizeof(mmap_event->min);
		mmap_event->event_id.header.size += sizeof(mmap_event->ino);
5643
		mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
5644 5645
		mmap_event->event_id.header.size += sizeof(mmap_event->prot);
		mmap_event->event_id.header.size += sizeof(mmap_event->flags);
5646 5647
	}

5648 5649
	perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
	ret = perf_output_begin(&handle, event,
5650
				mmap_event->event_id.header.size);
5651
	if (ret)
5652
		goto out;
5653

5654 5655
	mmap_event->event_id.pid = perf_event_pid(event, current);
	mmap_event->event_id.tid = perf_event_tid(event, current);
5656

5657
	perf_output_put(&handle, mmap_event->event_id);
5658 5659 5660 5661 5662 5663

	if (event->attr.mmap2) {
		perf_output_put(&handle, mmap_event->maj);
		perf_output_put(&handle, mmap_event->min);
		perf_output_put(&handle, mmap_event->ino);
		perf_output_put(&handle, mmap_event->ino_generation);
5664 5665
		perf_output_put(&handle, mmap_event->prot);
		perf_output_put(&handle, mmap_event->flags);
5666 5667
	}

5668
	__output_copy(&handle, mmap_event->file_name,
5669
				   mmap_event->file_size);
5670 5671 5672

	perf_event__output_id_sample(event, &handle, &sample);

5673
	perf_output_end(&handle);
5674 5675
out:
	mmap_event->event_id.header.size = size;
5676 5677
}

5678
static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
5679
{
5680 5681
	struct vm_area_struct *vma = mmap_event->vma;
	struct file *file = vma->vm_file;
5682 5683
	int maj = 0, min = 0;
	u64 ino = 0, gen = 0;
5684
	u32 prot = 0, flags = 0;
5685 5686 5687
	unsigned int size;
	char tmp[16];
	char *buf = NULL;
5688
	char *name;
5689

5690
	if (file) {
5691 5692
		struct inode *inode;
		dev_t dev;
5693

5694
		buf = kmalloc(PATH_MAX, GFP_KERNEL);
5695
		if (!buf) {
5696 5697
			name = "//enomem";
			goto cpy_name;
5698
		}
5699
		/*
5700
		 * d_path() works from the end of the rb backwards, so we
5701 5702 5703
		 * need to add enough zero bytes after the string to handle
		 * the 64bit alignment we do later.
		 */
5704
		name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
5705
		if (IS_ERR(name)) {
5706 5707
			name = "//toolong";
			goto cpy_name;
5708
		}
5709 5710 5711 5712 5713 5714
		inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		gen = inode->i_generation;
		maj = MAJOR(dev);
		min = MINOR(dev);
5715 5716 5717 5718 5719 5720 5721 5722 5723 5724 5725 5726 5727 5728 5729 5730 5731 5732 5733 5734 5735 5736

		if (vma->vm_flags & VM_READ)
			prot |= PROT_READ;
		if (vma->vm_flags & VM_WRITE)
			prot |= PROT_WRITE;
		if (vma->vm_flags & VM_EXEC)
			prot |= PROT_EXEC;

		if (vma->vm_flags & VM_MAYSHARE)
			flags = MAP_SHARED;
		else
			flags = MAP_PRIVATE;

		if (vma->vm_flags & VM_DENYWRITE)
			flags |= MAP_DENYWRITE;
		if (vma->vm_flags & VM_MAYEXEC)
			flags |= MAP_EXECUTABLE;
		if (vma->vm_flags & VM_LOCKED)
			flags |= MAP_LOCKED;
		if (vma->vm_flags & VM_HUGETLB)
			flags |= MAP_HUGETLB;

5737
		goto got_name;
5738
	} else {
5739 5740 5741 5742 5743 5744
		if (vma->vm_ops && vma->vm_ops->name) {
			name = (char *) vma->vm_ops->name(vma);
			if (name)
				goto cpy_name;
		}

5745
		name = (char *)arch_vma_name(vma);
5746 5747
		if (name)
			goto cpy_name;
5748

5749
		if (vma->vm_start <= vma->vm_mm->start_brk &&
5750
				vma->vm_end >= vma->vm_mm->brk) {
5751 5752
			name = "[heap]";
			goto cpy_name;
5753 5754
		}
		if (vma->vm_start <= vma->vm_mm->start_stack &&
5755
				vma->vm_end >= vma->vm_mm->start_stack) {
5756 5757
			name = "[stack]";
			goto cpy_name;
5758 5759
		}

5760 5761
		name = "//anon";
		goto cpy_name;
5762 5763
	}

5764 5765 5766
cpy_name:
	strlcpy(tmp, name, sizeof(tmp));
	name = tmp;
5767
got_name:
5768 5769 5770 5771 5772 5773 5774 5775
	/*
	 * Since our buffer works in 8 byte units we need to align our string
	 * size to a multiple of 8. However, we must guarantee the tail end is
	 * zero'd out to avoid leaking random bits to userspace.
	 */
	size = strlen(name)+1;
	while (!IS_ALIGNED(size, sizeof(u64)))
		name[size++] = '\0';
5776 5777 5778

	mmap_event->file_name = name;
	mmap_event->file_size = size;
5779 5780 5781 5782
	mmap_event->maj = maj;
	mmap_event->min = min;
	mmap_event->ino = ino;
	mmap_event->ino_generation = gen;
5783 5784
	mmap_event->prot = prot;
	mmap_event->flags = flags;
5785

5786 5787 5788
	if (!(vma->vm_flags & VM_EXEC))
		mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;

5789
	mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
5790

5791
	perf_event_aux(perf_event_mmap_output,
5792 5793
		       mmap_event,
		       NULL);
5794

5795 5796 5797
	kfree(buf);
}

5798
void perf_event_mmap(struct vm_area_struct *vma)
5799
{
5800 5801
	struct perf_mmap_event mmap_event;

5802
	if (!atomic_read(&nr_mmap_events))
5803 5804 5805
		return;

	mmap_event = (struct perf_mmap_event){
5806
		.vma	= vma,
5807 5808
		/* .file_name */
		/* .file_size */
5809
		.event_id  = {
5810
			.header = {
5811
				.type = PERF_RECORD_MMAP,
5812
				.misc = PERF_RECORD_MISC_USER,
5813 5814 5815 5816
				/* .size */
			},
			/* .pid */
			/* .tid */
5817 5818
			.start  = vma->vm_start,
			.len    = vma->vm_end - vma->vm_start,
5819
			.pgoff  = (u64)vma->vm_pgoff << PAGE_SHIFT,
5820
		},
5821 5822 5823 5824
		/* .maj (attr_mmap2 only) */
		/* .min (attr_mmap2 only) */
		/* .ino (attr_mmap2 only) */
		/* .ino_generation (attr_mmap2 only) */
5825 5826
		/* .prot (attr_mmap2 only) */
		/* .flags (attr_mmap2 only) */
5827 5828
	};

5829
	perf_event_mmap_event(&mmap_event);
5830 5831
}

5832 5833 5834 5835
/*
 * IRQ throttle logging
 */

5836
static void perf_log_throttle(struct perf_event *event, int enable)
5837 5838
{
	struct perf_output_handle handle;
5839
	struct perf_sample_data sample;
5840 5841 5842 5843 5844
	int ret;

	struct {
		struct perf_event_header	header;
		u64				time;
5845
		u64				id;
5846
		u64				stream_id;
5847 5848
	} throttle_event = {
		.header = {
5849
			.type = PERF_RECORD_THROTTLE,
5850 5851 5852
			.misc = 0,
			.size = sizeof(throttle_event),
		},
5853
		.time		= perf_event_clock(event),
5854 5855
		.id		= primary_event_id(event),
		.stream_id	= event->id,
5856 5857
	};

5858
	if (enable)
5859
		throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
5860

5861 5862 5863
	perf_event_header__init_id(&throttle_event.header, &sample, event);

	ret = perf_output_begin(&handle, event,
5864
				throttle_event.header.size);
5865 5866 5867 5868
	if (ret)
		return;

	perf_output_put(&handle, throttle_event);
5869
	perf_event__output_id_sample(event, &handle, &sample);
5870 5871 5872
	perf_output_end(&handle);
}

5873
/*
5874
 * Generic event overflow handling, sampling.
5875 5876
 */

5877
static int __perf_event_overflow(struct perf_event *event,
5878 5879
				   int throttle, struct perf_sample_data *data,
				   struct pt_regs *regs)
5880
{
5881 5882
	int events = atomic_read(&event->event_limit);
	struct hw_perf_event *hwc = &event->hw;
5883
	u64 seq;
5884 5885
	int ret = 0;

5886 5887 5888 5889 5890 5891 5892
	/*
	 * Non-sampling counters might still use the PMI to fold short
	 * hardware counters, ignore those.
	 */
	if (unlikely(!is_sampling_event(event)))
		return 0;

5893 5894 5895 5896 5897 5898 5899 5900 5901
	seq = __this_cpu_read(perf_throttled_seq);
	if (seq != hwc->interrupts_seq) {
		hwc->interrupts_seq = seq;
		hwc->interrupts = 1;
	} else {
		hwc->interrupts++;
		if (unlikely(throttle
			     && hwc->interrupts >= max_samples_per_tick)) {
			__this_cpu_inc(perf_throttled_count);
P
Peter Zijlstra 已提交
5902 5903
			hwc->interrupts = MAX_INTERRUPTS;
			perf_log_throttle(event, 0);
5904
			tick_nohz_full_kick();
5905 5906
			ret = 1;
		}
5907
	}
5908

5909
	if (event->attr.freq) {
P
Peter Zijlstra 已提交
5910
		u64 now = perf_clock();
5911
		s64 delta = now - hwc->freq_time_stamp;
5912

5913
		hwc->freq_time_stamp = now;
5914

5915
		if (delta > 0 && delta < 2*TICK_NSEC)
5916
			perf_adjust_period(event, delta, hwc->last_period, true);
5917 5918
	}

5919 5920
	/*
	 * XXX event_limit might not quite work as expected on inherited
5921
	 * events
5922 5923
	 */

5924 5925
	event->pending_kill = POLL_IN;
	if (events && atomic_dec_and_test(&event->event_limit)) {
5926
		ret = 1;
5927
		event->pending_kill = POLL_HUP;
5928 5929
		event->pending_disable = 1;
		irq_work_queue(&event->pending);
5930 5931
	}

5932
	if (event->overflow_handler)
5933
		event->overflow_handler(event, data, regs);
5934
	else
5935
		perf_event_output(event, data, regs);
5936

P
Peter Zijlstra 已提交
5937
	if (event->fasync && event->pending_kill) {
5938 5939
		event->pending_wakeup = 1;
		irq_work_queue(&event->pending);
P
Peter Zijlstra 已提交
5940 5941
	}

5942
	return ret;
5943 5944
}

5945
int perf_event_overflow(struct perf_event *event,
5946 5947
			  struct perf_sample_data *data,
			  struct pt_regs *regs)
5948
{
5949
	return __perf_event_overflow(event, 1, data, regs);
5950 5951
}

5952
/*
5953
 * Generic software event infrastructure
5954 5955
 */

5956 5957 5958 5959 5960 5961 5962
struct swevent_htable {
	struct swevent_hlist		*swevent_hlist;
	struct mutex			hlist_mutex;
	int				hlist_refcount;

	/* Recursion avoidance in each contexts */
	int				recursion[PERF_NR_CONTEXTS];
5963 5964 5965

	/* Keeps track of cpu being initialized/exited */
	bool				online;
5966 5967 5968 5969
};

static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);

5970
/*
5971 5972
 * We directly increment event->count and keep a second value in
 * event->hw.period_left to count intervals. This period event
5973 5974 5975 5976
 * is kept in the range [-sample_period, 0] so that we can use the
 * sign as trigger.
 */

5977
u64 perf_swevent_set_period(struct perf_event *event)
5978
{
5979
	struct hw_perf_event *hwc = &event->hw;
5980 5981 5982 5983 5984
	u64 period = hwc->last_period;
	u64 nr, offset;
	s64 old, val;

	hwc->last_period = hwc->sample_period;
5985 5986

again:
5987
	old = val = local64_read(&hwc->period_left);
5988 5989
	if (val < 0)
		return 0;
5990

5991 5992 5993
	nr = div64_u64(period + val, period);
	offset = nr * period;
	val -= offset;
5994
	if (local64_cmpxchg(&hwc->period_left, old, val) != old)
5995
		goto again;
5996

5997
	return nr;
5998 5999
}

6000
static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
6001
				    struct perf_sample_data *data,
6002
				    struct pt_regs *regs)
6003
{
6004
	struct hw_perf_event *hwc = &event->hw;
6005
	int throttle = 0;
6006

6007 6008
	if (!overflow)
		overflow = perf_swevent_set_period(event);
6009

6010 6011
	if (hwc->interrupts == MAX_INTERRUPTS)
		return;
6012

6013
	for (; overflow; overflow--) {
6014
		if (__perf_event_overflow(event, throttle,
6015
					    data, regs)) {
6016 6017 6018 6019 6020 6021
			/*
			 * We inhibit the overflow from happening when
			 * hwc->interrupts == MAX_INTERRUPTS.
			 */
			break;
		}
6022
		throttle = 1;
6023
	}
6024 6025
}

P
Peter Zijlstra 已提交
6026
static void perf_swevent_event(struct perf_event *event, u64 nr,
6027
			       struct perf_sample_data *data,
6028
			       struct pt_regs *regs)
6029
{
6030
	struct hw_perf_event *hwc = &event->hw;
6031

6032
	local64_add(nr, &event->count);
6033

6034 6035 6036
	if (!regs)
		return;

6037
	if (!is_sampling_event(event))
6038
		return;
6039

6040 6041 6042 6043 6044 6045
	if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
		data->period = nr;
		return perf_swevent_overflow(event, 1, data, regs);
	} else
		data->period = event->hw.last_period;

6046
	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
6047
		return perf_swevent_overflow(event, 1, data, regs);
6048

6049
	if (local64_add_negative(nr, &hwc->period_left))
6050
		return;
6051

6052
	perf_swevent_overflow(event, 0, data, regs);
6053 6054
}

6055 6056 6057
static int perf_exclude_event(struct perf_event *event,
			      struct pt_regs *regs)
{
P
Peter Zijlstra 已提交
6058
	if (event->hw.state & PERF_HES_STOPPED)
6059
		return 1;
P
Peter Zijlstra 已提交
6060

6061 6062 6063 6064 6065 6066 6067 6068 6069 6070 6071
	if (regs) {
		if (event->attr.exclude_user && user_mode(regs))
			return 1;

		if (event->attr.exclude_kernel && !user_mode(regs))
			return 1;
	}

	return 0;
}

6072
static int perf_swevent_match(struct perf_event *event,
P
Peter Zijlstra 已提交
6073
				enum perf_type_id type,
L
Li Zefan 已提交
6074 6075 6076
				u32 event_id,
				struct perf_sample_data *data,
				struct pt_regs *regs)
6077
{
6078
	if (event->attr.type != type)
6079
		return 0;
6080

6081
	if (event->attr.config != event_id)
6082 6083
		return 0;

6084 6085
	if (perf_exclude_event(event, regs))
		return 0;
6086 6087 6088 6089

	return 1;
}

6090 6091 6092 6093 6094 6095 6096
static inline u64 swevent_hash(u64 type, u32 event_id)
{
	u64 val = event_id | (type << 32);

	return hash_64(val, SWEVENT_HLIST_BITS);
}

6097 6098
static inline struct hlist_head *
__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
6099
{
6100 6101 6102 6103
	u64 hash = swevent_hash(type, event_id);

	return &hlist->heads[hash];
}
6104

6105 6106
/* For the read side: events when they trigger */
static inline struct hlist_head *
6107
find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
6108 6109
{
	struct swevent_hlist *hlist;
6110

6111
	hlist = rcu_dereference(swhash->swevent_hlist);
6112 6113 6114
	if (!hlist)
		return NULL;

6115 6116 6117 6118 6119
	return __find_swevent_head(hlist, type, event_id);
}

/* For the event head insertion and removal in the hlist */
static inline struct hlist_head *
6120
find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
6121 6122 6123 6124 6125 6126 6127 6128 6129 6130
{
	struct swevent_hlist *hlist;
	u32 event_id = event->attr.config;
	u64 type = event->attr.type;

	/*
	 * Event scheduling is always serialized against hlist allocation
	 * and release. Which makes the protected version suitable here.
	 * The context lock guarantees that.
	 */
6131
	hlist = rcu_dereference_protected(swhash->swevent_hlist,
6132 6133 6134 6135 6136
					  lockdep_is_held(&event->ctx->lock));
	if (!hlist)
		return NULL;

	return __find_swevent_head(hlist, type, event_id);
6137 6138 6139
}

static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
6140
				    u64 nr,
6141 6142
				    struct perf_sample_data *data,
				    struct pt_regs *regs)
6143
{
6144
	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
6145
	struct perf_event *event;
6146
	struct hlist_head *head;
6147

6148
	rcu_read_lock();
6149
	head = find_swevent_head_rcu(swhash, type, event_id);
6150 6151 6152
	if (!head)
		goto end;

6153
	hlist_for_each_entry_rcu(event, head, hlist_entry) {
L
Li Zefan 已提交
6154
		if (perf_swevent_match(event, type, event_id, data, regs))
6155
			perf_swevent_event(event, nr, data, regs);
6156
	}
6157 6158
end:
	rcu_read_unlock();
6159 6160
}

6161 6162
DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);

6163
int perf_swevent_get_recursion_context(void)
P
Peter Zijlstra 已提交
6164
{
6165
	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
P
Peter Zijlstra 已提交
6166

6167
	return get_recursion_context(swhash->recursion);
P
Peter Zijlstra 已提交
6168
}
I
Ingo Molnar 已提交
6169
EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
P
Peter Zijlstra 已提交
6170

6171
inline void perf_swevent_put_recursion_context(int rctx)
6172
{
6173
	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
6174

6175
	put_recursion_context(swhash->recursion, rctx);
6176
}
6177

6178
void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
6179
{
6180
	struct perf_sample_data data;
6181

6182
	if (WARN_ON_ONCE(!regs))
6183
		return;
6184

6185
	perf_sample_data_init(&data, addr, 0);
6186
	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
6187 6188 6189 6190 6191 6192 6193 6194 6195 6196 6197 6198
}

void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	int rctx;

	preempt_disable_notrace();
	rctx = perf_swevent_get_recursion_context();
	if (unlikely(rctx < 0))
		goto fail;

	___perf_sw_event(event_id, nr, regs, addr);
6199 6200

	perf_swevent_put_recursion_context(rctx);
6201
fail:
6202
	preempt_enable_notrace();
6203 6204
}

6205
static void perf_swevent_read(struct perf_event *event)
6206 6207 6208
{
}

P
Peter Zijlstra 已提交
6209
static int perf_swevent_add(struct perf_event *event, int flags)
6210
{
6211
	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
6212
	struct hw_perf_event *hwc = &event->hw;
6213 6214
	struct hlist_head *head;

6215
	if (is_sampling_event(event)) {
6216
		hwc->last_period = hwc->sample_period;
6217
		perf_swevent_set_period(event);
6218
	}
6219

P
Peter Zijlstra 已提交
6220 6221
	hwc->state = !(flags & PERF_EF_START);

6222
	head = find_swevent_head(swhash, event);
6223 6224 6225 6226 6227 6228
	if (!head) {
		/*
		 * We can race with cpu hotplug code. Do not
		 * WARN if the cpu just got unplugged.
		 */
		WARN_ON_ONCE(swhash->online);
6229
		return -EINVAL;
6230
	}
6231 6232

	hlist_add_head_rcu(&event->hlist_entry, head);
6233
	perf_event_update_userpage(event);
6234

6235 6236 6237
	return 0;
}

P
Peter Zijlstra 已提交
6238
static void perf_swevent_del(struct perf_event *event, int flags)
6239
{
6240
	hlist_del_rcu(&event->hlist_entry);
6241 6242
}

P
Peter Zijlstra 已提交
6243
static void perf_swevent_start(struct perf_event *event, int flags)
6244
{
P
Peter Zijlstra 已提交
6245
	event->hw.state = 0;
6246
}
I
Ingo Molnar 已提交
6247

P
Peter Zijlstra 已提交
6248
static void perf_swevent_stop(struct perf_event *event, int flags)
6249
{
P
Peter Zijlstra 已提交
6250
	event->hw.state = PERF_HES_STOPPED;
6251 6252
}

6253 6254
/* Deref the hlist from the update side */
static inline struct swevent_hlist *
6255
swevent_hlist_deref(struct swevent_htable *swhash)
6256
{
6257 6258
	return rcu_dereference_protected(swhash->swevent_hlist,
					 lockdep_is_held(&swhash->hlist_mutex));
6259 6260
}

6261
static void swevent_hlist_release(struct swevent_htable *swhash)
6262
{
6263
	struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
6264

6265
	if (!hlist)
6266 6267
		return;

6268
	RCU_INIT_POINTER(swhash->swevent_hlist, NULL);
6269
	kfree_rcu(hlist, rcu_head);
6270 6271 6272 6273
}

static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
{
6274
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
6275

6276
	mutex_lock(&swhash->hlist_mutex);
6277

6278 6279
	if (!--swhash->hlist_refcount)
		swevent_hlist_release(swhash);
6280

6281
	mutex_unlock(&swhash->hlist_mutex);
6282 6283 6284 6285 6286 6287 6288 6289 6290 6291 6292 6293
}

static void swevent_hlist_put(struct perf_event *event)
{
	int cpu;

	for_each_possible_cpu(cpu)
		swevent_hlist_put_cpu(event, cpu);
}

static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
{
6294
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
6295 6296
	int err = 0;

6297
	mutex_lock(&swhash->hlist_mutex);
6298

6299
	if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
6300 6301 6302 6303 6304 6305 6306
		struct swevent_hlist *hlist;

		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
		if (!hlist) {
			err = -ENOMEM;
			goto exit;
		}
6307
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
6308
	}
6309
	swhash->hlist_refcount++;
P
Peter Zijlstra 已提交
6310
exit:
6311
	mutex_unlock(&swhash->hlist_mutex);
6312 6313 6314 6315 6316 6317 6318 6319 6320 6321 6322 6323 6324 6325 6326 6327 6328 6329 6330 6331

	return err;
}

static int swevent_hlist_get(struct perf_event *event)
{
	int err;
	int cpu, failed_cpu;

	get_online_cpus();
	for_each_possible_cpu(cpu) {
		err = swevent_hlist_get_cpu(event, cpu);
		if (err) {
			failed_cpu = cpu;
			goto fail;
		}
	}
	put_online_cpus();

	return 0;
P
Peter Zijlstra 已提交
6332
fail:
6333 6334 6335 6336 6337 6338 6339 6340 6341 6342
	for_each_possible_cpu(cpu) {
		if (cpu == failed_cpu)
			break;
		swevent_hlist_put_cpu(event, cpu);
	}

	put_online_cpus();
	return err;
}

6343
struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
6344

6345 6346 6347
static void sw_perf_event_destroy(struct perf_event *event)
{
	u64 event_id = event->attr.config;
6348

6349 6350
	WARN_ON(event->parent);

6351
	static_key_slow_dec(&perf_swevent_enabled[event_id]);
6352 6353 6354 6355 6356
	swevent_hlist_put(event);
}

static int perf_swevent_init(struct perf_event *event)
{
6357
	u64 event_id = event->attr.config;
6358 6359 6360 6361

	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

6362 6363 6364 6365 6366 6367
	/*
	 * no branch sampling for software events
	 */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

6368 6369 6370 6371 6372 6373 6374 6375 6376
	switch (event_id) {
	case PERF_COUNT_SW_CPU_CLOCK:
	case PERF_COUNT_SW_TASK_CLOCK:
		return -ENOENT;

	default:
		break;
	}

6377
	if (event_id >= PERF_COUNT_SW_MAX)
6378 6379 6380 6381 6382 6383 6384 6385 6386
		return -ENOENT;

	if (!event->parent) {
		int err;

		err = swevent_hlist_get(event);
		if (err)
			return err;

6387
		static_key_slow_inc(&perf_swevent_enabled[event_id]);
6388 6389 6390 6391 6392 6393 6394
		event->destroy = sw_perf_event_destroy;
	}

	return 0;
}

static struct pmu perf_swevent = {
6395
	.task_ctx_nr	= perf_sw_context,
6396

6397 6398
	.capabilities	= PERF_PMU_CAP_NO_NMI,

6399
	.event_init	= perf_swevent_init,
P
Peter Zijlstra 已提交
6400 6401 6402 6403
	.add		= perf_swevent_add,
	.del		= perf_swevent_del,
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
6404 6405 6406
	.read		= perf_swevent_read,
};

6407 6408
#ifdef CONFIG_EVENT_TRACING

6409 6410 6411 6412 6413 6414 6415 6416 6417 6418 6419 6420 6421 6422
static int perf_tp_filter_match(struct perf_event *event,
				struct perf_sample_data *data)
{
	void *record = data->raw->data;

	if (likely(!event->filter) || filter_match_preds(event->filter, record))
		return 1;
	return 0;
}

static int perf_tp_event_match(struct perf_event *event,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
6423 6424
	if (event->hw.state & PERF_HES_STOPPED)
		return 0;
6425 6426 6427 6428
	/*
	 * All tracepoints are from kernel-space.
	 */
	if (event->attr.exclude_kernel)
6429 6430 6431 6432 6433 6434 6435 6436 6437
		return 0;

	if (!perf_tp_filter_match(event, data))
		return 0;

	return 1;
}

void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
6438 6439
		   struct pt_regs *regs, struct hlist_head *head, int rctx,
		   struct task_struct *task)
6440 6441
{
	struct perf_sample_data data;
6442 6443
	struct perf_event *event;

6444 6445 6446 6447 6448
	struct perf_raw_record raw = {
		.size = entry_size,
		.data = record,
	};

6449
	perf_sample_data_init(&data, addr, 0);
6450 6451
	data.raw = &raw;

6452
	hlist_for_each_entry_rcu(event, head, hlist_entry) {
6453
		if (perf_tp_event_match(event, &data, regs))
6454
			perf_swevent_event(event, count, &data, regs);
6455
	}
6456

6457 6458 6459 6460 6461 6462 6463 6464 6465 6466 6467 6468 6469 6470 6471 6472 6473 6474 6475 6476 6477 6478 6479 6480 6481
	/*
	 * If we got specified a target task, also iterate its context and
	 * deliver this event there too.
	 */
	if (task && task != current) {
		struct perf_event_context *ctx;
		struct trace_entry *entry = record;

		rcu_read_lock();
		ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
		if (!ctx)
			goto unlock;

		list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
			if (event->attr.type != PERF_TYPE_TRACEPOINT)
				continue;
			if (event->attr.config != entry->type)
				continue;
			if (perf_tp_event_match(event, &data, regs))
				perf_swevent_event(event, count, &data, regs);
		}
unlock:
		rcu_read_unlock();
	}

6482
	perf_swevent_put_recursion_context(rctx);
6483 6484 6485
}
EXPORT_SYMBOL_GPL(perf_tp_event);

6486
static void tp_perf_event_destroy(struct perf_event *event)
6487
{
6488
	perf_trace_destroy(event);
6489 6490
}

6491
static int perf_tp_event_init(struct perf_event *event)
6492
{
6493 6494
	int err;

6495 6496 6497
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -ENOENT;

6498 6499 6500 6501 6502 6503
	/*
	 * no branch sampling for tracepoint events
	 */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

6504 6505
	err = perf_trace_init(event);
	if (err)
6506
		return err;
6507

6508
	event->destroy = tp_perf_event_destroy;
6509

6510 6511 6512 6513
	return 0;
}

static struct pmu perf_tracepoint = {
6514 6515
	.task_ctx_nr	= perf_sw_context,

6516
	.event_init	= perf_tp_event_init,
P
Peter Zijlstra 已提交
6517 6518 6519 6520
	.add		= perf_trace_add,
	.del		= perf_trace_del,
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
6521 6522 6523 6524 6525
	.read		= perf_swevent_read,
};

static inline void perf_tp_register(void)
{
P
Peter Zijlstra 已提交
6526
	perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
6527
}
L
Li Zefan 已提交
6528 6529 6530 6531 6532 6533 6534 6535 6536 6537 6538 6539 6540 6541 6542 6543 6544 6545 6546 6547 6548 6549 6550 6551

static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
	char *filter_str;
	int ret;

	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;

	filter_str = strndup_user(arg, PAGE_SIZE);
	if (IS_ERR(filter_str))
		return PTR_ERR(filter_str);

	ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);

	kfree(filter_str);
	return ret;
}

static void perf_event_free_filter(struct perf_event *event)
{
	ftrace_profile_free_filter(event);
}

6552 6553 6554 6555 6556 6557 6558 6559 6560 6561 6562 6563 6564 6565 6566 6567 6568 6569 6570 6571 6572 6573 6574 6575 6576 6577 6578 6579 6580 6581 6582 6583 6584 6585 6586 6587 6588 6589 6590 6591 6592 6593 6594
static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
{
	struct bpf_prog *prog;

	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;

	if (event->tp_event->prog)
		return -EEXIST;

	if (!(event->tp_event->flags & TRACE_EVENT_FL_KPROBE))
		/* bpf programs can only be attached to kprobes */
		return -EINVAL;

	prog = bpf_prog_get(prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->aux->prog_type != BPF_PROG_TYPE_KPROBE) {
		/* valid fd, but invalid bpf program type */
		bpf_prog_put(prog);
		return -EINVAL;
	}

	event->tp_event->prog = prog;

	return 0;
}

static void perf_event_free_bpf_prog(struct perf_event *event)
{
	struct bpf_prog *prog;

	if (!event->tp_event)
		return;

	prog = event->tp_event->prog;
	if (prog) {
		event->tp_event->prog = NULL;
		bpf_prog_put(prog);
	}
}

6595
#else
L
Li Zefan 已提交
6596

6597
static inline void perf_tp_register(void)
6598 6599
{
}
L
Li Zefan 已提交
6600 6601 6602 6603 6604 6605 6606 6607 6608 6609

static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
	return -ENOENT;
}

static void perf_event_free_filter(struct perf_event *event)
{
}

6610 6611 6612 6613 6614 6615 6616 6617
static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
{
	return -ENOENT;
}

static void perf_event_free_bpf_prog(struct perf_event *event)
{
}
6618
#endif /* CONFIG_EVENT_TRACING */
6619

6620
#ifdef CONFIG_HAVE_HW_BREAKPOINT
6621
void perf_bp_event(struct perf_event *bp, void *data)
6622
{
6623 6624 6625
	struct perf_sample_data sample;
	struct pt_regs *regs = data;

6626
	perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
6627

P
Peter Zijlstra 已提交
6628
	if (!bp->hw.state && !perf_exclude_event(bp, regs))
6629
		perf_swevent_event(bp, 1, &sample, regs);
6630 6631 6632
}
#endif

6633 6634 6635
/*
 * hrtimer based swevent callback
 */
6636

6637
static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
6638
{
6639 6640 6641 6642 6643
	enum hrtimer_restart ret = HRTIMER_RESTART;
	struct perf_sample_data data;
	struct pt_regs *regs;
	struct perf_event *event;
	u64 period;
6644

6645
	event = container_of(hrtimer, struct perf_event, hw.hrtimer);
P
Peter Zijlstra 已提交
6646 6647 6648 6649

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return HRTIMER_NORESTART;

6650
	event->pmu->read(event);
6651

6652
	perf_sample_data_init(&data, 0, event->hw.last_period);
6653 6654 6655
	regs = get_irq_regs();

	if (regs && !perf_exclude_event(event, regs)) {
6656
		if (!(event->attr.exclude_idle && is_idle_task(current)))
6657
			if (__perf_event_overflow(event, 1, &data, regs))
6658 6659
				ret = HRTIMER_NORESTART;
	}
6660

6661 6662
	period = max_t(u64, 10000, event->hw.sample_period);
	hrtimer_forward_now(hrtimer, ns_to_ktime(period));
6663

6664
	return ret;
6665 6666
}

6667
static void perf_swevent_start_hrtimer(struct perf_event *event)
6668
{
6669
	struct hw_perf_event *hwc = &event->hw;
6670 6671 6672 6673
	s64 period;

	if (!is_sampling_event(event))
		return;
6674

6675 6676 6677 6678
	period = local64_read(&hwc->period_left);
	if (period) {
		if (period < 0)
			period = 10000;
P
Peter Zijlstra 已提交
6679

6680 6681 6682 6683 6684
		local64_set(&hwc->period_left, 0);
	} else {
		period = max_t(u64, 10000, hwc->sample_period);
	}
	__hrtimer_start_range_ns(&hwc->hrtimer,
6685
				ns_to_ktime(period), 0,
6686
				HRTIMER_MODE_REL_PINNED, 0);
6687
}
6688 6689

static void perf_swevent_cancel_hrtimer(struct perf_event *event)
6690
{
6691 6692
	struct hw_perf_event *hwc = &event->hw;

6693
	if (is_sampling_event(event)) {
6694
		ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
P
Peter Zijlstra 已提交
6695
		local64_set(&hwc->period_left, ktime_to_ns(remaining));
6696 6697 6698

		hrtimer_cancel(&hwc->hrtimer);
	}
6699 6700
}

P
Peter Zijlstra 已提交
6701 6702 6703 6704 6705 6706 6707 6708 6709 6710 6711 6712 6713 6714 6715 6716 6717 6718 6719 6720
static void perf_swevent_init_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!is_sampling_event(event))
		return;

	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swevent_hrtimer;

	/*
	 * Since hrtimers have a fixed rate, we can do a static freq->period
	 * mapping and avoid the whole period adjust feedback stuff.
	 */
	if (event->attr.freq) {
		long freq = event->attr.sample_freq;

		event->attr.sample_period = NSEC_PER_SEC / freq;
		hwc->sample_period = event->attr.sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
6721
		hwc->last_period = hwc->sample_period;
P
Peter Zijlstra 已提交
6722 6723 6724 6725
		event->attr.freq = 0;
	}
}

6726 6727 6728 6729 6730
/*
 * Software event: cpu wall time clock
 */

static void cpu_clock_event_update(struct perf_event *event)
6731
{
6732 6733 6734
	s64 prev;
	u64 now;

P
Peter Zijlstra 已提交
6735
	now = local_clock();
6736 6737
	prev = local64_xchg(&event->hw.prev_count, now);
	local64_add(now - prev, &event->count);
6738 6739
}

P
Peter Zijlstra 已提交
6740
static void cpu_clock_event_start(struct perf_event *event, int flags)
6741
{
P
Peter Zijlstra 已提交
6742
	local64_set(&event->hw.prev_count, local_clock());
6743 6744 6745
	perf_swevent_start_hrtimer(event);
}

P
Peter Zijlstra 已提交
6746
static void cpu_clock_event_stop(struct perf_event *event, int flags)
6747
{
6748 6749 6750
	perf_swevent_cancel_hrtimer(event);
	cpu_clock_event_update(event);
}
6751

P
Peter Zijlstra 已提交
6752 6753 6754 6755
static int cpu_clock_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		cpu_clock_event_start(event, flags);
6756
	perf_event_update_userpage(event);
P
Peter Zijlstra 已提交
6757 6758 6759 6760 6761 6762 6763 6764 6765

	return 0;
}

static void cpu_clock_event_del(struct perf_event *event, int flags)
{
	cpu_clock_event_stop(event, flags);
}

6766 6767 6768 6769
static void cpu_clock_event_read(struct perf_event *event)
{
	cpu_clock_event_update(event);
}
6770

6771 6772 6773 6774 6775 6776 6777 6778
static int cpu_clock_event_init(struct perf_event *event)
{
	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
		return -ENOENT;

6779 6780 6781 6782 6783 6784
	/*
	 * no branch sampling for software events
	 */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

P
Peter Zijlstra 已提交
6785 6786
	perf_swevent_init_hrtimer(event);

6787
	return 0;
6788 6789
}

6790
static struct pmu perf_cpu_clock = {
6791 6792
	.task_ctx_nr	= perf_sw_context,

6793 6794
	.capabilities	= PERF_PMU_CAP_NO_NMI,

6795
	.event_init	= cpu_clock_event_init,
P
Peter Zijlstra 已提交
6796 6797 6798 6799
	.add		= cpu_clock_event_add,
	.del		= cpu_clock_event_del,
	.start		= cpu_clock_event_start,
	.stop		= cpu_clock_event_stop,
6800 6801 6802 6803 6804 6805 6806 6807
	.read		= cpu_clock_event_read,
};

/*
 * Software event: task time clock
 */

static void task_clock_event_update(struct perf_event *event, u64 now)
6808
{
6809 6810
	u64 prev;
	s64 delta;
6811

6812 6813 6814 6815
	prev = local64_xchg(&event->hw.prev_count, now);
	delta = now - prev;
	local64_add(delta, &event->count);
}
6816

P
Peter Zijlstra 已提交
6817
static void task_clock_event_start(struct perf_event *event, int flags)
6818
{
P
Peter Zijlstra 已提交
6819
	local64_set(&event->hw.prev_count, event->ctx->time);
6820 6821 6822
	perf_swevent_start_hrtimer(event);
}

P
Peter Zijlstra 已提交
6823
static void task_clock_event_stop(struct perf_event *event, int flags)
6824 6825 6826
{
	perf_swevent_cancel_hrtimer(event);
	task_clock_event_update(event, event->ctx->time);
P
Peter Zijlstra 已提交
6827 6828 6829 6830 6831 6832
}

static int task_clock_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		task_clock_event_start(event, flags);
6833
	perf_event_update_userpage(event);
6834

P
Peter Zijlstra 已提交
6835 6836 6837 6838 6839 6840
	return 0;
}

static void task_clock_event_del(struct perf_event *event, int flags)
{
	task_clock_event_stop(event, PERF_EF_UPDATE);
6841 6842 6843 6844
}

static void task_clock_event_read(struct perf_event *event)
{
6845 6846 6847
	u64 now = perf_clock();
	u64 delta = now - event->ctx->timestamp;
	u64 time = event->ctx->time + delta;
6848 6849 6850 6851 6852

	task_clock_event_update(event, time);
}

static int task_clock_event_init(struct perf_event *event)
L
Li Zefan 已提交
6853
{
6854 6855 6856 6857 6858 6859
	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
		return -ENOENT;

6860 6861 6862 6863 6864 6865
	/*
	 * no branch sampling for software events
	 */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

P
Peter Zijlstra 已提交
6866 6867
	perf_swevent_init_hrtimer(event);

6868
	return 0;
L
Li Zefan 已提交
6869 6870
}

6871
static struct pmu perf_task_clock = {
6872 6873
	.task_ctx_nr	= perf_sw_context,

6874 6875
	.capabilities	= PERF_PMU_CAP_NO_NMI,

6876
	.event_init	= task_clock_event_init,
P
Peter Zijlstra 已提交
6877 6878 6879 6880
	.add		= task_clock_event_add,
	.del		= task_clock_event_del,
	.start		= task_clock_event_start,
	.stop		= task_clock_event_stop,
6881 6882
	.read		= task_clock_event_read,
};
L
Li Zefan 已提交
6883

P
Peter Zijlstra 已提交
6884
static void perf_pmu_nop_void(struct pmu *pmu)
6885 6886
{
}
L
Li Zefan 已提交
6887

P
Peter Zijlstra 已提交
6888
static int perf_pmu_nop_int(struct pmu *pmu)
L
Li Zefan 已提交
6889
{
P
Peter Zijlstra 已提交
6890
	return 0;
L
Li Zefan 已提交
6891 6892
}

P
Peter Zijlstra 已提交
6893
static void perf_pmu_start_txn(struct pmu *pmu)
L
Li Zefan 已提交
6894
{
P
Peter Zijlstra 已提交
6895
	perf_pmu_disable(pmu);
L
Li Zefan 已提交
6896 6897
}

P
Peter Zijlstra 已提交
6898 6899 6900 6901 6902
static int perf_pmu_commit_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
	return 0;
}
6903

P
Peter Zijlstra 已提交
6904
static void perf_pmu_cancel_txn(struct pmu *pmu)
6905
{
P
Peter Zijlstra 已提交
6906
	perf_pmu_enable(pmu);
6907 6908
}

6909 6910
static int perf_event_idx_default(struct perf_event *event)
{
6911
	return 0;
6912 6913
}

P
Peter Zijlstra 已提交
6914 6915 6916 6917
/*
 * Ensures all contexts with the same task_ctx_nr have the same
 * pmu_cpu_context too.
 */
6918
static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
6919
{
P
Peter Zijlstra 已提交
6920
	struct pmu *pmu;
6921

P
Peter Zijlstra 已提交
6922 6923
	if (ctxn < 0)
		return NULL;
6924

P
Peter Zijlstra 已提交
6925 6926 6927 6928
	list_for_each_entry(pmu, &pmus, entry) {
		if (pmu->task_ctx_nr == ctxn)
			return pmu->pmu_cpu_context;
	}
6929

P
Peter Zijlstra 已提交
6930
	return NULL;
6931 6932
}

6933
static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
6934
{
6935 6936 6937 6938 6939 6940 6941
	int cpu;

	for_each_possible_cpu(cpu) {
		struct perf_cpu_context *cpuctx;

		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);

6942 6943
		if (cpuctx->unique_pmu == old_pmu)
			cpuctx->unique_pmu = pmu;
6944 6945 6946 6947 6948 6949
	}
}

static void free_pmu_context(struct pmu *pmu)
{
	struct pmu *i;
6950

P
Peter Zijlstra 已提交
6951
	mutex_lock(&pmus_lock);
6952
	/*
P
Peter Zijlstra 已提交
6953
	 * Like a real lame refcount.
6954
	 */
6955 6956 6957
	list_for_each_entry(i, &pmus, entry) {
		if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
			update_pmu_context(i, pmu);
P
Peter Zijlstra 已提交
6958
			goto out;
6959
		}
P
Peter Zijlstra 已提交
6960
	}
6961

6962
	free_percpu(pmu->pmu_cpu_context);
P
Peter Zijlstra 已提交
6963 6964
out:
	mutex_unlock(&pmus_lock);
6965
}
P
Peter Zijlstra 已提交
6966
static struct idr pmu_idr;
6967

P
Peter Zijlstra 已提交
6968 6969 6970 6971 6972 6973 6974
static ssize_t
type_show(struct device *dev, struct device_attribute *attr, char *page)
{
	struct pmu *pmu = dev_get_drvdata(dev);

	return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
}
6975
static DEVICE_ATTR_RO(type);
P
Peter Zijlstra 已提交
6976

6977 6978 6979 6980 6981 6982 6983 6984 6985 6986 6987 6988 6989 6990 6991 6992 6993 6994 6995 6996 6997 6998 6999 7000 7001 7002 7003 7004 7005 7006 7007 7008 7009 7010 7011 7012 7013 7014 7015 7016 7017 7018 7019
static ssize_t
perf_event_mux_interval_ms_show(struct device *dev,
				struct device_attribute *attr,
				char *page)
{
	struct pmu *pmu = dev_get_drvdata(dev);

	return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
}

static ssize_t
perf_event_mux_interval_ms_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	int timer, cpu, ret;

	ret = kstrtoint(buf, 0, &timer);
	if (ret)
		return ret;

	if (timer < 1)
		return -EINVAL;

	/* same value, noting to do */
	if (timer == pmu->hrtimer_interval_ms)
		return count;

	pmu->hrtimer_interval_ms = timer;

	/* update all cpuctx for this PMU */
	for_each_possible_cpu(cpu) {
		struct perf_cpu_context *cpuctx;
		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
		cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);

		if (hrtimer_active(&cpuctx->hrtimer))
			hrtimer_forward_now(&cpuctx->hrtimer, cpuctx->hrtimer_interval);
	}

	return count;
}
7020
static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
7021

7022 7023 7024 7025
static struct attribute *pmu_dev_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_perf_event_mux_interval_ms.attr,
	NULL,
P
Peter Zijlstra 已提交
7026
};
7027
ATTRIBUTE_GROUPS(pmu_dev);
P
Peter Zijlstra 已提交
7028 7029 7030 7031

static int pmu_bus_running;
static struct bus_type pmu_bus = {
	.name		= "event_source",
7032
	.dev_groups	= pmu_dev_groups,
P
Peter Zijlstra 已提交
7033 7034 7035 7036 7037 7038 7039 7040 7041 7042 7043 7044 7045 7046 7047
};

static void pmu_dev_release(struct device *dev)
{
	kfree(dev);
}

static int pmu_dev_alloc(struct pmu *pmu)
{
	int ret = -ENOMEM;

	pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!pmu->dev)
		goto out;

7048
	pmu->dev->groups = pmu->attr_groups;
P
Peter Zijlstra 已提交
7049 7050 7051 7052 7053 7054 7055 7056 7057 7058 7059 7060 7061 7062 7063 7064 7065 7066 7067 7068
	device_initialize(pmu->dev);
	ret = dev_set_name(pmu->dev, "%s", pmu->name);
	if (ret)
		goto free_dev;

	dev_set_drvdata(pmu->dev, pmu);
	pmu->dev->bus = &pmu_bus;
	pmu->dev->release = pmu_dev_release;
	ret = device_add(pmu->dev);
	if (ret)
		goto free_dev;

out:
	return ret;

free_dev:
	put_device(pmu->dev);
	goto out;
}

7069
static struct lock_class_key cpuctx_mutex;
7070
static struct lock_class_key cpuctx_lock;
7071

7072
int perf_pmu_register(struct pmu *pmu, const char *name, int type)
7073
{
P
Peter Zijlstra 已提交
7074
	int cpu, ret;
7075

7076
	mutex_lock(&pmus_lock);
P
Peter Zijlstra 已提交
7077 7078 7079 7080
	ret = -ENOMEM;
	pmu->pmu_disable_count = alloc_percpu(int);
	if (!pmu->pmu_disable_count)
		goto unlock;
7081

P
Peter Zijlstra 已提交
7082 7083 7084 7085 7086 7087
	pmu->type = -1;
	if (!name)
		goto skip_type;
	pmu->name = name;

	if (type < 0) {
T
Tejun Heo 已提交
7088 7089 7090
		type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
		if (type < 0) {
			ret = type;
P
Peter Zijlstra 已提交
7091 7092 7093 7094 7095
			goto free_pdc;
		}
	}
	pmu->type = type;

P
Peter Zijlstra 已提交
7096 7097 7098 7099 7100 7101
	if (pmu_bus_running) {
		ret = pmu_dev_alloc(pmu);
		if (ret)
			goto free_idr;
	}

P
Peter Zijlstra 已提交
7102
skip_type:
P
Peter Zijlstra 已提交
7103 7104 7105
	pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
	if (pmu->pmu_cpu_context)
		goto got_cpu_context;
7106

W
Wei Yongjun 已提交
7107
	ret = -ENOMEM;
P
Peter Zijlstra 已提交
7108 7109
	pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
	if (!pmu->pmu_cpu_context)
P
Peter Zijlstra 已提交
7110
		goto free_dev;
7111

P
Peter Zijlstra 已提交
7112 7113 7114 7115
	for_each_possible_cpu(cpu) {
		struct perf_cpu_context *cpuctx;

		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
7116
		__perf_event_init_context(&cpuctx->ctx);
7117
		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
7118
		lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
P
Peter Zijlstra 已提交
7119
		cpuctx->ctx.pmu = pmu;
7120 7121 7122

		__perf_cpu_hrtimer_init(cpuctx, cpu);

7123
		cpuctx->unique_pmu = pmu;
P
Peter Zijlstra 已提交
7124
	}
7125

P
Peter Zijlstra 已提交
7126
got_cpu_context:
P
Peter Zijlstra 已提交
7127 7128 7129 7130 7131 7132 7133 7134 7135 7136 7137 7138 7139 7140
	if (!pmu->start_txn) {
		if (pmu->pmu_enable) {
			/*
			 * If we have pmu_enable/pmu_disable calls, install
			 * transaction stubs that use that to try and batch
			 * hardware accesses.
			 */
			pmu->start_txn  = perf_pmu_start_txn;
			pmu->commit_txn = perf_pmu_commit_txn;
			pmu->cancel_txn = perf_pmu_cancel_txn;
		} else {
			pmu->start_txn  = perf_pmu_nop_void;
			pmu->commit_txn = perf_pmu_nop_int;
			pmu->cancel_txn = perf_pmu_nop_void;
7141
		}
7142
	}
7143

P
Peter Zijlstra 已提交
7144 7145 7146 7147 7148
	if (!pmu->pmu_enable) {
		pmu->pmu_enable  = perf_pmu_nop_void;
		pmu->pmu_disable = perf_pmu_nop_void;
	}

7149 7150 7151
	if (!pmu->event_idx)
		pmu->event_idx = perf_event_idx_default;

7152
	list_add_rcu(&pmu->entry, &pmus);
P
Peter Zijlstra 已提交
7153 7154
	ret = 0;
unlock:
7155 7156
	mutex_unlock(&pmus_lock);

P
Peter Zijlstra 已提交
7157
	return ret;
P
Peter Zijlstra 已提交
7158

P
Peter Zijlstra 已提交
7159 7160 7161 7162
free_dev:
	device_del(pmu->dev);
	put_device(pmu->dev);

P
Peter Zijlstra 已提交
7163 7164 7165 7166
free_idr:
	if (pmu->type >= PERF_TYPE_MAX)
		idr_remove(&pmu_idr, pmu->type);

P
Peter Zijlstra 已提交
7167 7168 7169
free_pdc:
	free_percpu(pmu->pmu_disable_count);
	goto unlock;
7170
}
7171
EXPORT_SYMBOL_GPL(perf_pmu_register);
7172

7173
void perf_pmu_unregister(struct pmu *pmu)
7174
{
7175 7176 7177
	mutex_lock(&pmus_lock);
	list_del_rcu(&pmu->entry);
	mutex_unlock(&pmus_lock);
7178

7179
	/*
P
Peter Zijlstra 已提交
7180 7181
	 * We dereference the pmu list under both SRCU and regular RCU, so
	 * synchronize against both of those.
7182
	 */
7183
	synchronize_srcu(&pmus_srcu);
P
Peter Zijlstra 已提交
7184
	synchronize_rcu();
7185

P
Peter Zijlstra 已提交
7186
	free_percpu(pmu->pmu_disable_count);
P
Peter Zijlstra 已提交
7187 7188
	if (pmu->type >= PERF_TYPE_MAX)
		idr_remove(&pmu_idr, pmu->type);
P
Peter Zijlstra 已提交
7189 7190
	device_del(pmu->dev);
	put_device(pmu->dev);
7191
	free_pmu_context(pmu);
7192
}
7193
EXPORT_SYMBOL_GPL(perf_pmu_unregister);
7194

7195 7196
static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
{
P
Peter Zijlstra 已提交
7197
	struct perf_event_context *ctx = NULL;
7198 7199 7200 7201
	int ret;

	if (!try_module_get(pmu->module))
		return -ENODEV;
P
Peter Zijlstra 已提交
7202 7203 7204 7205 7206 7207

	if (event->group_leader != event) {
		ctx = perf_event_ctx_lock(event->group_leader);
		BUG_ON(!ctx);
	}

7208 7209
	event->pmu = pmu;
	ret = pmu->event_init(event);
P
Peter Zijlstra 已提交
7210 7211 7212 7213

	if (ctx)
		perf_event_ctx_unlock(event->group_leader, ctx);

7214 7215 7216 7217 7218 7219
	if (ret)
		module_put(pmu->module);

	return ret;
}

7220 7221 7222 7223
struct pmu *perf_init_event(struct perf_event *event)
{
	struct pmu *pmu = NULL;
	int idx;
7224
	int ret;
7225 7226

	idx = srcu_read_lock(&pmus_srcu);
P
Peter Zijlstra 已提交
7227 7228 7229 7230

	rcu_read_lock();
	pmu = idr_find(&pmu_idr, event->attr.type);
	rcu_read_unlock();
7231
	if (pmu) {
7232
		ret = perf_try_init_event(pmu, event);
7233 7234
		if (ret)
			pmu = ERR_PTR(ret);
P
Peter Zijlstra 已提交
7235
		goto unlock;
7236
	}
P
Peter Zijlstra 已提交
7237

7238
	list_for_each_entry_rcu(pmu, &pmus, entry) {
7239
		ret = perf_try_init_event(pmu, event);
7240
		if (!ret)
P
Peter Zijlstra 已提交
7241
			goto unlock;
7242

7243 7244
		if (ret != -ENOENT) {
			pmu = ERR_PTR(ret);
P
Peter Zijlstra 已提交
7245
			goto unlock;
7246
		}
7247
	}
P
Peter Zijlstra 已提交
7248 7249
	pmu = ERR_PTR(-ENOENT);
unlock:
7250
	srcu_read_unlock(&pmus_srcu, idx);
7251

7252
	return pmu;
7253 7254
}

7255 7256 7257 7258 7259 7260 7261 7262 7263
static void account_event_cpu(struct perf_event *event, int cpu)
{
	if (event->parent)
		return;

	if (is_cgroup_event(event))
		atomic_inc(&per_cpu(perf_cgroup_events, cpu));
}

7264 7265
static void account_event(struct perf_event *event)
{
7266 7267 7268
	if (event->parent)
		return;

7269 7270 7271 7272 7273 7274 7275 7276
	if (event->attach_state & PERF_ATTACH_TASK)
		static_key_slow_inc(&perf_sched_events.key);
	if (event->attr.mmap || event->attr.mmap_data)
		atomic_inc(&nr_mmap_events);
	if (event->attr.comm)
		atomic_inc(&nr_comm_events);
	if (event->attr.task)
		atomic_inc(&nr_task_events);
7277 7278 7279 7280
	if (event->attr.freq) {
		if (atomic_inc_return(&nr_freq_events) == 1)
			tick_nohz_full_kick_all();
	}
7281
	if (has_branch_stack(event))
7282
		static_key_slow_inc(&perf_sched_events.key);
7283
	if (is_cgroup_event(event))
7284
		static_key_slow_inc(&perf_sched_events.key);
7285 7286

	account_event_cpu(event, event->cpu);
7287 7288
}

T
Thomas Gleixner 已提交
7289
/*
7290
 * Allocate and initialize a event structure
T
Thomas Gleixner 已提交
7291
 */
7292
static struct perf_event *
7293
perf_event_alloc(struct perf_event_attr *attr, int cpu,
7294 7295 7296
		 struct task_struct *task,
		 struct perf_event *group_leader,
		 struct perf_event *parent_event,
7297
		 perf_overflow_handler_t overflow_handler,
7298
		 void *context, int cgroup_fd)
T
Thomas Gleixner 已提交
7299
{
P
Peter Zijlstra 已提交
7300
	struct pmu *pmu;
7301 7302
	struct perf_event *event;
	struct hw_perf_event *hwc;
7303
	long err = -EINVAL;
T
Thomas Gleixner 已提交
7304

7305 7306 7307 7308 7309
	if ((unsigned)cpu >= nr_cpu_ids) {
		if (!task || cpu != -1)
			return ERR_PTR(-EINVAL);
	}

7310
	event = kzalloc(sizeof(*event), GFP_KERNEL);
7311
	if (!event)
7312
		return ERR_PTR(-ENOMEM);
T
Thomas Gleixner 已提交
7313

7314
	/*
7315
	 * Single events are their own group leaders, with an
7316 7317 7318
	 * empty sibling list:
	 */
	if (!group_leader)
7319
		group_leader = event;
7320

7321 7322
	mutex_init(&event->child_mutex);
	INIT_LIST_HEAD(&event->child_list);
7323

7324 7325 7326
	INIT_LIST_HEAD(&event->group_entry);
	INIT_LIST_HEAD(&event->event_entry);
	INIT_LIST_HEAD(&event->sibling_list);
7327
	INIT_LIST_HEAD(&event->rb_entry);
7328
	INIT_LIST_HEAD(&event->active_entry);
7329 7330
	INIT_HLIST_NODE(&event->hlist_entry);

7331

7332
	init_waitqueue_head(&event->waitq);
7333
	init_irq_work(&event->pending, perf_pending_event);
T
Thomas Gleixner 已提交
7334

7335
	mutex_init(&event->mmap_mutex);
7336

7337
	atomic_long_set(&event->refcount, 1);
7338 7339 7340 7341 7342
	event->cpu		= cpu;
	event->attr		= *attr;
	event->group_leader	= group_leader;
	event->pmu		= NULL;
	event->oncpu		= -1;
7343

7344
	event->parent		= parent_event;
7345

7346
	event->ns		= get_pid_ns(task_active_pid_ns(current));
7347
	event->id		= atomic64_inc_return(&perf_event_id);
7348

7349
	event->state		= PERF_EVENT_STATE_INACTIVE;
7350

7351 7352 7353
	if (task) {
		event->attach_state = PERF_ATTACH_TASK;
		/*
7354 7355 7356
		 * XXX pmu::event_init needs to know what task to account to
		 * and we cannot use the ctx information because we need the
		 * pmu before we get a ctx.
7357
		 */
7358
		event->hw.target = task;
7359 7360
	}

7361 7362 7363 7364
	event->clock = &local_clock;
	if (parent_event)
		event->clock = parent_event->clock;

7365
	if (!overflow_handler && parent_event) {
7366
		overflow_handler = parent_event->overflow_handler;
7367 7368
		context = parent_event->overflow_handler_context;
	}
7369

7370
	event->overflow_handler	= overflow_handler;
7371
	event->overflow_handler_context = context;
7372

J
Jiri Olsa 已提交
7373
	perf_event__state_init(event);
7374

7375
	pmu = NULL;
7376

7377
	hwc = &event->hw;
7378
	hwc->sample_period = attr->sample_period;
7379
	if (attr->freq && attr->sample_freq)
7380
		hwc->sample_period = 1;
7381
	hwc->last_period = hwc->sample_period;
7382

7383
	local64_set(&hwc->period_left, hwc->sample_period);
7384

7385
	/*
7386
	 * we currently do not support PERF_FORMAT_GROUP on inherited events
7387
	 */
7388
	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
7389
		goto err_ns;
7390 7391 7392

	if (!has_branch_stack(event))
		event->attr.branch_sample_type = 0;
7393

7394 7395 7396 7397 7398 7399
	if (cgroup_fd != -1) {
		err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
		if (err)
			goto err_ns;
	}

7400
	pmu = perf_init_event(event);
7401
	if (!pmu)
7402 7403
		goto err_ns;
	else if (IS_ERR(pmu)) {
7404
		err = PTR_ERR(pmu);
7405
		goto err_ns;
I
Ingo Molnar 已提交
7406
	}
7407

7408
	if (!event->parent) {
7409 7410
		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
			err = get_callchain_buffers();
7411 7412
			if (err)
				goto err_pmu;
7413
		}
7414
	}
7415

7416
	return event;
7417 7418 7419 7420

err_pmu:
	if (event->destroy)
		event->destroy(event);
7421
	module_put(pmu->module);
7422
err_ns:
7423 7424
	if (is_cgroup_event(event))
		perf_detach_cgroup(event);
7425 7426 7427 7428 7429
	if (event->ns)
		put_pid_ns(event->ns);
	kfree(event);

	return ERR_PTR(err);
T
Thomas Gleixner 已提交
7430 7431
}

7432 7433
static int perf_copy_attr(struct perf_event_attr __user *uattr,
			  struct perf_event_attr *attr)
7434 7435
{
	u32 size;
7436
	int ret;
7437 7438 7439 7440 7441 7442 7443 7444 7445 7446 7447 7448 7449 7450 7451 7452 7453 7454 7455 7456 7457 7458 7459 7460

	if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
		return -EFAULT;

	/*
	 * zero the full structure, so that a short copy will be nice.
	 */
	memset(attr, 0, sizeof(*attr));

	ret = get_user(size, &uattr->size);
	if (ret)
		return ret;

	if (size > PAGE_SIZE)	/* silly large */
		goto err_size;

	if (!size)		/* abi compat */
		size = PERF_ATTR_SIZE_VER0;

	if (size < PERF_ATTR_SIZE_VER0)
		goto err_size;

	/*
	 * If we're handed a bigger struct than we know of,
7461 7462 7463
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we dont know about yet.
7464 7465
	 */
	if (size > sizeof(*attr)) {
7466 7467 7468
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;
7469

7470 7471
		addr = (void __user *)uattr + sizeof(*attr);
		end  = (void __user *)uattr + size;
7472

7473
		for (; addr < end; addr++) {
7474 7475 7476 7477 7478 7479
			ret = get_user(val, addr);
			if (ret)
				return ret;
			if (val)
				goto err_size;
		}
7480
		size = sizeof(*attr);
7481 7482 7483 7484 7485 7486
	}

	ret = copy_from_user(attr, uattr, size);
	if (ret)
		return -EFAULT;

7487
	if (attr->__reserved_1)
7488 7489 7490 7491 7492 7493 7494 7495
		return -EINVAL;

	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
		return -EINVAL;

	if (attr->read_format & ~(PERF_FORMAT_MAX-1))
		return -EINVAL;

7496 7497 7498 7499 7500 7501 7502 7503 7504 7505 7506 7507 7508 7509 7510 7511 7512 7513 7514 7515 7516 7517 7518 7519 7520 7521 7522 7523
	if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
		u64 mask = attr->branch_sample_type;

		/* only using defined bits */
		if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
			return -EINVAL;

		/* at least one branch bit must be set */
		if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
			return -EINVAL;

		/* propagate priv level, when not set for branch */
		if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {

			/* exclude_kernel checked on syscall entry */
			if (!attr->exclude_kernel)
				mask |= PERF_SAMPLE_BRANCH_KERNEL;

			if (!attr->exclude_user)
				mask |= PERF_SAMPLE_BRANCH_USER;

			if (!attr->exclude_hv)
				mask |= PERF_SAMPLE_BRANCH_HV;
			/*
			 * adjust user setting (for HW filter setup)
			 */
			attr->branch_sample_type = mask;
		}
7524 7525
		/* privileged levels capture (kernel, hv): check permissions */
		if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
7526 7527
		    && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
7528
	}
7529

7530
	if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
7531
		ret = perf_reg_validate(attr->sample_regs_user);
7532 7533 7534 7535 7536 7537 7538 7539 7540 7541 7542 7543 7544 7545 7546 7547 7548 7549
		if (ret)
			return ret;
	}

	if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
		if (!arch_perf_have_user_stack_dump())
			return -ENOSYS;

		/*
		 * We have __u32 type for the size, but so far
		 * we can only use __u16 as maximum due to the
		 * __u16 sample size limit.
		 */
		if (attr->sample_stack_user >= USHRT_MAX)
			ret = -EINVAL;
		else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
			ret = -EINVAL;
	}
7550

7551 7552
	if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
		ret = perf_reg_validate(attr->sample_regs_intr);
7553 7554 7555 7556 7557 7558 7559 7560 7561
out:
	return ret;

err_size:
	put_user(sizeof(*attr), &uattr->size);
	ret = -E2BIG;
	goto out;
}

7562 7563
static int
perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
7564
{
7565
	struct ring_buffer *rb = NULL;
7566 7567
	int ret = -EINVAL;

7568
	if (!output_event)
7569 7570
		goto set;

7571 7572
	/* don't allow circular references */
	if (event == output_event)
7573 7574
		goto out;

7575 7576 7577 7578 7579 7580 7581
	/*
	 * Don't allow cross-cpu buffers
	 */
	if (output_event->cpu != event->cpu)
		goto out;

	/*
7582
	 * If its not a per-cpu rb, it must be the same task.
7583 7584 7585 7586
	 */
	if (output_event->cpu == -1 && output_event->ctx != event->ctx)
		goto out;

7587 7588 7589 7590 7591 7592
	/*
	 * Mixing clocks in the same buffer is trouble you don't need.
	 */
	if (output_event->clock != event->clock)
		goto out;

7593 7594 7595 7596 7597 7598 7599
	/*
	 * If both events generate aux data, they must be on the same PMU
	 */
	if (has_aux(event) && has_aux(output_event) &&
	    event->pmu != output_event->pmu)
		goto out;

7600
set:
7601
	mutex_lock(&event->mmap_mutex);
7602 7603 7604
	/* Can't redirect output if we've got an active mmap() */
	if (atomic_read(&event->mmap_count))
		goto unlock;
7605

7606
	if (output_event) {
7607 7608 7609
		/* get the rb we want to redirect to */
		rb = ring_buffer_get(output_event);
		if (!rb)
7610
			goto unlock;
7611 7612
	}

7613
	ring_buffer_attach(event, rb);
7614

7615
	ret = 0;
7616 7617 7618
unlock:
	mutex_unlock(&event->mmap_mutex);

7619 7620 7621 7622
out:
	return ret;
}

P
Peter Zijlstra 已提交
7623 7624 7625 7626 7627 7628 7629 7630 7631
static void mutex_lock_double(struct mutex *a, struct mutex *b)
{
	if (b < a)
		swap(a, b);

	mutex_lock(a);
	mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
}
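
/*
 * Example (illustrative): two paths taking ctx A then ctx B and B then A can
 * ABBA-deadlock.  Ordering the pair by mutex address, as above, makes every
 * caller acquire the two mutexes in the same order:
 *
 *	mutex_lock_double(&gctx->mutex, &ctx->mutex);
 *	...
 *	mutex_unlock(&gctx->mutex);
 *	mutex_unlock(&ctx->mutex);	// unlock order does not matter
 */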

static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
{
	bool nmi_safe = false;

	switch (clk_id) {
	case CLOCK_MONOTONIC:
		event->clock = &ktime_get_mono_fast_ns;
		nmi_safe = true;
		break;

	case CLOCK_MONOTONIC_RAW:
		event->clock = &ktime_get_raw_fast_ns;
		nmi_safe = true;
		break;

	case CLOCK_REALTIME:
		event->clock = &ktime_get_real_ns;
		break;

	case CLOCK_BOOTTIME:
		event->clock = &ktime_get_boot_ns;
		break;

	case CLOCK_TAI:
		event->clock = &ktime_get_tai_ns;
		break;

	default:
		return -EINVAL;
	}

	if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI))
		return -EINVAL;

	return 0;
}
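
/*
 * Illustrative sketch (not from the original source): user space opts into a
 * specific clock before perf_event_open(), e.g.
 *
 *	struct perf_event_attr attr = { ... };
 *	attr.use_clockid = 1;
 *	attr.clockid	 = CLOCK_MONOTONIC_RAW;
 *
 * after which sample timestamps come from ktime_get_raw_fast_ns() and can be
 * correlated with user-space clock_gettime(CLOCK_MONOTONIC_RAW) readings.
 */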

/**
 * sys_perf_event_open - open a performance event, associate it to a task/cpu
 *
 * @attr_uptr:	event_id type attributes for monitoring/sampling
 * @pid:		target pid
 * @cpu:		target cpu
 * @group_fd:		group leader event fd
 */
SYSCALL_DEFINE5(perf_event_open,
		struct perf_event_attr __user *, attr_uptr,
		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
	struct perf_event *group_leader = NULL, *output_event = NULL;
	struct perf_event *event, *sibling;
	struct perf_event_attr attr;
	struct perf_event_context *ctx, *uninitialized_var(gctx);
	struct file *event_file = NULL;
	struct fd group = {NULL, 0};
	struct task_struct *task = NULL;
	struct pmu *pmu;
	int event_fd;
	int move_group = 0;
	int err;
	int f_flags = O_RDWR;
	int cgroup_fd = -1;

	/* for future expandability... */
	if (flags & ~PERF_FLAG_ALL)
		return -EINVAL;

	err = perf_copy_attr(attr_uptr, &attr);
	if (err)
		return err;

	if (!attr.exclude_kernel) {
		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
	}

	if (attr.freq) {
		if (attr.sample_freq > sysctl_perf_event_sample_rate)
			return -EINVAL;
	} else {
		if (attr.sample_period & (1ULL << 63))
			return -EINVAL;
	}

	/*
	 * In cgroup mode, the pid argument is used to pass the fd
	 * opened to the cgroup directory in cgroupfs. The cpu argument
	 * designates the cpu on which to monitor threads from that
	 * cgroup.
	 */
	if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
		return -EINVAL;

	if (flags & PERF_FLAG_FD_CLOEXEC)
		f_flags |= O_CLOEXEC;

	event_fd = get_unused_fd_flags(f_flags);
	if (event_fd < 0)
		return event_fd;

	if (group_fd != -1) {
		err = perf_fget_light(group_fd, &group);
		if (err)
			goto err_fd;
		group_leader = group.file->private_data;
		if (flags & PERF_FLAG_FD_OUTPUT)
			output_event = group_leader;
		if (flags & PERF_FLAG_FD_NO_GROUP)
			group_leader = NULL;
	}

	if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
		task = find_lively_task_by_vpid(pid);
		if (IS_ERR(task)) {
			err = PTR_ERR(task);
			goto err_group_fd;
		}
	}

	if (task && group_leader &&
	    group_leader->attr.inherit != attr.inherit) {
		err = -EINVAL;
		goto err_task;
	}

	get_online_cpus();

	if (flags & PERF_FLAG_PID_CGROUP)
		cgroup_fd = pid;

	event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
				 NULL, NULL, cgroup_fd);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err_cpus;
	}

	if (is_sampling_event(event)) {
		if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
			err = -ENOTSUPP;
			goto err_alloc;
		}
	}

	account_event(event);

	/*
	 * Special case software events and allow them to be part of
	 * any hardware group.
	 */
	pmu = event->pmu;

	if (attr.use_clockid) {
		err = perf_event_set_clock(event, attr.clockid);
		if (err)
			goto err_alloc;
	}

	if (group_leader &&
	    (is_software_event(event) != is_software_event(group_leader))) {
		if (is_software_event(event)) {
			/*
			 * If event and group_leader are not both a software
			 * event, and event is, then group leader is not.
			 *
			 * Allow the addition of software events to !software
			 * groups, this is safe because software events never
			 * fail to schedule.
			 */
			pmu = group_leader->pmu;
		} else if (is_software_event(group_leader) &&
			   (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
			/*
			 * In case the group is a pure software group, and we
			 * try to add a hardware event, move the whole group to
			 * the hardware context.
			 */
			move_group = 1;
		}
	}

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pmu, task, event);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err_alloc;
	}

	if (task) {
		put_task_struct(task);
		task = NULL;
	}

	/*
	 * Look up the group leader (we will attach this event to it):
	 */
	if (group_leader) {
		err = -EINVAL;

		/*
		 * Do not allow a recursive hierarchy (this new sibling
		 * becoming part of another group-sibling):
		 */
		if (group_leader->group_leader != group_leader)
			goto err_context;

		/* All events in a group should have the same clock */
		if (group_leader->clock != event->clock)
			goto err_context;

		/*
		 * Do not allow to attach to a group in a different
		 * task or CPU context:
		 */
		if (move_group) {
			/*
			 * Make sure we're both on the same task, or both
			 * per-cpu events.
			 */
			if (group_leader->ctx->task != ctx->task)
				goto err_context;

			/*
			 * Make sure we're both events for the same CPU;
			 * grouping events for different CPUs is broken; since
			 * you can never concurrently schedule them anyhow.
			 */
			if (group_leader->cpu != event->cpu)
				goto err_context;
		} else {
			if (group_leader->ctx != ctx)
				goto err_context;
		}

		/*
		 * Only a group leader can be exclusive or pinned
		 */
		if (attr.exclusive || attr.pinned)
			goto err_context;
	}

	if (output_event) {
		err = perf_event_set_output(event, output_event);
		if (err)
			goto err_context;
	}

	event_file = anon_inode_getfile("[perf_event]", &perf_fops, event,
					f_flags);
	if (IS_ERR(event_file)) {
		err = PTR_ERR(event_file);
		goto err_context;
	}

	if (move_group) {
		gctx = group_leader->ctx;

		/*
		 * See perf_event_ctx_lock() for comments on the details
		 * of swizzling perf_event::ctx.
		 */
		mutex_lock_double(&gctx->mutex, &ctx->mutex);

		perf_remove_from_context(group_leader, false);

		list_for_each_entry(sibling, &group_leader->sibling_list,
				    group_entry) {
			perf_remove_from_context(sibling, false);
			put_ctx(gctx);
		}
	} else {
		mutex_lock(&ctx->mutex);
	}

	WARN_ON_ONCE(ctx->parent_ctx);

	if (move_group) {
		/*
		 * Wait for everybody to stop referencing the events through
		 * the old lists, before installing it on new lists.
		 */
		synchronize_rcu();

		/*
		 * Install the group siblings before the group leader.
		 *
		 * Because a group leader will try and install the entire group
		 * (through the sibling list, which is still intact), we can
		 * end up with siblings installed in the wrong context.
		 *
		 * By installing siblings first we NO-OP because they're not
		 * reachable through the group lists.
		 */
		list_for_each_entry(sibling, &group_leader->sibling_list,
				    group_entry) {
			perf_event__state_init(sibling);
			perf_install_in_context(ctx, sibling, sibling->cpu);
			get_ctx(ctx);
		}

		/*
		 * Removing from the context ends up with a disabled
		 * event. What we want here is the event in its initial
		 * startup state, ready to be added into the new context.
		 */
		perf_event__state_init(group_leader);
		perf_install_in_context(ctx, group_leader, group_leader->cpu);
		get_ctx(ctx);
	}

	perf_install_in_context(ctx, event, event->cpu);
	perf_unpin_context(ctx);

	if (move_group) {
		mutex_unlock(&gctx->mutex);
		put_ctx(gctx);
	}
	mutex_unlock(&ctx->mutex);

	put_online_cpus();

	event->owner = current;

	mutex_lock(&current->perf_event_mutex);
	list_add_tail(&event->owner_entry, &current->perf_event_list);
	mutex_unlock(&current->perf_event_mutex);

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(event);
	perf_event__id_header_size(event);

	/*
	 * Drop the reference on the group_event after placing the
	 * new event on the sibling_list. This ensures destruction
	 * of the group leader will find the pointer to itself in
	 * perf_group_detach().
	 */
	fdput(group);
	fd_install(event_fd, event_file);
	return event_fd;

err_context:
	perf_unpin_context(ctx);
	put_ctx(ctx);
err_alloc:
	free_event(event);
err_cpus:
	put_online_cpus();
err_task:
	if (task)
		put_task_struct(task);
err_group_fd:
	fdput(group);
err_fd:
	put_unused_fd(event_fd);
	return err;
}
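
/*
 * Illustrative user-space sketch (not part of this file): counting
 * instructions in the calling thread with the raw syscall:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.size		= sizeof(attr),
 *		.config		= PERF_COUNT_HW_INSTRUCTIONS,
 *		.disabled	= 1,
 *		.exclude_kernel	= 1,
 *	};
 *	long long count;
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 *	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	// ... workload ...
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 *	close(fd);
 */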

/**
 * perf_event_create_kernel_counter
 *
 * @attr: attributes of the counter to create
 * @cpu: cpu in which the counter is bound
 * @task: task to profile (NULL for percpu)
 */
struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t overflow_handler,
				 void *context)
{
	struct perf_event_context *ctx;
	struct perf_event *event;
	int err;

	/*
	 * Get the target context (task or percpu):
	 */

	event = perf_event_alloc(attr, cpu, task, NULL, NULL,
				 overflow_handler, context, -1);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err;
	}

	/* Mark owner so we could distinguish it from user events. */
	event->owner = EVENT_OWNER_KERNEL;

	account_event(event);

	ctx = find_get_context(event->pmu, task, event);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err_free;
	}

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, event, cpu);
	perf_unpin_context(ctx);
	mutex_unlock(&ctx->mutex);

	return event;

err_free:
	free_event(event);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
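
/*
 * Illustrative in-kernel sketch (not part of this file): a module counting
 * cycles on CPU 0; error handling trimmed, names other than the exported
 * helpers are hypothetical.
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.size	= sizeof(attr),
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	struct perf_event *ev;
 *	u64 enabled, running, count;
 *
 *	ev = perf_event_create_kernel_counter(&attr, 0, NULL, NULL, NULL);
 *	if (IS_ERR(ev))
 *		return PTR_ERR(ev);
 *	...
 *	count = perf_event_read_value(ev, &enabled, &running);
 *	perf_event_release_kernel(ev);
 */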

void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
{
	struct perf_event_context *src_ctx;
	struct perf_event_context *dst_ctx;
	struct perf_event *event, *tmp;
	LIST_HEAD(events);

	src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
	dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;

	/*
	 * See perf_event_ctx_lock() for comments on the details
	 * of swizzling perf_event::ctx.
	 */
	mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
	list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
				 event_entry) {
		perf_remove_from_context(event, false);
		unaccount_event_cpu(event, src_cpu);
		put_ctx(src_ctx);
		list_add(&event->migrate_entry, &events);
	}

	/*
	 * Wait for the events to quiesce before re-instating them.
	 */
	synchronize_rcu();

	/*
	 * Re-instate events in 2 passes.
	 *
	 * Skip over group leaders and only install siblings on this first
	 * pass, siblings will not get enabled without a leader, however a
	 * leader will enable its siblings, even if those are still on the old
	 * context.
	 */
	list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
		if (event->group_leader == event)
			continue;

		list_del(&event->migrate_entry);
		if (event->state >= PERF_EVENT_STATE_OFF)
			event->state = PERF_EVENT_STATE_INACTIVE;
		account_event_cpu(event, dst_cpu);
		perf_install_in_context(dst_ctx, event, dst_cpu);
		get_ctx(dst_ctx);
	}

	/*
	 * Once all the siblings are setup properly, install the group leaders
	 * to make it go.
	 */
	list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
		list_del(&event->migrate_entry);
		if (event->state >= PERF_EVENT_STATE_OFF)
			event->state = PERF_EVENT_STATE_INACTIVE;
		account_event_cpu(event, dst_cpu);
		perf_install_in_context(dst_ctx, event, dst_cpu);
		get_ctx(dst_ctx);
	}
	mutex_unlock(&dst_ctx->mutex);
	mutex_unlock(&src_ctx->mutex);
}
EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
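
/*
 * Illustrative note (not from the original source): PMU drivers with
 * package-wide (uncore-style) contexts typically call this from their CPU
 * hotplug handling, e.g.
 *
 *	perf_pmu_migrate_context(pmu, dying_cpu, target_cpu);
 *
 * so that already-opened events keep counting on a surviving CPU;
 * "dying_cpu" and "target_cpu" are hypothetical variable names.
 */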

static void sync_child_event(struct perf_event *child_event,
			       struct task_struct *child)
{
	struct perf_event *parent_event = child_event->parent;
	u64 child_val;

	if (child_event->attr.inherit_stat)
		perf_event_read_event(child_event, child);

	child_val = perf_event_count(child_event);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_event->child_count);
	atomic64_add(child_event->total_time_enabled,
		     &parent_event->child_total_time_enabled);
	atomic64_add(child_event->total_time_running,
		     &parent_event->child_total_time_running);

	/*
	 * Remove this event from the parent's list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_del_init(&child_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	/*
	 * Make sure user/parent get notified, that we just
	 * lost one event.
	 */
	perf_event_wakeup(parent_event);

	/*
	 * Release the parent event, if this was the last
	 * reference to it.
	 */
	put_event(parent_event);
}

static void
__perf_event_exit_task(struct perf_event *child_event,
			 struct perf_event_context *child_ctx,
			 struct task_struct *child)
{
	/*
	 * Do not destroy the 'original' grouping; because of the context
	 * switch optimization the original events could've ended up in a
	 * random child task.
	 *
	 * If we were to destroy the original group, all group related
	 * operations would cease to function properly after this random
	 * child dies.
	 *
	 * Do destroy all inherited groups, we don't care about those
	 * and being thorough is better.
	 */
	perf_remove_from_context(child_event, !!child_event->parent);

	/*
	 * It can happen that the parent exits first, and has events
	 * that are still around due to the child reference. These
	 * events need to be zapped.
	 */
	if (child_event->parent) {
		sync_child_event(child_event, child);
		free_event(child_event);
	} else {
		child_event->state = PERF_EVENT_STATE_EXIT;
		perf_event_wakeup(child_event);
	}
}

static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
{
	struct perf_event *child_event, *next;
	struct perf_event_context *child_ctx, *clone_ctx = NULL;
	unsigned long flags;

	if (likely(!child->perf_event_ctxp[ctxn])) {
		perf_event_task(child, NULL, 0);
		return;
	}

	local_irq_save(flags);
	/*
	 * We can't reschedule here because interrupts are disabled,
	 * and either child is current or it is a task that can't be
	 * scheduled, so we are now safe from rescheduling changing
	 * our context.
	 */
	child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);

	/*
	 * Take the context lock here so that if find_get_context is
	 * reading child->perf_event_ctxp, we wait until it has
	 * incremented the context's refcount before we do put_ctx below.
	 */
	raw_spin_lock(&child_ctx->lock);
	task_ctx_sched_out(child_ctx);
	child->perf_event_ctxp[ctxn] = NULL;

	/*
	 * If this context is a clone, unclone it so it can't get
	 * swapped to another process while we're removing all
	 * the events from it.
	 */
	clone_ctx = unclone_ctx(child_ctx);
	update_context_time(child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	if (clone_ctx)
		put_ctx(clone_ctx);

	/*
	 * Report the task dead after unscheduling the events so that we
	 * won't get any samples after PERF_RECORD_EXIT. We can however still
	 * get a few PERF_RECORD_READ events.
	 */
	perf_event_task(child, child_ctx, 0);

	/*
	 * We can recurse on the same lock type through:
	 *
	 *   __perf_event_exit_task()
	 *     sync_child_event()
	 *       put_event()
	 *         mutex_lock(&ctx->mutex)
	 *
	 * But since it's the parent context it won't be the same instance.
	 */
	mutex_lock(&child_ctx->mutex);

	list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
		__perf_event_exit_task(child_event, child_ctx, child);

	mutex_unlock(&child_ctx->mutex);

	put_ctx(child_ctx);
}

/*
 * When a child task exits, feed back event values to parent events.
 */
void perf_event_exit_task(struct task_struct *child)
{
	struct perf_event *event, *tmp;
	int ctxn;

	mutex_lock(&child->perf_event_mutex);
	list_for_each_entry_safe(event, tmp, &child->perf_event_list,
				 owner_entry) {
		list_del_init(&event->owner_entry);

		/*
		 * Ensure the list deletion is visible before we clear
		 * the owner, closes a race against perf_release() where
		 * we need to serialize on the owner->perf_event_mutex.
		 */
		smp_wmb();
		event->owner = NULL;
	}
	mutex_unlock(&child->perf_event_mutex);

	for_each_task_context_nr(ctxn)
		perf_event_exit_task_context(child, ctxn);
}

static void perf_free_event(struct perf_event *event,
			    struct perf_event_context *ctx)
{
	struct perf_event *parent = event->parent;

	if (WARN_ON_ONCE(!parent))
		return;

	mutex_lock(&parent->child_mutex);
	list_del_init(&event->child_list);
	mutex_unlock(&parent->child_mutex);

	put_event(parent);

	raw_spin_lock_irq(&ctx->lock);
	perf_group_detach(event);
	list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
	free_event(event);
}

/*
 * Free an unexposed, unused context as created by inheritance by
 * perf_event_init_task below, used by fork() in case of failure.
 *
 * Not all locks are strictly required, but take them anyway to be nice and
 * help out with the lockdep assertions.
 */
void perf_event_free_task(struct task_struct *task)
{
	struct perf_event_context *ctx;
	struct perf_event *event, *tmp;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		mutex_lock(&ctx->mutex);
again:
		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
				group_entry)
			perf_free_event(event, ctx);

		list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
				group_entry)
			perf_free_event(event, ctx);

		if (!list_empty(&ctx->pinned_groups) ||
				!list_empty(&ctx->flexible_groups))
			goto again;

		mutex_unlock(&ctx->mutex);

		put_ctx(ctx);
	}
}

void perf_event_delayed_put(struct task_struct *task)
{
	int ctxn;

	for_each_task_context_nr(ctxn)
		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
}

/*
 * inherit an event from parent task to child task:
 */
static struct perf_event *
inherit_event(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event *group_leader,
	      struct perf_event_context *child_ctx)
{
	enum perf_event_active_state parent_state = parent_event->state;
	struct perf_event *child_event;
	unsigned long flags;

	/*
	 * Instead of creating recursive hierarchies of events,
	 * we link inherited events back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_event->parent)
		parent_event = parent_event->parent;

	child_event = perf_event_alloc(&parent_event->attr,
					   parent_event->cpu,
					   child,
					   group_leader, parent_event,
					   NULL, NULL, -1);
	if (IS_ERR(child_event))
		return child_event;

	if (is_orphaned_event(parent_event) ||
	    !atomic_long_inc_not_zero(&parent_event->refcount)) {
		free_event(child_event);
		return NULL;
	}

	get_ctx(child_ctx);

	/*
	 * Make the child state follow the state of the parent event,
	 * not its attr.disabled bit.  We hold the parent's mutex,
	 * so we won't race with perf_event_{en, dis}able_family.
	 */
	if (parent_state >= PERF_EVENT_STATE_INACTIVE)
		child_event->state = PERF_EVENT_STATE_INACTIVE;
	else
		child_event->state = PERF_EVENT_STATE_OFF;

	if (parent_event->attr.freq) {
		u64 sample_period = parent_event->hw.sample_period;
		struct hw_perf_event *hwc = &child_event->hw;

		hwc->sample_period = sample_period;
		hwc->last_period   = sample_period;

		local64_set(&hwc->period_left, sample_period);
	}

	child_event->ctx = child_ctx;
	child_event->overflow_handler = parent_event->overflow_handler;
	child_event->overflow_handler_context
		= parent_event->overflow_handler_context;

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(child_event);
	perf_event__id_header_size(child_event);

	/*
	 * Link it up in the child's context:
	 */
	raw_spin_lock_irqsave(&child_ctx->lock, flags);
	add_event_to_ctx(child_event, child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Link this into the parent event's child list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_add_tail(&child_event->child_list, &parent_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	return child_event;
}

static int inherit_group(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event_context *child_ctx)
{
	struct perf_event *leader;
	struct perf_event *sub;
	struct perf_event *child_ctr;

	leader = inherit_event(parent_event, parent, parent_ctx,
				 child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
		child_ctr = inherit_event(sub, parent, parent_ctx,
					    child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}

static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
		   struct perf_event_context *parent_ctx,
P
Peter Zijlstra 已提交
8464
		   struct task_struct *child, int ctxn,
8465 8466 8467
		   int *inherited_all)
{
	int ret;
P
Peter Zijlstra 已提交
8468
	struct perf_event_context *child_ctx;
8469 8470 8471 8472

	if (!event->attr.inherit) {
		*inherited_all = 0;
		return 0;
8473 8474
	}

8475
	child_ctx = child->perf_event_ctxp[ctxn];
8476 8477 8478 8479 8480 8481 8482
	if (!child_ctx) {
		/*
		 * This is executed from the parent task context, so
		 * inherit events that have been marked for cloning.
		 * First allocate and initialize a context for the
		 * child.
		 */
8483

8484
		child_ctx = alloc_perf_context(parent_ctx->pmu, child);
8485 8486
		if (!child_ctx)
			return -ENOMEM;
8487

P
Peter Zijlstra 已提交
8488
		child->perf_event_ctxp[ctxn] = child_ctx;
8489 8490 8491 8492 8493 8494 8495 8496 8497
	}

	ret = inherit_group(event, parent, parent_ctx,
			    child, child_ctx);

	if (ret)
		*inherited_all = 0;

	return ret;
8498 8499
}

8500
/*
8501
 * Initialize the perf_event context in task_struct
8502
 */
8503
static int perf_event_init_context(struct task_struct *child, int ctxn)
8504
{
8505
	struct perf_event_context *child_ctx, *parent_ctx;
8506 8507
	struct perf_event_context *cloned_ctx;
	struct perf_event *event;
8508
	struct task_struct *parent = current;
8509
	int inherited_all = 1;
8510
	unsigned long flags;
8511
	int ret = 0;
8512

P
Peter Zijlstra 已提交
8513
	if (likely(!parent->perf_event_ctxp[ctxn]))
8514 8515
		return 0;

8516
	/*
8517 8518
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
8519
	 */
P
Peter Zijlstra 已提交
8520
	parent_ctx = perf_pin_task_context(parent, ctxn);
8521 8522
	if (!parent_ctx)
		return 0;
8523

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

8531 8532 8533 8534
	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
8535
	mutex_lock(&parent_ctx->mutex);
8536 8537 8538 8539 8540

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
8541
	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
P
Peter Zijlstra 已提交
8542 8543
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
8544 8545 8546
		if (ret)
			break;
	}
8547

	/*
	 * We can't hold ctx->lock when iterating the ->flexible_group list due
	 * to allocations, but we need to prevent rotation because
	 * rotate_ctx() will change the list from interrupt context.
	 */
	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 1;
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);

8557
	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
P
Peter Zijlstra 已提交
8558 8559
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
8560
		if (ret)
8561
			break;
8562 8563
	}

8564 8565 8566
	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 0;

P
Peter Zijlstra 已提交
8567
	child_ctx = child->perf_event_ctxp[ctxn];
8568

8569
	if (child_ctx && inherited_all) {
8570 8571 8572
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
P
Peter Zijlstra 已提交
8573 8574 8575
		 *
		 * Note that if the parent is a clone, the holding of
		 * parent_ctx->lock avoids it from being uncloned.
8576
		 */
P
Peter Zijlstra 已提交
8577
		cloned_ctx = parent_ctx->parent_ctx;
8578 8579
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
8580
			child_ctx->parent_gen = parent_ctx->parent_gen;
8581 8582 8583 8584 8585
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
8586 8587
	}

P
Peter Zijlstra 已提交
8588
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
8589
	mutex_unlock(&parent_ctx->mutex);
8590

8591
	perf_unpin_context(parent_ctx);
8592
	put_ctx(parent_ctx);
8593

8594
	return ret;
8595 8596
}

P
Peter Zijlstra 已提交
/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_task(struct task_struct *child)
{
	int ctxn, ret;

8604 8605 8606 8607
	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
	mutex_init(&child->perf_event_mutex);
	INIT_LIST_HEAD(&child->perf_event_list);

P
Peter Zijlstra 已提交
8608 8609
	for_each_task_context_nr(ctxn) {
		ret = perf_event_init_context(child, ctxn);
P
Peter Zijlstra 已提交
8610 8611
		if (ret) {
			perf_event_free_task(child);
P
Peter Zijlstra 已提交
8612
			return ret;
P
Peter Zijlstra 已提交
8613
		}
P
Peter Zijlstra 已提交
8614 8615 8616 8617 8618
	}

	return 0;
}

8619 8620
static void __init perf_event_init_all_cpus(void)
{
8621
	struct swevent_htable *swhash;
8622 8623 8624
	int cpu;

	for_each_possible_cpu(cpu) {
8625 8626
		swhash = &per_cpu(swevent_htable, cpu);
		mutex_init(&swhash->hlist_mutex);
8627
		INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
8628 8629 8630
	}
}

8631
static void perf_event_init_cpu(int cpu)
T
Thomas Gleixner 已提交
8632
{
P
Peter Zijlstra 已提交
8633
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
T
Thomas Gleixner 已提交
8634

8635
	mutex_lock(&swhash->hlist_mutex);
8636
	swhash->online = true;
8637
	if (swhash->hlist_refcount > 0) {
8638 8639
		struct swevent_hlist *hlist;

8640 8641 8642
		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
		WARN_ON(!hlist);
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
8643
	}
8644
	mutex_unlock(&swhash->hlist_mutex);
T
Thomas Gleixner 已提交
8645 8646
}

P
Peter Zijlstra 已提交
8647
#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
P
Peter Zijlstra 已提交
8648
static void __perf_event_exit_context(void *__info)
T
Thomas Gleixner 已提交
8649
{
8650
	struct remove_event re = { .detach_group = true };
P
Peter Zijlstra 已提交
8651
	struct perf_event_context *ctx = __info;
T
Thomas Gleixner 已提交
8652

P
Peter Zijlstra 已提交
8653
	rcu_read_lock();
8654 8655
	list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
		__perf_remove_from_context(&re);
P
Peter Zijlstra 已提交
8656
	rcu_read_unlock();
T
Thomas Gleixner 已提交
8657
}
P
Peter Zijlstra 已提交
8658 8659 8660 8661 8662 8663 8664 8665 8666

static void perf_event_exit_cpu_context(int cpu)
{
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
8667
		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
P
Peter Zijlstra 已提交

		mutex_lock(&ctx->mutex);
		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
		mutex_unlock(&ctx->mutex);
	}
	srcu_read_unlock(&pmus_srcu, idx);
}

8676
static void perf_event_exit_cpu(int cpu)
T
Thomas Gleixner 已提交
8677
{
8678
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
8679

P
Peter Zijlstra 已提交
8680 8681
	perf_event_exit_cpu_context(cpu);

8682
	mutex_lock(&swhash->hlist_mutex);
8683
	swhash->online = false;
8684 8685
	swevent_hlist_release(swhash);
	mutex_unlock(&swhash->hlist_mutex);
T
Thomas Gleixner 已提交
8686 8687
}
#else
8688
static inline void perf_event_exit_cpu(int cpu) { }
T
Thomas Gleixner 已提交
8689 8690
#endif

P
Peter Zijlstra 已提交
static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{
	int cpu;

	for_each_online_cpu(cpu)
		perf_event_exit_cpu(cpu);

	return NOTIFY_OK;
}

/*
 * Run the perf reboot notifier at the very last possible moment so that
 * the generic watchdog code runs as long as possible.
 */
static struct notifier_block perf_reboot_notifier = {
	.notifier_call = perf_reboot,
	.priority = INT_MIN,
};

8711
static int
T
Thomas Gleixner 已提交
8712 8713 8714 8715
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

8716
	switch (action & ~CPU_TASKS_FROZEN) {
T
Thomas Gleixner 已提交
8717 8718

	case CPU_UP_PREPARE:
P
Peter Zijlstra 已提交
8719
	case CPU_DOWN_FAILED:
8720
		perf_event_init_cpu(cpu);
T
Thomas Gleixner 已提交
8721 8722
		break;

P
Peter Zijlstra 已提交
8723
	case CPU_UP_CANCELED:
T
Thomas Gleixner 已提交
8724
	case CPU_DOWN_PREPARE:
8725
		perf_event_exit_cpu(cpu);
T
Thomas Gleixner 已提交
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

8734
void __init perf_event_init(void)
T
Thomas Gleixner 已提交
8735
{
8736 8737
	int ret;

P
Peter Zijlstra 已提交
8738 8739
	idr_init(&pmu_idr);

8740
	perf_event_init_all_cpus();
8741
	init_srcu_struct(&pmus_srcu);
P
Peter Zijlstra 已提交
8742 8743 8744
	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
	perf_pmu_register(&perf_cpu_clock, NULL, -1);
	perf_pmu_register(&perf_task_clock, NULL, -1);
8745 8746
	perf_tp_register();
	perf_cpu_notifier(perf_cpu_notify);
P
Peter Zijlstra 已提交
8747
	register_reboot_notifier(&perf_reboot_notifier);
8748 8749 8750

	ret = init_hw_breakpoint();
	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
8751 8752 8753

	/* do not patch jump label more than once per second */
	jump_label_rate_limit(&perf_sched_events, HZ);

	/*
	 * Build time assertion that we keep the data_head at the intended
	 * location.  IOW, validation we got the __reserved[] size right.
	 */
	BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
		     != 1024);
T
Thomas Gleixner 已提交
8761
}
P
Peter Zijlstra 已提交
8762

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
			      char *page)
{
	struct perf_pmu_events_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_attr, attr);

	if (pmu_attr->event_str)
		return sprintf(page, "%s\n", pmu_attr->event_str);

	return 0;
}

P
Peter Zijlstra 已提交
static int __init perf_event_sysfs_init(void)
{
	struct pmu *pmu;
	int ret;

	mutex_lock(&pmus_lock);

	ret = bus_register(&pmu_bus);
	if (ret)
		goto unlock;

	list_for_each_entry(pmu, &pmus, entry) {
		if (!pmu->name || pmu->type < 0)
			continue;

		ret = pmu_dev_alloc(pmu);
		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
	}
	pmu_bus_running = 1;
	ret = 0;

unlock:
	mutex_unlock(&pmus_lock);

	return ret;
}
device_initcall(perf_event_sysfs_init);
S
Stephane Eranian 已提交
8802 8803

#ifdef CONFIG_CGROUP_PERF
8804 8805
static struct cgroup_subsys_state *
perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
S
Stephane Eranian 已提交
8806 8807 8808
{
	struct perf_cgroup *jc;

8809
	jc = kzalloc(sizeof(*jc), GFP_KERNEL);
S
Stephane Eranian 已提交
	if (!jc)
		return ERR_PTR(-ENOMEM);

	jc->info = alloc_percpu(struct perf_cgroup_info);
	if (!jc->info) {
		kfree(jc);
		return ERR_PTR(-ENOMEM);
	}

	return &jc->css;
}

8822
static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
S
Stephane Eranian 已提交
8823
{
8824 8825
	struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);

S
Stephane Eranian 已提交
	free_percpu(jc->info);
	kfree(jc);
}

static int __perf_cgroup_move(void *info)
{
	struct task_struct *task = info;
	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
	return 0;
}

8837 8838
static void perf_cgroup_attach(struct cgroup_subsys_state *css,
			       struct cgroup_taskset *tset)
S
Stephane Eranian 已提交
8839
{
8840 8841
	struct task_struct *task;

8842
	cgroup_taskset_for_each(task, tset)
8843
		task_function_call(task, __perf_cgroup_move, task);
S
Stephane Eranian 已提交
8844 8845
}

8846 8847
static void perf_cgroup_exit(struct cgroup_subsys_state *css,
			     struct cgroup_subsys_state *old_css,
8848
			     struct task_struct *task)
S
Stephane Eranian 已提交
{
	/*
	 * cgroup_exit() is called in the copy_process() failure path.
	 * Ignore this case since the task hasn't run yet, this avoids
	 * trying to poke a half freed task state from generic code.
	 */
	if (!(task->flags & PF_EXITING))
		return;

8858
	task_function_call(task, __perf_cgroup_move, task);
S
Stephane Eranian 已提交
8859 8860
}

8861
struct cgroup_subsys perf_event_cgrp_subsys = {
8862 8863
	.css_alloc	= perf_cgroup_css_alloc,
	.css_free	= perf_cgroup_css_free,
8864
	.exit		= perf_cgroup_exit,
8865
	.attach		= perf_cgroup_attach,
S
Stephane Eranian 已提交
8866 8867
};
#endif /* CONFIG_CGROUP_PERF */