/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>

#include "internal.h"

#include <asm/irq_regs.h>

struct remote_function_call {
	struct task_struct	*p;
	int			(*func)(void *info);
	void			*info;
	int			ret;
};

static void remote_function(void *data)
{
	struct remote_function_call *tfc = data;
	struct task_struct *p = tfc->p;

	if (p) {
		tfc->ret = -EAGAIN;
		if (task_cpu(p) != smp_processor_id() || !task_curr(p))
			return;
	}

	tfc->ret = tfc->func(tfc->info);
}

/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:		the task to evaluate
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, which just calls the function directly.
 *
 * returns: @func return value, or
 *	    -ESRCH  - when the process isn't running
 *	    -EAGAIN - when the process moved away
 */
static int
task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
{
	struct remote_function_call data = {
		.p	= p,
		.func	= func,
		.info	= info,
		.ret	= -ESRCH, /* No such (running) process */
	};

	if (task_curr(p))
		smp_call_function_single(task_cpu(p), remote_function, &data, 1);

	return data.ret;
}

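/*
 * Illustrative sketch (not part of the original file): callers wrap their
 * per-task work in an int (*)(void *) callback and retry on -EAGAIN, since
 * the task can migrate between the task_curr() check above and the IPI
 * actually running remote_function(). A hypothetical user:
 */
#if 0
static int example_poke(void *info)
{
	/* runs on the CPU where the task is current, in IRQ context */
	return 0;
}

static int example_poke_task(struct task_struct *task)
{
	int ret;

	do {
		ret = task_function_call(task, example_poke, NULL);
	} while (ret == -EAGAIN);

	return ret;	/* 0, or -ESRCH if the task never ran */
}
#endif
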
/**
 * cpu_function_call - call a function on the cpu
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
{
	struct remote_function_call data = {
		.p	= NULL,
		.func	= func,
		.info	= info,
		.ret	= -ENXIO, /* No such CPU */
	};

	smp_call_function_single(cpu, remote_function, &data, 1);

	return data.ret;
}

#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
		       PERF_FLAG_FD_OUTPUT  |\
		       PERF_FLAG_PID_CGROUP)

/*
 * branch priv levels that need permission checks
 */
#define PERF_SAMPLE_BRANCH_PERM_PLM \
	(PERF_SAMPLE_BRANCH_KERNEL |\
	 PERF_SAMPLE_BRANCH_HV)

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

/*
 * perf_sched_events : >0 events exist
 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
 */
struct static_key_deferred perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;

/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */

/*
 * max perf event sample rate
 */
#define DEFAULT_MAX_SAMPLE_RATE 100000
int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
static int max_samples_per_tick __read_mostly =
	DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);

int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);

	return 0;
}

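/*
 * Worked example (illustrative, not in the original file): with the
 * default DEFAULT_MAX_SAMPLE_RATE of 100000 samples/sec and HZ == 1000,
 * max_samples_per_tick = DIV_ROUND_UP(100000, 1000) = 100 samples per
 * tick; writing a lower rate to the sysctl recomputes this budget above.
 */
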
static atomic64_t perf_event_id;

static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type);

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type,
			     struct task_struct *task);

static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);

static void ring_buffer_attach(struct perf_event *event,
			       struct ring_buffer *rb);

void __weak perf_event_print_debug(void)	{ }

extern __weak const char *perf_pmu_name(void)
{
	return "pmu";
}

static inline u64 perf_clock(void)
{
	return local_clock();
}

static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}

static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
			  struct perf_event_context *ctx)
{
	raw_spin_lock(&cpuctx->ctx.lock);
	if (ctx)
		raw_spin_lock(&ctx->lock);
}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
			    struct perf_event_context *ctx)
{
	if (ctx)
		raw_spin_unlock(&ctx->lock);
	raw_spin_unlock(&cpuctx->ctx.lock);
}

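/*
 * Illustrative sketch (not part of the original file): the nesting order
 * is always cpuctx->ctx.lock outside and ctx->lock inside, taken and
 * released through the helpers above:
 */
#if 0
static void example_ctx_update(struct perf_cpu_context *cpuctx)
{
	perf_ctx_lock(cpuctx, cpuctx->task_ctx);	/* cpu ctx, then task ctx */
	/* ... both contexts are now stable ... */
	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);	/* task ctx, then cpu ctx */
}
#endif
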
#ifdef CONFIG_CGROUP_PERF

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task)
{
	return container_of(task_subsys_state(task, perf_subsys_id),
			struct perf_cgroup, css);
}

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	return !event->cgrp || event->cgrp == cpuctx->cgrp;
}

static inline void perf_get_cgroup(struct perf_event *event)
{
	css_get(&event->cgrp->css);
}

static inline void perf_put_cgroup(struct perf_event *event)
{
	css_put(&event->cgrp->css);
}

static inline void perf_detach_cgroup(struct perf_event *event)
{
	perf_put_cgroup(event);
	event->cgrp = NULL;
}

static inline int is_cgroup_event(struct perf_event *event)
{
	return event->cgrp != NULL;
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	struct perf_cgroup_info *t;

	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	return t->time;
}

static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
{
	struct perf_cgroup_info *info;
	u64 now;

	now = perf_clock();

	info = this_cpu_ptr(cgrp->info);

	info->time += now - info->timestamp;
	info->timestamp = now;
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
	struct perf_cgroup *cgrp_out = cpuctx->cgrp;
	if (cgrp_out)
		__update_cgrp_time(cgrp_out);
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
	struct perf_cgroup *cgrp;

	/*
	 * ensure we access cgroup data only when needed and
	 * when we know the cgroup is pinned (css_get)
	 */
	if (!is_cgroup_event(event))
		return;

	cgrp = perf_cgroup_from_task(current);
	/*
	 * Do not update time when cgroup is not active
	 */
	if (cgrp == event->cgrp)
		__update_cgrp_time(event->cgrp);
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
	struct perf_cgroup *cgrp;
	struct perf_cgroup_info *info;

	/*
	 * ctx->lock held by caller
	 * ensure we do not access cgroup data
	 * unless we have the cgroup pinned (css_get)
	 */
	if (!task || !ctx->nr_cgroups)
		return;

	cgrp = perf_cgroup_from_task(task);
	info = this_cpu_ptr(cgrp->info);
	info->timestamp = ctx->timestamp;
}

#define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
#define PERF_CGROUP_SWIN	0x2 /* cgroup switch in events based on task */

/*
 * reschedule events based on the cgroup constraint of task.
 *
 * mode SWOUT : schedule out everything
 * mode SWIN : schedule in based on cgroup for next
 */
void perf_cgroup_switch(struct task_struct *task, int mode)
{
	struct perf_cpu_context *cpuctx;
	struct pmu *pmu;
	unsigned long flags;

	/*
	 * disable interrupts to avoid getting nr_cgroup
	 * changes via __perf_event_disable(). Also
	 * avoids preemption.
	 */
	local_irq_save(flags);

	/*
	 * we reschedule only in the presence of cgroup
	 * constrained events.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

		/*
		 * perf_cgroup_events says at least one
		 * context on this CPU has cgroup events.
		 *
		 * ctx->nr_cgroups reports the number of cgroup
		 * events for a context.
		 */
		if (cpuctx->ctx.nr_cgroups > 0) {
			perf_ctx_lock(cpuctx, cpuctx->task_ctx);
			perf_pmu_disable(cpuctx->ctx.pmu);

			if (mode & PERF_CGROUP_SWOUT) {
				cpu_ctx_sched_out(cpuctx, EVENT_ALL);
				/*
				 * must not be done before ctxswout due
				 * to event_filter_match() in event_sched_out()
				 */
				cpuctx->cgrp = NULL;
			}

			if (mode & PERF_CGROUP_SWIN) {
				WARN_ON_ONCE(cpuctx->cgrp);
				/* set cgrp before ctxsw in to
				 * allow event_filter_match() to not
				 * have to pass task around
				 */
				cpuctx->cgrp = perf_cgroup_from_task(task);
				cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
			}
			perf_pmu_enable(cpuctx->ctx.pmu);
			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
		}
	}

	rcu_read_unlock();

	local_irq_restore(flags);
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/*
	 * next is NULL when called from perf_event_enable_on_exec()
	 * that will systematically cause a cgroup_switch()
	 */
	if (next)
		cgrp2 = perf_cgroup_from_task(next);

	/*
	 * only schedule out current cgroup events if we know
	 * that we are switching to a different cgroup. Otherwise,
	 * do not touch the cgroup events.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/* prev can never be NULL */
	cgrp2 = perf_cgroup_from_task(prev);

	/*
	 * only need to schedule in cgroup events if we are changing
	 * cgroup during ctxsw. Cgroup events were not scheduled
	 * out during ctxsw if that was not the case.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWIN);
}

static inline int perf_cgroup_connect(int fd, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	struct perf_cgroup *cgrp;
	struct cgroup_subsys_state *css;
	struct file *file;
	int ret = 0, fput_needed;

	file = fget_light(fd, &fput_needed);
	if (!file)
		return -EBADF;

	css = cgroup_css_from_dir(file, perf_subsys_id);
	if (IS_ERR(css)) {
		ret = PTR_ERR(css);
		goto out;
	}

	cgrp = container_of(css, struct perf_cgroup, css);
	event->cgrp = cgrp;

	/* must be done before we fput() the file */
	perf_get_cgroup(event);

	/*
	 * all events in a group must monitor
	 * the same cgroup because a task belongs
	 * to only one perf cgroup at a time
	 */
	if (group_leader && group_leader->cgrp != cgrp) {
		perf_detach_cgroup(event);
		ret = -EINVAL;
	}
out:
	fput_light(file, fput_needed);
	return ret;
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
	struct perf_cgroup_info *t;
	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	event->shadow_ctx_time = now - t->timestamp;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
	/*
	 * when the current task's perf cgroup does not match
	 * the event's, we need to remember to call the
	 * perf_mark_enable() function the first time a task with
	 * a matching perf cgroup is scheduled in.
	 */
	if (is_cgroup_event(event) && !perf_cgroup_match(event))
		event->cgrp_defer_enabled = 1;
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
	struct perf_event *sub;
	u64 tstamp = perf_event_time(event);

	if (!event->cgrp_defer_enabled)
		return;

	event->cgrp_defer_enabled = 0;

	event->tstamp_enabled = tstamp - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
			sub->cgrp_defer_enabled = 0;
		}
	}
}
#else /* !CONFIG_CGROUP_PERF */

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	return true;
}

static inline void perf_detach_cgroup(struct perf_event *event)
{}

static inline int is_cgroup_event(struct perf_event *event)
{
	return 0;
}

static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
{
	return 0;
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
}

static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	return -EINVAL;
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
}

void
perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	return 0;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
}
#endif

void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!--(*count))
		pmu->pmu_enable(pmu);
}

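/*
 * Illustrative trace (not in the original file): the per-cpu
 * pmu_disable_count makes the pair above freely nestable; only the
 * outermost calls reach the hardware:
 *
 *	perf_pmu_disable(pmu);	count 0 -> 1: pmu->pmu_disable() runs
 *	perf_pmu_disable(pmu);	count 1 -> 2: no hardware access
 *	perf_pmu_enable(pmu);	count 2 -> 1: no hardware access
 *	perf_pmu_enable(pmu);	count 1 -> 0: pmu->pmu_enable() runs
 */
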
static DEFINE_PER_CPU(struct list_head, rotation_list);

/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 * because they're strictly cpu affine and rotate_start is called with IRQs
 * disabled, while rotate_context is called from IRQ context.
 */
static void perf_pmu_rotate_start(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
	struct list_head *head = &__get_cpu_var(rotation_list);

	WARN_ON(!irqs_disabled());

	if (list_empty(&cpuctx->rotation_list))
		list_add(&cpuctx->rotation_list, head);
}

static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		kfree_rcu(ctx, rcu_head);
	}
}

static void unclone_ctx(struct perf_event_context *ctx)
{
	if (ctx->parent_ctx) {
		put_ctx(ctx->parent_ctx);
		ctx->parent_ctx = NULL;
	}
}

static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_tgid_nr_ns(p, event->ns);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_pid_nr_ns(p, event->ns);
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}

/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
	struct perf_event_context *ctx;

	rcu_read_lock();
retry:
	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed.  Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so.  If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
}

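/*
 * Illustrative sketch (not part of the original file): a typical caller
 * pins the context, works on it, then unpins and drops the reference
 * taken inside perf_lock_task_context():
 */
#if 0
static int example_with_pinned_ctx(struct task_struct *task, int ctxn)
{
	struct perf_event_context *ctx;

	ctx = perf_pin_task_context(task, ctxn);
	if (!ctx)
		return -ESRCH;

	/* ... ctx can be neither swapped to another task nor freed here ... */

	perf_unpin_context(ctx);
	put_ctx(ctx);	/* drop the refcount taken while pinning */
	return 0;
}
#endif
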
/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

static u64 perf_event_time(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	if (is_cgroup_event(event))
		return perf_cgroup_event_time(event);

	return ctx ? ctx->time : 0;
}

/*
 * Update the total_time_enabled and total_time_running fields for an event.
 * The caller of this function needs to hold the ctx->lock.
 */
static void update_event_times(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	u64 run_end;

	if (event->state < PERF_EVENT_STATE_INACTIVE ||
	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
		return;
	/*
	 * in cgroup mode, time_enabled represents
	 * the time the event was enabled AND active
	 * tasks were in the monitored cgroup. This is
	 * independent of the activity of the context as
	 * there may be a mix of cgroup and non-cgroup events.
	 *
	 * That is why we treat cgroup events differently
	 * here.
	 */
	if (is_cgroup_event(event))
		run_end = perf_cgroup_event_time(event);
	else if (ctx->is_active)
		run_end = ctx->time;
	else
		run_end = event->tstamp_stopped;

	event->total_time_enabled = run_end - event->tstamp_enabled;

	if (event->state == PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;
	else
		run_end = perf_event_time(event);

	event->total_time_running = run_end - event->tstamp_running;
}

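/*
 * Worked example (illustrative, not in the original file): an event
 * enabled at t=100 whose PMU slot ran from t=120 until it was stopped
 * at t=150, read later while the context is inactive, reports
 *
 *	total_time_enabled = 150 - 100 = 50
 *	total_time_running = 150 - 120 = 30
 *
 * because both run_end computations above resolve to tstamp_stopped.
 */
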
/*
 * Update total_time_enabled and total_time_running for all events in a group.
 */
static void update_group_times(struct perf_event *leader)
{
	struct perf_event *event;

	update_event_times(leader);
	list_for_each_entry(event, &leader->sibling_list, group_entry)
		update_event_times(event);
}

static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
	if (event->attr.pinned)
		return &ctx->pinned_groups;
	else
		return &ctx->flexible_groups;
}

/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
	event->attach_state |= PERF_ATTACH_CONTEXT;

	/*
	 * If we're a stand alone event or group leader, we go to the context
	 * list, group events are kept attached to the group so that
	 * perf_group_detach can, at all times, locate all siblings.
	 */
	if (event->group_leader == event) {
		struct list_head *list;

		if (is_software_event(event))
			event->group_flags |= PERF_GROUP_SOFTWARE;

		list = ctx_group_list(event, ctx);
		list_add_tail(&event->group_entry, list);
	}

	if (is_cgroup_event(event))
		ctx->nr_cgroups++;

	if (has_branch_stack(event))
		ctx->nr_branch_stack++;

	list_add_rcu(&event->event_entry, &ctx->event_list);
	if (!ctx->nr_events)
		perf_pmu_rotate_start(ctx->pmu);
	ctx->nr_events++;
	if (event->attr.inherit_stat)
		ctx->nr_stat++;
}

/*
 * Called at perf_event creation and when events are attached/detached from a
 * group.
 */
static void perf_event__read_size(struct perf_event *event)
{
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_GROUP) {
		nr += event->group_leader->nr_siblings;
		size += sizeof(u64);
	}

	size += entry * nr;
	event->read_size = size;
}

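/*
 * Worked example (illustrative, not in the original file): for a leader
 * with two siblings and read_format == PERF_FORMAT_TOTAL_TIME_ENABLED |
 * PERF_FORMAT_ID | PERF_FORMAT_GROUP, the function above computes
 *
 *	entry = 8 + 8 = 16	(value + id)
 *	nr    = 1 + 2 = 3	(leader + siblings)
 *	size  = 8 + 8 = 16	(time_enabled + nr header)
 *	read_size = 16 + 16 * 3 = 64 bytes
 */
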
static void perf_event__header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	perf_event__read_size(event);

	if (sample_type & PERF_SAMPLE_IP)
		size += sizeof(data->ip);

	if (sample_type & PERF_SAMPLE_ADDR)
		size += sizeof(data->addr);

	if (sample_type & PERF_SAMPLE_PERIOD)
		size += sizeof(data->period);

	if (sample_type & PERF_SAMPLE_READ)
		size += event->read_size;

	event->header_size = size;
}

static void perf_event__id_header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu_entry);

	event->id_header_size = size;
}

static void perf_group_attach(struct perf_event *event)
{
	struct perf_event *group_leader = event->group_leader, *pos;

	/*
	 * We can have double attach due to group movement in perf_event_open.
	 */
	if (event->attach_state & PERF_ATTACH_GROUP)
		return;

	event->attach_state |= PERF_ATTACH_GROUP;

	if (group_leader == event)
		return;

	if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
			!is_software_event(event))
		group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

	list_add_tail(&event->group_entry, &group_leader->sibling_list);
	group_leader->nr_siblings++;

	perf_event__header_size(group_leader);

	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
		perf_event__header_size(pos);
}

/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx;
	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
		return;

	event->attach_state &= ~PERF_ATTACH_CONTEXT;

	if (is_cgroup_event(event)) {
		ctx->nr_cgroups--;
		cpuctx = __get_cpu_context(ctx);
		/*
		 * if there are no more cgroup events
		 * then clear cgrp to avoid stale pointer
		 * in update_cgrp_time_from_cpuctx()
		 */
		if (!ctx->nr_cgroups)
			cpuctx->cgrp = NULL;
	}

	if (has_branch_stack(event))
		ctx->nr_branch_stack--;

	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_rcu(&event->event_entry);

	if (event->group_leader == event)
		list_del_init(&event->group_entry);

	update_group_times(event);

	/*
	 * If event was in error state, then keep it
	 * that way, otherwise bogus counts will be
	 * returned on read(). The only way to get out
	 * of error state is by explicit re-enabling
	 * of the event
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;
}

static void perf_group_detach(struct perf_event *event)
{
	struct perf_event *sibling, *tmp;
	struct list_head *list = NULL;

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_GROUP))
		return;

	event->attach_state &= ~PERF_ATTACH_GROUP;

	/*
	 * If this is a sibling, remove it from its group.
	 */
	if (event->group_leader != event) {
		list_del_init(&event->group_entry);
		event->group_leader->nr_siblings--;
		goto out;
	}

	if (!list_empty(&event->group_entry))
		list = &event->group_entry;

	/*
	 * If this was a group event with sibling events then
	 * upgrade the siblings to singleton events by adding them
	 * to whatever list we are on.
	 */
	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
		if (list)
			list_move_tail(&sibling->group_entry, list);
		sibling->group_leader = sibling;

		/* Inherit group flags from the previous leader */
		sibling->group_flags = event->group_flags;
	}

out:
	perf_event__header_size(event->group_leader);

	list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
		perf_event__header_size(tmp);
}

static inline int
event_filter_match(struct perf_event *event)
{
	return (event->cpu == -1 || event->cpu == smp_processor_id())
	    && perf_cgroup_match(event);
}

static void
event_sched_out(struct perf_event *event,
		  struct perf_cpu_context *cpuctx,
		  struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);
	u64 delta;
	/*
	 * An event which could not be activated because of
	 * a filter mismatch still needs to have its timings
	 * maintained, otherwise bogus information is returned
	 * via read() for time_enabled, time_running:
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE
	    && !event_filter_match(event)) {
		delta = tstamp - event->tstamp_stopped;
		event->tstamp_running += delta;
		event->tstamp_stopped = tstamp;
	}

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;
	}
	event->tstamp_stopped = tstamp;
	event->pmu->del(event, 0);
	event->oncpu = -1;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (event->attr.freq && event->attr.sample_freq)
		ctx->nr_freq--;
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

static void
group_sched_out(struct perf_event *group_event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	struct perf_event *event;
	int state = group_event->state;

	event_sched_out(group_event, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		event_sched_out(event, cpuctx, ctx);

	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
		cpuctx->exclusive = 0;
}

/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static int __perf_remove_from_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	raw_spin_lock(&ctx->lock);
	event_sched_out(event, cpuctx, ctx);
	list_del_event(event, ctx);
	if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
		ctx->is_active = 0;
		cpuctx->task_ctx = NULL;
	}
	raw_spin_unlock(&ctx->lock);

	return 0;
}


/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * CPU events are removed with a smp call. For task events we only
 * call when the task is on a CPU.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_remove_from_context(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	lockdep_assert_held(&ctx->mutex);

	if (!task) {
		/*
		 * Per cpu events are removed via an smp call and
		 * the removal is always successful.
		 */
		cpu_function_call(event->cpu, __perf_remove_from_context, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_remove_from_context, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If we failed to find a running task, but find the context active now
	 * that we've acquired the ctx->lock, retry.
	 */
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since the task isn't running, it's safe to remove the event, us
	 * holding the ctx->lock ensures the task won't get scheduled in.
	 */
	list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to disable a performance event
 */
static int __perf_event_disable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
	 *
	 * Can trigger due to concurrent perf_event_context_sched_out()
	 * flipping contexts around.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return -EINVAL;

	raw_spin_lock(&ctx->lock);

	/*
	 * If the event is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
		update_context_time(ctx);
		update_cgrp_time_from_event(event);
		update_group_times(event);
		if (event == event->group_leader)
			group_sched_out(event, cpuctx, ctx);
		else
			event_sched_out(event, cpuctx, ctx);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock(&ctx->lock);

	return 0;
}

/*
 * Disable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
void perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the event on the cpu that it's on
		 */
		cpu_function_call(event->cpu, __perf_event_disable, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_event_disable, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the event is still active, we need to retry the cross-call.
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		raw_spin_unlock_irq(&ctx->lock);
		/*
		 * Reload the task pointer, it might have been changed by
		 * a concurrent perf_event_context_sched_out().
		 */
		task = ctx->task;
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_group_times(event);
		event->state = PERF_EVENT_STATE_OFF;
	}
	raw_spin_unlock_irq(&ctx->lock);
}
EXPORT_SYMBOL_GPL(perf_event_disable);

static void perf_set_shadow_time(struct perf_event *event,
				 struct perf_event_context *ctx,
				 u64 tstamp)
{
	/*
	 * use the correct time source for the time snapshot
	 *
	 * We could get by without this by leveraging the
	 * fact that to get to this function, the caller
	 * has most likely already called update_context_time()
	 * and update_cgrp_time_xx() and thus both timestamp
	 * are identical (or very close). Given that tstamp is,
	 * already adjusted for cgroup, we could say that:
	 *    tstamp - ctx->timestamp
	 * is equivalent to
	 *    tstamp - cgrp->timestamp.
	 *
	 * Then, in perf_output_read(), the calculation would
	 * work with no changes because:
	 * - event is guaranteed scheduled in
	 * - no scheduled out in between
	 * - thus the timestamp would be the same
	 *
	 * But this is a bit hairy.
	 *
	 * So instead, we have an explicit cgroup call to remain
	 * within the time time source all along. We believe it
	 * is cleaner and simpler to understand.
	 */
	if (is_cgroup_event(event))
		perf_cgroup_set_shadow_time(event, tstamp);
	else
		event->shadow_ctx_time = tstamp - ctx->timestamp;
}

#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_event *event, int enable);

static int
event_sched_in(struct perf_event *event,
		 struct perf_cpu_context *cpuctx,
		 struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);

	if (event->state <= PERF_EVENT_STATE_OFF)
		return 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();

	/*
	 * Unthrottle events, since we scheduled we might have missed several
	 * ticks already, also for a heavily scheduling task there is little
	 * guarantee it'll get a tick in a timely manner.
	 */
	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
		perf_log_throttle(event, 1);
		event->hw.interrupts = 0;
	}

	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (event->pmu->add(event, PERF_EF_START)) {
		event->state = PERF_EVENT_STATE_INACTIVE;
		event->oncpu = -1;
		return -EAGAIN;
	}

	event->tstamp_running += tstamp - event->tstamp_stopped;

	perf_set_shadow_time(event, ctx, tstamp);

	if (!is_software_event(event))
		cpuctx->active_oncpu++;
	ctx->nr_active++;
	if (event->attr.freq && event->attr.sample_freq)
		ctx->nr_freq++;

	if (event->attr.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}

static int
group_sched_in(struct perf_event *group_event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	struct perf_event *event, *partial_group = NULL;
	struct pmu *pmu = group_event->pmu;
	u64 now = ctx->time;
	bool simulate = false;

	if (group_event->state == PERF_EVENT_STATE_OFF)
		return 0;

	pmu->start_txn(pmu);

	if (event_sched_in(group_event, cpuctx, ctx)) {
		pmu->cancel_txn(pmu);
		return -EAGAIN;
	}

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event_sched_in(event, cpuctx, ctx)) {
			partial_group = event;
			goto group_error;
		}
	}

	if (!pmu->commit_txn(pmu))
		return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 * The events up to the failed event are scheduled out normally,
	 * tstamp_stopped will be updated.
	 *
	 * The failed events and the remaining siblings need to have
	 * their timings updated as if they had gone thru event_sched_in()
	 * and event_sched_out(). This is required to get consistent timings
	 * across the group. This also takes care of the case where the group
	 * could never be scheduled by ensuring tstamp_stopped is set to mark
	 * the time the event was actually stopped, such that time delta
	 * calculation in update_event_times() is correct.
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
			simulate = true;

		if (simulate) {
			event->tstamp_running += now - event->tstamp_stopped;
			event->tstamp_stopped = now;
		} else {
			event_sched_out(event, cpuctx, ctx);
		}
	}
	event_sched_out(group_event, cpuctx, ctx);

	pmu->cancel_txn(pmu);

	return -EAGAIN;
}

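/*
 * Illustrative sketch (not part of the original file): the ->start_txn(),
 * ->commit_txn() and ->cancel_txn() callbacks used above let a PMU batch
 * a whole group and validate it once. A hypothetical PMU without
 * cross-event scheduling constraints could implement them as:
 */
#if 0
static void example_pmu_start_txn(struct pmu *pmu)
{
	perf_pmu_disable(pmu);	/* batch the following ->add() calls */
}

static int example_pmu_commit_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
	return 0;		/* 0 == the whole group fits on the PMU */
}

static void example_pmu_cancel_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);	/* ->del() has already undone the adds */
}
#endif
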
/*
 * Work out whether we can put this event group on the CPU now.
 */
static int group_can_go_on(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software events can always go on.
	 */
	if (event->group_flags & PERF_GROUP_SOFTWARE)
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * events can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * events on the CPU, it can't go on.
	 */
	if (event->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

static void add_event_to_ctx(struct perf_event *event,
			       struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);

	list_add_event(event, ctx);
	perf_group_attach(event);
	event->tstamp_enabled = tstamp;
	event->tstamp_running = tstamp;
	event->tstamp_stopped = tstamp;
}

static void task_ctx_sched_out(struct perf_event_context *ctx);
static void
ctx_sched_in(struct perf_event_context *ctx,
	     struct perf_cpu_context *cpuctx,
	     enum event_type_t event_type,
	     struct task_struct *task);

static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
				struct perf_event_context *ctx,
				struct task_struct *task)
{
	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
}

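/*
 * Illustrative note (not in the original file): pinned groups are
 * scheduled in before flexible ones, cpu context before task context,
 * so a pinned event can never lose its PMU slot to a flexible
 * (rotating) one:
 *
 *	cpu pinned -> task pinned -> cpu flexible -> task flexible
 */
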
/*
 * Cross CPU call to install and enable a performance event
 *
 * Must be called with ctx->mutex held
 */
static int __perf_install_in_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	struct perf_event_context *task_ctx = cpuctx->task_ctx;
	struct task_struct *task = current;

	perf_ctx_lock(cpuctx, task_ctx);
	perf_pmu_disable(cpuctx->ctx.pmu);

	/*
	 * If there was an active task_ctx schedule it out.
	 */
	if (task_ctx)
		task_ctx_sched_out(task_ctx);

	/*
	 * If the context we're installing events in is not the
	 * active task_ctx, flip them.
	 */
	if (ctx->task && task_ctx != ctx) {
		if (task_ctx)
			raw_spin_unlock(&task_ctx->lock);
		raw_spin_lock(&ctx->lock);
		task_ctx = ctx;
	}

	if (task_ctx) {
		cpuctx->task_ctx = task_ctx;
		task = task_ctx->task;
	}

	cpu_ctx_sched_out(cpuctx, EVENT_ALL);

	update_context_time(ctx);
	/*
	 * update cgrp time only if current cgrp
	 * matches event->cgrp. Must be done before
	 * calling add_event_to_ctx()
	 */
	update_cgrp_time_from_event(event);

	add_event_to_ctx(event, ctx);

	/*
	 * Schedule everything back in
	 */
	perf_event_sched_in(cpuctx, task_ctx, task);

	perf_pmu_enable(cpuctx->ctx.pmu);
	perf_ctx_unlock(cpuctx, task_ctx);

	return 0;
}

/*
 * Attach a performance event to a context
 *
 * First we add the event to the list with the hardware enable bit
 * in event->hw_config cleared.
 *
 * If the event is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 */
static void
perf_install_in_context(struct perf_event_context *ctx,
			struct perf_event *event,
			int cpu)
{
	struct task_struct *task = ctx->task;

	lockdep_assert_held(&ctx->mutex);

	event->ctx = ctx;

	if (!task) {
		/*
		 * Per cpu events are installed via an smp call and
		 * the install is always successful.
		 */
		cpu_function_call(cpu, __perf_install_in_context, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_install_in_context, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If we failed to find a running task, but find the context active now
	 * that we've acquired the ctx->lock, retry.
	 */
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since the task isn't running, it's safe to add the event, us holding
	 * the ctx->lock ensures the task won't get scheduled in.
	 */
	add_event_to_ctx(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Put an event into inactive state and update time fields.
 * Enabling the leader of a group effectively enables all
 * the group members that aren't explicitly disabled, so we
 * have to update their ->tstamp_enabled also.
 * Note: this works for group members as well as group leaders
 * since the non-leader members' sibling_lists will be empty.
 */
static void __perf_event_mark_enabled(struct perf_event *event)
{
	struct perf_event *sub;
	u64 tstamp = perf_event_time(event);

	event->state = PERF_EVENT_STATE_INACTIVE;
	event->tstamp_enabled = tstamp - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
	}
}

/*
 * Cross CPU call to enable a performance event
 */
static int __perf_event_enable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	int err;

	if (WARN_ON_ONCE(!ctx->is_active))
		return -EINVAL;

	raw_spin_lock(&ctx->lock);
	update_context_time(ctx);

	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto unlock;

	/*
	 * set current task's cgroup time reference point
	 */
	perf_cgroup_set_timestamp(current, ctx);

	__perf_event_mark_enabled(event);

	if (!event_filter_match(event)) {
		if (is_cgroup_event(event))
			perf_cgroup_defer_enabled(event);
		goto unlock;
	}

	/*
	 * If the event is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(event, cpuctx, 1)) {
		err = -EEXIST;
	} else {
		if (event == leader)
			err = group_sched_in(event, cpuctx, ctx);
		else
			err = event_sched_in(event, cpuctx, ctx);
	}

	if (err) {
		/*
		 * If this event can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != event)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_EVENT_STATE_ERROR;
		}
	}

unlock:
	raw_spin_unlock(&ctx->lock);

	return 0;
}

/*
 * Enable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each as described
 * for perf_event_disable.
 */
void perf_event_enable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the event on the cpu that it's on
		 */
		cpu_function_call(event->cpu, __perf_event_enable, event);
		return;
	}

	raw_spin_lock_irq(&ctx->lock);
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto out;

	/*
	 * If the event is in error state, clear that first.
	 * That way, if we see the event in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		event->state = PERF_EVENT_STATE_OFF;

retry:
	if (!ctx->is_active) {
		__perf_event_mark_enabled(event);
		goto out;
	}

	raw_spin_unlock_irq(&ctx->lock);

	if (!task_function_call(task, __perf_event_enable, event))
		return;

	raw_spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the event is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
		/*
		 * task could have been flipped by a concurrent
		 * perf_event_context_sched_out()
		 */
		task = ctx->task;
		goto retry;
	}

out:
	raw_spin_unlock_irq(&ctx->lock);
}
EXPORT_SYMBOL_GPL(perf_event_enable);

int perf_event_refresh(struct perf_event *event, int refresh)
{
	/*
	 * not supported on inherited events
	 */
	if (event->attr.inherit || !is_sampling_event(event))
		return -EINVAL;

	atomic_add(refresh, &event->event_limit);
	perf_event_enable(event);

	return 0;
}
EXPORT_SYMBOL_GPL(perf_event_refresh);

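/*
 * Illustrative sketch (not part of the original file): a self-profiling
 * task can re-arm itself, e.g. from its overflow signal handler; each
 * overflow consumes one unit of event_limit and the event is disabled
 * when it reaches zero:
 */
#if 0
static void example_rearm_once(struct perf_event *event)
{
	perf_event_refresh(event, 1);	/* permit exactly one more overflow */
}
#endif
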
static void ctx_sched_out(struct perf_event_context *ctx,
			  struct perf_cpu_context *cpuctx,
			  enum event_type_t event_type)
{
	struct perf_event *event;
	int is_active = ctx->is_active;

	ctx->is_active &= ~event_type;
	if (likely(!ctx->nr_events))
		return;

	update_context_time(ctx);
	update_cgrp_time_from_cpuctx(cpuctx);
	if (!ctx->nr_active)
		return;

	perf_pmu_disable(ctx->pmu);
	if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);
	}

	if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);
	}
	perf_pmu_enable(ctx->pmu);
}

/*
 * Test whether two contexts are equivalent, i.e. whether they
 * have both been cloned from the same version of the same context
 * and they both have the same number of enabled events.
 * If the number of enabled events is the same, then the set
 * of enabled events should be the same, because these are both
 * inherited contexts, therefore we can't access individual events
 * in them directly with an fd; we can only enable/disable all
 * events via prctl, or enable/disable all events in a family
 * via ioctl, which will have the same effect on both contexts.
 */
static int context_equiv(struct perf_event_context *ctx1,
			 struct perf_event_context *ctx2)
{
	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
		&& ctx1->parent_gen == ctx2->parent_gen
		&& !ctx1->pin_count && !ctx2->pin_count;
}

static void __perf_event_sync_stat(struct perf_event *event,
				     struct perf_event *next_event)
{
	u64 value;

	if (!event->attr.inherit_stat)
		return;

	/*
	 * Update the event value, we cannot use perf_event_read()
	 * because we're in the middle of a context switch and have IRQs
	 * disabled, which upsets smp_call_function_single(), however
	 * we know the event must be on the current CPU, therefore we
	 * don't need to use it.
	 */
	switch (event->state) {
	case PERF_EVENT_STATE_ACTIVE:
		event->pmu->read(event);
		/* fall-through */

	case PERF_EVENT_STATE_INACTIVE:
		update_event_times(event);
		break;

	default:
		break;
	}

	/*
	 * In order to keep per-task stats reliable we need to flip the event
	 * values when we flip the contexts.
	 */
	value = local64_read(&next_event->count);
	value = local64_xchg(&event->count, value);
	local64_set(&next_event->count, value);

	swap(event->total_time_enabled, next_event->total_time_enabled);
	swap(event->total_time_running, next_event->total_time_running);

	/*
	 * Since we swizzled the values, update the user visible data too.
	 */
	perf_event_update_userpage(event);
	perf_event_update_userpage(next_event);
}

#define list_next_entry(pos, member) \
	list_entry(pos->member.next, typeof(*pos), member)

static void perf_event_sync_stat(struct perf_event_context *ctx,
				   struct perf_event_context *next_ctx)
{
	struct perf_event *event, *next_event;

	if (!ctx->nr_stat)
		return;

	update_context_time(ctx);

	event = list_first_entry(&ctx->event_list,
				   struct perf_event, event_entry);

	next_event = list_first_entry(&next_ctx->event_list,
					struct perf_event, event_entry);

	while (&event->event_entry != &ctx->event_list &&
	       &next_event->event_entry != &next_ctx->event_list) {

		__perf_event_sync_stat(event, next_event);

		event = list_next_entry(event, event_entry);
		next_event = list_next_entry(next_event, event_entry);
	}
}

static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
					 struct task_struct *next)
{
	struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
	struct perf_event_context *next_ctx;
	struct perf_event_context *parent;
	struct perf_cpu_context *cpuctx;
	int do_switch = 1;

	if (likely(!ctx))
		return;

	cpuctx = __get_cpu_context(ctx);
	if (!cpuctx->task_ctx)
		return;

	rcu_read_lock();
	parent = rcu_dereference(ctx->parent_ctx);
	next_ctx = next->perf_event_ctxp[ctxn];
	if (parent && next_ctx &&
	    rcu_dereference(next_ctx->parent_ctx) == parent) {
		/*
		 * Looks like the two contexts are clones, so we might be
		 * able to optimize the context switch.  We lock both
		 * contexts and check that they are clones under the
		 * lock (including re-checking that neither has been
		 * uncloned in the meantime).  It doesn't matter which
		 * order we take the locks because no other cpu could
		 * be trying to lock both of these tasks.
		 */
		raw_spin_lock(&ctx->lock);
		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
		if (context_equiv(ctx, next_ctx)) {
			/*
			 * XXX do we need a memory barrier of sorts
			 * wrt to rcu_dereference() of perf_event_ctxp
			 */
			task->perf_event_ctxp[ctxn] = next_ctx;
			next->perf_event_ctxp[ctxn] = ctx;
			ctx->task = next;
			next_ctx->task = task;
			do_switch = 0;

			perf_event_sync_stat(ctx, next_ctx);
		}
		raw_spin_unlock(&next_ctx->lock);
		raw_spin_unlock(&ctx->lock);
	}
	rcu_read_unlock();

	if (do_switch) {
		raw_spin_lock(&ctx->lock);
		ctx_sched_out(ctx, cpuctx, EVENT_ALL);
		cpuctx->task_ctx = NULL;
		raw_spin_unlock(&ctx->lock);
	}
}

#define for_each_task_context_nr(ctxn)					\
	for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)

/*
 * Called from scheduler to remove the events of the current task,
 * with interrupts disabled.
 *
 * We stop each event and update the event value in event->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of event _before_
 * accessing the event control register. If a NMI hits, then it will
 * not restart the event.
 */
void __perf_event_task_sched_out(struct task_struct *task,
				 struct task_struct *next)
{
	int ctxn;

	for_each_task_context_nr(ctxn)
		perf_event_context_sched_out(task, ctxn, next);

	/*
	 * if cgroup events exist on this CPU, then we need
	 * to check if we have to switch out PMU state.
	 * cgroup events are system-wide mode only
	 */
	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
		perf_cgroup_sched_out(task, next);
}

static void task_ctx_sched_out(struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	if (!cpuctx->task_ctx)
		return;

	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
		return;

	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
	cpuctx->task_ctx = NULL;
}

/*
 * Called with IRQs disabled
 */
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type)
{
	ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
}

static void
ctx_pinned_sched_in(struct perf_event_context *ctx,
		    struct perf_cpu_context *cpuctx)
{
	struct perf_event *event;

	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
		if (event->state <= PERF_EVENT_STATE_OFF)
			continue;
		if (!event_filter_match(event))
			continue;

		/* may need to reset tstamp_enabled */
		if (is_cgroup_event(event))
			perf_cgroup_mark_enabled(event, ctx);

		if (group_can_go_on(event, cpuctx, 1))
			group_sched_in(event, cpuctx, ctx);

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (event->state == PERF_EVENT_STATE_INACTIVE) {
			update_group_times(event);
			event->state = PERF_EVENT_STATE_ERROR;
		}
	}
}

static void
ctx_flexible_sched_in(struct perf_event_context *ctx,
		      struct perf_cpu_context *cpuctx)
{
	struct perf_event *event;
	int can_add_hw = 1;

	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
		/* Ignore events in OFF or ERROR state */
		if (event->state <= PERF_EVENT_STATE_OFF)
			continue;
		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of events:
		 */
		if (!event_filter_match(event))
			continue;

		/* may need to reset tstamp_enabled */
		if (is_cgroup_event(event))
			perf_cgroup_mark_enabled(event, ctx);

		if (group_can_go_on(event, cpuctx, can_add_hw)) {
			if (group_sched_in(event, cpuctx, ctx))
				can_add_hw = 0;
		}
	}
}

static void
ctx_sched_in(struct perf_event_context *ctx,
	     struct perf_cpu_context *cpuctx,
	     enum event_type_t event_type,
	     struct task_struct *task)
{
	u64 now;
	int is_active = ctx->is_active;

	ctx->is_active |= event_type;
	if (likely(!ctx->nr_events))
		return;

	now = perf_clock();
	ctx->timestamp = now;
	perf_cgroup_set_timestamp(task, ctx);
	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
		ctx_pinned_sched_in(ctx, cpuctx);

	/* Then walk through the lower prio flexible groups */
	if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
		ctx_flexible_sched_in(ctx, cpuctx);
}

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type,
			     struct task_struct *task)
{
	struct perf_event_context *ctx = &cpuctx->ctx;

	ctx_sched_in(ctx, cpuctx, event_type, task);
}

static void perf_event_context_sched_in(struct perf_event_context *ctx,
					struct task_struct *task)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = __get_cpu_context(ctx);
	if (cpuctx->task_ctx == ctx)
		return;

	perf_ctx_lock(cpuctx, ctx);
	perf_pmu_disable(ctx->pmu);
	/*
	 * We want to keep the following priority order:
	 * cpu pinned (that don't need to move), task pinned,
	 * cpu flexible, task flexible.
	 */
	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);

	if (ctx->nr_events)
		cpuctx->task_ctx = ctx;

	perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);

	perf_pmu_enable(ctx->pmu);
	perf_ctx_unlock(cpuctx, ctx);

	/*
	 * Since these rotations are per-cpu, we need to ensure the
	 * cpu-context we got scheduled on is actually rotating.
	 */
	perf_pmu_rotate_start(ctx->pmu);
}

/*
 * When sampling the branch stack in system-wide mode, it may be necessary
 * to flush the stack on context switch. This happens when the branch
 * stack does not tag its entries with the pid of the current task.
 * Otherwise it becomes impossible to associate a branch entry with a
 * task. This ambiguity is more likely to appear when the branch stack
 * supports priv level filtering and the user sets it to monitor only
 * at the user level (which could be a useful measurement in system-wide
 * mode). In that case, the risk is high of having a branch stack with
 * branches from multiple tasks. Flushing may mean dropping the existing
 * entries or stashing them somewhere in the PMU specific code layer.
 *
 * This function provides the context switch callback to the lower code
 * layer. It is invoked ONLY when there is at least one system-wide context
 * with at least one active event using taken branch sampling.
 */
static void perf_branch_stack_sched_in(struct task_struct *prev,
				       struct task_struct *task)
{
	struct perf_cpu_context *cpuctx;
	struct pmu *pmu;
	unsigned long flags;

	/* no need to flush branch stack if not changing task */
	if (prev == task)
		return;

	local_irq_save(flags);

	rcu_read_lock();

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

		/*
		 * check if the context has at least one
		 * event using PERF_SAMPLE_BRANCH_STACK
		 */
		if (cpuctx->ctx.nr_branch_stack > 0
		    && pmu->flush_branch_stack) {

			pmu = cpuctx->ctx.pmu;

			perf_ctx_lock(cpuctx, cpuctx->task_ctx);

			perf_pmu_disable(pmu);

			pmu->flush_branch_stack();

			perf_pmu_enable(pmu);

			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
		}
	}

	rcu_read_unlock();

	local_irq_restore(flags);
}

/*
 * Called from scheduler to add the events of the current task
 * with interrupts disabled.
 *
 * We restore the event value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of event _before_
 * accessing the event control register. If a NMI hits, then it will
 * keep the event running.
 */
void __perf_event_task_sched_in(struct task_struct *prev,
				struct task_struct *task)
{
	struct perf_event_context *ctx;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (likely(!ctx))
			continue;

		perf_event_context_sched_in(ctx, task);
	}
	/*
	 * if cgroup events exist on this CPU, then we need
	 * to check if we have to switch in PMU state.
	 * cgroup events are system-wide mode only
	 */
	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
		perf_cgroup_sched_in(prev, task);

	/* check for system-wide branch_stack events */
	if (atomic_read(&__get_cpu_var(perf_branch_stack_events)))
		perf_branch_stack_sched_in(prev, task);
}

static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
{
	u64 frequency = event->attr.sample_freq;
	u64 sec = NSEC_PER_SEC;
	u64 divisor, dividend;

	int count_fls, nsec_fls, frequency_fls, sec_fls;

	count_fls = fls64(count);
	nsec_fls = fls64(nsec);
	frequency_fls = fls64(frequency);
	sec_fls = 30;

	/*
	 * We got @count in @nsec, with a target of sample_freq HZ
	 * the target period becomes:
	 *
	 *             @count * 10^9
	 * period = -------------------
	 *          @nsec * sample_freq
	 *
	 */

	/*
	 * Reduce accuracy by one bit such that @a and @b converge
	 * to a similar magnitude.
	 */
#define REDUCE_FLS(a, b)		\
do {					\
	if (a##_fls > b##_fls) {	\
		a >>= 1;		\
		a##_fls--;		\
	} else {			\
		b >>= 1;		\
		b##_fls--;		\
	}				\
} while (0)

	/*
	 * Reduce accuracy until either term fits in a u64, then proceed with
	 * the other, so that finally we can do a u64/u64 division.
	 */
	while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
		REDUCE_FLS(nsec, frequency);
		REDUCE_FLS(sec, count);
	}

	if (count_fls + sec_fls > 64) {
		divisor = nsec * frequency;

		while (count_fls + sec_fls > 64) {
			REDUCE_FLS(count, sec);
			divisor >>= 1;
		}

		dividend = count * sec;
	} else {
		dividend = count * sec;

		while (nsec_fls + frequency_fls > 64) {
			REDUCE_FLS(nsec, frequency);
			dividend >>= 1;
		}

		divisor = nsec * frequency;
	}

	if (!divisor)
		return dividend;

	return div64_u64(dividend, divisor);
}
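
/*
 * Worked example (illustrative): if an event counted 2,000,000 events
 * over a 10ms tick (nsec = 10^7) with attr.sample_freq = 1000, then
 *
 *	period = (2 * 10^6 * 10^9) / (10^7 * 1000) = 200,000
 *
 * i.e. one sample every 200,000 events yields ~1000 samples/sec at the
 * observed event rate.  The REDUCE_FLS() dance above only drops low
 * bits when the intermediate products would overflow 64 bits.
 */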

static DEFINE_PER_CPU(int, perf_throttled_count);
static DEFINE_PER_CPU(u64, perf_throttled_seq);

static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 period, sample_period;
	s64 delta;

	period = perf_calculate_period(event, nsec, count);

	delta = (s64)(period - hwc->sample_period);
	delta = (delta + 7) / 8; /* low pass filter */

	sample_period = hwc->sample_period + delta;

	if (!sample_period)
		sample_period = 1;

	hwc->sample_period = sample_period;

	if (local64_read(&hwc->period_left) > 8*sample_period) {
		if (disable)
			event->pmu->stop(event, PERF_EF_UPDATE);

		local64_set(&hwc->period_left, 0);

		if (disable)
			event->pmu->start(event, PERF_EF_RELOAD);
	}
}
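
/*
 * Worked example for the low pass filter above (illustrative): with
 * hwc->sample_period = 100,000 and a newly computed period of 180,000,
 * delta = 80,000 and delta/8 = 10,000, so the period only moves to
 * 110,000 on this tick.  Repeated ticks converge geometrically on the
 * target, which damps the reaction to short bursts in the event rate.
 */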

/*
 * combine freq adjustment with unthrottling to avoid two passes over the
 * events. At the same time, make sure that having freq events does not change
 * the rate of unthrottling as that would introduce bias.
 */
static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
					   int needs_unthr)
{
	struct perf_event *event;
	struct hw_perf_event *hwc;
	u64 now, period = TICK_NSEC;
	s64 delta;

	/*
	 * only need to iterate over all events iff:
	 * - context has events in frequency mode (needs freq adjust)
	 * - there are events to unthrottle on this cpu
	 */
	if (!(ctx->nr_freq || needs_unthr))
		return;

	raw_spin_lock(&ctx->lock);
	perf_pmu_disable(ctx->pmu);

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (event->state != PERF_EVENT_STATE_ACTIVE)
			continue;

		if (!event_filter_match(event))
			continue;

		hwc = &event->hw;

		if (needs_unthr && hwc->interrupts == MAX_INTERRUPTS) {
			hwc->interrupts = 0;
			perf_log_throttle(event, 1);
			event->pmu->start(event, 0);
		}

		if (!event->attr.freq || !event->attr.sample_freq)
			continue;

		/*
		 * stop the event and update event->count
		 */
		event->pmu->stop(event, PERF_EF_UPDATE);

		now = local64_read(&event->count);
		delta = now - hwc->freq_count_stamp;
		hwc->freq_count_stamp = now;

		/*
		 * restart the event
		 * reload only if value has changed
		 * we have stopped the event so tell that
		 * to perf_adjust_period() to avoid stopping it
		 * twice.
		 */
		if (delta > 0)
			perf_adjust_period(event, period, delta, false);

		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
	}

	perf_pmu_enable(ctx->pmu);
	raw_spin_unlock(&ctx->lock);
}

/*
 * Round-robin a context's events:
 */
static void rotate_ctx(struct perf_event_context *ctx)
{
	/*
	 * Rotate the first entry of the non-pinned groups to the end.
	 * Rotation might be disabled by the inheritance code.
	 */
	if (!ctx->rotate_disable)
		list_rotate_left(&ctx->flexible_groups);
}

/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 * because they're strictly cpu affine and rotate_start is called with IRQs
 * disabled, while rotate_context is called from IRQ context.
 */
static void perf_rotate_context(struct perf_cpu_context *cpuctx)
{
	struct perf_event_context *ctx = NULL;
	int rotate = 0, remove = 1;

	if (cpuctx->ctx.nr_events) {
		remove = 0;
		if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
			rotate = 1;
	}

	ctx = cpuctx->task_ctx;
	if (ctx && ctx->nr_events) {
		remove = 0;
		if (ctx->nr_events != ctx->nr_active)
			rotate = 1;
	}

	if (!rotate)
		goto done;

	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
	perf_pmu_disable(cpuctx->ctx.pmu);

	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
	if (ctx)
		ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);

	rotate_ctx(&cpuctx->ctx);
	if (ctx)
		rotate_ctx(ctx);

	perf_event_sched_in(cpuctx, ctx, current);

	perf_pmu_enable(cpuctx->ctx.pmu);
	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
done:
	if (remove)
		list_del_init(&cpuctx->rotation_list);
}

void perf_event_task_tick(void)
{
	struct list_head *head = &__get_cpu_var(rotation_list);
	struct perf_cpu_context *cpuctx, *tmp;
	struct perf_event_context *ctx;
	int throttled;

	WARN_ON(!irqs_disabled());

	__this_cpu_inc(perf_throttled_seq);
	throttled = __this_cpu_xchg(perf_throttled_count, 0);

	list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
		ctx = &cpuctx->ctx;
		perf_adjust_freq_unthr_context(ctx, throttled);

		ctx = cpuctx->task_ctx;
		if (ctx)
			perf_adjust_freq_unthr_context(ctx, throttled);

		if (cpuctx->jiffies_interval == 1 ||
				!(jiffies % cpuctx->jiffies_interval))
			perf_rotate_context(cpuctx);
	}
}

static int event_enable_on_exec(struct perf_event *event,
				struct perf_event_context *ctx)
{
	if (!event->attr.enable_on_exec)
		return 0;

	event->attr.enable_on_exec = 0;
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		return 0;

	__perf_event_mark_enabled(event);

	return 1;
}

/*
 * Enable all of a task's events that have been marked enable-on-exec.
 * This expects task == current.
 */
static void perf_event_enable_on_exec(struct perf_event_context *ctx)
{
	struct perf_event *event;
	unsigned long flags;
	int enabled = 0;
	int ret;

	local_irq_save(flags);
	if (!ctx || !ctx->nr_events)
		goto out;

	/*
	 * We must ctxsw out cgroup events to avoid conflict
	 * when invoking perf_task_event_sched_in() later on
	 * in this function. Otherwise we end up trying to
	 * ctxswin cgroup events which are already scheduled
	 * in.
	 */
	perf_cgroup_sched_out(current, NULL);

	raw_spin_lock(&ctx->lock);
	task_ctx_sched_out(ctx);

	list_for_each_entry(event, &ctx->event_list, event_entry) {
		ret = event_enable_on_exec(event, ctx);
		if (ret)
			enabled = 1;
	}

	/*
	 * Unclone this context if we enabled any event.
	 */
	if (enabled)
		unclone_ctx(ctx);

2620
	raw_spin_unlock(&ctx->lock);

	/*
	 * Also calls ctxswin for cgroup events, if any:
	 */
	perf_event_context_sched_in(ctx, ctx->task);
out:
	local_irq_restore(flags);
}

/*
 * Cross CPU call to read the hardware event
 */
static void __perf_event_read(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu.  If not it has been
	 * scheduled out before the smp call arrived.  In that case
	 * event->count would have been updated to a recent sample
	 * when the event was scheduled out.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	raw_spin_lock(&ctx->lock);
	if (ctx->is_active) {
		update_context_time(ctx);
		update_cgrp_time_from_event(event);
	}
	update_event_times(event);
	if (event->state == PERF_EVENT_STATE_ACTIVE)
		event->pmu->read(event);
	raw_spin_unlock(&ctx->lock);
}

static inline u64 perf_event_count(struct perf_event *event)
{
	return local64_read(&event->count) + atomic64_read(&event->child_count);
}

static u64 perf_event_read(struct perf_event *event)
{
	/*
	 * If event is enabled and currently active on a CPU, update the
	 * value in the event structure:
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		smp_call_function_single(event->oncpu,
					 __perf_event_read, event, 1);
	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
		struct perf_event_context *ctx = event->ctx;
		unsigned long flags;

		raw_spin_lock_irqsave(&ctx->lock, flags);
		/*
		 * may read while context is not active
		 * (e.g., thread is blocked), in that case
		 * we cannot update context time
		 */
		if (ctx->is_active) {
			update_context_time(ctx);
			update_cgrp_time_from_event(event);
		}
		update_event_times(event);
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}

	return perf_event_count(event);
}

/*
 * Initialize the perf_event context in a task_struct:
 */
static void __perf_event_init_context(struct perf_event_context *ctx)
{
	raw_spin_lock_init(&ctx->lock);
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->pinned_groups);
	INIT_LIST_HEAD(&ctx->flexible_groups);
	INIT_LIST_HEAD(&ctx->event_list);
	atomic_set(&ctx->refcount, 1);
}

static struct perf_event_context *
alloc_perf_context(struct pmu *pmu, struct task_struct *task)
{
	struct perf_event_context *ctx;

	ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
	if (!ctx)
		return NULL;

	__perf_event_init_context(ctx);
	if (task) {
		ctx->task = task;
		get_task_struct(task);
	}
	ctx->pmu = pmu;

	return ctx;
}

static struct task_struct *
find_lively_task_by_vpid(pid_t vpid)
{
	struct task_struct *task;
	int err;

	rcu_read_lock();
	if (!vpid)
		task = current;
	else
		task = find_task_by_vpid(vpid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	if (!task)
		return ERR_PTR(-ESRCH);

	/* Reuse ptrace permission checks for now. */
	err = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto errout;

	return task;
errout:
	put_task_struct(task);
	return ERR_PTR(err);

}

/*
 * Returns a matching context with refcount and pincount.
 */
static struct perf_event_context *
find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
{
	struct perf_event_context *ctx;
	struct perf_cpu_context *cpuctx;
	unsigned long flags;
	int ctxn, err;

	if (!task) {
		/* Must be root to operate on a CPU event: */
		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);

		/*
		 * We could be clever and allow attaching an event to an
		 * offline CPU and activate it when the CPU comes up, but
		 * that's for later.
		 */
		if (!cpu_online(cpu))
			return ERR_PTR(-ENODEV);

		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
		ctx = &cpuctx->ctx;
		get_ctx(ctx);
		++ctx->pin_count;

		return ctx;
	}

	err = -EINVAL;
	ctxn = pmu->task_ctx_nr;
	if (ctxn < 0)
		goto errout;

retry:
	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		unclone_ctx(ctx);
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	} else {
		ctx = alloc_perf_context(pmu, task);
		err = -ENOMEM;
		if (!ctx)
			goto errout;

		err = 0;
		mutex_lock(&task->perf_event_mutex);
		/*
		 * If it has already passed perf_event_exit_task(),
		 * we must see PF_EXITING, it takes this mutex too.
		 */
		if (task->flags & PF_EXITING)
			err = -ESRCH;
		else if (task->perf_event_ctxp[ctxn])
			err = -EAGAIN;
		else {
			get_ctx(ctx);
			++ctx->pin_count;
			rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
		}
		mutex_unlock(&task->perf_event_mutex);

		if (unlikely(err)) {
			put_ctx(ctx);

			if (err == -EAGAIN)
				goto retry;
			goto errout;
		}
	}

	return ctx;

errout:
	return ERR_PTR(err);
}

static void perf_event_free_filter(struct perf_event *event);

static void free_event_rcu(struct rcu_head *head)
{
	struct perf_event *event;

	event = container_of(head, struct perf_event, rcu_head);
	if (event->ns)
		put_pid_ns(event->ns);
	perf_event_free_filter(event);
	kfree(event);
}

static void ring_buffer_put(struct ring_buffer *rb);

static void free_event(struct perf_event *event)
{
	irq_work_sync(&event->pending);

	if (!event->parent) {
		if (event->attach_state & PERF_ATTACH_TASK)
			static_key_slow_dec_deferred(&perf_sched_events);
		if (event->attr.mmap || event->attr.mmap_data)
			atomic_dec(&nr_mmap_events);
		if (event->attr.comm)
			atomic_dec(&nr_comm_events);
		if (event->attr.task)
			atomic_dec(&nr_task_events);
		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
			put_callchain_buffers();
		if (is_cgroup_event(event)) {
			atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
			static_key_slow_dec_deferred(&perf_sched_events);
		}

		if (has_branch_stack(event)) {
			static_key_slow_dec_deferred(&perf_sched_events);
			/* is system-wide event */
			if (!(event->attach_state & PERF_ATTACH_TASK))
				atomic_dec(&per_cpu(perf_branch_stack_events,
						    event->cpu));
		}
	}

	if (event->rb) {
		ring_buffer_put(event->rb);
		event->rb = NULL;
	}

	if (is_cgroup_event(event))
		perf_detach_cgroup(event);

	if (event->destroy)
		event->destroy(event);

	if (event->ctx)
		put_ctx(event->ctx);

	call_rcu(&event->rcu_head, free_event_rcu);
}

int perf_event_release_kernel(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	WARN_ON_ONCE(ctx->parent_ctx);
	/*
	 * There are two ways this annotation is useful:
	 *
	 *  1) there is a lock recursion from perf_event_exit_task
	 *     see the comment there.
	 *
	 *  2) there is a lock-inversion with mmap_sem through
	 *     perf_event_read_group(), which takes faults while
	 *     holding ctx->mutex, however this is called after
	 *     the last filedesc died, so there is no possibility
	 *     to trigger the AB-BA case.
	 */
	mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
	raw_spin_lock_irq(&ctx->lock);
	perf_group_detach(event);
	raw_spin_unlock_irq(&ctx->lock);
	perf_remove_from_context(event);
	mutex_unlock(&ctx->mutex);

	free_event(event);

	return 0;
}
EXPORT_SYMBOL_GPL(perf_event_release_kernel);

/*
 * Called when the last reference to the file is gone.
 */
static int perf_release(struct inode *inode, struct file *file)
{
	struct perf_event *event = file->private_data;
	struct task_struct *owner;

	file->private_data = NULL;

	rcu_read_lock();
	owner = ACCESS_ONCE(event->owner);
	/*
	 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
	 * !owner it means the list deletion is complete and we can indeed
	 * free this event, otherwise we need to serialize on
	 * owner->perf_event_mutex.
	 */
	smp_read_barrier_depends();
	if (owner) {
		/*
		 * Since delayed_put_task_struct() also drops the last
		 * task reference we can safely take a new reference
		 * while holding the rcu_read_lock().
		 */
		get_task_struct(owner);
	}
	rcu_read_unlock();

	if (owner) {
		mutex_lock(&owner->perf_event_mutex);
		/*
		 * We have to re-check the event->owner field, if it is cleared
		 * we raced with perf_event_exit_task(), acquiring the mutex
		 * ensured they're done, and we can proceed with freeing the
		 * event.
		 */
		if (event->owner)
			list_del_init(&event->owner_entry);
		mutex_unlock(&owner->perf_event_mutex);
		put_task_struct(owner);
	}

	return perf_event_release_kernel(event);
}

u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
{
	struct perf_event *child;
	u64 total = 0;

	*enabled = 0;
	*running = 0;

	mutex_lock(&event->child_mutex);
	total += perf_event_read(event);
	*enabled += event->total_time_enabled +
			atomic64_read(&event->child_total_time_enabled);
	*running += event->total_time_running +
			atomic64_read(&event->child_total_time_running);

	list_for_each_entry(child, &event->child_list, child_list) {
		total += perf_event_read(child);
		*enabled += child->total_time_enabled;
		*running += child->total_time_running;
	}
	mutex_unlock(&event->child_mutex);

	return total;
}
EXPORT_SYMBOL_GPL(perf_event_read_value);
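
/*
 * Usage sketch (illustrative) for kernel users of this exported helper;
 * @event here is assumed to come from perf_event_create_kernel_counter():
 *
 *	u64 enabled, running;
 *	u64 count = perf_event_read_value(event, &enabled, &running);
 *
 * When the event was multiplexed (running < enabled), callers commonly
 * scale the result as count * enabled / running to estimate the total.
 */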

static int perf_event_read_group(struct perf_event *event,
				   u64 read_format, char __user *buf)
{
	struct perf_event *leader = event->group_leader, *sub;
	int n = 0, size = 0, ret = -EFAULT;
	struct perf_event_context *ctx = leader->ctx;
	u64 values[5];
	u64 count, enabled, running;

	mutex_lock(&ctx->mutex);
	count = perf_event_read_value(leader, &enabled, &running);

	values[n++] = 1 + leader->nr_siblings;
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;
	values[n++] = count;
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(leader);

	size = n * sizeof(u64);

	if (copy_to_user(buf, values, size))
		goto unlock;

	ret = size;

	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		n = 0;

		values[n++] = perf_event_read_value(sub, &enabled, &running);
		if (read_format & PERF_FORMAT_ID)
			values[n++] = primary_event_id(sub);

		size = n * sizeof(u64);

		if (copy_to_user(buf + ret, values, size)) {
			ret = -EFAULT;
			goto unlock;
		}

		ret += size;
	}
unlock:
	mutex_unlock(&ctx->mutex);

	return ret;
}

static int perf_event_read_one(struct perf_event *event,
				 u64 read_format, char __user *buf)
{
	u64 enabled, running;
	u64 values[4];
	int n = 0;

	values[n++] = perf_event_read_value(event, &enabled, &running);
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(event);

	if (copy_to_user(buf, values, n * sizeof(u64)))
		return -EFAULT;

	return n * sizeof(u64);
}
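
/*
 * Layout sketch for the buffer returned by read() on a single (non-group)
 * event, matching perf_event_read_one() above.  Field order follows the
 * read_format bits requested at event creation:
 *
 *	struct read_one {
 *		u64 value;		// always present
 *		u64 time_enabled;	// if PERF_FORMAT_TOTAL_TIME_ENABLED
 *		u64 time_running;	// if PERF_FORMAT_TOTAL_TIME_RUNNING
 *		u64 id;			// if PERF_FORMAT_ID
 *	};
 *
 * A group read (PERF_FORMAT_GROUP) instead starts with the member count
 * followed by per-member values, as built in perf_event_read_group().
 */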

/*
 * Read the performance event - simple non blocking version for now
 */
static ssize_t
perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
{
	u64 read_format = event->attr.read_format;
	int ret;

	/*
	 * Return end-of-file for a read on an event that is in
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled on to the CPU at some point).
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		return 0;

	if (count < event->read_size)
		return -ENOSPC;

	WARN_ON_ONCE(event->ctx->parent_ctx);
	if (read_format & PERF_FORMAT_GROUP)
		ret = perf_event_read_group(event, read_format, buf);
	else
		ret = perf_event_read_one(event, read_format, buf);

	return ret;
}

static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct perf_event *event = file->private_data;

	return perf_read_hw(event, buf, count);
}

static unsigned int perf_poll(struct file *file, poll_table *wait)
{
	struct perf_event *event = file->private_data;
	struct ring_buffer *rb;
	unsigned int events = POLL_HUP;

	/*
	 * Race between perf_event_set_output() and perf_poll(): perf_poll()
	 * grabs the rb reference but perf_event_set_output() overrides it.
	 * Here is the timeline for two threads T1, T2:
	 * t0: T1, rb = rcu_dereference(event->rb)
	 * t1: T2, old_rb = event->rb
	 * t2: T2, event->rb = new rb
	 * t3: T2, ring_buffer_detach(old_rb)
	 * t4: T1, ring_buffer_attach(rb1)
	 * t5: T1, poll_wait(event->waitq)
	 *
	 * To avoid this problem, we grab mmap_mutex in perf_poll()
	 * thereby ensuring that the assignment of the new ring buffer
	 * and the detachment of the old buffer appear atomic to perf_poll()
	 */
	mutex_lock(&event->mmap_mutex);

	rcu_read_lock();
	rb = rcu_dereference(event->rb);
	if (rb) {
		ring_buffer_attach(event, rb);
		events = atomic_xchg(&rb->poll, 0);
	}
	rcu_read_unlock();

	mutex_unlock(&event->mmap_mutex);

	poll_wait(file, &event->waitq, wait);

	return events;
}

static void perf_event_reset(struct perf_event *event)
{
	(void)perf_event_read(event);
	local64_set(&event->count, 0);
	perf_event_update_userpage(event);
}

/*
 * Holding the top-level event's child_mutex means that any
 * descendant process that has inherited this event will block
 * in sync_child_event if it goes to exit, thus satisfying the
 * task existence requirements of perf_event_enable/disable.
 */
static void perf_event_for_each_child(struct perf_event *event,
					void (*func)(struct perf_event *))
{
	struct perf_event *child;

	WARN_ON_ONCE(event->ctx->parent_ctx);
	mutex_lock(&event->child_mutex);
	func(event);
	list_for_each_entry(child, &event->child_list, child_list)
		func(child);
	mutex_unlock(&event->child_mutex);
}

static void perf_event_for_each(struct perf_event *event,
				  void (*func)(struct perf_event *))
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *sibling;

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	event = event->group_leader;

	perf_event_for_each_child(event, func);
	list_for_each_entry(sibling, &event->sibling_list, group_entry)
		perf_event_for_each_child(sibling, func);
	mutex_unlock(&ctx->mutex);
}

static int perf_event_period(struct perf_event *event, u64 __user *arg)
{
	struct perf_event_context *ctx = event->ctx;
	int ret = 0;
	u64 value;

	if (!is_sampling_event(event))
		return -EINVAL;

	if (copy_from_user(&value, arg, sizeof(value)))
		return -EFAULT;

	if (!value)
		return -EINVAL;

	raw_spin_lock_irq(&ctx->lock);
	if (event->attr.freq) {
		if (value > sysctl_perf_event_sample_rate) {
			ret = -EINVAL;
			goto unlock;
		}

		event->attr.sample_freq = value;
	} else {
		event->attr.sample_period = value;
		event->hw.sample_period = value;
	}
unlock:
	raw_spin_unlock_irq(&ctx->lock);

	return ret;
}
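
/*
 * User-space usage sketch (illustrative): adjust the sampling period of
 * an open perf event fd at runtime without re-creating the event.
 *
 *	u64 new_period = 100000;
 *	if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &new_period) < 0)
 *		perror("PERF_EVENT_IOC_PERIOD");
 *
 * For events created with attr.freq = 1 the value is interpreted as a
 * frequency instead, as handled above.
 */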

static const struct file_operations perf_fops;

static struct perf_event *perf_fget_light(int fd, int *fput_needed)
{
	struct file *file;

	file = fget_light(fd, fput_needed);
	if (!file)
		return ERR_PTR(-EBADF);

	if (file->f_op != &perf_fops) {
		fput_light(file, *fput_needed);
		*fput_needed = 0;
		return ERR_PTR(-EBADF);
	}

	return file->private_data;
}

static int perf_event_set_output(struct perf_event *event,
				 struct perf_event *output_event);
static int perf_event_set_filter(struct perf_event *event, void __user *arg);

static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct perf_event *event = file->private_data;
	void (*func)(struct perf_event *);
	u32 flags = arg;

	switch (cmd) {
	case PERF_EVENT_IOC_ENABLE:
		func = perf_event_enable;
		break;
	case PERF_EVENT_IOC_DISABLE:
		func = perf_event_disable;
		break;
	case PERF_EVENT_IOC_RESET:
		func = perf_event_reset;
		break;

	case PERF_EVENT_IOC_REFRESH:
		return perf_event_refresh(event, arg);

	case PERF_EVENT_IOC_PERIOD:
		return perf_event_period(event, (u64 __user *)arg);

	case PERF_EVENT_IOC_SET_OUTPUT:
	{
		struct perf_event *output_event = NULL;
		int fput_needed = 0;
		int ret;

		if (arg != -1) {
			output_event = perf_fget_light(arg, &fput_needed);
			if (IS_ERR(output_event))
				return PTR_ERR(output_event);
		}

		ret = perf_event_set_output(event, output_event);
		if (output_event)
			fput_light(output_event->filp, fput_needed);

		return ret;
	}

	case PERF_EVENT_IOC_SET_FILTER:
		return perf_event_set_filter(event, (void __user *)arg);

	default:
		return -ENOTTY;
	}

	if (flags & PERF_IOC_FLAG_GROUP)
		perf_event_for_each(event, func);
	else
		perf_event_for_each_child(event, func);

	return 0;
}
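
/*
 * User-space usage sketch (illustrative): toggle a whole event group
 * atomically by passing PERF_IOC_FLAG_GROUP on the group leader's fd.
 *
 *	ioctl(group_fd, PERF_EVENT_IOC_RESET,   PERF_IOC_FLAG_GROUP);
 *	ioctl(group_fd, PERF_EVENT_IOC_ENABLE,  PERF_IOC_FLAG_GROUP);
 *	// ... run the workload under measurement ...
 *	ioctl(group_fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
 */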

int perf_event_task_enable(void)
{
	struct perf_event *event;

	mutex_lock(&current->perf_event_mutex);
	list_for_each_entry(event, &current->perf_event_list, owner_entry)
		perf_event_for_each_child(event, perf_event_enable);
	mutex_unlock(&current->perf_event_mutex);

	return 0;
}

int perf_event_task_disable(void)
{
	struct perf_event *event;

	mutex_lock(&current->perf_event_mutex);
	list_for_each_entry(event, &current->perf_event_list, owner_entry)
		perf_event_for_each_child(event, perf_event_disable);
	mutex_unlock(&current->perf_event_mutex);

	return 0;
}
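
/*
 * Usage sketch (illustrative): these two functions back the prctl() pair
 * that toggles every counter owned by the calling task at once:
 *
 *	prctl(PR_TASK_PERF_EVENTS_DISABLE, 0, 0, 0, 0);
 *	// ... code excluded from measurement ...
 *	prctl(PR_TASK_PERF_EVENTS_ENABLE, 0, 0, 0, 0);
 */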

static int perf_event_index(struct perf_event *event)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return 0;

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return 0;

	return event->pmu->event_idx(event);
}

static void calc_timer_values(struct perf_event *event,
				u64 *now,
				u64 *enabled,
				u64 *running)
{
	u64 ctx_time;

	*now = perf_clock();
	ctx_time = event->shadow_ctx_time + *now;
	*enabled = ctx_time - event->tstamp_enabled;
	*running = ctx_time - event->tstamp_running;
}

void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
{
}

/*
 * Callers need to ensure there can be no nesting of this function, otherwise
 * the seqlock logic goes bad. We can not serialize this because the arch
 * code calls this from NMI context.
 */
void perf_event_update_userpage(struct perf_event *event)
{
	struct perf_event_mmap_page *userpg;
	struct ring_buffer *rb;
	u64 enabled, running, now;

	rcu_read_lock();
	/*
	 * compute total_time_enabled, total_time_running
	 * based on snapshot values taken when the event
	 * was last scheduled in.
	 *
	 * we cannot simply call update_context_time()
	 * because of locking issues: we can be called in
	 * NMI context
	 */
	calc_timer_values(event, &now, &enabled, &running);
	rb = rcu_dereference(event->rb);
	if (!rb)
		goto unlock;

	userpg = rb->user_page;

	/*
	 * Disable preemption so as to not let the corresponding user-space
	 * spin too long if we get preempted.
	 */
	preempt_disable();
	++userpg->lock;
	barrier();
	userpg->index = perf_event_index(event);
	userpg->offset = perf_event_count(event);
	if (userpg->index)
		userpg->offset -= local64_read(&event->hw.prev_count);

	userpg->time_enabled = enabled +
			atomic64_read(&event->child_total_time_enabled);

	userpg->time_running = running +
			atomic64_read(&event->child_total_time_running);

	arch_perf_update_userpage(userpg, now);

	barrier();
	++userpg->lock;
	preempt_enable();
unlock:
	rcu_read_unlock();
}
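
/*
 * User-space reader sketch (illustrative) for the seqcount protocol
 * above: retry whenever the lock word changed mid-read, since a
 * differing value means the kernel updated the page concurrently.
 *
 *	struct perf_event_mmap_page *pc = mapped_page;
 *	u32 seq, idx;
 *	u64 offset;
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		idx    = pc->index;
 *		offset = pc->offset;
 *		barrier();
 *	} while (pc->lock != seq);
 */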

static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct perf_event *event = vma->vm_file->private_data;
	struct ring_buffer *rb;
	int ret = VM_FAULT_SIGBUS;

	if (vmf->flags & FAULT_FLAG_MKWRITE) {
		if (vmf->pgoff == 0)
			ret = 0;
		return ret;
	}

	rcu_read_lock();
	rb = rcu_dereference(event->rb);
	if (!rb)
		goto unlock;

	if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
		goto unlock;

	vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
	if (!vmf->page)
		goto unlock;

	get_page(vmf->page);
	vmf->page->mapping = vma->vm_file->f_mapping;
	vmf->page->index   = vmf->pgoff;

	ret = 0;
unlock:
	rcu_read_unlock();

	return ret;
}

static void ring_buffer_attach(struct perf_event *event,
			       struct ring_buffer *rb)
{
	unsigned long flags;

	if (!list_empty(&event->rb_entry))
		return;

	spin_lock_irqsave(&rb->event_lock, flags);
	if (!list_empty(&event->rb_entry))
		goto unlock;

	list_add(&event->rb_entry, &rb->event_list);
unlock:
	spin_unlock_irqrestore(&rb->event_lock, flags);
}

static void ring_buffer_detach(struct perf_event *event,
			       struct ring_buffer *rb)
{
	unsigned long flags;

	if (list_empty(&event->rb_entry))
		return;

	spin_lock_irqsave(&rb->event_lock, flags);
	list_del_init(&event->rb_entry);
	wake_up_all(&event->waitq);
	spin_unlock_irqrestore(&rb->event_lock, flags);
}

static void ring_buffer_wakeup(struct perf_event *event)
{
	struct ring_buffer *rb;

	rcu_read_lock();
	rb = rcu_dereference(event->rb);
3481 3482 3483 3484
	if (!rb)
		goto unlock;

	list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
3485
		wake_up_all(&event->waitq);
3486 3487

unlock:
3488 3489 3490
	rcu_read_unlock();
}

static void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct ring_buffer *rb;

	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
	rb_free(rb);
}

static struct ring_buffer *ring_buffer_get(struct perf_event *event)
{
	struct ring_buffer *rb;

	rcu_read_lock();
	rb = rcu_dereference(event->rb);
	if (rb) {
		if (!atomic_inc_not_zero(&rb->refcount))
			rb = NULL;
	}
	rcu_read_unlock();

	return rb;
}

static void ring_buffer_put(struct ring_buffer *rb)
{
	struct perf_event *event, *n;
	unsigned long flags;

	if (!atomic_dec_and_test(&rb->refcount))
		return;

	spin_lock_irqsave(&rb->event_lock, flags);
	list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
		list_del_init(&event->rb_entry);
		wake_up_all(&event->waitq);
	}
	spin_unlock_irqrestore(&rb->event_lock, flags);

	call_rcu(&rb->rcu_head, rb_free_rcu);
}

static void perf_mmap_open(struct vm_area_struct *vma)
{
	struct perf_event *event = vma->vm_file->private_data;

	atomic_inc(&event->mmap_count);
}

static void perf_mmap_close(struct vm_area_struct *vma)
{
	struct perf_event *event = vma->vm_file->private_data;

	if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
		unsigned long size = perf_data_size(event->rb);
		struct user_struct *user = event->mmap_user;
		struct ring_buffer *rb = event->rb;

		atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
		vma->vm_mm->pinned_vm -= event->mmap_locked;
		rcu_assign_pointer(event->rb, NULL);
		ring_buffer_detach(event, rb);
		mutex_unlock(&event->mmap_mutex);

		ring_buffer_put(rb);
		free_uid(user);
	}
}

static const struct vm_operations_struct perf_mmap_vmops = {
	.open		= perf_mmap_open,
	.close		= perf_mmap_close,
	.fault		= perf_mmap_fault,
	.page_mkwrite	= perf_mmap_fault,
};

static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct perf_event *event = file->private_data;
	unsigned long user_locked, user_lock_limit;
	struct user_struct *user = current_user();
	unsigned long locked, lock_limit;
	struct ring_buffer *rb;
	unsigned long vma_size;
	unsigned long nr_pages;
	long user_extra, extra;
	int ret = 0, flags = 0;

	/*
	 * Don't allow mmap() of inherited per-task counters. This would
	 * create a performance issue due to all children writing to the
	 * same rb.
	 */
	if (event->cpu == -1 && event->attr.inherit)
		return -EINVAL;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma_size = vma->vm_end - vma->vm_start;
	nr_pages = (vma_size / PAGE_SIZE) - 1;

	/*
	 * If we have rb pages ensure they're a power-of-two number, so we
	 * can do bitmasks instead of modulo.
	 */
	if (nr_pages != 0 && !is_power_of_2(nr_pages))
		return -EINVAL;

	if (vma_size != PAGE_SIZE * (1 + nr_pages))
		return -EINVAL;

	if (vma->vm_pgoff != 0)
		return -EINVAL;

	WARN_ON_ONCE(event->ctx->parent_ctx);
	mutex_lock(&event->mmap_mutex);
	if (event->rb) {
		if (event->rb->nr_pages == nr_pages)
			atomic_inc(&event->rb->refcount);
		else
			ret = -EINVAL;
		goto unlock;
	}

	user_extra = nr_pages + 1;
	user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);

	/*
	 * Increase the limit linearly with more CPUs:
	 */
	user_lock_limit *= num_online_cpus();

	user_locked = atomic_long_read(&user->locked_vm) + user_extra;

	extra = 0;
	if (user_locked > user_lock_limit)
		extra = user_locked - user_lock_limit;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;
	locked = vma->vm_mm->pinned_vm + extra;

	if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
		!capable(CAP_IPC_LOCK)) {
		ret = -EPERM;
		goto unlock;
	}

	WARN_ON(event->rb);

	if (vma->vm_flags & VM_WRITE)
		flags |= RING_BUFFER_WRITABLE;

	rb = rb_alloc(nr_pages,
		event->attr.watermark ? event->attr.wakeup_watermark : 0,
		event->cpu, flags);

	if (!rb) {
		ret = -ENOMEM;
		goto unlock;
	}
	rcu_assign_pointer(event->rb, rb);

	atomic_long_add(user_extra, &user->locked_vm);
	event->mmap_locked = extra;
	event->mmap_user = get_current_user();
	vma->vm_mm->pinned_vm += event->mmap_locked;

	perf_event_update_userpage(event);

unlock:
	if (!ret)
		atomic_inc(&event->mmap_count);
	mutex_unlock(&event->mmap_mutex);

	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &perf_mmap_vmops;

	return ret;
}
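
/*
 * User-space usage sketch (illustrative): map the control page plus a
 * power-of-two number of data pages, as enforced above.
 *
 *	size_t page = sysconf(_SC_PAGESIZE);
 *	size_t len  = (1 + 8) * page;	// 1 control page + 2^3 data pages
 *	void *base  = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, 0);
 *	if (base == MAP_FAILED)
 *		err(1, "mmap");
 *	struct perf_event_mmap_page *pc = base;
 */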

static int perf_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct perf_event *event = filp->private_data;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &event->fasync);
	mutex_unlock(&inode->i_mutex);

	if (retval < 0)
		return retval;

	return 0;
}

static const struct file_operations perf_fops = {
	.llseek			= no_llseek,
	.release		= perf_release,
	.read			= perf_read,
	.poll			= perf_poll,
	.unlocked_ioctl		= perf_ioctl,
	.compat_ioctl		= perf_ioctl,
	.mmap			= perf_mmap,
	.fasync			= perf_fasync,
};

/*
 * Perf event wakeup
 *
 * If there's data, ensure we set the poll() state and publish everything
 * to user-space before waking everybody up.
 */

void perf_event_wakeup(struct perf_event *event)
{
	ring_buffer_wakeup(event);

	if (event->pending_kill) {
		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
		event->pending_kill = 0;
	}
}

static void perf_pending_event(struct irq_work *entry)
{
	struct perf_event *event = container_of(entry,
			struct perf_event, pending);

	if (event->pending_disable) {
		event->pending_disable = 0;
		__perf_event_disable(event);
	}

	if (event->pending_wakeup) {
		event->pending_wakeup = 0;
		perf_event_wakeup(event);
	}
}
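
/*
 * Note: this runs from irq_work context. Overflow handling frequently
 * happens in NMI context, where we can neither take locks nor perform
 * wakeups directly; queueing event->pending defers the disable and the
 * wakeup to a context where both are safe.
 */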

/*
 * We assume there is only KVM supporting the callbacks.
 * Later on, we might change it to a list if there is
 * another virtualization implementation supporting the callbacks.
 */
struct perf_guest_info_callbacks *perf_guest_cbs;

int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
	perf_guest_cbs = cbs;
	return 0;
}
EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);

int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
	perf_guest_cbs = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
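
/*
 * Usage sketch (outside this file): a hypervisor registers its callbacks
 * once at init and removes them on exit; e.g. KVM on x86 does
 * perf_register_guest_info_callbacks(&kvm_guest_cbs), after which PMI
 * samples can be attributed to guest mode via perf_guest_cbs->is_in_guest().
 */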

static void __perf_event_header__init_id(struct perf_event_header *header,
					 struct perf_sample_data *data,
					 struct perf_event *event)
{
	u64 sample_type = event->attr.sample_type;

	data->type = sample_type;
	header->size += event->id_header_size;

	if (sample_type & PERF_SAMPLE_TID) {
		/* namespace issues */
		data->tid_entry.pid = perf_event_pid(event, current);
		data->tid_entry.tid = perf_event_tid(event, current);
	}

	if (sample_type & PERF_SAMPLE_TIME)
		data->time = perf_clock();

	if (sample_type & PERF_SAMPLE_ID)
		data->id = primary_event_id(event);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		data->stream_id = event->id;

	if (sample_type & PERF_SAMPLE_CPU) {
		data->cpu_entry.cpu	 = raw_smp_processor_id();
		data->cpu_entry.reserved = 0;
	}
}

void perf_event_header__init_id(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event)
{
	if (event->attr.sample_id_all)
		__perf_event_header__init_id(header, data, event);
}

static void __perf_event__output_id_sample(struct perf_output_handle *handle,
					   struct perf_sample_data *data)
{
	u64 sample_type = data->type;

	if (sample_type & PERF_SAMPLE_TID)
		perf_output_put(handle, data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		perf_output_put(handle, data->time);

	if (sample_type & PERF_SAMPLE_ID)
		perf_output_put(handle, data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		perf_output_put(handle, data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		perf_output_put(handle, data->cpu_entry);
}

void perf_event__output_id_sample(struct perf_event *event,
				  struct perf_output_handle *handle,
				  struct perf_sample_data *sample)
{
	if (event->attr.sample_id_all)
		__perf_event__output_id_sample(handle, sample);
}

static void perf_output_read_one(struct perf_output_handle *handle,
				 struct perf_event *event,
				 u64 enabled, u64 running)
{
	u64 read_format = event->attr.read_format;
	u64 values[4];
	int n = 0;

	values[n++] = perf_event_count(event);
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
		values[n++] = enabled +
			atomic64_read(&event->child_total_time_enabled);
	}
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
		values[n++] = running +
			atomic64_read(&event->child_total_time_running);
	}
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(event);

	__output_copy(handle, values, n * sizeof(u64));
}

/*
 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
 */
static void perf_output_read_group(struct perf_output_handle *handle,
			    struct perf_event *event,
			    u64 enabled, u64 running)
{
	struct perf_event *leader = event->group_leader, *sub;
	u64 read_format = event->attr.read_format;
	u64 values[5];
	int n = 0;

	values[n++] = 1 + leader->nr_siblings;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;

	if (leader != event)
		leader->pmu->read(leader);

	values[n++] = perf_event_count(leader);
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(leader);

	__output_copy(handle, values, n * sizeof(u64));

	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		n = 0;

		if (sub != event)
			sub->pmu->read(sub);

		values[n++] = perf_event_count(sub);
		if (read_format & PERF_FORMAT_ID)
			values[n++] = primary_event_id(sub);

		__output_copy(handle, values, n * sizeof(u64));
	}
}

#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
				 PERF_FORMAT_TOTAL_TIME_RUNNING)

static void perf_output_read(struct perf_output_handle *handle,
			     struct perf_event *event)
{
	u64 enabled = 0, running = 0, now;
	u64 read_format = event->attr.read_format;

	/*
	 * compute total_time_enabled, total_time_running
	 * based on snapshot values taken when the event
	 * was last scheduled in.
	 *
	 * we cannot simply call update_context_time()
	 * because of locking issue as we are called in
	 * NMI context
	 */
	if (read_format & PERF_FORMAT_TOTAL_TIMES)
		calc_timer_values(event, &now, &enabled, &running);

	if (event->attr.read_format & PERF_FORMAT_GROUP)
		perf_output_read_group(handle, event, enabled, running);
	else
		perf_output_read_one(handle, event, enabled, running);
}
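
/*
 * Example layout (derived from the functions above): with read_format =
 * PERF_FORMAT_GROUP | PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID,
 * perf_output_read_group() emits, in order:
 *
 *	u64 nr;				1 + nr_siblings
 *	u64 time_enabled;
 *	u64 value, id;			leader
 *	u64 value, id;			one pair per sibling
 *
 * matching the values[] fill order in that function.
 */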

void perf_output_sample(struct perf_output_handle *handle,
			struct perf_event_header *header,
			struct perf_sample_data *data,
			struct perf_event *event)
{
	u64 sample_type = data->type;

	perf_output_put(handle, *header);

	if (sample_type & PERF_SAMPLE_IP)
		perf_output_put(handle, data->ip);

	if (sample_type & PERF_SAMPLE_TID)
		perf_output_put(handle, data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		perf_output_put(handle, data->time);

	if (sample_type & PERF_SAMPLE_ADDR)
		perf_output_put(handle, data->addr);

	if (sample_type & PERF_SAMPLE_ID)
		perf_output_put(handle, data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		perf_output_put(handle, data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		perf_output_put(handle, data->cpu_entry);

	if (sample_type & PERF_SAMPLE_PERIOD)
		perf_output_put(handle, data->period);

	if (sample_type & PERF_SAMPLE_READ)
		perf_output_read(handle, event);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		if (data->callchain) {
			int size = 1;

			if (data->callchain)
				size += data->callchain->nr;

			size *= sizeof(u64);

			__output_copy(handle, data->callchain, size);
		} else {
			u64 nr = 0;
			perf_output_put(handle, nr);
		}
	}

	if (sample_type & PERF_SAMPLE_RAW) {
		if (data->raw) {
			perf_output_put(handle, data->raw->size);
			__output_copy(handle, data->raw->data,
					   data->raw->size);
		} else {
			struct {
				u32	size;
				u32	data;
			} raw = {
				.size = sizeof(u32),
				.data = 0,
			};
			perf_output_put(handle, raw);
		}
	}

	if (!event->attr.watermark) {
		int wakeup_events = event->attr.wakeup_events;

		if (wakeup_events) {
			struct ring_buffer *rb = handle->rb;
			int events = local_inc_return(&rb->events);

			if (events >= wakeup_events) {
				local_sub(wakeup_events, &rb->events);
				local_inc(&rb->wakeup);
			}
		}
	}

	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
		if (data->br_stack) {
			size_t size;

			size = data->br_stack->nr
			     * sizeof(struct perf_branch_entry);

			perf_output_put(handle, data->br_stack->nr);
			perf_output_copy(handle, data->br_stack->entries, size);
		} else {
			/*
			 * we always store at least the value of nr
			 */
			u64 nr = 0;
			perf_output_put(handle, nr);
		}
	}
}
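
/*
 * Example layout (derived from the checks above): for sample_type =
 * PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_PERIOD the record body
 * emitted is, in order:
 *
 *	struct perf_event_header header;
 *	u64 ip;
 *	u32 pid, tid;
 *	u64 period;
 *
 * User-space parsers rely on the field order following the bit order of
 * the sample_type tests in this function.
 */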

void perf_prepare_sample(struct perf_event_header *header,
			 struct perf_sample_data *data,
			 struct perf_event *event,
			 struct pt_regs *regs)
{
	u64 sample_type = event->attr.sample_type;

	header->type = PERF_RECORD_SAMPLE;
	header->size = sizeof(*header) + event->header_size;

	header->misc = 0;
	header->misc |= perf_misc_flags(regs);

	__perf_event_header__init_id(header, data, event);

	if (sample_type & PERF_SAMPLE_IP)
		data->ip = perf_instruction_pointer(regs);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		int size = 1;

		data->callchain = perf_callchain(regs);

		if (data->callchain)
			size += data->callchain->nr;

		header->size += size * sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_RAW) {
		int size = sizeof(u32);

		if (data->raw)
			size += data->raw->size;
		else
			size += sizeof(u32);

		WARN_ON_ONCE(size & (sizeof(u64)-1));
		header->size += size;
	}

	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
		int size = sizeof(u64); /* nr */
		if (data->br_stack) {
			size += data->br_stack->nr
			      * sizeof(struct perf_branch_entry);
		}
		header->size += size;
	}
}

static void perf_event_output(struct perf_event *event,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct perf_output_handle handle;
	struct perf_event_header header;

	/* protect the callchain buffers */
	rcu_read_lock();

	perf_prepare_sample(&header, data, event, regs);

	if (perf_output_begin(&handle, event, header.size))
		goto exit;

	perf_output_sample(&handle, &header, data, event);

	perf_output_end(&handle);

exit:
	rcu_read_unlock();
}

/*
 * read event_id
 */

struct perf_read_event {
	struct perf_event_header	header;

	u32				pid;
	u32				tid;
};

static void
perf_event_read_event(struct perf_event *event,
			struct task_struct *task)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	struct perf_read_event read_event = {
		.header = {
			.type = PERF_RECORD_READ,
			.misc = 0,
			.size = sizeof(read_event) + event->read_size,
		},
		.pid = perf_event_pid(event, task),
		.tid = perf_event_tid(event, task),
	};
	int ret;

	perf_event_header__init_id(&read_event.header, &sample, event);
	ret = perf_output_begin(&handle, event, read_event.header.size);
	if (ret)
		return;

	perf_output_put(&handle, read_event);
	perf_output_read(&handle, event);
	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
}

/*
 * task tracking -- fork/exit
 *
 * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
 */

struct perf_task_event {
	struct task_struct		*task;
	struct perf_event_context	*task_ctx;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				ppid;
		u32				tid;
		u32				ptid;
		u64				time;
	} event_id;
};

static void perf_event_task_output(struct perf_event *event,
				     struct perf_task_event *task_event)
{
	struct perf_output_handle handle;
	struct perf_sample_data	sample;
	struct task_struct *task = task_event->task;
	int ret, size = task_event->event_id.header.size;

	perf_event_header__init_id(&task_event->event_id.header, &sample, event);

	ret = perf_output_begin(&handle, event,
				task_event->event_id.header.size);
	if (ret)
		goto out;

	task_event->event_id.pid = perf_event_pid(event, task);
	task_event->event_id.ppid = perf_event_pid(event, current);

	task_event->event_id.tid = perf_event_tid(event, task);
	task_event->event_id.ptid = perf_event_tid(event, current);

	perf_output_put(&handle, task_event->event_id);

	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
out:
	task_event->event_id.header.size = size;
}

static int perf_event_task_match(struct perf_event *event)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (!event_filter_match(event))
		return 0;

	if (event->attr.comm || event->attr.mmap ||
	    event->attr.mmap_data || event->attr.task)
		return 1;

	return 0;
}

static void perf_event_task_ctx(struct perf_event_context *ctx,
				  struct perf_task_event *task_event)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_task_match(event))
			perf_event_task_output(event, task_event);
	}
}

static void perf_event_task_event(struct perf_task_event *task_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int ctxn;

	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
		if (cpuctx->active_pmu != pmu)
			goto next;
		perf_event_task_ctx(&cpuctx->ctx, task_event);

		ctx = task_event->task_ctx;
		if (!ctx) {
			ctxn = pmu->task_ctx_nr;
			if (ctxn < 0)
				goto next;
			ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
		}
		if (ctx)
			perf_event_task_ctx(ctx, task_event);
next:
		put_cpu_ptr(pmu->pmu_cpu_context);
	}
	rcu_read_unlock();
}

static void perf_event_task(struct task_struct *task,
			      struct perf_event_context *task_ctx,
			      int new)
{
	struct perf_task_event task_event;

	if (!atomic_read(&nr_comm_events) &&
	    !atomic_read(&nr_mmap_events) &&
	    !atomic_read(&nr_task_events))
		return;

	task_event = (struct perf_task_event){
		.task	  = task,
		.task_ctx = task_ctx,
		.event_id    = {
			.header = {
				.type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
				.misc = 0,
				.size = sizeof(task_event.event_id),
			},
			/* .pid  */
			/* .ppid */
			/* .tid  */
			/* .ptid */
			.time = perf_clock(),
		},
	};

	perf_event_task_event(&task_event);
}

void perf_event_fork(struct task_struct *task)
{
	perf_event_task(task, NULL, 1);
}

/*
 * comm tracking
 */

struct perf_comm_event {
	struct task_struct	*task;
	char			*comm;
	int			comm_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
	} event_id;
};

static void perf_event_comm_output(struct perf_event *event,
				     struct perf_comm_event *comm_event)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int size = comm_event->event_id.header.size;
	int ret;

	perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
	ret = perf_output_begin(&handle, event,
				comm_event->event_id.header.size);

	if (ret)
		goto out;

	comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
	comm_event->event_id.tid = perf_event_tid(event, comm_event->task);

	perf_output_put(&handle, comm_event->event_id);
	__output_copy(&handle, comm_event->comm,
				   comm_event->comm_size);

	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
out:
	comm_event->event_id.header.size = size;
}

static int perf_event_comm_match(struct perf_event *event)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (!event_filter_match(event))
		return 0;

	if (event->attr.comm)
		return 1;

	return 0;
}

static void perf_event_comm_ctx(struct perf_event_context *ctx,
				  struct perf_comm_event *comm_event)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_comm_match(event))
			perf_event_comm_output(event, comm_event);
	}
}

static void perf_event_comm_event(struct perf_comm_event *comm_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	char comm[TASK_COMM_LEN];
	unsigned int size;
	struct pmu *pmu;
	int ctxn;

	memset(comm, 0, sizeof(comm));
	strlcpy(comm, comm_event->task->comm, sizeof(comm));
	size = ALIGN(strlen(comm)+1, sizeof(u64));

	comm_event->comm = comm;
	comm_event->comm_size = size;

	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
		if (cpuctx->active_pmu != pmu)
			goto next;
		perf_event_comm_ctx(&cpuctx->ctx, comm_event);

		ctxn = pmu->task_ctx_nr;
		if (ctxn < 0)
			goto next;

		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
		if (ctx)
			perf_event_comm_ctx(ctx, comm_event);
next:
		put_cpu_ptr(pmu->pmu_cpu_context);
	}
	rcu_read_unlock();
}

void perf_event_comm(struct task_struct *task)
{
	struct perf_comm_event comm_event;
	struct perf_event_context *ctx;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		perf_event_enable_on_exec(ctx);
	}

	if (!atomic_read(&nr_comm_events))
		return;

	comm_event = (struct perf_comm_event){
		.task	= task,
		/* .comm      */
		/* .comm_size */
		.event_id  = {
			.header = {
				.type = PERF_RECORD_COMM,
				.misc = 0,
				/* .size */
			},
			/* .pid */
			/* .tid */
		},
	};

	perf_event_comm_event(&comm_event);
}

/*
 * mmap tracking
 */

struct perf_mmap_event {
	struct vm_area_struct	*vma;

	const char		*file_name;
	int			file_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
		u64				start;
		u64				len;
		u64				pgoff;
	} event_id;
};

static void perf_event_mmap_output(struct perf_event *event,
				     struct perf_mmap_event *mmap_event)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int size = mmap_event->event_id.header.size;
	int ret;

	perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
	ret = perf_output_begin(&handle, event,
				mmap_event->event_id.header.size);
	if (ret)
		goto out;

	mmap_event->event_id.pid = perf_event_pid(event, current);
	mmap_event->event_id.tid = perf_event_tid(event, current);

	perf_output_put(&handle, mmap_event->event_id);
	__output_copy(&handle, mmap_event->file_name,
				   mmap_event->file_size);

	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
out:
	mmap_event->event_id.header.size = size;
}

static int perf_event_mmap_match(struct perf_event *event,
				   struct perf_mmap_event *mmap_event,
				   int executable)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (!event_filter_match(event))
		return 0;

	if ((!executable && event->attr.mmap_data) ||
	    (executable && event->attr.mmap))
		return 1;

	return 0;
}

static void perf_event_mmap_ctx(struct perf_event_context *ctx,
				  struct perf_mmap_event *mmap_event,
				  int executable)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_mmap_match(event, mmap_event, executable))
			perf_event_mmap_output(event, mmap_event);
	}
}

static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	struct vm_area_struct *vma = mmap_event->vma;
	struct file *file = vma->vm_file;
	unsigned int size;
	char tmp[16];
	char *buf = NULL;
	const char *name;
	struct pmu *pmu;
	int ctxn;

	memset(tmp, 0, sizeof(tmp));

	if (file) {
		/*
		 * d_path() works from the end of the buffer backwards, so we
		 * need to add enough zero bytes after the string to handle
		 * the 64bit alignment we do later.
		 */
		buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
		if (!buf) {
			name = strncpy(tmp, "//enomem", sizeof(tmp));
			goto got_name;
		}
		name = d_path(&file->f_path, buf, PATH_MAX);
		if (IS_ERR(name)) {
			name = strncpy(tmp, "//toolong", sizeof(tmp));
			goto got_name;
		}
	} else {
		if (arch_vma_name(mmap_event->vma)) {
			name = strncpy(tmp, arch_vma_name(mmap_event->vma),
				       sizeof(tmp));
			goto got_name;
		}

		if (!vma->vm_mm) {
			name = strncpy(tmp, "[vdso]", sizeof(tmp));
			goto got_name;
		} else if (vma->vm_start <= vma->vm_mm->start_brk &&
				vma->vm_end >= vma->vm_mm->brk) {
			name = strncpy(tmp, "[heap]", sizeof(tmp));
			goto got_name;
		} else if (vma->vm_start <= vma->vm_mm->start_stack &&
				vma->vm_end >= vma->vm_mm->start_stack) {
			name = strncpy(tmp, "[stack]", sizeof(tmp));
			goto got_name;
		}

		name = strncpy(tmp, "//anon", sizeof(tmp));
		goto got_name;
	}

got_name:
	size = ALIGN(strlen(name)+1, sizeof(u64));

	mmap_event->file_name = name;
	mmap_event->file_size = size;

	mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;

	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
		if (cpuctx->active_pmu != pmu)
			goto next;
		perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
					vma->vm_flags & VM_EXEC);

		ctxn = pmu->task_ctx_nr;
		if (ctxn < 0)
			goto next;

		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
		if (ctx) {
			perf_event_mmap_ctx(ctx, mmap_event,
					vma->vm_flags & VM_EXEC);
		}
next:
		put_cpu_ptr(pmu->pmu_cpu_context);
	}
	rcu_read_unlock();

	kfree(buf);
}

void perf_event_mmap(struct vm_area_struct *vma)
{
	struct perf_mmap_event mmap_event;

	if (!atomic_read(&nr_mmap_events))
		return;

	mmap_event = (struct perf_mmap_event){
		.vma	= vma,
		/* .file_name */
		/* .file_size */
		.event_id  = {
			.header = {
				.type = PERF_RECORD_MMAP,
				.misc = PERF_RECORD_MISC_USER,
				/* .size */
			},
			/* .pid */
			/* .tid */
			.start  = vma->vm_start,
			.len    = vma->vm_end - vma->vm_start,
			.pgoff  = (u64)vma->vm_pgoff << PAGE_SHIFT,
		},
	};

	perf_event_mmap_event(&mmap_event);
}

/*
 * IRQ throttle logging
 */

static void perf_log_throttle(struct perf_event *event, int enable)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int ret;

	struct {
		struct perf_event_header	header;
		u64				time;
		u64				id;
		u64				stream_id;
	} throttle_event = {
		.header = {
			.type = PERF_RECORD_THROTTLE,
			.misc = 0,
			.size = sizeof(throttle_event),
		},
		.time		= perf_clock(),
		.id		= primary_event_id(event),
		.stream_id	= event->id,
	};

	if (enable)
		throttle_event.header.type = PERF_RECORD_UNTHROTTLE;

	perf_event_header__init_id(&throttle_event.header, &sample, event);

	ret = perf_output_begin(&handle, event,
				throttle_event.header.size);
	if (ret)
		return;

	perf_output_put(&handle, throttle_event);
	perf_event__output_id_sample(event, &handle, &sample);
	perf_output_end(&handle);
}

/*
 * Generic event overflow handling, sampling.
 */

static int __perf_event_overflow(struct perf_event *event,
				   int throttle, struct perf_sample_data *data,
				   struct pt_regs *regs)
{
	int events = atomic_read(&event->event_limit);
	struct hw_perf_event *hwc = &event->hw;
	u64 seq;
	int ret = 0;

	/*
	 * Non-sampling counters might still use the PMI to fold short
	 * hardware counters, ignore those.
	 */
	if (unlikely(!is_sampling_event(event)))
		return 0;

	seq = __this_cpu_read(perf_throttled_seq);
	if (seq != hwc->interrupts_seq) {
		hwc->interrupts_seq = seq;
		hwc->interrupts = 1;
	} else {
		hwc->interrupts++;
		if (unlikely(throttle
			     && hwc->interrupts >= max_samples_per_tick)) {
			__this_cpu_inc(perf_throttled_count);
			hwc->interrupts = MAX_INTERRUPTS;
			perf_log_throttle(event, 0);
			ret = 1;
		}
	}

	if (event->attr.freq) {
		u64 now = perf_clock();
		s64 delta = now - hwc->freq_time_stamp;

		hwc->freq_time_stamp = now;

		if (delta > 0 && delta < 2*TICK_NSEC)
			perf_adjust_period(event, delta, hwc->last_period, true);
	}

	/*
	 * XXX event_limit might not quite work as expected on inherited
	 * events
	 */

	event->pending_kill = POLL_IN;
	if (events && atomic_dec_and_test(&event->event_limit)) {
		ret = 1;
		event->pending_kill = POLL_HUP;
		event->pending_disable = 1;
		irq_work_queue(&event->pending);
	}

	if (event->overflow_handler)
		event->overflow_handler(event, data, regs);
	else
		perf_event_output(event, data, regs);

	if (event->fasync && event->pending_kill) {
		event->pending_wakeup = 1;
		irq_work_queue(&event->pending);
	}

	return ret;
}
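
/*
 * The event_limit count consumed above is armed from user-space via the
 * PERF_EVENT_IOC_REFRESH ioctl: each overflow decrements it, and once it
 * hits zero the event is disabled and poll() reports POLL_HUP, letting
 * user-space consume a fixed number of overflows and then re-arm.
 */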

int perf_event_overflow(struct perf_event *event,
			  struct perf_sample_data *data,
			  struct pt_regs *regs)
{
	return __perf_event_overflow(event, 1, data, regs);
}

/*
 * Generic software event infrastructure
 */

struct swevent_htable {
	struct swevent_hlist		*swevent_hlist;
	struct mutex			hlist_mutex;
	int				hlist_refcount;

	/* Recursion avoidance in each contexts */
	int				recursion[PERF_NR_CONTEXTS];
};

static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);

/*
 * We directly increment event->count and keep a second value in
 * event->hw.period_left to count intervals. This period event
 * is kept in the range [-sample_period, 0] so that we can use the
 * sign as trigger.
 */

static u64 perf_swevent_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 period = hwc->last_period;
	u64 nr, offset;
	s64 old, val;

	hwc->last_period = hwc->sample_period;

again:
	old = val = local64_read(&hwc->period_left);
	if (val < 0)
		return 0;

	nr = div64_u64(period + val, period);
	offset = nr * period;
	val -= offset;
	if (local64_cmpxchg(&hwc->period_left, old, val) != old)
		goto again;

	return nr;
}
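
/*
 * Worked example: with period == 100 and period_left == 250, we report
 * nr = (100 + 250) / 100 = 3 overflows and period_left becomes
 * 250 - 300 = -50, back inside the [-sample_period, 0] range.
 */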

static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
				    struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;
	int throttle = 0;

	if (!overflow)
		overflow = perf_swevent_set_period(event);

	if (hwc->interrupts == MAX_INTERRUPTS)
		return;

	for (; overflow; overflow--) {
		if (__perf_event_overflow(event, throttle,
					    data, regs)) {
			/*
			 * We inhibit the overflow from happening when
			 * hwc->interrupts == MAX_INTERRUPTS.
			 */
			break;
		}
		throttle = 1;
	}
}

static void perf_swevent_event(struct perf_event *event, u64 nr,
			       struct perf_sample_data *data,
			       struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;

	local64_add(nr, &event->count);

	if (!regs)
		return;

	if (!is_sampling_event(event))
		return;

	if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
		data->period = nr;
		return perf_swevent_overflow(event, 1, data, regs);
	} else
		data->period = event->hw.last_period;

	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
		return perf_swevent_overflow(event, 1, data, regs);

	if (local64_add_negative(nr, &hwc->period_left))
		return;

	perf_swevent_overflow(event, 0, data, regs);
}

static int perf_exclude_event(struct perf_event *event,
			      struct pt_regs *regs)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return 1;

	if (regs) {
		if (event->attr.exclude_user && user_mode(regs))
			return 1;

		if (event->attr.exclude_kernel && !user_mode(regs))
			return 1;
	}

	return 0;
}

static int perf_swevent_match(struct perf_event *event,
				enum perf_type_id type,
				u32 event_id,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	if (event->attr.type != type)
		return 0;

	if (event->attr.config != event_id)
		return 0;

	if (perf_exclude_event(event, regs))
		return 0;

	return 1;
}

static inline u64 swevent_hash(u64 type, u32 event_id)
{
	u64 val = event_id | (type << 32);

	return hash_64(val, SWEVENT_HLIST_BITS);
}

static inline struct hlist_head *
__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
{
	u64 hash = swevent_hash(type, event_id);

	return &hlist->heads[hash];
}

/* For the read side: events when they trigger */
static inline struct hlist_head *
find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
{
	struct swevent_hlist *hlist;

	hlist = rcu_dereference(swhash->swevent_hlist);
	if (!hlist)
		return NULL;

	return __find_swevent_head(hlist, type, event_id);
}

/* For the event head insertion and removal in the hlist */
static inline struct hlist_head *
find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
{
	struct swevent_hlist *hlist;
	u32 event_id = event->attr.config;
	u64 type = event->attr.type;

	/*
	 * Event scheduling is always serialized against hlist allocation
	 * and release. Which makes the protected version suitable here.
	 * The context lock guarantees that.
	 */
	hlist = rcu_dereference_protected(swhash->swevent_hlist,
					  lockdep_is_held(&event->ctx->lock));
	if (!hlist)
		return NULL;

	return __find_swevent_head(hlist, type, event_id);
}

static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
				    u64 nr,
				    struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
	struct perf_event *event;
	struct hlist_node *node;
	struct hlist_head *head;

	rcu_read_lock();
	head = find_swevent_head_rcu(swhash, type, event_id);
	if (!head)
		goto end;

	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
		if (perf_swevent_match(event, type, event_id, data, regs))
			perf_swevent_event(event, nr, data, regs);
	}
end:
	rcu_read_unlock();
}

int perf_swevent_get_recursion_context(void)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);

	return get_recursion_context(swhash->recursion);
}
EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);

inline void perf_swevent_put_recursion_context(int rctx)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);

	put_recursion_context(swhash->recursion, rctx);
}

void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	struct perf_sample_data data;
	int rctx;

	preempt_disable_notrace();
	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto fail;

	perf_sample_data_init(&data, addr, 0);

	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);

	perf_swevent_put_recursion_context(rctx);
fail:
	preempt_enable_notrace();
}
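
/*
 * Typical call site (elsewhere in the kernel, for illustration): the
 * architecture page-fault path accounts faults with something like
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 *
 * which reaches this function through the perf_sw_event() wrapper once
 * the corresponding static key in perf_swevent_enabled[] is on.
 */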

static void perf_swevent_read(struct perf_event *event)
{
}

static int perf_swevent_add(struct perf_event *event, int flags)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
	struct hw_perf_event *hwc = &event->hw;
	struct hlist_head *head;

	if (is_sampling_event(event)) {
		hwc->last_period = hwc->sample_period;
		perf_swevent_set_period(event);
	}

	hwc->state = !(flags & PERF_EF_START);

	head = find_swevent_head(swhash, event);
	if (WARN_ON_ONCE(!head))
		return -EINVAL;

	hlist_add_head_rcu(&event->hlist_entry, head);

	return 0;
}

static void perf_swevent_del(struct perf_event *event, int flags)
{
	hlist_del_rcu(&event->hlist_entry);
}

static void perf_swevent_start(struct perf_event *event, int flags)
{
	event->hw.state = 0;
}

static void perf_swevent_stop(struct perf_event *event, int flags)
{
	event->hw.state = PERF_HES_STOPPED;
}
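
/*
 * Note on the ->add()/->start() pair above: PERF_HES_STOPPED is bit 0,
 * so hwc->state = !(flags & PERF_EF_START) in perf_swevent_add() marks
 * the event stopped unless an immediate start was requested;
 * perf_swevent_start() then clears the flag again.
 */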

/* Deref the hlist from the update side */
static inline struct swevent_hlist *
swevent_hlist_deref(struct swevent_htable *swhash)
{
	return rcu_dereference_protected(swhash->swevent_hlist,
					 lockdep_is_held(&swhash->hlist_mutex));
}

static void swevent_hlist_release(struct swevent_htable *swhash)
{
	struct swevent_hlist *hlist = swevent_hlist_deref(swhash);

	if (!hlist)
		return;

	rcu_assign_pointer(swhash->swevent_hlist, NULL);
	kfree_rcu(hlist, rcu_head);
}

static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);

	if (!--swhash->hlist_refcount)
		swevent_hlist_release(swhash);

	mutex_unlock(&swhash->hlist_mutex);
}

static void swevent_hlist_put(struct perf_event *event)
{
	int cpu;

	if (event->cpu != -1) {
		swevent_hlist_put_cpu(event, event->cpu);
		return;
	}

	for_each_possible_cpu(cpu)
		swevent_hlist_put_cpu(event, cpu);
}

static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
	int err = 0;

	mutex_lock(&swhash->hlist_mutex);

	if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
		struct swevent_hlist *hlist;

		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
		if (!hlist) {
			err = -ENOMEM;
			goto exit;
		}
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	swhash->hlist_refcount++;
exit:
	mutex_unlock(&swhash->hlist_mutex);

	return err;
}

static int swevent_hlist_get(struct perf_event *event)
{
	int err;
	int cpu, failed_cpu;

	if (event->cpu != -1)
		return swevent_hlist_get_cpu(event, event->cpu);

	get_online_cpus();
	for_each_possible_cpu(cpu) {
		err = swevent_hlist_get_cpu(event, cpu);
		if (err) {
			failed_cpu = cpu;
			goto fail;
		}
	}
	put_online_cpus();

	return 0;
fail:
	for_each_possible_cpu(cpu) {
		if (cpu == failed_cpu)
			break;
		swevent_hlist_put_cpu(event, cpu);
	}

	put_online_cpus();
	return err;
}

struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

static void sw_perf_event_destroy(struct perf_event *event)
{
	u64 event_id = event->attr.config;

	WARN_ON(event->parent);

	static_key_slow_dec(&perf_swevent_enabled[event_id]);
	swevent_hlist_put(event);
}

static int perf_swevent_init(struct perf_event *event)
{
	int event_id = event->attr.config;

	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	/*
	 * no branch sampling for software events
	 */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event_id) {
	case PERF_COUNT_SW_CPU_CLOCK:
	case PERF_COUNT_SW_TASK_CLOCK:
		return -ENOENT;

	default:
		break;
	}

	if (event_id >= PERF_COUNT_SW_MAX)
		return -ENOENT;

	if (!event->parent) {
		int err;

		err = swevent_hlist_get(event);
		if (err)
			return err;

		static_key_slow_inc(&perf_swevent_enabled[event_id]);
		event->destroy = sw_perf_event_destroy;
	}

	return 0;
}

static int perf_swevent_event_idx(struct perf_event *event)
{
	return 0;
}

static struct pmu perf_swevent = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= perf_swevent_init,
	.add		= perf_swevent_add,
	.del		= perf_swevent_del,
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
	.read		= perf_swevent_read,

	.event_idx	= perf_swevent_event_idx,
};

#ifdef CONFIG_EVENT_TRACING

static int perf_tp_filter_match(struct perf_event *event,
				struct perf_sample_data *data)
{
	void *record = data->raw->data;

	if (likely(!event->filter) || filter_match_preds(event->filter, record))
		return 1;
	return 0;
}

static int perf_tp_event_match(struct perf_event *event,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return 0;
	/*
	 * All tracepoints are from kernel-space.
	 */
	if (event->attr.exclude_kernel)
		return 0;

	if (!perf_tp_filter_match(event, data))
		return 0;

	return 1;
}

void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
		   struct pt_regs *regs, struct hlist_head *head, int rctx)
{
	struct perf_sample_data data;
	struct perf_event *event;
	struct hlist_node *node;

	struct perf_raw_record raw = {
		.size = entry_size,
		.data = record,
	};

	perf_sample_data_init(&data, addr, 0);
	data.raw = &raw;

	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
		if (perf_tp_event_match(event, &data, regs))
			perf_swevent_event(event, count, &data, regs);
	}

	perf_swevent_put_recursion_context(rctx);
}
EXPORT_SYMBOL_GPL(perf_tp_event);

static void tp_perf_event_destroy(struct perf_event *event)
{
	perf_trace_destroy(event);
}

static int perf_tp_event_init(struct perf_event *event)
{
	int err;

	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -ENOENT;

	/*
	 * no branch sampling for tracepoint events
	 */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	err = perf_trace_init(event);
	if (err)
		return err;

	event->destroy = tp_perf_event_destroy;

	return 0;
}

static struct pmu perf_tracepoint = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= perf_tp_event_init,
	.add		= perf_trace_add,
	.del		= perf_trace_del,
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
	.read		= perf_swevent_read,

	.event_idx	= perf_swevent_event_idx,
};

static inline void perf_tp_register(void)
{
	perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
}

static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
	char *filter_str;
	int ret;

	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;

	filter_str = strndup_user(arg, PAGE_SIZE);
	if (IS_ERR(filter_str))
		return PTR_ERR(filter_str);

	ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);

	kfree(filter_str);
	return ret;
}

static void perf_event_free_filter(struct perf_event *event)
{
	ftrace_profile_free_filter(event);
}

#else

static inline void perf_tp_register(void)
{
}

static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
	return -ENOENT;
}

static void perf_event_free_filter(struct perf_event *event)
{
}

#endif /* CONFIG_EVENT_TRACING */

#ifdef CONFIG_HAVE_HW_BREAKPOINT
void perf_bp_event(struct perf_event *bp, void *data)
{
	struct perf_sample_data sample;
	struct pt_regs *regs = data;

	perf_sample_data_init(&sample, bp->attr.bp_addr, 0);

	if (!bp->hw.state && !perf_exclude_event(bp, regs))
		perf_swevent_event(bp, 1, &sample, regs);
}
#endif

/*
 * hrtimer based swevent callback
 */

static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
{
	enum hrtimer_restart ret = HRTIMER_RESTART;
	struct perf_sample_data data;
	struct pt_regs *regs;
	struct perf_event *event;
	u64 period;

	event = container_of(hrtimer, struct perf_event, hw.hrtimer);

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return HRTIMER_NORESTART;

	event->pmu->read(event);

	perf_sample_data_init(&data, 0, event->hw.last_period);
	regs = get_irq_regs();

	if (regs && !perf_exclude_event(event, regs)) {
		if (!(event->attr.exclude_idle && is_idle_task(current)))
			if (__perf_event_overflow(event, 1, &data, regs))
				ret = HRTIMER_NORESTART;
	}

	period = max_t(u64, 10000, event->hw.sample_period);
	hrtimer_forward_now(hrtimer, ns_to_ktime(period));

	return ret;
}

static void perf_swevent_start_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 period;

	if (!is_sampling_event(event))
		return;

	period = local64_read(&hwc->period_left);
	if (period) {
		if (period < 0)
			period = 10000;

		local64_set(&hwc->period_left, 0);
	} else {
		period = max_t(u64, 10000, hwc->sample_period);
	}
	__hrtimer_start_range_ns(&hwc->hrtimer,
				ns_to_ktime(period), 0,
				HRTIMER_MODE_REL_PINNED, 0);
}

static void perf_swevent_cancel_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (is_sampling_event(event)) {
		ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
		local64_set(&hwc->period_left, ktime_to_ns(remaining));

		hrtimer_cancel(&hwc->hrtimer);
	}
}

static void perf_swevent_init_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!is_sampling_event(event))
		return;

	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swevent_hrtimer;

	/*
	 * Since hrtimers have a fixed rate, we can do a static freq->period
	 * mapping and avoid the whole period adjust feedback stuff.
	 */
	if (event->attr.freq) {
		long freq = event->attr.sample_freq;

		event->attr.sample_period = NSEC_PER_SEC / freq;
		hwc->sample_period = event->attr.sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
		event->attr.freq = 0;
	}
}
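
/*
 * Example of the static freq->period mapping above: attr.freq = 1 with
 * attr.sample_freq = 1000 (Hz) becomes a fixed sample_period of
 * NSEC_PER_SEC / 1000 = 1000000 ns, i.e. one hrtimer expiry per ms.
 */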

/*
 * Software event: cpu wall time clock
 */

static void cpu_clock_event_update(struct perf_event *event)
{
	s64 prev;
	u64 now;

	now = local_clock();
	prev = local64_xchg(&event->hw.prev_count, now);
	local64_add(now - prev, &event->count);
}

static void cpu_clock_event_start(struct perf_event *event, int flags)
{
	local64_set(&event->hw.prev_count, local_clock());
	perf_swevent_start_hrtimer(event);
}

static void cpu_clock_event_stop(struct perf_event *event, int flags)
{
	perf_swevent_cancel_hrtimer(event);
	cpu_clock_event_update(event);
}

static int cpu_clock_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		cpu_clock_event_start(event, flags);

	return 0;
}

static void cpu_clock_event_del(struct perf_event *event, int flags)
{
	cpu_clock_event_stop(event, flags);
}

static void cpu_clock_event_read(struct perf_event *event)
{
	cpu_clock_event_update(event);
}

static int cpu_clock_event_init(struct perf_event *event)
{
	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
		return -ENOENT;

	/*
	 * no branch sampling for software events
	 */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	perf_swevent_init_hrtimer(event);

	return 0;
}

static struct pmu perf_cpu_clock = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= cpu_clock_event_init,
	.add		= cpu_clock_event_add,
	.del		= cpu_clock_event_del,
	.start		= cpu_clock_event_start,
	.stop		= cpu_clock_event_stop,
	.read		= cpu_clock_event_read,

	.event_idx	= perf_swevent_event_idx,
};

/*
 * Software event: task time clock
 */

static void task_clock_event_update(struct perf_event *event, u64 now)
{
	u64 prev;
	s64 delta;

	prev = local64_xchg(&event->hw.prev_count, now);
	delta = now - prev;
	local64_add(delta, &event->count);
}

static void task_clock_event_start(struct perf_event *event, int flags)
{
	local64_set(&event->hw.prev_count, event->ctx->time);
	perf_swevent_start_hrtimer(event);
}

static void task_clock_event_stop(struct perf_event *event, int flags)
{
	perf_swevent_cancel_hrtimer(event);
	task_clock_event_update(event, event->ctx->time);
}

static int task_clock_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		task_clock_event_start(event, flags);

	return 0;
}

static void task_clock_event_del(struct perf_event *event, int flags)
{
	task_clock_event_stop(event, PERF_EF_UPDATE);
}

static void task_clock_event_read(struct perf_event *event)
{
	u64 now = perf_clock();
	u64 delta = now - event->ctx->timestamp;
	u64 time = event->ctx->time + delta;

	task_clock_event_update(event, time);
}

static int task_clock_event_init(struct perf_event *event)
{
	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
		return -ENOENT;

	/*
	 * no branch sampling for software events
	 */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	perf_swevent_init_hrtimer(event);

	return 0;
}

static struct pmu perf_task_clock = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= task_clock_event_init,
	.add		= task_clock_event_add,
	.del		= task_clock_event_del,
	.start		= task_clock_event_start,
	.stop		= task_clock_event_stop,
	.read		= task_clock_event_read,

	.event_idx	= perf_swevent_event_idx,
};
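
/*
 * Note the difference in time base between the two clock PMUs above:
 * cpu-clock reads local_clock() and advances with CPU wall time, while
 * task-clock samples event->ctx->time, which only advances while the
 * monitored task is scheduled in.
 */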

static void perf_pmu_nop_void(struct pmu *pmu)
{
}

static int perf_pmu_nop_int(struct pmu *pmu)
{
	return 0;
}

static void perf_pmu_start_txn(struct pmu *pmu)
{
	perf_pmu_disable(pmu);
}

static int perf_pmu_commit_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
	return 0;
}

static void perf_pmu_cancel_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
}

static int perf_event_idx_default(struct perf_event *event)
{
	return event->hw.idx + 1;
}

/*
 * Ensures all contexts with the same task_ctx_nr have the same
 * pmu_cpu_context too.
 */
static void *find_pmu_context(int ctxn)
{
	struct pmu *pmu;

	if (ctxn < 0)
		return NULL;

	list_for_each_entry(pmu, &pmus, entry) {
		if (pmu->task_ctx_nr == ctxn)
			return pmu->pmu_cpu_context;
	}

	return NULL;
}

static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct perf_cpu_context *cpuctx;

		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);

		if (cpuctx->active_pmu == old_pmu)
			cpuctx->active_pmu = pmu;
	}
}

static void free_pmu_context(struct pmu *pmu)
{
	struct pmu *i;

	mutex_lock(&pmus_lock);
	/*
	 * Like a real lame refcount.
	 */
	list_for_each_entry(i, &pmus, entry) {
		if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
			update_pmu_context(i, pmu);
			goto out;
		}
	}

	free_percpu(pmu->pmu_cpu_context);
out:
	mutex_unlock(&pmus_lock);
}
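
/*
 * Design note: pmu_cpu_context is shared between all pmus with the same
 * task_ctx_nr (see find_pmu_context()), so rather than keeping a real
 * refcount the release path above re-walks the pmus list and only frees
 * the percpu context once no other pmu still points at it.
 */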
static struct idr pmu_idr;

static ssize_t
type_show(struct device *dev, struct device_attribute *attr, char *page)
{
	struct pmu *pmu = dev_get_drvdata(dev);

	return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
}

static struct device_attribute pmu_dev_attrs[] = {
       __ATTR_RO(type),
       __ATTR_NULL,
};

static int pmu_bus_running;
static struct bus_type pmu_bus = {
	.name		= "event_source",
	.dev_attrs	= pmu_dev_attrs,
};

static void pmu_dev_release(struct device *dev)
{
	kfree(dev);
}

static int pmu_dev_alloc(struct pmu *pmu)
{
	int ret = -ENOMEM;

	pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!pmu->dev)
		goto out;

	pmu->dev->groups = pmu->attr_groups;
	device_initialize(pmu->dev);
	ret = dev_set_name(pmu->dev, "%s", pmu->name);
	if (ret)
		goto free_dev;

	dev_set_drvdata(pmu->dev, pmu);
	pmu->dev->bus = &pmu_bus;
	pmu->dev->release = pmu_dev_release;
	ret = device_add(pmu->dev);
	if (ret)
		goto free_dev;

out:
	return ret;

free_dev:
	put_device(pmu->dev);
	goto out;
}
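
/*
 * The device registered above shows up on the "event_source" bus; a
 * rough sketch of the resulting sysfs layout (the "cpu" pmu name is
 * just an example):
 *
 *	/sys/bus/event_source/devices/cpu/type
 *
 * User space (e.g. the perf tool) reads the "type" attribute and feeds
 * it back as perf_event_attr.type when opening events on that pmu.
 */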

static struct lock_class_key cpuctx_mutex;
static struct lock_class_key cpuctx_lock;

int perf_pmu_register(struct pmu *pmu, char *name, int type)
{
	int cpu, ret;

	mutex_lock(&pmus_lock);
	ret = -ENOMEM;
	pmu->pmu_disable_count = alloc_percpu(int);
	if (!pmu->pmu_disable_count)
		goto unlock;

	pmu->type = -1;
	if (!name)
		goto skip_type;
	pmu->name = name;

	if (type < 0) {
		int err = idr_pre_get(&pmu_idr, GFP_KERNEL);
		if (!err)
			goto free_pdc;

		err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type);
		if (err) {
			ret = err;
			goto free_pdc;
		}
	}
	pmu->type = type;

	if (pmu_bus_running) {
		ret = pmu_dev_alloc(pmu);
		if (ret)
			goto free_idr;
	}

skip_type:
	pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
	if (pmu->pmu_cpu_context)
		goto got_cpu_context;

	pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
	if (!pmu->pmu_cpu_context)
		goto free_dev;

	for_each_possible_cpu(cpu) {
		struct perf_cpu_context *cpuctx;

		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
		__perf_event_init_context(&cpuctx->ctx);
		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
		lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
		cpuctx->ctx.type = cpu_context;
		cpuctx->ctx.pmu = pmu;
		cpuctx->jiffies_interval = 1;
		INIT_LIST_HEAD(&cpuctx->rotation_list);
		cpuctx->active_pmu = pmu;
	}

got_cpu_context:
	if (!pmu->start_txn) {
		if (pmu->pmu_enable) {
			/*
			 * If we have pmu_enable/pmu_disable calls, install
			 * transaction stubs that use that to try and batch
			 * hardware accesses.
			 */
			pmu->start_txn  = perf_pmu_start_txn;
			pmu->commit_txn = perf_pmu_commit_txn;
			pmu->cancel_txn = perf_pmu_cancel_txn;
		} else {
			pmu->start_txn  = perf_pmu_nop_void;
			pmu->commit_txn = perf_pmu_nop_int;
			pmu->cancel_txn = perf_pmu_nop_void;
		}
	}

	if (!pmu->pmu_enable) {
		pmu->pmu_enable  = perf_pmu_nop_void;
		pmu->pmu_disable = perf_pmu_nop_void;
	}

	if (!pmu->event_idx)
		pmu->event_idx = perf_event_idx_default;

	list_add_rcu(&pmu->entry, &pmus);
	ret = 0;
unlock:
	mutex_unlock(&pmus_lock);

	return ret;

free_dev:
	device_del(pmu->dev);
	put_device(pmu->dev);

free_idr:
	if (pmu->type >= PERF_TYPE_MAX)
		idr_remove(&pmu_idr, pmu->type);

free_pdc:
	free_percpu(pmu->pmu_disable_count);
	goto unlock;
}
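
/*
 * Illustrative sketch of a caller (not part of this file): a minimal
 * software-style pmu registration.  "my_pmu" and its callbacks are
 * hypothetical; passing type -1 requests a dynamic id from pmu_idr:
 *
 *	static struct pmu my_pmu = {
 *		.task_ctx_nr	= perf_sw_context,
 *		.event_init	= my_event_init,
 *		.add		= my_add,
 *		.del		= my_del,
 *		.start		= my_start,
 *		.stop		= my_stop,
 *		.read		= my_read,
 *	};
 *
 *	ret = perf_pmu_register(&my_pmu, "my_pmu", -1);
 *
 * The matching teardown is perf_pmu_unregister(&my_pmu).
 */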

void perf_pmu_unregister(struct pmu *pmu)
{
	mutex_lock(&pmus_lock);
	list_del_rcu(&pmu->entry);
	mutex_unlock(&pmus_lock);

	/*
	 * We dereference the pmu list under both SRCU and regular RCU, so
	 * synchronize against both of those.
	 */
	synchronize_srcu(&pmus_srcu);
	synchronize_rcu();

	free_percpu(pmu->pmu_disable_count);
	if (pmu->type >= PERF_TYPE_MAX)
		idr_remove(&pmu_idr, pmu->type);
	device_del(pmu->dev);
	put_device(pmu->dev);
	free_pmu_context(pmu);
}

struct pmu *perf_init_event(struct perf_event *event)
{
	struct pmu *pmu = NULL;
	int idx;
	int ret;

	idx = srcu_read_lock(&pmus_srcu);

	rcu_read_lock();
	pmu = idr_find(&pmu_idr, event->attr.type);
	rcu_read_unlock();
	if (pmu) {
		event->pmu = pmu;
		ret = pmu->event_init(event);
		if (ret)
			pmu = ERR_PTR(ret);
		goto unlock;
	}

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		event->pmu = pmu;
		ret = pmu->event_init(event);
		if (!ret)
			goto unlock;

		if (ret != -ENOENT) {
			pmu = ERR_PTR(ret);
			goto unlock;
		}
	}
	pmu = ERR_PTR(-ENOENT);
unlock:
	srcu_read_unlock(&pmus_srcu, idx);

	return pmu;
}

/*
 * Allocate and initialize an event structure
 */
static struct perf_event *
perf_event_alloc(struct perf_event_attr *attr, int cpu,
		 struct task_struct *task,
		 struct perf_event *group_leader,
		 struct perf_event *parent_event,
		 perf_overflow_handler_t overflow_handler,
		 void *context)
{
	struct pmu *pmu;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	long err;

	if ((unsigned)cpu >= nr_cpu_ids) {
		if (!task || cpu != -1)
			return ERR_PTR(-EINVAL);
	}

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return ERR_PTR(-ENOMEM);

	/*
	 * Single events are their own group leaders, with an
	 * empty sibling list:
	 */
	if (!group_leader)
		group_leader = event;

	mutex_init(&event->child_mutex);
	INIT_LIST_HEAD(&event->child_list);

	INIT_LIST_HEAD(&event->group_entry);
	INIT_LIST_HEAD(&event->event_entry);
	INIT_LIST_HEAD(&event->sibling_list);
	INIT_LIST_HEAD(&event->rb_entry);

	init_waitqueue_head(&event->waitq);
	init_irq_work(&event->pending, perf_pending_event);

	mutex_init(&event->mmap_mutex);

	event->cpu		= cpu;
	event->attr		= *attr;
	event->group_leader	= group_leader;
	event->pmu		= NULL;
	event->oncpu		= -1;

	event->parent		= parent_event;

	event->ns		= get_pid_ns(current->nsproxy->pid_ns);
	event->id		= atomic64_inc_return(&perf_event_id);

	event->state		= PERF_EVENT_STATE_INACTIVE;

	if (task) {
		event->attach_state = PERF_ATTACH_TASK;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		/*
		 * hw_breakpoint is a bit difficult here..
		 */
		if (attr->type == PERF_TYPE_BREAKPOINT)
			event->hw.bp_target = task;
#endif
	}

	if (!overflow_handler && parent_event) {
		overflow_handler = parent_event->overflow_handler;
		context = parent_event->overflow_handler_context;
	}

	event->overflow_handler	= overflow_handler;
	event->overflow_handler_context = context;

	if (attr->disabled)
		event->state = PERF_EVENT_STATE_OFF;

	pmu = NULL;

	hwc = &event->hw;
	hwc->sample_period = attr->sample_period;
	if (attr->freq && attr->sample_freq)
		hwc->sample_period = 1;
	hwc->last_period = hwc->sample_period;

	local64_set(&hwc->period_left, hwc->sample_period);

	/*
	 * we currently do not support PERF_FORMAT_GROUP on inherited events
	 */
	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
		goto done;

	pmu = perf_init_event(event);

done:
	err = 0;
	if (!pmu)
		err = -EINVAL;
	else if (IS_ERR(pmu))
		err = PTR_ERR(pmu);

	if (err) {
		if (event->ns)
			put_pid_ns(event->ns);
		kfree(event);
		return ERR_PTR(err);
	}

	if (!event->parent) {
		if (event->attach_state & PERF_ATTACH_TASK)
			static_key_slow_inc(&perf_sched_events.key);
		if (event->attr.mmap || event->attr.mmap_data)
			atomic_inc(&nr_mmap_events);
		if (event->attr.comm)
			atomic_inc(&nr_comm_events);
		if (event->attr.task)
			atomic_inc(&nr_task_events);
		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
			err = get_callchain_buffers();
			if (err) {
				free_event(event);
				return ERR_PTR(err);
			}
		}
		if (has_branch_stack(event)) {
			static_key_slow_inc(&perf_sched_events.key);
			if (!(event->attach_state & PERF_ATTACH_TASK))
				atomic_inc(&per_cpu(perf_branch_stack_events,
						    event->cpu));
		}
	}

	return event;
}

static int perf_copy_attr(struct perf_event_attr __user *uattr,
			  struct perf_event_attr *attr)
{
	u32 size;
	int ret;

	if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
		return -EFAULT;

	/*
	 * zero the full structure, so that a short copy will be nice.
	 */
	memset(attr, 0, sizeof(*attr));

	ret = get_user(size, &uattr->size);
	if (ret)
		return ret;

	if (size > PAGE_SIZE)	/* silly large */
		goto err_size;

	if (!size)		/* abi compat */
		size = PERF_ATTR_SIZE_VER0;

	if (size < PERF_ATTR_SIZE_VER0)
		goto err_size;

	/*
	 * If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(*attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(*attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			ret = get_user(val, addr);
			if (ret)
				return ret;
			if (val)
				goto err_size;
		}
		size = sizeof(*attr);
	}

	ret = copy_from_user(attr, uattr, size);
	if (ret)
		return -EFAULT;

	if (attr->__reserved_1)
		return -EINVAL;

	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
		return -EINVAL;

	if (attr->read_format & ~(PERF_FORMAT_MAX-1))
		return -EINVAL;

	if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
		u64 mask = attr->branch_sample_type;

		/* only using defined bits */
		if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
			return -EINVAL;

		/* at least one branch bit must be set */
		if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
			return -EINVAL;

		/* kernel level capture: check permissions */
		if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
		    && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
			return -EACCES;

		/* propagate priv level, when not set for branch */
		if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {

			/* exclude_kernel checked on syscall entry */
			if (!attr->exclude_kernel)
				mask |= PERF_SAMPLE_BRANCH_KERNEL;

			if (!attr->exclude_user)
				mask |= PERF_SAMPLE_BRANCH_USER;

			if (!attr->exclude_hv)
				mask |= PERF_SAMPLE_BRANCH_HV;
			/*
			 * adjust user setting (for HW filter setup)
			 */
			attr->branch_sample_type = mask;
		}
	}
out:
	return ret;

err_size:
	put_user(sizeof(*attr), &uattr->size);
	ret = -E2BIG;
	goto out;
}
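
/*
 * Illustrative sketch (user-space side) of the size handshake checked
 * above: attr.size lets old binaries run on newer kernels and newer
 * binaries run on older kernels, as long as the unknown tail is zero:
 *
 *	struct perf_event_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.type   = PERF_TYPE_HARDWARE;
 *	attr.config = PERF_COUNT_HW_CPU_CYCLES;
 *	attr.size   = sizeof(attr);	// version handshake
 *
 * A kernel that knows about less than sizeof(attr) bytes only accepts
 * this when the bytes it does not understand are all zero (it returns
 * -E2BIG otherwise, writing the size it supports back to user space).
 */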

static int
perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
{
	struct ring_buffer *rb = NULL, *old_rb = NULL;
	int ret = -EINVAL;

	if (!output_event)
		goto set;

	/* don't allow circular references */
	if (event == output_event)
		goto out;

	/*
	 * Don't allow cross-cpu buffers
	 */
	if (output_event->cpu != event->cpu)
		goto out;

	/*
	 * If it's not a per-cpu rb, it must be the same task.
	 */
	if (output_event->cpu == -1 && output_event->ctx != event->ctx)
		goto out;

set:
	mutex_lock(&event->mmap_mutex);
	/* Can't redirect output if we've got an active mmap() */
	if (atomic_read(&event->mmap_count))
		goto unlock;

	if (output_event) {
		/* get the rb we want to redirect to */
		rb = ring_buffer_get(output_event);
		if (!rb)
			goto unlock;
	}

	old_rb = event->rb;
	rcu_assign_pointer(event->rb, rb);
	if (old_rb)
		ring_buffer_detach(event, old_rb);
	ret = 0;
unlock:
	mutex_unlock(&event->mmap_mutex);

	if (old_rb)
		ring_buffer_put(old_rb);
out:
	return ret;
}
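
/*
 * This is reached from user space via the PERF_EVENT_IOC_SET_OUTPUT
 * ioctl; an illustrative sketch (the fds are hypothetical):
 *
 *	// redirect samples of fd2 into the ring buffer of fd1;
 *	// both events must be on the same cpu, or in the same
 *	// task context for cpu == -1 events
 *	ioctl(fd2, PERF_EVENT_IOC_SET_OUTPUT, fd1);
 *
 * It is also reached from sys_perf_event_open() when user space
 * passes PERF_FLAG_FD_OUTPUT.
 */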

/**
 * sys_perf_event_open - open a performance event, associate it to a task/cpu
 *
 * @attr_uptr:	event_id type attributes for monitoring/sampling
 * @pid:		target pid
 * @cpu:		target cpu
 * @group_fd:		group leader event fd
 */
SYSCALL_DEFINE5(perf_event_open,
		struct perf_event_attr __user *, attr_uptr,
		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
	struct perf_event *group_leader = NULL, *output_event = NULL;
	struct perf_event *event, *sibling;
	struct perf_event_attr attr;
	struct perf_event_context *ctx;
	struct file *event_file = NULL;
	struct file *group_file = NULL;
	struct task_struct *task = NULL;
	struct pmu *pmu;
	int event_fd;
	int move_group = 0;
	int fput_needed = 0;
	int err;

	/* for future expandability... */
	if (flags & ~PERF_FLAG_ALL)
		return -EINVAL;

	err = perf_copy_attr(attr_uptr, &attr);
	if (err)
		return err;

	if (!attr.exclude_kernel) {
		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
	}

	if (attr.freq) {
		if (attr.sample_freq > sysctl_perf_event_sample_rate)
			return -EINVAL;
	}

	/*
	 * In cgroup mode, the pid argument is used to pass the fd
	 * opened to the cgroup directory in cgroupfs. The cpu argument
	 * designates the cpu on which to monitor threads from that
	 * cgroup.
	 */
	if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
		return -EINVAL;

	event_fd = get_unused_fd_flags(O_RDWR);
	if (event_fd < 0)
		return event_fd;

	if (group_fd != -1) {
		group_leader = perf_fget_light(group_fd, &fput_needed);
		if (IS_ERR(group_leader)) {
			err = PTR_ERR(group_leader);
			goto err_fd;
		}
		group_file = group_leader->filp;
		if (flags & PERF_FLAG_FD_OUTPUT)
			output_event = group_leader;
		if (flags & PERF_FLAG_FD_NO_GROUP)
			group_leader = NULL;
	}

	if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
		task = find_lively_task_by_vpid(pid);
		if (IS_ERR(task)) {
			err = PTR_ERR(task);
			goto err_group_fd;
		}
	}

	event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
				 NULL, NULL);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err_task;
	}

	if (flags & PERF_FLAG_PID_CGROUP) {
		err = perf_cgroup_connect(pid, event, &attr, group_leader);
		if (err)
			goto err_alloc;
		/*
		 * one more event:
		 * - that has cgroup constraint on event->cpu
		 * - that may need work on context switch
		 */
		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
		static_key_slow_inc(&perf_sched_events.key);
	}

	/*
	 * Special case software events and allow them to be part of
	 * any hardware group.
	 */
	pmu = event->pmu;

	if (group_leader &&
	    (is_software_event(event) != is_software_event(group_leader))) {
		if (is_software_event(event)) {
			/*
			 * If event and group_leader are not both a software
			 * event, and event is, then group leader is not.
			 *
			 * Allow the addition of software events to !software
			 * groups, this is safe because software events never
			 * fail to schedule.
			 */
			pmu = group_leader->pmu;
		} else if (is_software_event(group_leader) &&
			   (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
			/*
			 * In case the group is a pure software group, and we
			 * try to add a hardware event, move the whole group to
			 * the hardware context.
			 */
			move_group = 1;
		}
	}

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pmu, task, cpu);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err_alloc;
	}

	if (task) {
		put_task_struct(task);
		task = NULL;
	}

	/*
	 * Look up the group leader (we will attach this event to it):
	 */
	if (group_leader) {
		err = -EINVAL;

		/*
		 * Do not allow a recursive hierarchy (this new sibling
		 * becoming part of another group-sibling):
		 */
		if (group_leader->group_leader != group_leader)
			goto err_context;
		/*
		 * Do not allow attaching to a group in a different
		 * task or CPU context:
		 */
		if (move_group) {
			if (group_leader->ctx->type != ctx->type)
				goto err_context;
		} else {
			if (group_leader->ctx != ctx)
				goto err_context;
		}

		/*
		 * Only a group leader can be exclusive or pinned
		 */
		if (attr.exclusive || attr.pinned)
			goto err_context;
	}

	if (output_event) {
		err = perf_event_set_output(event, output_event);
		if (err)
			goto err_context;
	}

	event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
	if (IS_ERR(event_file)) {
		err = PTR_ERR(event_file);
		goto err_context;
	}

	if (move_group) {
		struct perf_event_context *gctx = group_leader->ctx;

		mutex_lock(&gctx->mutex);
		perf_remove_from_context(group_leader);
		list_for_each_entry(sibling, &group_leader->sibling_list,
				    group_entry) {
			perf_remove_from_context(sibling);
			put_ctx(gctx);
		}
		mutex_unlock(&gctx->mutex);
		put_ctx(gctx);
	}

	event->filp = event_file;
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);

	if (move_group) {
		perf_install_in_context(ctx, group_leader, cpu);
		get_ctx(ctx);
		list_for_each_entry(sibling, &group_leader->sibling_list,
				    group_entry) {
			perf_install_in_context(ctx, sibling, cpu);
			get_ctx(ctx);
		}
	}

	perf_install_in_context(ctx, event, cpu);
	++ctx->generation;
	perf_unpin_context(ctx);
	mutex_unlock(&ctx->mutex);

	event->owner = current;

	mutex_lock(&current->perf_event_mutex);
	list_add_tail(&event->owner_entry, &current->perf_event_list);
	mutex_unlock(&current->perf_event_mutex);

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(event);
	perf_event__id_header_size(event);

	/*
	 * Drop the reference on the group_event after placing the
	 * new event on the sibling_list. This ensures destruction
	 * of the group leader will find the pointer to itself in
	 * perf_group_detach().
	 */
	fput_light(group_file, fput_needed);
	fd_install(event_fd, event_file);
	return event_fd;

err_context:
	perf_unpin_context(ctx);
	put_ctx(ctx);
err_alloc:
	free_event(event);
err_task:
	if (task)
		put_task_struct(task);
err_group_fd:
	fput_light(group_file, fput_needed);
err_fd:
	put_unused_fd(event_fd);
	return err;
}
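
/*
 * Glibc provides no wrapper for this syscall; an illustrative
 * user-space sketch (error handling elided):
 *
 *	struct perf_event_attr attr;
 *	u64 count;
 *	int fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.type     = PERF_TYPE_HARDWARE;
 *	attr.config   = PERF_COUNT_HW_INSTRUCTIONS;
 *	attr.size     = sizeof(attr);
 *	attr.disabled = 1;
 *
 *	// count instructions of the calling thread on any cpu
 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	// ... workload ...
 *	read(fd, &count, sizeof(count));
 */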

/**
 * perf_event_create_kernel_counter
 *
 * @attr: attributes of the counter to create
 * @cpu: cpu in which the counter is bound
 * @task: task to profile (NULL for percpu)
 */
struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t overflow_handler,
				 void *context)
{
	struct perf_event_context *ctx;
	struct perf_event *event;
	int err;

	/*
	 * Get the target context (task or percpu):
	 */

	event = perf_event_alloc(attr, cpu, task, NULL, NULL,
				 overflow_handler, context);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err;
	}

	ctx = find_get_context(event->pmu, task, cpu);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err_free;
	}

	event->filp = NULL;
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, event, cpu);
	++ctx->generation;
	perf_unpin_context(ctx);
	mutex_unlock(&ctx->mutex);

	return event;

err_free:
	free_event(event);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
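
/*
 * Illustrative sketch of an in-kernel user (the hw_breakpoint and
 * watchdog code use this interface in a similar way; "my_overflow"
 * and the surrounding setup are hypothetical):
 *
 *	static void my_overflow(struct perf_event *event,
 *				struct perf_sample_data *data,
 *				struct pt_regs *regs)
 *	{
 *		// runs from NMI/IRQ context on counter overflow
 *	}
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *						 my_overflow, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 */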

static void sync_child_event(struct perf_event *child_event,
			       struct task_struct *child)
{
	struct perf_event *parent_event = child_event->parent;
	u64 child_val;

	if (child_event->attr.inherit_stat)
		perf_event_read_event(child_event, child);

	child_val = perf_event_count(child_event);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_event->child_count);
	atomic64_add(child_event->total_time_enabled,
		     &parent_event->child_total_time_enabled);
	atomic64_add(child_event->total_time_running,
		     &parent_event->child_total_time_running);

	/*
	 * Remove this event from the parent's list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_del_init(&child_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	/*
	 * Release the parent event, if this was the last
	 * reference to it.
	 */
	fput(parent_event->filp);
}

static void
__perf_event_exit_task(struct perf_event *child_event,
			 struct perf_event_context *child_ctx,
			 struct task_struct *child)
{
	if (child_event->parent) {
		raw_spin_lock_irq(&child_ctx->lock);
		perf_group_detach(child_event);
		raw_spin_unlock_irq(&child_ctx->lock);
	}

	perf_remove_from_context(child_event);

	/*
	 * It can happen that the parent exits first, and has events
	 * that are still around due to the child reference. These
	 * events need to be zapped.
	 */
	if (child_event->parent) {
		sync_child_event(child_event, child);
		free_event(child_event);
	}
}

static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
{
	struct perf_event *child_event, *tmp;
	struct perf_event_context *child_ctx;
	unsigned long flags;

	if (likely(!child->perf_event_ctxp[ctxn])) {
		perf_event_task(child, NULL, 0);
		return;
	}

	local_irq_save(flags);
	/*
	 * We can't reschedule here because interrupts are disabled,
	 * and either child is current or it is a task that can't be
	 * scheduled, so we are now safe from rescheduling changing
	 * our context.
	 */
	child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);

	/*
	 * Take the context lock here so that if find_get_context is
	 * reading child->perf_event_ctxp, we wait until it has
	 * incremented the context's refcount before we do put_ctx below.
	 */
	raw_spin_lock(&child_ctx->lock);
	task_ctx_sched_out(child_ctx);
	child->perf_event_ctxp[ctxn] = NULL;
	/*
	 * If this context is a clone, unclone it so it can't get
	 * swapped to another process while we're removing all
	 * the events from it.
	 */
	unclone_ctx(child_ctx);
	update_context_time(child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Report the task dead after unscheduling the events so that we
	 * won't get any samples after PERF_RECORD_EXIT. We can however still
	 * get a few PERF_RECORD_READ events.
	 */
	perf_event_task(child, child_ctx, 0);

	/*
	 * We can recurse on the same lock type through:
	 *
	 *   __perf_event_exit_task()
	 *     sync_child_event()
	 *       fput(parent_event->filp)
	 *         perf_release()
	 *           mutex_lock(&ctx->mutex)
	 *
	 * But since it's the parent context it won't be the same instance.
	 */
	mutex_lock(&child_ctx->mutex);

again:
	list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
				 group_entry)
		__perf_event_exit_task(child_event, child_ctx, child);

	list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
				 group_entry)
		__perf_event_exit_task(child_event, child_ctx, child);

	/*
	 * If the last event was a group event, it will have appended all
	 * its siblings to the list, but we obtained 'tmp' before that which
	 * will still point to the list head terminating the iteration.
	 */
	if (!list_empty(&child_ctx->pinned_groups) ||
	    !list_empty(&child_ctx->flexible_groups))
		goto again;

	mutex_unlock(&child_ctx->mutex);

	put_ctx(child_ctx);
}

/*
 * When a child task exits, feed back event values to parent events.
 */
void perf_event_exit_task(struct task_struct *child)
{
	struct perf_event *event, *tmp;
	int ctxn;

	mutex_lock(&child->perf_event_mutex);
	list_for_each_entry_safe(event, tmp, &child->perf_event_list,
				 owner_entry) {
		list_del_init(&event->owner_entry);

		/*
		 * Ensure the list deletion is visible before we clear
		 * the owner, closes a race against perf_release() where
		 * we need to serialize on the owner->perf_event_mutex.
		 */
		smp_wmb();
		event->owner = NULL;
	}
	mutex_unlock(&child->perf_event_mutex);

	for_each_task_context_nr(ctxn)
		perf_event_exit_task_context(child, ctxn);
}

static void perf_free_event(struct perf_event *event,
			    struct perf_event_context *ctx)
{
	struct perf_event *parent = event->parent;

	if (WARN_ON_ONCE(!parent))
		return;

	mutex_lock(&parent->child_mutex);
	list_del_init(&event->child_list);
	mutex_unlock(&parent->child_mutex);

	fput(parent->filp);

	perf_group_detach(event);
	list_del_event(event, ctx);
	free_event(event);
}

/*
 * Free an unexposed, unused context as created by inheritance by
 * perf_event_init_task below, used by fork() in case of failure.
 */
void perf_event_free_task(struct task_struct *task)
{
	struct perf_event_context *ctx;
	struct perf_event *event, *tmp;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		mutex_lock(&ctx->mutex);
again:
		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
				group_entry)
			perf_free_event(event, ctx);

		list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
				group_entry)
			perf_free_event(event, ctx);

		if (!list_empty(&ctx->pinned_groups) ||
				!list_empty(&ctx->flexible_groups))
			goto again;

		mutex_unlock(&ctx->mutex);

		put_ctx(ctx);
	}
}

void perf_event_delayed_put(struct task_struct *task)
{
	int ctxn;

	for_each_task_context_nr(ctxn)
		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
}

/*
 * inherit an event from parent task to child task:
 */
static struct perf_event *
inherit_event(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event *group_leader,
	      struct perf_event_context *child_ctx)
{
	struct perf_event *child_event;
	unsigned long flags;

	/*
	 * Instead of creating recursive hierarchies of events,
	 * we link inherited events back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_event->parent)
		parent_event = parent_event->parent;

	child_event = perf_event_alloc(&parent_event->attr,
					   parent_event->cpu,
					   child,
					   group_leader, parent_event,
				           NULL, NULL);
	if (IS_ERR(child_event))
		return child_event;
	get_ctx(child_ctx);

	/*
	 * Make the child state follow the state of the parent event,
	 * not its attr.disabled bit.  We hold the parent's mutex,
	 * so we won't race with perf_event_{en, dis}able_family.
	 */
	if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
		child_event->state = PERF_EVENT_STATE_INACTIVE;
	else
		child_event->state = PERF_EVENT_STATE_OFF;

	if (parent_event->attr.freq) {
		u64 sample_period = parent_event->hw.sample_period;
		struct hw_perf_event *hwc = &child_event->hw;

		hwc->sample_period = sample_period;
		hwc->last_period   = sample_period;

		local64_set(&hwc->period_left, sample_period);
	}

	child_event->ctx = child_ctx;
	child_event->overflow_handler = parent_event->overflow_handler;
	child_event->overflow_handler_context
		= parent_event->overflow_handler_context;

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(child_event);
	perf_event__id_header_size(child_event);

	/*
	 * Link it up in the child's context:
	 */
	raw_spin_lock_irqsave(&child_ctx->lock, flags);
	add_event_to_ctx(child_event, child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Get a reference to the parent filp - we will fput it
	 * when the child event exits. This is safe to do because
	 * we are in the parent and we know that the filp still
	 * exists and has a nonzero count:
	 */
	atomic_long_inc(&parent_event->filp->f_count);

	/*
	 * Link this into the parent event's child list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_add_tail(&child_event->child_list, &parent_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	return child_event;
}

static int inherit_group(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event_context *child_ctx)
{
	struct perf_event *leader;
	struct perf_event *sub;
	struct perf_event *child_ctr;

	leader = inherit_event(parent_event, parent, parent_ctx,
				 child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
		child_ctr = inherit_event(sub, parent, parent_ctx,
					    child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}

static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
		   struct perf_event_context *parent_ctx,
		   struct task_struct *child, int ctxn,
		   int *inherited_all)
{
	int ret;
	struct perf_event_context *child_ctx;

	if (!event->attr.inherit) {
		*inherited_all = 0;
		return 0;
	}

	child_ctx = child->perf_event_ctxp[ctxn];
	if (!child_ctx) {
		/*
		 * This is executed from the parent task context, so
		 * inherit events that have been marked for cloning.
		 * First allocate and initialize a context for the
		 * child.
		 */

		child_ctx = alloc_perf_context(event->pmu, child);
		if (!child_ctx)
			return -ENOMEM;

		child->perf_event_ctxp[ctxn] = child_ctx;
	}

	ret = inherit_group(event, parent, parent_ctx,
			    child, child_ctx);

	if (ret)
		*inherited_all = 0;

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_context(struct task_struct *child, int ctxn)
{
	struct perf_event_context *child_ctx, *parent_ctx;
	struct perf_event_context *cloned_ctx;
	struct perf_event *event;
	struct task_struct *parent = current;
	int inherited_all = 1;
	unsigned long flags;
	int ret = 0;

	if (likely(!parent->perf_event_ctxp[ctxn]))
		return 0;

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent, ctxn);

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	/*
	 * We can't hold ctx->lock when iterating the ->flexible_group list due
	 * to allocations, but we need to prevent rotation because
	 * rotate_ctx() will change the list from interrupt context.
	 */
	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 1;
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);

	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 0;

	child_ctx = child->perf_event_ctxp[ctxn];

	if (child_ctx && inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 *
		 * Note that if the parent is a clone, the holding of
		 * parent_ctx->lock prevents it from being uncloned.
		 */
		cloned_ctx = parent_ctx->parent_ctx;
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);
	put_ctx(parent_ctx);

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_task(struct task_struct *child)
{
	int ctxn, ret;

	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
	mutex_init(&child->perf_event_mutex);
	INIT_LIST_HEAD(&child->perf_event_list);

	for_each_task_context_nr(ctxn) {
		ret = perf_event_init_context(child, ctxn);
		if (ret)
			return ret;
	}

	return 0;
}

static void __init perf_event_init_all_cpus(void)
{
	struct swevent_htable *swhash;
	int cpu;

	for_each_possible_cpu(cpu) {
		swhash = &per_cpu(swevent_htable, cpu);
		mutex_init(&swhash->hlist_mutex);
		INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
	}
}

static void __cpuinit perf_event_init_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	if (swhash->hlist_refcount > 0) {
		struct swevent_hlist *hlist;

		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
		WARN_ON(!hlist);
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	mutex_unlock(&swhash->hlist_mutex);
}

#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
static void perf_pmu_rotate_stop(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

	WARN_ON(!irqs_disabled());

	list_del_init(&cpuctx->rotation_list);
}

static void __perf_event_exit_context(void *__info)
{
	struct perf_event_context *ctx = __info;
	struct perf_event *event, *tmp;

	perf_pmu_rotate_stop(ctx->pmu);

	list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
		__perf_remove_from_context(event);
	list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
		__perf_remove_from_context(event);
}

static void perf_event_exit_cpu_context(int cpu)
{
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;

		mutex_lock(&ctx->mutex);
		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
		mutex_unlock(&ctx->mutex);
	}
	srcu_read_unlock(&pmus_srcu, idx);
}

static void perf_event_exit_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	swevent_hlist_release(swhash);
	mutex_unlock(&swhash->hlist_mutex);

	perf_event_exit_cpu_context(cpu);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif

static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{
	int cpu;

	for_each_online_cpu(cpu)
		perf_event_exit_cpu(cpu);

	return NOTIFY_OK;
}

/*
 * Run the perf reboot notifier at the very last possible moment so that
 * the generic watchdog code runs as long as possible.
 */
static struct notifier_block perf_reboot_notifier = {
	.notifier_call = perf_reboot,
	.priority = INT_MIN,
};

static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_UP_PREPARE:
	case CPU_DOWN_FAILED:
		perf_event_init_cpu(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DOWN_PREPARE:
		perf_event_exit_cpu(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

void __init perf_event_init(void)
{
	int ret;

	idr_init(&pmu_idr);

	perf_event_init_all_cpus();
	init_srcu_struct(&pmus_srcu);
	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
	perf_pmu_register(&perf_cpu_clock, NULL, -1);
	perf_pmu_register(&perf_task_clock, NULL, -1);
	perf_tp_register();
	perf_cpu_notifier(perf_cpu_notify);
	register_reboot_notifier(&perf_reboot_notifier);

	ret = init_hw_breakpoint();
	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);

	/* do not patch jump label more than once per second */
	jump_label_rate_limit(&perf_sched_events, HZ);

	/*
	 * Build time assertion that we keep the data_head at the intended
	 * location.  IOW, validation we got the __reserved[] size right.
	 */
	BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
		     != 1024);
}

static int __init perf_event_sysfs_init(void)
{
	struct pmu *pmu;
	int ret;

	mutex_lock(&pmus_lock);

	ret = bus_register(&pmu_bus);
	if (ret)
		goto unlock;

	list_for_each_entry(pmu, &pmus, entry) {
		if (!pmu->name || pmu->type < 0)
			continue;

		ret = pmu_dev_alloc(pmu);
		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
	}
	pmu_bus_running = 1;
	ret = 0;

unlock:
	mutex_unlock(&pmus_lock);

	return ret;
}
device_initcall(perf_event_sysfs_init);

#ifdef CONFIG_CGROUP_PERF
static struct cgroup_subsys_state *perf_cgroup_create(struct cgroup *cont)
{
	struct perf_cgroup *jc;

	jc = kzalloc(sizeof(*jc), GFP_KERNEL);
	if (!jc)
		return ERR_PTR(-ENOMEM);

	jc->info = alloc_percpu(struct perf_cgroup_info);
	if (!jc->info) {
		kfree(jc);
		return ERR_PTR(-ENOMEM);
	}

	return &jc->css;
}

static void perf_cgroup_destroy(struct cgroup *cont)
{
	struct perf_cgroup *jc;
	jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
			  struct perf_cgroup, css);
	free_percpu(jc->info);
	kfree(jc);
}

static int __perf_cgroup_move(void *info)
{
	struct task_struct *task = info;
	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
	return 0;
}

static void perf_cgroup_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;

	cgroup_taskset_for_each(task, cgrp, tset)
		task_function_call(task, __perf_cgroup_move, task);
}

static void perf_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
			     struct task_struct *task)
{
	/*
	 * cgroup_exit() is called in the copy_process() failure path.
	 * Ignore this case since the task hasn't run yet; this avoids
	 * trying to poke a half freed task state from generic code.
	 */
	if (!(task->flags & PF_EXITING))
		return;

	task_function_call(task, __perf_cgroup_move, task);
}

struct cgroup_subsys perf_subsys = {
	.name		= "perf_event",
	.subsys_id	= perf_subsys_id,
	.create		= perf_cgroup_create,
	.destroy	= perf_cgroup_destroy,
	.exit		= perf_cgroup_exit,
	.attach		= perf_cgroup_attach,
};
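
/*
 * Illustrative sketch (user-space side): once this cgroup v1 subsystem
 * is mounted, e.g.
 *
 *	mount -t cgroup -o perf_event none /sys/fs/cgroup/perf_event
 *	mkdir /sys/fs/cgroup/perf_event/mygroup
 *
 * an fd of the cgroup directory can be passed as the pid argument of
 * sys_perf_event_open() together with PERF_FLAG_PID_CGROUP to monitor
 * only tasks in that cgroup (the perf tool exposes this via its
 * --cgroup option).  The mount point and group name here are
 * hypothetical.
 */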
#endif /* CONFIG_CGROUP_PERF */