/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright    2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>

#include "internal.h"

#include <asm/irq_regs.h>

struct remote_function_call {
	struct task_struct	*p;
	int			(*func)(void *info);
	void			*info;
	int			ret;
};

static void remote_function(void *data)
{
	struct remote_function_call *tfc = data;
	struct task_struct *p = tfc->p;

	if (p) {
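		/*
		 * A specific task was requested: it must still be the
		 * current task on this CPU, otherwise report -EAGAIN so
		 * that task_function_call() can retry.
		 */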
		tfc->ret = -EAGAIN;
		if (task_cpu(p) != smp_processor_id() || !task_curr(p))
			return;
	}

	tfc->ret = tfc->func(tfc->info);
}

/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:		the task to evaluate
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, in which case the function is called directly.
 *
 * returns: @func return value, or
 *	    -ESRCH  - when the process isn't running
 *	    -EAGAIN - when the process moved away
 */
static int
task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
{
	struct remote_function_call data = {
		.p	= p,
		.func	= func,
		.info	= info,
		.ret	= -ESRCH, /* No such (running) process */
	};

	if (task_curr(p))
		smp_call_function_single(task_cpu(p), remote_function, &data, 1);

	return data.ret;
}

/**
 * cpu_function_call - call a function on a given cpu
 * @cpu:	the cpu on which to run the function
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
{
	struct remote_function_call data = {
		.p	= NULL,
		.func	= func,
		.info	= info,
		.ret	= -ENXIO, /* No such CPU */
	};

	smp_call_function_single(cpu, remote_function, &data, 1);

	return data.ret;
}

#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
		       PERF_FLAG_FD_OUTPUT  |\
		       PERF_FLAG_PID_CGROUP)

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

/*
 * perf_sched_events : >0 events exist
 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
 */
struct jump_label_key_deferred perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;

/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */

/*
 * max perf event sample rate
 */
#define DEFAULT_MAX_SAMPLE_RATE 100000
int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
static int max_samples_per_tick __read_mostly =
	DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);

int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);

	return 0;
}

static atomic64_t perf_event_id;

static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type);

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type,
			     struct task_struct *task);

static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);

static void ring_buffer_attach(struct perf_event *event,
			       struct ring_buffer *rb);

void __weak perf_event_print_debug(void)	{ }

extern __weak const char *perf_pmu_name(void)
{
	return "pmu";
}

static inline u64 perf_clock(void)
{
	return local_clock();
}

static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}

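/*
 * Lock the CPU context and, if one is given, the task context nested
 * inside it; perf_ctx_unlock() drops them in the reverse order.
 */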
static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
			  struct perf_event_context *ctx)
{
	raw_spin_lock(&cpuctx->ctx.lock);
	if (ctx)
		raw_spin_lock(&ctx->lock);
}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
			    struct perf_event_context *ctx)
{
	if (ctx)
		raw_spin_unlock(&ctx->lock);
	raw_spin_unlock(&cpuctx->ctx.lock);
}

#ifdef CONFIG_CGROUP_PERF

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task)
{
	return container_of(task_subsys_state(task, perf_subsys_id),
			struct perf_cgroup, css);
}

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	return !event->cgrp || event->cgrp == cpuctx->cgrp;
}

static inline void perf_get_cgroup(struct perf_event *event)
{
	css_get(&event->cgrp->css);
}

static inline void perf_put_cgroup(struct perf_event *event)
{
	css_put(&event->cgrp->css);
}

static inline void perf_detach_cgroup(struct perf_event *event)
{
	perf_put_cgroup(event);
	event->cgrp = NULL;
}

static inline int is_cgroup_event(struct perf_event *event)
{
	return event->cgrp != NULL;
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	struct perf_cgroup_info *t;

	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	return t->time;
}

static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
{
	struct perf_cgroup_info *info;
	u64 now;

	now = perf_clock();

	info = this_cpu_ptr(cgrp->info);

	info->time += now - info->timestamp;
	info->timestamp = now;
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
	struct perf_cgroup *cgrp_out = cpuctx->cgrp;
	if (cgrp_out)
		__update_cgrp_time(cgrp_out);
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
	struct perf_cgroup *cgrp;

	/*
	 * ensure we access cgroup data only when needed and
	 * when we know the cgroup is pinned (css_get)
	 */
	if (!is_cgroup_event(event))
		return;

	cgrp = perf_cgroup_from_task(current);
	/*
	 * Do not update time when cgroup is not active
	 */
	if (cgrp == event->cgrp)
		__update_cgrp_time(event->cgrp);
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
	struct perf_cgroup *cgrp;
	struct perf_cgroup_info *info;

	/*
	 * ctx->lock held by caller
	 * ensure we do not access cgroup data
	 * unless we have the cgroup pinned (css_get)
	 */
	if (!task || !ctx->nr_cgroups)
		return;

	cgrp = perf_cgroup_from_task(task);
	info = this_cpu_ptr(cgrp->info);
	info->timestamp = ctx->timestamp;
}

#define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
#define PERF_CGROUP_SWIN	0x2 /* cgroup switch in events based on task */

/*
 * reschedule events based on the cgroup constraint of task.
 *
 * mode SWOUT : schedule out everything
 * mode SWIN : schedule in based on cgroup for next
 */
void perf_cgroup_switch(struct task_struct *task, int mode)
{
	struct perf_cpu_context *cpuctx;
	struct pmu *pmu;
	unsigned long flags;

	/*
	 * disable interrupts to avoid getting nr_cgroup
	 * changes via __perf_event_disable(). Also
	 * avoids preemption.
	 */
	local_irq_save(flags);

	/*
	 * we reschedule only in the presence of cgroup
	 * constrained events.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

		/*
		 * perf_cgroup_events says at least one
		 * context on this CPU has cgroup events.
		 *
		 * ctx->nr_cgroups reports the number of cgroup
		 * events for a context.
		 */
		if (cpuctx->ctx.nr_cgroups > 0) {
			perf_ctx_lock(cpuctx, cpuctx->task_ctx);
			perf_pmu_disable(cpuctx->ctx.pmu);

			if (mode & PERF_CGROUP_SWOUT) {
				cpu_ctx_sched_out(cpuctx, EVENT_ALL);
				/*
				 * must not be done before ctxswout due
				 * to event_filter_match() in event_sched_out()
				 */
				cpuctx->cgrp = NULL;
			}

			if (mode & PERF_CGROUP_SWIN) {
				WARN_ON_ONCE(cpuctx->cgrp);
				/* set cgrp before ctxsw in to
				 * allow event_filter_match() to not
				 * have to pass task around
				 */
				cpuctx->cgrp = perf_cgroup_from_task(task);
				cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
			}
			perf_pmu_enable(cpuctx->ctx.pmu);
			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
		}
	}

	rcu_read_unlock();

	local_irq_restore(flags);
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/*
	 * next is NULL when called from perf_event_enable_on_exec()
	 * that will systematically cause a cgroup_switch()
	 */
	if (next)
		cgrp2 = perf_cgroup_from_task(next);

	/*
	 * only schedule out current cgroup events if we know
	 * that we are switching to a different cgroup. Otherwise,
	 * do not touch the cgroup events.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/* prev can never be NULL */
	cgrp2 = perf_cgroup_from_task(prev);

	/*
	 * only need to schedule in cgroup events if we are changing
	 * cgroup during ctxsw. Cgroup events were not scheduled
	 * out of ctxsw out if that was not the case.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWIN);
}

static inline int perf_cgroup_connect(int fd, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	struct perf_cgroup *cgrp;
	struct cgroup_subsys_state *css;
	struct file *file;
	int ret = 0, fput_needed;

	file = fget_light(fd, &fput_needed);
	if (!file)
		return -EBADF;

	css = cgroup_css_from_dir(file, perf_subsys_id);
	if (IS_ERR(css)) {
		ret = PTR_ERR(css);
		goto out;
	}

	cgrp = container_of(css, struct perf_cgroup, css);
	event->cgrp = cgrp;

	/* must be done before we fput() the file */
	perf_get_cgroup(event);

	/*
	 * all events in a group must monitor
	 * the same cgroup because a task belongs
	 * to only one perf cgroup at a time
	 */
	if (group_leader && group_leader->cgrp != cgrp) {
		perf_detach_cgroup(event);
		ret = -EINVAL;
	}
out:
	fput_light(file, fput_needed);
	return ret;
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
	struct perf_cgroup_info *t;
	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	event->shadow_ctx_time = now - t->timestamp;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
	/*
	 * when the current task's perf cgroup does not match
	 * the event's, we need to remember to call the
	 * perf_mark_enable() function the first time a task with
	 * a matching perf cgroup is scheduled in.
	 */
	if (is_cgroup_event(event) && !perf_cgroup_match(event))
		event->cgrp_defer_enabled = 1;
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
	struct perf_event *sub;
	u64 tstamp = perf_event_time(event);

	if (!event->cgrp_defer_enabled)
		return;

	event->cgrp_defer_enabled = 0;

	event->tstamp_enabled = tstamp - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
			sub->cgrp_defer_enabled = 0;
		}
	}
}
#else /* !CONFIG_CGROUP_PERF */

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	return true;
}

static inline void perf_detach_cgroup(struct perf_event *event)
{}

static inline int is_cgroup_event(struct perf_event *event)
{
	return 0;
}

static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
{
	return 0;
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
}

static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	return -EINVAL;
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
}

void
perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	return 0;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
}
#endif

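/*
 * pmu_disable_count is a per-cpu nesting counter: the PMU is really
 * disabled on the first perf_pmu_disable() and re-enabled only when the
 * matching number of perf_pmu_enable() calls has been made.
 */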
void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!--(*count))
		pmu->pmu_enable(pmu);
}

static DEFINE_PER_CPU(struct list_head, rotation_list);

/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 * because they're strictly cpu affine and rotate_start is called with IRQs
 * disabled, while rotate_context is called from IRQ context.
 */
static void perf_pmu_rotate_start(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
	struct list_head *head = &__get_cpu_var(rotation_list);

	WARN_ON(!irqs_disabled());

	if (list_empty(&cpuctx->rotation_list))
		list_add(&cpuctx->rotation_list, head);
}

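/*
 * A perf_event_context is reference counted; get_ctx()/put_ctx() pair up,
 * and the context (plus its parent and task references) is released once
 * the last reference is dropped.
 */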
static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		kfree_rcu(ctx, rcu_head);
	}
}

static void unclone_ctx(struct perf_event_context *ctx)
{
	if (ctx->parent_ctx) {
		put_ctx(ctx->parent_ctx);
		ctx->parent_ctx = NULL;
	}
}

static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_tgid_nr_ns(p, event->ns);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_pid_nr_ns(p, event->ns);
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}

/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
	struct perf_event_context *ctx;

	rcu_read_lock();
retry:
	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed.  Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so.  If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

static u64 perf_event_time(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	if (is_cgroup_event(event))
		return perf_cgroup_event_time(event);

	return ctx ? ctx->time : 0;
}

/*
 * Update the total_time_enabled and total_time_running fields for an event.
 * The caller of this function needs to hold the ctx->lock.
 */
static void update_event_times(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	u64 run_end;

	if (event->state < PERF_EVENT_STATE_INACTIVE ||
	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
		return;
	/*
	 * in cgroup mode, time_enabled represents
	 * the time the event was enabled AND active
	 * tasks were in the monitored cgroup. This is
	 * independent of the activity of the context as
	 * there may be a mix of cgroup and non-cgroup events.
	 *
	 * That is why we treat cgroup events differently
	 * here.
	 */
	if (is_cgroup_event(event))
		run_end = perf_event_time(event);
	else if (ctx->is_active)
		run_end = ctx->time;
	else
		run_end = event->tstamp_stopped;

	event->total_time_enabled = run_end - event->tstamp_enabled;

	if (event->state == PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;
	else
		run_end = perf_event_time(event);

	event->total_time_running = run_end - event->tstamp_running;

}

/*
 * Update total_time_enabled and total_time_running for all events in a group.
 */
static void update_group_times(struct perf_event *leader)
{
	struct perf_event *event;

	update_event_times(leader);
	list_for_each_entry(event, &leader->sibling_list, group_entry)
		update_event_times(event);
}

static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
	if (event->attr.pinned)
		return &ctx->pinned_groups;
	else
		return &ctx->flexible_groups;
}

/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
	event->attach_state |= PERF_ATTACH_CONTEXT;

	/*
	 * If we're a stand alone event or group leader, we go to the context
	 * list, group events are kept attached to the group so that
	 * perf_group_detach can, at all times, locate all siblings.
	 */
	if (event->group_leader == event) {
		struct list_head *list;

		if (is_software_event(event))
			event->group_flags |= PERF_GROUP_SOFTWARE;

		list = ctx_group_list(event, ctx);
		list_add_tail(&event->group_entry, list);
	}

	if (is_cgroup_event(event))
		ctx->nr_cgroups++;

	list_add_rcu(&event->event_entry, &ctx->event_list);
	if (!ctx->nr_events)
		perf_pmu_rotate_start(ctx->pmu);
	ctx->nr_events++;
	if (event->attr.inherit_stat)
		ctx->nr_stat++;
}

/*
 * Called at perf_event creation and when events are attached/detached from a
 * group.
 */
static void perf_event__read_size(struct perf_event *event)
{
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_GROUP) {
		nr += event->group_leader->nr_siblings;
		size += sizeof(u64);
	}

	size += entry * nr;
	event->read_size = size;
}

static void perf_event__header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	perf_event__read_size(event);

	if (sample_type & PERF_SAMPLE_IP)
		size += sizeof(data->ip);

	if (sample_type & PERF_SAMPLE_ADDR)
		size += sizeof(data->addr);

	if (sample_type & PERF_SAMPLE_PERIOD)
		size += sizeof(data->period);

	if (sample_type & PERF_SAMPLE_READ)
		size += event->read_size;

	event->header_size = size;
}

static void perf_event__id_header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu_entry);

	event->id_header_size = size;
}

static void perf_group_attach(struct perf_event *event)
{
	struct perf_event *group_leader = event->group_leader, *pos;

	/*
	 * We can have double attach due to group movement in perf_event_open.
	 */
	if (event->attach_state & PERF_ATTACH_GROUP)
		return;

	event->attach_state |= PERF_ATTACH_GROUP;

	if (group_leader == event)
		return;

	if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
			!is_software_event(event))
		group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

	list_add_tail(&event->group_entry, &group_leader->sibling_list);
	group_leader->nr_siblings++;

	perf_event__header_size(group_leader);

	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
		perf_event__header_size(pos);
}

/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
1000
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
1001
{
1002
	struct perf_cpu_context *cpuctx;
1003 1004 1005 1006
	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
1007
		return;
1008 1009 1010

	event->attach_state &= ~PERF_ATTACH_CONTEXT;

	if (is_cgroup_event(event)) {
		ctx->nr_cgroups--;
		cpuctx = __get_cpu_context(ctx);
		/*
		 * if there are no more cgroup events
		 * then clear cgrp to avoid a stale pointer
		 * in update_cgrp_time_from_cpuctx()
		 */
		if (!ctx->nr_cgroups)
			cpuctx->cgrp = NULL;
	}

1023 1024
	ctx->nr_events--;
	if (event->attr.inherit_stat)
1025
		ctx->nr_stat--;
1026

1027
	list_del_rcu(&event->event_entry);
1028

1029 1030
	if (event->group_leader == event)
		list_del_init(&event->group_entry);
P
Peter Zijlstra 已提交
1031

1032
	update_group_times(event);
1033 1034 1035 1036 1037 1038 1039 1040 1041 1042

	/*
	 * If event was in error state, then keep it
	 * that way, otherwise bogus counts will be
	 * returned on read(). The only way to get out
	 * of error state is by explicit re-enabling
	 * of the event
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;
1043 1044
}

1045
static void perf_group_detach(struct perf_event *event)
1046 1047
{
	struct perf_event *sibling, *tmp;
1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063
	struct list_head *list = NULL;

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_GROUP))
		return;

	event->attach_state &= ~PERF_ATTACH_GROUP;

	/*
	 * If this is a sibling, remove it from its group.
	 */
	if (event->group_leader != event) {
		list_del_init(&event->group_entry);
		event->group_leader->nr_siblings--;
1064
		goto out;
1065 1066 1067 1068
	}

	if (!list_empty(&event->group_entry))
		list = &event->group_entry;
1069

1070
	/*
1071 1072
	 * If this was a group event with sibling events then
	 * upgrade the siblings to singleton events by adding them
1073
	 * to whatever list we are on.
1074
	 */
1075
	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
1076 1077
		if (list)
			list_move_tail(&sibling->group_entry, list);
1078
		sibling->group_leader = sibling;
1079 1080 1081

		/* Inherit group flags from the previous leader */
		sibling->group_flags = event->group_flags;
1082
	}
1083 1084 1085 1086 1087 1088

out:
	perf_event__header_size(event->group_leader);

	list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
		perf_event__header_size(tmp);
}

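/*
 * An event is eligible to run here if it is bound to this CPU (or to no
 * particular CPU) and its cgroup constraint matches the current cgroup.
 */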
static inline int
event_filter_match(struct perf_event *event)
{
	return (event->cpu == -1 || event->cpu == smp_processor_id())
	    && perf_cgroup_match(event);
}

1098 1099
static void
event_sched_out(struct perf_event *event,
1100
		  struct perf_cpu_context *cpuctx,
1101
		  struct perf_event_context *ctx)
1102
{
1103
	u64 tstamp = perf_event_time(event);
	u64 delta;
	/*
	 * An event which could not be activated because of
	 * filter mismatch still needs to have its timings
	 * maintained, otherwise bogus information is returned
	 * via read() for time_enabled, time_running:
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE
	    && !event_filter_match(event)) {
		delta = tstamp - event->tstamp_stopped;
1114
		event->tstamp_running += delta;
1115
		event->tstamp_stopped = tstamp;
1116 1117
	}

1118
	if (event->state != PERF_EVENT_STATE_ACTIVE)
1119
		return;
1120

1121 1122 1123 1124
	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;
1125
	}
1126
	event->tstamp_stopped = tstamp;
P
Peter Zijlstra 已提交
1127
	event->pmu->del(event, 0);
1128
	event->oncpu = -1;
1129

1130
	if (!is_software_event(event))
1131 1132
		cpuctx->active_oncpu--;
	ctx->nr_active--;
1133 1134
	if (event->attr.freq && event->attr.sample_freq)
		ctx->nr_freq--;
1135
	if (event->attr.exclusive || !cpuctx->active_oncpu)
1136 1137 1138
		cpuctx->exclusive = 0;
}

1139
static void
1140
group_sched_out(struct perf_event *group_event,
1141
		struct perf_cpu_context *cpuctx,
1142
		struct perf_event_context *ctx)
1143
{
1144
	struct perf_event *event;
1145
	int state = group_event->state;
1146

1147
	event_sched_out(group_event, cpuctx, ctx);
1148 1149 1150 1151

	/*
	 * Schedule out siblings (if any):
	 */
1152 1153
	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		event_sched_out(event, cpuctx, ctx);
1154

1155
	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
1156 1157 1158
		cpuctx->exclusive = 0;
}

T
Thomas Gleixner 已提交
1159
/*
1160
 * Cross CPU call to remove a performance event
T
Thomas Gleixner 已提交
1161
 *
1162
 * We disable the event on the hardware level first. After that we
T
Thomas Gleixner 已提交
1163 1164
 * remove it from the context list.
 */
1165
static int __perf_remove_from_context(void *info)
T
Thomas Gleixner 已提交
1166
{
1167 1168
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
P
Peter Zijlstra 已提交
1169
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
T
Thomas Gleixner 已提交
1170

1171
	raw_spin_lock(&ctx->lock);
1172 1173
	event_sched_out(event, cpuctx, ctx);
	list_del_event(event, ctx);
1174 1175 1176 1177
	if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
		ctx->is_active = 0;
		cpuctx->task_ctx = NULL;
	}
1178
	raw_spin_unlock(&ctx->lock);
1179 1180

	return 0;
T
Thomas Gleixner 已提交
1181 1182 1183 1184
}


/*
1185
 * Remove the event from a task's (or a CPU's) list of events.
T
Thomas Gleixner 已提交
1186
 *
1187
 * CPU events are removed with a smp call. For task events we only
T
Thomas Gleixner 已提交
1188
 * call when the task is on a CPU.
1189
 *
1190 1191
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
1192 1193
 * remains valid.  This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
1194
 * When called from perf_event_exit_task, it's OK because the
1195
 * context has been detached from its task.
T
Thomas Gleixner 已提交
1196
 */
1197
static void perf_remove_from_context(struct perf_event *event)
T
Thomas Gleixner 已提交
1198
{
1199
	struct perf_event_context *ctx = event->ctx;
T
Thomas Gleixner 已提交
1200 1201
	struct task_struct *task = ctx->task;

1202 1203
	lockdep_assert_held(&ctx->mutex);

T
Thomas Gleixner 已提交
1204 1205
	if (!task) {
		/*
1206
		 * Per cpu events are removed via an smp call and
1207
		 * the removal is always successful.
T
Thomas Gleixner 已提交
1208
		 */
1209
		cpu_function_call(event->cpu, __perf_remove_from_context, event);
T
Thomas Gleixner 已提交
1210 1211 1212 1213
		return;
	}

retry:
1214 1215
	if (!task_function_call(task, __perf_remove_from_context, event))
		return;
T
Thomas Gleixner 已提交
1216

1217
	raw_spin_lock_irq(&ctx->lock);
T
Thomas Gleixner 已提交
1218
	/*
1219 1220
	 * If we failed to find a running task, but find the context active now
	 * that we've acquired the ctx->lock, retry.
T
Thomas Gleixner 已提交
1221
	 */
1222
	if (ctx->is_active) {
1223
		raw_spin_unlock_irq(&ctx->lock);
T
Thomas Gleixner 已提交
1224 1225 1226 1227
		goto retry;
	}

	/*
	 * Since the task isn't running, it's safe to remove the event;
	 * holding the ctx->lock ensures the task won't get scheduled in.
	 */
1231
	list_del_event(event, ctx);
1232
	raw_spin_unlock_irq(&ctx->lock);
T
Thomas Gleixner 已提交
1233 1234
}

1235
/*
1236
 * Cross CPU call to disable a performance event
1237
 */
1238
static int __perf_event_disable(void *info)
1239
{
1240 1241
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
P
Peter Zijlstra 已提交
1242
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1243 1244

	/*
1245 1246
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
1247 1248 1249
	 *
	 * Can trigger due to concurrent perf_event_context_sched_out()
	 * flipping contexts around.
1250
	 */
1251
	if (ctx->task && cpuctx->task_ctx != ctx)
1252
		return -EINVAL;
1253

1254
	raw_spin_lock(&ctx->lock);
1255 1256

	/*
1257
	 * If the event is on, turn it off.
1258 1259
	 * If it is in error state, leave it in error state.
	 */
1260
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
1261
		update_context_time(ctx);
S
Stephane Eranian 已提交
1262
		update_cgrp_time_from_event(event);
1263 1264 1265
		update_group_times(event);
		if (event == event->group_leader)
			group_sched_out(event, cpuctx, ctx);
1266
		else
1267 1268
			event_sched_out(event, cpuctx, ctx);
		event->state = PERF_EVENT_STATE_OFF;
1269 1270
	}

1271
	raw_spin_unlock(&ctx->lock);
1272 1273

	return 0;
1274 1275 1276
}

/*
 * Disable an event.
 *
1279 1280
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
1286
 * is the current context on this CPU and preemption is disabled,
1287
 * hence we can't get into perf_event_task_sched_out for this context.
1288
 */
1289
void perf_event_disable(struct perf_event *event)
1290
{
1291
	struct perf_event_context *ctx = event->ctx;
1292 1293 1294 1295
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
1296
		 * Disable the event on the cpu that it's on
1297
		 */
1298
		cpu_function_call(event->cpu, __perf_event_disable, event);
1299 1300 1301
		return;
	}

P
Peter Zijlstra 已提交
1302
retry:
1303 1304
	if (!task_function_call(task, __perf_event_disable, event))
		return;
1305

1306
	raw_spin_lock_irq(&ctx->lock);
1307
	/*
1308
	 * If the event is still active, we need to retry the cross-call.
1309
	 */
1310
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
1311
		raw_spin_unlock_irq(&ctx->lock);
1312 1313 1314 1315 1316
		/*
		 * Reload the task pointer, it might have been changed by
		 * a concurrent perf_event_context_sched_out().
		 */
		task = ctx->task;
1317 1318 1319 1320 1321 1322 1323
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
1324 1325 1326
	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_group_times(event);
		event->state = PERF_EVENT_STATE_OFF;
1327
	}
1328
	raw_spin_unlock_irq(&ctx->lock);
1329
}
1330
EXPORT_SYMBOL_GPL(perf_event_disable);
1331

static void perf_set_shadow_time(struct perf_event *event,
				 struct perf_event_context *ctx,
				 u64 tstamp)
{
	/*
	 * use the correct time source for the time snapshot
	 *
	 * We could get by without this by leveraging the
	 * fact that to get to this function, the caller
	 * has most likely already called update_context_time()
	 * and update_cgrp_time_xx() and thus both timestamps
	 * are identical (or very close). Given that tstamp is
	 * already adjusted for cgroup, we could say that:
	 *    tstamp - ctx->timestamp
	 * is equivalent to
	 *    tstamp - cgrp->timestamp.
	 *
	 * Then, in perf_output_read(), the calculation would
	 * work with no changes because:
	 * - event is guaranteed scheduled in
	 * - no scheduled out in between
	 * - thus the timestamp would be the same
	 *
	 * But this is a bit hairy.
	 *
	 * So instead, we have an explicit cgroup call to remain
	 * within the time source all along. We believe it
	 * is cleaner and simpler to understand.
	 */
	if (is_cgroup_event(event))
		perf_cgroup_set_shadow_time(event, tstamp);
	else
		event->shadow_ctx_time = tstamp - ctx->timestamp;
}

#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_event *event, int enable);

1371
static int
1372
event_sched_in(struct perf_event *event,
1373
		 struct perf_cpu_context *cpuctx,
1374
		 struct perf_event_context *ctx)
1375
{
1376 1377
	u64 tstamp = perf_event_time(event);

1378
	if (event->state <= PERF_EVENT_STATE_OFF)
1379 1380
		return 0;

1381
	event->state = PERF_EVENT_STATE_ACTIVE;
1382
	event->oncpu = smp_processor_id();
P
Peter Zijlstra 已提交
1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393

	/*
	 * Unthrottle events, since we scheduled we might have missed several
	 * ticks already, also for a heavily scheduling task there is little
	 * guarantee it'll get a tick in a timely manner.
	 */
	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
		perf_log_throttle(event, 1);
		event->hw.interrupts = 0;
	}

1394 1395 1396 1397 1398
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

P
Peter Zijlstra 已提交
1399
	if (event->pmu->add(event, PERF_EF_START)) {
1400 1401
		event->state = PERF_EVENT_STATE_INACTIVE;
		event->oncpu = -1;
1402 1403 1404
		return -EAGAIN;
	}

1405
	event->tstamp_running += tstamp - event->tstamp_stopped;
1406

S
Stephane Eranian 已提交
1407
	perf_set_shadow_time(event, ctx, tstamp);
1408

1409
	if (!is_software_event(event))
1410
		cpuctx->active_oncpu++;
1411
	ctx->nr_active++;
1412 1413
	if (event->attr.freq && event->attr.sample_freq)
		ctx->nr_freq++;
1414

1415
	if (event->attr.exclusive)
1416 1417
		cpuctx->exclusive = 1;

1418 1419 1420
	return 0;
}

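/*
 * Schedule a whole group atomically using the PMU transaction interface:
 * start_txn()/commit_txn() on success, cancel_txn() and unwind of the
 * partially scheduled siblings on failure.
 */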
static int
group_sched_in(struct perf_event *group_event,
1423
	       struct perf_cpu_context *cpuctx,
1424
	       struct perf_event_context *ctx)
1425
{
1426
	struct perf_event *event, *partial_group = NULL;
P
Peter Zijlstra 已提交
1427
	struct pmu *pmu = group_event->pmu;
1428 1429
	u64 now = ctx->time;
	bool simulate = false;
1430

1431
	if (group_event->state == PERF_EVENT_STATE_OFF)
1432 1433
		return 0;

P
Peter Zijlstra 已提交
1434
	pmu->start_txn(pmu);
1435

1436
	if (event_sched_in(group_event, cpuctx, ctx)) {
P
Peter Zijlstra 已提交
1437
		pmu->cancel_txn(pmu);
1438
		return -EAGAIN;
1439
	}
1440 1441 1442 1443

	/*
	 * Schedule in siblings as one group (if any):
	 */
1444
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
1445
		if (event_sched_in(event, cpuctx, ctx)) {
1446
			partial_group = event;
1447 1448 1449 1450
			goto group_error;
		}
	}

1451
	if (!pmu->commit_txn(pmu))
1452
		return 0;
1453

1454 1455 1456 1457
group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
1458 1459 1460 1461 1462 1463 1464 1465 1466 1467
	 * The events up to the failed event are scheduled out normally,
	 * tstamp_stopped will be updated.
	 *
	 * The failed events and the remaining siblings need to have
	 * their timings updated as if they had gone thru event_sched_in()
	 * and event_sched_out(). This is required to get consistent timings
	 * across the group. This also takes care of the case where the group
	 * could never be scheduled by ensuring tstamp_stopped is set to mark
	 * the time the event was actually stopped, such that time delta
	 * calculation in update_event_times() is correct.
1468
	 */
1469 1470
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
1471 1472 1473 1474 1475 1476 1477 1478
			simulate = true;

		if (simulate) {
			event->tstamp_running += now - event->tstamp_stopped;
			event->tstamp_stopped = now;
		} else {
			event_sched_out(event, cpuctx, ctx);
		}
1479
	}
1480
	event_sched_out(group_event, cpuctx, ctx);
1481

P
Peter Zijlstra 已提交
1482
	pmu->cancel_txn(pmu);
1483

1484 1485 1486
	return -EAGAIN;
}

1487
/*
1488
 * Work out whether we can put this event group on the CPU now.
1489
 */
1490
static int group_can_go_on(struct perf_event *event,
1491 1492 1493 1494
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
1495
	 * Groups consisting entirely of software events can always go on.
1496
	 */
1497
	if (event->group_flags & PERF_GROUP_SOFTWARE)
1498 1499 1500
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
1501
	 * events can go on.
1502 1503 1504 1505 1506
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
1507
	 * events on the CPU, it can't go on.
1508
	 */
1509
	if (event->attr.exclusive && cpuctx->active_oncpu)
1510 1511 1512 1513 1514 1515 1516 1517
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

1518 1519
static void add_event_to_ctx(struct perf_event *event,
			       struct perf_event_context *ctx)
1520
{
1521 1522
	u64 tstamp = perf_event_time(event);

1523
	list_add_event(event, ctx);
1524
	perf_group_attach(event);
1525 1526 1527
	event->tstamp_enabled = tstamp;
	event->tstamp_running = tstamp;
	event->tstamp_stopped = tstamp;
1528 1529
}

1530 1531 1532 1533 1534 1535
static void task_ctx_sched_out(struct perf_event_context *ctx);
static void
ctx_sched_in(struct perf_event_context *ctx,
	     struct perf_cpu_context *cpuctx,
	     enum event_type_t event_type,
	     struct task_struct *task);

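/*
 * Pinned groups are scheduled in before flexible ones, for both the CPU
 * context and the task context, so pinned events get first claim on the
 * PMU.
 */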
static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
				struct perf_event_context *ctx,
				struct task_struct *task)
{
	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
}

T
Thomas Gleixner 已提交
1549
/*
1550
 * Cross CPU call to install and enable a performance event
1551 1552
 *
 * Must be called with ctx->mutex held
T
Thomas Gleixner 已提交
1553
 */
1554
static int  __perf_install_in_context(void *info)
T
Thomas Gleixner 已提交
1555
{
1556 1557
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
P
Peter Zijlstra 已提交
1558
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1559 1560 1561
	struct perf_event_context *task_ctx = cpuctx->task_ctx;
	struct task_struct *task = current;

1562
	perf_ctx_lock(cpuctx, task_ctx);
1563
	perf_pmu_disable(cpuctx->ctx.pmu);
T
Thomas Gleixner 已提交
1564 1565

	/*
1566
	 * If there was an active task_ctx schedule it out.
T
Thomas Gleixner 已提交
1567
	 */
1568
	if (task_ctx)
1569
		task_ctx_sched_out(task_ctx);
1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583

	/*
	 * If the context we're installing events in is not the
	 * active task_ctx, flip them.
	 */
	if (ctx->task && task_ctx != ctx) {
		if (task_ctx)
			raw_spin_unlock(&task_ctx->lock);
		raw_spin_lock(&ctx->lock);
		task_ctx = ctx;
	}

	if (task_ctx) {
		cpuctx->task_ctx = task_ctx;
1584 1585
		task = task_ctx->task;
	}
1586

1587
	cpu_ctx_sched_out(cpuctx, EVENT_ALL);
T
Thomas Gleixner 已提交
1588

1589
	update_context_time(ctx);
S
Stephane Eranian 已提交
1590 1591 1592 1593 1594 1595
	/*
	 * update cgrp time only if current cgrp
	 * matches event->cgrp. Must be done before
	 * calling add_event_to_ctx()
	 */
	update_cgrp_time_from_event(event);
T
Thomas Gleixner 已提交
1596

1597
	add_event_to_ctx(event, ctx);
T
Thomas Gleixner 已提交
1598

1599
	/*
1600
	 * Schedule everything back in
1601
	 */
1602
	perf_event_sched_in(cpuctx, task_ctx, task);
1603 1604 1605

	perf_pmu_enable(cpuctx->ctx.pmu);
	perf_ctx_unlock(cpuctx, task_ctx);
1606 1607

	return 0;
T
Thomas Gleixner 已提交
1608 1609 1610
}

/*
1611
 * Attach a performance event to a context
T
Thomas Gleixner 已提交
1612
 *
1613 1614
 * First we add the event to the list with the hardware enable bit
 * in event->hw_config cleared.
T
Thomas Gleixner 已提交
1615
 *
1616
 * If the event is attached to a task which is on a CPU we use a smp
T
Thomas Gleixner 已提交
1617 1618 1619 1620
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 */
static void
1621 1622
perf_install_in_context(struct perf_event_context *ctx,
			struct perf_event *event,
T
Thomas Gleixner 已提交
1623 1624 1625 1626
			int cpu)
{
	struct task_struct *task = ctx->task;

1627 1628
	lockdep_assert_held(&ctx->mutex);

1629 1630
	event->ctx = ctx;

T
Thomas Gleixner 已提交
1631 1632
	if (!task) {
		/*
1633
		 * Per cpu events are installed via an smp call and
1634
		 * the install is always successful.
T
Thomas Gleixner 已提交
1635
		 */
1636
		cpu_function_call(cpu, __perf_install_in_context, event);
T
Thomas Gleixner 已提交
1637 1638 1639 1640
		return;
	}

retry:
1641 1642
	if (!task_function_call(task, __perf_install_in_context, event))
		return;
T
Thomas Gleixner 已提交
1643

1644
	raw_spin_lock_irq(&ctx->lock);
T
Thomas Gleixner 已提交
1645
	/*
1646 1647
	 * If we failed to find a running task, but find the context active now
	 * that we've acquired the ctx->lock, retry.
T
Thomas Gleixner 已提交
1648
	 */
1649
	if (ctx->is_active) {
1650
		raw_spin_unlock_irq(&ctx->lock);
T
Thomas Gleixner 已提交
1651 1652 1653 1654
		goto retry;
	}

	/*
	 * Since the task isn't running, it's safe to add the event; holding
	 * the ctx->lock ensures the task won't get scheduled in.
	 */
1658
	add_event_to_ctx(event, ctx);
1659
	raw_spin_unlock_irq(&ctx->lock);
T
Thomas Gleixner 已提交
1660 1661
}

/*
 * Put an event into inactive state and update time fields.
 * Enabling the leader of a group effectively enables all
 * the group members that aren't explicitly disabled, so we
 * have to update their ->tstamp_enabled also.
 * Note: this works for group members as well as group leaders
 * since the non-leader members' sibling_lists will be empty.
 */
1670
static void __perf_event_mark_enabled(struct perf_event *event)
1671
{
1672
	struct perf_event *sub;
1673
	u64 tstamp = perf_event_time(event);
1674

1675
	event->state = PERF_EVENT_STATE_INACTIVE;
1676
	event->tstamp_enabled = tstamp - event->total_time_enabled;
P
Peter Zijlstra 已提交
1677
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
1678 1679
		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
P
Peter Zijlstra 已提交
1680
	}
1681 1682
}

1683
/*
1684
 * Cross CPU call to enable a performance event
1685
 */
1686
static int __perf_event_enable(void *info)
1687
{
1688 1689 1690
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
P
Peter Zijlstra 已提交
1691
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1692
	int err;
1693

1694 1695
	if (WARN_ON_ONCE(!ctx->is_active))
		return -EINVAL;
1696

1697
	raw_spin_lock(&ctx->lock);
1698
	update_context_time(ctx);
1699

1700
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
1701
		goto unlock;
S
Stephane Eranian 已提交
1702 1703 1704 1705

	/*
	 * set current task's cgroup time reference point
	 */
1706
	perf_cgroup_set_timestamp(current, ctx);
S
Stephane Eranian 已提交
1707

1708
	__perf_event_mark_enabled(event);
1709

S
Stephane Eranian 已提交
1710 1711 1712
	if (!event_filter_match(event)) {
		if (is_cgroup_event(event))
			perf_cgroup_defer_enabled(event);
1713
		goto unlock;
S
Stephane Eranian 已提交
1714
	}
1715

1716
	/*
1717
	 * If the event is in a group and isn't the group leader,
1718
	 * then don't put it on unless the group is on.
1719
	 */
1720
	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
1721
		goto unlock;
1722

1723
	if (!group_can_go_on(event, cpuctx, 1)) {
1724
		err = -EEXIST;
1725
	} else {
1726
		if (event == leader)
1727
			err = group_sched_in(event, cpuctx, ctx);
1728
		else
1729
			err = event_sched_in(event, cpuctx, ctx);
1730
	}
1731 1732 1733

	if (err) {
		/*
1734
		 * If this event can't go on and it's part of a
1735 1736
		 * group, then the whole group has to come off.
		 */
1737
		if (leader != event)
1738
			group_sched_out(leader, cpuctx, ctx);
1739
		if (leader->attr.pinned) {
1740
			update_group_times(leader);
1741
			leader->state = PERF_EVENT_STATE_ERROR;
1742
		}
1743 1744
	}

P
Peter Zijlstra 已提交
1745
unlock:
1746
	raw_spin_unlock(&ctx->lock);
1747 1748

	return 0;
1749 1750 1751
}

/*
 * Enable an event.
 *
1754 1755
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
1756
 * remains valid.  This condition is satisfied when called through
1757 1758
 * perf_event_for_each_child or perf_event_for_each as described
 * for perf_event_disable.
1759
 */
1760
void perf_event_enable(struct perf_event *event)
1761
{
1762
	struct perf_event_context *ctx = event->ctx;
1763 1764 1765 1766
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
1767
		 * Enable the event on the cpu that it's on
1768
		 */
1769
		cpu_function_call(event->cpu, __perf_event_enable, event);
1770 1771 1772
		return;
	}

1773
	raw_spin_lock_irq(&ctx->lock);
1774
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
1775 1776 1777
		goto out;

	/*
1778 1779
	 * If the event is in error state, clear that first.
	 * That way, if we see the event in error state below, we
1780 1781 1782 1783
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
1784 1785
	if (event->state == PERF_EVENT_STATE_ERROR)
		event->state = PERF_EVENT_STATE_OFF;
1786

P
Peter Zijlstra 已提交
1787
retry:
1788
	if (!ctx->is_active) {
1789
		__perf_event_mark_enabled(event);
1790 1791 1792
		goto out;
	}

1793
	raw_spin_unlock_irq(&ctx->lock);
1794 1795 1796

	if (!task_function_call(task, __perf_event_enable, event))
		return;
1797

1798
	raw_spin_lock_irq(&ctx->lock);
1799 1800

	/*
1801
	 * If the context is active and the event is still off,
1802 1803
	 * we need to retry the cross-call.
	 */
1804 1805 1806 1807 1808 1809
	if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
		/*
		 * task could have been flipped by a concurrent
		 * perf_event_context_sched_out()
		 */
		task = ctx->task;
1810
		goto retry;
1811
	}
1812

P
Peter Zijlstra 已提交
1813
out:
1814
	raw_spin_unlock_irq(&ctx->lock);
1815
}
1816
EXPORT_SYMBOL_GPL(perf_event_enable);

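/*
 * Add @refresh to the event's overflow limit and enable it; the event is
 * disabled again once that many overflows have been delivered (used by
 * the PERF_EVENT_IOC_REFRESH ioctl).
 */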
int perf_event_refresh(struct perf_event *event, int refresh)
{
1820
	/*
1821
	 * not supported on inherited events
1822
	 */
1823
	if (event->attr.inherit || !is_sampling_event(event))
1824 1825
		return -EINVAL;

1826 1827
	atomic_add(refresh, &event->event_limit);
	perf_event_enable(event);
1828 1829

	return 0;
1830
}
1831
EXPORT_SYMBOL_GPL(perf_event_refresh);
1832

1833 1834 1835
static void ctx_sched_out(struct perf_event_context *ctx,
			  struct perf_cpu_context *cpuctx,
			  enum event_type_t event_type)
1836
{
1837
	struct perf_event *event;
1838
	int is_active = ctx->is_active;
1839

1840
	ctx->is_active &= ~event_type;
1841
	if (likely(!ctx->nr_events))
1842 1843
		return;

1844
	update_context_time(ctx);
S
Stephane Eranian 已提交
1845
	update_cgrp_time_from_cpuctx(cpuctx);
1846
	if (!ctx->nr_active)
1847
		return;
1848

P
Peter Zijlstra 已提交
1849
	perf_pmu_disable(ctx->pmu);
1850
	if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);
	}

	if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);
	}
	perf_pmu_enable(ctx->pmu);
}

/*
 * Test whether two contexts are equivalent, i.e. whether they
 * have both been cloned from the same version of the same context
 * and they both have the same number of enabled events.
 * If the number of enabled events is the same, then the set
 * of enabled events should be the same, because these are both
 * inherited contexts, therefore we can't access individual events
 * in them directly with an fd; we can only enable/disable all
 * events via prctl, or enable/disable all events in a family
 * via ioctl, which will have the same effect on both contexts.
 */
static int context_equiv(struct perf_event_context *ctx1,
			 struct perf_event_context *ctx2)
{
	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
		&& ctx1->parent_gen == ctx2->parent_gen
		&& !ctx1->pin_count && !ctx2->pin_count;
}

static void __perf_event_sync_stat(struct perf_event *event,
				     struct perf_event *next_event)
{
	u64 value;

	if (!event->attr.inherit_stat)
		return;

	/*
	 * Update the event value, we cannot use perf_event_read()
	 * because we're in the middle of a context switch and have IRQs
	 * disabled, which upsets smp_call_function_single(), however
	 * we know the event must be on the current CPU, therefore we
	 * don't need to use it.
	 */
	switch (event->state) {
	case PERF_EVENT_STATE_ACTIVE:
		event->pmu->read(event);
		/* fall-through */

	case PERF_EVENT_STATE_INACTIVE:
		update_event_times(event);
		break;

	default:
		break;
	}

	/*
	 * In order to keep per-task stats reliable we need to flip the event
	 * values when we flip the contexts.
	 */
	value = local64_read(&next_event->count);
	value = local64_xchg(&event->count, value);
	local64_set(&next_event->count, value);

	swap(event->total_time_enabled, next_event->total_time_enabled);
	swap(event->total_time_running, next_event->total_time_running);

	/*
	 * Since we swizzled the values, update the user visible data too.
	 */
	perf_event_update_userpage(event);
	perf_event_update_userpage(next_event);
}

#define list_next_entry(pos, member) \
	list_entry(pos->member.next, typeof(*pos), member)

static void perf_event_sync_stat(struct perf_event_context *ctx,
				   struct perf_event_context *next_ctx)
{
	struct perf_event *event, *next_event;

	if (!ctx->nr_stat)
		return;

	update_context_time(ctx);

	event = list_first_entry(&ctx->event_list,
				   struct perf_event, event_entry);

	next_event = list_first_entry(&next_ctx->event_list,
					struct perf_event, event_entry);

	while (&event->event_entry != &ctx->event_list &&
	       &next_event->event_entry != &next_ctx->event_list) {

		__perf_event_sync_stat(event, next_event);

		event = list_next_entry(event, event_entry);
		next_event = list_next_entry(next_event, event_entry);
	}
}

static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
					 struct task_struct *next)
{
	struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
	struct perf_event_context *next_ctx;
	struct perf_event_context *parent;
	struct perf_cpu_context *cpuctx;
	int do_switch = 1;

	if (likely(!ctx))
		return;

	cpuctx = __get_cpu_context(ctx);
	if (!cpuctx->task_ctx)
		return;

	rcu_read_lock();
	parent = rcu_dereference(ctx->parent_ctx);
	next_ctx = next->perf_event_ctxp[ctxn];
	if (parent && next_ctx &&
	    rcu_dereference(next_ctx->parent_ctx) == parent) {
		/*
		 * Looks like the two contexts are clones, so we might be
		 * able to optimize the context switch.  We lock both
		 * contexts and check that they are clones under the
		 * lock (including re-checking that neither has been
		 * uncloned in the meantime).  It doesn't matter which
		 * order we take the locks because no other cpu could
		 * be trying to lock both of these tasks.
		 */
		raw_spin_lock(&ctx->lock);
		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
		if (context_equiv(ctx, next_ctx)) {
			/*
			 * XXX do we need a memory barrier of sorts
			 * wrt to rcu_dereference() of perf_event_ctxp
			 */
			task->perf_event_ctxp[ctxn] = next_ctx;
			next->perf_event_ctxp[ctxn] = ctx;
			ctx->task = next;
			next_ctx->task = task;
			do_switch = 0;

			perf_event_sync_stat(ctx, next_ctx);
		}
		raw_spin_unlock(&next_ctx->lock);
		raw_spin_unlock(&ctx->lock);
	}
	rcu_read_unlock();

	if (do_switch) {
		raw_spin_lock(&ctx->lock);
		ctx_sched_out(ctx, cpuctx, EVENT_ALL);
		cpuctx->task_ctx = NULL;
		raw_spin_unlock(&ctx->lock);
	}
}

#define for_each_task_context_nr(ctxn)					\
	for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)

/*
 * Called from scheduler to remove the events of the current task,
 * with interrupts disabled.
 *
 * We stop each event and update the event value in event->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of event _before_
 * accessing the event control register. If an NMI hits, then it will
 * not restart the event.
 */
void __perf_event_task_sched_out(struct task_struct *task,
				 struct task_struct *next)
{
	int ctxn;

	for_each_task_context_nr(ctxn)
		perf_event_context_sched_out(task, ctxn, next);

	/*
	 * if cgroup events exist on this CPU, then we need
	 * to check if we have to switch out PMU state.
	 * cgroup events are system-wide mode only
	 */
	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
		perf_cgroup_sched_out(task, next);
}

static void task_ctx_sched_out(struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	if (!cpuctx->task_ctx)
		return;

	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
		return;

	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
	cpuctx->task_ctx = NULL;
}

/*
 * Called with IRQs disabled
 */
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type)
{
	ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
}

static void
ctx_pinned_sched_in(struct perf_event_context *ctx,
		    struct perf_cpu_context *cpuctx)
{
	struct perf_event *event;

	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
		if (event->state <= PERF_EVENT_STATE_OFF)
			continue;
		if (!event_filter_match(event))
			continue;

		/* may need to reset tstamp_enabled */
		if (is_cgroup_event(event))
			perf_cgroup_mark_enabled(event, ctx);

		if (group_can_go_on(event, cpuctx, 1))
			group_sched_in(event, cpuctx, ctx);

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (event->state == PERF_EVENT_STATE_INACTIVE) {
			update_group_times(event);
			event->state = PERF_EVENT_STATE_ERROR;
		}
	}
}

static void
ctx_flexible_sched_in(struct perf_event_context *ctx,
		      struct perf_cpu_context *cpuctx)
{
	struct perf_event *event;
	int can_add_hw = 1;

	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
		/* Ignore events in OFF or ERROR state */
		if (event->state <= PERF_EVENT_STATE_OFF)
			continue;
		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of events:
		 */
		if (!event_filter_match(event))
			continue;

		/* may need to reset tstamp_enabled */
		if (is_cgroup_event(event))
			perf_cgroup_mark_enabled(event, ctx);

		if (group_can_go_on(event, cpuctx, can_add_hw)) {
			if (group_sched_in(event, cpuctx, ctx))
				can_add_hw = 0;
		}
	}
}

static void
ctx_sched_in(struct perf_event_context *ctx,
	     struct perf_cpu_context *cpuctx,
	     enum event_type_t event_type,
	     struct task_struct *task)
{
	u64 now;
	int is_active = ctx->is_active;

	ctx->is_active |= event_type;
	if (likely(!ctx->nr_events))
		return;

	now = perf_clock();
	ctx->timestamp = now;
	perf_cgroup_set_timestamp(task, ctx);
	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
		ctx_pinned_sched_in(ctx, cpuctx);

	/* Then walk through the lower prio flexible groups */
	if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
		ctx_flexible_sched_in(ctx, cpuctx);
}

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type,
			     struct task_struct *task)
{
	struct perf_event_context *ctx = &cpuctx->ctx;

	ctx_sched_in(ctx, cpuctx, event_type, task);
}

static void perf_event_context_sched_in(struct perf_event_context *ctx,
					struct task_struct *task)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = __get_cpu_context(ctx);
	if (cpuctx->task_ctx == ctx)
		return;

	perf_ctx_lock(cpuctx, ctx);
	perf_pmu_disable(ctx->pmu);
	/*
	 * We want to keep the following priority order:
	 * cpu pinned (that don't need to move), task pinned,
	 * cpu flexible, task flexible.
	 */
	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);

	if (ctx->nr_events)
		cpuctx->task_ctx = ctx;

	perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);

	perf_pmu_enable(ctx->pmu);
	perf_ctx_unlock(cpuctx, ctx);

	/*
	 * Since these rotations are per-cpu, we need to ensure the
	 * cpu-context we got scheduled on is actually rotating.
	 */
	perf_pmu_rotate_start(ctx->pmu);
}

/*
 * Called from scheduler to add the events of the current task
 * with interrupts disabled.
 *
 * We restore the event value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of event _before_
 * accessing the event control register. If an NMI hits, then it will
 * keep the event running.
 */
void __perf_event_task_sched_in(struct task_struct *prev,
				struct task_struct *task)
{
	struct perf_event_context *ctx;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (likely(!ctx))
			continue;

		perf_event_context_sched_in(ctx, task);
	}
	/*
	 * if cgroup events exist on this CPU, then we need
	 * to check if we have to switch in PMU state.
	 * cgroup events are system-wide mode only
	 */
	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
		perf_cgroup_sched_in(prev, task);
}

static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
{
	u64 frequency = event->attr.sample_freq;
	u64 sec = NSEC_PER_SEC;
	u64 divisor, dividend;

	int count_fls, nsec_fls, frequency_fls, sec_fls;

	count_fls = fls64(count);
	nsec_fls = fls64(nsec);
	frequency_fls = fls64(frequency);
	sec_fls = 30;

	/*
	 * We got @count in @nsec, with a target of sample_freq HZ
	 * the target period becomes:
	 *
	 *             @count * 10^9
	 * period = -------------------
	 *          @nsec * sample_freq
	 *
	 */
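
	/*
	 * Worked example (sketch): with sample_freq = 1000 and @count =
	 * 2,000,000 events observed over @nsec = 10,000,000 ns (10 ms),
	 * the event fires at 2e8 events/sec, so to get ~1000 samples/sec
	 * the period becomes (2e6 * 1e9) / (1e7 * 1e3) = 200,000 events
	 * between samples.
	 */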

	/*
	 * Reduce accuracy by one bit such that @a and @b converge
	 * to a similar magnitude.
	 */
#define REDUCE_FLS(a, b)		\
do {					\
	if (a##_fls > b##_fls) {	\
		a >>= 1;		\
		a##_fls--;		\
	} else {			\
		b >>= 1;		\
		b##_fls--;		\
	}				\
} while (0)

	/*
	 * Reduce accuracy until either term fits in a u64, then proceed with
	 * the other, so that finally we can do a u64/u64 division.
	 */
	while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
		REDUCE_FLS(nsec, frequency);
		REDUCE_FLS(sec, count);
	}

	if (count_fls + sec_fls > 64) {
		divisor = nsec * frequency;

		while (count_fls + sec_fls > 64) {
			REDUCE_FLS(count, sec);
			divisor >>= 1;
		}

		dividend = count * sec;
	} else {
		dividend = count * sec;

		while (nsec_fls + frequency_fls > 64) {
			REDUCE_FLS(nsec, frequency);
			dividend >>= 1;
		}

		divisor = nsec * frequency;
	}

	if (!divisor)
		return dividend;

	return div64_u64(dividend, divisor);
}

static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 period, sample_period;
	s64 delta;

	period = perf_calculate_period(event, nsec, count);

	delta = (s64)(period - hwc->sample_period);
	delta = (delta + 7) / 8; /* low pass filter */

	sample_period = hwc->sample_period + delta;

	if (!sample_period)
		sample_period = 1;

	hwc->sample_period = sample_period;

	if (local64_read(&hwc->period_left) > 8*sample_period) {
		event->pmu->stop(event, PERF_EF_UPDATE);
		local64_set(&hwc->period_left, 0);
		event->pmu->start(event, PERF_EF_RELOAD);
	}
}

static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
{
	struct perf_event *event;
	struct hw_perf_event *hwc;
	u64 interrupts, now;
	s64 delta;

	if (!ctx->nr_freq)
		return;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (event->state != PERF_EVENT_STATE_ACTIVE)
			continue;

		if (!event_filter_match(event))
			continue;

		hwc = &event->hw;

		interrupts = hwc->interrupts;
		hwc->interrupts = 0;

		/*
		 * unthrottle events on the tick
		 */
		if (interrupts == MAX_INTERRUPTS) {
			perf_log_throttle(event, 1);
			event->pmu->start(event, 0);
		}

		if (!event->attr.freq || !event->attr.sample_freq)
			continue;

		event->pmu->read(event);
		now = local64_read(&event->count);
		delta = now - hwc->freq_count_stamp;
		hwc->freq_count_stamp = now;

		if (delta > 0)
			perf_adjust_period(event, period, delta);
	}
}

/*
 * Round-robin a context's events:
 */
static void rotate_ctx(struct perf_event_context *ctx)
{
	/*
	 * Rotate the first entry last of non-pinned groups. Rotation might be
	 * disabled by the inheritance code.
	 */
	if (!ctx->rotate_disable)
		list_rotate_left(&ctx->flexible_groups);
}

/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 * because they're strictly cpu affine and rotate_start is called with IRQs
 * disabled, while rotate_context is called from IRQ context.
 */
static void perf_rotate_context(struct perf_cpu_context *cpuctx)
{
	u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
	struct perf_event_context *ctx = NULL;
	int rotate = 0, remove = 1, freq = 0;

	if (cpuctx->ctx.nr_events) {
		remove = 0;
		if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
			rotate = 1;
		if (cpuctx->ctx.nr_freq)
			freq = 1;
	}

	ctx = cpuctx->task_ctx;
	if (ctx && ctx->nr_events) {
		remove = 0;
		if (ctx->nr_events != ctx->nr_active)
			rotate = 1;
		if (ctx->nr_freq)
			freq = 1;
	}

	if (!rotate && !freq)
		goto done;

	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
	perf_pmu_disable(cpuctx->ctx.pmu);

	if (freq) {
		perf_ctx_adjust_freq(&cpuctx->ctx, interval);
		if (ctx)
			perf_ctx_adjust_freq(ctx, interval);
	}

	if (rotate) {
		cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
		if (ctx)
			ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);

		rotate_ctx(&cpuctx->ctx);
		if (ctx)
			rotate_ctx(ctx);

		perf_event_sched_in(cpuctx, ctx, current);
	}

	perf_pmu_enable(cpuctx->ctx.pmu);
	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);

done:
	if (remove)
		list_del_init(&cpuctx->rotation_list);
}

void perf_event_task_tick(void)
{
	struct list_head *head = &__get_cpu_var(rotation_list);
	struct perf_cpu_context *cpuctx, *tmp;

	WARN_ON(!irqs_disabled());

	list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
		if (cpuctx->jiffies_interval == 1 ||
				!(jiffies % cpuctx->jiffies_interval))
			perf_rotate_context(cpuctx);
	}
}

static int event_enable_on_exec(struct perf_event *event,
				struct perf_event_context *ctx)
{
	if (!event->attr.enable_on_exec)
		return 0;

	event->attr.enable_on_exec = 0;
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		return 0;

	__perf_event_mark_enabled(event);

	return 1;
}

/*
 * Enable all of a task's events that have been marked enable-on-exec.
 * This expects task == current.
 */
static void perf_event_enable_on_exec(struct perf_event_context *ctx)
{
	struct perf_event *event;
	unsigned long flags;
	int enabled = 0;
	int ret;

	local_irq_save(flags);
	if (!ctx || !ctx->nr_events)
		goto out;

	/*
	 * We must ctxsw out cgroup events to avoid conflict
	 * when invoking perf_task_event_sched_in() later on
	 * in this function. Otherwise we end up trying to
	 * ctxswin cgroup events which are already scheduled
	 * in.
	 */
	perf_cgroup_sched_out(current, NULL);

	raw_spin_lock(&ctx->lock);
	task_ctx_sched_out(ctx);

	list_for_each_entry(event, &ctx->event_list, event_entry) {
		ret = event_enable_on_exec(event, ctx);
		if (ret)
			enabled = 1;
	}

	/*
	 * Unclone this context if we enabled any event.
	 */
	if (enabled)
		unclone_ctx(ctx);

	raw_spin_unlock(&ctx->lock);

	/*
	 * Also calls ctxswin for cgroup events, if any:
	 */
	perf_event_context_sched_in(ctx, ctx->task);
out:
	local_irq_restore(flags);
}

/*
 * Cross CPU call to read the hardware event
 */
static void __perf_event_read(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu.  If not it has been
	 * scheduled out before the smp call arrived.  In that case
	 * event->count would have been updated to a recent sample
	 * when the event was scheduled out.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	raw_spin_lock(&ctx->lock);
	if (ctx->is_active) {
		update_context_time(ctx);
		update_cgrp_time_from_event(event);
	}
	update_event_times(event);
	if (event->state == PERF_EVENT_STATE_ACTIVE)
		event->pmu->read(event);
	raw_spin_unlock(&ctx->lock);
}

static inline u64 perf_event_count(struct perf_event *event)
{
	return local64_read(&event->count) + atomic64_read(&event->child_count);
}

static u64 perf_event_read(struct perf_event *event)
{
	/*
	 * If event is enabled and currently active on a CPU, update the
	 * value in the event structure:
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		smp_call_function_single(event->oncpu,
					 __perf_event_read, event, 1);
	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
		struct perf_event_context *ctx = event->ctx;
		unsigned long flags;

		raw_spin_lock_irqsave(&ctx->lock, flags);
		/*
		 * may read while context is not active
		 * (e.g., thread is blocked), in that case
		 * we cannot update context time
		 */
		if (ctx->is_active) {
			update_context_time(ctx);
			update_cgrp_time_from_event(event);
		}
		update_event_times(event);
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}

	return perf_event_count(event);
}

/*
 * Initialize the perf_event context in a task_struct:
 */
static void __perf_event_init_context(struct perf_event_context *ctx)
{
	raw_spin_lock_init(&ctx->lock);
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->pinned_groups);
	INIT_LIST_HEAD(&ctx->flexible_groups);
	INIT_LIST_HEAD(&ctx->event_list);
	atomic_set(&ctx->refcount, 1);
}

static struct perf_event_context *
alloc_perf_context(struct pmu *pmu, struct task_struct *task)
{
	struct perf_event_context *ctx;

	ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
	if (!ctx)
		return NULL;

	__perf_event_init_context(ctx);
	if (task) {
		ctx->task = task;
		get_task_struct(task);
	}
	ctx->pmu = pmu;

	return ctx;
}

static struct task_struct *
find_lively_task_by_vpid(pid_t vpid)
{
	struct task_struct *task;
	int err;

	rcu_read_lock();
	if (!vpid)
		task = current;
	else
		task = find_task_by_vpid(vpid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	if (!task)
		return ERR_PTR(-ESRCH);

	/* Reuse ptrace permission checks for now. */
	err = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto errout;

	return task;
errout:
	put_task_struct(task);
	return ERR_PTR(err);

}

/*
 * Returns a matching context with refcount and pincount.
 */
static struct perf_event_context *
find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
{
	struct perf_event_context *ctx;
	struct perf_cpu_context *cpuctx;
	unsigned long flags;
	int ctxn, err;

	if (!task) {
		/* Must be root to operate on a CPU event: */
		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);

		/*
		 * We could be clever and allow attaching an event to an
		 * offline CPU and activate it when the CPU comes up, but
		 * that's for later.
		 */
		if (!cpu_online(cpu))
			return ERR_PTR(-ENODEV);

		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
		ctx = &cpuctx->ctx;
		get_ctx(ctx);
		++ctx->pin_count;

		return ctx;
	}

	err = -EINVAL;
	ctxn = pmu->task_ctx_nr;
	if (ctxn < 0)
		goto errout;

retry:
	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		unclone_ctx(ctx);
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	} else {
		ctx = alloc_perf_context(pmu, task);
		err = -ENOMEM;
		if (!ctx)
			goto errout;

		err = 0;
		mutex_lock(&task->perf_event_mutex);
		/*
		 * If it has already passed perf_event_exit_task(),
		 * we must see PF_EXITING, it takes this mutex too.
		 */
		if (task->flags & PF_EXITING)
			err = -ESRCH;
		else if (task->perf_event_ctxp[ctxn])
			err = -EAGAIN;
		else {
			get_ctx(ctx);
			++ctx->pin_count;
			rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
		}
		mutex_unlock(&task->perf_event_mutex);

		if (unlikely(err)) {
			put_ctx(ctx);

			if (err == -EAGAIN)
				goto retry;
			goto errout;
		}
	}

	return ctx;

errout:
	return ERR_PTR(err);
}

static void perf_event_free_filter(struct perf_event *event);

static void free_event_rcu(struct rcu_head *head)
{
	struct perf_event *event;

	event = container_of(head, struct perf_event, rcu_head);
	if (event->ns)
		put_pid_ns(event->ns);
	perf_event_free_filter(event);
	kfree(event);
}

static void ring_buffer_put(struct ring_buffer *rb);

static void free_event(struct perf_event *event)
{
	irq_work_sync(&event->pending);

	if (!event->parent) {
		if (event->attach_state & PERF_ATTACH_TASK)
			jump_label_dec_deferred(&perf_sched_events);
		if (event->attr.mmap || event->attr.mmap_data)
			atomic_dec(&nr_mmap_events);
		if (event->attr.comm)
			atomic_dec(&nr_comm_events);
		if (event->attr.task)
			atomic_dec(&nr_task_events);
		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
			put_callchain_buffers();
		if (is_cgroup_event(event)) {
			atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
			jump_label_dec_deferred(&perf_sched_events);
		}
	}

	if (event->rb) {
		ring_buffer_put(event->rb);
		event->rb = NULL;
	}

	if (is_cgroup_event(event))
		perf_detach_cgroup(event);

	if (event->destroy)
		event->destroy(event);

	if (event->ctx)
		put_ctx(event->ctx);

	call_rcu(&event->rcu_head, free_event_rcu);
}

int perf_event_release_kernel(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	WARN_ON_ONCE(ctx->parent_ctx);
	/*
	 * There are two ways this annotation is useful:
	 *
	 *  1) there is a lock recursion from perf_event_exit_task
	 *     see the comment there.
	 *
	 *  2) there is a lock-inversion with mmap_sem through
	 *     perf_event_read_group(), which takes faults while
	 *     holding ctx->mutex, however this is called after
	 *     the last filedesc died, so there is no possibility
	 *     to trigger the AB-BA case.
	 */
	mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
	raw_spin_lock_irq(&ctx->lock);
	perf_group_detach(event);
	raw_spin_unlock_irq(&ctx->lock);
	perf_remove_from_context(event);
	mutex_unlock(&ctx->mutex);

	free_event(event);

	return 0;
}
EXPORT_SYMBOL_GPL(perf_event_release_kernel);

/*
 * Called when the last reference to the file is gone.
 */
static int perf_release(struct inode *inode, struct file *file)
{
	struct perf_event *event = file->private_data;
	struct task_struct *owner;

	file->private_data = NULL;

	rcu_read_lock();
	owner = ACCESS_ONCE(event->owner);
	/*
	 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
	 * !owner it means the list deletion is complete and we can indeed
	 * free this event, otherwise we need to serialize on
	 * owner->perf_event_mutex.
	 */
	smp_read_barrier_depends();
	if (owner) {
		/*
		 * Since delayed_put_task_struct() also drops the last
		 * task reference we can safely take a new reference
		 * while holding the rcu_read_lock().
		 */
		get_task_struct(owner);
	}
	rcu_read_unlock();

	if (owner) {
		mutex_lock(&owner->perf_event_mutex);
		/*
		 * We have to re-check the event->owner field, if it is cleared
		 * we raced with perf_event_exit_task(), acquiring the mutex
		 * ensured they're done, and we can proceed with freeing the
		 * event.
		 */
		if (event->owner)
			list_del_init(&event->owner_entry);
		mutex_unlock(&owner->perf_event_mutex);
		put_task_struct(owner);
	}

	return perf_event_release_kernel(event);
}

u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
{
	struct perf_event *child;
	u64 total = 0;

	*enabled = 0;
	*running = 0;

	mutex_lock(&event->child_mutex);
	total += perf_event_read(event);
	*enabled += event->total_time_enabled +
			atomic64_read(&event->child_total_time_enabled);
	*running += event->total_time_running +
			atomic64_read(&event->child_total_time_running);

	list_for_each_entry(child, &event->child_list, child_list) {
		total += perf_event_read(child);
		*enabled += child->total_time_enabled;
		*running += child->total_time_running;
	}
	mutex_unlock(&event->child_mutex);

	return total;
}
EXPORT_SYMBOL_GPL(perf_event_read_value);
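
/*
 * Example (sketch for an in-kernel user that owns 'event'; not taken from
 * an in-tree caller): read the aggregated count and scale it when the event
 * was multiplexed and only ran for part of the time it was enabled:
 *
 *	u64 enabled, running, count;
 *
 *	count = perf_event_read_value(event, &enabled, &running);
 *	if (running && running < enabled)
 *		count = div64_u64(count * enabled, running);
 */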

2885
static int perf_event_read_group(struct perf_event *event,
2886 2887
				   u64 read_format, char __user *buf)
{
2888
	struct perf_event *leader = event->group_leader, *sub;
2889 2890
	int n = 0, size = 0, ret = -EFAULT;
	struct perf_event_context *ctx = leader->ctx;
2891
	u64 values[5];
2892
	u64 count, enabled, running;
2893

2894
	mutex_lock(&ctx->mutex);
2895
	count = perf_event_read_value(leader, &enabled, &running);
2896 2897

	values[n++] = 1 + leader->nr_siblings;
2898 2899 2900 2901
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;
2902 2903 2904
	values[n++] = count;
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(leader);
2905 2906 2907 2908

	size = n * sizeof(u64);

	if (copy_to_user(buf, values, size))
2909
		goto unlock;
2910

2911
	ret = size;
2912

2913
	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2914
		n = 0;
2915

2916
		values[n++] = perf_event_read_value(sub, &enabled, &running);
2917 2918 2919 2920 2921
		if (read_format & PERF_FORMAT_ID)
			values[n++] = primary_event_id(sub);

		size = n * sizeof(u64);

2922
		if (copy_to_user(buf + ret, values, size)) {
2923 2924 2925
			ret = -EFAULT;
			goto unlock;
		}
2926 2927

		ret += size;
2928
	}
2929 2930
unlock:
	mutex_unlock(&ctx->mutex);
2931

2932
	return ret;
2933 2934
}

2935
static int perf_event_read_one(struct perf_event *event,
2936 2937
				 u64 read_format, char __user *buf)
{
2938
	u64 enabled, running;
2939 2940 2941
	u64 values[4];
	int n = 0;

2942 2943 2944 2945 2946
	values[n++] = perf_event_read_value(event, &enabled, &running);
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;
2947
	if (read_format & PERF_FORMAT_ID)
2948
		values[n++] = primary_event_id(event);
2949 2950 2951 2952 2953 2954 2955

	if (copy_to_user(buf, values, n * sizeof(u64)))
		return -EFAULT;

	return n * sizeof(u64);
}
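
/*
 * Layout of the buffer returned to user space by read() (sketch; one u64
 * per field, optional fields present only when the corresponding
 * read_format bit is set):
 *
 *	{ value, time_enabled, time_running, id }	without PERF_FORMAT_GROUP
 *	{ nr, time_enabled, time_running,
 *	  { value, id } * nr }				with PERF_FORMAT_GROUP
 */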

T
Thomas Gleixner 已提交
2956
/*
2957
 * Read the performance event - simple non blocking version for now
T
Thomas Gleixner 已提交
2958 2959
 */
static ssize_t
2960
perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
T
Thomas Gleixner 已提交
2961
{
2962
	u64 read_format = event->attr.read_format;
2963
	int ret;
T
Thomas Gleixner 已提交
2964

2965
	/*
2966
	 * Return end-of-file for a read on an event that is in
2967 2968 2969
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled on to the CPU at some point).
	 */
2970
	if (event->state == PERF_EVENT_STATE_ERROR)
2971 2972
		return 0;

2973
	if (count < event->read_size)
2974 2975
		return -ENOSPC;

2976
	WARN_ON_ONCE(event->ctx->parent_ctx);
2977
	if (read_format & PERF_FORMAT_GROUP)
2978
		ret = perf_event_read_group(event, read_format, buf);
2979
	else
2980
		ret = perf_event_read_one(event, read_format, buf);
T
Thomas Gleixner 已提交
2981

2982
	return ret;
T
Thomas Gleixner 已提交
2983 2984 2985 2986 2987
}

static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
2988
	struct perf_event *event = file->private_data;
T
Thomas Gleixner 已提交
2989

2990
	return perf_read_hw(event, buf, count);
T
Thomas Gleixner 已提交
2991 2992 2993 2994
}

static unsigned int perf_poll(struct file *file, poll_table *wait)
{
2995
	struct perf_event *event = file->private_data;
2996
	struct ring_buffer *rb;
2997
	unsigned int events = POLL_HUP;
P
Peter Zijlstra 已提交
2998

2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015
	/*
	 * Race between perf_event_set_output() and perf_poll(): perf_poll()
	 * grabs the rb reference but perf_event_set_output() overrides it.
	 * Here is the timeline for two threads T1, T2:
	 * t0: T1, rb = rcu_dereference(event->rb)
	 * t1: T2, old_rb = event->rb
	 * t2: T2, event->rb = new rb
	 * t3: T2, ring_buffer_detach(old_rb)
	 * t4: T1, ring_buffer_attach(rb1)
	 * t5: T1, poll_wait(event->waitq)
	 *
	 * To avoid this problem, we grab mmap_mutex in perf_poll()
	 * thereby ensuring that the assignment of the new ring buffer
	 * and the detachment of the old buffer appear atomic to perf_poll()
	 */
	mutex_lock(&event->mmap_mutex);

P
Peter Zijlstra 已提交
3016
	rcu_read_lock();
3017
	rb = rcu_dereference(event->rb);
3018 3019
	if (rb) {
		ring_buffer_attach(event, rb);
3020
		events = atomic_xchg(&rb->poll, 0);
3021
	}
P
Peter Zijlstra 已提交
3022
	rcu_read_unlock();
T
Thomas Gleixner 已提交
3023

3024 3025
	mutex_unlock(&event->mmap_mutex);

3026
	poll_wait(file, &event->waitq, wait);
T
Thomas Gleixner 已提交
3027 3028 3029 3030

	return events;
}

3031
static void perf_event_reset(struct perf_event *event)
3032
{
3033
	(void)perf_event_read(event);
3034
	local64_set(&event->count, 0);
3035
	perf_event_update_userpage(event);
P
Peter Zijlstra 已提交
3036 3037
}

3038
/*
3039 3040 3041 3042
 * Holding the top-level event's child_mutex means that any
 * descendant process that has inherited this event will block
 * in sync_child_event if it goes to exit, thus satisfying the
 * task existence requirements of perf_event_enable/disable.
3043
 */
3044 3045
static void perf_event_for_each_child(struct perf_event *event,
					void (*func)(struct perf_event *))
P
Peter Zijlstra 已提交
3046
{
3047
	struct perf_event *child;
P
Peter Zijlstra 已提交
3048

3049 3050 3051 3052
	WARN_ON_ONCE(event->ctx->parent_ctx);
	mutex_lock(&event->child_mutex);
	func(event);
	list_for_each_entry(child, &event->child_list, child_list)
P
Peter Zijlstra 已提交
3053
		func(child);
3054
	mutex_unlock(&event->child_mutex);
P
Peter Zijlstra 已提交
3055 3056
}

3057 3058
static void perf_event_for_each(struct perf_event *event,
				  void (*func)(struct perf_event *))
P
Peter Zijlstra 已提交
3059
{
3060 3061
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *sibling;
P
Peter Zijlstra 已提交
3062

3063 3064
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
3065
	event = event->group_leader;
3066

3067 3068 3069 3070
	perf_event_for_each_child(event, func);
	func(event);
	list_for_each_entry(sibling, &event->sibling_list, group_entry)
		perf_event_for_each_child(sibling, func);
3071
	mutex_unlock(&ctx->mutex);
3072 3073
}

3074
static int perf_event_period(struct perf_event *event, u64 __user *arg)
3075
{
3076
	struct perf_event_context *ctx = event->ctx;
3077 3078 3079
	int ret = 0;
	u64 value;

3080
	if (!is_sampling_event(event))
3081 3082
		return -EINVAL;

3083
	if (copy_from_user(&value, arg, sizeof(value)))
3084 3085 3086 3087 3088
		return -EFAULT;

	if (!value)
		return -EINVAL;

3089
	raw_spin_lock_irq(&ctx->lock);
3090 3091
	if (event->attr.freq) {
		if (value > sysctl_perf_event_sample_rate) {
3092 3093 3094 3095
			ret = -EINVAL;
			goto unlock;
		}

3096
		event->attr.sample_freq = value;
3097
	} else {
3098 3099
		event->attr.sample_period = value;
		event->hw.sample_period = value;
3100 3101
	}
unlock:
3102
	raw_spin_unlock_irq(&ctx->lock);
3103 3104 3105 3106

	return ret;
}

3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127
static const struct file_operations perf_fops;

static struct perf_event *perf_fget_light(int fd, int *fput_needed)
{
	struct file *file;

	file = fget_light(fd, fput_needed);
	if (!file)
		return ERR_PTR(-EBADF);

	if (file->f_op != &perf_fops) {
		fput_light(file, *fput_needed);
		*fput_needed = 0;
		return ERR_PTR(-EBADF);
	}

	return file->private_data;
}

static int perf_event_set_output(struct perf_event *event,
				 struct perf_event *output_event);
L
Li Zefan 已提交
3128
static int perf_event_set_filter(struct perf_event *event, void __user *arg);
3129

3130 3131
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
3132 3133
	struct perf_event *event = file->private_data;
	void (*func)(struct perf_event *);
P
Peter Zijlstra 已提交
3134
	u32 flags = arg;
3135 3136

	switch (cmd) {
3137 3138
	case PERF_EVENT_IOC_ENABLE:
		func = perf_event_enable;
3139
		break;
3140 3141
	case PERF_EVENT_IOC_DISABLE:
		func = perf_event_disable;
3142
		break;
3143 3144
	case PERF_EVENT_IOC_RESET:
		func = perf_event_reset;
3145
		break;
P
Peter Zijlstra 已提交
3146

3147 3148
	case PERF_EVENT_IOC_REFRESH:
		return perf_event_refresh(event, arg);
3149

3150 3151
	case PERF_EVENT_IOC_PERIOD:
		return perf_event_period(event, (u64 __user *)arg);
3152

3153
	case PERF_EVENT_IOC_SET_OUTPUT:
3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170
	{
		struct perf_event *output_event = NULL;
		int fput_needed = 0;
		int ret;

		if (arg != -1) {
			output_event = perf_fget_light(arg, &fput_needed);
			if (IS_ERR(output_event))
				return PTR_ERR(output_event);
		}

		ret = perf_event_set_output(event, output_event);
		if (output_event)
			fput_light(output_event->filp, fput_needed);

		return ret;
	}
3171

L
Li Zefan 已提交
3172 3173 3174
	case PERF_EVENT_IOC_SET_FILTER:
		return perf_event_set_filter(event, (void __user *)arg);

3175
	default:
P
Peter Zijlstra 已提交
3176
		return -ENOTTY;
3177
	}
P
Peter Zijlstra 已提交
3178 3179

	if (flags & PERF_IOC_FLAG_GROUP)
3180
		perf_event_for_each(event, func);
P
Peter Zijlstra 已提交
3181
	else
3182
		perf_event_for_each_child(event, func);
P
Peter Zijlstra 已提交
3183 3184

	return 0;
3185 3186
}
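
/*
 * From user space the same operations are reached via ioctl() on the perf
 * event file descriptor, e.g. (sketch):
 *
 *	ioctl(fd, PERF_EVENT_IOC_RESET,   PERF_IOC_FLAG_GROUP);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE,  PERF_IOC_FLAG_GROUP);
 *	... run the workload ...
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
 */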

3187
int perf_event_task_enable(void)
3188
{
3189
	struct perf_event *event;
3190

3191 3192 3193 3194
	mutex_lock(&current->perf_event_mutex);
	list_for_each_entry(event, &current->perf_event_list, owner_entry)
		perf_event_for_each_child(event, perf_event_enable);
	mutex_unlock(&current->perf_event_mutex);
3195 3196 3197 3198

	return 0;
}

3199
int perf_event_task_disable(void)
3200
{
3201
	struct perf_event *event;
3202

3203 3204 3205 3206
	mutex_lock(&current->perf_event_mutex);
	list_for_each_entry(event, &current->perf_event_list, owner_entry)
		perf_event_for_each_child(event, perf_event_disable);
	mutex_unlock(&current->perf_event_mutex);
3207 3208 3209 3210

	return 0;
}

3211
static int perf_event_index(struct perf_event *event)
3212
{
P
Peter Zijlstra 已提交
3213 3214 3215
	if (event->hw.state & PERF_HES_STOPPED)
		return 0;

3216
	if (event->state != PERF_EVENT_STATE_ACTIVE)
3217 3218
		return 0;

3219
	return event->pmu->event_idx(event);
3220 3221
}

3222
static void calc_timer_values(struct perf_event *event,
3223 3224
				u64 *enabled,
				u64 *running)
3225 3226 3227 3228 3229 3230 3231 3232 3233
{
	u64 now, ctx_time;

	now = perf_clock();
	ctx_time = event->shadow_ctx_time + now;
	*enabled = ctx_time - event->tstamp_enabled;
	*running = ctx_time - event->tstamp_running;
}

3234 3235 3236 3237 3238
/*
 * Callers need to ensure there can be no nesting of this function, otherwise
 * the seqlock logic goes bad. We can not serialize this because the arch
 * code calls this from NMI context.
 */
3239
void perf_event_update_userpage(struct perf_event *event)
3240
{
3241
	struct perf_event_mmap_page *userpg;
3242
	struct ring_buffer *rb;
3243
	u64 enabled, running;
3244 3245

	rcu_read_lock();
3246 3247 3248 3249 3250 3251 3252 3253 3254 3255
	/*
	 * compute total_time_enabled, total_time_running
	 * based on snapshot values taken when the event
	 * was last scheduled in.
	 *
	 * we cannot simply call update_context_time()
	 * because of locking issue as we can be called in
	 * NMI context
	 */
	calc_timer_values(event, &enabled, &running);
3256 3257
	rb = rcu_dereference(event->rb);
	if (!rb)
3258 3259
		goto unlock;

3260
	userpg = rb->user_page;
3261

3262 3263 3264 3265 3266
	/*
	 * Disable preemption so as to not let the corresponding user-space
	 * spin too long if we get preempted.
	 */
	preempt_disable();
3267
	++userpg->lock;
3268
	barrier();
3269
	userpg->index = perf_event_index(event);
P
Peter Zijlstra 已提交
3270
	userpg->offset = perf_event_count(event);
3271
	if (userpg->index)
3272
		userpg->offset -= local64_read(&event->hw.prev_count);
3273

3274
	userpg->time_enabled = enabled +
3275
			atomic64_read(&event->child_total_time_enabled);
3276

3277
	userpg->time_running = running +
3278
			atomic64_read(&event->child_total_time_running);
3279

3280
	barrier();
3281
	++userpg->lock;
3282
	preempt_enable();
3283
unlock:
3284
	rcu_read_unlock();
3285 3286
}
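
/*
 * The ->lock/barrier() pairs above form a seqcount; a user-space reader of
 * the mmap()ed control page is expected to retry roughly like this (sketch):
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		index  = pc->index;
 *		offset = pc->offset;
 *		barrier();
 *	} while (pc->lock != seq);
 */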

3287 3288 3289
static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct perf_event *event = vma->vm_file->private_data;
3290
	struct ring_buffer *rb;
3291 3292 3293 3294 3295 3296 3297 3298 3299
	int ret = VM_FAULT_SIGBUS;

	if (vmf->flags & FAULT_FLAG_MKWRITE) {
		if (vmf->pgoff == 0)
			ret = 0;
		return ret;
	}

	rcu_read_lock();
3300 3301
	rb = rcu_dereference(event->rb);
	if (!rb)
3302 3303 3304 3305 3306
		goto unlock;

	if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
		goto unlock;

3307
	vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321
	if (!vmf->page)
		goto unlock;

	get_page(vmf->page);
	vmf->page->mapping = vma->vm_file->f_mapping;
	vmf->page->index   = vmf->pgoff;

	ret = 0;
unlock:
	rcu_read_unlock();

	return ret;
}

3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364
static void ring_buffer_attach(struct perf_event *event,
			       struct ring_buffer *rb)
{
	unsigned long flags;

	if (!list_empty(&event->rb_entry))
		return;

	spin_lock_irqsave(&rb->event_lock, flags);
	if (!list_empty(&event->rb_entry))
		goto unlock;

	list_add(&event->rb_entry, &rb->event_list);
unlock:
	spin_unlock_irqrestore(&rb->event_lock, flags);
}

static void ring_buffer_detach(struct perf_event *event,
			       struct ring_buffer *rb)
{
	unsigned long flags;

	if (list_empty(&event->rb_entry))
		return;

	spin_lock_irqsave(&rb->event_lock, flags);
	list_del_init(&event->rb_entry);
	wake_up_all(&event->waitq);
	spin_unlock_irqrestore(&rb->event_lock, flags);
}

static void ring_buffer_wakeup(struct perf_event *event)
{
	struct ring_buffer *rb;

	rcu_read_lock();
	rb = rcu_dereference(event->rb);
	list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
		wake_up_all(&event->waitq);
	}
	rcu_read_unlock();
}

3365
static void rb_free_rcu(struct rcu_head *rcu_head)
3366
{
3367
	struct ring_buffer *rb;
3368

3369 3370
	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
	rb_free(rb);
3371 3372
}

3373
static struct ring_buffer *ring_buffer_get(struct perf_event *event)
3374
{
3375
	struct ring_buffer *rb;
3376

3377
	rcu_read_lock();
3378 3379 3380 3381
	rb = rcu_dereference(event->rb);
	if (rb) {
		if (!atomic_inc_not_zero(&rb->refcount))
			rb = NULL;
3382 3383 3384
	}
	rcu_read_unlock();

3385
	return rb;
3386 3387
}

3388
static void ring_buffer_put(struct ring_buffer *rb)
3389
{
3390 3391 3392
	struct perf_event *event, *n;
	unsigned long flags;

3393
	if (!atomic_dec_and_test(&rb->refcount))
3394
		return;
3395

3396 3397 3398 3399 3400 3401 3402
	spin_lock_irqsave(&rb->event_lock, flags);
	list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
		list_del_init(&event->rb_entry);
		wake_up_all(&event->waitq);
	}
	spin_unlock_irqrestore(&rb->event_lock, flags);

3403
	call_rcu(&rb->rcu_head, rb_free_rcu);
3404 3405 3406 3407
}

static void perf_mmap_open(struct vm_area_struct *vma)
{
3408
	struct perf_event *event = vma->vm_file->private_data;
3409

3410
	atomic_inc(&event->mmap_count);
3411 3412 3413 3414
}

static void perf_mmap_close(struct vm_area_struct *vma)
{
3415
	struct perf_event *event = vma->vm_file->private_data;
3416

3417
	if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
3418
		unsigned long size = perf_data_size(event->rb);
3419
		struct user_struct *user = event->mmap_user;
3420
		struct ring_buffer *rb = event->rb;
3421

3422
		atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
3423
		vma->vm_mm->pinned_vm -= event->mmap_locked;
3424
		rcu_assign_pointer(event->rb, NULL);
3425
		ring_buffer_detach(event, rb);
3426
		mutex_unlock(&event->mmap_mutex);
3427

3428
		ring_buffer_put(rb);
3429
		free_uid(user);
3430
	}
3431 3432
}

3433
static const struct vm_operations_struct perf_mmap_vmops = {
3434 3435 3436 3437
	.open		= perf_mmap_open,
	.close		= perf_mmap_close,
	.fault		= perf_mmap_fault,
	.page_mkwrite	= perf_mmap_fault,
3438 3439 3440 3441
};

static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
3442
	struct perf_event *event = file->private_data;
3443
	unsigned long user_locked, user_lock_limit;
3444
	struct user_struct *user = current_user();
3445
	unsigned long locked, lock_limit;
3446
	struct ring_buffer *rb;
3447 3448
	unsigned long vma_size;
	unsigned long nr_pages;
3449
	long user_extra, extra;
3450
	int ret = 0, flags = 0;
3451

3452 3453 3454
	/*
	 * Don't allow mmap() of inherited per-task counters. This would
	 * create a performance issue due to all children writing to the
3455
	 * same rb.
3456 3457 3458 3459
	 */
	if (event->cpu == -1 && event->attr.inherit)
		return -EINVAL;

3460
	if (!(vma->vm_flags & VM_SHARED))
3461
		return -EINVAL;
3462 3463 3464 3465

	vma_size = vma->vm_end - vma->vm_start;
	nr_pages = (vma_size / PAGE_SIZE) - 1;

3466
	/*
3467
	 * If we have rb pages ensure they're a power-of-two number, so we
3468 3469 3470
	 * can do bitmasks instead of modulo.
	 */
	if (nr_pages != 0 && !is_power_of_2(nr_pages))
3471 3472
		return -EINVAL;

3473
	if (vma_size != PAGE_SIZE * (1 + nr_pages))
3474 3475
		return -EINVAL;

3476 3477
	if (vma->vm_pgoff != 0)
		return -EINVAL;
3478

3479 3480
	WARN_ON_ONCE(event->ctx->parent_ctx);
	mutex_lock(&event->mmap_mutex);
3481 3482 3483
	if (event->rb) {
		if (event->rb->nr_pages == nr_pages)
			atomic_inc(&event->rb->refcount);
3484
		else
3485 3486 3487 3488
			ret = -EINVAL;
		goto unlock;
	}

3489
	user_extra = nr_pages + 1;
3490
	user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
I
Ingo Molnar 已提交
3491 3492 3493 3494 3495 3496

	/*
	 * Increase the limit linearly with more CPUs:
	 */
	user_lock_limit *= num_online_cpus();

3497
	user_locked = atomic_long_read(&user->locked_vm) + user_extra;
3498

3499 3500 3501
	extra = 0;
	if (user_locked > user_lock_limit)
		extra = user_locked - user_lock_limit;
3502

3503
	lock_limit = rlimit(RLIMIT_MEMLOCK);
3504
	lock_limit >>= PAGE_SHIFT;
3505
	locked = vma->vm_mm->pinned_vm + extra;
3506

3507 3508
	if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
		!capable(CAP_IPC_LOCK)) {
3509 3510 3511
		ret = -EPERM;
		goto unlock;
	}
3512

3513
	WARN_ON(event->rb);
3514

3515
	if (vma->vm_flags & VM_WRITE)
3516
		flags |= RING_BUFFER_WRITABLE;
3517

3518 3519 3520 3521
	rb = rb_alloc(nr_pages, 
		event->attr.watermark ? event->attr.wakeup_watermark : 0,
		event->cpu, flags);

3522
	if (!rb) {
3523
		ret = -ENOMEM;
3524
		goto unlock;
3525
	}
3526
	rcu_assign_pointer(event->rb, rb);
3527

3528 3529 3530
	atomic_long_add(user_extra, &user->locked_vm);
	event->mmap_locked = extra;
	event->mmap_user = get_current_user();
3531
	vma->vm_mm->pinned_vm += event->mmap_locked;
3532

3533 3534
	perf_event_update_userpage(event);

3535
unlock:
3536 3537
	if (!ret)
		atomic_inc(&event->mmap_count);
3538
	mutex_unlock(&event->mmap_mutex);
3539 3540 3541

	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &perf_mmap_vmops;
3542 3543

	return ret;
3544 3545
}
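
/*
 * The size checks above imply the canonical user-space mapping: one control
 * page followed by a power-of-two number of data pages (sketch):
 *
 *	len  = (1 + (1 << n)) * page_size;
 *	base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */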

P
Peter Zijlstra 已提交
3546 3547 3548
static int perf_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
3549
	struct perf_event *event = filp->private_data;
P
Peter Zijlstra 已提交
3550 3551 3552
	int retval;

	mutex_lock(&inode->i_mutex);
3553
	retval = fasync_helper(fd, filp, on, &event->fasync);
P
Peter Zijlstra 已提交
3554 3555 3556 3557 3558 3559 3560 3561
	mutex_unlock(&inode->i_mutex);

	if (retval < 0)
		return retval;

	return 0;
}

T
Thomas Gleixner 已提交
3562
static const struct file_operations perf_fops = {
3563
	.llseek			= no_llseek,
T
Thomas Gleixner 已提交
3564 3565 3566
	.release		= perf_release,
	.read			= perf_read,
	.poll			= perf_poll,
3567 3568
	.unlocked_ioctl		= perf_ioctl,
	.compat_ioctl		= perf_ioctl,
3569
	.mmap			= perf_mmap,
P
Peter Zijlstra 已提交
3570
	.fasync			= perf_fasync,
T
Thomas Gleixner 已提交
3571 3572
};

3573
/*
3574
 * Perf event wakeup
3575 3576 3577 3578 3579
 *
 * If there's data, ensure we set the poll() state and publish everything
 * to user-space before waking everybody up.
 */

3580
void perf_event_wakeup(struct perf_event *event)
3581
{
3582
	ring_buffer_wakeup(event);
3583

3584 3585 3586
	if (event->pending_kill) {
		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
		event->pending_kill = 0;
3587
	}
3588 3589
}

3590
static void perf_pending_event(struct irq_work *entry)
3591
{
3592 3593
	struct perf_event *event = container_of(entry,
			struct perf_event, pending);
3594

3595 3596 3597
	if (event->pending_disable) {
		event->pending_disable = 0;
		__perf_event_disable(event);
3598 3599
	}

3600 3601 3602
	if (event->pending_wakeup) {
		event->pending_wakeup = 0;
		perf_event_wakeup(event);
3603 3604 3605
	}
}

3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626
/*
 * We assume there is only KVM supporting the callbacks.
 * Later on, we might change it to a list if there is
 * another virtualization implementation supporting the callbacks.
 */
struct perf_guest_info_callbacks *perf_guest_cbs;

int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
	perf_guest_cbs = cbs;
	return 0;
}
EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);

int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
	perf_guest_cbs = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);

3627 3628 3629
static void __perf_event_header__init_id(struct perf_event_header *header,
					 struct perf_sample_data *data,
					 struct perf_event *event)
3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656
{
	u64 sample_type = event->attr.sample_type;

	data->type = sample_type;
	header->size += event->id_header_size;

	if (sample_type & PERF_SAMPLE_TID) {
		/* namespace issues */
		data->tid_entry.pid = perf_event_pid(event, current);
		data->tid_entry.tid = perf_event_tid(event, current);
	}

	if (sample_type & PERF_SAMPLE_TIME)
		data->time = perf_clock();

	if (sample_type & PERF_SAMPLE_ID)
		data->id = primary_event_id(event);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		data->stream_id = event->id;

	if (sample_type & PERF_SAMPLE_CPU) {
		data->cpu_entry.cpu	 = raw_smp_processor_id();
		data->cpu_entry.reserved = 0;
	}
}

3657 3658 3659
void perf_event_header__init_id(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event)
3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685
{
	if (event->attr.sample_id_all)
		__perf_event_header__init_id(header, data, event);
}

static void __perf_event__output_id_sample(struct perf_output_handle *handle,
					   struct perf_sample_data *data)
{
	u64 sample_type = data->type;

	if (sample_type & PERF_SAMPLE_TID)
		perf_output_put(handle, data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		perf_output_put(handle, data->time);

	if (sample_type & PERF_SAMPLE_ID)
		perf_output_put(handle, data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		perf_output_put(handle, data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		perf_output_put(handle, data->cpu_entry);
}

3686 3687 3688
void perf_event__output_id_sample(struct perf_event *event,
				  struct perf_output_handle *handle,
				  struct perf_sample_data *sample)
3689 3690 3691 3692 3693
{
	if (event->attr.sample_id_all)
		__perf_event__output_id_sample(handle, sample);
}

static void perf_output_read_one(struct perf_output_handle *handle,
				 struct perf_event *event,
				 u64 enabled, u64 running)
{
	u64 read_format = event->attr.read_format;
	u64 values[4];
	int n = 0;

	values[n++] = perf_event_count(event);
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
		values[n++] = enabled +
			atomic64_read(&event->child_total_time_enabled);
	}
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
		values[n++] = running +
			atomic64_read(&event->child_total_time_running);
	}
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(event);

	__output_copy(handle, values, n * sizeof(u64));
}

/*
 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
 */
static void perf_output_read_group(struct perf_output_handle *handle,
			    struct perf_event *event,
			    u64 enabled, u64 running)
{
	struct perf_event *leader = event->group_leader, *sub;
	u64 read_format = event->attr.read_format;
	u64 values[5];
	int n = 0;

	values[n++] = 1 + leader->nr_siblings;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;

	if (leader != event)
		leader->pmu->read(leader);

	values[n++] = perf_event_count(leader);
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(leader);

	__output_copy(handle, values, n * sizeof(u64));

	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		n = 0;

		if (sub != event)
			sub->pmu->read(sub);

		values[n++] = perf_event_count(sub);
		if (read_format & PERF_FORMAT_ID)
			values[n++] = primary_event_id(sub);

		__output_copy(handle, values, n * sizeof(u64));
	}
}

#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
				 PERF_FORMAT_TOTAL_TIME_RUNNING)

static void perf_output_read(struct perf_output_handle *handle,
			     struct perf_event *event)
{
	u64 enabled = 0, running = 0;
	u64 read_format = event->attr.read_format;

	/*
	 * Compute total_time_enabled and total_time_running
	 * based on snapshot values taken when the event
	 * was last scheduled in.
	 *
	 * We cannot simply call update_context_time()
	 * because of locking issues: we may be called from
	 * NMI context.
	 */
	if (read_format & PERF_FORMAT_TOTAL_TIMES)
		calc_timer_values(event, &enabled, &running);

	if (event->attr.read_format & PERF_FORMAT_GROUP)
		perf_output_read_group(handle, event, enabled, running);
	else
		perf_output_read_one(handle, event, enabled, running);
}

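/*
 * Write out a sample record.  The layout must match what
 * perf_prepare_sample() accounted for in header->size.
 */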
void perf_output_sample(struct perf_output_handle *handle,
			struct perf_event_header *header,
			struct perf_sample_data *data,
			struct perf_event *event)
{
	u64 sample_type = data->type;

	perf_output_put(handle, *header);

	if (sample_type & PERF_SAMPLE_IP)
		perf_output_put(handle, data->ip);

	if (sample_type & PERF_SAMPLE_TID)
		perf_output_put(handle, data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		perf_output_put(handle, data->time);

	if (sample_type & PERF_SAMPLE_ADDR)
		perf_output_put(handle, data->addr);

	if (sample_type & PERF_SAMPLE_ID)
		perf_output_put(handle, data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		perf_output_put(handle, data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		perf_output_put(handle, data->cpu_entry);

	if (sample_type & PERF_SAMPLE_PERIOD)
		perf_output_put(handle, data->period);

	if (sample_type & PERF_SAMPLE_READ)
		perf_output_read(handle, event);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		if (data->callchain) {
			int size = 1;

			if (data->callchain)
				size += data->callchain->nr;

			size *= sizeof(u64);

			__output_copy(handle, data->callchain, size);
		} else {
			u64 nr = 0;
			perf_output_put(handle, nr);
		}
	}

	if (sample_type & PERF_SAMPLE_RAW) {
		if (data->raw) {
			perf_output_put(handle, data->raw->size);
			__output_copy(handle, data->raw->data,
					   data->raw->size);
		} else {
			struct {
				u32	size;
				u32	data;
			} raw = {
				.size = sizeof(u32),
				.data = 0,
			};
			perf_output_put(handle, raw);
		}
	}

	if (!event->attr.watermark) {
		int wakeup_events = event->attr.wakeup_events;

		if (wakeup_events) {
			struct ring_buffer *rb = handle->rb;
			int events = local_inc_return(&rb->events);

			if (events >= wakeup_events) {
				local_sub(wakeup_events, &rb->events);
				local_inc(&rb->wakeup);
			}
		}
	}
}

void perf_prepare_sample(struct perf_event_header *header,
			 struct perf_sample_data *data,
			 struct perf_event *event,
			 struct pt_regs *regs)
{
	u64 sample_type = event->attr.sample_type;

	header->type = PERF_RECORD_SAMPLE;
	header->size = sizeof(*header) + event->header_size;

	header->misc = 0;
	header->misc |= perf_misc_flags(regs);

	__perf_event_header__init_id(header, data, event);

	if (sample_type & PERF_SAMPLE_IP)
		data->ip = perf_instruction_pointer(regs);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		int size = 1;

		data->callchain = perf_callchain(regs);

		if (data->callchain)
			size += data->callchain->nr;

		header->size += size * sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_RAW) {
		int size = sizeof(u32);

		if (data->raw)
			size += data->raw->size;
		else
			size += sizeof(u32);

		WARN_ON_ONCE(size & (sizeof(u64)-1));
		header->size += size;
	}
}

static void perf_event_output(struct perf_event *event,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct perf_output_handle handle;
	struct perf_event_header header;

	/* protect the callchain buffers */
	rcu_read_lock();

	perf_prepare_sample(&header, data, event, regs);

	if (perf_output_begin(&handle, event, header.size))
		goto exit;

	perf_output_sample(&handle, &header, data, event);

	perf_output_end(&handle);

exit:
	rcu_read_unlock();
}

/*
 * read event_id
 */

struct perf_read_event {
	struct perf_event_header	header;

	u32				pid;
	u32				tid;
};

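/*
 * Emit a PERF_RECORD_READ event for the given task, including the current
 * counter value(s) in the event's read_format layout.
 */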
static void
perf_event_read_event(struct perf_event *event,
			struct task_struct *task)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	struct perf_read_event read_event = {
		.header = {
			.type = PERF_RECORD_READ,
			.misc = 0,
			.size = sizeof(read_event) + event->read_size,
		},
		.pid = perf_event_pid(event, task),
		.tid = perf_event_tid(event, task),
	};
	int ret;

	perf_event_header__init_id(&read_event.header, &sample, event);
	ret = perf_output_begin(&handle, event, read_event.header.size);
	if (ret)
		return;

	perf_output_put(&handle, read_event);
	perf_output_read(&handle, event);
	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
}

/*
 * task tracking -- fork/exit
 *
 * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
 */

struct perf_task_event {
	struct task_struct		*task;
	struct perf_event_context	*task_ctx;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				ppid;
		u32				tid;
		u32				ptid;
		u64				time;
	} event_id;
};

static void perf_event_task_output(struct perf_event *event,
				     struct perf_task_event *task_event)
{
	struct perf_output_handle handle;
	struct perf_sample_data	sample;
	struct task_struct *task = task_event->task;
	int ret, size = task_event->event_id.header.size;

	perf_event_header__init_id(&task_event->event_id.header, &sample, event);

	ret = perf_output_begin(&handle, event,
				task_event->event_id.header.size);
	if (ret)
		goto out;

	task_event->event_id.pid = perf_event_pid(event, task);
	task_event->event_id.ppid = perf_event_pid(event, current);

	task_event->event_id.tid = perf_event_tid(event, task);
	task_event->event_id.ptid = perf_event_tid(event, current);

	perf_output_put(&handle, task_event->event_id);

	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
out:
	task_event->event_id.header.size = size;
}

static int perf_event_task_match(struct perf_event *event)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (!event_filter_match(event))
		return 0;

	if (event->attr.comm || event->attr.mmap ||
	    event->attr.mmap_data || event->attr.task)
		return 1;

	return 0;
}

static void perf_event_task_ctx(struct perf_event_context *ctx,
				  struct perf_task_event *task_event)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_task_match(event))
			perf_event_task_output(event, task_event);
	}
}

static void perf_event_task_event(struct perf_task_event *task_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int ctxn;

	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
		if (cpuctx->active_pmu != pmu)
			goto next;
		perf_event_task_ctx(&cpuctx->ctx, task_event);

		ctx = task_event->task_ctx;
		if (!ctx) {
			ctxn = pmu->task_ctx_nr;
			if (ctxn < 0)
				goto next;
			ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
		}
		if (ctx)
			perf_event_task_ctx(ctx, task_event);
next:
		put_cpu_ptr(pmu->pmu_cpu_context);
	}
	rcu_read_unlock();
}

static void perf_event_task(struct task_struct *task,
			      struct perf_event_context *task_ctx,
			      int new)
{
	struct perf_task_event task_event;

	if (!atomic_read(&nr_comm_events) &&
	    !atomic_read(&nr_mmap_events) &&
	    !atomic_read(&nr_task_events))
		return;

	task_event = (struct perf_task_event){
		.task	  = task,
		.task_ctx = task_ctx,
		.event_id    = {
			.header = {
				.type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
				.misc = 0,
				.size = sizeof(task_event.event_id),
			},
			/* .pid  */
			/* .ppid */
			/* .tid  */
			/* .ptid */
			.time = perf_clock(),
		},
	};

	perf_event_task_event(&task_event);
}

void perf_event_fork(struct task_struct *task)
{
	perf_event_task(task, NULL, 1);
}

/*
 * comm tracking
 */

struct perf_comm_event {
	struct task_struct	*task;
	char			*comm;
	int			comm_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
	} event_id;
};

static void perf_event_comm_output(struct perf_event *event,
				     struct perf_comm_event *comm_event)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int size = comm_event->event_id.header.size;
	int ret;

	perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
	ret = perf_output_begin(&handle, event,
				comm_event->event_id.header.size);

	if (ret)
		goto out;

	comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
	comm_event->event_id.tid = perf_event_tid(event, comm_event->task);

	perf_output_put(&handle, comm_event->event_id);
	__output_copy(&handle, comm_event->comm,
				   comm_event->comm_size);

	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
out:
	comm_event->event_id.header.size = size;
}

static int perf_event_comm_match(struct perf_event *event)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (!event_filter_match(event))
		return 0;

	if (event->attr.comm)
		return 1;

	return 0;
}

static void perf_event_comm_ctx(struct perf_event_context *ctx,
				  struct perf_comm_event *comm_event)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_comm_match(event))
			perf_event_comm_output(event, comm_event);
	}
}

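/*
 * Build the zero-padded comm string for the task and deliver the
 * PERF_RECORD_COMM event to all interested contexts on every PMU.
 */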
static void perf_event_comm_event(struct perf_comm_event *comm_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	char comm[TASK_COMM_LEN];
	unsigned int size;
	struct pmu *pmu;
	int ctxn;

	memset(comm, 0, sizeof(comm));
	strlcpy(comm, comm_event->task->comm, sizeof(comm));
	size = ALIGN(strlen(comm)+1, sizeof(u64));

	comm_event->comm = comm;
	comm_event->comm_size = size;

	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
		if (cpuctx->active_pmu != pmu)
			goto next;
		perf_event_comm_ctx(&cpuctx->ctx, comm_event);

		ctxn = pmu->task_ctx_nr;
		if (ctxn < 0)
			goto next;

		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
		if (ctx)
			perf_event_comm_ctx(ctx, comm_event);
next:
		put_cpu_ptr(pmu->pmu_cpu_context);
	}
	rcu_read_unlock();
}

void perf_event_comm(struct task_struct *task)
{
	struct perf_comm_event comm_event;
	struct perf_event_context *ctx;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		perf_event_enable_on_exec(ctx);
	}

	if (!atomic_read(&nr_comm_events))
		return;

	comm_event = (struct perf_comm_event){
		.task	= task,
		/* .comm      */
		/* .comm_size */
		.event_id  = {
			.header = {
				.type = PERF_RECORD_COMM,
				.misc = 0,
				/* .size */
			},
			/* .pid */
			/* .tid */
		},
	};

	perf_event_comm_event(&comm_event);
}

/*
 * mmap tracking
 */

struct perf_mmap_event {
	struct vm_area_struct	*vma;

	const char		*file_name;
	int			file_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
		u64				start;
		u64				len;
		u64				pgoff;
	} event_id;
};

static void perf_event_mmap_output(struct perf_event *event,
				     struct perf_mmap_event *mmap_event)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int size = mmap_event->event_id.header.size;
	int ret;

	perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
	ret = perf_output_begin(&handle, event,
				mmap_event->event_id.header.size);
	if (ret)
		goto out;

	mmap_event->event_id.pid = perf_event_pid(event, current);
	mmap_event->event_id.tid = perf_event_tid(event, current);

	perf_output_put(&handle, mmap_event->event_id);
	__output_copy(&handle, mmap_event->file_name,
				   mmap_event->file_size);

	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
out:
	mmap_event->event_id.header.size = size;
}

static int perf_event_mmap_match(struct perf_event *event,
				   struct perf_mmap_event *mmap_event,
				   int executable)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (!event_filter_match(event))
		return 0;

	if ((!executable && event->attr.mmap_data) ||
	    (executable && event->attr.mmap))
		return 1;

	return 0;
}

static void perf_event_mmap_ctx(struct perf_event_context *ctx,
				  struct perf_mmap_event *mmap_event,
				  int executable)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_mmap_match(event, mmap_event, executable))
			perf_event_mmap_output(event, mmap_event);
	}
}

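/*
 * Resolve a name for the mapping (d_path() for file-backed vmas, otherwise
 * [vdso], [heap], [stack] or //anon) and deliver the PERF_RECORD_MMAP
 * event to all interested contexts.
 */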
static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	struct vm_area_struct *vma = mmap_event->vma;
	struct file *file = vma->vm_file;
	unsigned int size;
	char tmp[16];
	char *buf = NULL;
	const char *name;
	struct pmu *pmu;
	int ctxn;

	memset(tmp, 0, sizeof(tmp));

	if (file) {
		/*
		 * d_path works from the end of the rb backwards, so we
		 * need to add enough zero bytes after the string to handle
		 * the 64bit alignment we do later.
		 */
		buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
		if (!buf) {
			name = strncpy(tmp, "//enomem", sizeof(tmp));
			goto got_name;
		}
		name = d_path(&file->f_path, buf, PATH_MAX);
		if (IS_ERR(name)) {
			name = strncpy(tmp, "//toolong", sizeof(tmp));
			goto got_name;
		}
	} else {
		if (arch_vma_name(mmap_event->vma)) {
			name = strncpy(tmp, arch_vma_name(mmap_event->vma),
				       sizeof(tmp));
			goto got_name;
		}

		if (!vma->vm_mm) {
			name = strncpy(tmp, "[vdso]", sizeof(tmp));
			goto got_name;
		} else if (vma->vm_start <= vma->vm_mm->start_brk &&
				vma->vm_end >= vma->vm_mm->brk) {
			name = strncpy(tmp, "[heap]", sizeof(tmp));
			goto got_name;
		} else if (vma->vm_start <= vma->vm_mm->start_stack &&
				vma->vm_end >= vma->vm_mm->start_stack) {
			name = strncpy(tmp, "[stack]", sizeof(tmp));
			goto got_name;
		}

		name = strncpy(tmp, "//anon", sizeof(tmp));
		goto got_name;
	}

got_name:
	size = ALIGN(strlen(name)+1, sizeof(u64));

	mmap_event->file_name = name;
	mmap_event->file_size = size;

	mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;

	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
		if (cpuctx->active_pmu != pmu)
			goto next;
		perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
					vma->vm_flags & VM_EXEC);

		ctxn = pmu->task_ctx_nr;
		if (ctxn < 0)
			goto next;

		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
		if (ctx) {
			perf_event_mmap_ctx(ctx, mmap_event,
					vma->vm_flags & VM_EXEC);
		}
next:
		put_cpu_ptr(pmu->pmu_cpu_context);
	}
	rcu_read_unlock();

	kfree(buf);
}

void perf_event_mmap(struct vm_area_struct *vma)
{
	struct perf_mmap_event mmap_event;

	if (!atomic_read(&nr_mmap_events))
		return;

	mmap_event = (struct perf_mmap_event){
		.vma	= vma,
		/* .file_name */
		/* .file_size */
		.event_id  = {
			.header = {
				.type = PERF_RECORD_MMAP,
				.misc = PERF_RECORD_MISC_USER,
				/* .size */
			},
			/* .pid */
			/* .tid */
			.start  = vma->vm_start,
			.len    = vma->vm_end - vma->vm_start,
			.pgoff  = (u64)vma->vm_pgoff << PAGE_SHIFT,
		},
	};

	perf_event_mmap_event(&mmap_event);
}

/*
 * IRQ throttle logging
 */

static void perf_log_throttle(struct perf_event *event, int enable)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int ret;

	struct {
		struct perf_event_header	header;
		u64				time;
		u64				id;
		u64				stream_id;
	} throttle_event = {
		.header = {
			.type = PERF_RECORD_THROTTLE,
			.misc = 0,
			.size = sizeof(throttle_event),
		},
		.time		= perf_clock(),
		.id		= primary_event_id(event),
		.stream_id	= event->id,
	};

	if (enable)
		throttle_event.header.type = PERF_RECORD_UNTHROTTLE;

	perf_event_header__init_id(&throttle_event.header, &sample, event);

	ret = perf_output_begin(&handle, event,
				throttle_event.header.size);
	if (ret)
		return;

	perf_output_put(&handle, throttle_event);
	perf_event__output_id_sample(event, &handle, &sample);
	perf_output_end(&handle);
}

/*
 * Generic event overflow handling, sampling.
 */

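/*
 * Common overflow path: throttle events that exceed max_samples_per_tick,
 * re-adjust the period for freq based events, and hand the sample to the
 * overflow handler (or the default output path).
 */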
static int __perf_event_overflow(struct perf_event *event,
				   int throttle, struct perf_sample_data *data,
				   struct pt_regs *regs)
{
	int events = atomic_read(&event->event_limit);
	struct hw_perf_event *hwc = &event->hw;
	int ret = 0;

	/*
	 * Non-sampling counters might still use the PMI to fold short
	 * hardware counters, ignore those.
	 */
	if (unlikely(!is_sampling_event(event)))
		return 0;

	if (unlikely(hwc->interrupts >= max_samples_per_tick)) {
		if (throttle) {
			hwc->interrupts = MAX_INTERRUPTS;
			perf_log_throttle(event, 0);
			ret = 1;
		}
	} else
		hwc->interrupts++;

	if (event->attr.freq) {
		u64 now = perf_clock();
		s64 delta = now - hwc->freq_time_stamp;

		hwc->freq_time_stamp = now;

		if (delta > 0 && delta < 2*TICK_NSEC)
			perf_adjust_period(event, delta, hwc->last_period);
	}

	/*
	 * XXX event_limit might not quite work as expected on inherited
	 * events
	 */

	event->pending_kill = POLL_IN;
	if (events && atomic_dec_and_test(&event->event_limit)) {
		ret = 1;
		event->pending_kill = POLL_HUP;
		event->pending_disable = 1;
		irq_work_queue(&event->pending);
	}

	if (event->overflow_handler)
		event->overflow_handler(event, data, regs);
	else
		perf_event_output(event, data, regs);

	if (event->fasync && event->pending_kill) {
		event->pending_wakeup = 1;
		irq_work_queue(&event->pending);
	}

	return ret;
}

int perf_event_overflow(struct perf_event *event,
			  struct perf_sample_data *data,
			  struct pt_regs *regs)
{
	return __perf_event_overflow(event, 1, data, regs);
}

/*
 * Generic software event infrastructure
 */

struct swevent_htable {
	struct swevent_hlist		*swevent_hlist;
	struct mutex			hlist_mutex;
	int				hlist_refcount;

	/* Recursion avoidance in each contexts */
	int				recursion[PERF_NR_CONTEXTS];
};

static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);

/*
 * We directly increment event->count and keep a second value in
 * event->hw.period_left to count intervals. This period event
 * is kept in the range [-sample_period, 0] so that we can use the
 * sign as trigger.
 */

static u64 perf_swevent_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 period = hwc->last_period;
	u64 nr, offset;
	s64 old, val;

	hwc->last_period = hwc->sample_period;

again:
	old = val = local64_read(&hwc->period_left);
	if (val < 0)
		return 0;

	nr = div64_u64(period + val, period);
	offset = nr * period;
	val -= offset;
	if (local64_cmpxchg(&hwc->period_left, old, val) != old)
		goto again;

	return nr;
}

static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
				    struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;
	int throttle = 0;

	if (!overflow)
		overflow = perf_swevent_set_period(event);

	if (hwc->interrupts == MAX_INTERRUPTS)
		return;

	for (; overflow; overflow--) {
		if (__perf_event_overflow(event, throttle,
					    data, regs)) {
			/*
			 * We inhibit the overflow from happening when
			 * hwc->interrupts == MAX_INTERRUPTS.
			 */
			break;
		}
		throttle = 1;
	}
}

static void perf_swevent_event(struct perf_event *event, u64 nr,
			       struct perf_sample_data *data,
			       struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;

	local64_add(nr, &event->count);

	if (!regs)
		return;

	if (!is_sampling_event(event))
		return;

	if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
		data->period = nr;
		return perf_swevent_overflow(event, 1, data, regs);
	} else
		data->period = event->hw.last_period;

	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
		return perf_swevent_overflow(event, 1, data, regs);

	if (local64_add_negative(nr, &hwc->period_left))
		return;

	perf_swevent_overflow(event, 0, data, regs);
}

static int perf_exclude_event(struct perf_event *event,
			      struct pt_regs *regs)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return 1;

	if (regs) {
		if (event->attr.exclude_user && user_mode(regs))
			return 1;

		if (event->attr.exclude_kernel && !user_mode(regs))
			return 1;
	}

	return 0;
}

static int perf_swevent_match(struct perf_event *event,
				enum perf_type_id type,
				u32 event_id,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	if (event->attr.type != type)
		return 0;

	if (event->attr.config != event_id)
		return 0;

	if (perf_exclude_event(event, regs))
		return 0;

	return 1;
}

static inline u64 swevent_hash(u64 type, u32 event_id)
{
	u64 val = event_id | (type << 32);

	return hash_64(val, SWEVENT_HLIST_BITS);
}

static inline struct hlist_head *
__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
{
	u64 hash = swevent_hash(type, event_id);

	return &hlist->heads[hash];
}

/* For the read side: events when they trigger */
static inline struct hlist_head *
find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
{
	struct swevent_hlist *hlist;

	hlist = rcu_dereference(swhash->swevent_hlist);
	if (!hlist)
		return NULL;

	return __find_swevent_head(hlist, type, event_id);
}

/* For the event head insertion and removal in the hlist */
static inline struct hlist_head *
find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
{
	struct swevent_hlist *hlist;
	u32 event_id = event->attr.config;
	u64 type = event->attr.type;

	/*
	 * Event scheduling is always serialized against hlist allocation
	 * and release. Which makes the protected version suitable here.
	 * The context lock guarantees that.
	 */
	hlist = rcu_dereference_protected(swhash->swevent_hlist,
					  lockdep_is_held(&event->ctx->lock));
	if (!hlist)
		return NULL;

	return __find_swevent_head(hlist, type, event_id);
}

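/*
 * Find the per-cpu hlist bucket for (type, event_id) under RCU and deliver
 * the value to every matching software event.
 */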
static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
				    u64 nr,
				    struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
	struct perf_event *event;
	struct hlist_node *node;
	struct hlist_head *head;

	rcu_read_lock();
	head = find_swevent_head_rcu(swhash, type, event_id);
	if (!head)
		goto end;

	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
		if (perf_swevent_match(event, type, event_id, data, regs))
			perf_swevent_event(event, nr, data, regs);
	}
end:
	rcu_read_unlock();
}

int perf_swevent_get_recursion_context(void)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);

	return get_recursion_context(swhash->recursion);
}
EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);

inline void perf_swevent_put_recursion_context(int rctx)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);

	put_recursion_context(swhash->recursion, rctx);
}

void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	struct perf_sample_data data;
	int rctx;

	preempt_disable_notrace();
	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		return;

	perf_sample_data_init(&data, addr);

	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);

	perf_swevent_put_recursion_context(rctx);
	preempt_enable_notrace();
}

static void perf_swevent_read(struct perf_event *event)
{
}

static int perf_swevent_add(struct perf_event *event, int flags)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
	struct hw_perf_event *hwc = &event->hw;
	struct hlist_head *head;

	if (is_sampling_event(event)) {
		hwc->last_period = hwc->sample_period;
		perf_swevent_set_period(event);
	}

	hwc->state = !(flags & PERF_EF_START);

	head = find_swevent_head(swhash, event);
	if (WARN_ON_ONCE(!head))
		return -EINVAL;

	hlist_add_head_rcu(&event->hlist_entry, head);

	return 0;
}

static void perf_swevent_del(struct perf_event *event, int flags)
{
	hlist_del_rcu(&event->hlist_entry);
}

static void perf_swevent_start(struct perf_event *event, int flags)
{
	event->hw.state = 0;
}

static void perf_swevent_stop(struct perf_event *event, int flags)
{
	event->hw.state = PERF_HES_STOPPED;
}

/* Deref the hlist from the update side */
static inline struct swevent_hlist *
swevent_hlist_deref(struct swevent_htable *swhash)
{
	return rcu_dereference_protected(swhash->swevent_hlist,
					 lockdep_is_held(&swhash->hlist_mutex));
}

static void swevent_hlist_release(struct swevent_htable *swhash)
{
	struct swevent_hlist *hlist = swevent_hlist_deref(swhash);

	if (!hlist)
		return;

	rcu_assign_pointer(swhash->swevent_hlist, NULL);
	kfree_rcu(hlist, rcu_head);
}

static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);

	if (!--swhash->hlist_refcount)
		swevent_hlist_release(swhash);

	mutex_unlock(&swhash->hlist_mutex);
}

static void swevent_hlist_put(struct perf_event *event)
{
	int cpu;

	if (event->cpu != -1) {
		swevent_hlist_put_cpu(event, event->cpu);
		return;
	}

	for_each_possible_cpu(cpu)
		swevent_hlist_put_cpu(event, cpu);
}

static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
	int err = 0;

	mutex_lock(&swhash->hlist_mutex);

	if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
		struct swevent_hlist *hlist;

		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
		if (!hlist) {
			err = -ENOMEM;
			goto exit;
		}
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	swhash->hlist_refcount++;
exit:
	mutex_unlock(&swhash->hlist_mutex);

	return err;
}

static int swevent_hlist_get(struct perf_event *event)
{
	int err;
	int cpu, failed_cpu;

	if (event->cpu != -1)
		return swevent_hlist_get_cpu(event, event->cpu);

	get_online_cpus();
	for_each_possible_cpu(cpu) {
		err = swevent_hlist_get_cpu(event, cpu);
		if (err) {
			failed_cpu = cpu;
			goto fail;
		}
	}
	put_online_cpus();

	return 0;
fail:
	for_each_possible_cpu(cpu) {
		if (cpu == failed_cpu)
			break;
		swevent_hlist_put_cpu(event, cpu);
	}

	put_online_cpus();
	return err;
}

struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

static void sw_perf_event_destroy(struct perf_event *event)
{
	u64 event_id = event->attr.config;

	WARN_ON(event->parent);

	jump_label_dec(&perf_swevent_enabled[event_id]);
	swevent_hlist_put(event);
}

static int perf_swevent_init(struct perf_event *event)
{
	int event_id = event->attr.config;

	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	switch (event_id) {
	case PERF_COUNT_SW_CPU_CLOCK:
	case PERF_COUNT_SW_TASK_CLOCK:
		return -ENOENT;

	default:
		break;
	}

	if (event_id >= PERF_COUNT_SW_MAX)
		return -ENOENT;

	if (!event->parent) {
		int err;

		err = swevent_hlist_get(event);
		if (err)
			return err;

		jump_label_inc(&perf_swevent_enabled[event_id]);
		event->destroy = sw_perf_event_destroy;
	}

	return 0;
}

static int perf_swevent_event_idx(struct perf_event *event)
{
	return 0;
}

static struct pmu perf_swevent = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= perf_swevent_init,
	.add		= perf_swevent_add,
	.del		= perf_swevent_del,
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
	.read		= perf_swevent_read,

	.event_idx	= perf_swevent_event_idx,
};

#ifdef CONFIG_EVENT_TRACING

static int perf_tp_filter_match(struct perf_event *event,
				struct perf_sample_data *data)
{
	void *record = data->raw->data;

	if (likely(!event->filter) || filter_match_preds(event->filter, record))
		return 1;
	return 0;
}

static int perf_tp_event_match(struct perf_event *event,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return 0;
	/*
	 * All tracepoints are from kernel-space.
	 */
	if (event->attr.exclude_kernel)
		return 0;

	if (!perf_tp_filter_match(event, data))
		return 0;

	return 1;
}

void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
		   struct pt_regs *regs, struct hlist_head *head, int rctx)
{
	struct perf_sample_data data;
	struct perf_event *event;
	struct hlist_node *node;

	struct perf_raw_record raw = {
		.size = entry_size,
		.data = record,
	};

	perf_sample_data_init(&data, addr);
	data.raw = &raw;

	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
		if (perf_tp_event_match(event, &data, regs))
			perf_swevent_event(event, count, &data, regs);
	}

	perf_swevent_put_recursion_context(rctx);
}
EXPORT_SYMBOL_GPL(perf_tp_event);

static void tp_perf_event_destroy(struct perf_event *event)
{
	perf_trace_destroy(event);
}

static int perf_tp_event_init(struct perf_event *event)
{
	int err;

	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -ENOENT;

	err = perf_trace_init(event);
	if (err)
		return err;

	event->destroy = tp_perf_event_destroy;

	return 0;
}

static struct pmu perf_tracepoint = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= perf_tp_event_init,
	.add		= perf_trace_add,
	.del		= perf_trace_del,
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
	.read		= perf_swevent_read,

	.event_idx	= perf_swevent_event_idx,
};

static inline void perf_tp_register(void)
{
	perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
}

static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
	char *filter_str;
	int ret;

	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;

	filter_str = strndup_user(arg, PAGE_SIZE);
	if (IS_ERR(filter_str))
		return PTR_ERR(filter_str);

	ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);

	kfree(filter_str);
	return ret;
}

static void perf_event_free_filter(struct perf_event *event)
{
	ftrace_profile_free_filter(event);
}

#else

static inline void perf_tp_register(void)
{
}

static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
	return -ENOENT;
}

static void perf_event_free_filter(struct perf_event *event)
{
}

#endif /* CONFIG_EVENT_TRACING */

#ifdef CONFIG_HAVE_HW_BREAKPOINT
void perf_bp_event(struct perf_event *bp, void *data)
{
	struct perf_sample_data sample;
	struct pt_regs *regs = data;

	perf_sample_data_init(&sample, bp->attr.bp_addr);

	if (!bp->hw.state && !perf_exclude_event(bp, regs))
		perf_swevent_event(bp, 1, &sample, regs);
}
#endif

/*
 * hrtimer based swevent callback
 */

static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
{
	enum hrtimer_restart ret = HRTIMER_RESTART;
	struct perf_sample_data data;
	struct pt_regs *regs;
	struct perf_event *event;
	u64 period;

	event = container_of(hrtimer, struct perf_event, hw.hrtimer);

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return HRTIMER_NORESTART;

	event->pmu->read(event);

	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;
	regs = get_irq_regs();

	if (regs && !perf_exclude_event(event, regs)) {
		if (!(event->attr.exclude_idle && current->pid == 0))
			if (perf_event_overflow(event, &data, regs))
				ret = HRTIMER_NORESTART;
	}

	period = max_t(u64, 10000, event->hw.sample_period);
	hrtimer_forward_now(hrtimer, ns_to_ktime(period));

	return ret;
}

static void perf_swevent_start_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 period;

	if (!is_sampling_event(event))
		return;

	period = local64_read(&hwc->period_left);
	if (period) {
		if (period < 0)
			period = 10000;

		local64_set(&hwc->period_left, 0);
	} else {
		period = max_t(u64, 10000, hwc->sample_period);
	}
	__hrtimer_start_range_ns(&hwc->hrtimer,
				ns_to_ktime(period), 0,
				HRTIMER_MODE_REL_PINNED, 0);
}

static void perf_swevent_cancel_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (is_sampling_event(event)) {
		ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
		local64_set(&hwc->period_left, ktime_to_ns(remaining));

		hrtimer_cancel(&hwc->hrtimer);
	}
}

static void perf_swevent_init_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!is_sampling_event(event))
		return;

	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swevent_hrtimer;

	/*
	 * Since hrtimers have a fixed rate, we can do a static freq->period
	 * mapping and avoid the whole period adjust feedback stuff.
	 */
	if (event->attr.freq) {
		long freq = event->attr.sample_freq;

		event->attr.sample_period = NSEC_PER_SEC / freq;
		hwc->sample_period = event->attr.sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
		event->attr.freq = 0;
	}
}

/*
 * Software event: cpu wall time clock
 */

static void cpu_clock_event_update(struct perf_event *event)
{
	s64 prev;
	u64 now;

	now = local_clock();
	prev = local64_xchg(&event->hw.prev_count, now);
	local64_add(now - prev, &event->count);
}

static void cpu_clock_event_start(struct perf_event *event, int flags)
{
	local64_set(&event->hw.prev_count, local_clock());
	perf_swevent_start_hrtimer(event);
}

static void cpu_clock_event_stop(struct perf_event *event, int flags)
{
	perf_swevent_cancel_hrtimer(event);
	cpu_clock_event_update(event);
}

static int cpu_clock_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		cpu_clock_event_start(event, flags);

	return 0;
}

static void cpu_clock_event_del(struct perf_event *event, int flags)
{
	cpu_clock_event_stop(event, flags);
}

static void cpu_clock_event_read(struct perf_event *event)
{
	cpu_clock_event_update(event);
}

static int cpu_clock_event_init(struct perf_event *event)
{
	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
		return -ENOENT;

	perf_swevent_init_hrtimer(event);

	return 0;
}

static struct pmu perf_cpu_clock = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= cpu_clock_event_init,
	.add		= cpu_clock_event_add,
	.del		= cpu_clock_event_del,
	.start		= cpu_clock_event_start,
	.stop		= cpu_clock_event_stop,
	.read		= cpu_clock_event_read,

	.event_idx	= perf_swevent_event_idx,
};

/*
 * Software event: task time clock
 */

static void task_clock_event_update(struct perf_event *event, u64 now)
{
	u64 prev;
	s64 delta;

	prev = local64_xchg(&event->hw.prev_count, now);
	delta = now - prev;
	local64_add(delta, &event->count);
}

static void task_clock_event_start(struct perf_event *event, int flags)
{
	local64_set(&event->hw.prev_count, event->ctx->time);
	perf_swevent_start_hrtimer(event);
}

static void task_clock_event_stop(struct perf_event *event, int flags)
{
	perf_swevent_cancel_hrtimer(event);
	task_clock_event_update(event, event->ctx->time);
}

static int task_clock_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		task_clock_event_start(event, flags);

	return 0;
}

static void task_clock_event_del(struct perf_event *event, int flags)
{
	task_clock_event_stop(event, PERF_EF_UPDATE);
}

static void task_clock_event_read(struct perf_event *event)
{
	u64 now = perf_clock();
	u64 delta = now - event->ctx->timestamp;
	u64 time = event->ctx->time + delta;

	task_clock_event_update(event, time);
}

static int task_clock_event_init(struct perf_event *event)
{
	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
		return -ENOENT;

	perf_swevent_init_hrtimer(event);

	return 0;
}

static struct pmu perf_task_clock = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= task_clock_event_init,
	.add		= task_clock_event_add,
	.del		= task_clock_event_del,
	.start		= task_clock_event_start,
	.stop		= task_clock_event_stop,
	.read		= task_clock_event_read,

	.event_idx	= perf_swevent_event_idx,
};

static void perf_pmu_nop_void(struct pmu *pmu)
{
}

static int perf_pmu_nop_int(struct pmu *pmu)
{
	return 0;
}

static void perf_pmu_start_txn(struct pmu *pmu)
{
	perf_pmu_disable(pmu);
}

static int perf_pmu_commit_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
	return 0;
}

static void perf_pmu_cancel_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
}

static int perf_event_idx_default(struct perf_event *event)
{
	return event->hw.idx + 1;
}

/*
 * Ensures all contexts with the same task_ctx_nr have the same
 * pmu_cpu_context too.
 */
static void *find_pmu_context(int ctxn)
{
	struct pmu *pmu;

	if (ctxn < 0)
		return NULL;

	list_for_each_entry(pmu, &pmus, entry) {
		if (pmu->task_ctx_nr == ctxn)
			return pmu->pmu_cpu_context;
	}

	return NULL;
}

static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct perf_cpu_context *cpuctx;

		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);

		if (cpuctx->active_pmu == old_pmu)
			cpuctx->active_pmu = pmu;
	}
}

static void free_pmu_context(struct pmu *pmu)
{
	struct pmu *i;

	mutex_lock(&pmus_lock);
	/*
	 * Like a real lame refcount.
	 */
	list_for_each_entry(i, &pmus, entry) {
		if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
			update_pmu_context(i, pmu);
			goto out;
		}
	}

	free_percpu(pmu->pmu_cpu_context);
out:
	mutex_unlock(&pmus_lock);
}
static struct idr pmu_idr;

static ssize_t
type_show(struct device *dev, struct device_attribute *attr, char *page)
{
	struct pmu *pmu = dev_get_drvdata(dev);

	return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
}

static struct device_attribute pmu_dev_attrs[] = {
       __ATTR_RO(type),
       __ATTR_NULL,
};

static int pmu_bus_running;
static struct bus_type pmu_bus = {
	.name		= "event_source",
	.dev_attrs	= pmu_dev_attrs,
};

static void pmu_dev_release(struct device *dev)
{
	kfree(dev);
}

static int pmu_dev_alloc(struct pmu *pmu)
{
	int ret = -ENOMEM;

	pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!pmu->dev)
		goto out;

	device_initialize(pmu->dev);
	ret = dev_set_name(pmu->dev, "%s", pmu->name);
	if (ret)
		goto free_dev;

	dev_set_drvdata(pmu->dev, pmu);
	pmu->dev->bus = &pmu_bus;
	pmu->dev->release = pmu_dev_release;
	ret = device_add(pmu->dev);
	if (ret)
		goto free_dev;

out:
	return ret;

free_dev:
	put_device(pmu->dev);
	goto out;
}

static struct lock_class_key cpuctx_mutex;
static struct lock_class_key cpuctx_lock;

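/*
 * Register a PMU: allocate (or share) the per-cpu context, hand out a
 * dynamic type id via pmu_idr when type < 0, and fill in nop/transaction
 * callbacks for the methods the driver left NULL.
 */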
int perf_pmu_register(struct pmu *pmu, char *name, int type)
{
	int cpu, ret;

	mutex_lock(&pmus_lock);
	ret = -ENOMEM;
	pmu->pmu_disable_count = alloc_percpu(int);
	if (!pmu->pmu_disable_count)
		goto unlock;

	pmu->type = -1;
	if (!name)
		goto skip_type;
	pmu->name = name;

	if (type < 0) {
		int err = idr_pre_get(&pmu_idr, GFP_KERNEL);
		if (!err)
			goto free_pdc;

		err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type);
		if (err) {
			ret = err;
			goto free_pdc;
		}
	}
	pmu->type = type;

	if (pmu_bus_running) {
		ret = pmu_dev_alloc(pmu);
		if (ret)
			goto free_idr;
	}

P
Peter Zijlstra 已提交
5565
skip_type:
P
Peter Zijlstra 已提交
5566 5567 5568
	pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
	if (pmu->pmu_cpu_context)
		goto got_cpu_context;

	pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
	if (!pmu->pmu_cpu_context)
		goto free_dev;

	for_each_possible_cpu(cpu) {
		struct perf_cpu_context *cpuctx;

		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
		__perf_event_init_context(&cpuctx->ctx);
		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
		lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
		cpuctx->ctx.type = cpu_context;
		cpuctx->ctx.pmu = pmu;
		cpuctx->jiffies_interval = 1;
		INIT_LIST_HEAD(&cpuctx->rotation_list);
		cpuctx->active_pmu = pmu;
	}

got_cpu_context:
	if (!pmu->start_txn) {
		if (pmu->pmu_enable) {
			/*
			 * If we have pmu_enable/pmu_disable calls, install
			 * transaction stubs that use that to try and batch
			 * hardware accesses.
			 */
			pmu->start_txn  = perf_pmu_start_txn;
			pmu->commit_txn = perf_pmu_commit_txn;
			pmu->cancel_txn = perf_pmu_cancel_txn;
		} else {
			pmu->start_txn  = perf_pmu_nop_void;
			pmu->commit_txn = perf_pmu_nop_int;
			pmu->cancel_txn = perf_pmu_nop_void;
		}
	}

	if (!pmu->pmu_enable) {
		pmu->pmu_enable  = perf_pmu_nop_void;
		pmu->pmu_disable = perf_pmu_nop_void;
	}

	if (!pmu->event_idx)
		pmu->event_idx = perf_event_idx_default;

	list_add_rcu(&pmu->entry, &pmus);
	ret = 0;
unlock:
	mutex_unlock(&pmus_lock);

	return ret;

free_dev:
	device_del(pmu->dev);
	put_device(pmu->dev);

free_idr:
	if (pmu->type >= PERF_TYPE_MAX)
		idr_remove(&pmu_idr, pmu->type);

free_pdc:
	free_percpu(pmu->pmu_disable_count);
	goto unlock;
}

void perf_pmu_unregister(struct pmu *pmu)
{
	mutex_lock(&pmus_lock);
	list_del_rcu(&pmu->entry);
	mutex_unlock(&pmus_lock);

	/*
	 * We dereference the pmu list under both SRCU and regular RCU, so
	 * synchronize against both of those.
	 */
	synchronize_srcu(&pmus_srcu);
	synchronize_rcu();

	free_percpu(pmu->pmu_disable_count);
	if (pmu->type >= PERF_TYPE_MAX)
		idr_remove(&pmu_idr, pmu->type);
	device_del(pmu->dev);
	put_device(pmu->dev);
	free_pmu_context(pmu);
}

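/*
 * Find a PMU willing to handle this event: first try an exact attr.type
 * match via pmu_idr, then fall back to asking each registered PMU in turn.
 */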
struct pmu *perf_init_event(struct perf_event *event)
{
	struct pmu *pmu = NULL;
	int idx;
	int ret;

	idx = srcu_read_lock(&pmus_srcu);

	rcu_read_lock();
	pmu = idr_find(&pmu_idr, event->attr.type);
	rcu_read_unlock();
	if (pmu) {
		event->pmu = pmu;
		ret = pmu->event_init(event);
		if (ret)
			pmu = ERR_PTR(ret);
		goto unlock;
	}

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		event->pmu = pmu;
		ret = pmu->event_init(event);
		if (!ret)
			goto unlock;

		if (ret != -ENOENT) {
			pmu = ERR_PTR(ret);
			goto unlock;
		}
	}
	pmu = ERR_PTR(-ENOENT);
unlock:
	srcu_read_unlock(&pmus_srcu, idx);

	return pmu;
}

/*
 * Allocate and initialize an event structure
 */
static struct perf_event *
perf_event_alloc(struct perf_event_attr *attr, int cpu,
		 struct task_struct *task,
		 struct perf_event *group_leader,
		 struct perf_event *parent_event,
		 perf_overflow_handler_t overflow_handler,
		 void *context)
{
	struct pmu *pmu;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	long err;

	if ((unsigned)cpu >= nr_cpu_ids) {
		if (!task || cpu != -1)
			return ERR_PTR(-EINVAL);
	}

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return ERR_PTR(-ENOMEM);

	/*
	 * Single events are their own group leaders, with an
	 * empty sibling list:
	 */
	if (!group_leader)
		group_leader = event;

	mutex_init(&event->child_mutex);
	INIT_LIST_HEAD(&event->child_list);

	INIT_LIST_HEAD(&event->group_entry);
	INIT_LIST_HEAD(&event->event_entry);
	INIT_LIST_HEAD(&event->sibling_list);
	INIT_LIST_HEAD(&event->rb_entry);

	init_waitqueue_head(&event->waitq);
	init_irq_work(&event->pending, perf_pending_event);

	mutex_init(&event->mmap_mutex);

	event->cpu		= cpu;
	event->attr		= *attr;
	event->group_leader	= group_leader;
	event->pmu		= NULL;
	event->oncpu		= -1;

	event->parent		= parent_event;

	event->ns		= get_pid_ns(current->nsproxy->pid_ns);
	event->id		= atomic64_inc_return(&perf_event_id);

	event->state		= PERF_EVENT_STATE_INACTIVE;

	if (task) {
		event->attach_state = PERF_ATTACH_TASK;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		/*
		 * hw_breakpoint is a bit difficult here..
		 */
		if (attr->type == PERF_TYPE_BREAKPOINT)
			event->hw.bp_target = task;
#endif
	}

	if (!overflow_handler && parent_event) {
		overflow_handler = parent_event->overflow_handler;
		context = parent_event->overflow_handler_context;
	}

	event->overflow_handler	= overflow_handler;
	event->overflow_handler_context = context;

	if (attr->disabled)
		event->state = PERF_EVENT_STATE_OFF;

	pmu = NULL;

	hwc = &event->hw;
	hwc->sample_period = attr->sample_period;
	if (attr->freq && attr->sample_freq)
		hwc->sample_period = 1;
	hwc->last_period = hwc->sample_period;

	local64_set(&hwc->period_left, hwc->sample_period);

	/*
	 * we currently do not support PERF_FORMAT_GROUP on inherited events
	 */
	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
		goto done;

	pmu = perf_init_event(event);

done:
	err = 0;
	if (!pmu)
		err = -EINVAL;
	else if (IS_ERR(pmu))
		err = PTR_ERR(pmu);

	if (err) {
		if (event->ns)
			put_pid_ns(event->ns);
		kfree(event);
		return ERR_PTR(err);
	}

	if (!event->parent) {
		if (event->attach_state & PERF_ATTACH_TASK)
			jump_label_inc(&perf_sched_events.key);
		if (event->attr.mmap || event->attr.mmap_data)
			atomic_inc(&nr_mmap_events);
		if (event->attr.comm)
			atomic_inc(&nr_comm_events);
		if (event->attr.task)
			atomic_inc(&nr_task_events);
		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
			err = get_callchain_buffers();
			if (err) {
				free_event(event);
				return ERR_PTR(err);
			}
		}
	}

	return event;
}

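/*
 * Copy a perf_event_attr from user space, coping with older and newer
 * ABI sizes: short structs are zero-padded, larger ones are only
 * accepted if all the bits we do not know about are zero.
 */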
static int perf_copy_attr(struct perf_event_attr __user *uattr,
			  struct perf_event_attr *attr)
{
	u32 size;
	int ret;

	if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
		return -EFAULT;

	/*
	 * zero the full structure, so that a short copy will be nice.
	 */
	memset(attr, 0, sizeof(*attr));

	ret = get_user(size, &uattr->size);
	if (ret)
		return ret;

	if (size > PAGE_SIZE)	/* silly large */
		goto err_size;

	if (!size)		/* abi compat */
		size = PERF_ATTR_SIZE_VER0;

	if (size < PERF_ATTR_SIZE_VER0)
		goto err_size;

	/*
	 * If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(*attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(*attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			ret = get_user(val, addr);
			if (ret)
				return ret;
			if (val)
				goto err_size;
		}
		size = sizeof(*attr);
	}

	ret = copy_from_user(attr, uattr, size);
	if (ret)
		return -EFAULT;

	if (attr->__reserved_1)
		return -EINVAL;

	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
		return -EINVAL;

	if (attr->read_format & ~(PERF_FORMAT_MAX-1))
		return -EINVAL;

out:
	return ret;

err_size:
	put_user(sizeof(*attr), &uattr->size);
	ret = -E2BIG;
	goto out;
}

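/*
 * Redirect the event's output to @output_event's ring buffer. Only
 * allowed for events on the same CPU (or in the same task context for
 * per-task events), and not while the event has an active mmap().
 */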
static int
perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
{
	struct ring_buffer *rb = NULL, *old_rb = NULL;
	int ret = -EINVAL;

	if (!output_event)
		goto set;

	/* don't allow circular references */
	if (event == output_event)
		goto out;

	/*
	 * Don't allow cross-cpu buffers
	 */
	if (output_event->cpu != event->cpu)
		goto out;

	/*
	 * If it's not a per-cpu rb, it must be the same task.
	 */
	if (output_event->cpu == -1 && output_event->ctx != event->ctx)
		goto out;

set:
	mutex_lock(&event->mmap_mutex);
	/* Can't redirect output if we've got an active mmap() */
	if (atomic_read(&event->mmap_count))
		goto unlock;

	if (output_event) {
		/* get the rb we want to redirect to */
		rb = ring_buffer_get(output_event);
		if (!rb)
			goto unlock;
	}

	old_rb = event->rb;
	rcu_assign_pointer(event->rb, rb);
	if (old_rb)
		ring_buffer_detach(event, old_rb);
	ret = 0;
unlock:
	mutex_unlock(&event->mmap_mutex);

	if (old_rb)
		ring_buffer_put(old_rb);
out:
	return ret;
}

/**
 * sys_perf_event_open - open a performance event, associate it to a task/cpu
 *
 * @attr_uptr:	event_id type attributes for monitoring/sampling
 * @pid:		target pid
 * @cpu:		target cpu
 * @group_fd:		group leader event fd
 * @flags:		perf event open flags
 */
SYSCALL_DEFINE5(perf_event_open,
		struct perf_event_attr __user *, attr_uptr,
		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
	struct perf_event *group_leader = NULL, *output_event = NULL;
	struct perf_event *event, *sibling;
	struct perf_event_attr attr;
	struct perf_event_context *ctx;
	struct file *event_file = NULL;
	struct file *group_file = NULL;
	struct task_struct *task = NULL;
	struct pmu *pmu;
	int event_fd;
	int move_group = 0;
	int fput_needed = 0;
	int err;

	/* for future expandability... */
	if (flags & ~PERF_FLAG_ALL)
		return -EINVAL;

	err = perf_copy_attr(attr_uptr, &attr);
	if (err)
		return err;

	if (!attr.exclude_kernel) {
		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
	}

	if (attr.freq) {
		if (attr.sample_freq > sysctl_perf_event_sample_rate)
			return -EINVAL;
	}

	/*
	 * In cgroup mode, the pid argument is used to pass the fd
	 * opened to the cgroup directory in cgroupfs. The cpu argument
	 * designates the cpu on which to monitor threads from that
	 * cgroup.
	 */
	if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
		return -EINVAL;

	event_fd = get_unused_fd_flags(O_RDWR);
	if (event_fd < 0)
		return event_fd;

	if (group_fd != -1) {
		group_leader = perf_fget_light(group_fd, &fput_needed);
		if (IS_ERR(group_leader)) {
			err = PTR_ERR(group_leader);
			goto err_fd;
		}
		group_file = group_leader->filp;
		if (flags & PERF_FLAG_FD_OUTPUT)
			output_event = group_leader;
		if (flags & PERF_FLAG_FD_NO_GROUP)
			group_leader = NULL;
	}

	if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
		task = find_lively_task_by_vpid(pid);
		if (IS_ERR(task)) {
			err = PTR_ERR(task);
			goto err_group_fd;
		}
	}

	event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
				 NULL, NULL);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err_task;
	}

	if (flags & PERF_FLAG_PID_CGROUP) {
		err = perf_cgroup_connect(pid, event, &attr, group_leader);
		if (err)
			goto err_alloc;
		/*
		 * one more event:
		 * - that has cgroup constraint on event->cpu
		 * - that may need work on context switch
		 */
		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
		jump_label_inc(&perf_sched_events.key);
	}

	/*
	 * Special case software events and allow them to be part of
	 * any hardware group.
	 */
	pmu = event->pmu;

	if (group_leader &&
	    (is_software_event(event) != is_software_event(group_leader))) {
		if (is_software_event(event)) {
			/*
			 * If event and group_leader are not both a software
			 * event, and event is, then group leader is not.
			 *
			 * Allow the addition of software events to !software
			 * groups, this is safe because software events never
			 * fail to schedule.
			 */
			pmu = group_leader->pmu;
		} else if (is_software_event(group_leader) &&
			   (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
			/*
			 * In case the group is a pure software group, and we
			 * try to add a hardware event, move the whole group to
			 * the hardware context.
			 */
			move_group = 1;
		}
	}

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pmu, task, cpu);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err_alloc;
	}

	if (task) {
		put_task_struct(task);
		task = NULL;
	}

	/*
	 * Look up the group leader (we will attach this event to it):
	 */
	if (group_leader) {
		err = -EINVAL;

		/*
		 * Do not allow a recursive hierarchy (this new sibling
		 * becoming part of another group-sibling):
		 */
		if (group_leader->group_leader != group_leader)
			goto err_context;
		/*
		 * Do not allow to attach to a group in a different
		 * task or CPU context:
		 */
		if (move_group) {
			if (group_leader->ctx->type != ctx->type)
				goto err_context;
		} else {
			if (group_leader->ctx != ctx)
				goto err_context;
		}

		/*
		 * Only a group leader can be exclusive or pinned
		 */
		if (attr.exclusive || attr.pinned)
			goto err_context;
	}

	if (output_event) {
		err = perf_event_set_output(event, output_event);
		if (err)
			goto err_context;
	}

	event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
	if (IS_ERR(event_file)) {
		err = PTR_ERR(event_file);
		goto err_context;
	}

	if (move_group) {
		struct perf_event_context *gctx = group_leader->ctx;

		mutex_lock(&gctx->mutex);
		perf_remove_from_context(group_leader);
		list_for_each_entry(sibling, &group_leader->sibling_list,
				    group_entry) {
			perf_remove_from_context(sibling);
			put_ctx(gctx);
		}
		mutex_unlock(&gctx->mutex);
		put_ctx(gctx);
	}

	event->filp = event_file;
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);

	if (move_group) {
		perf_install_in_context(ctx, group_leader, cpu);
		get_ctx(ctx);
		list_for_each_entry(sibling, &group_leader->sibling_list,
				    group_entry) {
			perf_install_in_context(ctx, sibling, cpu);
			get_ctx(ctx);
		}
	}

	perf_install_in_context(ctx, event, cpu);
	++ctx->generation;
	perf_unpin_context(ctx);
	mutex_unlock(&ctx->mutex);

	event->owner = current;

	mutex_lock(&current->perf_event_mutex);
	list_add_tail(&event->owner_entry, &current->perf_event_list);
	mutex_unlock(&current->perf_event_mutex);

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(event);
	perf_event__id_header_size(event);

	/*
	 * Drop the reference on the group_event after placing the
	 * new event on the sibling_list. This ensures destruction
	 * of the group leader will find the pointer to itself in
	 * perf_group_detach().
	 */
	fput_light(group_file, fput_needed);
	fd_install(event_fd, event_file);
	return event_fd;

err_context:
	perf_unpin_context(ctx);
	put_ctx(ctx);
err_alloc:
	free_event(event);
err_task:
	if (task)
		put_task_struct(task);
err_group_fd:
	fput_light(group_file, fput_needed);
err_fd:
	put_unused_fd(event_fd);
	return err;
}

/**
 * perf_event_create_kernel_counter
 *
 * @attr: attributes of the counter to create
 * @cpu: cpu on which the counter is bound
 * @task: task to profile (NULL for percpu)
 * @overflow_handler: callback invoked when the counter overflows
 * @context: context data passed to the overflow handler
 */
struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t overflow_handler,
				 void *context)
{
	struct perf_event_context *ctx;
	struct perf_event *event;
	int err;

	/*
	 * Get the target context (task or percpu):
	 */

	event = perf_event_alloc(attr, cpu, task, NULL, NULL,
				 overflow_handler, context);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err;
	}

	ctx = find_get_context(event->pmu, task, cpu);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err_free;
	}

	event->filp = NULL;
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, event, cpu);
	++ctx->generation;
	perf_unpin_context(ctx);
	mutex_unlock(&ctx->mutex);

	return event;

err_free:
	free_event(event);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);

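/*
 * Fold the counts of an exiting child event back into its parent event
 * and unlink the child from the parent's child_list.
 */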
static void sync_child_event(struct perf_event *child_event,
			       struct task_struct *child)
{
	struct perf_event *parent_event = child_event->parent;
	u64 child_val;

	if (child_event->attr.inherit_stat)
		perf_event_read_event(child_event, child);

	child_val = perf_event_count(child_event);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_event->child_count);
	atomic64_add(child_event->total_time_enabled,
		     &parent_event->child_total_time_enabled);
	atomic64_add(child_event->total_time_running,
		     &parent_event->child_total_time_running);

	/*
	 * Remove this event from the parent's list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_del_init(&child_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	/*
	 * Release the parent event, if this was the last
	 * reference to it.
	 */
	fput(parent_event->filp);
}

static void
__perf_event_exit_task(struct perf_event *child_event,
			 struct perf_event_context *child_ctx,
			 struct task_struct *child)
{
	if (child_event->parent) {
		raw_spin_lock_irq(&child_ctx->lock);
		perf_group_detach(child_event);
		raw_spin_unlock_irq(&child_ctx->lock);
	}

	perf_remove_from_context(child_event);

	/*
	 * It can happen that the parent exits first, and has events
	 * that are still around due to the child reference. These
	 * events need to be zapped.
	 */
	if (child_event->parent) {
		sync_child_event(child_event, child);
		free_event(child_event);
	}
}

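/*
 * Tear down one of an exiting task's event contexts, syncing inherited
 * events back to their parents.
 */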
static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
{
	struct perf_event *child_event, *tmp;
	struct perf_event_context *child_ctx;
	unsigned long flags;

	if (likely(!child->perf_event_ctxp[ctxn])) {
		perf_event_task(child, NULL, 0);
		return;
	}

	local_irq_save(flags);
	/*
	 * We can't reschedule here because interrupts are disabled,
	 * and either child is current or it is a task that can't be
	 * scheduled, so we are now safe from rescheduling changing
	 * our context.
	 */
	child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);

	/*
	 * Take the context lock here so that if find_get_context is
	 * reading child->perf_event_ctxp, we wait until it has
	 * incremented the context's refcount before we do put_ctx below.
	 */
	raw_spin_lock(&child_ctx->lock);
	task_ctx_sched_out(child_ctx);
	child->perf_event_ctxp[ctxn] = NULL;
	/*
	 * If this context is a clone, unclone it so it can't get
	 * swapped to another process while we're removing all
	 * the events from it.
	 */
	unclone_ctx(child_ctx);
	update_context_time(child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Report the task dead after unscheduling the events so that we
	 * won't get any samples after PERF_RECORD_EXIT. We can however still
	 * get a few PERF_RECORD_READ events.
	 */
	perf_event_task(child, child_ctx, 0);

	/*
	 * We can recurse on the same lock type through:
	 *
	 *   __perf_event_exit_task()
	 *     sync_child_event()
	 *       fput(parent_event->filp)
	 *         perf_release()
	 *           mutex_lock(&ctx->mutex)
	 *
	 * But since it's the parent context it won't be the same instance.
	 */
	mutex_lock(&child_ctx->mutex);

again:
	list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
				 group_entry)
		__perf_event_exit_task(child_event, child_ctx, child);

	list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
				 group_entry)
		__perf_event_exit_task(child_event, child_ctx, child);

	/*
	 * If the last event was a group event, it will have appended all
	 * its siblings to the list, but we obtained 'tmp' before that which
	 * will still point to the list head terminating the iteration.
	 */
	if (!list_empty(&child_ctx->pinned_groups) ||
	    !list_empty(&child_ctx->flexible_groups))
		goto again;

	mutex_unlock(&child_ctx->mutex);

	put_ctx(child_ctx);
}

/*
 * When a child task exits, feed back event values to parent events.
 */
void perf_event_exit_task(struct task_struct *child)
{
	struct perf_event *event, *tmp;
	int ctxn;

	mutex_lock(&child->perf_event_mutex);
	list_for_each_entry_safe(event, tmp, &child->perf_event_list,
				 owner_entry) {
		list_del_init(&event->owner_entry);

		/*
		 * Ensure the list deletion is visible before we clear
		 * the owner, closes a race against perf_release() where
		 * we need to serialize on the owner->perf_event_mutex.
		 */
		smp_wmb();
		event->owner = NULL;
	}
	mutex_unlock(&child->perf_event_mutex);

	for_each_task_context_nr(ctxn)
		perf_event_exit_task_context(child, ctxn);
}

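/*
 * Release an inherited event from a context that was never exposed to
 * userspace: unlink it from its parent and its context, then free it.
 */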
static void perf_free_event(struct perf_event *event,
			    struct perf_event_context *ctx)
{
	struct perf_event *parent = event->parent;

	if (WARN_ON_ONCE(!parent))
		return;

	mutex_lock(&parent->child_mutex);
	list_del_init(&event->child_list);
	mutex_unlock(&parent->child_mutex);

	fput(parent->filp);

	perf_group_detach(event);
	list_del_event(event, ctx);
	free_event(event);
}

/*
 * Free an unexposed, unused context as created by inheritance by
 * perf_event_init_task below, used by fork() in case of failure.
 */
void perf_event_free_task(struct task_struct *task)
{
	struct perf_event_context *ctx;
	struct perf_event *event, *tmp;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		mutex_lock(&ctx->mutex);
again:
		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
				group_entry)
			perf_free_event(event, ctx);

		list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
				group_entry)
			perf_free_event(event, ctx);

		if (!list_empty(&ctx->pinned_groups) ||
				!list_empty(&ctx->flexible_groups))
			goto again;

		mutex_unlock(&ctx->mutex);

		put_ctx(ctx);
	}
}

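/*
 * Warn if any of the task's perf event contexts are still around; by
 * this point they should all have been released.
 */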
void perf_event_delayed_put(struct task_struct *task)
{
	int ctxn;

	for_each_task_context_nr(ctxn)
		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
}

/*
 * inherit an event from parent task to child task:
 */
static struct perf_event *
inherit_event(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event *group_leader,
	      struct perf_event_context *child_ctx)
{
	struct perf_event *child_event;
	unsigned long flags;

	/*
	 * Instead of creating recursive hierarchies of events,
	 * we link inherited events back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_event->parent)
		parent_event = parent_event->parent;

	child_event = perf_event_alloc(&parent_event->attr,
					   parent_event->cpu,
					   child,
					   group_leader, parent_event,
				           NULL, NULL);
	if (IS_ERR(child_event))
		return child_event;
	get_ctx(child_ctx);

	/*
	 * Make the child state follow the state of the parent event,
	 * not its attr.disabled bit.  We hold the parent's mutex,
	 * so we won't race with perf_event_{en, dis}able_family.
	 */
	if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
		child_event->state = PERF_EVENT_STATE_INACTIVE;
	else
		child_event->state = PERF_EVENT_STATE_OFF;

	if (parent_event->attr.freq) {
		u64 sample_period = parent_event->hw.sample_period;
		struct hw_perf_event *hwc = &child_event->hw;

		hwc->sample_period = sample_period;
		hwc->last_period   = sample_period;

		local64_set(&hwc->period_left, sample_period);
	}

	child_event->ctx = child_ctx;
	child_event->overflow_handler = parent_event->overflow_handler;
	child_event->overflow_handler_context
		= parent_event->overflow_handler_context;

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(child_event);
	perf_event__id_header_size(child_event);

	/*
	 * Link it up in the child's context:
	 */
	raw_spin_lock_irqsave(&child_ctx->lock, flags);
	add_event_to_ctx(child_event, child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Get a reference to the parent filp - we will fput it
	 * when the child event exits. This is safe to do because
	 * we are in the parent and we know that the filp still
	 * exists and has a nonzero count:
	 */
	atomic_long_inc(&parent_event->filp->f_count);

	/*
	 * Link this into the parent event's child list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_add_tail(&child_event->child_list, &parent_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	return child_event;
}

static int inherit_group(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event_context *child_ctx)
{
	struct perf_event *leader;
	struct perf_event *sub;
	struct perf_event *child_ctr;

	leader = inherit_event(parent_event, parent, parent_ctx,
				 child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
		child_ctr = inherit_event(sub, parent, parent_ctx,
					    child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}

static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
		   struct perf_event_context *parent_ctx,
		   struct task_struct *child, int ctxn,
		   int *inherited_all)
{
	int ret;
	struct perf_event_context *child_ctx;

	if (!event->attr.inherit) {
		*inherited_all = 0;
		return 0;
	}

	child_ctx = child->perf_event_ctxp[ctxn];
	if (!child_ctx) {
		/*
		 * This is executed from the parent task context, so
		 * inherit events that have been marked for cloning.
		 * First allocate and initialize a context for the
		 * child.
		 */

		child_ctx = alloc_perf_context(event->pmu, child);
		if (!child_ctx)
			return -ENOMEM;

		child->perf_event_ctxp[ctxn] = child_ctx;
	}

	ret = inherit_group(event, parent, parent_ctx,
			    child, child_ctx);

	if (ret)
		*inherited_all = 0;

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_context(struct task_struct *child, int ctxn)
{
	struct perf_event_context *child_ctx, *parent_ctx;
	struct perf_event_context *cloned_ctx;
	struct perf_event *event;
	struct task_struct *parent = current;
	int inherited_all = 1;
	unsigned long flags;
	int ret = 0;

	if (likely(!parent->perf_event_ctxp[ctxn]))
		return 0;

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent, ctxn);

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	/*
	 * We can't hold ctx->lock when iterating the ->flexible_group list due
	 * to allocations, but we need to prevent rotation because
	 * rotate_ctx() will change the list from interrupt context.
	 */
	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 1;
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);

	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 0;

	child_ctx = child->perf_event_ctxp[ctxn];

	if (child_ctx && inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 *
		 * Note that if the parent is a clone, the holding of
		 * parent_ctx->lock avoids it from being uncloned.
		 */
		cloned_ctx = parent_ctx->parent_ctx;
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);
	put_ctx(parent_ctx);

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_task(struct task_struct *child)
{
	int ctxn, ret;

	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
	mutex_init(&child->perf_event_mutex);
	INIT_LIST_HEAD(&child->perf_event_list);

	for_each_task_context_nr(ctxn) {
		ret = perf_event_init_context(child, ctxn);
		if (ret)
			return ret;
	}

	return 0;
}

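/*
 * Boot-time setup of the per-cpu swevent hashtable mutexes and
 * rotation lists.
 */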
static void __init perf_event_init_all_cpus(void)
{
	struct swevent_htable *swhash;
	int cpu;

	for_each_possible_cpu(cpu) {
		swhash = &per_cpu(swevent_htable, cpu);
		mutex_init(&swhash->hlist_mutex);
		INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
	}
}

static void __cpuinit perf_event_init_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	if (swhash->hlist_refcount > 0) {
		struct swevent_hlist *hlist;

		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
		WARN_ON(!hlist);
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	mutex_unlock(&swhash->hlist_mutex);
}

#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
static void perf_pmu_rotate_stop(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

	WARN_ON(!irqs_disabled());

	list_del_init(&cpuctx->rotation_list);
}

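/*
 * Runs on the CPU that is going offline: stop rotation on its context
 * and strip all events still scheduled there.
 */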
static void __perf_event_exit_context(void *__info)
{
	struct perf_event_context *ctx = __info;
	struct perf_event *event, *tmp;

	perf_pmu_rotate_stop(ctx->pmu);

	list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
		__perf_remove_from_context(event);
	list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
		__perf_remove_from_context(event);
}

static void perf_event_exit_cpu_context(int cpu)
{
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;

		mutex_lock(&ctx->mutex);
		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
		mutex_unlock(&ctx->mutex);
	}
	srcu_read_unlock(&pmus_srcu, idx);
}

static void perf_event_exit_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	swevent_hlist_release(swhash);
	mutex_unlock(&swhash->hlist_mutex);

	perf_event_exit_cpu_context(cpu);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif

static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{
	int cpu;

	for_each_online_cpu(cpu)
		perf_event_exit_cpu(cpu);

	return NOTIFY_OK;
}

/*
 * Run the perf reboot notifier at the very last possible moment so that
 * the generic watchdog code runs as long as possible.
 */
static struct notifier_block perf_reboot_notifier = {
	.notifier_call = perf_reboot,
	.priority = INT_MIN,
};

static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_UP_PREPARE:
	case CPU_DOWN_FAILED:
		perf_event_init_cpu(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DOWN_PREPARE:
		perf_event_exit_cpu(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

void __init perf_event_init(void)
{
	int ret;

	idr_init(&pmu_idr);

	perf_event_init_all_cpus();
	init_srcu_struct(&pmus_srcu);
	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
	perf_pmu_register(&perf_cpu_clock, NULL, -1);
	perf_pmu_register(&perf_task_clock, NULL, -1);
	perf_tp_register();
	perf_cpu_notifier(perf_cpu_notify);
	register_reboot_notifier(&perf_reboot_notifier);

	ret = init_hw_breakpoint();
	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);

	/* do not patch jump label more than once per second */
	jump_label_rate_limit(&perf_sched_events, HZ);
}

static int __init perf_event_sysfs_init(void)
{
	struct pmu *pmu;
	int ret;

	mutex_lock(&pmus_lock);

	ret = bus_register(&pmu_bus);
	if (ret)
		goto unlock;

	list_for_each_entry(pmu, &pmus, entry) {
		if (!pmu->name || pmu->type < 0)
			continue;

		ret = pmu_dev_alloc(pmu);
		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
	}
	pmu_bus_running = 1;
	ret = 0;

unlock:
	mutex_unlock(&pmus_lock);

	return ret;
}
device_initcall(perf_event_sysfs_init);

#ifdef CONFIG_CGROUP_PERF
static struct cgroup_subsys_state *perf_cgroup_create(
	struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct perf_cgroup *jc;

	jc = kzalloc(sizeof(*jc), GFP_KERNEL);
	if (!jc)
		return ERR_PTR(-ENOMEM);

	jc->info = alloc_percpu(struct perf_cgroup_info);
	if (!jc->info) {
		kfree(jc);
		return ERR_PTR(-ENOMEM);
	}

	return &jc->css;
}

static void perf_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	struct perf_cgroup *jc;
	jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
			  struct perf_cgroup, css);
	free_percpu(jc->info);
	kfree(jc);
}

static int __perf_cgroup_move(void *info)
{
	struct task_struct *task = info;
	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
	return 0;
}

static void
perf_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *task)
{
	task_function_call(task, __perf_cgroup_move, task);
}

static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
		struct cgroup *old_cgrp, struct task_struct *task)
{
	/*
	 * cgroup_exit() is called in the copy_process() failure path.
	 * Ignore this case since the task hasn't run yet; this avoids
	 * trying to poke a half freed task state from generic code.
	 */
	if (!(task->flags & PF_EXITING))
		return;

	perf_cgroup_attach_task(cgrp, task);
}

struct cgroup_subsys perf_subsys = {
	.name		= "perf_event",
	.subsys_id	= perf_subsys_id,
	.create		= perf_cgroup_create,
	.destroy	= perf_cgroup_destroy,
	.exit		= perf_cgroup_exit,
	.attach_task	= perf_cgroup_attach_task,
};
#endif /* CONFIG_CGROUP_PERF */