/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright    2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>

#include "internal.h"

#include <asm/irq_regs.h>

struct remote_function_call {
	struct task_struct	*p;
	int			(*func)(void *info);
	void			*info;
	int			ret;
};

static void remote_function(void *data)
{
	struct remote_function_call *tfc = data;
	struct task_struct *p = tfc->p;

	if (p) {
		tfc->ret = -EAGAIN;
		if (task_cpu(p) != smp_processor_id() || !task_curr(p))
			return;
	}

	tfc->ret = tfc->func(tfc->info);
}

/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:		the task to evaluate
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, which just calls the function directly.
 *
 * returns: @func return value, or
 *	    -ESRCH  - when the process isn't running
 *	    -EAGAIN - when the process moved away
 */
static int
task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
{
	struct remote_function_call data = {
		.p	= p,
		.func	= func,
		.info	= info,
		.ret	= -ESRCH, /* No such (running) process */
	};

	if (task_curr(p))
		smp_call_function_single(task_cpu(p), remote_function, &data, 1);

	return data.ret;
}
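
/*
 * Calling pattern used throughout this file (a sketch, not an extra API):
 * callers such as perf_event_disable() do
 *
 *	if (!task_function_call(task, __perf_event_disable, event))
 *		return;
 *
 * and otherwise take ctx->lock and retry, since a non-zero return
 * (-EAGAIN or -ESRCH) means the task moved away or was not running.
 */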

/**
 * cpu_function_call - call a function on the cpu
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
{
	struct remote_function_call data = {
		.p	= NULL,
		.func	= func,
		.info	= info,
		.ret	= -ENXIO, /* No such CPU */
	};

	smp_call_function_single(cpu, remote_function, &data, 1);

	return data.ret;
}

#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
		       PERF_FLAG_FD_OUTPUT  |\
		       PERF_FLAG_PID_CGROUP)

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

/*
 * perf_sched_events : >0 events exist
 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
 */
struct jump_label_key perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;

/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */

/*
 * max perf event sample rate
 */
#define DEFAULT_MAX_SAMPLE_RATE 100000
int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
static int max_samples_per_tick __read_mostly =
	DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);

int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);

	return 0;
}
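
/*
 * Note: with the default limit of 100000 samples/sec and, for example,
 * HZ=1000, max_samples_per_tick = DIV_ROUND_UP(100000, 1000) = 100
 * samples per timer tick.
 */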

static atomic64_t perf_event_id;

static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type);

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type,
			     struct task_struct *task);

static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);

void __weak perf_event_print_debug(void)	{ }

extern __weak const char *perf_pmu_name(void)
{
	return "pmu";
}

static inline u64 perf_clock(void)
{
	return local_clock();
}

static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}

static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
			  struct perf_event_context *ctx)
{
	raw_spin_lock(&cpuctx->ctx.lock);
	if (ctx)
		raw_spin_lock(&ctx->lock);
}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
			    struct perf_event_context *ctx)
{
	if (ctx)
		raw_spin_unlock(&ctx->lock);
	raw_spin_unlock(&cpuctx->ctx.lock);
}
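
/*
 * perf_ctx_lock()/perf_ctx_unlock() define the lock order used below:
 * the per-cpu ctx.lock is always taken before a task context's ctx->lock,
 * and the two are released in the reverse order.
 */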

#ifdef CONFIG_CGROUP_PERF

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task)
{
	return container_of(task_subsys_state(task, perf_subsys_id),
			struct perf_cgroup, css);
}

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	return !event->cgrp || event->cgrp == cpuctx->cgrp;
}

static inline void perf_get_cgroup(struct perf_event *event)
{
	css_get(&event->cgrp->css);
}

static inline void perf_put_cgroup(struct perf_event *event)
{
	css_put(&event->cgrp->css);
}

static inline void perf_detach_cgroup(struct perf_event *event)
{
	perf_put_cgroup(event);
	event->cgrp = NULL;
}

static inline int is_cgroup_event(struct perf_event *event)
{
	return event->cgrp != NULL;
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	struct perf_cgroup_info *t;

	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	return t->time;
}

static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
{
	struct perf_cgroup_info *info;
	u64 now;

	now = perf_clock();

	info = this_cpu_ptr(cgrp->info);

	info->time += now - info->timestamp;
	info->timestamp = now;
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
	struct perf_cgroup *cgrp_out = cpuctx->cgrp;
	if (cgrp_out)
		__update_cgrp_time(cgrp_out);
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
	struct perf_cgroup *cgrp;

	/*
	 * ensure we access cgroup data only when needed and
	 * when we know the cgroup is pinned (css_get)
	 */
	if (!is_cgroup_event(event))
		return;

	cgrp = perf_cgroup_from_task(current);
	/*
	 * Do not update time when cgroup is not active
	 */
	if (cgrp == event->cgrp)
		__update_cgrp_time(event->cgrp);
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
	struct perf_cgroup *cgrp;
	struct perf_cgroup_info *info;

	/*
	 * ctx->lock held by caller
	 * ensure we do not access cgroup data
	 * unless we have the cgroup pinned (css_get)
	 */
	if (!task || !ctx->nr_cgroups)
		return;

	cgrp = perf_cgroup_from_task(task);
	info = this_cpu_ptr(cgrp->info);
	info->timestamp = ctx->timestamp;
}

#define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
#define PERF_CGROUP_SWIN	0x2 /* cgroup switch in events based on task */

/*
 * reschedule events based on the cgroup constraint of task.
 *
 * mode SWOUT : schedule out everything
 * mode SWIN : schedule in based on cgroup for next
 */
void perf_cgroup_switch(struct task_struct *task, int mode)
{
	struct perf_cpu_context *cpuctx;
	struct pmu *pmu;
	unsigned long flags;

	/*
	 * disable interrupts to avoid getting nr_cgroup
	 * changes via __perf_event_disable(). Also
	 * avoids preemption.
	 */
	local_irq_save(flags);

	/*
	 * we reschedule only in the presence of cgroup
	 * constrained events.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

		/*
		 * perf_cgroup_events says at least one
		 * context on this CPU has cgroup events.
		 *
		 * ctx->nr_cgroups reports the number of cgroup
		 * events for a context.
		 */
		if (cpuctx->ctx.nr_cgroups > 0) {
			perf_ctx_lock(cpuctx, cpuctx->task_ctx);
			perf_pmu_disable(cpuctx->ctx.pmu);

			if (mode & PERF_CGROUP_SWOUT) {
				cpu_ctx_sched_out(cpuctx, EVENT_ALL);
				/*
				 * must not be done before ctxswout due
				 * to event_filter_match() in event_sched_out()
				 */
				cpuctx->cgrp = NULL;
			}

			if (mode & PERF_CGROUP_SWIN) {
				WARN_ON_ONCE(cpuctx->cgrp);
				/* set cgrp before ctxsw in to
				 * allow event_filter_match() to not
				 * have to pass task around
				 */
				cpuctx->cgrp = perf_cgroup_from_task(task);
				cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
			}
			perf_pmu_enable(cpuctx->ctx.pmu);
			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
		}
	}

	rcu_read_unlock();

	local_irq_restore(flags);
}

static inline void perf_cgroup_sched_out(struct task_struct *task)
{
	perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
}

static inline void perf_cgroup_sched_in(struct task_struct *task)
{
	perf_cgroup_switch(task, PERF_CGROUP_SWIN);
}

static inline int perf_cgroup_connect(int fd, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	struct perf_cgroup *cgrp;
	struct cgroup_subsys_state *css;
	struct file *file;
	int ret = 0, fput_needed;

	file = fget_light(fd, &fput_needed);
	if (!file)
		return -EBADF;

	css = cgroup_css_from_dir(file, perf_subsys_id);
	if (IS_ERR(css)) {
		ret = PTR_ERR(css);
		goto out;
	}

	cgrp = container_of(css, struct perf_cgroup, css);
	event->cgrp = cgrp;

	/* must be done before we fput() the file */
	perf_get_cgroup(event);

	/*
	 * all events in a group must monitor
	 * the same cgroup because a task belongs
	 * to only one perf cgroup at a time
	 */
	if (group_leader && group_leader->cgrp != cgrp) {
		perf_detach_cgroup(event);
		ret = -EINVAL;
	}
out:
	fput_light(file, fput_needed);
	return ret;
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
	struct perf_cgroup_info *t;
	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	event->shadow_ctx_time = now - t->timestamp;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
	/*
	 * when the current task's perf cgroup does not match
	 * the event's, we need to remember to call the
	 * perf_mark_enable() function the first time a task with
	 * a matching perf cgroup is scheduled in.
	 */
	if (is_cgroup_event(event) && !perf_cgroup_match(event))
		event->cgrp_defer_enabled = 1;
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
	struct perf_event *sub;
	u64 tstamp = perf_event_time(event);

	if (!event->cgrp_defer_enabled)
		return;

	event->cgrp_defer_enabled = 0;

	event->tstamp_enabled = tstamp - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
			sub->cgrp_defer_enabled = 0;
		}
	}
}
#else /* !CONFIG_CGROUP_PERF */

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	return true;
}

static inline void perf_detach_cgroup(struct perf_event *event)
{}

static inline int is_cgroup_event(struct perf_event *event)
{
	return 0;
}

static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
{
	return 0;
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
}

static inline void perf_cgroup_sched_out(struct task_struct *task)
{
}

static inline void perf_cgroup_sched_in(struct task_struct *task)
{
}

static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	return -EINVAL;
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
}

void
perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	return 0;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
}
#endif

void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!--(*count))
		pmu->pmu_enable(pmu);
}
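
/*
 * perf_pmu_disable()/perf_pmu_enable() nest via a per-cpu count: the PMU
 * is only re-enabled once the count drops back to zero, so sections like
 *
 *	perf_pmu_disable(pmu);
 *	...reprogram events...
 *	perf_pmu_enable(pmu);
 *
 * may be stacked safely.
 */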

static DEFINE_PER_CPU(struct list_head, rotation_list);

/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 * because they're strictly cpu affine and rotate_start is called with IRQs
 * disabled, while rotate_context is called from IRQ context.
 */
static void perf_pmu_rotate_start(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
	struct list_head *head = &__get_cpu_var(rotation_list);

	WARN_ON(!irqs_disabled());

	if (list_empty(&cpuctx->rotation_list))
		list_add(&cpuctx->rotation_list, head);
}

static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		kfree_rcu(ctx, rcu_head);
	}
}

static void unclone_ctx(struct perf_event_context *ctx)
{
	if (ctx->parent_ctx) {
		put_ctx(ctx->parent_ctx);
		ctx->parent_ctx = NULL;
	}
}

static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_tgid_nr_ns(p, event->ns);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_pid_nr_ns(p, event->ns);
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}

/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
	struct perf_event_context *ctx;

	rcu_read_lock();
retry:
	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed.  Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so.  If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

static u64 perf_event_time(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	if (is_cgroup_event(event))
		return perf_cgroup_event_time(event);

	return ctx ? ctx->time : 0;
}

/*
 * Update the total_time_enabled and total_time_running fields for an event.
 * The caller of this function needs to hold the ctx->lock.
 */
static void update_event_times(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	u64 run_end;

	if (event->state < PERF_EVENT_STATE_INACTIVE ||
	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
		return;
	/*
	 * in cgroup mode, time_enabled represents
	 * the time the event was enabled AND active
	 * tasks were in the monitored cgroup. This is
	 * independent of the activity of the context as
	 * there may be a mix of cgroup and non-cgroup events.
	 *
	 * That is why we treat cgroup events differently
	 * here.
	 */
	if (is_cgroup_event(event))
		run_end = perf_event_time(event);
	else if (ctx->is_active)
		run_end = ctx->time;
	else
		run_end = event->tstamp_stopped;

	event->total_time_enabled = run_end - event->tstamp_enabled;

	if (event->state == PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;
	else
		run_end = perf_event_time(event);

	event->total_time_running = run_end - event->tstamp_running;

}

/*
 * Update total_time_enabled and total_time_running for all events in a group.
 */
static void update_group_times(struct perf_event *leader)
{
	struct perf_event *event;

	update_event_times(leader);
	list_for_each_entry(event, &leader->sibling_list, group_entry)
		update_event_times(event);
}

static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
	if (event->attr.pinned)
		return &ctx->pinned_groups;
	else
		return &ctx->flexible_groups;
}

/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
	event->attach_state |= PERF_ATTACH_CONTEXT;

	/*
	 * If we're a stand alone event or group leader, we go to the context
	 * list, group events are kept attached to the group so that
	 * perf_group_detach can, at all times, locate all siblings.
	 */
	if (event->group_leader == event) {
		struct list_head *list;

		if (is_software_event(event))
			event->group_flags |= PERF_GROUP_SOFTWARE;

		list = ctx_group_list(event, ctx);
		list_add_tail(&event->group_entry, list);
	}

	if (is_cgroup_event(event))
		ctx->nr_cgroups++;

	list_add_rcu(&event->event_entry, &ctx->event_list);
	if (!ctx->nr_events)
		perf_pmu_rotate_start(ctx->pmu);
	ctx->nr_events++;
	if (event->attr.inherit_stat)
		ctx->nr_stat++;
}

/*
 * Called at perf_event creation and when events are attached/detached from a
 * group.
 */
static void perf_event__read_size(struct perf_event *event)
{
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_GROUP) {
		nr += event->group_leader->nr_siblings;
		size += sizeof(u64);
	}

	size += entry * nr;
	event->read_size = size;
}
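
/*
 * Worked example of the computation above: with two siblings and
 * read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID, nr = 3 and each
 * entry is (value, id) = 16 bytes, so read_size = 8 + 3 * 16 = 56 bytes.
 */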

static void perf_event__header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	perf_event__read_size(event);

	if (sample_type & PERF_SAMPLE_IP)
		size += sizeof(data->ip);

	if (sample_type & PERF_SAMPLE_ADDR)
		size += sizeof(data->addr);

	if (sample_type & PERF_SAMPLE_PERIOD)
		size += sizeof(data->period);

	if (sample_type & PERF_SAMPLE_READ)
		size += event->read_size;

	event->header_size = size;
}

static void perf_event__id_header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu_entry);


	event->id_header_size = size;
}

static void perf_group_attach(struct perf_event *event)
{
	struct perf_event *group_leader = event->group_leader, *pos;

	/*
	 * We can have double attach due to group movement in perf_event_open.
	 */
	if (event->attach_state & PERF_ATTACH_GROUP)
		return;

	event->attach_state |= PERF_ATTACH_GROUP;

	if (group_leader == event)
		return;

	if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
			!is_software_event(event))
		group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

	list_add_tail(&event->group_entry, &group_leader->sibling_list);
	group_leader->nr_siblings++;

	perf_event__header_size(group_leader);

	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
		perf_event__header_size(pos);
}

/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx;
	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
		return;

	event->attach_state &= ~PERF_ATTACH_CONTEXT;

	if (is_cgroup_event(event)) {
		ctx->nr_cgroups--;
		cpuctx = __get_cpu_context(ctx);
		/*
		 * if there are no more cgroup events
		 * then clear cgrp to avoid stale pointer
		 * in update_cgrp_time_from_cpuctx()
		 */
		if (!ctx->nr_cgroups)
			cpuctx->cgrp = NULL;
	}

	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_rcu(&event->event_entry);

	if (event->group_leader == event)
		list_del_init(&event->group_entry);

	update_group_times(event);

	/*
	 * If event was in error state, then keep it
	 * that way, otherwise bogus counts will be
	 * returned on read(). The only way to get out
	 * of error state is by explicit re-enabling
	 * of the event
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;
}

static void perf_group_detach(struct perf_event *event)
{
	struct perf_event *sibling, *tmp;
	struct list_head *list = NULL;

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_GROUP))
		return;

	event->attach_state &= ~PERF_ATTACH_GROUP;

	/*
	 * If this is a sibling, remove it from its group.
	 */
	if (event->group_leader != event) {
		list_del_init(&event->group_entry);
		event->group_leader->nr_siblings--;
		goto out;
	}

	if (!list_empty(&event->group_entry))
		list = &event->group_entry;

	/*
	 * If this was a group event with sibling events then
	 * upgrade the siblings to singleton events by adding them
	 * to whatever list we are on.
	 */
	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
		if (list)
			list_move_tail(&sibling->group_entry, list);
		sibling->group_leader = sibling;

		/* Inherit group flags from the previous leader */
		sibling->group_flags = event->group_flags;
	}

out:
	perf_event__header_size(event->group_leader);

	list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
		perf_event__header_size(tmp);
}

static inline int
event_filter_match(struct perf_event *event)
{
	return (event->cpu == -1 || event->cpu == smp_processor_id())
	    && perf_cgroup_match(event);
}

static void
event_sched_out(struct perf_event *event,
		  struct perf_cpu_context *cpuctx,
		  struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);
	u64 delta;
	/*
	 * An event which could not be activated because of
	 * filter mismatch still needs to have its timings
	 * maintained, otherwise bogus information is returned
	 * via read() for time_enabled, time_running:
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE
	    && !event_filter_match(event)) {
		delta = tstamp - event->tstamp_stopped;
		event->tstamp_running += delta;
		event->tstamp_stopped = tstamp;
	}

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;
	}
	event->tstamp_stopped = tstamp;
	event->pmu->del(event, 0);
	event->oncpu = -1;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

static void
group_sched_out(struct perf_event *group_event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	struct perf_event *event;
	int state = group_event->state;

	event_sched_out(group_event, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		event_sched_out(event, cpuctx, ctx);

	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
		cpuctx->exclusive = 0;
}

/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static int __perf_remove_from_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	raw_spin_lock(&ctx->lock);
	event_sched_out(event, cpuctx, ctx);
	list_del_event(event, ctx);
	if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
		ctx->is_active = 0;
		cpuctx->task_ctx = NULL;
	}
	raw_spin_unlock(&ctx->lock);

	return 0;
}


/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * CPU events are removed with a smp call. For task events we only
 * call when the task is on a CPU.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_remove_from_context(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	lockdep_assert_held(&ctx->mutex);

	if (!task) {
		/*
		 * Per cpu events are removed via an smp call and
		 * the removal is always successful.
		 */
		cpu_function_call(event->cpu, __perf_remove_from_context, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_remove_from_context, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If we failed to find a running task, but find the context active now
	 * that we've acquired the ctx->lock, retry.
	 */
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since the task isn't running, it's safe to remove the event; our
	 * holding the ctx->lock ensures the task won't get scheduled in.
	 */
	list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}

1187
/*
1188
 * Cross CPU call to disable a performance event
1189
 */
1190
static int __perf_event_disable(void *info)
1191
{
1192 1193
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
P
Peter Zijlstra 已提交
1194
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1195 1196

	/*
1197 1198
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
1199 1200 1201
	 *
	 * Can trigger due to concurrent perf_event_context_sched_out()
	 * flipping contexts around.
1202
	 */
1203
	if (ctx->task && cpuctx->task_ctx != ctx)
1204
		return -EINVAL;
1205

1206
	raw_spin_lock(&ctx->lock);
1207 1208

	/*
1209
	 * If the event is on, turn it off.
1210 1211
	 * If it is in error state, leave it in error state.
	 */
1212
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
1213
		update_context_time(ctx);
S
Stephane Eranian 已提交
1214
		update_cgrp_time_from_event(event);
1215 1216 1217
		update_group_times(event);
		if (event == event->group_leader)
			group_sched_out(event, cpuctx, ctx);
1218
		else
1219 1220
			event_sched_out(event, cpuctx, ctx);
		event->state = PERF_EVENT_STATE_OFF;
1221 1222
	}

1223
	raw_spin_unlock(&ctx->lock);
1224 1225

	return 0;
1226 1227 1228
}

/*
1229
 * Disable an event.
1230
 *
1231 1232
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
1233
 * remains valid.  This condition is satisfied when called through
1234 1235 1236 1237
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
1238
 * is the current context on this CPU and preemption is disabled,
1239
 * hence we can't get into perf_event_task_sched_out for this context.
1240
 */
1241
void perf_event_disable(struct perf_event *event)
1242
{
1243
	struct perf_event_context *ctx = event->ctx;
1244 1245 1246 1247
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
1248
		 * Disable the event on the cpu that it's on
1249
		 */
1250
		cpu_function_call(event->cpu, __perf_event_disable, event);
1251 1252 1253
		return;
	}

P
Peter Zijlstra 已提交
1254
retry:
1255 1256
	if (!task_function_call(task, __perf_event_disable, event))
		return;
1257

1258
	raw_spin_lock_irq(&ctx->lock);
1259
	/*
1260
	 * If the event is still active, we need to retry the cross-call.
1261
	 */
1262
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
1263
		raw_spin_unlock_irq(&ctx->lock);
1264 1265 1266 1267 1268
		/*
		 * Reload the task pointer, it might have been changed by
		 * a concurrent perf_event_context_sched_out().
		 */
		task = ctx->task;
1269 1270 1271 1272 1273 1274 1275
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
1276 1277 1278
	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_group_times(event);
		event->state = PERF_EVENT_STATE_OFF;
1279
	}
1280
	raw_spin_unlock_irq(&ctx->lock);
1281 1282
}

S
Stephane Eranian 已提交
1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317
static void perf_set_shadow_time(struct perf_event *event,
				 struct perf_event_context *ctx,
				 u64 tstamp)
{
	/*
	 * use the correct time source for the time snapshot
	 *
	 * We could get by without this by leveraging the
	 * fact that to get to this function, the caller
	 * has most likely already called update_context_time()
	 * and update_cgrp_time_xx() and thus both timestamp
	 * are identical (or very close). Given that tstamp is,
	 * already adjusted for cgroup, we could say that:
	 *    tstamp - ctx->timestamp
	 * is equivalent to
	 *    tstamp - cgrp->timestamp.
	 *
	 * Then, in perf_output_read(), the calculation would
	 * work with no changes because:
	 * - event is guaranteed scheduled in
	 * - no scheduled out in between
	 * - thus the timestamp would be the same
	 *
	 * But this is a bit hairy.
	 *
	 * So instead, we have an explicit cgroup call to remain
	 * within the time source all along. We believe it
	 * is cleaner and simpler to understand.
	 */
	if (is_cgroup_event(event))
		perf_cgroup_set_shadow_time(event, tstamp);
	else
		event->shadow_ctx_time = tstamp - ctx->timestamp;
}

P
Peter Zijlstra 已提交
1318 1319 1320 1321
#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_event *event, int enable);

1322
static int
1323
event_sched_in(struct perf_event *event,
1324
		 struct perf_cpu_context *cpuctx,
1325
		 struct perf_event_context *ctx)
1326
{
1327 1328
	u64 tstamp = perf_event_time(event);

1329
	if (event->state <= PERF_EVENT_STATE_OFF)
1330 1331
		return 0;

1332
	event->state = PERF_EVENT_STATE_ACTIVE;
1333
	event->oncpu = smp_processor_id();
P
Peter Zijlstra 已提交
1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344

	/*
	 * Unthrottle events, since we scheduled we might have missed several
	 * ticks already, also for a heavily scheduling task there is little
	 * guarantee it'll get a tick in a timely manner.
	 */
	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
		perf_log_throttle(event, 1);
		event->hw.interrupts = 0;
	}

1345 1346 1347 1348 1349
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

P
Peter Zijlstra 已提交
1350
	if (event->pmu->add(event, PERF_EF_START)) {
1351 1352
		event->state = PERF_EVENT_STATE_INACTIVE;
		event->oncpu = -1;
1353 1354 1355
		return -EAGAIN;
	}

1356
	event->tstamp_running += tstamp - event->tstamp_stopped;
1357

S
Stephane Eranian 已提交
1358
	perf_set_shadow_time(event, ctx, tstamp);
1359

1360
	if (!is_software_event(event))
1361
		cpuctx->active_oncpu++;
1362 1363
	ctx->nr_active++;

1364
	if (event->attr.exclusive)
1365 1366
		cpuctx->exclusive = 1;

1367 1368 1369
	return 0;
}

1370
static int
1371
group_sched_in(struct perf_event *group_event,
1372
	       struct perf_cpu_context *cpuctx,
1373
	       struct perf_event_context *ctx)
1374
{
1375
	struct perf_event *event, *partial_group = NULL;
P
Peter Zijlstra 已提交
1376
	struct pmu *pmu = group_event->pmu;
1377 1378
	u64 now = ctx->time;
	bool simulate = false;
1379

1380
	if (group_event->state == PERF_EVENT_STATE_OFF)
1381 1382
		return 0;

P
Peter Zijlstra 已提交
1383
	pmu->start_txn(pmu);
1384

1385
	if (event_sched_in(group_event, cpuctx, ctx)) {
P
Peter Zijlstra 已提交
1386
		pmu->cancel_txn(pmu);
1387
		return -EAGAIN;
1388
	}
1389 1390 1391 1392

	/*
	 * Schedule in siblings as one group (if any):
	 */
1393
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
1394
		if (event_sched_in(event, cpuctx, ctx)) {
1395
			partial_group = event;
1396 1397 1398 1399
			goto group_error;
		}
	}

1400
	if (!pmu->commit_txn(pmu))
1401
		return 0;
1402

1403 1404 1405 1406
group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
1407 1408 1409 1410 1411 1412 1413 1414 1415 1416
	 * The events up to the failed event are scheduled out normally,
	 * tstamp_stopped will be updated.
	 *
	 * The failed events and the remaining siblings need to have
	 * their timings updated as if they had gone thru event_sched_in()
	 * and event_sched_out(). This is required to get consistent timings
	 * across the group. This also takes care of the case where the group
	 * could never be scheduled by ensuring tstamp_stopped is set to mark
	 * the time the event was actually stopped, such that time delta
	 * calculation in update_event_times() is correct.
1417
	 */
1418 1419
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
1420 1421 1422 1423 1424 1425 1426 1427
			simulate = true;

		if (simulate) {
			event->tstamp_running += now - event->tstamp_stopped;
			event->tstamp_stopped = now;
		} else {
			event_sched_out(event, cpuctx, ctx);
		}
1428
	}
1429
	event_sched_out(group_event, cpuctx, ctx);
1430

P
Peter Zijlstra 已提交
1431
	pmu->cancel_txn(pmu);
1432

1433 1434 1435
	return -EAGAIN;
}
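
/*
 * Group scheduling relies on the pmu transaction interface: start_txn(),
 * then add() for the leader and each sibling, then commit_txn(); any
 * failure unwinds through cancel_txn(), so a group only ever goes on as
 * a unit.
 */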

1436
/*
1437
 * Work out whether we can put this event group on the CPU now.
1438
 */
1439
static int group_can_go_on(struct perf_event *event,
1440 1441 1442 1443
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
1444
	 * Groups consisting entirely of software events can always go on.
1445
	 */
1446
	if (event->group_flags & PERF_GROUP_SOFTWARE)
1447 1448 1449
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
1450
	 * events can go on.
1451 1452 1453 1454 1455
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
1456
	 * events on the CPU, it can't go on.
1457
	 */
1458
	if (event->attr.exclusive && cpuctx->active_oncpu)
1459 1460 1461 1462 1463 1464 1465 1466
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}
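
/*
 * In short: software-only groups always go on; otherwise a group is
 * refused when the CPU is held exclusively, when the group itself is
 * exclusive and events are already active, or when an earlier group in
 * this scheduling pass already failed (can_add_hw == 0).
 */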

1467 1468
static void add_event_to_ctx(struct perf_event *event,
			       struct perf_event_context *ctx)
1469
{
1470 1471
	u64 tstamp = perf_event_time(event);

1472
	list_add_event(event, ctx);
1473
	perf_group_attach(event);
1474 1475 1476
	event->tstamp_enabled = tstamp;
	event->tstamp_running = tstamp;
	event->tstamp_stopped = tstamp;
1477 1478
}

1479 1480 1481 1482 1483 1484
static void task_ctx_sched_out(struct perf_event_context *ctx);
static void
ctx_sched_in(struct perf_event_context *ctx,
	     struct perf_cpu_context *cpuctx,
	     enum event_type_t event_type,
	     struct task_struct *task);
1485

1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497
static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
				struct perf_event_context *ctx,
				struct task_struct *task)
{
	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
}
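
/*
 * Pinned groups are scheduled in before flexible groups, for both the
 * CPU context and the task context, so pinned events get first claim on
 * the hardware counters.
 */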

T
Thomas Gleixner 已提交
1498
/*
1499
 * Cross CPU call to install and enable a performance event
1500 1501
 *
 * Must be called with ctx->mutex held
T
Thomas Gleixner 已提交
1502
 */
1503
static int  __perf_install_in_context(void *info)
T
Thomas Gleixner 已提交
1504
{
1505 1506
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
P
Peter Zijlstra 已提交
1507
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1508 1509 1510
	struct perf_event_context *task_ctx = cpuctx->task_ctx;
	struct task_struct *task = current;

1511
	perf_ctx_lock(cpuctx, task_ctx);
1512
	perf_pmu_disable(cpuctx->ctx.pmu);
T
Thomas Gleixner 已提交
1513 1514

	/*
1515
	 * If there was an active task_ctx schedule it out.
T
Thomas Gleixner 已提交
1516
	 */
1517
	if (task_ctx)
1518
		task_ctx_sched_out(task_ctx);
1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532

	/*
	 * If the context we're installing events in is not the
	 * active task_ctx, flip them.
	 */
	if (ctx->task && task_ctx != ctx) {
		if (task_ctx)
			raw_spin_unlock(&task_ctx->lock);
		raw_spin_lock(&ctx->lock);
		task_ctx = ctx;
	}

	if (task_ctx) {
		cpuctx->task_ctx = task_ctx;
1533 1534
		task = task_ctx->task;
	}
1535

1536
	cpu_ctx_sched_out(cpuctx, EVENT_ALL);
T
Thomas Gleixner 已提交
1537

1538
	update_context_time(ctx);
S
Stephane Eranian 已提交
1539 1540 1541 1542 1543 1544
	/*
	 * update cgrp time only if current cgrp
	 * matches event->cgrp. Must be done before
	 * calling add_event_to_ctx()
	 */
	update_cgrp_time_from_event(event);
T
Thomas Gleixner 已提交
1545

1546
	add_event_to_ctx(event, ctx);
T
Thomas Gleixner 已提交
1547

1548
	/*
1549
	 * Schedule everything back in
1550
	 */
1551
	perf_event_sched_in(cpuctx, task_ctx, task);
1552 1553 1554

	perf_pmu_enable(cpuctx->ctx.pmu);
	perf_ctx_unlock(cpuctx, task_ctx);
1555 1556

	return 0;
T
Thomas Gleixner 已提交
1557 1558 1559
}

/*
1560
 * Attach a performance event to a context
T
Thomas Gleixner 已提交
1561
 *
1562 1563
 * First we add the event to the list with the hardware enable bit
 * in event->hw_config cleared.
T
Thomas Gleixner 已提交
1564
 *
1565
 * If the event is attached to a task which is on a CPU we use a smp
T
Thomas Gleixner 已提交
1566 1567 1568 1569
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 */
static void
1570 1571
perf_install_in_context(struct perf_event_context *ctx,
			struct perf_event *event,
T
Thomas Gleixner 已提交
1572 1573 1574 1575
			int cpu)
{
	struct task_struct *task = ctx->task;

1576 1577
	lockdep_assert_held(&ctx->mutex);

1578 1579
	event->ctx = ctx;

T
Thomas Gleixner 已提交
1580 1581
	if (!task) {
		/*
1582
		 * Per cpu events are installed via an smp call and
1583
		 * the install is always successful.
T
Thomas Gleixner 已提交
1584
		 */
1585
		cpu_function_call(cpu, __perf_install_in_context, event);
T
Thomas Gleixner 已提交
1586 1587 1588 1589
		return;
	}

retry:
1590 1591
	if (!task_function_call(task, __perf_install_in_context, event))
		return;
T
Thomas Gleixner 已提交
1592

1593
	raw_spin_lock_irq(&ctx->lock);
T
Thomas Gleixner 已提交
1594
	/*
1595 1596
	 * If we failed to find a running task, but find the context active now
	 * that we've acquired the ctx->lock, retry.
T
Thomas Gleixner 已提交
1597
	 */
1598
	if (ctx->is_active) {
1599
		raw_spin_unlock_irq(&ctx->lock);
T
Thomas Gleixner 已提交
1600 1601 1602 1603
		goto retry;
	}

	/*
1604 1605
	 * Since the task isn't running, it's safe to add the event; our holding
	 * the ctx->lock ensures the task won't get scheduled in.
T
Thomas Gleixner 已提交
1606
	 */
1607
	add_event_to_ctx(event, ctx);
1608
	raw_spin_unlock_irq(&ctx->lock);
T
Thomas Gleixner 已提交
1609 1610
}

1611
/*
1612
 * Put an event into inactive state and update time fields.
1613 1614 1615 1616 1617 1618
 * Enabling the leader of a group effectively enables all
 * the group members that aren't explicitly disabled, so we
 * have to update their ->tstamp_enabled also.
 * Note: this works for group members as well as group leaders
 * since the non-leader members' sibling_lists will be empty.
 */
1619 1620
static void __perf_event_mark_enabled(struct perf_event *event,
					struct perf_event_context *ctx)
1621
{
1622
	struct perf_event *sub;
1623
	u64 tstamp = perf_event_time(event);
1624

1625
	event->state = PERF_EVENT_STATE_INACTIVE;
1626
	event->tstamp_enabled = tstamp - event->total_time_enabled;
P
Peter Zijlstra 已提交
1627
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
1628 1629
		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
P
Peter Zijlstra 已提交
1630
	}
1631 1632
}

1633
/*
1634
 * Cross CPU call to enable a performance event
1635
 */
1636
static int __perf_event_enable(void *info)
1637
{
1638 1639 1640
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
P
Peter Zijlstra 已提交
1641
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1642
	int err;
1643

1644 1645
	if (WARN_ON_ONCE(!ctx->is_active))
		return -EINVAL;
1646

1647
	raw_spin_lock(&ctx->lock);
1648
	update_context_time(ctx);
1649

1650
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
1651
		goto unlock;
S
Stephane Eranian 已提交
1652 1653 1654 1655

	/*
	 * set current task's cgroup time reference point
	 */
1656
	perf_cgroup_set_timestamp(current, ctx);
S
Stephane Eranian 已提交
1657

1658
	__perf_event_mark_enabled(event, ctx);
1659

S
Stephane Eranian 已提交
1660 1661 1662
	if (!event_filter_match(event)) {
		if (is_cgroup_event(event))
			perf_cgroup_defer_enabled(event);
1663
		goto unlock;
S
Stephane Eranian 已提交
1664
	}
1665

1666
	/*
1667
	 * If the event is in a group and isn't the group leader,
1668
	 * then don't put it on unless the group is on.
1669
	 */
1670
	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
1671
		goto unlock;
1672

1673
	if (!group_can_go_on(event, cpuctx, 1)) {
1674
		err = -EEXIST;
1675
	} else {
1676
		if (event == leader)
1677
			err = group_sched_in(event, cpuctx, ctx);
1678
		else
1679
			err = event_sched_in(event, cpuctx, ctx);
1680
	}
1681 1682 1683

	if (err) {
		/*
1684
		 * If this event can't go on and it's part of a
1685 1686
		 * group, then the whole group has to come off.
		 */
1687
		if (leader != event)
1688
			group_sched_out(leader, cpuctx, ctx);
1689
		if (leader->attr.pinned) {
1690
			update_group_times(leader);
1691
			leader->state = PERF_EVENT_STATE_ERROR;
1692
		}
1693 1694
	}

P
Peter Zijlstra 已提交
1695
unlock:
1696
	raw_spin_unlock(&ctx->lock);
1697 1698

	return 0;
1699 1700 1701
}

/*
1702
 * Enable an event.
1703
 *
1704 1705
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
1706
 * remains valid.  This condition is satisfied when called through
1707 1708
 * perf_event_for_each_child or perf_event_for_each as described
 * for perf_event_disable.
1709
 */
1710
void perf_event_enable(struct perf_event *event)
1711
{
1712
	struct perf_event_context *ctx = event->ctx;
1713 1714 1715 1716
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
1717
		 * Enable the event on the cpu that it's on
1718
		 */
1719
		cpu_function_call(event->cpu, __perf_event_enable, event);
1720 1721 1722
		return;
	}

1723
	raw_spin_lock_irq(&ctx->lock);
1724
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
1725 1726 1727
		goto out;

	/*
1728 1729
	 * If the event is in error state, clear that first.
	 * That way, if we see the event in error state below, we
1730 1731 1732 1733
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
1734 1735
	if (event->state == PERF_EVENT_STATE_ERROR)
		event->state = PERF_EVENT_STATE_OFF;
1736

P
Peter Zijlstra 已提交
1737
retry:
1738 1739 1740 1741 1742
	if (!ctx->is_active) {
		__perf_event_mark_enabled(event, ctx);
		goto out;
	}

1743
	raw_spin_unlock_irq(&ctx->lock);
1744 1745 1746

	if (!task_function_call(task, __perf_event_enable, event))
		return;
1747

1748
	raw_spin_lock_irq(&ctx->lock);
1749 1750

	/*
1751
	 * If the context is active and the event is still off,
1752 1753
	 * we need to retry the cross-call.
	 */
1754 1755 1756 1757 1758 1759
	if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
		/*
		 * task could have been flipped by a concurrent
		 * perf_event_context_sched_out()
		 */
		task = ctx->task;
1760
		goto retry;
1761
	}
1762

P
Peter Zijlstra 已提交
1763
out:
1764
	raw_spin_unlock_irq(&ctx->lock);
1765 1766
}

1767
static int perf_event_refresh(struct perf_event *event, int refresh)
1768
{
1769
	/*
1770
	 * not supported on inherited events
1771
	 */
1772
	if (event->attr.inherit || !is_sampling_event(event))
1773 1774
		return -EINVAL;

1775 1776
	atomic_add(refresh, &event->event_limit);
	perf_event_enable(event);
1777 1778

	return 0;
1779 1780
}

1781 1782 1783
static void ctx_sched_out(struct perf_event_context *ctx,
			  struct perf_cpu_context *cpuctx,
			  enum event_type_t event_type)
1784
{
1785
	struct perf_event *event;
1786
	int is_active = ctx->is_active;
1787

1788
	ctx->is_active &= ~event_type;
1789
	if (likely(!ctx->nr_events))
1790 1791
		return;

1792
	update_context_time(ctx);
S
Stephane Eranian 已提交
1793
	update_cgrp_time_from_cpuctx(cpuctx);
1794
	if (!ctx->nr_active)
1795
		return;
1796

P
Peter Zijlstra 已提交
1797
	perf_pmu_disable(ctx->pmu);
1798
	if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
1799 1800
		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);
P
Peter Zijlstra 已提交
1801
	}
1802

1803
	if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
1804
		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
1805
			group_sched_out(event, cpuctx, ctx);
P
Peter Zijlstra 已提交
1806
	}
P
Peter Zijlstra 已提交
1807
	perf_pmu_enable(ctx->pmu);
1808 1809
}

1810 1811 1812
/*
 * Test whether two contexts are equivalent, i.e. whether they
 * have both been cloned from the same version of the same context
1813 1814 1815 1816
 * and they both have the same number of enabled events.
 * If the number of enabled events is the same, then the set
 * of enabled events should be the same, because these are both
 * inherited contexts, therefore we can't access individual events
1817
 * in them directly with an fd; we can only enable/disable all
1818
 * events via prctl, or enable/disable all events in a family
1819 1820
 * via ioctl, which will have the same effect on both contexts.
 */
1821 1822
static int context_equiv(struct perf_event_context *ctx1,
			 struct perf_event_context *ctx2)
1823 1824
{
	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
1825
		&& ctx1->parent_gen == ctx2->parent_gen
1826
		&& !ctx1->pin_count && !ctx2->pin_count;
1827 1828
}

static void __perf_event_sync_stat(struct perf_event *event,
				     struct perf_event *next_event)
{
	u64 value;

	if (!event->attr.inherit_stat)
		return;

	/*
	 * Update the event value, we cannot use perf_event_read()
	 * because we're in the middle of a context switch and have IRQs
	 * disabled, which upsets smp_call_function_single(), however
	 * we know the event must be on the current CPU, therefore we
	 * don't need to use it.
	 */
	switch (event->state) {
	case PERF_EVENT_STATE_ACTIVE:
		event->pmu->read(event);
		/* fall-through */

	case PERF_EVENT_STATE_INACTIVE:
		update_event_times(event);
		break;

	default:
		break;
	}

	/*
	 * In order to keep per-task stats reliable we need to flip the event
	 * values when we flip the contexts.
	 */
	value = local64_read(&next_event->count);
	value = local64_xchg(&event->count, value);
	local64_set(&next_event->count, value);

	swap(event->total_time_enabled, next_event->total_time_enabled);
	swap(event->total_time_running, next_event->total_time_running);

	/*
	 * Since we swizzled the values, update the user visible data too.
	 */
	perf_event_update_userpage(event);
	perf_event_update_userpage(next_event);
}

#define list_next_entry(pos, member) \
	list_entry(pos->member.next, typeof(*pos), member)

static void perf_event_sync_stat(struct perf_event_context *ctx,
				   struct perf_event_context *next_ctx)
{
	struct perf_event *event, *next_event;

	if (!ctx->nr_stat)
		return;

	update_context_time(ctx);

	event = list_first_entry(&ctx->event_list,
				   struct perf_event, event_entry);

	next_event = list_first_entry(&next_ctx->event_list,
					struct perf_event, event_entry);

	while (&event->event_entry != &ctx->event_list &&
	       &next_event->event_entry != &next_ctx->event_list) {

		__perf_event_sync_stat(event, next_event);

		event = list_next_entry(event, event_entry);
		next_event = list_next_entry(next_event, event_entry);
	}
}

static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
					 struct task_struct *next)
{
	struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
	struct perf_event_context *next_ctx;
	struct perf_event_context *parent;
	struct perf_cpu_context *cpuctx;
	int do_switch = 1;

	if (likely(!ctx))
		return;

	cpuctx = __get_cpu_context(ctx);
	if (!cpuctx->task_ctx)
		return;

	rcu_read_lock();
	parent = rcu_dereference(ctx->parent_ctx);
	next_ctx = next->perf_event_ctxp[ctxn];
	if (parent && next_ctx &&
	    rcu_dereference(next_ctx->parent_ctx) == parent) {
		/*
		 * Looks like the two contexts are clones, so we might be
		 * able to optimize the context switch.  We lock both
		 * contexts and check that they are clones under the
		 * lock (including re-checking that neither has been
		 * uncloned in the meantime).  It doesn't matter which
		 * order we take the locks because no other cpu could
		 * be trying to lock both of these tasks.
		 */
		raw_spin_lock(&ctx->lock);
		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
		if (context_equiv(ctx, next_ctx)) {
			/*
			 * XXX do we need a memory barrier of sorts
			 * wrt to rcu_dereference() of perf_event_ctxp
			 */
			task->perf_event_ctxp[ctxn] = next_ctx;
			next->perf_event_ctxp[ctxn] = ctx;
			ctx->task = next;
			next_ctx->task = task;
			do_switch = 0;

			perf_event_sync_stat(ctx, next_ctx);
		}
		raw_spin_unlock(&next_ctx->lock);
		raw_spin_unlock(&ctx->lock);
	}
	rcu_read_unlock();

	if (do_switch) {
		raw_spin_lock(&ctx->lock);
		ctx_sched_out(ctx, cpuctx, EVENT_ALL);
		cpuctx->task_ctx = NULL;
		raw_spin_unlock(&ctx->lock);
	}
}

#define for_each_task_context_nr(ctxn)					\
	for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)

/*
 * Called from scheduler to remove the events of the current task,
 * with interrupts disabled.
 *
 * We stop each event and update the event value in event->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of event _before_
 * accessing the event control register. If an NMI hits, then it will
 * not restart the event.
 */
void __perf_event_task_sched_out(struct task_struct *task,
				 struct task_struct *next)
{
	int ctxn;

	for_each_task_context_nr(ctxn)
		perf_event_context_sched_out(task, ctxn, next);

	/*
	 * If cgroup events exist on this CPU, then we need
	 * to check if we have to switch out PMU state.
	 * cgroup events are system-wide mode only.
	 */
	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
		perf_cgroup_sched_out(task);
}

static void task_ctx_sched_out(struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	if (!cpuctx->task_ctx)
		return;

	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
		return;

	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
	cpuctx->task_ctx = NULL;
}

/*
 * Called with IRQs disabled
 */
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type)
{
	ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
}

static void
ctx_pinned_sched_in(struct perf_event_context *ctx,
		    struct perf_cpu_context *cpuctx)
{
	struct perf_event *event;

	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
		if (event->state <= PERF_EVENT_STATE_OFF)
			continue;
		if (!event_filter_match(event))
			continue;

		/* may need to reset tstamp_enabled */
		if (is_cgroup_event(event))
			perf_cgroup_mark_enabled(event, ctx);

		if (group_can_go_on(event, cpuctx, 1))
			group_sched_in(event, cpuctx, ctx);

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (event->state == PERF_EVENT_STATE_INACTIVE) {
			update_group_times(event);
			event->state = PERF_EVENT_STATE_ERROR;
		}
	}
}

static void
ctx_flexible_sched_in(struct perf_event_context *ctx,
		      struct perf_cpu_context *cpuctx)
{
	struct perf_event *event;
	int can_add_hw = 1;

	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
		/* Ignore events in OFF or ERROR state */
		if (event->state <= PERF_EVENT_STATE_OFF)
			continue;
		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of events:
		 */
		if (!event_filter_match(event))
			continue;

		/* may need to reset tstamp_enabled */
		if (is_cgroup_event(event))
			perf_cgroup_mark_enabled(event, ctx);

		if (group_can_go_on(event, cpuctx, can_add_hw)) {
			if (group_sched_in(event, cpuctx, ctx))
				can_add_hw = 0;
		}
	}
}

static void
ctx_sched_in(struct perf_event_context *ctx,
	     struct perf_cpu_context *cpuctx,
	     enum event_type_t event_type,
	     struct task_struct *task)
{
	u64 now;
	int is_active = ctx->is_active;

	ctx->is_active |= event_type;
	if (likely(!ctx->nr_events))
		return;

	now = perf_clock();
	ctx->timestamp = now;
	perf_cgroup_set_timestamp(task, ctx);
	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
		ctx_pinned_sched_in(ctx, cpuctx);

	/* Then walk through the lower prio flexible groups */
	if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
		ctx_flexible_sched_in(ctx, cpuctx);
}

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type,
			     struct task_struct *task)
{
	struct perf_event_context *ctx = &cpuctx->ctx;

	ctx_sched_in(ctx, cpuctx, event_type, task);
}

static void perf_event_context_sched_in(struct perf_event_context *ctx,
					struct task_struct *task)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = __get_cpu_context(ctx);
	if (cpuctx->task_ctx == ctx)
		return;

	perf_ctx_lock(cpuctx, ctx);
	perf_pmu_disable(ctx->pmu);
	/*
	 * We want to keep the following priority order:
	 * cpu pinned (that don't need to move), task pinned,
	 * cpu flexible, task flexible.
	 */
	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);

	perf_event_sched_in(cpuctx, ctx, task);

	cpuctx->task_ctx = ctx;

	perf_pmu_enable(ctx->pmu);
	perf_ctx_unlock(cpuctx, ctx);

	/*
	 * Since these rotations are per-cpu, we need to ensure the
	 * cpu-context we got scheduled on is actually rotating.
	 */
	perf_pmu_rotate_start(ctx->pmu);
}

/*
 * Called from scheduler to add the events of the current task
 * with interrupts disabled.
 *
 * We restore the event value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of event _before_
 * accessing the event control register. If an NMI hits, then it will
 * keep the event running.
 */
void __perf_event_task_sched_in(struct task_struct *task)
{
	struct perf_event_context *ctx;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (likely(!ctx))
			continue;

		perf_event_context_sched_in(ctx, task);
	}
	/*
	 * If cgroup events exist on this CPU, then we need
	 * to check if we have to switch in PMU state.
	 * cgroup events are system-wide mode only.
	 */
	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
		perf_cgroup_sched_in(task);
}

static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
{
	u64 frequency = event->attr.sample_freq;
	u64 sec = NSEC_PER_SEC;
	u64 divisor, dividend;

	int count_fls, nsec_fls, frequency_fls, sec_fls;

	count_fls = fls64(count);
	nsec_fls = fls64(nsec);
	frequency_fls = fls64(frequency);
	sec_fls = 30;

	/*
	 * We got @count in @nsec, with a target of sample_freq HZ
	 * the target period becomes:
	 *
	 *             @count * 10^9
	 * period = -------------------
	 *          @nsec * sample_freq
	 *
	 */

	/*
	 * Reduce accuracy by one bit such that @a and @b converge
	 * to a similar magnitude.
	 */
#define REDUCE_FLS(a, b)		\
do {					\
	if (a##_fls > b##_fls) {	\
		a >>= 1;		\
		a##_fls--;		\
	} else {			\
		b >>= 1;		\
		b##_fls--;		\
	}				\
} while (0)

	/*
	 * Reduce accuracy until either term fits in a u64, then proceed with
	 * the other, so that finally we can do a u64/u64 division.
	 */
	while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
		REDUCE_FLS(nsec, frequency);
		REDUCE_FLS(sec, count);
	}

	if (count_fls + sec_fls > 64) {
		divisor = nsec * frequency;

		while (count_fls + sec_fls > 64) {
			REDUCE_FLS(count, sec);
			divisor >>= 1;
		}

		dividend = count * sec;
	} else {
		dividend = count * sec;

		while (nsec_fls + frequency_fls > 64) {
			REDUCE_FLS(nsec, frequency);
			dividend >>= 1;
		}

		divisor = nsec * frequency;
	}

	if (!divisor)
		return dividend;

	return div64_u64(dividend, divisor);
}
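
/*
 * Worked example of the formula above (illustrative numbers only): with
 * sample_freq = 1000 Hz and a measurement of count = 1,000,000 events
 * over nsec = 10,000,000 ns,
 *
 *	period = (1,000,000 * 10^9) / (10,000,000 * 1000) = 100,000
 *
 * i.e. one sample roughly every 100,000 events, which reproduces the
 * requested 1000 samples/sec at this event rate.
 */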

static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 period, sample_period;
	s64 delta;

	period = perf_calculate_period(event, nsec, count);

	delta = (s64)(period - hwc->sample_period);
	delta = (delta + 7) / 8; /* low pass filter */

	sample_period = hwc->sample_period + delta;

	if (!sample_period)
		sample_period = 1;

	hwc->sample_period = sample_period;

	if (local64_read(&hwc->period_left) > 8*sample_period) {
		event->pmu->stop(event, PERF_EF_UPDATE);
		local64_set(&hwc->period_left, 0);
		event->pmu->start(event, PERF_EF_RELOAD);
	}
}

static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
{
	struct perf_event *event;
	struct hw_perf_event *hwc;
	u64 interrupts, now;
	s64 delta;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (event->state != PERF_EVENT_STATE_ACTIVE)
			continue;

		if (!event_filter_match(event))
			continue;

		hwc = &event->hw;

		interrupts = hwc->interrupts;
		hwc->interrupts = 0;

		/*
		 * unthrottle events on the tick
		 */
		if (interrupts == MAX_INTERRUPTS) {
			perf_log_throttle(event, 1);
			event->pmu->start(event, 0);
		}

		if (!event->attr.freq || !event->attr.sample_freq)
			continue;

		event->pmu->read(event);
		now = local64_read(&event->count);
		delta = now - hwc->freq_count_stamp;
		hwc->freq_count_stamp = now;

		if (delta > 0)
			perf_adjust_period(event, period, delta);
	}
}

/*
 * Round-robin a context's events:
 */
static void rotate_ctx(struct perf_event_context *ctx)
{
	/*
	 * Rotate the first entry last of non-pinned groups. Rotation might be
	 * disabled by the inheritance code.
	 */
	if (!ctx->rotate_disable)
		list_rotate_left(&ctx->flexible_groups);
}

/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 * because they're strictly cpu affine and rotate_start is called with IRQs
 * disabled, while rotate_context is called from IRQ context.
 */
static void perf_rotate_context(struct perf_cpu_context *cpuctx)
{
	u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
	struct perf_event_context *ctx = NULL;
	int rotate = 0, remove = 1;

	if (cpuctx->ctx.nr_events) {
		remove = 0;
		if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
			rotate = 1;
	}

	ctx = cpuctx->task_ctx;
	if (ctx && ctx->nr_events) {
		remove = 0;
		if (ctx->nr_events != ctx->nr_active)
			rotate = 1;
	}

	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
	perf_pmu_disable(cpuctx->ctx.pmu);
	perf_ctx_adjust_freq(&cpuctx->ctx, interval);
	if (ctx)
		perf_ctx_adjust_freq(ctx, interval);

	if (!rotate)
		goto done;

	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
	if (ctx)
		ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);

	rotate_ctx(&cpuctx->ctx);
	if (ctx)
		rotate_ctx(ctx);

	perf_event_sched_in(cpuctx, ctx, current);

done:
	if (remove)
		list_del_init(&cpuctx->rotation_list);

	perf_pmu_enable(cpuctx->ctx.pmu);
	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
}

void perf_event_task_tick(void)
{
	struct list_head *head = &__get_cpu_var(rotation_list);
	struct perf_cpu_context *cpuctx, *tmp;

	WARN_ON(!irqs_disabled());

	list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
		if (cpuctx->jiffies_interval == 1 ||
				!(jiffies % cpuctx->jiffies_interval))
			perf_rotate_context(cpuctx);
	}
}

static int event_enable_on_exec(struct perf_event *event,
				struct perf_event_context *ctx)
{
	if (!event->attr.enable_on_exec)
		return 0;

	event->attr.enable_on_exec = 0;
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		return 0;

	__perf_event_mark_enabled(event, ctx);

	return 1;
}
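
/*
 * Example (not from the original source): enable_on_exec is set by user
 * space when creating the event, typically so that a counter attached to a
 * shell only starts counting once the target program exec()s.  A sketch of
 * the attribute setup:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_INSTRUCTIONS,
 *		.disabled	= 1,
 *		.enable_on_exec	= 1,
 *	};
 *
 * perf_event_enable_on_exec() below then walks the context at exec time and
 * enables any such events.
 */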

2407
/*
2408
 * Enable all of a task's events that have been marked enable-on-exec.
2409 2410
 * This expects task == current.
 */
P
Peter Zijlstra 已提交
2411
static void perf_event_enable_on_exec(struct perf_event_context *ctx)
2412
{
2413
	struct perf_event *event;
2414 2415
	unsigned long flags;
	int enabled = 0;
2416
	int ret;
2417 2418

	local_irq_save(flags);
2419
	if (!ctx || !ctx->nr_events)
2420 2421
		goto out;

2422 2423 2424 2425 2426 2427 2428 2429
	/*
	 * We must ctxsw out cgroup events to avoid conflict
	 * when invoking perf_task_event_sched_in() later on
	 * in this function. Otherwise we end up trying to
	 * ctxswin cgroup events which are already scheduled
	 * in.
	 */
	perf_cgroup_sched_out(current);
2430

2431
	raw_spin_lock(&ctx->lock);
2432
	task_ctx_sched_out(ctx);
2433

2434 2435 2436 2437 2438 2439 2440 2441 2442 2443
	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
		ret = event_enable_on_exec(event, ctx);
		if (ret)
			enabled = 1;
	}

	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
		ret = event_enable_on_exec(event, ctx);
		if (ret)
			enabled = 1;
2444 2445 2446
	}

	/*
2447
	 * Unclone this context if we enabled any event.
2448
	 */
2449 2450
	if (enabled)
		unclone_ctx(ctx);
2451

2452
	raw_spin_unlock(&ctx->lock);
2453

2454 2455 2456
	/*
	 * Also calls ctxswin for cgroup events, if any:
	 */
S
Stephane Eranian 已提交
2457
	perf_event_context_sched_in(ctx, ctx->task);
P
Peter Zijlstra 已提交
2458
out:
2459 2460 2461
	local_irq_restore(flags);
}

T
Thomas Gleixner 已提交
2462
/*
2463
 * Cross CPU call to read the hardware event
T
Thomas Gleixner 已提交
2464
 */
2465
static void __perf_event_read(void *info)
T
Thomas Gleixner 已提交
2466
{
2467 2468
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
P
Peter Zijlstra 已提交
2469
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
I
Ingo Molnar 已提交
2470

2471 2472 2473 2474
	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu.  If not it has been
	 * scheduled out before the smp call arrived.  In that case
2475 2476
	 * event->count would have been updated to a recent sample
	 * when the event was scheduled out.
2477 2478 2479 2480
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

2481
	raw_spin_lock(&ctx->lock);
S
Stephane Eranian 已提交
2482
	if (ctx->is_active) {
2483
		update_context_time(ctx);
S
Stephane Eranian 已提交
2484 2485
		update_cgrp_time_from_event(event);
	}
2486
	update_event_times(event);
2487 2488
	if (event->state == PERF_EVENT_STATE_ACTIVE)
		event->pmu->read(event);
2489
	raw_spin_unlock(&ctx->lock);
T
Thomas Gleixner 已提交
2490 2491
}

P
Peter Zijlstra 已提交
2492 2493
static inline u64 perf_event_count(struct perf_event *event)
{
2494
	return local64_read(&event->count) + atomic64_read(&event->child_count);
P
Peter Zijlstra 已提交
2495 2496
}

2497
static u64 perf_event_read(struct perf_event *event)
T
Thomas Gleixner 已提交
2498 2499
{
	/*
2500 2501
	 * If event is enabled and currently active on a CPU, update the
	 * value in the event structure:
T
Thomas Gleixner 已提交
2502
	 */
2503 2504 2505 2506
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		smp_call_function_single(event->oncpu,
					 __perf_event_read, event, 1);
	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
P
Peter Zijlstra 已提交
2507 2508 2509
		struct perf_event_context *ctx = event->ctx;
		unsigned long flags;

2510
		raw_spin_lock_irqsave(&ctx->lock, flags);
2511 2512 2513 2514 2515
		/*
		 * may read while context is not active
		 * (e.g., thread is blocked), in that case
		 * we cannot update context time
		 */
S
Stephane Eranian 已提交
2516
		if (ctx->is_active) {
2517
			update_context_time(ctx);
S
Stephane Eranian 已提交
2518 2519
			update_cgrp_time_from_event(event);
		}
2520
		update_event_times(event);
2521
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
T
Thomas Gleixner 已提交
2522 2523
	}

P
Peter Zijlstra 已提交
2524
	return perf_event_count(event);
T
Thomas Gleixner 已提交
2525 2526
}

2527
/*
2528
 * Callchain support
2529
 */
2530 2531 2532 2533 2534 2535

struct callchain_cpus_entries {
	struct rcu_head			rcu_head;
	struct perf_callchain_entry	*cpu_entries[0];
};

2536
static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
2537 2538 2539 2540 2541 2542 2543
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
struct callchain_cpus_entries *callchain_cpus_entries;


__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
				  struct pt_regs *regs)
2544 2545 2546
{
}

2547 2548
__weak void perf_callchain_user(struct perf_callchain_entry *entry,
				struct pt_regs *regs)
T
Thomas Gleixner 已提交
2549
{
2550
}
T
Thomas Gleixner 已提交
2551

2552 2553 2554 2555
static void release_callchain_buffers_rcu(struct rcu_head *head)
{
	struct callchain_cpus_entries *entries;
	int cpu;
T
Thomas Gleixner 已提交
2556

2557
	entries = container_of(head, struct callchain_cpus_entries, rcu_head);
T
Thomas Gleixner 已提交
2558

2559 2560
	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);
T
Thomas Gleixner 已提交
2561

2562 2563
	kfree(entries);
}
T
Thomas Gleixner 已提交
2564

2565 2566 2567
static void release_callchain_buffers(void)
{
	struct callchain_cpus_entries *entries;
T
Thomas Gleixner 已提交
2568

2569 2570 2571 2572
	entries = callchain_cpus_entries;
	rcu_assign_pointer(callchain_cpus_entries, NULL);
	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}
T
Thomas Gleixner 已提交
2573

2574 2575 2576 2577 2578
static int alloc_callchain_buffers(void)
{
	int cpu;
	int size;
	struct callchain_cpus_entries *entries;
T
Thomas Gleixner 已提交
2579

2580
	/*
2581 2582 2583
	 * We can't use the percpu allocation API for data that can be
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * until that gets sorted out.
2584
	 */
2585
	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
2586

2587 2588 2589
	entries = kzalloc(size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;
2590

2591
	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
T
Thomas Gleixner 已提交
2592

2593 2594 2595 2596 2597
	for_each_possible_cpu(cpu) {
		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
							 cpu_to_node(cpu));
		if (!entries->cpu_entries[cpu])
			goto fail;
2598 2599
	}

2600
	rcu_assign_pointer(callchain_cpus_entries, entries);
T
Thomas Gleixner 已提交
2601

2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735
	return 0;

fail:
	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);
	kfree(entries);

	return -ENOMEM;
}

static int get_callchain_buffers(void)
{
	int err = 0;
	int count;

	mutex_lock(&callchain_mutex);

	count = atomic_inc_return(&nr_callchain_events);
	if (WARN_ON_ONCE(count < 1)) {
		err = -EINVAL;
		goto exit;
	}

	if (count > 1) {
		/* If the allocation failed, give up */
		if (!callchain_cpus_entries)
			err = -ENOMEM;
		goto exit;
	}

	err = alloc_callchain_buffers();
	if (err)
		release_callchain_buffers();
exit:
	mutex_unlock(&callchain_mutex);

	return err;
}

static void put_callchain_buffers(void)
{
	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
		release_callchain_buffers();
		mutex_unlock(&callchain_mutex);
	}
}

static int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

static struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
	int cpu;
	struct callchain_cpus_entries *entries;

	*rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
	if (*rctx == -1)
		return NULL;

	entries = rcu_dereference(callchain_cpus_entries);
	if (!entries)
		return NULL;

	cpu = smp_processor_id();

	return &entries->cpu_entries[cpu][*rctx];
}

static void
put_callchain_entry(int rctx)
{
	put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
}

static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	int rctx;
	struct perf_callchain_entry *entry;


	entry = get_callchain_entry(&rctx);
	if (rctx == -1)
		return NULL;

	if (!entry)
		goto exit_put;

	entry->nr = 0;

	if (!user_mode(regs)) {
		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(entry, regs);
		if (current->mm)
			regs = task_pt_regs(current);
		else
			regs = NULL;
	}

	if (regs) {
		perf_callchain_store(entry, PERF_CONTEXT_USER);
		perf_callchain_user(entry, regs);
	}

exit_put:
	put_callchain_entry(rctx);

	return entry;
}

2736
/*
2737
 * Initialize the perf_event context in a task_struct:
2738
 */
2739
static void __perf_event_init_context(struct perf_event_context *ctx)
2740
{
2741
	raw_spin_lock_init(&ctx->lock);
2742
	mutex_init(&ctx->mutex);
2743 2744
	INIT_LIST_HEAD(&ctx->pinned_groups);
	INIT_LIST_HEAD(&ctx->flexible_groups);
2745 2746
	INIT_LIST_HEAD(&ctx->event_list);
	atomic_set(&ctx->refcount, 1);
2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761
}

static struct perf_event_context *
alloc_perf_context(struct pmu *pmu, struct task_struct *task)
{
	struct perf_event_context *ctx;

	ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
	if (!ctx)
		return NULL;

	__perf_event_init_context(ctx);
	if (task) {
		ctx->task = task;
		get_task_struct(task);
T
Thomas Gleixner 已提交
2762
	}
2763 2764 2765
	ctx->pmu = pmu;

	return ctx;
2766 2767
}

2768 2769 2770 2771 2772
static struct task_struct *
find_lively_task_by_vpid(pid_t vpid)
{
	struct task_struct *task;
	int err;
T
Thomas Gleixner 已提交
2773 2774

	rcu_read_lock();
2775
	if (!vpid)
T
Thomas Gleixner 已提交
2776 2777
		task = current;
	else
2778
		task = find_task_by_vpid(vpid);
T
Thomas Gleixner 已提交
2779 2780 2781 2782 2783 2784 2785 2786
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	if (!task)
		return ERR_PTR(-ESRCH);

	/* Reuse ptrace permission checks for now. */
2787 2788 2789 2790
	err = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto errout;

2791 2792 2793 2794 2795 2796 2797
	return task;
errout:
	put_task_struct(task);
	return ERR_PTR(err);

}

2798 2799 2800
/*
 * Returns a matching context with refcount and pincount.
 */
P
Peter Zijlstra 已提交
2801
static struct perf_event_context *
M
Matt Helsley 已提交
2802
find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
T
Thomas Gleixner 已提交
2803
{
2804
	struct perf_event_context *ctx;
2805
	struct perf_cpu_context *cpuctx;
2806
	unsigned long flags;
P
Peter Zijlstra 已提交
2807
	int ctxn, err;
T
Thomas Gleixner 已提交
2808

2809
	if (!task) {
2810
		/* Must be root to operate on a CPU event: */
2811
		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
T
Thomas Gleixner 已提交
2812 2813 2814
			return ERR_PTR(-EACCES);

		/*
		 * We could be clever and allow attaching an event to an
T
Thomas Gleixner 已提交
2816 2817 2818
		 * offline CPU and activate it when the CPU comes up, but
		 * that's for later.
		 */
2819
		if (!cpu_online(cpu))
T
Thomas Gleixner 已提交
2820 2821
			return ERR_PTR(-ENODEV);

P
Peter Zijlstra 已提交
2822
		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
T
Thomas Gleixner 已提交
2823
		ctx = &cpuctx->ctx;
2824
		get_ctx(ctx);
2825
		++ctx->pin_count;
T
Thomas Gleixner 已提交
2826 2827 2828 2829

		return ctx;
	}

P
Peter Zijlstra 已提交
2830 2831 2832 2833 2834
	err = -EINVAL;
	ctxn = pmu->task_ctx_nr;
	if (ctxn < 0)
		goto errout;

P
Peter Zijlstra 已提交
2835
retry:
P
Peter Zijlstra 已提交
2836
	ctx = perf_lock_task_context(task, ctxn, &flags);
2837
	if (ctx) {
2838
		unclone_ctx(ctx);
2839
		++ctx->pin_count;
2840
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
2841
	} else {
2842
		ctx = alloc_perf_context(pmu, task);
2843 2844 2845
		err = -ENOMEM;
		if (!ctx)
			goto errout;
2846

2847 2848 2849 2850 2851 2852 2853 2854 2855 2856
		err = 0;
		mutex_lock(&task->perf_event_mutex);
		/*
		 * If it has already passed perf_event_exit_task().
		 * we must see PF_EXITING, it takes this mutex too.
		 */
		if (task->flags & PF_EXITING)
			err = -ESRCH;
		else if (task->perf_event_ctxp[ctxn])
			err = -EAGAIN;
2857
		else {
2858
			get_ctx(ctx);
2859
			++ctx->pin_count;
2860
			rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
2861
		}
2862 2863 2864
		mutex_unlock(&task->perf_event_mutex);

		if (unlikely(err)) {
2865
			put_ctx(ctx);
2866 2867 2868 2869

			if (err == -EAGAIN)
				goto retry;
			goto errout;
2870 2871 2872
		}
	}

T
Thomas Gleixner 已提交
2873
	return ctx;
2874

P
Peter Zijlstra 已提交
2875
errout:
2876
	return ERR_PTR(err);
T
Thomas Gleixner 已提交
2877 2878
}

L
Li Zefan 已提交
2879 2880
static void perf_event_free_filter(struct perf_event *event);

2881
static void free_event_rcu(struct rcu_head *head)
P
Peter Zijlstra 已提交
2882
{
2883
	struct perf_event *event;
P
Peter Zijlstra 已提交
2884

2885 2886 2887
	event = container_of(head, struct perf_event, rcu_head);
	if (event->ns)
		put_pid_ns(event->ns);
L
Li Zefan 已提交
2888
	perf_event_free_filter(event);
2889
	kfree(event);
P
Peter Zijlstra 已提交
2890 2891
}

2892
static void ring_buffer_put(struct ring_buffer *rb);
2893

2894
static void free_event(struct perf_event *event)
2895
{
2896
	irq_work_sync(&event->pending);
2897

2898
	if (!event->parent) {
2899
		if (event->attach_state & PERF_ATTACH_TASK)
S
Stephane Eranian 已提交
2900
			jump_label_dec(&perf_sched_events);
2901
		if (event->attr.mmap || event->attr.mmap_data)
2902 2903 2904 2905 2906
			atomic_dec(&nr_mmap_events);
		if (event->attr.comm)
			atomic_dec(&nr_comm_events);
		if (event->attr.task)
			atomic_dec(&nr_task_events);
2907 2908
		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
			put_callchain_buffers();
2909 2910 2911 2912
		if (is_cgroup_event(event)) {
			atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
			jump_label_dec(&perf_sched_events);
		}
2913
	}
2914

2915 2916 2917
	if (event->rb) {
		ring_buffer_put(event->rb);
		event->rb = NULL;
2918 2919
	}

S
Stephane Eranian 已提交
2920 2921 2922
	if (is_cgroup_event(event))
		perf_detach_cgroup(event);

2923 2924
	if (event->destroy)
		event->destroy(event);
2925

P
Peter Zijlstra 已提交
2926 2927 2928
	if (event->ctx)
		put_ctx(event->ctx);

2929
	call_rcu(&event->rcu_head, free_event_rcu);
2930 2931
}

2932
int perf_event_release_kernel(struct perf_event *event)
T
Thomas Gleixner 已提交
2933
{
2934
	struct perf_event_context *ctx = event->ctx;
T
Thomas Gleixner 已提交
2935

2936
	WARN_ON_ONCE(ctx->parent_ctx);
2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949
	/*
	 * There are two ways this annotation is useful:
	 *
	 *  1) there is a lock recursion from perf_event_exit_task
	 *     see the comment there.
	 *
	 *  2) there is a lock-inversion with mmap_sem through
	 *     perf_event_read_group(), which takes faults while
	 *     holding ctx->mutex, however this is called after
	 *     the last filedesc died, so there is no possibility
	 *     to trigger the AB-BA case.
	 */
	mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
2950
	raw_spin_lock_irq(&ctx->lock);
2951
	perf_group_detach(event);
2952
	raw_spin_unlock_irq(&ctx->lock);
2953
	perf_remove_from_context(event);
2954
	mutex_unlock(&ctx->mutex);
T
Thomas Gleixner 已提交
2955

2956
	free_event(event);
T
Thomas Gleixner 已提交
2957 2958 2959

	return 0;
}
2960
EXPORT_SYMBOL_GPL(perf_event_release_kernel);
T
Thomas Gleixner 已提交
2961

2962 2963 2964 2965
/*
 * Called when the last reference to the file is gone.
 */
static int perf_release(struct inode *inode, struct file *file)
2966
{
2967
	struct perf_event *event = file->private_data;
P
Peter Zijlstra 已提交
2968
	struct task_struct *owner;
2969

2970
	file->private_data = NULL;
2971

P
Peter Zijlstra 已提交
2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004
	rcu_read_lock();
	owner = ACCESS_ONCE(event->owner);
	/*
	 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
	 * !owner it means the list deletion is complete and we can indeed
	 * free this event, otherwise we need to serialize on
	 * owner->perf_event_mutex.
	 */
	smp_read_barrier_depends();
	if (owner) {
		/*
		 * Since delayed_put_task_struct() also drops the last
		 * task reference we can safely take a new reference
		 * while holding the rcu_read_lock().
		 */
		get_task_struct(owner);
	}
	rcu_read_unlock();

	if (owner) {
		mutex_lock(&owner->perf_event_mutex);
		/*
		 * We have to re-check the event->owner field, if it is cleared
		 * we raced with perf_event_exit_task(), acquiring the mutex
		 * ensured they're done, and we can proceed with freeing the
		 * event.
		 */
		if (event->owner)
			list_del_init(&event->owner_entry);
		mutex_unlock(&owner->perf_event_mutex);
		put_task_struct(owner);
	}

3005
	return perf_event_release_kernel(event);
3006 3007
}

3008
u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
3009
{
3010
	struct perf_event *child;
3011 3012
	u64 total = 0;

3013 3014 3015
	*enabled = 0;
	*running = 0;

3016
	mutex_lock(&event->child_mutex);
3017
	total += perf_event_read(event);
3018 3019 3020 3021 3022 3023
	*enabled += event->total_time_enabled +
			atomic64_read(&event->child_total_time_enabled);
	*running += event->total_time_running +
			atomic64_read(&event->child_total_time_running);

	list_for_each_entry(child, &event->child_list, child_list) {
3024
		total += perf_event_read(child);
3025 3026 3027
		*enabled += child->total_time_enabled;
		*running += child->total_time_running;
	}
3028
	mutex_unlock(&event->child_mutex);
3029 3030 3031

	return total;
}
3032
EXPORT_SYMBOL_GPL(perf_event_read_value);
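
/*
 * Example (not part of the original file): perf_event_read_value() is the
 * in-kernel counterpart of read() for counters owned by kernel code, for
 * instance events set up with perf_event_create_kernel_counter().  A sketch
 * of typical usage:
 *
 *	u64 enabled, running;
 *	u64 value = perf_event_read_value(event, &enabled, &running);
 *
 * "value" is the summed count of the event and its children; enabled and
 * running can be used to scale the count when the event was multiplexed.
 */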
3033

3034
static int perf_event_read_group(struct perf_event *event,
3035 3036
				   u64 read_format, char __user *buf)
{
3037
	struct perf_event *leader = event->group_leader, *sub;
3038 3039
	int n = 0, size = 0, ret = -EFAULT;
	struct perf_event_context *ctx = leader->ctx;
3040
	u64 values[5];
3041
	u64 count, enabled, running;
3042

3043
	mutex_lock(&ctx->mutex);
3044
	count = perf_event_read_value(leader, &enabled, &running);
3045 3046

	values[n++] = 1 + leader->nr_siblings;
3047 3048 3049 3050
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;
3051 3052 3053
	values[n++] = count;
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(leader);
3054 3055 3056 3057

	size = n * sizeof(u64);

	if (copy_to_user(buf, values, size))
3058
		goto unlock;
3059

3060
	ret = size;
3061

3062
	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3063
		n = 0;
3064

3065
		values[n++] = perf_event_read_value(sub, &enabled, &running);
3066 3067 3068 3069 3070
		if (read_format & PERF_FORMAT_ID)
			values[n++] = primary_event_id(sub);

		size = n * sizeof(u64);

3071
		if (copy_to_user(buf + ret, values, size)) {
3072 3073 3074
			ret = -EFAULT;
			goto unlock;
		}
3075 3076

		ret += size;
3077
	}
3078 3079
unlock:
	mutex_unlock(&ctx->mutex);
3080

3081
	return ret;
3082 3083
}

3084
static int perf_event_read_one(struct perf_event *event,
3085 3086
				 u64 read_format, char __user *buf)
{
3087
	u64 enabled, running;
3088 3089 3090
	u64 values[4];
	int n = 0;

3091 3092 3093 3094 3095
	values[n++] = perf_event_read_value(event, &enabled, &running);
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;
3096
	if (read_format & PERF_FORMAT_ID)
3097
		values[n++] = primary_event_id(event);
3098 3099 3100 3101 3102 3103 3104

	if (copy_to_user(buf, values, n * sizeof(u64)))
		return -EFAULT;

	return n * sizeof(u64);
}
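
/*
 * Example (illustrative, not from the original source): for a read_format of
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING |
 * PERF_FORMAT_ID on a non-group event, the buffer returned to user space by
 * the function above is laid out as:
 *
 *	struct {
 *		u64 value;
 *		u64 time_enabled;
 *		u64 time_running;
 *		u64 id;
 *	};
 */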

T
Thomas Gleixner 已提交
3105
/*
3106
 * Read the performance event - simple non blocking version for now
T
Thomas Gleixner 已提交
3107 3108
 */
static ssize_t
3109
perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
T
Thomas Gleixner 已提交
3110
{
3111
	u64 read_format = event->attr.read_format;
3112
	int ret;
T
Thomas Gleixner 已提交
3113

3114
	/*
	 * Return end-of-file for a read on an event that is in
3116 3117 3118
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled on to the CPU at some point).
	 */
3119
	if (event->state == PERF_EVENT_STATE_ERROR)
3120 3121
		return 0;

3122
	if (count < event->read_size)
3123 3124
		return -ENOSPC;

3125
	WARN_ON_ONCE(event->ctx->parent_ctx);
3126
	if (read_format & PERF_FORMAT_GROUP)
3127
		ret = perf_event_read_group(event, read_format, buf);
3128
	else
3129
		ret = perf_event_read_one(event, read_format, buf);
T
Thomas Gleixner 已提交
3130

3131
	return ret;
T
Thomas Gleixner 已提交
3132 3133 3134 3135 3136
}

static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
3137
	struct perf_event *event = file->private_data;
T
Thomas Gleixner 已提交
3138

3139
	return perf_read_hw(event, buf, count);
T
Thomas Gleixner 已提交
3140 3141 3142 3143
}

static unsigned int perf_poll(struct file *file, poll_table *wait)
{
3144
	struct perf_event *event = file->private_data;
3145
	struct ring_buffer *rb;
3146
	unsigned int events = POLL_HUP;
P
Peter Zijlstra 已提交
3147 3148

	rcu_read_lock();
3149 3150 3151
	rb = rcu_dereference(event->rb);
	if (rb)
		events = atomic_xchg(&rb->poll, 0);
P
Peter Zijlstra 已提交
3152
	rcu_read_unlock();
T
Thomas Gleixner 已提交
3153

3154
	poll_wait(file, &event->waitq, wait);
T
Thomas Gleixner 已提交
3155 3156 3157 3158

	return events;
}

3159
static void perf_event_reset(struct perf_event *event)
3160
{
3161
	(void)perf_event_read(event);
3162
	local64_set(&event->count, 0);
3163
	perf_event_update_userpage(event);
P
Peter Zijlstra 已提交
3164 3165
}

3166
/*
3167 3168 3169 3170
 * Holding the top-level event's child_mutex means that any
 * descendant process that has inherited this event will block
 * in sync_child_event if it goes to exit, thus satisfying the
 * task existence requirements of perf_event_enable/disable.
3171
 */
3172 3173
static void perf_event_for_each_child(struct perf_event *event,
					void (*func)(struct perf_event *))
P
Peter Zijlstra 已提交
3174
{
3175
	struct perf_event *child;
P
Peter Zijlstra 已提交
3176

3177 3178 3179 3180
	WARN_ON_ONCE(event->ctx->parent_ctx);
	mutex_lock(&event->child_mutex);
	func(event);
	list_for_each_entry(child, &event->child_list, child_list)
P
Peter Zijlstra 已提交
3181
		func(child);
3182
	mutex_unlock(&event->child_mutex);
P
Peter Zijlstra 已提交
3183 3184
}

3185 3186
static void perf_event_for_each(struct perf_event *event,
				  void (*func)(struct perf_event *))
P
Peter Zijlstra 已提交
3187
{
3188 3189
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *sibling;
P
Peter Zijlstra 已提交
3190

3191 3192
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
3193
	event = event->group_leader;
3194

3195 3196 3197 3198
	perf_event_for_each_child(event, func);
	func(event);
	list_for_each_entry(sibling, &event->sibling_list, group_entry)
		perf_event_for_each_child(event, func);
3199
	mutex_unlock(&ctx->mutex);
3200 3201
}

3202
static int perf_event_period(struct perf_event *event, u64 __user *arg)
3203
{
3204
	struct perf_event_context *ctx = event->ctx;
3205 3206 3207
	int ret = 0;
	u64 value;

3208
	if (!is_sampling_event(event))
3209 3210
		return -EINVAL;

3211
	if (copy_from_user(&value, arg, sizeof(value)))
3212 3213 3214 3215 3216
		return -EFAULT;

	if (!value)
		return -EINVAL;

3217
	raw_spin_lock_irq(&ctx->lock);
3218 3219
	if (event->attr.freq) {
		if (value > sysctl_perf_event_sample_rate) {
3220 3221 3222 3223
			ret = -EINVAL;
			goto unlock;
		}

3224
		event->attr.sample_freq = value;
3225
	} else {
3226 3227
		event->attr.sample_period = value;
		event->hw.sample_period = value;
3228 3229
	}
unlock:
3230
	raw_spin_unlock_irq(&ctx->lock);
3231 3232 3233 3234

	return ret;
}
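
/*
 * Example (not part of the original file): user space changes the period of
 * an existing sampling event through the ioctl that lands here:
 *
 *	u64 period = 100000;
 *	ioctl(fd, PERF_EVENT_IOC_PERIOD, &period);
 *
 * For events created with attr.freq = 1 the same call reinterprets the
 * value as a new sample_freq instead.
 */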

3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255
static const struct file_operations perf_fops;

static struct perf_event *perf_fget_light(int fd, int *fput_needed)
{
	struct file *file;

	file = fget_light(fd, fput_needed);
	if (!file)
		return ERR_PTR(-EBADF);

	if (file->f_op != &perf_fops) {
		fput_light(file, *fput_needed);
		*fput_needed = 0;
		return ERR_PTR(-EBADF);
	}

	return file->private_data;
}

static int perf_event_set_output(struct perf_event *event,
				 struct perf_event *output_event);
L
Li Zefan 已提交
3256
static int perf_event_set_filter(struct perf_event *event, void __user *arg);
3257

3258 3259
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
3260 3261
	struct perf_event *event = file->private_data;
	void (*func)(struct perf_event *);
P
Peter Zijlstra 已提交
3262
	u32 flags = arg;
3263 3264

	switch (cmd) {
3265 3266
	case PERF_EVENT_IOC_ENABLE:
		func = perf_event_enable;
3267
		break;
3268 3269
	case PERF_EVENT_IOC_DISABLE:
		func = perf_event_disable;
3270
		break;
3271 3272
	case PERF_EVENT_IOC_RESET:
		func = perf_event_reset;
3273
		break;
P
Peter Zijlstra 已提交
3274

3275 3276
	case PERF_EVENT_IOC_REFRESH:
		return perf_event_refresh(event, arg);
3277

3278 3279
	case PERF_EVENT_IOC_PERIOD:
		return perf_event_period(event, (u64 __user *)arg);
3280

3281
	case PERF_EVENT_IOC_SET_OUTPUT:
3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298
	{
		struct perf_event *output_event = NULL;
		int fput_needed = 0;
		int ret;

		if (arg != -1) {
			output_event = perf_fget_light(arg, &fput_needed);
			if (IS_ERR(output_event))
				return PTR_ERR(output_event);
		}

		ret = perf_event_set_output(event, output_event);
		if (output_event)
			fput_light(output_event->filp, fput_needed);

		return ret;
	}
3299

L
Li Zefan 已提交
3300 3301 3302
	case PERF_EVENT_IOC_SET_FILTER:
		return perf_event_set_filter(event, (void __user *)arg);

3303
	default:
P
Peter Zijlstra 已提交
3304
		return -ENOTTY;
3305
	}
P
Peter Zijlstra 已提交
3306 3307

	if (flags & PERF_IOC_FLAG_GROUP)
3308
		perf_event_for_each(event, func);
P
Peter Zijlstra 已提交
3309
	else
3310
		perf_event_for_each_child(event, func);
P
Peter Zijlstra 已提交
3311 3312

	return 0;
3313 3314
}

3315
int perf_event_task_enable(void)
3316
{
3317
	struct perf_event *event;
3318

3319 3320 3321 3322
	mutex_lock(&current->perf_event_mutex);
	list_for_each_entry(event, &current->perf_event_list, owner_entry)
		perf_event_for_each_child(event, perf_event_enable);
	mutex_unlock(&current->perf_event_mutex);
3323 3324 3325 3326

	return 0;
}

3327
int perf_event_task_disable(void)
3328
{
3329
	struct perf_event *event;
3330

3331 3332 3333 3334
	mutex_lock(&current->perf_event_mutex);
	list_for_each_entry(event, &current->perf_event_list, owner_entry)
		perf_event_for_each_child(event, perf_event_disable);
	mutex_unlock(&current->perf_event_mutex);
3335 3336 3337 3338

	return 0;
}

3339 3340
#ifndef PERF_EVENT_INDEX_OFFSET
# define PERF_EVENT_INDEX_OFFSET 0
I
Ingo Molnar 已提交
3341 3342
#endif

3343
static int perf_event_index(struct perf_event *event)
3344
{
P
Peter Zijlstra 已提交
3345 3346 3347
	if (event->hw.state & PERF_HES_STOPPED)
		return 0;

3348
	if (event->state != PERF_EVENT_STATE_ACTIVE)
3349 3350
		return 0;

3351
	return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
3352 3353
}

3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365
static void calc_timer_values(struct perf_event *event,
				u64 *running,
				u64 *enabled)
{
	u64 now, ctx_time;

	now = perf_clock();
	ctx_time = event->shadow_ctx_time + now;
	*enabled = ctx_time - event->tstamp_enabled;
	*running = ctx_time - event->tstamp_running;
}

3366 3367 3368 3369 3370
/*
 * Callers need to ensure there can be no nesting of this function, otherwise
 * the seqlock logic goes bad. We can not serialize this because the arch
 * code calls this from NMI context.
 */
3371
void perf_event_update_userpage(struct perf_event *event)
3372
{
3373
	struct perf_event_mmap_page *userpg;
3374
	struct ring_buffer *rb;
3375
	u64 enabled, running;
3376 3377

	rcu_read_lock();
3378 3379 3380 3381 3382 3383 3384 3385 3386 3387
	/*
	 * compute total_time_enabled, total_time_running
	 * based on snapshot values taken when the event
	 * was last scheduled in.
	 *
	 * we cannot simply call update_context_time()
	 * because of locking issues, as we can be called in
	 * NMI context
	 */
	calc_timer_values(event, &enabled, &running);
3388 3389
	rb = rcu_dereference(event->rb);
	if (!rb)
3390 3391
		goto unlock;

3392
	userpg = rb->user_page;
3393

3394 3395 3396 3397 3398
	/*
	 * Disable preemption so as to not let the corresponding user-space
	 * spin too long if we get preempted.
	 */
	preempt_disable();
3399
	++userpg->lock;
3400
	barrier();
3401
	userpg->index = perf_event_index(event);
P
Peter Zijlstra 已提交
3402
	userpg->offset = perf_event_count(event);
3403
	if (event->state == PERF_EVENT_STATE_ACTIVE)
3404
		userpg->offset -= local64_read(&event->hw.prev_count);
3405

3406
	userpg->time_enabled = enabled +
3407
			atomic64_read(&event->child_total_time_enabled);
3408

3409
	userpg->time_running = running +
3410
			atomic64_read(&event->child_total_time_running);
3411

3412
	barrier();
3413
	++userpg->lock;
3414
	preempt_enable();
3415
unlock:
3416
	rcu_read_unlock();
3417 3418
}
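
/*
 * Example (illustrative only): user space is expected to read the mmap'ed
 * page with a seqlock-style retry loop that pairs with the lock/barrier
 * dance above, e.g.:
 *
 *	struct perf_event_mmap_page *pc = mapped_page;
 *	u64 count, seq;
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		count = pc->offset;	// plus a hardware counter read via
 *					// pc->index, when index is non-zero
 *		barrier();
 *	} while (pc->lock != seq);
 */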

3419 3420 3421
static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct perf_event *event = vma->vm_file->private_data;
3422
	struct ring_buffer *rb;
3423 3424 3425 3426 3427 3428 3429 3430 3431
	int ret = VM_FAULT_SIGBUS;

	if (vmf->flags & FAULT_FLAG_MKWRITE) {
		if (vmf->pgoff == 0)
			ret = 0;
		return ret;
	}

	rcu_read_lock();
3432 3433
	rb = rcu_dereference(event->rb);
	if (!rb)
3434 3435 3436 3437 3438
		goto unlock;

	if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
		goto unlock;

3439
	vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453
	if (!vmf->page)
		goto unlock;

	get_page(vmf->page);
	vmf->page->mapping = vma->vm_file->f_mapping;
	vmf->page->index   = vmf->pgoff;

	ret = 0;
unlock:
	rcu_read_unlock();

	return ret;
}

3454
static void rb_free_rcu(struct rcu_head *rcu_head)
3455
{
3456
	struct ring_buffer *rb;
3457

3458 3459
	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
	rb_free(rb);
3460 3461
}

3462
static struct ring_buffer *ring_buffer_get(struct perf_event *event)
3463
{
3464
	struct ring_buffer *rb;
3465

3466
	rcu_read_lock();
3467 3468 3469 3470
	rb = rcu_dereference(event->rb);
	if (rb) {
		if (!atomic_inc_not_zero(&rb->refcount))
			rb = NULL;
3471 3472 3473
	}
	rcu_read_unlock();

3474
	return rb;
3475 3476
}

3477
static void ring_buffer_put(struct ring_buffer *rb)
3478
{
3479
	if (!atomic_dec_and_test(&rb->refcount))
3480
		return;
3481

3482
	call_rcu(&rb->rcu_head, rb_free_rcu);
3483 3484 3485 3486
}

static void perf_mmap_open(struct vm_area_struct *vma)
{
3487
	struct perf_event *event = vma->vm_file->private_data;
3488

3489
	atomic_inc(&event->mmap_count);
3490 3491 3492 3493
}

static void perf_mmap_close(struct vm_area_struct *vma)
{
3494
	struct perf_event *event = vma->vm_file->private_data;
3495

3496
	if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
3497
		unsigned long size = perf_data_size(event->rb);
3498
		struct user_struct *user = event->mmap_user;
3499
		struct ring_buffer *rb = event->rb;
3500

3501
		atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
3502
		vma->vm_mm->locked_vm -= event->mmap_locked;
3503
		rcu_assign_pointer(event->rb, NULL);
3504
		mutex_unlock(&event->mmap_mutex);
3505

3506
		ring_buffer_put(rb);
3507
		free_uid(user);
3508
	}
3509 3510
}

3511
static const struct vm_operations_struct perf_mmap_vmops = {
3512 3513 3514 3515
	.open		= perf_mmap_open,
	.close		= perf_mmap_close,
	.fault		= perf_mmap_fault,
	.page_mkwrite	= perf_mmap_fault,
3516 3517 3518 3519
};

static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
3520
	struct perf_event *event = file->private_data;
3521
	unsigned long user_locked, user_lock_limit;
3522
	struct user_struct *user = current_user();
3523
	unsigned long locked, lock_limit;
3524
	struct ring_buffer *rb;
3525 3526
	unsigned long vma_size;
	unsigned long nr_pages;
3527
	long user_extra, extra;
3528
	int ret = 0, flags = 0;
3529

3530 3531 3532
	/*
	 * Don't allow mmap() of inherited per-task counters. This would
	 * create a performance issue due to all children writing to the
3533
	 * same rb.
3534 3535 3536 3537
	 */
	if (event->cpu == -1 && event->attr.inherit)
		return -EINVAL;

3538
	if (!(vma->vm_flags & VM_SHARED))
3539
		return -EINVAL;
3540 3541 3542 3543

	vma_size = vma->vm_end - vma->vm_start;
	nr_pages = (vma_size / PAGE_SIZE) - 1;

3544
	/*
3545
	 * If we have rb pages ensure they're a power-of-two number, so we
3546 3547 3548
	 * can do bitmasks instead of modulo.
	 */
	if (nr_pages != 0 && !is_power_of_2(nr_pages))
3549 3550
		return -EINVAL;

3551
	if (vma_size != PAGE_SIZE * (1 + nr_pages))
3552 3553
		return -EINVAL;

3554 3555
	if (vma->vm_pgoff != 0)
		return -EINVAL;
3556

3557 3558
	WARN_ON_ONCE(event->ctx->parent_ctx);
	mutex_lock(&event->mmap_mutex);
3559 3560 3561
	if (event->rb) {
		if (event->rb->nr_pages == nr_pages)
			atomic_inc(&event->rb->refcount);
3562
		else
3563 3564 3565 3566
			ret = -EINVAL;
		goto unlock;
	}

3567
	user_extra = nr_pages + 1;
3568
	user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
I
Ingo Molnar 已提交
3569 3570 3571 3572 3573 3574

	/*
	 * Increase the limit linearly with more CPUs:
	 */
	user_lock_limit *= num_online_cpus();

3575
	user_locked = atomic_long_read(&user->locked_vm) + user_extra;
3576

3577 3578 3579
	extra = 0;
	if (user_locked > user_lock_limit)
		extra = user_locked - user_lock_limit;
3580

3581
	lock_limit = rlimit(RLIMIT_MEMLOCK);
3582
	lock_limit >>= PAGE_SHIFT;
3583
	locked = vma->vm_mm->locked_vm + extra;
3584

3585 3586
	if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
		!capable(CAP_IPC_LOCK)) {
3587 3588 3589
		ret = -EPERM;
		goto unlock;
	}
3590

3591
	WARN_ON(event->rb);
3592

3593
	if (vma->vm_flags & VM_WRITE)
3594
		flags |= RING_BUFFER_WRITABLE;
3595

3596 3597 3598 3599
	rb = rb_alloc(nr_pages, 
		event->attr.watermark ? event->attr.wakeup_watermark : 0,
		event->cpu, flags);

3600
	if (!rb) {
3601
		ret = -ENOMEM;
3602
		goto unlock;
3603
	}
3604
	rcu_assign_pointer(event->rb, rb);
3605

3606 3607 3608 3609 3610
	atomic_long_add(user_extra, &user->locked_vm);
	event->mmap_locked = extra;
	event->mmap_user = get_current_user();
	vma->vm_mm->locked_vm += event->mmap_locked;

3611
unlock:
3612 3613
	if (!ret)
		atomic_inc(&event->mmap_count);
3614
	mutex_unlock(&event->mmap_mutex);
3615 3616 3617

	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &perf_mmap_vmops;
3618 3619

	return ret;
3620 3621
}
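
/*
 * Example (not part of the original file): the checks above mean a caller
 * maps one metadata page plus a power-of-two number of data pages:
 *
 *	size_t pages = 1 + (1 << 4);	// header page + 16 data pages
 *	void *base = mmap(NULL, pages * sysconf(_SC_PAGESIZE),
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * Anything else fails the nr_pages/vma_size checks with -EINVAL.
 */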

P
Peter Zijlstra 已提交
3622 3623 3624
static int perf_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
3625
	struct perf_event *event = filp->private_data;
P
Peter Zijlstra 已提交
3626 3627 3628
	int retval;

	mutex_lock(&inode->i_mutex);
3629
	retval = fasync_helper(fd, filp, on, &event->fasync);
P
Peter Zijlstra 已提交
3630 3631 3632 3633 3634 3635 3636 3637
	mutex_unlock(&inode->i_mutex);

	if (retval < 0)
		return retval;

	return 0;
}

T
Thomas Gleixner 已提交
3638
static const struct file_operations perf_fops = {
3639
	.llseek			= no_llseek,
T
Thomas Gleixner 已提交
3640 3641 3642
	.release		= perf_release,
	.read			= perf_read,
	.poll			= perf_poll,
3643 3644
	.unlocked_ioctl		= perf_ioctl,
	.compat_ioctl		= perf_ioctl,
3645
	.mmap			= perf_mmap,
P
Peter Zijlstra 已提交
3646
	.fasync			= perf_fasync,
T
Thomas Gleixner 已提交
3647 3648
};

3649
/*
3650
 * Perf event wakeup
3651 3652 3653 3654 3655
 *
 * If there's data, ensure we set the poll() state and publish everything
 * to user-space before waking everybody up.
 */

3656
void perf_event_wakeup(struct perf_event *event)
3657
{
3658
	wake_up_all(&event->waitq);
3659

3660 3661 3662
	if (event->pending_kill) {
		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
		event->pending_kill = 0;
3663
	}
3664 3665
}

3666
static void perf_pending_event(struct irq_work *entry)
3667
{
3668 3669
	struct perf_event *event = container_of(entry,
			struct perf_event, pending);
3670

3671 3672 3673
	if (event->pending_disable) {
		event->pending_disable = 0;
		__perf_event_disable(event);
3674 3675
	}

3676 3677 3678
	if (event->pending_wakeup) {
		event->pending_wakeup = 0;
		perf_event_wakeup(event);
3679 3680 3681
	}
}

3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702
/*
 * We assume there is only KVM supporting the callbacks.
 * Later on, we might change it to a list if there is
 * another virtualization implementation supporting the callbacks.
 */
struct perf_guest_info_callbacks *perf_guest_cbs;

int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
	perf_guest_cbs = cbs;
	return 0;
}
EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);

int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
	perf_guest_cbs = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);

3703 3704 3705
static void __perf_event_header__init_id(struct perf_event_header *header,
					 struct perf_sample_data *data,
					 struct perf_event *event)
3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732
{
	u64 sample_type = event->attr.sample_type;

	data->type = sample_type;
	header->size += event->id_header_size;

	if (sample_type & PERF_SAMPLE_TID) {
		/* namespace issues */
		data->tid_entry.pid = perf_event_pid(event, current);
		data->tid_entry.tid = perf_event_tid(event, current);
	}

	if (sample_type & PERF_SAMPLE_TIME)
		data->time = perf_clock();

	if (sample_type & PERF_SAMPLE_ID)
		data->id = primary_event_id(event);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		data->stream_id = event->id;

	if (sample_type & PERF_SAMPLE_CPU) {
		data->cpu_entry.cpu	 = raw_smp_processor_id();
		data->cpu_entry.reserved = 0;
	}
}

3733 3734 3735
void perf_event_header__init_id(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event)
3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761
{
	if (event->attr.sample_id_all)
		__perf_event_header__init_id(header, data, event);
}

static void __perf_event__output_id_sample(struct perf_output_handle *handle,
					   struct perf_sample_data *data)
{
	u64 sample_type = data->type;

	if (sample_type & PERF_SAMPLE_TID)
		perf_output_put(handle, data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		perf_output_put(handle, data->time);

	if (sample_type & PERF_SAMPLE_ID)
		perf_output_put(handle, data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		perf_output_put(handle, data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		perf_output_put(handle, data->cpu_entry);
}

void perf_event__output_id_sample(struct perf_event *event,
				  struct perf_output_handle *handle,
				  struct perf_sample_data *sample)
{
	if (event->attr.sample_id_all)
		__perf_event__output_id_sample(handle, sample);
}

static void perf_output_read_one(struct perf_output_handle *handle,
				 struct perf_event *event,
				 u64 enabled, u64 running)
{
	u64 read_format = event->attr.read_format;
	u64 values[4];
	int n = 0;

	values[n++] = perf_event_count(event);
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
		values[n++] = enabled +
			atomic64_read(&event->child_total_time_enabled);
	}
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
		values[n++] = running +
			atomic64_read(&event->child_total_time_running);
	}
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(event);

	__output_copy(handle, values, n * sizeof(u64));
}

/*
 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
 */
static void perf_output_read_group(struct perf_output_handle *handle,
			    struct perf_event *event,
			    u64 enabled, u64 running)
{
	struct perf_event *leader = event->group_leader, *sub;
	u64 read_format = event->attr.read_format;
	u64 values[5];
	int n = 0;

	values[n++] = 1 + leader->nr_siblings;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;

	if (leader != event)
		leader->pmu->read(leader);

	values[n++] = perf_event_count(leader);
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(leader);

	__output_copy(handle, values, n * sizeof(u64));

	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		n = 0;

		if (sub != event)
			sub->pmu->read(sub);

		values[n++] = perf_event_count(sub);
		if (read_format & PERF_FORMAT_ID)
			values[n++] = primary_event_id(sub);

		__output_copy(handle, values, n * sizeof(u64));
	}
}

#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
				 PERF_FORMAT_TOTAL_TIME_RUNNING)

static void perf_output_read(struct perf_output_handle *handle,
			     struct perf_event *event)
{
	u64 enabled = 0, running = 0;
	u64 read_format = event->attr.read_format;

	/*
	 * compute total_time_enabled, total_time_running
	 * based on snapshot values taken when the event
	 * was last scheduled in.
	 *
	 * we cannot simply call update_context_time()
	 * because of locking issues, as we are called in
	 * NMI context
	 */
	if (read_format & PERF_FORMAT_TOTAL_TIMES)
		calc_timer_values(event, &enabled, &running);

	if (event->attr.read_format & PERF_FORMAT_GROUP)
		perf_output_read_group(handle, event, enabled, running);
	else
		perf_output_read_one(handle, event, enabled, running);
}

void perf_output_sample(struct perf_output_handle *handle,
			struct perf_event_header *header,
			struct perf_sample_data *data,
			struct perf_event *event)
{
	u64 sample_type = data->type;

	perf_output_put(handle, *header);

	if (sample_type & PERF_SAMPLE_IP)
		perf_output_put(handle, data->ip);

	if (sample_type & PERF_SAMPLE_TID)
		perf_output_put(handle, data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		perf_output_put(handle, data->time);

	if (sample_type & PERF_SAMPLE_ADDR)
		perf_output_put(handle, data->addr);

	if (sample_type & PERF_SAMPLE_ID)
		perf_output_put(handle, data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		perf_output_put(handle, data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		perf_output_put(handle, data->cpu_entry);

	if (sample_type & PERF_SAMPLE_PERIOD)
		perf_output_put(handle, data->period);

	if (sample_type & PERF_SAMPLE_READ)
		perf_output_read(handle, event);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		if (data->callchain) {
			int size = 1;

			if (data->callchain)
				size += data->callchain->nr;

			size *= sizeof(u64);

			__output_copy(handle, data->callchain, size);
		} else {
			u64 nr = 0;
			perf_output_put(handle, nr);
		}
	}

	if (sample_type & PERF_SAMPLE_RAW) {
		if (data->raw) {
			perf_output_put(handle, data->raw->size);
			__output_copy(handle, data->raw->data,
					   data->raw->size);
		} else {
			struct {
				u32	size;
				u32	data;
			} raw = {
				.size = sizeof(u32),
				.data = 0,
			};
			perf_output_put(handle, raw);
		}
	}
}

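/*
 * Set up the record header (type, misc, size) and gather the variable
 * sized parts (callchain, raw data) so the output space can be sized
 * before perf_output_begin().
 */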
void perf_prepare_sample(struct perf_event_header *header,
			 struct perf_sample_data *data,
			 struct perf_event *event,
			 struct pt_regs *regs)
{
	u64 sample_type = event->attr.sample_type;

	header->type = PERF_RECORD_SAMPLE;
	header->size = sizeof(*header) + event->header_size;

	header->misc = 0;
	header->misc |= perf_misc_flags(regs);

	__perf_event_header__init_id(header, data, event);

	if (sample_type & PERF_SAMPLE_IP)
		data->ip = perf_instruction_pointer(regs);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		int size = 1;

		data->callchain = perf_callchain(regs);

		if (data->callchain)
			size += data->callchain->nr;

		header->size += size * sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_RAW) {
		int size = sizeof(u32);

		if (data->raw)
			size += data->raw->size;
		else
			size += sizeof(u32);

		WARN_ON_ONCE(size & (sizeof(u64)-1));
		header->size += size;
	}
}

static void perf_event_output(struct perf_event *event, int nmi,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct perf_output_handle handle;
	struct perf_event_header header;

	/* protect the callchain buffers */
	rcu_read_lock();

	perf_prepare_sample(&header, data, event, regs);

	if (perf_output_begin(&handle, event, header.size, nmi, 1))
		goto exit;

	perf_output_sample(&handle, &header, data, event);

	perf_output_end(&handle);

exit:
	rcu_read_unlock();
}

/*
 * read event_id
 */

struct perf_read_event {
	struct perf_event_header	header;

	u32				pid;
	u32				tid;
};

static void
perf_event_read_event(struct perf_event *event,
			struct task_struct *task)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	struct perf_read_event read_event = {
		.header = {
			.type = PERF_RECORD_READ,
			.misc = 0,
			.size = sizeof(read_event) + event->read_size,
		},
		.pid = perf_event_pid(event, task),
		.tid = perf_event_tid(event, task),
	};
	int ret;

	perf_event_header__init_id(&read_event.header, &sample, event);
	ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
	if (ret)
		return;

	perf_output_put(&handle, read_event);
	perf_output_read(&handle, event);
	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
}

/*
 * task tracking -- fork/exit
 *
 * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
 */

struct perf_task_event {
	struct task_struct		*task;
	struct perf_event_context	*task_ctx;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				ppid;
		u32				tid;
		u32				ptid;
		u64				time;
	} event_id;
};

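/*
 * Emit one fork/exit record for @event; pid/tid values are translated
 * through perf_event_pid()/perf_event_tid() before being written out.
 */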
static void perf_event_task_output(struct perf_event *event,
				     struct perf_task_event *task_event)
{
	struct perf_output_handle handle;
	struct perf_sample_data	sample;
	struct task_struct *task = task_event->task;
	int ret, size = task_event->event_id.header.size;

	perf_event_header__init_id(&task_event->event_id.header, &sample, event);

	ret = perf_output_begin(&handle, event,
				task_event->event_id.header.size, 0, 0);
	if (ret)
		goto out;

	task_event->event_id.pid = perf_event_pid(event, task);
	task_event->event_id.ppid = perf_event_pid(event, current);

	task_event->event_id.tid = perf_event_tid(event, task);
	task_event->event_id.ptid = perf_event_tid(event, current);

	perf_output_put(&handle, task_event->event_id);

	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
out:
	task_event->event_id.header.size = size;
}

static int perf_event_task_match(struct perf_event *event)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (!event_filter_match(event))
		return 0;

	if (event->attr.comm || event->attr.mmap ||
	    event->attr.mmap_data || event->attr.task)
		return 1;

	return 0;
}

static void perf_event_task_ctx(struct perf_event_context *ctx,
				  struct perf_task_event *task_event)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_task_match(event))
			perf_event_task_output(event, task_event);
	}
}

static void perf_event_task_event(struct perf_task_event *task_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int ctxn;

	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
		if (cpuctx->active_pmu != pmu)
			goto next;
		perf_event_task_ctx(&cpuctx->ctx, task_event);

		ctx = task_event->task_ctx;
		if (!ctx) {
			ctxn = pmu->task_ctx_nr;
			if (ctxn < 0)
				goto next;
			ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
		}
		if (ctx)
			perf_event_task_ctx(ctx, task_event);
next:
		put_cpu_ptr(pmu->pmu_cpu_context);
	}
	rcu_read_unlock();
}

static void perf_event_task(struct task_struct *task,
			      struct perf_event_context *task_ctx,
			      int new)
{
	struct perf_task_event task_event;

	if (!atomic_read(&nr_comm_events) &&
	    !atomic_read(&nr_mmap_events) &&
	    !atomic_read(&nr_task_events))
		return;

	task_event = (struct perf_task_event){
		.task	  = task,
		.task_ctx = task_ctx,
		.event_id    = {
			.header = {
				.type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
				.misc = 0,
				.size = sizeof(task_event.event_id),
			},
			/* .pid  */
			/* .ppid */
			/* .tid  */
			/* .ptid */
			.time = perf_clock(),
		},
	};

	perf_event_task_event(&task_event);
}

void perf_event_fork(struct task_struct *task)
{
	perf_event_task(task, NULL, 1);
}

4180 4181 4182 4183 4184
/*
 * comm tracking
 */

struct perf_comm_event {
4185 4186
	struct task_struct	*task;
	char			*comm;
4187 4188 4189 4190 4191 4192 4193
	int			comm_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
4194
	} event_id;
4195 4196
};

4197
static void perf_event_comm_output(struct perf_event *event,
4198 4199 4200
				     struct perf_comm_event *comm_event)
{
	struct perf_output_handle handle;
4201
	struct perf_sample_data sample;
4202
	int size = comm_event->event_id.header.size;
4203 4204 4205 4206 4207
	int ret;

	perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
	ret = perf_output_begin(&handle, event,
				comm_event->event_id.header.size, 0, 0);
4208 4209

	if (ret)
4210
		goto out;
4211

4212 4213
	comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
	comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
4214

4215
	perf_output_put(&handle, comm_event->event_id);
4216
	__output_copy(&handle, comm_event->comm,
4217
				   comm_event->comm_size);
4218 4219 4220

	perf_event__output_id_sample(event, &handle, &sample);

4221
	perf_output_end(&handle);
4222 4223
out:
	comm_event->event_id.header.size = size;
4224 4225
}

4226
static int perf_event_comm_match(struct perf_event *event)
4227
{
P
Peter Zijlstra 已提交
4228
	if (event->state < PERF_EVENT_STATE_INACTIVE)
4229 4230
		return 0;

4231
	if (!event_filter_match(event))
4232 4233
		return 0;

4234
	if (event->attr.comm)
4235 4236 4237 4238 4239
		return 1;

	return 0;
}

4240
static void perf_event_comm_ctx(struct perf_event_context *ctx,
4241 4242
				  struct perf_comm_event *comm_event)
{
4243
	struct perf_event *event;
4244

4245 4246 4247
	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_comm_match(event))
			perf_event_comm_output(event, comm_event);
4248 4249 4250
	}
}

4251
static void perf_event_comm_event(struct perf_comm_event *comm_event)
4252 4253
{
	struct perf_cpu_context *cpuctx;
4254
	struct perf_event_context *ctx;
4255
	char comm[TASK_COMM_LEN];
4256
	unsigned int size;
P
Peter Zijlstra 已提交
4257
	struct pmu *pmu;
P
Peter Zijlstra 已提交
4258
	int ctxn;
4259

4260
	memset(comm, 0, sizeof(comm));
4261
	strlcpy(comm, comm_event->task->comm, sizeof(comm));
4262
	size = ALIGN(strlen(comm)+1, sizeof(u64));
4263 4264 4265 4266

	comm_event->comm = comm;
	comm_event->comm_size = size;

4267
	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
4268
	rcu_read_lock();
P
Peter Zijlstra 已提交
4269
	list_for_each_entry_rcu(pmu, &pmus, entry) {
4270
		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4271 4272
		if (cpuctx->active_pmu != pmu)
			goto next;
P
Peter Zijlstra 已提交
4273
		perf_event_comm_ctx(&cpuctx->ctx, comm_event);
P
Peter Zijlstra 已提交
4274 4275 4276

		ctxn = pmu->task_ctx_nr;
		if (ctxn < 0)
4277
			goto next;
P
Peter Zijlstra 已提交
4278 4279 4280 4281

		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
		if (ctx)
			perf_event_comm_ctx(ctx, comm_event);
4282 4283
next:
		put_cpu_ptr(pmu->pmu_cpu_context);
P
Peter Zijlstra 已提交
4284
	}
4285
	rcu_read_unlock();
4286 4287
}

4288
void perf_event_comm(struct task_struct *task)
4289
{
4290
	struct perf_comm_event comm_event;
P
Peter Zijlstra 已提交
4291 4292
	struct perf_event_context *ctx;
	int ctxn;
4293

P
Peter Zijlstra 已提交
4294 4295 4296 4297
	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;
4298

P
Peter Zijlstra 已提交
4299 4300
		perf_event_enable_on_exec(ctx);
	}
4301

4302
	if (!atomic_read(&nr_comm_events))
4303
		return;
4304

4305
	comm_event = (struct perf_comm_event){
4306
		.task	= task,
4307 4308
		/* .comm      */
		/* .comm_size */
4309
		.event_id  = {
4310
			.header = {
4311
				.type = PERF_RECORD_COMM,
4312 4313 4314 4315 4316
				.misc = 0,
				/* .size */
			},
			/* .pid */
			/* .tid */
4317 4318 4319
		},
	};

4320
	perf_event_comm_event(&comm_event);
4321 4322
}

4323 4324 4325 4326 4327
/*
 * mmap tracking
 */

struct perf_mmap_event {
4328 4329 4330 4331
	struct vm_area_struct	*vma;

	const char		*file_name;
	int			file_size;
4332 4333 4334 4335 4336 4337 4338 4339 4340

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
		u64				start;
		u64				len;
		u64				pgoff;
4341
	} event_id;
4342 4343
};

4344
static void perf_event_mmap_output(struct perf_event *event,
4345 4346 4347
				     struct perf_mmap_event *mmap_event)
{
	struct perf_output_handle handle;
4348
	struct perf_sample_data sample;
4349
	int size = mmap_event->event_id.header.size;
4350
	int ret;
4351

4352 4353 4354
	perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
	ret = perf_output_begin(&handle, event,
				mmap_event->event_id.header.size, 0, 0);
4355
	if (ret)
4356
		goto out;
4357

4358 4359
	mmap_event->event_id.pid = perf_event_pid(event, current);
	mmap_event->event_id.tid = perf_event_tid(event, current);
4360

4361
	perf_output_put(&handle, mmap_event->event_id);
4362
	__output_copy(&handle, mmap_event->file_name,
4363
				   mmap_event->file_size);
4364 4365 4366

	perf_event__output_id_sample(event, &handle, &sample);

4367
	perf_output_end(&handle);
4368 4369
out:
	mmap_event->event_id.header.size = size;
4370 4371
}

4372
static int perf_event_mmap_match(struct perf_event *event,
4373 4374
				   struct perf_mmap_event *mmap_event,
				   int executable)
4375
{
P
Peter Zijlstra 已提交
4376
	if (event->state < PERF_EVENT_STATE_INACTIVE)
4377 4378
		return 0;

4379
	if (!event_filter_match(event))
4380 4381
		return 0;

4382 4383
	if ((!executable && event->attr.mmap_data) ||
	    (executable && event->attr.mmap))
4384 4385 4386 4387 4388
		return 1;

	return 0;
}

4389
static void perf_event_mmap_ctx(struct perf_event_context *ctx,
4390 4391
				  struct perf_mmap_event *mmap_event,
				  int executable)
4392
{
4393
	struct perf_event *event;
4394

4395
	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4396
		if (perf_event_mmap_match(event, mmap_event, executable))
4397
			perf_event_mmap_output(event, mmap_event);
4398 4399 4400
	}
}

4401
static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
4402 4403
{
	struct perf_cpu_context *cpuctx;
4404
	struct perf_event_context *ctx;
4405 4406
	struct vm_area_struct *vma = mmap_event->vma;
	struct file *file = vma->vm_file;
4407 4408 4409
	unsigned int size;
	char tmp[16];
	char *buf = NULL;
4410
	const char *name;
P
Peter Zijlstra 已提交
4411
	struct pmu *pmu;
P
Peter Zijlstra 已提交
4412
	int ctxn;
4413

4414 4415
	memset(tmp, 0, sizeof(tmp));

4416
	if (file) {
4417
		/*
4418
		 * d_path works from the end of the rb backwards, so we
4419 4420 4421 4422
		 * need to add enough zero bytes after the string to handle
		 * the 64bit alignment we do later.
		 */
		buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
4423 4424 4425 4426
		if (!buf) {
			name = strncpy(tmp, "//enomem", sizeof(tmp));
			goto got_name;
		}
4427
		name = d_path(&file->f_path, buf, PATH_MAX);
4428 4429 4430 4431 4432
		if (IS_ERR(name)) {
			name = strncpy(tmp, "//toolong", sizeof(tmp));
			goto got_name;
		}
	} else {
4433 4434 4435
		if (arch_vma_name(mmap_event->vma)) {
			name = strncpy(tmp, arch_vma_name(mmap_event->vma),
				       sizeof(tmp));
4436
			goto got_name;
4437
		}
4438 4439 4440 4441

		if (!vma->vm_mm) {
			name = strncpy(tmp, "[vdso]", sizeof(tmp));
			goto got_name;
4442 4443 4444 4445 4446 4447 4448 4449
		} else if (vma->vm_start <= vma->vm_mm->start_brk &&
				vma->vm_end >= vma->vm_mm->brk) {
			name = strncpy(tmp, "[heap]", sizeof(tmp));
			goto got_name;
		} else if (vma->vm_start <= vma->vm_mm->start_stack &&
				vma->vm_end >= vma->vm_mm->start_stack) {
			name = strncpy(tmp, "[stack]", sizeof(tmp));
			goto got_name;
4450 4451
		}

4452 4453 4454 4455 4456
		name = strncpy(tmp, "//anon", sizeof(tmp));
		goto got_name;
	}

got_name:
4457
	size = ALIGN(strlen(name)+1, sizeof(u64));
4458 4459 4460 4461

	mmap_event->file_name = name;
	mmap_event->file_size = size;

4462
	mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
4463

4464
	rcu_read_lock();
P
Peter Zijlstra 已提交
4465
	list_for_each_entry_rcu(pmu, &pmus, entry) {
4466
		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4467 4468
		if (cpuctx->active_pmu != pmu)
			goto next;
P
Peter Zijlstra 已提交
4469 4470
		perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
					vma->vm_flags & VM_EXEC);
P
Peter Zijlstra 已提交
4471 4472 4473

		ctxn = pmu->task_ctx_nr;
		if (ctxn < 0)
4474
			goto next;
P
Peter Zijlstra 已提交
4475 4476 4477 4478 4479 4480

		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
		if (ctx) {
			perf_event_mmap_ctx(ctx, mmap_event,
					vma->vm_flags & VM_EXEC);
		}
4481 4482
next:
		put_cpu_ptr(pmu->pmu_cpu_context);
P
Peter Zijlstra 已提交
4483
	}
4484 4485
	rcu_read_unlock();

4486 4487 4488
	kfree(buf);
}

4489
void perf_event_mmap(struct vm_area_struct *vma)
4490
{
4491 4492
	struct perf_mmap_event mmap_event;

4493
	if (!atomic_read(&nr_mmap_events))
4494 4495 4496
		return;

	mmap_event = (struct perf_mmap_event){
4497
		.vma	= vma,
4498 4499
		/* .file_name */
		/* .file_size */
4500
		.event_id  = {
4501
			.header = {
4502
				.type = PERF_RECORD_MMAP,
4503
				.misc = PERF_RECORD_MISC_USER,
4504 4505 4506 4507
				/* .size */
			},
			/* .pid */
			/* .tid */
4508 4509
			.start  = vma->vm_start,
			.len    = vma->vm_end - vma->vm_start,
4510
			.pgoff  = (u64)vma->vm_pgoff << PAGE_SHIFT,
4511 4512 4513
		},
	};

4514
	perf_event_mmap_event(&mmap_event);
4515 4516
}

4517 4518 4519 4520
/*
 * IRQ throttle logging
 */

4521
static void perf_log_throttle(struct perf_event *event, int enable)
4522 4523
{
	struct perf_output_handle handle;
4524
	struct perf_sample_data sample;
4525 4526 4527 4528 4529
	int ret;

	struct {
		struct perf_event_header	header;
		u64				time;
4530
		u64				id;
4531
		u64				stream_id;
4532 4533
	} throttle_event = {
		.header = {
4534
			.type = PERF_RECORD_THROTTLE,
4535 4536 4537
			.misc = 0,
			.size = sizeof(throttle_event),
		},
P
Peter Zijlstra 已提交
4538
		.time		= perf_clock(),
4539 4540
		.id		= primary_event_id(event),
		.stream_id	= event->id,
4541 4542
	};

4543
	if (enable)
4544
		throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
4545

4546 4547 4548 4549
	perf_event_header__init_id(&throttle_event.header, &sample, event);

	ret = perf_output_begin(&handle, event,
				throttle_event.header.size, 1, 0);
4550 4551 4552 4553
	if (ret)
		return;

	perf_output_put(&handle, throttle_event);
4554
	perf_event__output_id_sample(event, &handle, &sample);
4555 4556 4557
	perf_output_end(&handle);
}

4558
/*
4559
 * Generic event overflow handling, sampling.
4560 4561
 */

4562
static int __perf_event_overflow(struct perf_event *event, int nmi,
4563 4564
				   int throttle, struct perf_sample_data *data,
				   struct pt_regs *regs)
4565
{
4566 4567
	int events = atomic_read(&event->event_limit);
	struct hw_perf_event *hwc = &event->hw;
4568 4569
	int ret = 0;

4570 4571 4572 4573 4574 4575 4576
	/*
	 * Non-sampling counters might still use the PMI to fold short
	 * hardware counters, ignore those.
	 */
	if (unlikely(!is_sampling_event(event)))
		return 0;

P
Peter Zijlstra 已提交
4577 4578 4579 4580
	if (unlikely(hwc->interrupts >= max_samples_per_tick)) {
		if (throttle) {
			hwc->interrupts = MAX_INTERRUPTS;
			perf_log_throttle(event, 0);
4581 4582
			ret = 1;
		}
P
Peter Zijlstra 已提交
4583 4584
	} else
		hwc->interrupts++;
4585

4586
	if (event->attr.freq) {
P
Peter Zijlstra 已提交
4587
		u64 now = perf_clock();
4588
		s64 delta = now - hwc->freq_time_stamp;
4589

4590
		hwc->freq_time_stamp = now;
4591

4592 4593
		if (delta > 0 && delta < 2*TICK_NSEC)
			perf_adjust_period(event, delta, hwc->last_period);
4594 4595
	}

4596 4597
	/*
	 * XXX event_limit might not quite work as expected on inherited
4598
	 * events
4599 4600
	 */

4601 4602
	event->pending_kill = POLL_IN;
	if (events && atomic_dec_and_test(&event->event_limit)) {
4603
		ret = 1;
4604
		event->pending_kill = POLL_HUP;
4605
		if (nmi) {
4606
			event->pending_disable = 1;
4607
			irq_work_queue(&event->pending);
4608
		} else
4609
			perf_event_disable(event);
4610 4611
	}

4612 4613 4614 4615 4616
	if (event->overflow_handler)
		event->overflow_handler(event, nmi, data, regs);
	else
		perf_event_output(event, nmi, data, regs);

P
Peter Zijlstra 已提交
4617 4618 4619 4620 4621 4622 4623 4624
	if (event->fasync && event->pending_kill) {
		if (nmi) {
			event->pending_wakeup = 1;
			irq_work_queue(&event->pending);
		} else
			perf_event_wakeup(event);
	}

4625
	return ret;
4626 4627
}

4628
int perf_event_overflow(struct perf_event *event, int nmi,
4629 4630
			  struct perf_sample_data *data,
			  struct pt_regs *regs)
4631
{
4632
	return __perf_event_overflow(event, nmi, 1, data, regs);
4633 4634
}

4635
/*
4636
 * Generic software event infrastructure
4637 4638
 */

4639 4640 4641 4642 4643 4644 4645 4646 4647 4648 4649
struct swevent_htable {
	struct swevent_hlist		*swevent_hlist;
	struct mutex			hlist_mutex;
	int				hlist_refcount;

	/* Recursion avoidance in each contexts */
	int				recursion[PERF_NR_CONTEXTS];
};

static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);

4650
/*
4651 4652
 * We directly increment event->count and keep a second value in
 * event->hw.period_left to count intervals. This period event
4653 4654 4655 4656
 * is kept in the range [-sample_period, 0] so that we can use the
 * sign as trigger.
 */

4657
static u64 perf_swevent_set_period(struct perf_event *event)
4658
{
4659
	struct hw_perf_event *hwc = &event->hw;
4660 4661 4662 4663 4664
	u64 period = hwc->last_period;
	u64 nr, offset;
	s64 old, val;

	hwc->last_period = hwc->sample_period;
4665 4666

again:
4667
	old = val = local64_read(&hwc->period_left);
4668 4669
	if (val < 0)
		return 0;
4670

4671 4672 4673
	nr = div64_u64(period + val, period);
	offset = nr * period;
	val -= offset;
4674
	if (local64_cmpxchg(&hwc->period_left, old, val) != old)
4675
		goto again;
4676

4677
	return nr;
4678 4679
}

4680
static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
4681 4682
				    int nmi, struct perf_sample_data *data,
				    struct pt_regs *regs)
4683
{
4684
	struct hw_perf_event *hwc = &event->hw;
4685
	int throttle = 0;
4686

4687
	data->period = event->hw.last_period;
4688 4689
	if (!overflow)
		overflow = perf_swevent_set_period(event);
4690

4691 4692
	if (hwc->interrupts == MAX_INTERRUPTS)
		return;
4693

4694
	for (; overflow; overflow--) {
4695
		if (__perf_event_overflow(event, nmi, throttle,
4696
					    data, regs)) {
4697 4698 4699 4700 4701 4702
			/*
			 * We inhibit the overflow from happening when
			 * hwc->interrupts == MAX_INTERRUPTS.
			 */
			break;
		}
4703
		throttle = 1;
4704
	}
4705 4706
}

P
Peter Zijlstra 已提交
4707
static void perf_swevent_event(struct perf_event *event, u64 nr,
4708 4709
			       int nmi, struct perf_sample_data *data,
			       struct pt_regs *regs)
4710
{
4711
	struct hw_perf_event *hwc = &event->hw;
4712

4713
	local64_add(nr, &event->count);
4714

4715 4716 4717
	if (!regs)
		return;

4718
	if (!is_sampling_event(event))
4719
		return;
4720

4721 4722 4723
	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
		return perf_swevent_overflow(event, 1, nmi, data, regs);

4724
	if (local64_add_negative(nr, &hwc->period_left))
4725
		return;
4726

4727
	perf_swevent_overflow(event, 0, nmi, data, regs);
4728 4729
}

4730 4731 4732
static int perf_exclude_event(struct perf_event *event,
			      struct pt_regs *regs)
{
P
Peter Zijlstra 已提交
4733
	if (event->hw.state & PERF_HES_STOPPED)
4734
		return 1;
P
Peter Zijlstra 已提交
4735

4736 4737 4738 4739 4740 4741 4742 4743 4744 4745 4746
	if (regs) {
		if (event->attr.exclude_user && user_mode(regs))
			return 1;

		if (event->attr.exclude_kernel && !user_mode(regs))
			return 1;
	}

	return 0;
}

4747
static int perf_swevent_match(struct perf_event *event,
P
Peter Zijlstra 已提交
4748
				enum perf_type_id type,
L
Li Zefan 已提交
4749 4750 4751
				u32 event_id,
				struct perf_sample_data *data,
				struct pt_regs *regs)
4752
{
4753
	if (event->attr.type != type)
4754
		return 0;
4755

4756
	if (event->attr.config != event_id)
4757 4758
		return 0;

4759 4760
	if (perf_exclude_event(event, regs))
		return 0;
4761 4762 4763 4764

	return 1;
}

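/*
 * Software events live on a per-cpu hash table keyed by (type, config);
 * swevent_hash() folds both into one index into swevent_hlist::heads[].
 */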
static inline u64 swevent_hash(u64 type, u32 event_id)
{
	u64 val = event_id | (type << 32);

	return hash_64(val, SWEVENT_HLIST_BITS);
}

4772 4773
static inline struct hlist_head *
__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
4774
{
4775 4776 4777 4778
	u64 hash = swevent_hash(type, event_id);

	return &hlist->heads[hash];
}
4779

4780 4781
/* For the read side: events when they trigger */
static inline struct hlist_head *
4782
find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
4783 4784
{
	struct swevent_hlist *hlist;
4785

4786
	hlist = rcu_dereference(swhash->swevent_hlist);
4787 4788 4789
	if (!hlist)
		return NULL;

4790 4791 4792 4793 4794
	return __find_swevent_head(hlist, type, event_id);
}

/* For the event head insertion and removal in the hlist */
static inline struct hlist_head *
4795
find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
4796 4797 4798 4799 4800 4801 4802 4803 4804 4805
{
	struct swevent_hlist *hlist;
	u32 event_id = event->attr.config;
	u64 type = event->attr.type;

	/*
	 * Event scheduling is always serialized against hlist allocation
	 * and release. Which makes the protected version suitable here.
	 * The context lock guarantees that.
	 */
4806
	hlist = rcu_dereference_protected(swhash->swevent_hlist,
4807 4808 4809 4810 4811
					  lockdep_is_held(&event->ctx->lock));
	if (!hlist)
		return NULL;

	return __find_swevent_head(hlist, type, event_id);
4812 4813 4814 4815 4816 4817
}

static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
				    u64 nr, int nmi,
				    struct perf_sample_data *data,
				    struct pt_regs *regs)
4818
{
4819
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
4820
	struct perf_event *event;
4821 4822
	struct hlist_node *node;
	struct hlist_head *head;
4823

4824
	rcu_read_lock();
4825
	head = find_swevent_head_rcu(swhash, type, event_id);
4826 4827 4828 4829
	if (!head)
		goto end;

	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
L
Li Zefan 已提交
4830
		if (perf_swevent_match(event, type, event_id, data, regs))
P
Peter Zijlstra 已提交
4831
			perf_swevent_event(event, nr, nmi, data, regs);
4832
	}
4833 4834
end:
	rcu_read_unlock();
4835 4836
}

4837
int perf_swevent_get_recursion_context(void)
P
Peter Zijlstra 已提交
4838
{
4839
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
P
Peter Zijlstra 已提交
4840

4841
	return get_recursion_context(swhash->recursion);
P
Peter Zijlstra 已提交
4842
}
I
Ingo Molnar 已提交
4843
EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
P
Peter Zijlstra 已提交
4844

4845
inline void perf_swevent_put_recursion_context(int rctx)
4846
{
4847
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
4848

4849
	put_recursion_context(swhash->recursion, rctx);
4850
}
4851

4852
void __perf_sw_event(u32 event_id, u64 nr, int nmi,
4853
			    struct pt_regs *regs, u64 addr)
4854
{
4855
	struct perf_sample_data data;
4856 4857
	int rctx;

4858
	preempt_disable_notrace();
4859 4860 4861
	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		return;
4862

4863
	perf_sample_data_init(&data, addr);
4864

4865
	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
4866 4867

	perf_swevent_put_recursion_context(rctx);
4868
	preempt_enable_notrace();
4869 4870
}

4871
static void perf_swevent_read(struct perf_event *event)
4872 4873 4874
{
}

P
Peter Zijlstra 已提交
4875
static int perf_swevent_add(struct perf_event *event, int flags)
4876
{
4877
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
4878
	struct hw_perf_event *hwc = &event->hw;
4879 4880
	struct hlist_head *head;

4881
	if (is_sampling_event(event)) {
4882
		hwc->last_period = hwc->sample_period;
4883
		perf_swevent_set_period(event);
4884
	}
4885

P
Peter Zijlstra 已提交
4886 4887
	hwc->state = !(flags & PERF_EF_START);

4888
	head = find_swevent_head(swhash, event);
4889 4890 4891 4892 4893
	if (WARN_ON_ONCE(!head))
		return -EINVAL;

	hlist_add_head_rcu(&event->hlist_entry, head);

4894 4895 4896
	return 0;
}

P
Peter Zijlstra 已提交
4897
static void perf_swevent_del(struct perf_event *event, int flags)
4898
{
4899
	hlist_del_rcu(&event->hlist_entry);
4900 4901
}

P
Peter Zijlstra 已提交
4902
static void perf_swevent_start(struct perf_event *event, int flags)
4903
{
P
Peter Zijlstra 已提交
4904
	event->hw.state = 0;
4905
}
I
Ingo Molnar 已提交
4906

P
Peter Zijlstra 已提交
4907
static void perf_swevent_stop(struct perf_event *event, int flags)
4908
{
P
Peter Zijlstra 已提交
4909
	event->hw.state = PERF_HES_STOPPED;
4910 4911
}

4912 4913
/* Deref the hlist from the update side */
static inline struct swevent_hlist *
4914
swevent_hlist_deref(struct swevent_htable *swhash)
4915
{
4916 4917
	return rcu_dereference_protected(swhash->swevent_hlist,
					 lockdep_is_held(&swhash->hlist_mutex));
4918 4919
}

4920
static void swevent_hlist_release(struct swevent_htable *swhash)
4921
{
4922
	struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
4923

4924
	if (!hlist)
4925 4926
		return;

4927
	rcu_assign_pointer(swhash->swevent_hlist, NULL);
4928
	kfree_rcu(hlist, rcu_head);
4929 4930 4931 4932
}

static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
{
4933
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
4934

4935
	mutex_lock(&swhash->hlist_mutex);
4936

4937 4938
	if (!--swhash->hlist_refcount)
		swevent_hlist_release(swhash);
4939

4940
	mutex_unlock(&swhash->hlist_mutex);
4941 4942 4943 4944 4945 4946 4947 4948 4949 4950 4951 4952 4953 4954 4955 4956 4957
}

static void swevent_hlist_put(struct perf_event *event)
{
	int cpu;

	if (event->cpu != -1) {
		swevent_hlist_put_cpu(event, event->cpu);
		return;
	}

	for_each_possible_cpu(cpu)
		swevent_hlist_put_cpu(event, cpu);
}

static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
{
4958
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
4959 4960
	int err = 0;

4961
	mutex_lock(&swhash->hlist_mutex);
4962

4963
	if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
4964 4965 4966 4967 4968 4969 4970
		struct swevent_hlist *hlist;

		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
		if (!hlist) {
			err = -ENOMEM;
			goto exit;
		}
4971
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
4972
	}
4973
	swhash->hlist_refcount++;
P
Peter Zijlstra 已提交
4974
exit:
4975
	mutex_unlock(&swhash->hlist_mutex);
4976 4977 4978 4979 4980 4981 4982 4983 4984 4985 4986 4987 4988 4989 4990 4991 4992 4993 4994 4995 4996 4997 4998

	return err;
}

static int swevent_hlist_get(struct perf_event *event)
{
	int err;
	int cpu, failed_cpu;

	if (event->cpu != -1)
		return swevent_hlist_get_cpu(event, event->cpu);

	get_online_cpus();
	for_each_possible_cpu(cpu) {
		err = swevent_hlist_get_cpu(event, cpu);
		if (err) {
			failed_cpu = cpu;
			goto fail;
		}
	}
	put_online_cpus();

	return 0;
P
Peter Zijlstra 已提交
4999
fail:
5000 5001 5002 5003 5004 5005 5006 5007 5008 5009
	for_each_possible_cpu(cpu) {
		if (cpu == failed_cpu)
			break;
		swevent_hlist_put_cpu(event, cpu);
	}

	put_online_cpus();
	return err;
}

5010
struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
5011

5012 5013 5014
static void sw_perf_event_destroy(struct perf_event *event)
{
	u64 event_id = event->attr.config;
5015

5016 5017
	WARN_ON(event->parent);

P
Peter Zijlstra 已提交
5018
	jump_label_dec(&perf_swevent_enabled[event_id]);
5019 5020 5021 5022 5023 5024 5025 5026 5027 5028 5029 5030 5031 5032 5033 5034 5035 5036 5037
	swevent_hlist_put(event);
}

static int perf_swevent_init(struct perf_event *event)
{
	int event_id = event->attr.config;

	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	switch (event_id) {
	case PERF_COUNT_SW_CPU_CLOCK:
	case PERF_COUNT_SW_TASK_CLOCK:
		return -ENOENT;

	default:
		break;
	}

5038
	if (event_id >= PERF_COUNT_SW_MAX)
5039 5040 5041 5042 5043 5044 5045 5046 5047
		return -ENOENT;

	if (!event->parent) {
		int err;

		err = swevent_hlist_get(event);
		if (err)
			return err;

P
Peter Zijlstra 已提交
5048
		jump_label_inc(&perf_swevent_enabled[event_id]);
5049 5050 5051 5052 5053 5054 5055
		event->destroy = sw_perf_event_destroy;
	}

	return 0;
}

static struct pmu perf_swevent = {
5056
	.task_ctx_nr	= perf_sw_context,
5057

5058
	.event_init	= perf_swevent_init,
P
Peter Zijlstra 已提交
5059 5060 5061 5062
	.add		= perf_swevent_add,
	.del		= perf_swevent_del,
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
5063 5064 5065
	.read		= perf_swevent_read,
};

5066 5067
#ifdef CONFIG_EVENT_TRACING

5068 5069 5070 5071 5072 5073 5074 5075 5076 5077 5078 5079 5080 5081
static int perf_tp_filter_match(struct perf_event *event,
				struct perf_sample_data *data)
{
	void *record = data->raw->data;

	if (likely(!event->filter) || filter_match_preds(event->filter, record))
		return 1;
	return 0;
}

static int perf_tp_event_match(struct perf_event *event,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
5082 5083
	if (event->hw.state & PERF_HES_STOPPED)
		return 0;
5084 5085 5086 5087
	/*
	 * All tracepoints are from kernel-space.
	 */
	if (event->attr.exclude_kernel)
5088 5089 5090 5091 5092 5093 5094 5095 5096
		return 0;

	if (!perf_tp_filter_match(event, data))
		return 0;

	return 1;
}

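/*
 * Entry point from the tracepoint glue: wrap the already formatted trace
 * record as PERF_SAMPLE_RAW data and feed it to every event hashed on
 * this tracepoint's hlist.
 */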
void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
		   struct pt_regs *regs, struct hlist_head *head, int rctx)
5098 5099
{
	struct perf_sample_data data;
5100 5101 5102
	struct perf_event *event;
	struct hlist_node *node;

5103 5104 5105 5106 5107 5108 5109 5110
	struct perf_raw_record raw = {
		.size = entry_size,
		.data = record,
	};

	perf_sample_data_init(&data, addr);
	data.raw = &raw;

5111 5112
	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
		if (perf_tp_event_match(event, &data, regs))
P
Peter Zijlstra 已提交
5113
			perf_swevent_event(event, count, 1, &data, regs);
5114
	}
5115 5116

	perf_swevent_put_recursion_context(rctx);
5117 5118 5119
}
EXPORT_SYMBOL_GPL(perf_tp_event);

5120
static void tp_perf_event_destroy(struct perf_event *event)
5121
{
5122
	perf_trace_destroy(event);
5123 5124
}

5125
static int perf_tp_event_init(struct perf_event *event)
5126
{
5127 5128
	int err;

5129 5130 5131
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -ENOENT;

5132 5133
	err = perf_trace_init(event);
	if (err)
5134
		return err;
5135

5136
	event->destroy = tp_perf_event_destroy;
5137

5138 5139 5140 5141
	return 0;
}

static struct pmu perf_tracepoint = {
5142 5143
	.task_ctx_nr	= perf_sw_context,

5144
	.event_init	= perf_tp_event_init,
P
Peter Zijlstra 已提交
5145 5146 5147 5148
	.add		= perf_trace_add,
	.del		= perf_trace_del,
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
5149 5150 5151 5152 5153
	.read		= perf_swevent_read,
};

static inline void perf_tp_register(void)
{
P
Peter Zijlstra 已提交
5154
	perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
5155
}
L
Li Zefan 已提交
5156 5157 5158 5159 5160 5161 5162 5163 5164 5165 5166 5167 5168 5169 5170 5171 5172 5173 5174 5175 5176 5177 5178 5179

static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
	char *filter_str;
	int ret;

	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;

	filter_str = strndup_user(arg, PAGE_SIZE);
	if (IS_ERR(filter_str))
		return PTR_ERR(filter_str);

	ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);

	kfree(filter_str);
	return ret;
}

static void perf_event_free_filter(struct perf_event *event)
{
	ftrace_profile_free_filter(event);
}

5180
#else
L
Li Zefan 已提交
5181

5182
static inline void perf_tp_register(void)
5183 5184
{
}
L
Li Zefan 已提交
5185 5186 5187 5188 5189 5190 5191 5192 5193 5194

static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
	return -ENOENT;
}

static void perf_event_free_filter(struct perf_event *event)
{
}

5195
#endif /* CONFIG_EVENT_TRACING */
5196

5197
#ifdef CONFIG_HAVE_HW_BREAKPOINT
5198
void perf_bp_event(struct perf_event *bp, void *data)
5199
{
5200 5201 5202
	struct perf_sample_data sample;
	struct pt_regs *regs = data;

5203
	perf_sample_data_init(&sample, bp->attr.bp_addr);
5204

P
Peter Zijlstra 已提交
5205 5206
	if (!bp->hw.state && !perf_exclude_event(bp, regs))
		perf_swevent_event(bp, 1, 1, &sample, regs);
5207 5208 5209
}
#endif

5210 5211 5212
/*
 * hrtimer based swevent callback
 */
5213

5214
static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
5215
{
5216 5217 5218 5219 5220
	enum hrtimer_restart ret = HRTIMER_RESTART;
	struct perf_sample_data data;
	struct pt_regs *regs;
	struct perf_event *event;
	u64 period;
5221

5222
	event = container_of(hrtimer, struct perf_event, hw.hrtimer);
P
Peter Zijlstra 已提交
5223 5224 5225 5226

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return HRTIMER_NORESTART;

5227
	event->pmu->read(event);
5228

5229 5230 5231 5232 5233 5234 5235 5236 5237
	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;
	regs = get_irq_regs();

	if (regs && !perf_exclude_event(event, regs)) {
		if (!(event->attr.exclude_idle && current->pid == 0))
			if (perf_event_overflow(event, 0, &data, regs))
				ret = HRTIMER_NORESTART;
	}
5238

5239 5240
	period = max_t(u64, 10000, event->hw.sample_period);
	hrtimer_forward_now(hrtimer, ns_to_ktime(period));
5241

5242
	return ret;
5243 5244
}

5245
static void perf_swevent_start_hrtimer(struct perf_event *event)
5246
{
5247
	struct hw_perf_event *hwc = &event->hw;
5248 5249 5250 5251
	s64 period;

	if (!is_sampling_event(event))
		return;
5252

5253 5254 5255 5256
	period = local64_read(&hwc->period_left);
	if (period) {
		if (period < 0)
			period = 10000;
P
Peter Zijlstra 已提交
5257

5258 5259 5260 5261 5262
		local64_set(&hwc->period_left, 0);
	} else {
		period = max_t(u64, 10000, hwc->sample_period);
	}
	__hrtimer_start_range_ns(&hwc->hrtimer,
5263
				ns_to_ktime(period), 0,
5264
				HRTIMER_MODE_REL_PINNED, 0);
5265
}
5266 5267

static void perf_swevent_cancel_hrtimer(struct perf_event *event)
5268
{
5269 5270
	struct hw_perf_event *hwc = &event->hw;

5271
	if (is_sampling_event(event)) {
5272
		ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
P
Peter Zijlstra 已提交
5273
		local64_set(&hwc->period_left, ktime_to_ns(remaining));
5274 5275 5276

		hrtimer_cancel(&hwc->hrtimer);
	}
5277 5278
}

P
Peter Zijlstra 已提交
5279 5280 5281 5282 5283 5284 5285 5286 5287 5288 5289 5290 5291 5292 5293 5294 5295 5296 5297 5298 5299 5300 5301 5302
static void perf_swevent_init_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!is_sampling_event(event))
		return;

	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swevent_hrtimer;

	/*
	 * Since hrtimers have a fixed rate, we can do a static freq->period
	 * mapping and avoid the whole period adjust feedback stuff.
	 */
	if (event->attr.freq) {
		long freq = event->attr.sample_freq;

		event->attr.sample_period = NSEC_PER_SEC / freq;
		hwc->sample_period = event->attr.sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
		event->attr.freq = 0;
	}
}

5303 5304 5305 5306 5307
/*
 * Software event: cpu wall time clock
 */

static void cpu_clock_event_update(struct perf_event *event)
5308
{
5309 5310 5311
	s64 prev;
	u64 now;

P
Peter Zijlstra 已提交
5312
	now = local_clock();
5313 5314
	prev = local64_xchg(&event->hw.prev_count, now);
	local64_add(now - prev, &event->count);
5315 5316
}

P
Peter Zijlstra 已提交
5317
static void cpu_clock_event_start(struct perf_event *event, int flags)
5318
{
P
Peter Zijlstra 已提交
5319
	local64_set(&event->hw.prev_count, local_clock());
5320 5321 5322
	perf_swevent_start_hrtimer(event);
}

P
Peter Zijlstra 已提交
5323
static void cpu_clock_event_stop(struct perf_event *event, int flags)
5324
{
5325 5326 5327
	perf_swevent_cancel_hrtimer(event);
	cpu_clock_event_update(event);
}
5328

P
Peter Zijlstra 已提交
5329 5330 5331 5332 5333 5334 5335 5336 5337 5338 5339 5340 5341
static int cpu_clock_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		cpu_clock_event_start(event, flags);

	return 0;
}

static void cpu_clock_event_del(struct perf_event *event, int flags)
{
	cpu_clock_event_stop(event, flags);
}

5342 5343 5344 5345
static void cpu_clock_event_read(struct perf_event *event)
{
	cpu_clock_event_update(event);
}
5346

5347 5348 5349 5350 5351 5352 5353 5354
static int cpu_clock_event_init(struct perf_event *event)
{
	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
		return -ENOENT;

P
Peter Zijlstra 已提交
5355 5356
	perf_swevent_init_hrtimer(event);

5357
	return 0;
5358 5359
}

5360
static struct pmu perf_cpu_clock = {
5361 5362
	.task_ctx_nr	= perf_sw_context,

5363
	.event_init	= cpu_clock_event_init,
P
Peter Zijlstra 已提交
5364 5365 5366 5367
	.add		= cpu_clock_event_add,
	.del		= cpu_clock_event_del,
	.start		= cpu_clock_event_start,
	.stop		= cpu_clock_event_stop,
5368 5369 5370 5371 5372 5373 5374 5375
	.read		= cpu_clock_event_read,
};

/*
 * Software event: task time clock
 */

static void task_clock_event_update(struct perf_event *event, u64 now)
5376
{
5377 5378
	u64 prev;
	s64 delta;
5379

5380 5381 5382 5383
	prev = local64_xchg(&event->hw.prev_count, now);
	delta = now - prev;
	local64_add(delta, &event->count);
}
5384

P
Peter Zijlstra 已提交
5385
static void task_clock_event_start(struct perf_event *event, int flags)
5386
{
P
Peter Zijlstra 已提交
5387
	local64_set(&event->hw.prev_count, event->ctx->time);
5388 5389 5390
	perf_swevent_start_hrtimer(event);
}

P
Peter Zijlstra 已提交
5391
static void task_clock_event_stop(struct perf_event *event, int flags)
5392 5393 5394
{
	perf_swevent_cancel_hrtimer(event);
	task_clock_event_update(event, event->ctx->time);
P
Peter Zijlstra 已提交
5395 5396 5397 5398 5399 5400
}

static int task_clock_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		task_clock_event_start(event, flags);
5401

P
Peter Zijlstra 已提交
5402 5403 5404 5405 5406 5407
	return 0;
}

static void task_clock_event_del(struct perf_event *event, int flags)
{
	task_clock_event_stop(event, PERF_EF_UPDATE);
5408 5409 5410 5411
}

static void task_clock_event_read(struct perf_event *event)
{
5412 5413 5414
	u64 now = perf_clock();
	u64 delta = now - event->ctx->timestamp;
	u64 time = event->ctx->time + delta;
5415 5416 5417 5418 5419

	task_clock_event_update(event, time);
}

static int task_clock_event_init(struct perf_event *event)
L
Li Zefan 已提交
5420
{
5421 5422 5423 5424 5425 5426
	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
		return -ENOENT;

P
Peter Zijlstra 已提交
5427 5428
	perf_swevent_init_hrtimer(event);

5429
	return 0;
L
Li Zefan 已提交
5430 5431
}

5432
static struct pmu perf_task_clock = {
5433 5434
	.task_ctx_nr	= perf_sw_context,

5435
	.event_init	= task_clock_event_init,
P
Peter Zijlstra 已提交
5436 5437 5438 5439
	.add		= task_clock_event_add,
	.del		= task_clock_event_del,
	.start		= task_clock_event_start,
	.stop		= task_clock_event_stop,
5440 5441
	.read		= task_clock_event_read,
};
L
Li Zefan 已提交
5442

P
Peter Zijlstra 已提交
5443
static void perf_pmu_nop_void(struct pmu *pmu)
5444 5445
{
}
L
Li Zefan 已提交
5446

P
Peter Zijlstra 已提交
5447
static int perf_pmu_nop_int(struct pmu *pmu)
L
Li Zefan 已提交
5448
{
P
Peter Zijlstra 已提交
5449
	return 0;
L
Li Zefan 已提交
5450 5451
}

P
Peter Zijlstra 已提交
5452
static void perf_pmu_start_txn(struct pmu *pmu)
L
Li Zefan 已提交
5453
{
P
Peter Zijlstra 已提交
5454
	perf_pmu_disable(pmu);
L
Li Zefan 已提交
5455 5456
}

P
Peter Zijlstra 已提交
5457 5458 5459 5460 5461
static int perf_pmu_commit_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
	return 0;
}
5462

P
Peter Zijlstra 已提交
5463
static void perf_pmu_cancel_txn(struct pmu *pmu)
5464
{
P
Peter Zijlstra 已提交
5465
	perf_pmu_enable(pmu);
5466 5467
}

P
Peter Zijlstra 已提交
5468 5469 5470 5471 5472
/*
 * Ensures all contexts with the same task_ctx_nr have the same
 * pmu_cpu_context too.
 */
static void *find_pmu_context(int ctxn)
5473
{
P
Peter Zijlstra 已提交
5474
	struct pmu *pmu;
5475

P
Peter Zijlstra 已提交
5476 5477
	if (ctxn < 0)
		return NULL;
5478

P
Peter Zijlstra 已提交
5479 5480 5481 5482
	list_for_each_entry(pmu, &pmus, entry) {
		if (pmu->task_ctx_nr == ctxn)
			return pmu->pmu_cpu_context;
	}
5483

P
Peter Zijlstra 已提交
5484
	return NULL;
5485 5486
}

5487
static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
5488
{
5489 5490 5491 5492 5493 5494 5495 5496 5497 5498 5499 5500 5501 5502 5503
	int cpu;

	for_each_possible_cpu(cpu) {
		struct perf_cpu_context *cpuctx;

		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);

		if (cpuctx->active_pmu == old_pmu)
			cpuctx->active_pmu = pmu;
	}
}

static void free_pmu_context(struct pmu *pmu)
{
	struct pmu *i;
5504

P
Peter Zijlstra 已提交
5505
	mutex_lock(&pmus_lock);
5506
	/*
P
Peter Zijlstra 已提交
5507
	 * Like a real lame refcount.
5508
	 */
5509 5510 5511
	list_for_each_entry(i, &pmus, entry) {
		if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
			update_pmu_context(i, pmu);
P
Peter Zijlstra 已提交
5512
			goto out;
5513
		}
P
Peter Zijlstra 已提交
5514
	}
5515

5516
	free_percpu(pmu->pmu_cpu_context);
P
Peter Zijlstra 已提交
5517 5518
out:
	mutex_unlock(&pmus_lock);
5519
}
P
Peter Zijlstra 已提交
5520
static struct idr pmu_idr;
5521

P
Peter Zijlstra 已提交
5522 5523 5524 5525 5526 5527 5528 5529 5530 5531 5532 5533 5534 5535 5536 5537 5538 5539 5540 5541 5542 5543 5544 5545 5546 5547 5548 5549 5550 5551 5552 5553 5554 5555 5556 5557 5558 5559 5560 5561 5562 5563 5564 5565 5566 5567 5568 5569 5570 5571 5572 5573
static ssize_t
type_show(struct device *dev, struct device_attribute *attr, char *page)
{
	struct pmu *pmu = dev_get_drvdata(dev);

	return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
}

static struct device_attribute pmu_dev_attrs[] = {
       __ATTR_RO(type),
       __ATTR_NULL,
};

static int pmu_bus_running;
static struct bus_type pmu_bus = {
	.name		= "event_source",
	.dev_attrs	= pmu_dev_attrs,
};

static void pmu_dev_release(struct device *dev)
{
	kfree(dev);
}

static int pmu_dev_alloc(struct pmu *pmu)
{
	int ret = -ENOMEM;

	pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!pmu->dev)
		goto out;

	device_initialize(pmu->dev);
	ret = dev_set_name(pmu->dev, "%s", pmu->name);
	if (ret)
		goto free_dev;

	dev_set_drvdata(pmu->dev, pmu);
	pmu->dev->bus = &pmu_bus;
	pmu->dev->release = pmu_dev_release;
	ret = device_add(pmu->dev);
	if (ret)
		goto free_dev;

out:
	return ret;

free_dev:
	put_device(pmu->dev);
	goto out;
}

5574
static struct lock_class_key cpuctx_mutex;
5575
static struct lock_class_key cpuctx_lock;
5576

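/*
 * Register a new PMU: allocate its per-cpu contexts, assign a type id
 * (dynamically via pmu_idr when type < 0) and, once the event_source bus
 * is up, expose it there as a device.
 */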
int perf_pmu_register(struct pmu *pmu, char *name, int type)
5578
{
P
Peter Zijlstra 已提交
5579
	int cpu, ret;
5580

5581
	mutex_lock(&pmus_lock);
P
Peter Zijlstra 已提交
5582 5583 5584 5585
	ret = -ENOMEM;
	pmu->pmu_disable_count = alloc_percpu(int);
	if (!pmu->pmu_disable_count)
		goto unlock;
5586

P
Peter Zijlstra 已提交
5587 5588 5589 5590 5591 5592 5593 5594 5595 5596 5597 5598 5599 5600 5601 5602 5603 5604
	pmu->type = -1;
	if (!name)
		goto skip_type;
	pmu->name = name;

	if (type < 0) {
		int err = idr_pre_get(&pmu_idr, GFP_KERNEL);
		if (!err)
			goto free_pdc;

		err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type);
		if (err) {
			ret = err;
			goto free_pdc;
		}
	}
	pmu->type = type;

P
Peter Zijlstra 已提交
5605 5606 5607 5608 5609 5610
	if (pmu_bus_running) {
		ret = pmu_dev_alloc(pmu);
		if (ret)
			goto free_idr;
	}

P
Peter Zijlstra 已提交
5611
skip_type:
P
Peter Zijlstra 已提交
5612 5613 5614
	pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
	if (pmu->pmu_cpu_context)
		goto got_cpu_context;
5615

P
Peter Zijlstra 已提交
5616 5617
	pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
	if (!pmu->pmu_cpu_context)
P
Peter Zijlstra 已提交
5618
		goto free_dev;
5619

P
Peter Zijlstra 已提交
5620 5621 5622 5623
	for_each_possible_cpu(cpu) {
		struct perf_cpu_context *cpuctx;

		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
5624
		__perf_event_init_context(&cpuctx->ctx);
5625
		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
5626
		lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
5627
		cpuctx->ctx.type = cpu_context;
P
Peter Zijlstra 已提交
5628
		cpuctx->ctx.pmu = pmu;
5629 5630
		cpuctx->jiffies_interval = 1;
		INIT_LIST_HEAD(&cpuctx->rotation_list);
5631
		cpuctx->active_pmu = pmu;
P
Peter Zijlstra 已提交
5632
	}
5633

P
Peter Zijlstra 已提交
5634
got_cpu_context:
P
Peter Zijlstra 已提交
5635 5636 5637 5638 5639 5640 5641 5642 5643 5644 5645 5646 5647 5648
	if (!pmu->start_txn) {
		if (pmu->pmu_enable) {
			/*
			 * If we have pmu_enable/pmu_disable calls, install
			 * transaction stubs that use that to try and batch
			 * hardware accesses.
			 */
			pmu->start_txn  = perf_pmu_start_txn;
			pmu->commit_txn = perf_pmu_commit_txn;
			pmu->cancel_txn = perf_pmu_cancel_txn;
		} else {
			pmu->start_txn  = perf_pmu_nop_void;
			pmu->commit_txn = perf_pmu_nop_int;
			pmu->cancel_txn = perf_pmu_nop_void;
5649
		}
5650
	}
5651

P
Peter Zijlstra 已提交
5652 5653 5654 5655 5656
	if (!pmu->pmu_enable) {
		pmu->pmu_enable  = perf_pmu_nop_void;
		pmu->pmu_disable = perf_pmu_nop_void;
	}

5657
	list_add_rcu(&pmu->entry, &pmus);
P
Peter Zijlstra 已提交
5658 5659
	ret = 0;
unlock:
5660 5661
	mutex_unlock(&pmus_lock);

P
Peter Zijlstra 已提交
5662
	return ret;
P
Peter Zijlstra 已提交
5663

P
Peter Zijlstra 已提交
5664 5665 5666 5667
free_dev:
	device_del(pmu->dev);
	put_device(pmu->dev);

P
Peter Zijlstra 已提交
5668 5669 5670 5671
free_idr:
	if (pmu->type >= PERF_TYPE_MAX)
		idr_remove(&pmu_idr, pmu->type);

P
Peter Zijlstra 已提交
5672 5673 5674
free_pdc:
	free_percpu(pmu->pmu_disable_count);
	goto unlock;
5675 5676
}

5677
void perf_pmu_unregister(struct pmu *pmu)
5678
{
5679 5680 5681
	mutex_lock(&pmus_lock);
	list_del_rcu(&pmu->entry);
	mutex_unlock(&pmus_lock);
5682

5683
	/*
P
Peter Zijlstra 已提交
5684 5685
	 * We dereference the pmu list under both SRCU and regular RCU, so
	 * synchronize against both of those.
5686
	 */
5687
	synchronize_srcu(&pmus_srcu);
P
Peter Zijlstra 已提交
5688
	synchronize_rcu();
5689

P
Peter Zijlstra 已提交
5690
	free_percpu(pmu->pmu_disable_count);
P
Peter Zijlstra 已提交
5691 5692
	if (pmu->type >= PERF_TYPE_MAX)
		idr_remove(&pmu_idr, pmu->type);
P
Peter Zijlstra 已提交
5693 5694
	device_del(pmu->dev);
	put_device(pmu->dev);
5695
	free_pmu_context(pmu);
5696
}

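/*
 * Find a pmu for @event: look up attr.type in pmu_idr first, then fall
 * back to asking every registered pmu until one accepts the event or
 * returns an error other than -ENOENT.
 */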
struct pmu *perf_init_event(struct perf_event *event)
{
	struct pmu *pmu = NULL;
	int idx;
5702
	int ret;
5703 5704

	idx = srcu_read_lock(&pmus_srcu);
P
Peter Zijlstra 已提交
5705 5706 5707 5708

	rcu_read_lock();
	pmu = idr_find(&pmu_idr, event->attr.type);
	rcu_read_unlock();
5709 5710 5711 5712
	if (pmu) {
		ret = pmu->event_init(event);
		if (ret)
			pmu = ERR_PTR(ret);
P
Peter Zijlstra 已提交
5713
		goto unlock;
5714
	}
P
Peter Zijlstra 已提交
5715

5716
	list_for_each_entry_rcu(pmu, &pmus, entry) {
5717
		ret = pmu->event_init(event);
5718
		if (!ret)
P
Peter Zijlstra 已提交
5719
			goto unlock;
5720

5721 5722
		if (ret != -ENOENT) {
			pmu = ERR_PTR(ret);
P
Peter Zijlstra 已提交
5723
			goto unlock;
5724
		}
5725
	}
P
Peter Zijlstra 已提交
5726 5727
	pmu = ERR_PTR(-ENOENT);
unlock:
5728
	srcu_read_unlock(&pmus_srcu, idx);
5729

5730
	return pmu;
5731 5732
}

/*
 * Allocate and initialize an event structure
 */
static struct perf_event *
perf_event_alloc(struct perf_event_attr *attr, int cpu,
		 struct task_struct *task,
		 struct perf_event *group_leader,
		 struct perf_event *parent_event,
		 perf_overflow_handler_t overflow_handler)
{
	struct pmu *pmu;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	long err;

5748 5749 5750 5751 5752
	if ((unsigned)cpu >= nr_cpu_ids) {
		if (!task || cpu != -1)
			return ERR_PTR(-EINVAL);
	}

5753
	event = kzalloc(sizeof(*event), GFP_KERNEL);
5754
	if (!event)
5755
		return ERR_PTR(-ENOMEM);
T
Thomas Gleixner 已提交
5756

5757
	/*
5758
	 * Single events are their own group leaders, with an
5759 5760 5761
	 * empty sibling list:
	 */
	if (!group_leader)
5762
		group_leader = event;
5763

5764 5765
	mutex_init(&event->child_mutex);
	INIT_LIST_HEAD(&event->child_list);
5766

5767 5768 5769 5770
	INIT_LIST_HEAD(&event->group_entry);
	INIT_LIST_HEAD(&event->event_entry);
	INIT_LIST_HEAD(&event->sibling_list);
	init_waitqueue_head(&event->waitq);
5771
	init_irq_work(&event->pending, perf_pending_event);
T
Thomas Gleixner 已提交
5772

5773
	mutex_init(&event->mmap_mutex);
5774

5775 5776 5777 5778 5779
	event->cpu		= cpu;
	event->attr		= *attr;
	event->group_leader	= group_leader;
	event->pmu		= NULL;
	event->oncpu		= -1;
5780

5781
	event->parent		= parent_event;
5782

5783 5784
	event->ns		= get_pid_ns(current->nsproxy->pid_ns);
	event->id		= atomic64_inc_return(&perf_event_id);
5785

5786
	event->state		= PERF_EVENT_STATE_INACTIVE;
5787

5788 5789 5790 5791 5792 5793 5794 5795 5796 5797 5798
	if (task) {
		event->attach_state = PERF_ATTACH_TASK;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		/*
		 * hw_breakpoint is a bit difficult here..
		 */
		if (attr->type == PERF_TYPE_BREAKPOINT)
			event->hw.bp_target = task;
#endif
	}

5799 5800
	if (!overflow_handler && parent_event)
		overflow_handler = parent_event->overflow_handler;
5801

5802
	event->overflow_handler	= overflow_handler;
5803

5804
	if (attr->disabled)
5805
		event->state = PERF_EVENT_STATE_OFF;
5806

5807
	pmu = NULL;
5808

5809
	hwc = &event->hw;
5810
	hwc->sample_period = attr->sample_period;
5811
	if (attr->freq && attr->sample_freq)
5812
		hwc->sample_period = 1;
5813
	hwc->last_period = hwc->sample_period;
5814

5815
	local64_set(&hwc->period_left, hwc->sample_period);
5816

5817
	/*
5818
	 * we currently do not support PERF_FORMAT_GROUP on inherited events
5819
	 */
5820
	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
5821 5822
		goto done;

5823
	pmu = perf_init_event(event);
5824

5825 5826
done:
	err = 0;
5827
	if (!pmu)
5828
		err = -EINVAL;
5829 5830
	else if (IS_ERR(pmu))
		err = PTR_ERR(pmu);
5831

5832
	if (err) {
5833 5834 5835
		if (event->ns)
			put_pid_ns(event->ns);
		kfree(event);
5836
		return ERR_PTR(err);
I
Ingo Molnar 已提交
5837
	}
5838

5839
	event->pmu = pmu;
T
Thomas Gleixner 已提交
5840

5841
	if (!event->parent) {
5842
		if (event->attach_state & PERF_ATTACH_TASK)
S
Stephane Eranian 已提交
5843
			jump_label_inc(&perf_sched_events);
5844
		if (event->attr.mmap || event->attr.mmap_data)
5845 5846 5847 5848 5849
			atomic_inc(&nr_mmap_events);
		if (event->attr.comm)
			atomic_inc(&nr_comm_events);
		if (event->attr.task)
			atomic_inc(&nr_task_events);
5850 5851 5852 5853 5854 5855 5856
		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
			err = get_callchain_buffers();
			if (err) {
				free_event(event);
				return ERR_PTR(err);
			}
		}
5857
	}
5858

5859
	return event;
T
Thomas Gleixner 已提交
5860 5861
}

static int perf_copy_attr(struct perf_event_attr __user *uattr,
			  struct perf_event_attr *attr)
{
	u32 size;
	int ret;

	if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
		return -EFAULT;

	/*
	 * zero the full structure, so that a short copy will be nice.
	 */
	memset(attr, 0, sizeof(*attr));

	ret = get_user(size, &uattr->size);
	if (ret)
		return ret;

	if (size > PAGE_SIZE)	/* silly large */
		goto err_size;

	if (!size)		/* abi compat */
		size = PERF_ATTR_SIZE_VER0;

	if (size < PERF_ATTR_SIZE_VER0)
		goto err_size;

	/*
	 * If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(*attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(*attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			ret = get_user(val, addr);
			if (ret)
				return ret;
			if (val)
				goto err_size;
		}
		size = sizeof(*attr);
	}

	ret = copy_from_user(attr, uattr, size);
	if (ret)
		return -EFAULT;

	/*
	 * If the type exists, the corresponding creation will verify
	 * the attr->config.
	 */
	if (attr->type >= PERF_TYPE_MAX)
		return -EINVAL;

	if (attr->__reserved_1)
		return -EINVAL;

	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
		return -EINVAL;

	if (attr->read_format & ~(PERF_FORMAT_MAX-1))
		return -EINVAL;

out:
	return ret;

err_size:
	put_user(sizeof(*attr), &uattr->size);
	ret = -E2BIG;
	goto out;
}

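/*
 * Illustrative user-space sketch of an attr that passes the checks above
 * (not part of this file; the hardware-cycles config is just an example):
 *
 *	struct perf_event_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size     = sizeof(attr);
 *	attr.type     = PERF_TYPE_HARDWARE;
 *	attr.config   = PERF_COUNT_HW_CPU_CYCLES;
 *	attr.disabled = 1;
 *
 * A size of 0 is treated as PERF_ATTR_SIZE_VER0 for old binaries, and a
 * struct larger than the kernel's is accepted only when all the trailing
 * bytes are zero; anything else fails with -E2BIG.
 */
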
static int
perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
{
	struct ring_buffer *rb = NULL, *old_rb = NULL;
	int ret = -EINVAL;

	if (!output_event)
		goto set;

	/* don't allow circular references */
	if (event == output_event)
		goto out;

	/*
	 * Don't allow cross-cpu buffers
	 */
	if (output_event->cpu != event->cpu)
		goto out;

	/*
	 * If it's not a per-cpu rb, it must be the same task.
	 */
	if (output_event->cpu == -1 && output_event->ctx != event->ctx)
		goto out;

set:
	mutex_lock(&event->mmap_mutex);
	/* Can't redirect output if we've got an active mmap() */
	if (atomic_read(&event->mmap_count))
		goto unlock;

	if (output_event) {
		/* get the rb we want to redirect to */
		rb = ring_buffer_get(output_event);
		if (!rb)
			goto unlock;
	}

	old_rb = event->rb;
	rcu_assign_pointer(event->rb, rb);
	ret = 0;
unlock:
	mutex_unlock(&event->mmap_mutex);

	if (old_rb)
		ring_buffer_put(old_rb);
out:
	return ret;
}

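/*
 * perf_event_set_output() is reached from user space through the
 * PERF_EVENT_IOC_SET_OUTPUT ioctl on an event fd.  Hedged sketch, with
 * fd_a and fd_b standing for two hypothetical descriptors returned by
 * perf_event_open() on the same cpu:
 *
 *	ioctl(fd_b, PERF_EVENT_IOC_SET_OUTPUT, fd_a);
 *
 * After this, samples from fd_b are written into the ring buffer that is
 * mmap()ed on fd_a; the call fails once fd_b has an active mmap() of its
 * own.
 */
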
/**
 * sys_perf_event_open - open a performance event, associate it with a task/cpu
 *
 * @attr_uptr:	event_id type attributes for monitoring/sampling
 * @pid:		target pid
 * @cpu:		target cpu
 * @group_fd:		group leader event fd
 */
SYSCALL_DEFINE5(perf_event_open,
		struct perf_event_attr __user *, attr_uptr,
		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
6004 6005
	struct perf_event *group_leader = NULL, *output_event = NULL;
	struct perf_event *event, *sibling;
6006 6007 6008
	struct perf_event_attr attr;
	struct perf_event_context *ctx;
	struct file *event_file = NULL;
6009
	struct file *group_file = NULL;
	struct task_struct *task = NULL;
6011
	struct pmu *pmu;
6012
	int event_fd;
6013
	int move_group = 0;
6014
	int fput_needed = 0;
6015
	int err;
T
Thomas Gleixner 已提交
6016

6017
	/* for future expandability... */
	if (flags & ~PERF_FLAG_ALL)
6019 6020
		return -EINVAL;

6021 6022 6023
	err = perf_copy_attr(attr_uptr, &attr);
	if (err)
		return err;
6024

6025 6026 6027 6028 6029
	if (!attr.exclude_kernel) {
		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
	}

6030
	if (attr.freq) {
6031
		if (attr.sample_freq > sysctl_perf_event_sample_rate)
6032 6033 6034
			return -EINVAL;
	}

	/*
	 * In cgroup mode, the pid argument is used to pass the fd
	 * opened to the cgroup directory in cgroupfs. The cpu argument
	 * designates the cpu on which to monitor threads from that
	 * cgroup.
	 */
	if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
		return -EINVAL;

6044 6045 6046 6047
	event_fd = get_unused_fd_flags(O_RDWR);
	if (event_fd < 0)
		return event_fd;

6048 6049 6050 6051
	if (group_fd != -1) {
		group_leader = perf_fget_light(group_fd, &fput_needed);
		if (IS_ERR(group_leader)) {
			err = PTR_ERR(group_leader);
6052
			goto err_fd;
6053 6054 6055 6056 6057 6058 6059 6060
		}
		group_file = group_leader->filp;
		if (flags & PERF_FLAG_FD_OUTPUT)
			output_event = group_leader;
		if (flags & PERF_FLAG_FD_NO_GROUP)
			group_leader = NULL;
	}

	if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
6062 6063 6064 6065 6066 6067 6068
		task = find_lively_task_by_vpid(pid);
		if (IS_ERR(task)) {
			err = PTR_ERR(task);
			goto err_group_fd;
		}
	}

6069
	event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL);
6070 6071
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
6072
		goto err_task;
6073 6074
	}

	if (flags & PERF_FLAG_PID_CGROUP) {
		err = perf_cgroup_connect(pid, event, &attr, group_leader);
		if (err)
			goto err_alloc;
6079 6080 6081 6082 6083 6084 6085
		/*
		 * one more event:
		 * - that has cgroup constraint on event->cpu
		 * - that may need work on context switch
		 */
		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
		jump_label_inc(&perf_sched_events);
	}

6088 6089 6090 6091 6092
	/*
	 * Special case software events and allow them to be part of
	 * any hardware group.
	 */
	pmu = event->pmu;
6093 6094 6095 6096 6097 6098 6099 6100 6101 6102 6103 6104 6105 6106 6107 6108 6109 6110 6111 6112 6113 6114 6115

	if (group_leader &&
	    (is_software_event(event) != is_software_event(group_leader))) {
		if (is_software_event(event)) {
			/*
			 * If event and group_leader are not both a software
			 * event, and event is, then group leader is not.
			 *
			 * Allow the addition of software events to !software
			 * groups, this is safe because software events never
			 * fail to schedule.
			 */
			pmu = group_leader->pmu;
		} else if (is_software_event(group_leader) &&
			   (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
			/*
			 * In case the group is a pure software group, and we
			 * try to add a hardware event, move the whole group to
			 * the hardware context.
			 */
			move_group = 1;
		}
	}
6116 6117 6118 6119

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pmu, task, cpu);
6121 6122
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
6123
		goto err_alloc;
6124 6125
	}

6126 6127 6128 6129 6130
	if (task) {
		put_task_struct(task);
		task = NULL;
	}

	/*
6132
	 * Look up the group leader (we will attach this event to it):
6133
	 */
6134
	if (group_leader) {
6135
		err = -EINVAL;
6136 6137

		/*
		 * Do not allow a recursive hierarchy (this new sibling
		 * becoming part of another group-sibling):
		 */
		if (group_leader->group_leader != group_leader)
6142
			goto err_context;
		/*
		 * Do not allow attaching to a group in a different
		 * task or CPU context:
6146
		 */
6147 6148 6149 6150 6151 6152 6153 6154
		if (move_group) {
			if (group_leader->ctx->type != ctx->type)
				goto err_context;
		} else {
			if (group_leader->ctx != ctx)
				goto err_context;
		}

6155 6156 6157
		/*
		 * Only a group leader can be exclusive or pinned
		 */
6158
		if (attr.exclusive || attr.pinned)
6159
			goto err_context;
6160 6161 6162 6163 6164
	}

	if (output_event) {
		err = perf_event_set_output(event, output_event);
		if (err)
6165
			goto err_context;
6166
	}

6168 6169 6170
	event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
	if (IS_ERR(event_file)) {
		err = PTR_ERR(event_file);
6171
		goto err_context;
6172
	}
6173

6174 6175 6176 6177
	if (move_group) {
		struct perf_event_context *gctx = group_leader->ctx;

		mutex_lock(&gctx->mutex);
6178
		perf_remove_from_context(group_leader);
6179 6180
		list_for_each_entry(sibling, &group_leader->sibling_list,
				    group_entry) {
6181
			perf_remove_from_context(sibling);
6182 6183 6184 6185
			put_ctx(gctx);
		}
		mutex_unlock(&gctx->mutex);
		put_ctx(gctx);
6186
	}
6187

6188
	event->filp = event_file;
6189
	WARN_ON_ONCE(ctx->parent_ctx);
6190
	mutex_lock(&ctx->mutex);
6191 6192 6193 6194 6195 6196 6197 6198 6199 6200 6201

	if (move_group) {
		perf_install_in_context(ctx, group_leader, cpu);
		get_ctx(ctx);
		list_for_each_entry(sibling, &group_leader->sibling_list,
				    group_entry) {
			perf_install_in_context(ctx, sibling, cpu);
			get_ctx(ctx);
		}
	}

6202
	perf_install_in_context(ctx, event, cpu);
6203
	++ctx->generation;
6204
	perf_unpin_context(ctx);
6205
	mutex_unlock(&ctx->mutex);
6206

6207
	event->owner = current;

6209 6210 6211
	mutex_lock(&current->perf_event_mutex);
	list_add_tail(&event->owner_entry, &current->perf_event_list);
	mutex_unlock(&current->perf_event_mutex);
6212

6213 6214 6215 6216
	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(event);
6217
	perf_event__id_header_size(event);
6218

6219 6220 6221 6222 6223 6224
	/*
	 * Drop the reference on the group_event after placing the
	 * new event on the sibling_list. This ensures destruction
	 * of the group leader will find the pointer to itself in
	 * perf_group_detach().
	 */
6225 6226 6227
	fput_light(group_file, fput_needed);
	fd_install(event_fd, event_file);
	return event_fd;

6229
err_context:
6230
	perf_unpin_context(ctx);
6231
	put_ctx(ctx);
6232
err_alloc:
6233
	free_event(event);
err_task:
	if (task)
		put_task_struct(task);
6237
err_group_fd:
6238
	fput_light(group_file, fput_needed);
6239 6240
err_fd:
	put_unused_fd(event_fd);
6241
	return err;
}

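/*
 * Illustrative user-space sketch (not part of this file).  There is no
 * libc wrapper for this system call, so it is normally invoked through
 * syscall(2); the attr setup mirrors the perf_copy_attr() example above.
 * pid == 0 with cpu == -1 means "the calling task, on any cpu", and the
 * workload to be measured runs between the ENABLE and DISABLE ioctls.
 *
 *	struct perf_event_attr attr;
 *	long long count;
 *	int fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size     = sizeof(attr);
 *	attr.type     = PERF_TYPE_HARDWARE;
 *	attr.config   = PERF_COUNT_HW_CPU_CYCLES;
 *	attr.disabled = 1;
 *
 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	...
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 */
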
/**
 * perf_event_create_kernel_counter
 *
 * @attr: attributes of the counter to create
 * @cpu: cpu to which the counter is bound
 * @task: task to profile (NULL for percpu)
 * @overflow_handler: callback to run when the counter overflows (may be NULL)
 */
struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t overflow_handler)
{
	struct perf_event_context *ctx;
	struct perf_event *event;
	int err;

	/*
	 * Get the target context (task or percpu):
	 */

	event = perf_event_alloc(attr, cpu, task, NULL, NULL, overflow_handler);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err;
	}

	ctx = find_get_context(event->pmu, task, cpu);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err_free;
	}

	event->filp = NULL;
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, event, cpu);
	++ctx->generation;
	perf_unpin_context(ctx);
	mutex_unlock(&ctx->mutex);

	return event;

err_free:
	free_event(event);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);

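/*
 * In-kernel usage sketch (illustrative; hw_breakpoint and the hardlockup
 * watchdog are real callers of this interface).  The cpu choice here is
 * arbitrary, and releasing the event is assumed to go through
 * perf_event_release_kernel(), which is not shown in this section.
 *
 *	struct perf_event_attr attr;
 *	struct perf_event *event;
 *	int cpu = 0;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size   = sizeof(attr);
 *	attr.type   = PERF_TYPE_HARDWARE;
 *	attr.config = PERF_COUNT_HW_CPU_CYCLES;
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu, NULL, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 */
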
static void sync_child_event(struct perf_event *child_event,
6294
			       struct task_struct *child)
6295
{
6296
	struct perf_event *parent_event = child_event->parent;
6297
	u64 child_val;
6298

6299 6300
	if (child_event->attr.inherit_stat)
		perf_event_read_event(child_event, child);
6301

P
Peter Zijlstra 已提交
6302
	child_val = perf_event_count(child_event);
6303 6304 6305 6306

	/*
	 * Add back the child's count to the parent's count:
	 */
6307
	atomic64_add(child_val, &parent_event->child_count);
6308 6309 6310 6311
	atomic64_add(child_event->total_time_enabled,
		     &parent_event->child_total_time_enabled);
	atomic64_add(child_event->total_time_running,
		     &parent_event->child_total_time_running);
6312 6313

	/*
6314
	 * Remove this event from the parent's list
6315
	 */
6316 6317 6318 6319
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_del_init(&child_event->child_list);
	mutex_unlock(&parent_event->child_mutex);
6320 6321

	/*
6322
	 * Release the parent event, if this was the last
6323 6324
	 * reference to it.
	 */
6325
	fput(parent_event->filp);
6326 6327
}

6328
static void
6329 6330
__perf_event_exit_task(struct perf_event *child_event,
			 struct perf_event_context *child_ctx,
6331
			 struct task_struct *child)
6332
{
6333 6334 6335 6336 6337
	if (child_event->parent) {
		raw_spin_lock_irq(&child_ctx->lock);
		perf_group_detach(child_event);
		raw_spin_unlock_irq(&child_ctx->lock);
	}
6338

6339
	perf_remove_from_context(child_event);
6340

6341
	/*
6342
	 * It can happen that the parent exits first, and has events
6343
	 * that are still around due to the child reference. These
6344
	 * events need to be zapped.
6345
	 */
6346
	if (child_event->parent) {
6347 6348
		sync_child_event(child_event, child);
		free_event(child_event);
6349
	}
6350 6351
}

P
Peter Zijlstra 已提交
6352
static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
6353
{
6354 6355
	struct perf_event *child_event, *tmp;
	struct perf_event_context *child_ctx;
6356
	unsigned long flags;
6357

P
Peter Zijlstra 已提交
6358
	if (likely(!child->perf_event_ctxp[ctxn])) {
6359
		perf_event_task(child, NULL, 0);
6360
		return;
P
Peter Zijlstra 已提交
6361
	}
6362

6363
	local_irq_save(flags);
6364 6365 6366 6367 6368 6369
	/*
	 * We can't reschedule here because interrupts are disabled,
	 * and either child is current or it is a task that can't be
	 * scheduled, so we are now safe from rescheduling changing
	 * our context.
	 */
6370
	child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
6371 6372 6373

	/*
	 * Take the context lock here so that if find_get_context is
6374
	 * reading child->perf_event_ctxp, we wait until it has
6375 6376
	 * incremented the context's refcount before we do put_ctx below.
	 */
6377
	raw_spin_lock(&child_ctx->lock);
6378
	task_ctx_sched_out(child_ctx);
P
Peter Zijlstra 已提交
6379
	child->perf_event_ctxp[ctxn] = NULL;
6380 6381 6382
	/*
	 * If this context is a clone; unclone it so it can't get
	 * swapped to another process while we're removing all
6383
	 * the events from it.
6384 6385
	 */
	unclone_ctx(child_ctx);
6386
	update_context_time(child_ctx);
6387
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
P
Peter Zijlstra 已提交
6388 6389

	/*
6390 6391 6392
	 * Report the task dead after unscheduling the events so that we
	 * won't get any samples after PERF_RECORD_EXIT. We can however still
	 * get a few PERF_RECORD_READ events.
P
Peter Zijlstra 已提交
6393
	 */
6394
	perf_event_task(child, child_ctx, 0);
6395

6396 6397 6398
	/*
	 * We can recurse on the same lock type through:
	 *
6399 6400 6401
	 *   __perf_event_exit_task()
	 *     sync_child_event()
	 *       fput(parent_event->filp)
6402 6403 6404 6405 6406
	 *         perf_release()
	 *           mutex_lock(&ctx->mutex)
	 *
	 * But since it's the parent context it won't be the same instance.
	 */
6407
	mutex_lock(&child_ctx->mutex);
6408

6409
again:
6410 6411 6412 6413 6414
	list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
				 group_entry)
		__perf_event_exit_task(child_event, child_ctx, child);

	list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
6415
				 group_entry)
6416
		__perf_event_exit_task(child_event, child_ctx, child);
6417 6418

	/*
6419
	 * If the last event was a group event, it will have appended all
6420 6421 6422
	 * its siblings to the list, but we obtained 'tmp' before that which
	 * will still point to the list head terminating the iteration.
	 */
6423 6424
	if (!list_empty(&child_ctx->pinned_groups) ||
	    !list_empty(&child_ctx->flexible_groups))
6425
		goto again;
6426 6427 6428 6429

	mutex_unlock(&child_ctx->mutex);

	put_ctx(child_ctx);
6430 6431
}

P
Peter Zijlstra 已提交
6432 6433 6434 6435 6436
/*
 * When a child task exits, feed back event values to parent events.
 */
void perf_event_exit_task(struct task_struct *child)
{
P
Peter Zijlstra 已提交
6437
	struct perf_event *event, *tmp;
P
Peter Zijlstra 已提交
6438 6439
	int ctxn;

P
Peter Zijlstra 已提交
6440 6441 6442 6443 6444 6445 6446 6447 6448 6449 6450 6451 6452 6453 6454
	mutex_lock(&child->perf_event_mutex);
	list_for_each_entry_safe(event, tmp, &child->perf_event_list,
				 owner_entry) {
		list_del_init(&event->owner_entry);

		/*
		 * Ensure the list deletion is visible before we clear
		 * the owner, closes a race against perf_release() where
		 * we need to serialize on the owner->perf_event_mutex.
		 */
		smp_wmb();
		event->owner = NULL;
	}
	mutex_unlock(&child->perf_event_mutex);

P
Peter Zijlstra 已提交
6455 6456 6457 6458
	for_each_task_context_nr(ctxn)
		perf_event_exit_task_context(child, ctxn);
}

6459 6460 6461 6462 6463 6464 6465 6466 6467 6468 6469 6470 6471 6472
static void perf_free_event(struct perf_event *event,
			    struct perf_event_context *ctx)
{
	struct perf_event *parent = event->parent;

	if (WARN_ON_ONCE(!parent))
		return;

	mutex_lock(&parent->child_mutex);
	list_del_init(&event->child_list);
	mutex_unlock(&parent->child_mutex);

	fput(parent->filp);

6473
	perf_group_detach(event);
6474 6475 6476 6477
	list_del_event(event, ctx);
	free_event(event);
}

6478 6479
/*
 * free an unexposed, unused context as created by inheritance by
 * perf_event_init_task below, used by fork() in case of failure.
 */
6482
void perf_event_free_task(struct task_struct *task)
6483
{
P
Peter Zijlstra 已提交
6484
	struct perf_event_context *ctx;
6485
	struct perf_event *event, *tmp;
P
Peter Zijlstra 已提交
6486
	int ctxn;
6487

P
Peter Zijlstra 已提交
6488 6489 6490 6491
	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;
6492

P
Peter Zijlstra 已提交
6493
		mutex_lock(&ctx->mutex);
6494
again:
P
Peter Zijlstra 已提交
6495 6496 6497
		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
				group_entry)
			perf_free_event(event, ctx);
6498

P
Peter Zijlstra 已提交
6499 6500 6501
		list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
				group_entry)
			perf_free_event(event, ctx);
6502

P
Peter Zijlstra 已提交
6503 6504 6505
		if (!list_empty(&ctx->pinned_groups) ||
				!list_empty(&ctx->flexible_groups))
			goto again;
6506

P
Peter Zijlstra 已提交
6507
		mutex_unlock(&ctx->mutex);
6508

P
Peter Zijlstra 已提交
6509 6510
		put_ctx(ctx);
	}
6511 6512
}

6513 6514 6515 6516 6517 6518 6519 6520
void perf_event_delayed_put(struct task_struct *task)
{
	int ctxn;

	for_each_task_context_nr(ctxn)
		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
}

/*
 * inherit an event from parent task to child task:
 */
static struct perf_event *
inherit_event(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event *group_leader,
	      struct perf_event_context *child_ctx)
{
	struct perf_event *child_event;
6533
	unsigned long flags;
P
Peter Zijlstra 已提交
6534 6535 6536 6537 6538 6539 6540 6541 6542 6543 6544 6545

	/*
	 * Instead of creating recursive hierarchies of events,
	 * we link inherited events back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_event->parent)
		parent_event = parent_event->parent;

	child_event = perf_event_alloc(&parent_event->attr,
					   parent_event->cpu,
6546
					   child,
P
Peter Zijlstra 已提交
6547 6548 6549 6550 6551 6552 6553 6554 6555 6556 6557 6558 6559 6560 6561 6562 6563 6564 6565 6566 6567 6568 6569 6570 6571 6572 6573 6574 6575
					   group_leader, parent_event,
					   NULL);
	if (IS_ERR(child_event))
		return child_event;
	get_ctx(child_ctx);

	/*
	 * Make the child state follow the state of the parent event,
	 * not its attr.disabled bit.  We hold the parent's mutex,
	 * so we won't race with perf_event_{en, dis}able_family.
	 */
	if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
		child_event->state = PERF_EVENT_STATE_INACTIVE;
	else
		child_event->state = PERF_EVENT_STATE_OFF;

	if (parent_event->attr.freq) {
		u64 sample_period = parent_event->hw.sample_period;
		struct hw_perf_event *hwc = &child_event->hw;

		hwc->sample_period = sample_period;
		hwc->last_period   = sample_period;

		local64_set(&hwc->period_left, sample_period);
	}

	child_event->ctx = child_ctx;
	child_event->overflow_handler = parent_event->overflow_handler;

6576 6577 6578 6579
	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(child_event);
6580
	perf_event__id_header_size(child_event);
6581

P
Peter Zijlstra 已提交
6582 6583 6584
	/*
	 * Link it up in the child's context:
	 */
6585
	raw_spin_lock_irqsave(&child_ctx->lock, flags);
P
Peter Zijlstra 已提交
6586
	add_event_to_ctx(child_event, child_ctx);
6587
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
P
Peter Zijlstra 已提交
6588 6589 6590 6591 6592 6593 6594 6595 6596 6597 6598 6599 6600 6601 6602 6603 6604 6605 6606 6607 6608 6609 6610 6611 6612 6613 6614 6615 6616 6617 6618 6619 6620 6621 6622 6623 6624 6625 6626 6627 6628

	/*
	 * Get a reference to the parent filp - we will fput it
	 * when the child event exits. This is safe to do because
	 * we are in the parent and we know that the filp still
	 * exists and has a nonzero count:
	 */
	atomic_long_inc(&parent_event->filp->f_count);

	/*
	 * Link this into the parent event's child list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_add_tail(&child_event->child_list, &parent_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	return child_event;
}

static int inherit_group(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event_context *child_ctx)
{
	struct perf_event *leader;
	struct perf_event *sub;
	struct perf_event *child_ctr;

	leader = inherit_event(parent_event, parent, parent_ctx,
				 child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
		child_ctr = inherit_event(sub, parent, parent_ctx,
					    child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
6629 6630 6631 6632 6633
}

static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
		   struct perf_event_context *parent_ctx,
P
Peter Zijlstra 已提交
6634
		   struct task_struct *child, int ctxn,
6635 6636 6637
		   int *inherited_all)
{
	int ret;
P
Peter Zijlstra 已提交
6638
	struct perf_event_context *child_ctx;
6639 6640 6641 6642

	if (!event->attr.inherit) {
		*inherited_all = 0;
		return 0;
6643 6644
	}

6645
	child_ctx = child->perf_event_ctxp[ctxn];
6646 6647 6648 6649 6650 6651 6652
	if (!child_ctx) {
		/*
		 * This is executed from the parent task context, so
		 * inherit events that have been marked for cloning.
		 * First allocate and initialize a context for the
		 * child.
		 */
6653

6654
		child_ctx = alloc_perf_context(event->pmu, child);
6655 6656
		if (!child_ctx)
			return -ENOMEM;
6657

P
Peter Zijlstra 已提交
6658
		child->perf_event_ctxp[ctxn] = child_ctx;
6659 6660 6661 6662 6663 6664 6665 6666 6667
	}

	ret = inherit_group(event, parent, parent_ctx,
			    child, child_ctx);

	if (ret)
		*inherited_all = 0;

	return ret;
6668 6669
}

6670
/*
6671
 * Initialize the perf_event context in task_struct
6672
 */
P
Peter Zijlstra 已提交
6673
int perf_event_init_context(struct task_struct *child, int ctxn)
6674
{
6675
	struct perf_event_context *child_ctx, *parent_ctx;
6676 6677
	struct perf_event_context *cloned_ctx;
	struct perf_event *event;
6678
	struct task_struct *parent = current;
6679
	int inherited_all = 1;
6680
	unsigned long flags;
6681
	int ret = 0;
6682

P
Peter Zijlstra 已提交
6683
	if (likely(!parent->perf_event_ctxp[ctxn]))
6684 6685
		return 0;

6686
	/*
6687 6688
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
6689
	 */
P
Peter Zijlstra 已提交
6690
	parent_ctx = perf_pin_task_context(parent, ctxn);
6691

6692 6693 6694 6695 6696 6697 6698
	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

6699 6700 6701 6702
	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
6703
	mutex_lock(&parent_ctx->mutex);
6704 6705 6706 6707 6708

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
6709
	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
P
Peter Zijlstra 已提交
6710 6711
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
6712 6713 6714
		if (ret)
			break;
	}
6715

6716 6717 6718 6719 6720 6721 6722 6723 6724
	/*
	 * We can't hold ctx->lock when iterating the ->flexible_group list due
	 * to allocations, but we need to prevent rotation because
	 * rotate_ctx() will change the list from interrupt context.
	 */
	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 1;
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);

6725
	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
P
Peter Zijlstra 已提交
6726 6727
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
6728
		if (ret)
6729
			break;
6730 6731
	}

6732 6733 6734
	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 0;

P
Peter Zijlstra 已提交
6735
	child_ctx = child->perf_event_ctxp[ctxn];
6736

6737
	if (child_ctx && inherited_all) {
6738 6739 6740
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
P
Peter Zijlstra 已提交
6741 6742 6743
		 *
		 * Note that if the parent is a clone, the holding of
		 * parent_ctx->lock avoids it from being uncloned.
6744
		 */
P
Peter Zijlstra 已提交
6745
		cloned_ctx = parent_ctx->parent_ctx;
6746 6747
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
6748
			child_ctx->parent_gen = parent_ctx->parent_gen;
6749 6750 6751 6752 6753
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
6754 6755
	}

P
Peter Zijlstra 已提交
6756
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
6757
	mutex_unlock(&parent_ctx->mutex);
6758

6759
	perf_unpin_context(parent_ctx);
6760
	put_ctx(parent_ctx);
6761

6762
	return ret;
6763 6764
}

P
Peter Zijlstra 已提交
6765 6766 6767 6768 6769 6770 6771
/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_task(struct task_struct *child)
{
	int ctxn, ret;

6772 6773 6774 6775
	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
	mutex_init(&child->perf_event_mutex);
	INIT_LIST_HEAD(&child->perf_event_list);

P
Peter Zijlstra 已提交
6776 6777 6778 6779 6780 6781 6782 6783 6784
	for_each_task_context_nr(ctxn) {
		ret = perf_event_init_context(child, ctxn);
		if (ret)
			return ret;
	}

	return 0;
}

6785 6786
static void __init perf_event_init_all_cpus(void)
{
6787
	struct swevent_htable *swhash;
6788 6789 6790
	int cpu;

	for_each_possible_cpu(cpu) {
6791 6792
		swhash = &per_cpu(swevent_htable, cpu);
		mutex_init(&swhash->hlist_mutex);
6793
		INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
6794 6795 6796
	}
}

6797
static void __cpuinit perf_event_init_cpu(int cpu)
T
Thomas Gleixner 已提交
6798
{
P
Peter Zijlstra 已提交
6799
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
T
Thomas Gleixner 已提交
6800

6801 6802
	mutex_lock(&swhash->hlist_mutex);
	if (swhash->hlist_refcount > 0) {
6803 6804
		struct swevent_hlist *hlist;

6805 6806 6807
		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
		WARN_ON(!hlist);
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
6808
	}
6809
	mutex_unlock(&swhash->hlist_mutex);
T
Thomas Gleixner 已提交
6810 6811
}

P
Peter Zijlstra 已提交
6812
#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
6813
static void perf_pmu_rotate_stop(struct pmu *pmu)
T
Thomas Gleixner 已提交
6814
{
6815 6816 6817 6818 6819 6820 6821
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

	WARN_ON(!irqs_disabled());

	list_del_init(&cpuctx->rotation_list);
}

P
Peter Zijlstra 已提交
6822
static void __perf_event_exit_context(void *__info)
T
Thomas Gleixner 已提交
6823
{
P
Peter Zijlstra 已提交
6824
	struct perf_event_context *ctx = __info;
6825
	struct perf_event *event, *tmp;
T
Thomas Gleixner 已提交
6826

P
Peter Zijlstra 已提交
6827
	perf_pmu_rotate_stop(ctx->pmu);
6828

6829
	list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
6830
		__perf_remove_from_context(event);
6831
	list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
6832
		__perf_remove_from_context(event);
T
Thomas Gleixner 已提交
6833
}
P
Peter Zijlstra 已提交
6834 6835 6836 6837 6838 6839 6840 6841 6842

static void perf_event_exit_cpu_context(int cpu)
{
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
6843
		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
P
Peter Zijlstra 已提交
6844 6845 6846 6847 6848 6849 6850 6851

		mutex_lock(&ctx->mutex);
		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
		mutex_unlock(&ctx->mutex);
	}
	srcu_read_unlock(&pmus_srcu, idx);
}

6852
static void perf_event_exit_cpu(int cpu)
T
Thomas Gleixner 已提交
6853
{
6854
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
6855

6856 6857 6858
	mutex_lock(&swhash->hlist_mutex);
	swevent_hlist_release(swhash);
	mutex_unlock(&swhash->hlist_mutex);
6859

P
Peter Zijlstra 已提交
6860
	perf_event_exit_cpu_context(cpu);
T
Thomas Gleixner 已提交
6861 6862
}
#else
6863
static inline void perf_event_exit_cpu(int cpu) { }
T
Thomas Gleixner 已提交
6864 6865
#endif

P
Peter Zijlstra 已提交
6866 6867 6868 6869 6870 6871 6872 6873 6874 6875 6876 6877 6878 6879 6880 6881 6882 6883 6884 6885
static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{
	int cpu;

	for_each_online_cpu(cpu)
		perf_event_exit_cpu(cpu);

	return NOTIFY_OK;
}

/*
 * Run the perf reboot notifier at the very last possible moment so that
 * the generic watchdog code runs as long as possible.
 */
static struct notifier_block perf_reboot_notifier = {
	.notifier_call = perf_reboot,
	.priority = INT_MIN,
};

static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
T
Thomas Gleixner 已提交
6892 6893

	case CPU_UP_PREPARE:
P
Peter Zijlstra 已提交
6894
	case CPU_DOWN_FAILED:
6895
		perf_event_init_cpu(cpu);
T
Thomas Gleixner 已提交
6896 6897
		break;

P
Peter Zijlstra 已提交
6898
	case CPU_UP_CANCELED:
T
Thomas Gleixner 已提交
6899
	case CPU_DOWN_PREPARE:
6900
		perf_event_exit_cpu(cpu);
T
Thomas Gleixner 已提交
6901 6902 6903 6904 6905 6906 6907 6908 6909
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

6910
void __init perf_event_init(void)
{
6912 6913
	int ret;

	idr_init(&pmu_idr);

6916
	perf_event_init_all_cpus();
6917
	init_srcu_struct(&pmus_srcu);
	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
	perf_pmu_register(&perf_cpu_clock, NULL, -1);
	perf_pmu_register(&perf_task_clock, NULL, -1);
6921 6922
	perf_tp_register();
	perf_cpu_notifier(perf_cpu_notify);
	register_reboot_notifier(&perf_reboot_notifier);
6924 6925 6926

	ret = init_hw_breakpoint();
	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
}

static int __init perf_event_sysfs_init(void)
{
	struct pmu *pmu;
	int ret;

	mutex_lock(&pmus_lock);

	ret = bus_register(&pmu_bus);
	if (ret)
		goto unlock;

	list_for_each_entry(pmu, &pmus, entry) {
		if (!pmu->name || pmu->type < 0)
			continue;

		ret = pmu_dev_alloc(pmu);
		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
	}
	pmu_bus_running = 1;
	ret = 0;

unlock:
	mutex_unlock(&pmus_lock);

	return ret;
}
device_initcall(perf_event_sysfs_init);

#ifdef CONFIG_CGROUP_PERF
static struct cgroup_subsys_state *perf_cgroup_create(
	struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct perf_cgroup *jc;

6963
	jc = kzalloc(sizeof(*jc), GFP_KERNEL);
	if (!jc)
		return ERR_PTR(-ENOMEM);

	jc->info = alloc_percpu(struct perf_cgroup_info);
	if (!jc->info) {
		kfree(jc);
		return ERR_PTR(-ENOMEM);
	}

	return &jc->css;
}

static void perf_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	struct perf_cgroup *jc;
	jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
			  struct perf_cgroup, css);
	free_percpu(jc->info);
	kfree(jc);
}

static int __perf_cgroup_move(void *info)
{
	struct task_struct *task = info;
	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
	return 0;
}

6993 6994
static void
perf_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *task)
{
	task_function_call(task, __perf_cgroup_move, task);
}

static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
		struct cgroup *old_cgrp, struct task_struct *task)
{
	/*
	 * cgroup_exit() is called in the copy_process() failure path.
	 * Ignore this case since the task hasn't run yet; this avoids
	 * trying to poke a half freed task state from generic code.
	 */
	if (!(task->flags & PF_EXITING))
		return;

7010
	perf_cgroup_attach_task(cgrp, task);
}

struct cgroup_subsys perf_subsys = {
7014 7015 7016 7017 7018
	.name		= "perf_event",
	.subsys_id	= perf_subsys_id,
	.create		= perf_cgroup_create,
	.destroy	= perf_cgroup_destroy,
	.exit		= perf_cgroup_exit,
7019
	.attach_task	= perf_cgroup_attach_task,
};
#endif /* CONFIG_CGROUP_PERF */