/*
 * Performance counter core code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/sysfs.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/vmstat.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_counter.h>
#include <linux/dcache.h>

#include <asm/irq_regs.h>

/*
 * Each CPU has a list of per CPU counters:
 */
DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

int perf_max_counters __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

static atomic_t nr_counters __read_mostly;
static atomic_t nr_mmap_tracking __read_mostly;
static atomic_t nr_munmap_tracking __read_mostly;
static atomic_t nr_comm_tracking __read_mostly;

int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
int sysctl_perf_counter_limit __read_mostly = 100000; /* max NMIs per second */

/*
 * Lock for (sysadmin-configurable) counter reservations:
 */
static DEFINE_SPINLOCK(perf_resource_lock);

/*
 * Architecture provided APIs - weak aliases:
 */
extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	return NULL;
}

void __weak hw_perf_disable(void)		{ barrier(); }
void __weak hw_perf_enable(void)		{ barrier(); }

void __weak hw_perf_counter_setup(int cpu)	{ barrier(); }
int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx, int cpu)
{
	return 0;
}

void __weak perf_counter_print_debug(void)	{ }

static DEFINE_PER_CPU(int, disable_count);

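/*
 * perf_disable()/perf_enable() nest: the hardware is disabled on the
 * first perf_disable() and re-enabled only when the per-CPU
 * disable_count drops back to zero.
 */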
void __perf_disable(void)
{
	__get_cpu_var(disable_count)++;
}

bool __perf_enable(void)
{
	return !--__get_cpu_var(disable_count);
}

void perf_disable(void)
{
	__perf_disable();
	hw_perf_disable();
}

void perf_enable(void)
{
	if (__perf_enable())
		hw_perf_enable();
}

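/*
 * Counter contexts are reference counted; the final put_ctx() drops
 * the references on the parent context and the task and frees the
 * context after an RCU grace period, since it is also looked up under
 * rcu_read_lock().
 */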
static void get_ctx(struct perf_counter_context *ctx)
{
	atomic_inc(&ctx->refcount);
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_counter_context *ctx;

	ctx = container_of(head, struct perf_counter_context, rcu_head);
	kfree(ctx);
}

static void put_ctx(struct perf_counter_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}

/*
 * Add a counter to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *group_leader = counter->group_leader;

	/*
	 * Depending on whether it is a standalone or sibling counter,
	 * add it straight to the context's counter list, or to the group
	 * leader's sibling list:
	 */
	if (group_leader == counter)
		list_add_tail(&counter->list_entry, &ctx->counter_list);
	else {
		list_add_tail(&counter->list_entry, &group_leader->sibling_list);
		group_leader->nr_siblings++;
	}

	list_add_rcu(&counter->event_entry, &ctx->event_list);
	ctx->nr_counters++;
}

/*
 * Remove a counter from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *sibling, *tmp;

	if (list_empty(&counter->list_entry))
		return;
	ctx->nr_counters--;

	list_del_init(&counter->list_entry);
	list_del_rcu(&counter->event_entry);

	if (counter->group_leader != counter)
		counter->group_leader->nr_siblings--;

	/*
	 * If this was a group counter with sibling counters then
	 * upgrade the siblings to singleton counters by adding them
	 * to the context list directly:
	 */
	list_for_each_entry_safe(sibling, tmp,
				 &counter->sibling_list, list_entry) {

		list_move_tail(&sibling->list_entry, &ctx->counter_list);
		sibling->group_leader = sibling;
	}
}

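/*
 * Take one counter off the PMU: mark it INACTIVE, record when it
 * stopped, disable it in the pmu and update the active counts of the
 * CPU and the context.
 */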
static void
counter_sched_out(struct perf_counter *counter,
		  struct perf_cpu_context *cpuctx,
		  struct perf_counter_context *ctx)
{
	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter->state = PERF_COUNTER_STATE_INACTIVE;
	counter->tstamp_stopped = ctx->time;
	counter->pmu->disable(counter);
	counter->oncpu = -1;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (counter->hw_event.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

static void
group_sched_out(struct perf_counter *group_counter,
		struct perf_cpu_context *cpuctx,
		struct perf_counter_context *ctx)
{
	struct perf_counter *counter;

	if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter_sched_out(group_counter, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
		counter_sched_out(counter, cpuctx, ctx);

	if (group_counter->hw_event.exclusive)
		cpuctx->exclusive = 0;
}

/*
 * Cross CPU call to remove a performance counter
 *
 * We disable the counter on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_counter_remove_from_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock(&ctx->lock);
	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level.
	 */
	perf_disable();

	counter_sched_out(counter, cpuctx, ctx);

	list_del_counter(counter, ctx);

	if (!ctx->task) {
		/*
		 * Allow more per task counters with respect to the
		 * reservation:
		 */
		cpuctx->max_pertask =
			min(perf_max_counters - ctx->nr_counters,
			    perf_max_counters - perf_reserved_percpu);
	}

	perf_enable();
	spin_unlock(&ctx->lock);
}


/*
 * Remove the counter from a task's (or a CPU's) list of counters.
 *
 * Must be called with ctx->mutex held.
 *
 * CPU counters are removed with an smp call. For task counters we only
 * call when the task is on a CPU.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid.  This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_counter_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_counter_remove_from_context(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(counter->cpu,
					 __perf_counter_remove_from_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_counter_remove_from_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can remove the counter safely, if the call above did not
	 * succeed.
	 */
	if (!list_empty(&counter->list_entry)) {
		list_del_counter(counter, ctx);
	}
	spin_unlock_irq(&ctx->lock);
}

static inline u64 perf_clock(void)
{
	return cpu_clock(smp_processor_id());
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_counter_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

/*
 * Update the total_time_enabled and total_time_running fields for a counter.
 */
static void update_counter_times(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	u64 run_end;

	if (counter->state < PERF_COUNTER_STATE_INACTIVE)
		return;

	counter->total_time_enabled = ctx->time - counter->tstamp_enabled;

	if (counter->state == PERF_COUNTER_STATE_INACTIVE)
		run_end = counter->tstamp_stopped;
	else
		run_end = ctx->time;

	counter->total_time_running = run_end - counter->tstamp_running;
}

/*
 * Update total_time_enabled and total_time_running for all counters in a group.
 */
static void update_group_times(struct perf_counter *leader)
{
	struct perf_counter *counter;

	update_counter_times(leader);
	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		update_counter_times(counter);
}

/*
 * Cross CPU call to disable a performance counter
 */
static void __perf_counter_disable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock(&ctx->lock);

	/*
	 * If the counter is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
		update_context_time(ctx);
		update_counter_times(counter);
		if (counter == counter->group_leader)
			group_sched_out(counter, cpuctx, ctx);
		else
			counter_sched_out(counter, cpuctx, ctx);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock(&ctx->lock);
}

/*
 * Disable a counter.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_counter_for_each_child or perf_counter_for_each because they
 * hold the top-level counter's child_mutex, so any descendant that
 * goes to exit will block in sync_child_counter.
 * When called from perf_pending_counter it's OK because counter->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_counter_task_sched_out for this context.
 */
static void perf_counter_disable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_disable,
					 counter, 1);
		return;
	}

 retry:
	task_oncpu_function_call(task, __perf_counter_disable, counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the counter is still active, we need to retry the cross-call.
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
		update_counter_times(counter);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock_irq(&ctx->lock);
}

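/*
 * Put a single counter on the PMU: mark it ACTIVE, record the CPU it
 * runs on and call the pmu enable method.  Reverts to INACTIVE and
 * returns -EAGAIN if the hardware refuses the counter.
 */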
static int
counter_sched_in(struct perf_counter *counter,
		 struct perf_cpu_context *cpuctx,
		 struct perf_counter_context *ctx,
		 int cpu)
{
	if (counter->state <= PERF_COUNTER_STATE_OFF)
		return 0;

	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (counter->pmu->enable(counter)) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->oncpu = -1;
		return -EAGAIN;
	}

	counter->tstamp_running += ctx->time - counter->tstamp_stopped;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (counter->hw_event.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}

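/*
 * Schedule in a whole group: the leader and all siblings go on
 * together; if any member fails, everything scheduled so far is
 * backed out again and -EAGAIN is returned.
 */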
static int
group_sched_in(struct perf_counter *group_counter,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx,
	       int cpu)
{
	struct perf_counter *counter, *partial_group;
	int ret;

	if (group_counter->state == PERF_COUNTER_STATE_OFF)
		return 0;

	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
	if (ret)
		return ret < 0 ? ret : 0;

	group_counter->prev_state = group_counter->state;
	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
		return -EAGAIN;

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		counter->prev_state = counter->state;
		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
			partial_group = counter;
			goto group_error;
		}
	}

	return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter == partial_group)
			break;
		counter_sched_out(counter, cpuctx, ctx);
	}
	counter_sched_out(group_counter, cpuctx, ctx);

	return -EAGAIN;
}

/*
 * Return 1 for a group consisting entirely of software counters,
 * 0 if the group contains any hardware counters.
 */
static int is_software_only_group(struct perf_counter *leader)
{
	struct perf_counter *counter;

	if (!is_software_counter(leader))
		return 0;

	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		if (!is_software_counter(counter))
			return 0;

	return 1;
}

/*
 * Work out whether we can put this counter group on the CPU now.
 */
static int group_can_go_on(struct perf_counter *counter,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software counters can always go on.
	 */
	if (is_software_only_group(counter))
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * counters can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * counters on the CPU, it can't go on.
	 */
	if (counter->hw_event.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

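/*
 * Add a counter to its context and initialize its timestamps from the
 * context's current time.
 */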
static void add_counter_to_ctx(struct perf_counter *counter,
			       struct perf_counter_context *ctx)
{
	list_add_counter(counter, ctx);
	counter->prev_state = PERF_COUNTER_STATE_OFF;
	counter->tstamp_enabled = ctx->time;
	counter->tstamp_running = ctx->time;
	counter->tstamp_stopped = ctx->time;
}

/*
 * Cross CPU call to install and enable a performance counter
 *
 * Must be called with ctx->mutex held
 */
static void __perf_install_in_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	int cpu = smp_processor_id();
	int err;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 * Or possibly this is the right context but it isn't
	 * on this cpu because it had no counters.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level. NOP for non NMI based counters.
	 */
	perf_disable();

	add_counter_to_ctx(counter, ctx);

	/*
	 * Don't put the counter on if it is disabled or if
	 * it is in a group and the group isn't on.
	 */
	if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
	    (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
		goto unlock;

	/*
	 * An exclusive counter can't go on if there are already active
	 * hardware counters, and no hardware counter can go on if there
	 * is already an exclusive counter on.
	 */
	if (!group_can_go_on(counter, cpuctx, 1))
		err = -EEXIST;
	else
		err = counter_sched_in(counter, cpuctx, ctx, cpu);

	if (err) {
		/*
		 * This counter couldn't go on.  If it is in a group
		 * then we have to pull the whole group off.
		 * If the counter group is pinned then put it in error state.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->hw_event.pinned) {
			update_group_times(leader);
			leader->state = PERF_COUNTER_STATE_ERROR;
		}
	}

	if (!err && !ctx->task && cpuctx->max_pertask)
		cpuctx->max_pertask--;

 unlock:
	perf_enable();

	spin_unlock(&ctx->lock);
}

/*
 * Attach a performance counter to a context
 *
 * First we add the counter to the list with the hardware enable bit
 * in counter->hw_config cleared.
 *
 * If the counter is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 *
 * Must be called with ctx->mutex held.
 */
static void
perf_install_in_context(struct perf_counter_context *ctx,
			struct perf_counter *counter,
			int cpu)
{
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are installed via an smp call and
		 * the install is always successful.
		 */
		smp_call_function_single(cpu, __perf_install_in_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_install_in_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->is_active && list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can add the counter safely, if the call above did not
	 * succeed.
	 */
	if (list_empty(&counter->list_entry))
		add_counter_to_ctx(counter, ctx);
	spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to enable a performance counter
 */
static void __perf_counter_enable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	int err;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	counter->prev_state = counter->state;
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto unlock;
	counter->state = PERF_COUNTER_STATE_INACTIVE;
	counter->tstamp_enabled = ctx->time - counter->total_time_enabled;

	/*
	 * If the counter is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(counter, cpuctx, 1)) {
		err = -EEXIST;
	} else {
		perf_disable();
		if (counter == leader)
			err = group_sched_in(counter, cpuctx, ctx,
					     smp_processor_id());
		else
			err = counter_sched_in(counter, cpuctx, ctx,
					       smp_processor_id());
		perf_enable();
	}

	if (err) {
		/*
		 * If this counter can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->hw_event.pinned) {
			update_group_times(leader);
			leader->state = PERF_COUNTER_STATE_ERROR;
		}
	}

 unlock:
	spin_unlock(&ctx->lock);
}

/*
 * Enable a counter.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_counter_for_each_child or perf_counter_for_each as described
 * for perf_counter_disable.
 */
static void perf_counter_enable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_enable,
					 counter, 1);
		return;
	}

	spin_lock_irq(&ctx->lock);
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto out;

	/*
	 * If the counter is in error state, clear that first.
	 * That way, if we see the counter in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (counter->state == PERF_COUNTER_STATE_ERROR)
		counter->state = PERF_COUNTER_STATE_OFF;

 retry:
	spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_counter_enable, counter);

	spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the counter is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
		goto retry;

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_OFF) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->tstamp_enabled =
			ctx->time - counter->total_time_enabled;
	}
 out:
	spin_unlock_irq(&ctx->lock);
}

static int perf_counter_refresh(struct perf_counter *counter, int refresh)
{
	/*
	 * not supported on inherited counters
	 */
	if (counter->hw_event.inherit)
		return -EINVAL;

	atomic_add(refresh, &counter->event_limit);
	perf_counter_enable(counter);

	return 0;
}

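/*
 * Schedule out all active counters in a context and mark the context
 * inactive.
 */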
void __perf_counter_sched_out(struct perf_counter_context *ctx,
			      struct perf_cpu_context *cpuctx)
{
	struct perf_counter *counter;

	spin_lock(&ctx->lock);
	ctx->is_active = 0;
	if (likely(!ctx->nr_counters))
		goto out;
	update_context_time(ctx);

	perf_disable();
	if (ctx->nr_active) {
		list_for_each_entry(counter, &ctx->counter_list, list_entry) {
			if (counter != counter->group_leader)
				counter_sched_out(counter, cpuctx, ctx);
			else
				group_sched_out(counter, cpuctx, ctx);
		}
	}
	perf_enable();
 out:
	spin_unlock(&ctx->lock);
}

/*
 * Test whether two contexts are equivalent, i.e. whether they
 * have both been cloned from the same version of the same context
 * and they both have the same number of enabled counters.
 * If the number of enabled counters is the same, then the set
 * of enabled counters should be the same, because these are both
 * inherited contexts, therefore we can't access individual counters
 * in them directly with an fd; we can only enable/disable all
 * counters via prctl, or enable/disable all counters in a family
 * via ioctl, which will have the same effect on both contexts.
 */
static int context_equiv(struct perf_counter_context *ctx1,
			 struct perf_counter_context *ctx2)
{
	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
		&& ctx1->parent_gen == ctx2->parent_gen
		&& ctx1->parent_gen != ~0ull;
}

/*
 * Called from scheduler to remove the counters of the current task,
 * with interrupts disabled.
 *
 * We stop each counter and update the counter value in counter->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of counter _before_
 * accessing the counter control register. If a NMI hits, then it will
 * not restart the counter.
 */
void perf_counter_task_sched_out(struct task_struct *task,
				 struct task_struct *next, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = task->perf_counter_ctxp;
	struct perf_counter_context *next_ctx;
	struct perf_counter_context *parent;
	struct pt_regs *regs;
	int do_switch = 1;

	regs = task_pt_regs(task);
	perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs, 0);

	if (likely(!ctx || !cpuctx->task_ctx))
		return;

	update_context_time(ctx);

	rcu_read_lock();
	parent = rcu_dereference(ctx->parent_ctx);
	next_ctx = next->perf_counter_ctxp;
	if (parent && next_ctx &&
	    rcu_dereference(next_ctx->parent_ctx) == parent) {
		/*
		 * Looks like the two contexts are clones, so we might be
		 * able to optimize the context switch.  We lock both
		 * contexts and check that they are clones under the
		 * lock (including re-checking that neither has been
		 * uncloned in the meantime).  It doesn't matter which
		 * order we take the locks because no other cpu could
		 * be trying to lock both of these tasks.
		 */
		spin_lock(&ctx->lock);
		spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
		if (context_equiv(ctx, next_ctx)) {
			/*
			 * XXX do we need a memory barrier of sorts
			 * wrt to rcu_dereference() of perf_counter_ctxp
			 */
			task->perf_counter_ctxp = next_ctx;
			next->perf_counter_ctxp = ctx;
			ctx->task = next;
			next_ctx->task = task;
			do_switch = 0;
		}
		spin_unlock(&next_ctx->lock);
		spin_unlock(&ctx->lock);
	}
	rcu_read_unlock();

	if (do_switch) {
		__perf_counter_sched_out(ctx, cpuctx);
		cpuctx->task_ctx = NULL;
	}
}

/*
 * Called with IRQs disabled
 */
static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);

	if (!cpuctx->task_ctx)
		return;

	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
		return;

	__perf_counter_sched_out(ctx, cpuctx);
	cpuctx->task_ctx = NULL;
}

/*
 * Called with IRQs disabled
 */
static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
{
	__perf_counter_sched_out(&cpuctx->ctx, cpuctx);
}

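/*
 * Schedule in the counters of a context: pinned groups go on first
 * (and are moved to ERROR state if they cannot), then the remaining
 * groups, subject to each counter's cpu filter and the exclusive-group
 * rules.
 */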
static void
__perf_counter_sched_in(struct perf_counter_context *ctx,
			struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter *counter;
	int can_add_hw = 1;

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	if (likely(!ctx->nr_counters))
		goto out;

	ctx->timestamp = perf_clock();

	perf_disable();

	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
		    !counter->hw_event.pinned)
			continue;
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (counter != counter->group_leader)
			counter_sched_in(counter, cpuctx, ctx, cpu);
		else {
			if (group_can_go_on(counter, cpuctx, 1))
				group_sched_in(counter, cpuctx, ctx, cpu);
		}

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
			update_group_times(counter);
			counter->state = PERF_COUNTER_STATE_ERROR;
		}
	}

	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		/*
		 * Ignore counters in OFF or ERROR state, and
		 * ignore pinned counters since we did them already.
		 */
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
		    counter->hw_event.pinned)
			continue;

		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of counters:
		 */
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (counter != counter->group_leader) {
			if (counter_sched_in(counter, cpuctx, ctx, cpu))
				can_add_hw = 0;
		} else {
			if (group_can_go_on(counter, cpuctx, can_add_hw)) {
				if (group_sched_in(counter, cpuctx, ctx, cpu))
					can_add_hw = 0;
			}
		}
	}
	perf_enable();
 out:
	spin_unlock(&ctx->lock);
}

/*
 * Called from scheduler to add the counters of the current task
 * with interrupts disabled.
 *
 * We restore the counter value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of counter _before_
 * accessing the counter control register. If a NMI hits, then it will
 * keep the counter running.
 */
void perf_counter_task_sched_in(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = task->perf_counter_ctxp;

	if (likely(!ctx))
		return;
	if (cpuctx->task_ctx == ctx)
		return;
	__perf_counter_sched_in(ctx, cpuctx, cpu);
	cpuctx->task_ctx = ctx;
}

static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter_context *ctx = &cpuctx->ctx;

	__perf_counter_sched_in(ctx, cpuctx, cpu);
}

#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_counter *counter, int enable);
static void perf_log_period(struct perf_counter *counter, u64 period);

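/*
 * Re-tune the sampling period of frequency-based counters: estimate
 * the interrupt rate seen over the last tick and nudge hw.irq_period
 * towards the requested irq_freq.  Counters that hit MAX_INTERRUPTS
 * are unthrottled here as well.
 */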
static void perf_adjust_freq(struct perf_counter_context *ctx)
{
	struct perf_counter *counter;
	u64 interrupts, irq_period;
	u64 events, period;
	s64 delta;

	spin_lock(&ctx->lock);
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state != PERF_COUNTER_STATE_ACTIVE)
			continue;

		interrupts = counter->hw.interrupts;
		counter->hw.interrupts = 0;

		if (interrupts == MAX_INTERRUPTS) {
			perf_log_throttle(counter, 1);
			counter->pmu->unthrottle(counter);
			interrupts = 2*sysctl_perf_counter_limit/HZ;
		}

		if (!counter->hw_event.freq || !counter->hw_event.irq_freq)
			continue;

		events = HZ * interrupts * counter->hw.irq_period;
		period = div64_u64(events, counter->hw_event.irq_freq);

		delta = (s64)(1 + period - counter->hw.irq_period);
		delta >>= 1;

		irq_period = counter->hw.irq_period + delta;

		if (!irq_period)
			irq_period = 1;

		perf_log_period(counter, irq_period);

		counter->hw.irq_period = irq_period;
	}
	spin_unlock(&ctx->lock);
}

/*
 * Round-robin a context's counters:
 */
static void rotate_ctx(struct perf_counter_context *ctx)
{
	struct perf_counter *counter;

	if (!ctx->nr_counters)
		return;

	spin_lock(&ctx->lock);
	/*
1179
	 * Rotate the first entry last (works just fine for group counters too):
	 */
	perf_disable();
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		list_move_tail(&counter->list_entry, &ctx->counter_list);
		break;
	}
	perf_enable();

	spin_unlock(&ctx->lock);
}

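/*
 * Called from the scheduler tick: adjust sampling periods, then
 * schedule out, rotate and re-schedule both the CPU context and the
 * current task's context so counters that do not all fit on the PMU
 * get round-robined.
 */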
void perf_counter_task_tick(struct task_struct *curr, int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;

	if (!atomic_read(&nr_counters))
		return;

	cpuctx = &per_cpu(perf_cpu_context, cpu);
	ctx = curr->perf_counter_ctxp;

	perf_adjust_freq(&cpuctx->ctx);
	if (ctx)
		perf_adjust_freq(ctx);

	perf_counter_cpu_sched_out(cpuctx);
	if (ctx)
		__perf_counter_task_sched_out(ctx);

	rotate_ctx(&cpuctx->ctx);
	if (ctx)
		rotate_ctx(ctx);

	perf_counter_cpu_sched_in(cpuctx, cpu);
	if (ctx)
		perf_counter_task_sched_in(curr, cpu);
}

/*
 * Cross CPU call to read the hardware counter
 */
static void __read(void *info)
{
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	unsigned long flags;

	local_irq_save(flags);
	if (ctx->is_active)
		update_context_time(ctx);
	counter->pmu->read(counter);
	update_counter_times(counter);
	local_irq_restore(flags);
}

static u64 perf_counter_read(struct perf_counter *counter)
{
	/*
	 * If counter is enabled and currently active on a CPU, update the
	 * value in the counter structure:
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		smp_call_function_single(counter->oncpu,
					 __read, counter, 1);
	} else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
		update_counter_times(counter);
	}

	return atomic64_read(&counter->count);
}

/*
 * Initialize the perf_counter context in a task_struct:
 */
static void
__perf_counter_init_context(struct perf_counter_context *ctx,
			    struct task_struct *task)
{
	memset(ctx, 0, sizeof(*ctx));
	spin_lock_init(&ctx->lock);
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->counter_list);
	INIT_LIST_HEAD(&ctx->event_list);
	atomic_set(&ctx->refcount, 1);
	ctx->task = task;
}

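/*
 * Find the counter context for a given pid/cpu pair, allocating a
 * task context on first use, and return it with an extra reference
 * held.
 */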
static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;
	struct perf_counter_context *parent_ctx;
	struct task_struct *task;
	int err;

	/*
	 * If cpu is not a wildcard then this is a percpu counter:
	 */
	if (cpu != -1) {
		/* Must be root to operate on a CPU counter: */
		if (sysctl_perf_counter_priv && !capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);

		if (cpu < 0 || cpu > num_possible_cpus())
			return ERR_PTR(-EINVAL);

		/*
		 * We could be clever and allow to attach a counter to an
		 * offline CPU and activate it when the CPU comes up, but
		 * that's for later.
		 */
		if (!cpu_isset(cpu, cpu_online_map))
			return ERR_PTR(-ENODEV);

		cpuctx = &per_cpu(perf_cpu_context, cpu);
		ctx = &cpuctx->ctx;
		get_ctx(ctx);

		return ctx;
	}

	rcu_read_lock();
	if (!pid)
		task = current;
	else
		task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	if (!task)
		return ERR_PTR(-ESRCH);

	/*
	 * Can't attach counters to a dying task.
	 */
	err = -ESRCH;
	if (task->flags & PF_EXITING)
		goto errout;

	/* Reuse ptrace permission checks for now. */
	err = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto errout;

 retry_lock:
	rcu_read_lock();
 retry:
	ctx = rcu_dereference(task->perf_counter_ctxp);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_counter_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed.  Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so.  If we locked the right context, then it
		 * can't get swapped on us any more and we can
		 * unclone it if necessary.
		 * Once it's not a clone things will be stable.
		 */
		spin_lock_irq(&ctx->lock);
		if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
			spin_unlock_irq(&ctx->lock);
			goto retry;
		}
		parent_ctx = ctx->parent_ctx;
		if (parent_ctx) {
			put_ctx(parent_ctx);
			ctx->parent_ctx = NULL;		/* no longer a clone */
		}
		/*
		 * Get an extra reference before dropping the lock so that
		 * this context won't get freed if the task exits.
		 */
		get_ctx(ctx);
		spin_unlock_irq(&ctx->lock);
	}
	rcu_read_unlock();

	if (!ctx) {
		ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
		err = -ENOMEM;
		if (!ctx)
			goto errout;
		__perf_counter_init_context(ctx, task);
		get_ctx(ctx);
		if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) {
			/*
			 * We raced with some other task; use
			 * the context they set.
			 */
			kfree(ctx);
			goto retry_lock;
		}
		get_task_struct(task);
	}

	put_task_struct(task);
	return ctx;

 errout:
	put_task_struct(task);
	return ERR_PTR(err);
}

static void free_counter_rcu(struct rcu_head *head)
{
	struct perf_counter *counter;

	counter = container_of(head, struct perf_counter, rcu_head);
	kfree(counter);
}

static void perf_pending_sync(struct perf_counter *counter);

static void free_counter(struct perf_counter *counter)
{
	perf_pending_sync(counter);

	atomic_dec(&nr_counters);
	if (counter->hw_event.mmap)
		atomic_dec(&nr_mmap_tracking);
	if (counter->hw_event.munmap)
		atomic_dec(&nr_munmap_tracking);
	if (counter->hw_event.comm)
		atomic_dec(&nr_comm_tracking);

	if (counter->destroy)
		counter->destroy(counter);

	put_ctx(counter->ctx);
	call_rcu(&counter->rcu_head, free_counter_rcu);
}

/*
 * Called when the last reference to the file is gone.
 */
static int perf_release(struct inode *inode, struct file *file)
{
	struct perf_counter *counter = file->private_data;
	struct perf_counter_context *ctx = counter->ctx;

	file->private_data = NULL;

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_counter_remove_from_context(counter);
	mutex_unlock(&ctx->mutex);

	mutex_lock(&counter->owner->perf_counter_mutex);
	list_del_init(&counter->owner_entry);
	mutex_unlock(&counter->owner->perf_counter_mutex);
	put_task_struct(counter->owner);

	free_counter(counter);

	return 0;
}

/*
 * Read the performance counter - simple non blocking version for now
 */
static ssize_t
perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
{
	u64 values[3];
	int n;

	/*
	 * Return end-of-file for a read on a counter that is in
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled on to the CPU at some point).
	 */
	if (counter->state == PERF_COUNTER_STATE_ERROR)
		return 0;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	mutex_lock(&counter->child_mutex);
	values[0] = perf_counter_read(counter);
	n = 1;
	if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = counter->total_time_enabled +
			atomic64_read(&counter->child_total_time_enabled);
	if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = counter->total_time_running +
			atomic64_read(&counter->child_total_time_running);
	mutex_unlock(&counter->child_mutex);

	if (count < n * sizeof(u64))
		return -EINVAL;
	count = n * sizeof(u64);

	if (copy_to_user(buf, values, count))
		return -EFAULT;

	return count;
}

static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct perf_counter *counter = file->private_data;

	return perf_read_hw(counter, buf, count);
}

static unsigned int perf_poll(struct file *file, poll_table *wait)
{
	struct perf_counter *counter = file->private_data;
	struct perf_mmap_data *data;
	unsigned int events = POLLHUP;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (data)
		events = atomic_xchg(&data->poll, 0);
	rcu_read_unlock();

	poll_wait(file, &counter->waitq, wait);

	return events;
}

static void perf_counter_reset(struct perf_counter *counter)
{
	(void)perf_counter_read(counter);
	atomic64_set(&counter->count, 0);
	perf_counter_update_userpage(counter);
}

static void perf_counter_for_each_sibling(struct perf_counter *counter,
					  void (*func)(struct perf_counter *))
{
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *sibling;

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	counter = counter->group_leader;

	func(counter);
	list_for_each_entry(sibling, &counter->sibling_list, list_entry)
		func(sibling);
	mutex_unlock(&ctx->mutex);
}

/*
 * Holding the top-level counter's child_mutex means that any
 * descendant process that has inherited this counter will block
 * in sync_child_counter if it goes to exit, thus satisfying the
 * task existence requirements of perf_counter_enable/disable.
 */
static void perf_counter_for_each_child(struct perf_counter *counter,
					void (*func)(struct perf_counter *))
{
	struct perf_counter *child;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	mutex_lock(&counter->child_mutex);
	func(counter);
	list_for_each_entry(child, &counter->child_list, child_list)
		func(child);
	mutex_unlock(&counter->child_mutex);
}

static void perf_counter_for_each(struct perf_counter *counter,
				  void (*func)(struct perf_counter *))
{
	struct perf_counter *child;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	mutex_lock(&counter->child_mutex);
	perf_counter_for_each_sibling(counter, func);
	list_for_each_entry(child, &counter->child_list, child_list)
		perf_counter_for_each_sibling(child, func);
	mutex_unlock(&counter->child_mutex);
}

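/*
 * ioctl entry: enable, disable, reset or refresh a counter.  For
 * enable/disable/reset, PERF_IOC_FLAG_GROUP applies the operation to
 * the whole group, otherwise it covers the counter and its inherited
 * children only.
 */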
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct perf_counter *counter = file->private_data;
	void (*func)(struct perf_counter *);
	u32 flags = arg;

	switch (cmd) {
	case PERF_COUNTER_IOC_ENABLE:
		func = perf_counter_enable;
		break;
	case PERF_COUNTER_IOC_DISABLE:
		func = perf_counter_disable;
		break;
	case PERF_COUNTER_IOC_RESET:
		func = perf_counter_reset;
		break;

	case PERF_COUNTER_IOC_REFRESH:
		return perf_counter_refresh(counter, arg);
	default:
		return -ENOTTY;
	}

	if (flags & PERF_IOC_FLAG_GROUP)
		perf_counter_for_each(counter, func);
	else
		perf_counter_for_each_child(counter, func);

	return 0;
}

int perf_counter_task_enable(void)
{
	struct perf_counter *counter;

	mutex_lock(&current->perf_counter_mutex);
	list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
		perf_counter_for_each_child(counter, perf_counter_enable);
	mutex_unlock(&current->perf_counter_mutex);

	return 0;
}

int perf_counter_task_disable(void)
{
	struct perf_counter *counter;

	mutex_lock(&current->perf_counter_mutex);
	list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
		perf_counter_for_each_child(counter, perf_counter_disable);
	mutex_unlock(&current->perf_counter_mutex);

	return 0;
}

/*
 * Callers need to ensure there can be no nesting of this function, otherwise
 * the seqlock logic goes bad. We can not serialize this because the arch
 * code calls this from NMI context.
 */
void perf_counter_update_userpage(struct perf_counter *counter)
{
	struct perf_mmap_data *data;
	struct perf_counter_mmap_page *userpg;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (!data)
		goto unlock;

	userpg = data->user_page;

	/*
	 * Disable preemption so as to not let the corresponding user-space
	 * spin too long if we get preempted.
	 */
	preempt_disable();
	++userpg->lock;
	barrier();
	userpg->index = counter->hw.idx;
	userpg->offset = atomic64_read(&counter->count);
	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		userpg->offset -= atomic64_read(&counter->hw.prev_count);

	barrier();
	++userpg->lock;
	preempt_enable();
unlock:
	rcu_read_unlock();
}

static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct perf_counter *counter = vma->vm_file->private_data;
	struct perf_mmap_data *data;
	int ret = VM_FAULT_SIGBUS;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (!data)
		goto unlock;

	if (vmf->pgoff == 0) {
		vmf->page = virt_to_page(data->user_page);
	} else {
		int nr = vmf->pgoff - 1;

		if ((unsigned)nr > data->nr_pages)
			goto unlock;

		vmf->page = virt_to_page(data->data_pages[nr]);
	}
	get_page(vmf->page);
	ret = 0;
unlock:
	rcu_read_unlock();

	return ret;
}

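/*
 * Allocate the mmap() buffer: one zeroed page for the user-visible
 * control page plus nr_pages zeroed data pages, tracked by a
 * perf_mmap_data structure that is published with rcu_assign_pointer().
 */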
static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
{
	struct perf_mmap_data *data;
	unsigned long size;
	int i;

	WARN_ON(atomic_read(&counter->mmap_count));

	size = sizeof(struct perf_mmap_data);
	size += nr_pages * sizeof(void *);

	data = kzalloc(size, GFP_KERNEL);
	if (!data)
		goto fail;

	data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
	if (!data->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
		if (!data->data_pages[i])
			goto fail_data_pages;
	}

	data->nr_pages = nr_pages;
	atomic_set(&data->lock, -1);

	rcu_assign_pointer(counter->data, data);

	return 0;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)data->data_pages[i]);

	free_page((unsigned long)data->user_page);

fail_user_page:
	kfree(data);

fail:
	return -ENOMEM;
}

static void __perf_mmap_data_free(struct rcu_head *rcu_head)
{
	struct perf_mmap_data *data = container_of(rcu_head,
			struct perf_mmap_data, rcu_head);
	int i;

	free_page((unsigned long)data->user_page);
	for (i = 0; i < data->nr_pages; i++)
		free_page((unsigned long)data->data_pages[i]);
	kfree(data);
}

static void perf_mmap_data_free(struct perf_counter *counter)
{
	struct perf_mmap_data *data = counter->data;

	WARN_ON(atomic_read(&counter->mmap_count));

	rcu_assign_pointer(counter->data, NULL);
	call_rcu(&data->rcu_head, __perf_mmap_data_free);
}

static void perf_mmap_open(struct vm_area_struct *vma)
{
	struct perf_counter *counter = vma->vm_file->private_data;

	atomic_inc(&counter->mmap_count);
}

static void perf_mmap_close(struct vm_area_struct *vma)
{
	struct perf_counter *counter = vma->vm_file->private_data;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	if (atomic_dec_and_mutex_lock(&counter->mmap_count,
				      &counter->mmap_mutex)) {
		struct user_struct *user = current_user();

		atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
		vma->vm_mm->locked_vm -= counter->data->nr_locked;
		perf_mmap_data_free(counter);
		mutex_unlock(&counter->mmap_mutex);
	}
}

static struct vm_operations_struct perf_mmap_vmops = {
	.open  = perf_mmap_open,
	.close = perf_mmap_close,
	.fault = perf_mmap_fault,
};

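/*
 * mmap() a counter: the mapping must be shared and read-only, and its
 * size must be the control page plus a power-of-two number of data
 * pages.  The pages are charged against the per-user mlock allowance
 * and the task's RLIMIT_MEMLOCK.
 */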
static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct perf_counter *counter = file->private_data;
	struct user_struct *user = current_user();
	unsigned long vma_size;
	unsigned long nr_pages;
	unsigned long user_locked, user_lock_limit;
	unsigned long locked, lock_limit;
	long user_extra, extra;
	int ret = 0;

	if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
		return -EINVAL;

	vma_size = vma->vm_end - vma->vm_start;
	nr_pages = (vma_size / PAGE_SIZE) - 1;

	/*
	 * If we have data pages ensure they're a power-of-two number, so we
	 * can do bitmasks instead of modulo.
	 */
	if (nr_pages != 0 && !is_power_of_2(nr_pages))
		return -EINVAL;

	if (vma_size != PAGE_SIZE * (1 + nr_pages))
		return -EINVAL;

	if (vma->vm_pgoff != 0)
		return -EINVAL;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	mutex_lock(&counter->mmap_mutex);
	if (atomic_inc_not_zero(&counter->mmap_count)) {
		if (nr_pages != counter->data->nr_pages)
			ret = -EINVAL;
		goto unlock;
	}

	user_extra = nr_pages + 1;
	user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);

	/*
	 * Increase the limit linearly with more CPUs:
	 */
	user_lock_limit *= num_online_cpus();

	user_locked = atomic_long_read(&user->locked_vm) + user_extra;

	extra = 0;
	if (user_locked > user_lock_limit)
		extra = user_locked - user_lock_limit;

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;
1831
	locked = vma->vm_mm->locked_vm + extra;
1832

1833 1834 1835 1836
	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -EPERM;
		goto unlock;
	}
1837 1838 1839

	WARN_ON(counter->data);
	ret = perf_mmap_data_alloc(counter, nr_pages);
1840 1841 1842 1843
	if (ret)
		goto unlock;

	atomic_set(&counter->mmap_count, 1);
1844
	atomic_long_add(user_extra, &user->locked_vm);
1845 1846
	vma->vm_mm->locked_vm += extra;
	counter->data->nr_locked = extra;
1847
unlock:
1848
	mutex_unlock(&counter->mmap_mutex);
1849 1850 1851 1852

	vma->vm_flags &= ~VM_MAYWRITE;
	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &perf_mmap_vmops;
1853 1854

	return ret;
1855 1856
}

P
Peter Zijlstra 已提交
1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872
static int perf_fasync(int fd, struct file *filp, int on)
{
	struct perf_counter *counter = filp->private_data;
	struct inode *inode = filp->f_path.dentry->d_inode;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &counter->fasync);
	mutex_unlock(&inode->i_mutex);

	if (retval < 0)
		return retval;

	return 0;
}

T
Thomas Gleixner 已提交
1873 1874 1875 1876
static const struct file_operations perf_fops = {
	.release		= perf_release,
	.read			= perf_read,
	.poll			= perf_poll,
1877 1878
	.unlocked_ioctl		= perf_ioctl,
	.compat_ioctl		= perf_ioctl,
1879
	.mmap			= perf_mmap,
P
Peter Zijlstra 已提交
1880
	.fasync			= perf_fasync,
T
Thomas Gleixner 已提交
1881 1882
};

1883 1884 1885 1886 1887 1888 1889 1890 1891 1892
/*
 * Perf counter wakeup
 *
 * If there's data, ensure we set the poll() state and publish everything
 * to user-space before waking everybody up.
 */

void perf_counter_wakeup(struct perf_counter *counter)
{
	wake_up_all(&counter->waitq);
1893 1894 1895 1896 1897

	if (counter->pending_kill) {
		kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
		counter->pending_kill = 0;
	}
1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908
}

/*
 * Pending wakeups
 *
 * Handle the case where we need to wakeup up from NMI (or rq->lock) context.
 *
 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
 * single linked list and use cmpxchg() to add entries lockless.
 */

1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924
static void perf_pending_counter(struct perf_pending_entry *entry)
{
	struct perf_counter *counter = container_of(entry,
			struct perf_counter, pending);

	if (counter->pending_disable) {
		counter->pending_disable = 0;
		perf_counter_disable(counter);
	}

	if (counter->pending_wakeup) {
		counter->pending_wakeup = 0;
		perf_counter_wakeup(counter);
	}
}

1925
#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
1926

1927
static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
1928 1929 1930
	PENDING_TAIL,
};

1931 1932
static void perf_pending_queue(struct perf_pending_entry *entry,
			       void (*func)(struct perf_pending_entry *))
1933
{
1934
	struct perf_pending_entry **head;
1935

1936
	if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
1937 1938
		return;

1939 1940 1941
	entry->func = func;

	head = &get_cpu_var(perf_pending_head);
1942 1943

	do {
1944 1945
		entry->next = *head;
	} while (cmpxchg(head, entry->next, entry) != entry->next);
1946 1947 1948

	set_perf_counter_pending();

1949
	put_cpu_var(perf_pending_head);
1950 1951 1952 1953
}

static int __perf_pending_run(void)
{
1954
	struct perf_pending_entry *list;
1955 1956
	int nr = 0;

1957
	list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
1958
	while (list != PENDING_TAIL) {
1959 1960
		void (*func)(struct perf_pending_entry *);
		struct perf_pending_entry *entry = list;
1961 1962 1963

		list = list->next;

1964 1965
		func = entry->func;
		entry->next = NULL;
1966 1967 1968 1969 1970 1971 1972
		/*
		 * Ensure we observe the unqueue before we issue the wakeup,
		 * so that we won't be waiting forever.
		 * -- see perf_not_pending().
		 */
		smp_wmb();

1973
		func(entry);
1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994
		nr++;
	}

	return nr;
}

static inline int perf_not_pending(struct perf_counter *counter)
{
	/*
	 * If we flush on whatever cpu we run, there is a chance we don't
	 * need to wait.
	 */
	get_cpu();
	__perf_pending_run();
	put_cpu();

	/*
	 * Ensure we see the proper queue state before going to sleep
	 * so that we do not miss the wakeup. -- see perf_pending_handle()
	 */
	smp_rmb();
1995
	return counter->pending.next == NULL;
1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007
}

static void perf_pending_sync(struct perf_counter *counter)
{
	wait_event(counter->waitq, perf_not_pending(counter));
}

void perf_counter_do_pending(void)
{
	__perf_pending_run();
}

2008 2009 2010 2011
/*
 * Callchain support -- arch specific
 */

2012
__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2013 2014 2015 2016
{
	return NULL;
}

2017 2018 2019 2020
/*
 * Output
 */

2021 2022 2023 2024
struct perf_output_handle {
	struct perf_counter	*counter;
	struct perf_mmap_data	*data;
	unsigned int		offset;
2025
	unsigned int		head;
2026
	int			nmi;
2027
	int			overflow;
2028 2029
	int			locked;
	unsigned long		flags;
2030 2031
};

2032
static void perf_output_wakeup(struct perf_output_handle *handle)
2033
{
2034 2035
	atomic_set(&handle->data->poll, POLL_IN);

2036
	if (handle->nmi) {
2037
		handle->counter->pending_wakeup = 1;
2038
		perf_pending_queue(&handle->counter->pending,
2039
				   perf_pending_counter);
2040
	} else
2041 2042 2043
		perf_counter_wakeup(handle->counter);
}

2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069
/*
 * Curious locking construct.
 *
 * We need to ensure a later event doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * What we do is serialize between CPUs so we only have to deal with NMI
 * nesting on a single CPU.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_lock(struct perf_output_handle *handle)
{
	struct perf_mmap_data *data = handle->data;
	int cpu;

	handle->locked = 0;

	local_irq_save(handle->flags);
	cpu = smp_processor_id();

	if (in_nmi() && atomic_read(&data->lock) == cpu)
		return;

2070
	while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2071 2072 2073 2074 2075 2076 2077 2078 2079 2080
		cpu_relax();

	handle->locked = 1;
}

static void perf_output_unlock(struct perf_output_handle *handle)
{
	struct perf_mmap_data *data = handle->data;
	int head, cpu;

2081
	data->done_head = data->head;
2082 2083 2084 2085 2086 2087 2088 2089 2090 2091

	if (!handle->locked)
		goto out;

again:
	/*
	 * The xchg implies a full barrier that ensures all writes are done
	 * before we publish the new head, matched by a rmb() in userspace when
	 * reading this position.
	 */
2092
	while ((head = atomic_xchg(&data->done_head, 0)))
2093 2094 2095
		data->user_page->data_head = head;

	/*
2096
	 * NMI can happen here, which means we can miss a done_head update.
2097 2098
	 */

2099
	cpu = atomic_xchg(&data->lock, -1);
2100 2101 2102 2103 2104
	WARN_ON_ONCE(cpu != smp_processor_id());

	/*
	 * Therefore we have to validate we did not indeed do so.
	 */
2105
	if (unlikely(atomic_read(&data->done_head))) {
2106 2107 2108
		/*
		 * Since we had it locked, we can lock it again.
		 */
2109
		while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2110 2111 2112 2113 2114
			cpu_relax();

		goto again;
	}

2115
	if (atomic_xchg(&data->wakeup, 0))
2116 2117 2118 2119 2120
		perf_output_wakeup(handle);
out:
	local_irq_restore(handle->flags);
}

2121
static int perf_output_begin(struct perf_output_handle *handle,
2122
			     struct perf_counter *counter, unsigned int size,
2123
			     int nmi, int overflow)
2124
{
2125
	struct perf_mmap_data *data;
2126
	unsigned int offset, head;
2127

2128 2129 2130 2131 2132 2133
	/*
	 * For inherited counters we send all the output towards the parent.
	 */
	if (counter->parent)
		counter = counter->parent;

2134 2135 2136 2137 2138
	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (!data)
		goto out;

2139
	handle->data	 = data;
2140 2141 2142
	handle->counter	 = counter;
	handle->nmi	 = nmi;
	handle->overflow = overflow;
2143

2144
	if (!data->nr_pages)
2145
		goto fail;
2146

2147 2148
	perf_output_lock(handle);

2149 2150
	do {
		offset = head = atomic_read(&data->head);
P
Peter Zijlstra 已提交
2151
		head += size;
2152 2153
	} while (atomic_cmpxchg(&data->head, offset, head) != offset);

2154
	handle->offset	= offset;
2155
	handle->head	= head;
2156 2157 2158

	if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
		atomic_set(&data->wakeup, 1);
2159

2160
	return 0;
2161

2162
fail:
2163
	perf_output_wakeup(handle);
2164 2165
out:
	rcu_read_unlock();
2166

2167 2168
	return -ENOSPC;
}
2169

2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197
static void perf_output_copy(struct perf_output_handle *handle,
			     void *buf, unsigned int len)
{
	unsigned int pages_mask;
	unsigned int offset;
	unsigned int size;
	void **pages;

	offset		= handle->offset;
	pages_mask	= handle->data->nr_pages - 1;
	pages		= handle->data->data_pages;

	do {
		unsigned int page_offset;
		int nr;

		nr	    = (offset >> PAGE_SHIFT) & pages_mask;
		page_offset = offset & (PAGE_SIZE - 1);
		size	    = min_t(unsigned int, PAGE_SIZE - page_offset, len);

		memcpy(pages[nr] + page_offset, buf, size);

		len	    -= size;
		buf	    += size;
		offset	    += size;
	} while (len);

	handle->offset = offset;
2198

2199 2200 2201 2202 2203
	/*
	 * Check we didn't copy past our reservation window, taking the
	 * possible unsigned int wrap into account.
	 */
	WARN_ON_ONCE(((int)(handle->head - handle->offset)) < 0);
2204 2205
}

P
Peter Zijlstra 已提交
2206 2207 2208
#define perf_output_put(handle, x) \
	perf_output_copy((handle), &(x), sizeof(x))

2209
static void perf_output_end(struct perf_output_handle *handle)
2210
{
2211 2212 2213 2214
	struct perf_counter *counter = handle->counter;
	struct perf_mmap_data *data = handle->data;

	int wakeup_events = counter->hw_event.wakeup_events;
P
Peter Zijlstra 已提交
2215

2216
	if (handle->overflow && wakeup_events) {
2217
		int events = atomic_inc_return(&data->events);
P
Peter Zijlstra 已提交
2218
		if (events >= wakeup_events) {
2219
			atomic_sub(wakeup_events, &data->events);
2220
			atomic_set(&data->wakeup, 1);
P
Peter Zijlstra 已提交
2221
		}
2222 2223 2224
	}

	perf_output_unlock(handle);
2225
	rcu_read_unlock();
2226 2227
}

2228
static void perf_counter_output(struct perf_counter *counter,
2229
				int nmi, struct pt_regs *regs, u64 addr)
2230
{
2231
	int ret;
2232
	u64 record_type = counter->hw_event.record_type;
2233 2234 2235
	struct perf_output_handle handle;
	struct perf_event_header header;
	u64 ip;
P
Peter Zijlstra 已提交
2236
	struct {
2237
		u32 pid, tid;
2238
	} tid_entry;
2239 2240 2241 2242
	struct {
		u64 event;
		u64 counter;
	} group_entry;
2243 2244
	struct perf_callchain_entry *callchain = NULL;
	int callchain_size = 0;
P
Peter Zijlstra 已提交
2245
	u64 time;
2246 2247 2248
	struct {
		u32 cpu, reserved;
	} cpu_entry;
2249

2250
	header.type = 0;
2251
	header.size = sizeof(header);
2252

2253
	header.misc = PERF_EVENT_MISC_OVERFLOW;
2254
	header.misc |= perf_misc_flags(regs);
2255

2256
	if (record_type & PERF_RECORD_IP) {
2257
		ip = perf_instruction_pointer(regs);
2258
		header.type |= PERF_RECORD_IP;
2259 2260
		header.size += sizeof(ip);
	}
2261

2262
	if (record_type & PERF_RECORD_TID) {
2263
		/* namespace issues */
2264 2265 2266
		tid_entry.pid = current->group_leader->pid;
		tid_entry.tid = current->pid;

2267
		header.type |= PERF_RECORD_TID;
2268 2269 2270
		header.size += sizeof(tid_entry);
	}

2271 2272 2273 2274 2275 2276 2277 2278 2279 2280
	if (record_type & PERF_RECORD_TIME) {
		/*
		 * Maybe do better on x86 and provide cpu_clock_nmi()
		 */
		time = sched_clock();

		header.type |= PERF_RECORD_TIME;
		header.size += sizeof(u64);
	}

2281 2282 2283 2284 2285
	if (record_type & PERF_RECORD_ADDR) {
		header.type |= PERF_RECORD_ADDR;
		header.size += sizeof(u64);
	}

2286 2287 2288 2289 2290
	if (record_type & PERF_RECORD_CONFIG) {
		header.type |= PERF_RECORD_CONFIG;
		header.size += sizeof(u64);
	}

2291 2292 2293 2294 2295 2296 2297
	if (record_type & PERF_RECORD_CPU) {
		header.type |= PERF_RECORD_CPU;
		header.size += sizeof(cpu_entry);

		cpu_entry.cpu = raw_smp_processor_id();
	}

2298
	if (record_type & PERF_RECORD_GROUP) {
2299
		header.type |= PERF_RECORD_GROUP;
2300 2301 2302 2303 2304
		header.size += sizeof(u64) +
			counter->nr_siblings * sizeof(group_entry);
	}

	if (record_type & PERF_RECORD_CALLCHAIN) {
2305 2306 2307
		callchain = perf_callchain(regs);

		if (callchain) {
2308
			callchain_size = (1 + callchain->nr) * sizeof(u64);
2309

2310
			header.type |= PERF_RECORD_CALLCHAIN;
2311 2312 2313 2314
			header.size += callchain_size;
		}
	}

2315
	ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
2316 2317
	if (ret)
		return;
2318

2319
	perf_output_put(&handle, header);
P
Peter Zijlstra 已提交
2320

2321 2322
	if (record_type & PERF_RECORD_IP)
		perf_output_put(&handle, ip);
P
Peter Zijlstra 已提交
2323

2324 2325
	if (record_type & PERF_RECORD_TID)
		perf_output_put(&handle, tid_entry);
P
Peter Zijlstra 已提交
2326

2327 2328 2329
	if (record_type & PERF_RECORD_TIME)
		perf_output_put(&handle, time);

2330 2331 2332
	if (record_type & PERF_RECORD_ADDR)
		perf_output_put(&handle, addr);

2333 2334 2335
	if (record_type & PERF_RECORD_CONFIG)
		perf_output_put(&handle, counter->hw_event.config);

2336 2337 2338
	if (record_type & PERF_RECORD_CPU)
		perf_output_put(&handle, cpu_entry);

2339 2340 2341
	/*
	 * XXX PERF_RECORD_GROUP vs inherited counters seems difficult.
	 */
2342 2343 2344
	if (record_type & PERF_RECORD_GROUP) {
		struct perf_counter *leader, *sub;
		u64 nr = counter->nr_siblings;
P
Peter Zijlstra 已提交
2345

2346
		perf_output_put(&handle, nr);
2347

2348 2349 2350
		leader = counter->group_leader;
		list_for_each_entry(sub, &leader->sibling_list, list_entry) {
			if (sub != counter)
2351
				sub->pmu->read(sub);
2352

2353 2354
			group_entry.event = sub->hw_event.config;
			group_entry.counter = atomic64_read(&sub->count);
2355

2356 2357
			perf_output_put(&handle, group_entry);
		}
2358
	}
P
Peter Zijlstra 已提交
2359

2360 2361
	if (callchain)
		perf_output_copy(&handle, callchain, callchain_size);
2362

2363
	perf_output_end(&handle);
2364 2365
}

2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427
/*
 * comm tracking
 */

struct perf_comm_event {
	struct task_struct 	*task;
	char 			*comm;
	int			comm_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
	} event;
};

static void perf_counter_comm_output(struct perf_counter *counter,
				     struct perf_comm_event *comm_event)
{
	struct perf_output_handle handle;
	int size = comm_event->event.header.size;
	int ret = perf_output_begin(&handle, counter, size, 0, 0);

	if (ret)
		return;

	perf_output_put(&handle, comm_event->event);
	perf_output_copy(&handle, comm_event->comm,
				   comm_event->comm_size);
	perf_output_end(&handle);
}

static int perf_counter_comm_match(struct perf_counter *counter,
				   struct perf_comm_event *comm_event)
{
	if (counter->hw_event.comm &&
	    comm_event->event.header.type == PERF_EVENT_COMM)
		return 1;

	return 0;
}

static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
				  struct perf_comm_event *comm_event)
{
	struct perf_counter *counter;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
		if (perf_counter_comm_match(counter, comm_event))
			perf_counter_comm_output(counter, comm_event);
	}
	rcu_read_unlock();
}

static void perf_counter_comm_event(struct perf_comm_event *comm_event)
{
	struct perf_cpu_context *cpuctx;
2428
	struct perf_counter_context *ctx;
2429 2430 2431
	unsigned int size;
	char *comm = comm_event->task->comm;

2432
	size = ALIGN(strlen(comm)+1, sizeof(u64));
2433 2434 2435 2436 2437 2438 2439 2440 2441

	comm_event->comm = comm;
	comm_event->comm_size = size;

	comm_event->event.header.size = sizeof(comm_event->event) + size;

	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
	put_cpu_var(perf_cpu_context);
2442 2443 2444 2445 2446 2447 2448 2449 2450 2451

	rcu_read_lock();
	/*
	 * doesn't really matter which of the child contexts the
	 * events ends up in.
	 */
	ctx = rcu_dereference(current->perf_counter_ctxp);
	if (ctx)
		perf_counter_comm_ctx(ctx, comm_event);
	rcu_read_unlock();
2452 2453 2454 2455
}

void perf_counter_comm(struct task_struct *task)
{
2456 2457 2458 2459
	struct perf_comm_event comm_event;

	if (!atomic_read(&nr_comm_tracking))
		return;
2460

2461
	comm_event = (struct perf_comm_event){
2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472
		.task	= task,
		.event  = {
			.header = { .type = PERF_EVENT_COMM, },
			.pid	= task->group_leader->pid,
			.tid	= task->pid,
		},
	};

	perf_counter_comm_event(&comm_event);
}

2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497
/*
 * mmap tracking
 */

struct perf_mmap_event {
	struct file	*file;
	char		*file_name;
	int		file_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
		u64				start;
		u64				len;
		u64				pgoff;
	} event;
};

static void perf_counter_mmap_output(struct perf_counter *counter,
				     struct perf_mmap_event *mmap_event)
{
	struct perf_output_handle handle;
	int size = mmap_event->event.header.size;
2498
	int ret = perf_output_begin(&handle, counter, size, 0, 0);
2499 2500 2501 2502 2503 2504 2505

	if (ret)
		return;

	perf_output_put(&handle, mmap_event->event);
	perf_output_copy(&handle, mmap_event->file_name,
				   mmap_event->file_size);
2506
	perf_output_end(&handle);
2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541
}

static int perf_counter_mmap_match(struct perf_counter *counter,
				   struct perf_mmap_event *mmap_event)
{
	if (counter->hw_event.mmap &&
	    mmap_event->event.header.type == PERF_EVENT_MMAP)
		return 1;

	if (counter->hw_event.munmap &&
	    mmap_event->event.header.type == PERF_EVENT_MUNMAP)
		return 1;

	return 0;
}

static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
				  struct perf_mmap_event *mmap_event)
{
	struct perf_counter *counter;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
		if (perf_counter_mmap_match(counter, mmap_event))
			perf_counter_mmap_output(counter, mmap_event);
	}
	rcu_read_unlock();
}

static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
{
	struct perf_cpu_context *cpuctx;
2542
	struct perf_counter_context *ctx;
2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554
	struct file *file = mmap_event->file;
	unsigned int size;
	char tmp[16];
	char *buf = NULL;
	char *name;

	if (file) {
		buf = kzalloc(PATH_MAX, GFP_KERNEL);
		if (!buf) {
			name = strncpy(tmp, "//enomem", sizeof(tmp));
			goto got_name;
		}
2555
		name = d_path(&file->f_path, buf, PATH_MAX);
2556 2557 2558 2559 2560 2561 2562 2563 2564 2565
		if (IS_ERR(name)) {
			name = strncpy(tmp, "//toolong", sizeof(tmp));
			goto got_name;
		}
	} else {
		name = strncpy(tmp, "//anon", sizeof(tmp));
		goto got_name;
	}

got_name:
2566
	size = ALIGN(strlen(name)+1, sizeof(u64));
2567 2568 2569 2570 2571 2572 2573 2574 2575 2576

	mmap_event->file_name = name;
	mmap_event->file_size = size;

	mmap_event->event.header.size = sizeof(mmap_event->event) + size;

	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
	put_cpu_var(perf_cpu_context);

2577 2578 2579 2580 2581 2582 2583 2584 2585 2586
	rcu_read_lock();
	/*
	 * doesn't really matter which of the child contexts the
	 * events ends up in.
	 */
	ctx = rcu_dereference(current->perf_counter_ctxp);
	if (ctx)
		perf_counter_mmap_ctx(ctx, mmap_event);
	rcu_read_unlock();

2587 2588 2589 2590 2591 2592
	kfree(buf);
}

void perf_counter_mmap(unsigned long addr, unsigned long len,
		       unsigned long pgoff, struct file *file)
{
2593 2594 2595 2596 2597 2598
	struct perf_mmap_event mmap_event;

	if (!atomic_read(&nr_mmap_tracking))
		return;

	mmap_event = (struct perf_mmap_event){
2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615
		.file   = file,
		.event  = {
			.header = { .type = PERF_EVENT_MMAP, },
			.pid	= current->group_leader->pid,
			.tid	= current->pid,
			.start  = addr,
			.len    = len,
			.pgoff  = pgoff,
		},
	};

	perf_counter_mmap_event(&mmap_event);
}

void perf_counter_munmap(unsigned long addr, unsigned long len,
			 unsigned long pgoff, struct file *file)
{
2616 2617 2618 2619 2620 2621
	struct perf_mmap_event mmap_event;

	if (!atomic_read(&nr_munmap_tracking))
		return;

	mmap_event = (struct perf_mmap_event){
2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635
		.file   = file,
		.event  = {
			.header = { .type = PERF_EVENT_MUNMAP, },
			.pid	= current->group_leader->pid,
			.tid	= current->pid,
			.start  = addr,
			.len    = len,
			.pgoff  = pgoff,
		},
	};

	perf_counter_mmap_event(&mmap_event);
}

2636
/*
2637 2638
 * Log irq_period changes so that analyzing tools can re-normalize the
 * event flow.
2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670
 */

static void perf_log_period(struct perf_counter *counter, u64 period)
{
	struct perf_output_handle handle;
	int ret;

	struct {
		struct perf_event_header	header;
		u64				time;
		u64				period;
	} freq_event = {
		.header = {
			.type = PERF_EVENT_PERIOD,
			.misc = 0,
			.size = sizeof(freq_event),
		},
		.time = sched_clock(),
		.period = period,
	};

	if (counter->hw.irq_period == period)
		return;

	ret = perf_output_begin(&handle, counter, sizeof(freq_event), 0, 0);
	if (ret)
		return;

	perf_output_put(&handle, freq_event);
	perf_output_end(&handle);
}

2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691
/*
 * IRQ throttle logging
 */

static void perf_log_throttle(struct perf_counter *counter, int enable)
{
	struct perf_output_handle handle;
	int ret;

	struct {
		struct perf_event_header	header;
		u64				time;
	} throttle_event = {
		.header = {
			.type = PERF_EVENT_THROTTLE + 1,
			.misc = 0,
			.size = sizeof(throttle_event),
		},
		.time = sched_clock(),
	};

I
Ingo Molnar 已提交
2692
	ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
2693 2694 2695 2696 2697 2698 2699
	if (ret)
		return;

	perf_output_put(&handle, throttle_event);
	perf_output_end(&handle);
}

2700 2701 2702 2703 2704
/*
 * Generic counter overflow handling.
 */

int perf_counter_overflow(struct perf_counter *counter,
2705
			  int nmi, struct pt_regs *regs, u64 addr)
2706
{
2707
	int events = atomic_read(&counter->event_limit);
2708
	int throttle = counter->pmu->unthrottle != NULL;
2709 2710
	int ret = 0;

2711 2712 2713 2714 2715 2716 2717 2718 2719 2720
	if (!throttle) {
		counter->hw.interrupts++;
	} else if (counter->hw.interrupts != MAX_INTERRUPTS) {
		counter->hw.interrupts++;
		if (HZ*counter->hw.interrupts > (u64)sysctl_perf_counter_limit) {
			counter->hw.interrupts = MAX_INTERRUPTS;
			perf_log_throttle(counter, 0);
			ret = 1;
		}
	}
2721

2722 2723 2724 2725 2726
	/*
	 * XXX event_limit might not quite work as expected on inherited
	 * counters
	 */

2727
	counter->pending_kill = POLL_IN;
2728 2729
	if (events && atomic_dec_and_test(&counter->event_limit)) {
		ret = 1;
2730
		counter->pending_kill = POLL_HUP;
2731 2732 2733 2734 2735 2736 2737 2738
		if (nmi) {
			counter->pending_disable = 1;
			perf_pending_queue(&counter->pending,
					   perf_pending_counter);
		} else
			perf_counter_disable(counter);
	}

2739
	perf_counter_output(counter, nmi, regs, addr);
2740
	return ret;
2741 2742
}

2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784
/*
 * Generic software counter infrastructure
 */

static void perf_swcounter_update(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	u64 prev, now;
	s64 delta;

again:
	prev = atomic64_read(&hwc->prev_count);
	now = atomic64_read(&hwc->count);
	if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);
}

static void perf_swcounter_set_period(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->irq_period;

	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_add(period, &hwc->period_left);
	}

	atomic64_set(&hwc->prev_count, -left);
	atomic64_set(&hwc->count, -left);
}

2785 2786
static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
{
2787
	enum hrtimer_restart ret = HRTIMER_RESTART;
2788 2789
	struct perf_counter *counter;
	struct pt_regs *regs;
2790
	u64 period;
2791 2792

	counter	= container_of(hrtimer, struct perf_counter, hw.hrtimer);
2793
	counter->pmu->read(counter);
2794 2795 2796 2797 2798 2799 2800 2801 2802 2803

	regs = get_irq_regs();
	/*
	 * In case we exclude kernel IPs or are somehow not in interrupt
	 * context, provide the next best thing, the user IP.
	 */
	if ((counter->hw_event.exclude_kernel || !regs) &&
			!counter->hw_event.exclude_user)
		regs = task_pt_regs(current);

2804
	if (regs) {
2805
		if (perf_counter_overflow(counter, 0, regs, 0))
2806 2807
			ret = HRTIMER_NORESTART;
	}
2808

2809 2810
	period = max_t(u64, 10000, counter->hw.irq_period);
	hrtimer_forward_now(hrtimer, ns_to_ktime(period));
2811

2812
	return ret;
2813 2814 2815
}

static void perf_swcounter_overflow(struct perf_counter *counter,
2816
				    int nmi, struct pt_regs *regs, u64 addr)
2817
{
2818 2819
	perf_swcounter_update(counter);
	perf_swcounter_set_period(counter);
2820
	if (perf_counter_overflow(counter, nmi, regs, addr))
2821 2822 2823
		/* soft-disable the counter */
		;

2824 2825
}

2826
static int perf_swcounter_match(struct perf_counter *counter,
2827 2828
				enum perf_event_types type,
				u32 event, struct pt_regs *regs)
2829 2830 2831 2832
{
	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
		return 0;

2833
	if (perf_event_raw(&counter->hw_event))
2834 2835
		return 0;

2836
	if (perf_event_type(&counter->hw_event) != type)
2837 2838
		return 0;

2839
	if (perf_event_id(&counter->hw_event) != event)
2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850
		return 0;

	if (counter->hw_event.exclude_user && user_mode(regs))
		return 0;

	if (counter->hw_event.exclude_kernel && !user_mode(regs))
		return 0;

	return 1;
}

2851
static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
2852
			       int nmi, struct pt_regs *regs, u64 addr)
2853 2854 2855
{
	int neg = atomic64_add_negative(nr, &counter->hw.count);
	if (counter->hw.irq_period && !neg)
2856
		perf_swcounter_overflow(counter, nmi, regs, addr);
2857 2858
}

2859
static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
2860
				     enum perf_event_types type, u32 event,
2861 2862
				     u64 nr, int nmi, struct pt_regs *regs,
				     u64 addr)
2863 2864 2865
{
	struct perf_counter *counter;

2866
	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2867 2868
		return;

P
Peter Zijlstra 已提交
2869 2870
	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2871
		if (perf_swcounter_match(counter, type, event, regs))
2872
			perf_swcounter_add(counter, nr, nmi, regs, addr);
2873
	}
P
Peter Zijlstra 已提交
2874
	rcu_read_unlock();
2875 2876
}

P
Peter Zijlstra 已提交
2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890
static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
{
	if (in_nmi())
		return &cpuctx->recursion[3];

	if (in_irq())
		return &cpuctx->recursion[2];

	if (in_softirq())
		return &cpuctx->recursion[1];

	return &cpuctx->recursion[0];
}

2891
static void __perf_swcounter_event(enum perf_event_types type, u32 event,
2892 2893
				   u64 nr, int nmi, struct pt_regs *regs,
				   u64 addr)
2894 2895
{
	struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
P
Peter Zijlstra 已提交
2896
	int *recursion = perf_swcounter_recursion_context(cpuctx);
2897
	struct perf_counter_context *ctx;
P
Peter Zijlstra 已提交
2898 2899 2900 2901 2902 2903

	if (*recursion)
		goto out;

	(*recursion)++;
	barrier();
2904

2905 2906
	perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
				 nr, nmi, regs, addr);
2907 2908 2909 2910 2911 2912 2913 2914 2915
	rcu_read_lock();
	/*
	 * doesn't really matter which of the child contexts the
	 * events ends up in.
	 */
	ctx = rcu_dereference(current->perf_counter_ctxp);
	if (ctx)
		perf_swcounter_ctx_event(ctx, type, event, nr, nmi, regs, addr);
	rcu_read_unlock();
2916

P
Peter Zijlstra 已提交
2917 2918 2919 2920
	barrier();
	(*recursion)--;

out:
2921 2922 2923
	put_cpu_var(perf_cpu_context);
}

2924 2925
void
perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
2926
{
2927
	__perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs, addr);
2928 2929
}

2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945
static void perf_swcounter_read(struct perf_counter *counter)
{
	perf_swcounter_update(counter);
}

static int perf_swcounter_enable(struct perf_counter *counter)
{
	perf_swcounter_set_period(counter);
	return 0;
}

static void perf_swcounter_disable(struct perf_counter *counter)
{
	perf_swcounter_update(counter);
}

2946
static const struct pmu perf_ops_generic = {
2947 2948 2949 2950 2951
	.enable		= perf_swcounter_enable,
	.disable	= perf_swcounter_disable,
	.read		= perf_swcounter_read,
};

2952 2953 2954 2955
/*
 * Software counter: cpu wall time clock
 */

2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967
static void cpu_clock_perf_counter_update(struct perf_counter *counter)
{
	int cpu = raw_smp_processor_id();
	s64 prev;
	u64 now;

	now = cpu_clock(cpu);
	prev = atomic64_read(&counter->hw.prev_count);
	atomic64_set(&counter->hw.prev_count, now);
	atomic64_add(now - prev, &counter->count);
}

2968 2969 2970 2971 2972 2973
static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int cpu = raw_smp_processor_id();

	atomic64_set(&hwc->prev_count, cpu_clock(cpu));
2974 2975
	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swcounter_hrtimer;
2976
	if (hwc->irq_period) {
2977
		u64 period = max_t(u64, 10000, hwc->irq_period);
2978
		__hrtimer_start_range_ns(&hwc->hrtimer,
2979
				ns_to_ktime(period), 0,
2980 2981 2982 2983 2984 2985
				HRTIMER_MODE_REL, 0);
	}

	return 0;
}

2986 2987
static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
{
2988 2989
	if (counter->hw.irq_period)
		hrtimer_cancel(&counter->hw.hrtimer);
2990
	cpu_clock_perf_counter_update(counter);
2991 2992 2993 2994
}

static void cpu_clock_perf_counter_read(struct perf_counter *counter)
{
2995
	cpu_clock_perf_counter_update(counter);
2996 2997
}

2998
static const struct pmu perf_ops_cpu_clock = {
I
Ingo Molnar 已提交
2999 3000 3001
	.enable		= cpu_clock_perf_counter_enable,
	.disable	= cpu_clock_perf_counter_disable,
	.read		= cpu_clock_perf_counter_read,
3002 3003
};

3004 3005 3006 3007
/*
 * Software counter: task time clock
 */

3008
static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
I
Ingo Molnar 已提交
3009
{
3010
	u64 prev;
I
Ingo Molnar 已提交
3011 3012
	s64 delta;

3013
	prev = atomic64_xchg(&counter->hw.prev_count, now);
I
Ingo Molnar 已提交
3014 3015
	delta = now - prev;
	atomic64_add(delta, &counter->count);
3016 3017
}

3018
static int task_clock_perf_counter_enable(struct perf_counter *counter)
I
Ingo Molnar 已提交
3019
{
3020
	struct hw_perf_counter *hwc = &counter->hw;
3021 3022 3023
	u64 now;

	now = counter->ctx->time;
3024

3025
	atomic64_set(&hwc->prev_count, now);
3026 3027
	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swcounter_hrtimer;
3028
	if (hwc->irq_period) {
3029
		u64 period = max_t(u64, 10000, hwc->irq_period);
3030
		__hrtimer_start_range_ns(&hwc->hrtimer,
3031
				ns_to_ktime(period), 0,
3032 3033
				HRTIMER_MODE_REL, 0);
	}
3034 3035

	return 0;
I
Ingo Molnar 已提交
3036 3037 3038
}

static void task_clock_perf_counter_disable(struct perf_counter *counter)
3039
{
3040 3041
	if (counter->hw.irq_period)
		hrtimer_cancel(&counter->hw.hrtimer);
3042 3043
	task_clock_perf_counter_update(counter, counter->ctx->time);

3044
}
I
Ingo Molnar 已提交
3045

3046 3047
static void task_clock_perf_counter_read(struct perf_counter *counter)
{
3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059
	u64 time;

	if (!in_nmi()) {
		update_context_time(counter->ctx);
		time = counter->ctx->time;
	} else {
		u64 now = perf_clock();
		u64 delta = now - counter->ctx->timestamp;
		time = counter->ctx->time + delta;
	}

	task_clock_perf_counter_update(counter, time);
3060 3061
}

3062
static const struct pmu perf_ops_task_clock = {
I
Ingo Molnar 已提交
3063 3064 3065
	.enable		= task_clock_perf_counter_enable,
	.disable	= task_clock_perf_counter_disable,
	.read		= task_clock_perf_counter_read,
3066 3067
};

3068 3069 3070 3071
/*
 * Software counter: cpu migrations
 */

3072
static inline u64 get_cpu_migrations(struct perf_counter *counter)
3073
{
3074 3075 3076 3077 3078
	struct task_struct *curr = counter->ctx->task;

	if (curr)
		return curr->se.nr_migrations;
	return cpu_nr_migrations(smp_processor_id());
3079 3080 3081 3082 3083 3084 3085 3086
}

static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
{
	u64 prev, now;
	s64 delta;

	prev = atomic64_read(&counter->hw.prev_count);
3087
	now = get_cpu_migrations(counter);
3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100

	atomic64_set(&counter->hw.prev_count, now);

	delta = now - prev;

	atomic64_add(delta, &counter->count);
}

static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
{
	cpu_migrations_perf_counter_update(counter);
}

3101
static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
3102
{
3103 3104 3105
	if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
		atomic64_set(&counter->hw.prev_count,
			     get_cpu_migrations(counter));
3106
	return 0;
3107 3108 3109 3110 3111 3112 3113
}

static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
{
	cpu_migrations_perf_counter_update(counter);
}

3114
static const struct pmu perf_ops_cpu_migrations = {
I
Ingo Molnar 已提交
3115 3116 3117
	.enable		= cpu_migrations_perf_counter_enable,
	.disable	= cpu_migrations_perf_counter_disable,
	.read		= cpu_migrations_perf_counter_read,
3118 3119
};

3120 3121 3122
#ifdef CONFIG_EVENT_PROFILE
void perf_tpcounter_event(int event_id)
{
3123 3124 3125 3126 3127
	struct pt_regs *regs = get_irq_regs();

	if (!regs)
		regs = task_pt_regs(current);

3128
	__perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs, 0);
3129
}
3130
EXPORT_SYMBOL_GPL(perf_tpcounter_event);
3131 3132 3133 3134 3135 3136

extern int ftrace_profile_enable(int);
extern void ftrace_profile_disable(int);

static void tp_perf_counter_destroy(struct perf_counter *counter)
{
3137
	ftrace_profile_disable(perf_event_id(&counter->hw_event));
3138 3139
}

3140
static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
3141
{
3142
	int event_id = perf_event_id(&counter->hw_event);
3143 3144 3145 3146 3147 3148 3149
	int ret;

	ret = ftrace_profile_enable(event_id);
	if (ret)
		return NULL;

	counter->destroy = tp_perf_counter_destroy;
3150
	counter->hw.irq_period = counter->hw_event.irq_period;
3151 3152 3153 3154

	return &perf_ops_generic;
}
#else
3155
static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
3156 3157 3158 3159 3160
{
	return NULL;
}
#endif

3161
static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
3162
{
3163
	const struct pmu *pmu = NULL;
3164

3165 3166 3167 3168 3169 3170 3171
	/*
	 * Software counters (currently) can't in general distinguish
	 * between user, kernel and hypervisor events.
	 * However, context switches and cpu migrations are considered
	 * to be kernel events, and page faults are never hypervisor
	 * events.
	 */
3172
	switch (perf_event_id(&counter->hw_event)) {
3173
	case PERF_COUNT_CPU_CLOCK:
3174
		pmu = &perf_ops_cpu_clock;
3175

3176
		break;
3177
	case PERF_COUNT_TASK_CLOCK:
3178 3179 3180 3181 3182
		/*
		 * If the user instantiates this as a per-cpu counter,
		 * use the cpu_clock counter instead.
		 */
		if (counter->ctx->task)
3183
			pmu = &perf_ops_task_clock;
3184
		else
3185
			pmu = &perf_ops_cpu_clock;
3186

3187
		break;
3188
	case PERF_COUNT_PAGE_FAULTS:
3189 3190
	case PERF_COUNT_PAGE_FAULTS_MIN:
	case PERF_COUNT_PAGE_FAULTS_MAJ:
3191
	case PERF_COUNT_CONTEXT_SWITCHES:
3192
		pmu = &perf_ops_generic;
3193
		break;
3194
	case PERF_COUNT_CPU_MIGRATIONS:
3195
		if (!counter->hw_event.exclude_kernel)
3196
			pmu = &perf_ops_cpu_migrations;
3197
		break;
3198
	}
3199

3200
	return pmu;
3201 3202
}

T
Thomas Gleixner 已提交
3203 3204 3205 3206
/*
 * Allocate and initialize a counter structure
 */
static struct perf_counter *
3207 3208
perf_counter_alloc(struct perf_counter_hw_event *hw_event,
		   int cpu,
3209
		   struct perf_counter_context *ctx,
3210 3211
		   struct perf_counter *group_leader,
		   gfp_t gfpflags)
T
Thomas Gleixner 已提交
3212
{
3213
	const struct pmu *pmu;
I
Ingo Molnar 已提交
3214
	struct perf_counter *counter;
3215
	struct hw_perf_counter *hwc;
3216
	long err;
T
Thomas Gleixner 已提交
3217

3218
	counter = kzalloc(sizeof(*counter), gfpflags);
T
Thomas Gleixner 已提交
3219
	if (!counter)
3220
		return ERR_PTR(-ENOMEM);
T
Thomas Gleixner 已提交
3221

3222 3223 3224 3225 3226 3227 3228
	/*
	 * Single counters are their own group leaders, with an
	 * empty sibling list:
	 */
	if (!group_leader)
		group_leader = counter;

3229 3230 3231
	mutex_init(&counter->child_mutex);
	INIT_LIST_HEAD(&counter->child_list);

3232
	INIT_LIST_HEAD(&counter->list_entry);
P
Peter Zijlstra 已提交
3233
	INIT_LIST_HEAD(&counter->event_entry);
3234
	INIT_LIST_HEAD(&counter->sibling_list);
T
Thomas Gleixner 已提交
3235 3236
	init_waitqueue_head(&counter->waitq);

3237 3238
	mutex_init(&counter->mmap_mutex);

I
Ingo Molnar 已提交
3239 3240
	counter->cpu			= cpu;
	counter->hw_event		= *hw_event;
3241
	counter->group_leader		= group_leader;
3242
	counter->pmu			= NULL;
3243
	counter->ctx			= ctx;
3244 3245
	counter->oncpu			= -1;

3246
	counter->state = PERF_COUNTER_STATE_INACTIVE;
3247 3248 3249
	if (hw_event->disabled)
		counter->state = PERF_COUNTER_STATE_OFF;

3250
	pmu = NULL;
3251

3252 3253
	hwc = &counter->hw;
	if (hw_event->freq && hw_event->irq_freq)
3254
		hwc->irq_period = div64_u64(TICK_NSEC, hw_event->irq_freq);
3255 3256 3257
	else
		hwc->irq_period = hw_event->irq_period;

3258 3259 3260 3261 3262 3263
	/*
	 * we currently do not support PERF_RECORD_GROUP on inherited counters
	 */
	if (hw_event->inherit && (hw_event->record_type & PERF_RECORD_GROUP))
		goto done;

3264
	if (perf_event_raw(hw_event)) {
3265
		pmu = hw_perf_counter_init(counter);
3266 3267 3268 3269
		goto done;
	}

	switch (perf_event_type(hw_event)) {
3270
	case PERF_TYPE_HARDWARE:
3271
		pmu = hw_perf_counter_init(counter);
3272 3273 3274
		break;

	case PERF_TYPE_SOFTWARE:
3275
		pmu = sw_perf_counter_init(counter);
3276 3277 3278
		break;

	case PERF_TYPE_TRACEPOINT:
3279
		pmu = tp_perf_counter_init(counter);
3280 3281
		break;
	}
3282 3283
done:
	err = 0;
3284
	if (!pmu)
3285
		err = -EINVAL;
3286 3287
	else if (IS_ERR(pmu))
		err = PTR_ERR(pmu);
3288

3289
	if (err) {
I
Ingo Molnar 已提交
3290
		kfree(counter);
3291
		return ERR_PTR(err);
I
Ingo Molnar 已提交
3292
	}
3293

3294
	counter->pmu = pmu;
T
Thomas Gleixner 已提交
3295

3296
	atomic_inc(&nr_counters);
3297 3298 3299 3300 3301 3302 3303
	if (counter->hw_event.mmap)
		atomic_inc(&nr_mmap_tracking);
	if (counter->hw_event.munmap)
		atomic_inc(&nr_munmap_tracking);
	if (counter->hw_event.comm)
		atomic_inc(&nr_comm_tracking);

T
Thomas Gleixner 已提交
3304 3305 3306 3307
	return counter;
}

/**
3308
 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
I
Ingo Molnar 已提交
3309 3310
 *
 * @hw_event_uptr:	event type attributes for monitoring/sampling
T
Thomas Gleixner 已提交
3311
 * @pid:		target pid
I
Ingo Molnar 已提交
3312 3313
 * @cpu:		target cpu
 * @group_fd:		group leader counter fd
T
Thomas Gleixner 已提交
3314
 */
3315
SYSCALL_DEFINE5(perf_counter_open,
3316
		const struct perf_counter_hw_event __user *, hw_event_uptr,
3317
		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
T
Thomas Gleixner 已提交
3318
{
3319
	struct perf_counter *counter, *group_leader;
I
Ingo Molnar 已提交
3320
	struct perf_counter_hw_event hw_event;
3321
	struct perf_counter_context *ctx;
3322
	struct file *counter_file = NULL;
3323 3324
	struct file *group_file = NULL;
	int fput_needed = 0;
3325
	int fput_needed2 = 0;
T
Thomas Gleixner 已提交
3326 3327
	int ret;

3328 3329 3330 3331
	/* for future expandability... */
	if (flags)
		return -EINVAL;

I
Ingo Molnar 已提交
3332
	if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
3333 3334
		return -EFAULT;

3335
	/*
I
Ingo Molnar 已提交
3336 3337 3338 3339 3340 3341 3342 3343
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pid, cpu);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/*
	 * Look up the group leader (we will attach this counter to it):
3344 3345 3346 3347 3348 3349
	 */
	group_leader = NULL;
	if (group_fd != -1) {
		ret = -EINVAL;
		group_file = fget_light(group_fd, &fput_needed);
		if (!group_file)
I
Ingo Molnar 已提交
3350
			goto err_put_context;
3351
		if (group_file->f_op != &perf_fops)
I
Ingo Molnar 已提交
3352
			goto err_put_context;
3353 3354 3355

		group_leader = group_file->private_data;
		/*
I
Ingo Molnar 已提交
3356 3357 3358 3359 3360 3361 3362 3363
		 * Do not allow a recursive hierarchy (this new sibling
		 * becoming part of another group-sibling):
		 */
		if (group_leader->group_leader != group_leader)
			goto err_put_context;
		/*
		 * Do not allow to attach to a group in a different
		 * task or CPU context:
3364
		 */
I
Ingo Molnar 已提交
3365 3366
		if (group_leader->ctx != ctx)
			goto err_put_context;
3367 3368 3369 3370 3371
		/*
		 * Only a group leader can be exclusive or pinned
		 */
		if (hw_event.exclusive || hw_event.pinned)
			goto err_put_context;
3372 3373
	}

3374 3375
	counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader,
				     GFP_KERNEL);
3376 3377
	ret = PTR_ERR(counter);
	if (IS_ERR(counter))
T
Thomas Gleixner 已提交
3378 3379 3380 3381
		goto err_put_context;

	ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
	if (ret < 0)
3382 3383 3384 3385 3386 3387 3388
		goto err_free_put_context;

	counter_file = fget_light(ret, &fput_needed2);
	if (!counter_file)
		goto err_free_put_context;

	counter->filp = counter_file;
3389
	WARN_ON_ONCE(ctx->parent_ctx);
3390
	mutex_lock(&ctx->mutex);
3391
	perf_install_in_context(ctx, counter, cpu);
3392
	++ctx->generation;
3393
	mutex_unlock(&ctx->mutex);
3394

3395 3396 3397 3398 3399 3400
	counter->owner = current;
	get_task_struct(current);
	mutex_lock(&current->perf_counter_mutex);
	list_add_tail(&counter->owner_entry, &current->perf_counter_list);
	mutex_unlock(&current->perf_counter_mutex);

3401
	fput_light(counter_file, fput_needed2);
T
Thomas Gleixner 已提交
3402

3403 3404 3405
out_fput:
	fput_light(group_file, fput_needed);

T
Thomas Gleixner 已提交
3406 3407
	return ret;

3408
err_free_put_context:
T
Thomas Gleixner 已提交
3409 3410 3411
	kfree(counter);

err_put_context:
3412
	put_ctx(ctx);
T
Thomas Gleixner 已提交
3413

3414
	goto out_fput;
T
Thomas Gleixner 已提交
3415 3416
}

3417 3418 3419
/*
 * inherit a counter from parent task to child task:
 */
3420
static struct perf_counter *
3421 3422 3423 3424
inherit_counter(struct perf_counter *parent_counter,
	      struct task_struct *parent,
	      struct perf_counter_context *parent_ctx,
	      struct task_struct *child,
3425
	      struct perf_counter *group_leader,
3426 3427 3428 3429
	      struct perf_counter_context *child_ctx)
{
	struct perf_counter *child_counter;

3430 3431 3432 3433 3434 3435 3436 3437 3438
	/*
	 * Instead of creating recursive hierarchies of counters,
	 * we link inherited counters back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_counter->parent)
		parent_counter = parent_counter->parent;

3439
	child_counter = perf_counter_alloc(&parent_counter->hw_event,
3440 3441
					   parent_counter->cpu, child_ctx,
					   group_leader, GFP_KERNEL);
3442 3443
	if (IS_ERR(child_counter))
		return child_counter;
3444
	get_ctx(child_ctx);
3445

3446 3447 3448 3449 3450 3451 3452 3453 3454 3455
	/*
	 * Make the child state follow the state of the parent counter,
	 * not its hw_event.disabled bit.  We hold the parent's mutex,
	 * so we won't race with perf_counter_{en,dis}able_family.
	 */
	if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
		child_counter->state = PERF_COUNTER_STATE_INACTIVE;
	else
		child_counter->state = PERF_COUNTER_STATE_OFF;

3456 3457 3458
	/*
	 * Link it up in the child's context:
	 */
3459
	add_counter_to_ctx(child_counter, child_ctx);
3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474

	child_counter->parent = parent_counter;
	/*
	 * inherit into child's child as well:
	 */
	child_counter->hw_event.inherit = 1;

	/*
	 * Get a reference to the parent filp - we will fput it
	 * when the child counter exits. This is safe to do because
	 * we are in the parent and we know that the filp still
	 * exists and has a nonzero count:
	 */
	atomic_long_inc(&parent_counter->filp->f_count);

3475 3476 3477
	/*
	 * Link this into the parent counter's child list
	 */
3478
	WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
3479
	mutex_lock(&parent_counter->child_mutex);
3480
	list_add_tail(&child_counter->child_list, &parent_counter->child_list);
3481
	mutex_unlock(&parent_counter->child_mutex);
3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493

	return child_counter;
}

static int inherit_group(struct perf_counter *parent_counter,
	      struct task_struct *parent,
	      struct perf_counter_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_counter_context *child_ctx)
{
	struct perf_counter *leader;
	struct perf_counter *sub;
3494
	struct perf_counter *child_ctr;
3495 3496 3497

	leader = inherit_counter(parent_counter, parent, parent_ctx,
				 child, NULL, child_ctx);
3498 3499
	if (IS_ERR(leader))
		return PTR_ERR(leader);
3500
	list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
3501 3502 3503 3504
		child_ctr = inherit_counter(sub, parent, parent_ctx,
					    child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
3505
	}
3506 3507 3508
	return 0;
}

3509 3510 3511
static void sync_child_counter(struct perf_counter *child_counter,
			       struct perf_counter *parent_counter)
{
3512
	u64 child_val;
3513 3514 3515 3516 3517 3518 3519

	child_val = atomic64_read(&child_counter->count);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_counter->count);
3520 3521 3522 3523
	atomic64_add(child_counter->total_time_enabled,
		     &parent_counter->child_total_time_enabled);
	atomic64_add(child_counter->total_time_running,
		     &parent_counter->child_total_time_running);
3524 3525 3526 3527

	/*
	 * Remove this counter from the parent's list
	 */
3528
	WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
3529
	mutex_lock(&parent_counter->child_mutex);
3530
	list_del_init(&child_counter->child_list);
3531
	mutex_unlock(&parent_counter->child_mutex);
3532 3533 3534 3535 3536 3537 3538 3539

	/*
	 * Release the parent counter, if this was the last
	 * reference to it.
	 */
	fput(parent_counter->filp);
}

3540 3541 3542 3543 3544 3545 3546
static void
__perf_counter_exit_task(struct task_struct *child,
			 struct perf_counter *child_counter,
			 struct perf_counter_context *child_ctx)
{
	struct perf_counter *parent_counter;

3547
	update_counter_times(child_counter);
3548
	perf_counter_remove_from_context(child_counter);
3549

3550 3551 3552 3553 3554 3555
	parent_counter = child_counter->parent;
	/*
	 * It can happen that parent exits first, and has counters
	 * that are still around due to the child reference. These
	 * counters need to be zapped - but otherwise linger.
	 */
3556 3557
	if (parent_counter) {
		sync_child_counter(child_counter, parent_counter);
3558
		free_counter(child_counter);
3559
	}
3560 3561 3562
}

/*
3563
 * When a child task exits, feed back counter values to parent counters.
3564 3565 3566 3567 3568
 */
void perf_counter_exit_task(struct task_struct *child)
{
	struct perf_counter *child_counter, *tmp;
	struct perf_counter_context *child_ctx;
3569
	unsigned long flags;
3570

3571
	if (likely(!child->perf_counter_ctxp))
3572 3573
		return;

3574
	local_irq_save(flags);
3575 3576 3577 3578 3579 3580 3581
	/*
	 * We can't reschedule here because interrupts are disabled,
	 * and either child is current or it is a task that can't be
	 * scheduled, so we are now safe from rescheduling changing
	 * our context.
	 */
	child_ctx = child->perf_counter_ctxp;
3582
	__perf_counter_task_sched_out(child_ctx);
3583 3584 3585 3586 3587 3588 3589

	/*
	 * Take the context lock here so that if find_get_context is
	 * reading child->perf_counter_ctxp, we wait until it has
	 * incremented the context's refcount before we do put_ctx below.
	 */
	spin_lock(&child_ctx->lock);
3590
	child->perf_counter_ctxp = NULL;
3591 3592 3593 3594 3595 3596 3597 3598 3599
	if (child_ctx->parent_ctx) {
		/*
		 * This context is a clone; unclone it so it can't get
		 * swapped to another process while we're removing all
		 * the counters from it.
		 */
		put_ctx(child_ctx->parent_ctx);
		child_ctx->parent_ctx = NULL;
	}
3600
	spin_unlock(&child_ctx->lock);
3601 3602 3603 3604
	local_irq_restore(flags);

	mutex_lock(&child_ctx->mutex);

3605
again:
3606 3607 3608
	list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
				 list_entry)
		__perf_counter_exit_task(child, child_counter, child_ctx);
3609 3610 3611 3612 3613 3614 3615 3616

	/*
	 * If the last counter was a group counter, it will have appended all
	 * its siblings to the list, but we obtained 'tmp' before that which
	 * will still point to the list head terminating the iteration.
	 */
	if (!list_empty(&child_ctx->counter_list))
		goto again;
3617 3618 3619 3620

	mutex_unlock(&child_ctx->mutex);

	put_ctx(child_ctx);
3621 3622 3623 3624 3625
}

/*
 * Initialize the perf_counter context in task_struct
 */
3626
int perf_counter_init_task(struct task_struct *child)
3627 3628
{
	struct perf_counter_context *child_ctx, *parent_ctx;
3629
	struct perf_counter_context *cloned_ctx;
3630
	struct perf_counter *counter;
3631
	struct task_struct *parent = current;
3632
	int inherited_all = 1;
3633
	u64 cloned_gen;
3634
	int ret = 0;
3635

3636
	child->perf_counter_ctxp = NULL;
3637

3638 3639 3640
	mutex_init(&child->perf_counter_mutex);
	INIT_LIST_HEAD(&child->perf_counter_list);

3641
	if (likely(!parent->perf_counter_ctxp))
3642 3643
		return 0;

3644 3645
	/*
	 * This is executed from the parent task context, so inherit
3646 3647
	 * counters that have been marked for cloning.
	 * First allocate and initialize a context for the child.
3648 3649
	 */

3650 3651
	child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
	if (!child_ctx)
3652
		return -ENOMEM;
3653

3654 3655
	__perf_counter_init_context(child_ctx, child);
	child->perf_counter_ctxp = child_ctx;
3656
	get_task_struct(child);
3657

3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685
	/*
	 * If the parent's context is a clone, temporarily set its
	 * parent_gen to an impossible value (all 1s) so it won't get
	 * swapped under us.  The rcu_read_lock makes sure that
	 * parent_ctx continues to exist even if it gets swapped to
	 * another process and then freed while we are trying to get
	 * its lock.
	 */
	rcu_read_lock();
 retry:
	parent_ctx = rcu_dereference(parent->perf_counter_ctxp);
	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */
	spin_lock_irq(&parent_ctx->lock);
	if (parent_ctx != rcu_dereference(parent->perf_counter_ctxp)) {
		spin_unlock_irq(&parent_ctx->lock);
		goto retry;
	}
	cloned_gen = parent_ctx->parent_gen;
	if (parent_ctx->parent_ctx)
		parent_ctx->parent_gen = ~0ull;
	spin_unlock_irq(&parent_ctx->lock);
	rcu_read_unlock();

3686 3687 3688 3689
	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
3690
	mutex_lock(&parent_ctx->mutex);
3691 3692 3693 3694 3695

	/*
	 * We dont have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
3696 3697 3698 3699
	list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) {
		if (counter != counter->group_leader)
			continue;

3700 3701
		if (!counter->hw_event.inherit) {
			inherited_all = 0;
3702
			continue;
3703
		}
3704

3705 3706 3707
		ret = inherit_group(counter, parent, parent_ctx,
					     child, child_ctx);
		if (ret) {
3708
			inherited_all = 0;
3709
			break;
3710 3711 3712 3713 3714 3715 3716
		}
	}

	if (inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
3717 3718 3719 3720
		 * Note that if the parent is a clone, it could get
		 * uncloned at any point, but that doesn't matter
		 * because the list of counters and the generation
		 * count can't have changed since we took the mutex.
3721
		 */
3722 3723 3724 3725
		cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = cloned_gen;
3726 3727 3728 3729 3730
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
3731 3732
	}

3733
	mutex_unlock(&parent_ctx->mutex);
3734

3735 3736 3737 3738 3739 3740 3741 3742 3743 3744
	/*
	 * Restore the clone status of the parent.
	 */
	if (parent_ctx->parent_ctx) {
		spin_lock_irq(&parent_ctx->lock);
		if (parent_ctx->parent_ctx)
			parent_ctx->parent_gen = cloned_gen;
		spin_unlock_irq(&parent_ctx->lock);
	}

3745
	return ret;
3746 3747
}

3748
static void __cpuinit perf_counter_init_cpu(int cpu)
T
Thomas Gleixner 已提交
3749
{
3750
	struct perf_cpu_context *cpuctx;
T
Thomas Gleixner 已提交
3751

3752 3753
	cpuctx = &per_cpu(perf_cpu_context, cpu);
	__perf_counter_init_context(&cpuctx->ctx, NULL);
T
Thomas Gleixner 已提交
3754

3755
	spin_lock(&perf_resource_lock);
3756
	cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
3757
	spin_unlock(&perf_resource_lock);
3758

3759
	hw_perf_counter_setup(cpu);
T
Thomas Gleixner 已提交
3760 3761 3762
}

#ifdef CONFIG_HOTPLUG_CPU
3763
static void __perf_counter_exit_cpu(void *info)
T
Thomas Gleixner 已提交
3764 3765 3766 3767 3768
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = &cpuctx->ctx;
	struct perf_counter *counter, *tmp;

3769 3770
	list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
		__perf_counter_remove_from_context(counter);
}
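
/*
 * Tear down the counter context of a CPU that is going down; the
 * removal itself runs on that CPU via smp_call_function_single().
 */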
static void perf_counter_exit_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &cpuctx->ctx;

	mutex_lock(&ctx->mutex);
	smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
	mutex_unlock(&ctx->mutex);
}
#else
static inline void perf_counter_exit_cpu(int cpu) { }
#endif

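/*
 * CPU hotplug callback: set up the per CPU counter state before a
 * CPU comes online and tear it down before it goes offline.
 */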
static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		perf_counter_init_cpu(cpu);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		perf_counter_exit_cpu(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata perf_cpu_nb = {
	.notifier_call		= perf_cpu_notify,
};

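/*
 * Boot-time initialization: set up the boot CPU's context and
 * register the hotplug notifier that takes care of all other CPUs.
 */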
void __init perf_counter_init(void)
{
	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	register_cpu_notifier(&perf_cpu_nb);
}

static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%d\n", perf_reserved_percpu);
}

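/*
 * Sysfs write handler for 'reserve_percpu': update the number of
 * counters reserved for per CPU use and recompute the per-task
 * limit of every online CPU.
 */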
static ssize_t
perf_set_reserve_percpu(struct sysdev_class *class,
			const char *buf,
			size_t count)
{
	struct perf_cpu_context *cpuctx;
	unsigned long val;
	int err, cpu, mpt;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > perf_max_counters)
		return -EINVAL;

	spin_lock(&perf_resource_lock);
	perf_reserved_percpu = val;
	for_each_online_cpu(cpu) {
		cpuctx = &per_cpu(perf_cpu_context, cpu);
		spin_lock_irq(&cpuctx->ctx.lock);
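		/*
		 * The per-task limit is whatever remains after the
		 * counters already on this CPU or after the per CPU
		 * reservation, whichever is smaller.
		 */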
		mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
			  perf_max_counters - perf_reserved_percpu);
		cpuctx->max_pertask = mpt;
		spin_unlock_irq(&cpuctx->ctx.lock);
	}
	spin_unlock(&perf_resource_lock);

	return count;
}

static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%d\n", perf_overcommit);
}

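/*
 * Sysfs write handler for 'overcommit': accepts 0 or 1 and updates
 * the overcommit setting under the resource lock.
 */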
static ssize_t
perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
{
	unsigned long val;
	int err;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > 1)
		return -EINVAL;

	spin_lock(&perf_resource_lock);
	perf_overcommit = val;
	spin_unlock(&perf_resource_lock);

	return count;
}

static SYSDEV_CLASS_ATTR(
				reserve_percpu,
				0644,
				perf_show_reserve_percpu,
				perf_set_reserve_percpu
			);

static SYSDEV_CLASS_ATTR(
				overcommit,
				0644,
				perf_show_overcommit,
				perf_set_overcommit
			);

static struct attribute *perfclass_attrs[] = {
	&attr_reserve_percpu.attr,
	&attr_overcommit.attr,
	NULL
};

static struct attribute_group perfclass_attr_group = {
	.attrs			= perfclass_attrs,
	.name			= "perf_counters",
};

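/*
 * Export the 'perf_counters' attribute group (reserve_percpu and
 * overcommit) under the CPU sysdev class.
 */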
static int __init perf_counter_sysfs_init(void)
{
	return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
				  &perfclass_attr_group);
}
device_initcall(perf_counter_sysfs_init);