/*
 * Performance counter core code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright    2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/sysfs.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/vmstat.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_counter.h>
#include <linux/dcache.h>

#include <asm/irq_regs.h>

/*
 * Each CPU has a list of per CPU counters:
 */
DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

int perf_max_counters __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

static atomic_t nr_counters __read_mostly;
static atomic_t nr_mmap_tracking __read_mostly;
static atomic_t nr_munmap_tracking __read_mostly;
static atomic_t nr_comm_tracking __read_mostly;

int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
int sysctl_perf_counter_limit __read_mostly = 100000; /* max NMIs per second */

/*
 * Lock for (sysadmin-configurable) counter reservations:
 */
static DEFINE_SPINLOCK(perf_resource_lock);

/*
 * Architecture provided APIs - weak aliases:
 */
extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	return NULL;
}

void __weak hw_perf_disable(void)		{ barrier(); }
void __weak hw_perf_enable(void)		{ barrier(); }

void __weak hw_perf_counter_setup(int cpu)	{ barrier(); }
int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx, int cpu)
{
	return 0;
}

void __weak perf_counter_print_debug(void)	{ }

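/*
 * perf_disable()/perf_enable() nest: the hardware PMU is only re-enabled
 * once the per-CPU disable_count drops back to zero.
 */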
static DEFINE_PER_CPU(int, disable_count);

void __perf_disable(void)
{
	__get_cpu_var(disable_count)++;
}

bool __perf_enable(void)
{
	return !--__get_cpu_var(disable_count);
}

void perf_disable(void)
{
	__perf_disable();
	hw_perf_disable();
}

void perf_enable(void)
{
	if (__perf_enable())
		hw_perf_enable();
}

static void get_ctx(struct perf_counter_context *ctx)
{
	atomic_inc(&ctx->refcount);
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_counter_context *ctx;

	ctx = container_of(head, struct perf_counter_context, rcu_head);
	kfree(ctx);
}

static void put_ctx(struct perf_counter_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}

/*
 * Get the perf_counter_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_counter_context *perf_lock_task_context(
				struct task_struct *task, unsigned long *flags)
{
	struct perf_counter_context *ctx;

	rcu_read_lock();
 retry:
	ctx = rcu_dereference(task->perf_counter_ctxp);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_counter_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed.  Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so.  If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
			spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}
	}
	rcu_read_unlock();
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_counter_context *perf_pin_task_context(struct task_struct *task)
{
	struct perf_counter_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, &flags);
	if (ctx) {
		++ctx->pin_count;
		get_ctx(ctx);
		spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_counter_context *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	spin_unlock_irqrestore(&ctx->lock, flags);
	put_ctx(ctx);
}

/*
 * Add a counter to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *group_leader = counter->group_leader;

	/*
	 * Depending on whether it is a standalone or sibling counter,
	 * add it straight to the context's counter list, or to the group
	 * leader's sibling list:
	 */
	if (group_leader == counter)
		list_add_tail(&counter->list_entry, &ctx->counter_list);
	else {
		list_add_tail(&counter->list_entry, &group_leader->sibling_list);
		group_leader->nr_siblings++;
	}

	list_add_rcu(&counter->event_entry, &ctx->event_list);
	ctx->nr_counters++;
}

/*
 * Remove a counter from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *sibling, *tmp;

	if (list_empty(&counter->list_entry))
		return;
	ctx->nr_counters--;

	list_del_init(&counter->list_entry);
	list_del_rcu(&counter->event_entry);

	if (counter->group_leader != counter)
		counter->group_leader->nr_siblings--;

	/*
	 * If this was a group counter with sibling counters then
	 * upgrade the siblings to singleton counters by adding them
	 * to the context list directly:
	 */
	list_for_each_entry_safe(sibling, tmp,
				 &counter->sibling_list, list_entry) {

		list_move_tail(&sibling->list_entry, &ctx->counter_list);
		sibling->group_leader = sibling;
	}
}

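/*
 * Take an active counter off the PMU: record its stop time, call the
 * pmu->disable() method and update the context's active bookkeeping.
 */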
static void
counter_sched_out(struct perf_counter *counter,
		  struct perf_cpu_context *cpuctx,
		  struct perf_counter_context *ctx)
{
	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter->state = PERF_COUNTER_STATE_INACTIVE;
	counter->tstamp_stopped = ctx->time;
	counter->pmu->disable(counter);
	counter->oncpu = -1;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (counter->hw_event.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

static void
group_sched_out(struct perf_counter *group_counter,
		struct perf_cpu_context *cpuctx,
		struct perf_counter_context *ctx)
{
	struct perf_counter *counter;

	if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter_sched_out(group_counter, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
		counter_sched_out(counter, cpuctx, ctx);

	if (group_counter->hw_event.exclusive)
		cpuctx->exclusive = 0;
}

/*
 * Cross CPU call to remove a performance counter
 *
 * We disable the counter on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_counter_remove_from_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock(&ctx->lock);
	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level.
	 */
	perf_disable();

	counter_sched_out(counter, cpuctx, ctx);

	list_del_counter(counter, ctx);

	if (!ctx->task) {
		/*
		 * Allow more per task counters with respect to the
		 * reservation:
		 */
		cpuctx->max_pertask =
			min(perf_max_counters - ctx->nr_counters,
			    perf_max_counters - perf_reserved_percpu);
	}

	perf_enable();
	spin_unlock(&ctx->lock);
}


/*
 * Remove the counter from a task's (or a CPU's) list of counters.
 *
 * Must be called with ctx->mutex held.
 *
 * CPU counters are removed with a smp call. For task counters we only
 * call when the task is on a CPU.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid.  This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_counter_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_counter_remove_from_context(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(counter->cpu,
					 __perf_counter_remove_from_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_counter_remove_from_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can remove the counter safely, if the call above did not
	 * succeed.
	 */
	if (!list_empty(&counter->list_entry)) {
		list_del_counter(counter, ctx);
	}
	spin_unlock_irq(&ctx->lock);
}

static inline u64 perf_clock(void)
{
	return cpu_clock(smp_processor_id());
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_counter_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

/*
 * Update the total_time_enabled and total_time_running fields for a counter.
 */
static void update_counter_times(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	u64 run_end;

	if (counter->state < PERF_COUNTER_STATE_INACTIVE)
		return;

	counter->total_time_enabled = ctx->time - counter->tstamp_enabled;

	if (counter->state == PERF_COUNTER_STATE_INACTIVE)
		run_end = counter->tstamp_stopped;
	else
		run_end = ctx->time;

	counter->total_time_running = run_end - counter->tstamp_running;
}

/*
 * Update total_time_enabled and total_time_running for all counters in a group.
 */
static void update_group_times(struct perf_counter *leader)
{
	struct perf_counter *counter;

	update_counter_times(leader);
	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		update_counter_times(counter);
}

/*
 * Cross CPU call to disable a performance counter
 */
static void __perf_counter_disable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock(&ctx->lock);

	/*
	 * If the counter is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
		update_context_time(ctx);
		update_counter_times(counter);
		if (counter == counter->group_leader)
			group_sched_out(counter, cpuctx, ctx);
		else
			counter_sched_out(counter, cpuctx, ctx);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock(&ctx->lock);
}

/*
 * Disable a counter.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_counter_for_each_child or perf_counter_for_each because they
 * hold the top-level counter's child_mutex, so any descendant that
 * goes to exit will block in sync_child_counter.
 * When called from perf_pending_counter it's OK because counter->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_counter_task_sched_out for this context.
 */
static void perf_counter_disable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_disable,
					 counter, 1);
		return;
	}

 retry:
	task_oncpu_function_call(task, __perf_counter_disable, counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the counter is still active, we need to retry the cross-call.
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
		update_counter_times(counter);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock_irq(&ctx->lock);
}

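/*
 * Put one counter on the PMU: mark it ACTIVE, call pmu->enable() and
 * roll it back to INACTIVE if the hardware refuses it.
 */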
static int
counter_sched_in(struct perf_counter *counter,
		 struct perf_cpu_context *cpuctx,
		 struct perf_counter_context *ctx,
		 int cpu)
{
	if (counter->state <= PERF_COUNTER_STATE_OFF)
		return 0;

	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (counter->pmu->enable(counter)) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->oncpu = -1;
		return -EAGAIN;
	}

	counter->tstamp_running += ctx->time - counter->tstamp_stopped;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (counter->hw_event.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}

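/*
 * Schedule a whole counter group onto the PMU; the group goes on as a
 * unit, so a failing sibling unwinds everything scheduled so far.
 */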
static int
group_sched_in(struct perf_counter *group_counter,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx,
	       int cpu)
{
	struct perf_counter *counter, *partial_group;
	int ret;

	if (group_counter->state == PERF_COUNTER_STATE_OFF)
		return 0;

	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
	if (ret)
		return ret < 0 ? ret : 0;

	group_counter->prev_state = group_counter->state;
	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
		return -EAGAIN;

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		counter->prev_state = counter->state;
		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
			partial_group = counter;
			goto group_error;
		}
	}

	return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter == partial_group)
			break;
		counter_sched_out(counter, cpuctx, ctx);
	}
	counter_sched_out(group_counter, cpuctx, ctx);

	return -EAGAIN;
}

/*
 * Return 1 for a group consisting entirely of software counters,
 * 0 if the group contains any hardware counters.
 */
static int is_software_only_group(struct perf_counter *leader)
{
	struct perf_counter *counter;

	if (!is_software_counter(leader))
		return 0;

	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		if (!is_software_counter(counter))
			return 0;

	return 1;
}

/*
 * Work out whether we can put this counter group on the CPU now.
 */
static int group_can_go_on(struct perf_counter *counter,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software counters can always go on.
	 */
	if (is_software_only_group(counter))
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * counters can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * counters on the CPU, it can't go on.
	 */
	if (counter->hw_event.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

static void add_counter_to_ctx(struct perf_counter *counter,
			       struct perf_counter_context *ctx)
{
	list_add_counter(counter, ctx);
	counter->prev_state = PERF_COUNTER_STATE_OFF;
	counter->tstamp_enabled = ctx->time;
	counter->tstamp_running = ctx->time;
	counter->tstamp_stopped = ctx->time;
}

/*
 * Cross CPU call to install and enable a performance counter
 *
 * Must be called with ctx->mutex held
 */
static void __perf_install_in_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	int cpu = smp_processor_id();
	int err;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 * Or possibly this is the right context but it isn't
	 * on this cpu because it had no counters.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level. NOP for non NMI based counters.
	 */
	perf_disable();

	add_counter_to_ctx(counter, ctx);

	/*
	 * Don't put the counter on if it is disabled or if
	 * it is in a group and the group isn't on.
	 */
	if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
	    (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
		goto unlock;

	/*
	 * An exclusive counter can't go on if there are already active
	 * hardware counters, and no hardware counter can go on if there
	 * is already an exclusive counter on.
	 */
	if (!group_can_go_on(counter, cpuctx, 1))
		err = -EEXIST;
	else
		err = counter_sched_in(counter, cpuctx, ctx, cpu);

	if (err) {
		/*
		 * This counter couldn't go on.  If it is in a group
		 * then we have to pull the whole group off.
		 * If the counter group is pinned then put it in error state.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->hw_event.pinned) {
			update_group_times(leader);
			leader->state = PERF_COUNTER_STATE_ERROR;
		}
	}

	if (!err && !ctx->task && cpuctx->max_pertask)
		cpuctx->max_pertask--;

 unlock:
	perf_enable();

	spin_unlock(&ctx->lock);
}

/*
 * Attach a performance counter to a context
 *
 * First we add the counter to the list with the hardware enable bit
 * in counter->hw_config cleared.
 *
 * If the counter is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 *
 * Must be called with ctx->mutex held.
 */
static void
perf_install_in_context(struct perf_counter_context *ctx,
			struct perf_counter *counter,
			int cpu)
{
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are installed via an smp call and
		 * the install is always successful.
		 */
		smp_call_function_single(cpu, __perf_install_in_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_install_in_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * we need to retry the smp call.
	 */
	if (ctx->is_active && list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can add the counter safely, if the call above did not
	 * succeed.
	 */
	if (list_empty(&counter->list_entry))
		add_counter_to_ctx(counter, ctx);
	spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to enable a performance counter
 */
static void __perf_counter_enable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	int err;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	counter->prev_state = counter->state;
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto unlock;
	counter->state = PERF_COUNTER_STATE_INACTIVE;
	counter->tstamp_enabled = ctx->time - counter->total_time_enabled;

	/*
	 * If the counter is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(counter, cpuctx, 1)) {
		err = -EEXIST;
	} else {
		perf_disable();
		if (counter == leader)
			err = group_sched_in(counter, cpuctx, ctx,
					     smp_processor_id());
		else
			err = counter_sched_in(counter, cpuctx, ctx,
					       smp_processor_id());
		perf_enable();
	}

	if (err) {
		/*
		 * If this counter can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->hw_event.pinned) {
			update_group_times(leader);
			leader->state = PERF_COUNTER_STATE_ERROR;
		}
	}

 unlock:
	spin_unlock(&ctx->lock);
}

/*
 * Enable a counter.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_counter_for_each_child or perf_counter_for_each as described
 * for perf_counter_disable.
 */
static void perf_counter_enable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_enable,
					 counter, 1);
		return;
	}

	spin_lock_irq(&ctx->lock);
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto out;

	/*
	 * If the counter is in error state, clear that first.
	 * That way, if we see the counter in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (counter->state == PERF_COUNTER_STATE_ERROR)
		counter->state = PERF_COUNTER_STATE_OFF;

 retry:
	spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_counter_enable, counter);

	spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the counter is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
		goto retry;

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_OFF) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->tstamp_enabled =
			ctx->time - counter->total_time_enabled;
	}
 out:
	spin_unlock_irq(&ctx->lock);
}

static int perf_counter_refresh(struct perf_counter *counter, int refresh)
{
	/*
	 * not supported on inherited counters
	 */
	if (counter->hw_event.inherit)
		return -EINVAL;

	atomic_add(refresh, &counter->event_limit);
	perf_counter_enable(counter);

	return 0;
}

void __perf_counter_sched_out(struct perf_counter_context *ctx,
			      struct perf_cpu_context *cpuctx)
{
	struct perf_counter *counter;

	spin_lock(&ctx->lock);
	ctx->is_active = 0;
	if (likely(!ctx->nr_counters))
		goto out;
	update_context_time(ctx);

	perf_disable();
	if (ctx->nr_active) {
		list_for_each_entry(counter, &ctx->counter_list, list_entry) {
			if (counter != counter->group_leader)
				counter_sched_out(counter, cpuctx, ctx);
			else
				group_sched_out(counter, cpuctx, ctx);
		}
	}
	perf_enable();
 out:
	spin_unlock(&ctx->lock);
}

/*
 * Test whether two contexts are equivalent, i.e. whether they
 * have both been cloned from the same version of the same context
 * and they both have the same number of enabled counters.
 * If the number of enabled counters is the same, then the set
 * of enabled counters should be the same, because these are both
 * inherited contexts, therefore we can't access individual counters
 * in them directly with an fd; we can only enable/disable all
 * counters via prctl, or enable/disable all counters in a family
 * via ioctl, which will have the same effect on both contexts.
 */
static int context_equiv(struct perf_counter_context *ctx1,
			 struct perf_counter_context *ctx2)
{
	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
		&& ctx1->parent_gen == ctx2->parent_gen
		&& !ctx1->pin_count && !ctx2->pin_count;
}

/*
 * Called from scheduler to remove the counters of the current task,
 * with interrupts disabled.
 *
 * We stop each counter and update the counter value in counter->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of counter _before_
 * accessing the counter control register. If an NMI hits, then it will
 * not restart the counter.
 */
void perf_counter_task_sched_out(struct task_struct *task,
				 struct task_struct *next, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = task->perf_counter_ctxp;
	struct perf_counter_context *next_ctx;
	struct perf_counter_context *parent;
	struct pt_regs *regs;
	int do_switch = 1;

	regs = task_pt_regs(task);
	perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs, 0);

	if (likely(!ctx || !cpuctx->task_ctx))
		return;

	update_context_time(ctx);

	rcu_read_lock();
	parent = rcu_dereference(ctx->parent_ctx);
	next_ctx = next->perf_counter_ctxp;
	if (parent && next_ctx &&
	    rcu_dereference(next_ctx->parent_ctx) == parent) {
		/*
		 * Looks like the two contexts are clones, so we might be
		 * able to optimize the context switch.  We lock both
		 * contexts and check that they are clones under the
		 * lock (including re-checking that neither has been
		 * uncloned in the meantime).  It doesn't matter which
		 * order we take the locks because no other cpu could
		 * be trying to lock both of these tasks.
		 */
		spin_lock(&ctx->lock);
		spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
		if (context_equiv(ctx, next_ctx)) {
			/*
			 * XXX do we need a memory barrier of sorts
			 * wrt to rcu_dereference() of perf_counter_ctxp
			 */
			task->perf_counter_ctxp = next_ctx;
			next->perf_counter_ctxp = ctx;
			ctx->task = next;
			next_ctx->task = task;
			do_switch = 0;
		}
		spin_unlock(&next_ctx->lock);
		spin_unlock(&ctx->lock);
	}
	rcu_read_unlock();

	if (do_switch) {
		__perf_counter_sched_out(ctx, cpuctx);
		cpuctx->task_ctx = NULL;
	}
}

/*
 * Called with IRQs disabled
 */
static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);

	if (!cpuctx->task_ctx)
		return;

	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
		return;

	__perf_counter_sched_out(ctx, cpuctx);
	cpuctx->task_ctx = NULL;
}

/*
 * Called with IRQs disabled
 */
static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
{
	__perf_counter_sched_out(&cpuctx->ctx, cpuctx);
}

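/*
 * Schedule a context in: pinned groups go on first (and are put into
 * ERROR state if they cannot get on), then the remaining groups.
 */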
static void
__perf_counter_sched_in(struct perf_counter_context *ctx,
			struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter *counter;
	int can_add_hw = 1;

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	if (likely(!ctx->nr_counters))
		goto out;

	ctx->timestamp = perf_clock();

	perf_disable();

	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
		    !counter->hw_event.pinned)
			continue;
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (counter != counter->group_leader)
			counter_sched_in(counter, cpuctx, ctx, cpu);
		else {
			if (group_can_go_on(counter, cpuctx, 1))
				group_sched_in(counter, cpuctx, ctx, cpu);
		}

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
			update_group_times(counter);
			counter->state = PERF_COUNTER_STATE_ERROR;
		}
	}

	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		/*
		 * Ignore counters in OFF or ERROR state, and
		 * ignore pinned counters since we did them already.
		 */
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
		    counter->hw_event.pinned)
			continue;

		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of counters:
		 */
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (counter != counter->group_leader) {
			if (counter_sched_in(counter, cpuctx, ctx, cpu))
				can_add_hw = 0;
		} else {
			if (group_can_go_on(counter, cpuctx, can_add_hw)) {
				if (group_sched_in(counter, cpuctx, ctx, cpu))
					can_add_hw = 0;
			}
		}
	}
	perf_enable();
 out:
	spin_unlock(&ctx->lock);
}

/*
 * Called from scheduler to add the counters of the current task
 * with interrupts disabled.
 *
 * We restore the counter value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of counter _before_
 * accessing the counter control register. If an NMI hits, then it will
 * keep the counter running.
 */
void perf_counter_task_sched_in(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = task->perf_counter_ctxp;

	if (likely(!ctx))
		return;
	if (cpuctx->task_ctx == ctx)
		return;
	__perf_counter_sched_in(ctx, cpuctx, cpu);
	cpuctx->task_ctx = ctx;
}

static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter_context *ctx = &cpuctx->ctx;

	__perf_counter_sched_in(ctx, cpuctx, cpu);
}

#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_counter *counter, int enable);
static void perf_log_period(struct perf_counter *counter, u64 period);

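/*
 * Re-compute each active counter's irq_period so that counters created
 * with a target irq_freq converge on that many interrupts per second,
 * and unthrottle counters that hit MAX_INTERRUPTS since the last tick.
 */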
static void perf_adjust_freq(struct perf_counter_context *ctx)
{
	struct perf_counter *counter;
	u64 interrupts, irq_period;
	u64 events, period;
	s64 delta;

	spin_lock(&ctx->lock);
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state != PERF_COUNTER_STATE_ACTIVE)
			continue;

		interrupts = counter->hw.interrupts;
		counter->hw.interrupts = 0;

		if (interrupts == MAX_INTERRUPTS) {
			perf_log_throttle(counter, 1);
			counter->pmu->unthrottle(counter);
			interrupts = 2*sysctl_perf_counter_limit/HZ;
		}

		if (!counter->hw_event.freq || !counter->hw_event.irq_freq)
			continue;

		events = HZ * interrupts * counter->hw.irq_period;
		period = div64_u64(events, counter->hw_event.irq_freq);

		delta = (s64)(1 + period - counter->hw.irq_period);
		delta >>= 1;

		irq_period = counter->hw.irq_period + delta;

		if (!irq_period)
			irq_period = 1;

		perf_log_period(counter, irq_period);

		counter->hw.irq_period = irq_period;
	}
	spin_unlock(&ctx->lock);
}

/*
 * Round-robin a context's counters:
 */
static void rotate_ctx(struct perf_counter_context *ctx)
{
	struct perf_counter *counter;

	if (!ctx->nr_counters)
		return;

	spin_lock(&ctx->lock);
	/*
	 * Rotate the first entry last (works just fine for group counters too):
	 */
	perf_disable();
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		list_move_tail(&counter->list_entry, &ctx->counter_list);
		break;
	}
	perf_enable();

	spin_unlock(&ctx->lock);
}

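/*
 * Timer-tick driven housekeeping: adjust sampling periods, then rotate
 * the per-CPU and per-task contexts so all counters get PMU time.
 */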
void perf_counter_task_tick(struct task_struct *curr, int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;

	if (!atomic_read(&nr_counters))
		return;

	cpuctx = &per_cpu(perf_cpu_context, cpu);
	ctx = curr->perf_counter_ctxp;

	perf_adjust_freq(&cpuctx->ctx);
	if (ctx)
		perf_adjust_freq(ctx);

	perf_counter_cpu_sched_out(cpuctx);
	if (ctx)
		__perf_counter_task_sched_out(ctx);

	rotate_ctx(&cpuctx->ctx);
	if (ctx)
		rotate_ctx(ctx);

	perf_counter_cpu_sched_in(cpuctx, cpu);
	if (ctx)
		perf_counter_task_sched_in(curr, cpu);
}

/*
 * Cross CPU call to read the hardware counter
 */
static void __read(void *info)
{
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	unsigned long flags;

	local_irq_save(flags);
	if (ctx->is_active)
		update_context_time(ctx);
	counter->pmu->read(counter);
	update_counter_times(counter);
	local_irq_restore(flags);
}

static u64 perf_counter_read(struct perf_counter *counter)
{
	/*
	 * If counter is enabled and currently active on a CPU, update the
	 * value in the counter structure:
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		smp_call_function_single(counter->oncpu,
					 __read, counter, 1);
	} else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
		update_counter_times(counter);
	}

	return atomic64_read(&counter->count);
}

/*
 * Initialize the perf_counter context in a task_struct:
 */
static void
__perf_counter_init_context(struct perf_counter_context *ctx,
			    struct task_struct *task)
{
	memset(ctx, 0, sizeof(*ctx));
	spin_lock_init(&ctx->lock);
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->counter_list);
	INIT_LIST_HEAD(&ctx->event_list);
	atomic_set(&ctx->refcount, 1);
	ctx->task = task;
}

static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;
	struct perf_counter_context *parent_ctx;
	struct task_struct *task;
	unsigned long flags;
	int err;

	/*
	 * If cpu is not a wildcard then this is a percpu counter:
	 */
	if (cpu != -1) {
		/* Must be root to operate on a CPU counter: */
		if (sysctl_perf_counter_priv && !capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);

		if (cpu < 0 || cpu > num_possible_cpus())
			return ERR_PTR(-EINVAL);

		/*
		 * We could be clever and allow to attach a counter to an
		 * offline CPU and activate it when the CPU comes up, but
		 * that's for later.
		 */
		if (!cpu_isset(cpu, cpu_online_map))
			return ERR_PTR(-ENODEV);

		cpuctx = &per_cpu(perf_cpu_context, cpu);
		ctx = &cpuctx->ctx;
		get_ctx(ctx);

		return ctx;
	}

	rcu_read_lock();
	if (!pid)
		task = current;
	else
		task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	if (!task)
		return ERR_PTR(-ESRCH);

	/*
	 * Can't attach counters to a dying task.
	 */
	err = -ESRCH;
	if (task->flags & PF_EXITING)
		goto errout;

	/* Reuse ptrace permission checks for now. */
	err = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto errout;

 retry:
	ctx = perf_lock_task_context(task, &flags);
	if (ctx) {
		parent_ctx = ctx->parent_ctx;
		if (parent_ctx) {
			put_ctx(parent_ctx);
			ctx->parent_ctx = NULL;		/* no longer a clone */
		}
		/*
		 * Get an extra reference before dropping the lock so that
		 * this context won't get freed if the task exits.
		 */
		get_ctx(ctx);
		spin_unlock_irqrestore(&ctx->lock, flags);
	}

	if (!ctx) {
		ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
		err = -ENOMEM;
		if (!ctx)
			goto errout;
		__perf_counter_init_context(ctx, task);
		get_ctx(ctx);
		if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) {
			/*
			 * We raced with some other task; use
			 * the context they set.
			 */
			kfree(ctx);
			goto retry;
		}
		get_task_struct(task);
	}

	put_task_struct(task);
	return ctx;

 errout:
	put_task_struct(task);
	return ERR_PTR(err);
}

static void free_counter_rcu(struct rcu_head *head)
{
	struct perf_counter *counter;

	counter = container_of(head, struct perf_counter, rcu_head);
	kfree(counter);
}

static void perf_pending_sync(struct perf_counter *counter);

static void free_counter(struct perf_counter *counter)
{
	perf_pending_sync(counter);

	atomic_dec(&nr_counters);
	if (counter->hw_event.mmap)
		atomic_dec(&nr_mmap_tracking);
	if (counter->hw_event.munmap)
		atomic_dec(&nr_munmap_tracking);
	if (counter->hw_event.comm)
		atomic_dec(&nr_comm_tracking);

	if (counter->destroy)
		counter->destroy(counter);

	put_ctx(counter->ctx);
	call_rcu(&counter->rcu_head, free_counter_rcu);
}

/*
 * Called when the last reference to the file is gone.
 */
static int perf_release(struct inode *inode, struct file *file)
{
	struct perf_counter *counter = file->private_data;
	struct perf_counter_context *ctx = counter->ctx;

	file->private_data = NULL;

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_counter_remove_from_context(counter);
	mutex_unlock(&ctx->mutex);

	mutex_lock(&counter->owner->perf_counter_mutex);
	list_del_init(&counter->owner_entry);
	mutex_unlock(&counter->owner->perf_counter_mutex);
	put_task_struct(counter->owner);

	free_counter(counter);

	return 0;
}

/*
 * Read the performance counter - simple non blocking version for now
 */
static ssize_t
perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
{
	u64 values[3];
	int n;

	/*
	 * Return end-of-file for a read on a counter that is in
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled on to the CPU at some point).
	 */
	if (counter->state == PERF_COUNTER_STATE_ERROR)
		return 0;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	mutex_lock(&counter->child_mutex);
	values[0] = perf_counter_read(counter);
	n = 1;
	if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = counter->total_time_enabled +
			atomic64_read(&counter->child_total_time_enabled);
	if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = counter->total_time_running +
			atomic64_read(&counter->child_total_time_running);
	mutex_unlock(&counter->child_mutex);

	if (count < n * sizeof(u64))
		return -EINVAL;
	count = n * sizeof(u64);

	if (copy_to_user(buf, values, count))
		return -EFAULT;

	return count;
}

static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct perf_counter *counter = file->private_data;

	return perf_read_hw(counter, buf, count);
}

static unsigned int perf_poll(struct file *file, poll_table *wait)
{
	struct perf_counter *counter = file->private_data;
	struct perf_mmap_data *data;
	unsigned int events = POLL_HUP;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (data)
		events = atomic_xchg(&data->poll, 0);
	rcu_read_unlock();

	poll_wait(file, &counter->waitq, wait);

	return events;
}

static void perf_counter_reset(struct perf_counter *counter)
{
	(void)perf_counter_read(counter);
	atomic64_set(&counter->count, 0);
	perf_counter_update_userpage(counter);
}

static void perf_counter_for_each_sibling(struct perf_counter *counter,
					  void (*func)(struct perf_counter *))
{
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *sibling;

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	counter = counter->group_leader;

	func(counter);
	list_for_each_entry(sibling, &counter->sibling_list, list_entry)
		func(sibling);
	mutex_unlock(&ctx->mutex);
}

/*
 * Holding the top-level counter's child_mutex means that any
 * descendant process that has inherited this counter will block
 * in sync_child_counter if it goes to exit, thus satisfying the
 * task existence requirements of perf_counter_enable/disable.
 */
static void perf_counter_for_each_child(struct perf_counter *counter,
					void (*func)(struct perf_counter *))
{
	struct perf_counter *child;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	mutex_lock(&counter->child_mutex);
	func(counter);
	list_for_each_entry(child, &counter->child_list, child_list)
		func(child);
	mutex_unlock(&counter->child_mutex);
}

static void perf_counter_for_each(struct perf_counter *counter,
				  void (*func)(struct perf_counter *))
{
	struct perf_counter *child;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	mutex_lock(&counter->child_mutex);
	perf_counter_for_each_sibling(counter, func);
	list_for_each_entry(child, &counter->child_list, child_list)
		perf_counter_for_each_sibling(child, func);
	mutex_unlock(&counter->child_mutex);
}

static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct perf_counter *counter = file->private_data;
	void (*func)(struct perf_counter *);
	u32 flags = arg;

	switch (cmd) {
	case PERF_COUNTER_IOC_ENABLE:
		func = perf_counter_enable;
		break;
	case PERF_COUNTER_IOC_DISABLE:
		func = perf_counter_disable;
		break;
	case PERF_COUNTER_IOC_RESET:
		func = perf_counter_reset;
		break;

	case PERF_COUNTER_IOC_REFRESH:
		return perf_counter_refresh(counter, arg);
	default:
		return -ENOTTY;
	}

	if (flags & PERF_IOC_FLAG_GROUP)
		perf_counter_for_each(counter, func);
	else
		perf_counter_for_each_child(counter, func);

	return 0;
}

int perf_counter_task_enable(void)
{
	struct perf_counter *counter;

	mutex_lock(&current->perf_counter_mutex);
	list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
		perf_counter_for_each_child(counter, perf_counter_enable);
	mutex_unlock(&current->perf_counter_mutex);

	return 0;
}

int perf_counter_task_disable(void)
{
	struct perf_counter *counter;

	mutex_lock(&current->perf_counter_mutex);
	list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
		perf_counter_for_each_child(counter, perf_counter_disable);
	mutex_unlock(&current->perf_counter_mutex);

	return 0;
}

/*
 * Callers need to ensure there can be no nesting of this function, otherwise
 * the seqlock logic goes bad. We can not serialize this because the arch
 * code calls this from NMI context.
 */
void perf_counter_update_userpage(struct perf_counter *counter)
{
	struct perf_mmap_data *data;
	struct perf_counter_mmap_page *userpg;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (!data)
		goto unlock;

	userpg = data->user_page;

	/*
	 * Disable preemption so as to not let the corresponding user-space
	 * spin too long if we get preempted.
	 */
	preempt_disable();
	++userpg->lock;
	barrier();
	userpg->index = counter->hw.idx;
	userpg->offset = atomic64_read(&counter->count);
	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		userpg->offset -= atomic64_read(&counter->hw.prev_count);

	barrier();
	++userpg->lock;
	preempt_enable();
unlock:
	rcu_read_unlock();
}

static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct perf_counter *counter = vma->vm_file->private_data;
	struct perf_mmap_data *data;
	int ret = VM_FAULT_SIGBUS;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (!data)
		goto unlock;

	if (vmf->pgoff == 0) {
		vmf->page = virt_to_page(data->user_page);
	} else {
		int nr = vmf->pgoff - 1;

		if ((unsigned)nr > data->nr_pages)
			goto unlock;

		vmf->page = virt_to_page(data->data_pages[nr]);
	}
	get_page(vmf->page);
	ret = 0;
unlock:
	rcu_read_unlock();

	return ret;
}

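/*
 * Allocate the mmap()ed buffer: one zeroed page for the user-visible
 * control page plus nr_pages zeroed data pages.
 */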
static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
{
	struct perf_mmap_data *data;
	unsigned long size;
	int i;

	WARN_ON(atomic_read(&counter->mmap_count));

	size = sizeof(struct perf_mmap_data);
	size += nr_pages * sizeof(void *);

	data = kzalloc(size, GFP_KERNEL);
	if (!data)
		goto fail;

	data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
	if (!data->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
		if (!data->data_pages[i])
			goto fail_data_pages;
	}

	data->nr_pages = nr_pages;
	atomic_set(&data->lock, -1);

	rcu_assign_pointer(counter->data, data);

	return 0;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)data->data_pages[i]);

	free_page((unsigned long)data->user_page);

fail_user_page:
	kfree(data);

fail:
	return -ENOMEM;
}

static void __perf_mmap_data_free(struct rcu_head *rcu_head)
{
	struct perf_mmap_data *data = container_of(rcu_head,
			struct perf_mmap_data, rcu_head);
	int i;

	free_page((unsigned long)data->user_page);
	for (i = 0; i < data->nr_pages; i++)
		free_page((unsigned long)data->data_pages[i]);
	kfree(data);
}

static void perf_mmap_data_free(struct perf_counter *counter)
{
	struct perf_mmap_data *data = counter->data;

	WARN_ON(atomic_read(&counter->mmap_count));

	rcu_assign_pointer(counter->data, NULL);
	call_rcu(&data->rcu_head, __perf_mmap_data_free);
}

static void perf_mmap_open(struct vm_area_struct *vma)
{
	struct perf_counter *counter = vma->vm_file->private_data;

	atomic_inc(&counter->mmap_count);
}

static void perf_mmap_close(struct vm_area_struct *vma)
{
	struct perf_counter *counter = vma->vm_file->private_data;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	if (atomic_dec_and_mutex_lock(&counter->mmap_count,
				      &counter->mmap_mutex)) {
		struct user_struct *user = current_user();

		atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
		vma->vm_mm->locked_vm -= counter->data->nr_locked;
		perf_mmap_data_free(counter);
		mutex_unlock(&counter->mmap_mutex);
	}
}

static struct vm_operations_struct perf_mmap_vmops = {
	.open  = perf_mmap_open,
	.close = perf_mmap_close,
	.fault = perf_mmap_fault,
};

static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct perf_counter *counter = file->private_data;
	struct user_struct *user = current_user();
	unsigned long vma_size;
	unsigned long nr_pages;
	unsigned long user_locked, user_lock_limit;
	unsigned long locked, lock_limit;
	long user_extra, extra;
	int ret = 0;

	if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
		return -EINVAL;
1834 1835 1836 1837

	vma_size = vma->vm_end - vma->vm_start;
	nr_pages = (vma_size / PAGE_SIZE) - 1;

1838 1839 1840 1841 1842
	/*
	 * If we have data pages ensure they're a power-of-two number, so we
	 * can do bitmasks instead of modulo.
	 */
	if (nr_pages != 0 && !is_power_of_2(nr_pages))
1843 1844
		return -EINVAL;

1845
	if (vma_size != PAGE_SIZE * (1 + nr_pages))
1846 1847
		return -EINVAL;

1848 1849
	if (vma->vm_pgoff != 0)
		return -EINVAL;
1850

1851
	WARN_ON_ONCE(counter->ctx->parent_ctx);
1852 1853 1854 1855 1856 1857 1858
	mutex_lock(&counter->mmap_mutex);
	if (atomic_inc_not_zero(&counter->mmap_count)) {
		if (nr_pages != counter->data->nr_pages)
			ret = -EINVAL;
		goto unlock;
	}

	user_extra = nr_pages + 1;
	user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);

	/*
	 * Increase the limit linearly with more CPUs:
	 */
	user_lock_limit *= num_online_cpus();

	user_locked = atomic_long_read(&user->locked_vm) + user_extra;

	extra = 0;
	if (user_locked > user_lock_limit)
		extra = user_locked - user_lock_limit;

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;
	locked = vma->vm_mm->locked_vm + extra;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -EPERM;
		goto unlock;
	}

	WARN_ON(counter->data);
	ret = perf_mmap_data_alloc(counter, nr_pages);
	if (ret)
		goto unlock;

	atomic_set(&counter->mmap_count, 1);
	atomic_long_add(user_extra, &user->locked_vm);
	vma->vm_mm->locked_vm += extra;
	counter->data->nr_locked = extra;
unlock:
	mutex_unlock(&counter->mmap_mutex);

	vma->vm_flags &= ~VM_MAYWRITE;
	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &perf_mmap_vmops;

	return ret;
}

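/*
 * Support fcntl(FASYNC) on the counter fd: perf_counter_wakeup() will
 * then deliver SIGIO via kill_fasync().
 */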
static int perf_fasync(int fd, struct file *filp, int on)
{
	struct perf_counter *counter = filp->private_data;
	struct inode *inode = filp->f_path.dentry->d_inode;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &counter->fasync);
	mutex_unlock(&inode->i_mutex);

	if (retval < 0)
		return retval;

	return 0;
}

static const struct file_operations perf_fops = {
	.release		= perf_release,
	.read			= perf_read,
	.poll			= perf_poll,
	.unlocked_ioctl		= perf_ioctl,
	.compat_ioctl		= perf_ioctl,
	.mmap			= perf_mmap,
	.fasync			= perf_fasync,
};

/*
 * Perf counter wakeup
 *
 * If there's data, ensure we set the poll() state and publish everything
 * to user-space before waking everybody up.
 */

void perf_counter_wakeup(struct perf_counter *counter)
{
	wake_up_all(&counter->waitq);

	if (counter->pending_kill) {
		kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
		counter->pending_kill = 0;
	}
}

/*
 * Pending wakeups
 *
 * Handle the case where we need to wake up from NMI (or rq->lock) context.
 *
 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
 * single linked list and use cmpxchg() to add entries lockless.
 */

static void perf_pending_counter(struct perf_pending_entry *entry)
{
	struct perf_counter *counter = container_of(entry,
			struct perf_counter, pending);

	if (counter->pending_disable) {
		counter->pending_disable = 0;
		perf_counter_disable(counter);
	}

	if (counter->pending_wakeup) {
		counter->pending_wakeup = 0;
		perf_counter_wakeup(counter);
	}
}

#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)

static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
	PENDING_TAIL,
};

static void perf_pending_queue(struct perf_pending_entry *entry,
			       void (*func)(struct perf_pending_entry *))
{
	struct perf_pending_entry **head;

	if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
		return;

	entry->func = func;

	head = &get_cpu_var(perf_pending_head);

	do {
		entry->next = *head;
	} while (cmpxchg(head, entry->next, entry) != entry->next);

	set_perf_counter_pending();

	put_cpu_var(perf_pending_head);
}

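/*
 * Flush the per-cpu pending list: detach the whole list with xchg()
 * and run each entry's callback.  Returns the number of entries
 * processed.
 */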
static int __perf_pending_run(void)
{
	struct perf_pending_entry *list;
	int nr = 0;

	list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
	while (list != PENDING_TAIL) {
		void (*func)(struct perf_pending_entry *);
		struct perf_pending_entry *entry = list;

		list = list->next;

		func = entry->func;
		entry->next = NULL;
		/*
		 * Ensure we observe the unqueue before we issue the wakeup,
		 * so that we won't be waiting forever.
		 * -- see perf_not_pending().
		 */
		smp_wmb();

		func(entry);
		nr++;
	}

	return nr;
}

static inline int perf_not_pending(struct perf_counter *counter)
{
	/*
	 * If we flush on whatever cpu we run, there is a chance we don't
	 * need to wait.
	 */
	get_cpu();
	__perf_pending_run();
	put_cpu();

	/*
	 * Ensure we see the proper queue state before going to sleep
	 * so that we do not miss the wakeup. -- see __perf_pending_run()
	 */
	smp_rmb();
	return counter->pending.next == NULL;
}

static void perf_pending_sync(struct perf_counter *counter)
{
	wait_event(counter->waitq, perf_not_pending(counter));
}

void perf_counter_do_pending(void)
{
	__perf_pending_run();
}

/*
 * Callchain support -- arch specific
 */

__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	return NULL;
}

/*
 * Output
 */

struct perf_output_handle {
	struct perf_counter	*counter;
	struct perf_mmap_data	*data;
	unsigned int		offset;
	unsigned int		head;
	int			nmi;
	int			overflow;
	int			locked;
	unsigned long		flags;
};

static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->data->poll, POLL_IN);

	if (handle->nmi) {
		handle->counter->pending_wakeup = 1;
		perf_pending_queue(&handle->counter->pending,
				   perf_pending_counter);
	} else
		perf_counter_wakeup(handle->counter);
}

/*
 * Curious locking construct.
 *
 * We need to ensure a later event doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * What we do is serialize between CPUs so we only have to deal with NMI
 * nesting on a single CPU.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_lock(struct perf_output_handle *handle)
{
	struct perf_mmap_data *data = handle->data;
	int cpu;

	handle->locked = 0;

	local_irq_save(handle->flags);
	cpu = smp_processor_id();

	if (in_nmi() && atomic_read(&data->lock) == cpu)
		return;

	while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
		cpu_relax();

	handle->locked = 1;
}

static void perf_output_unlock(struct perf_output_handle *handle)
{
	struct perf_mmap_data *data = handle->data;
	int head, cpu;

	data->done_head = data->head;

	if (!handle->locked)
		goto out;

again:
	/*
	 * The xchg implies a full barrier that ensures all writes are done
	 * before we publish the new head, matched by a rmb() in userspace when
	 * reading this position.
	 */
	while ((head = atomic_xchg(&data->done_head, 0)))
		data->user_page->data_head = head;

	/*
	 * NMI can happen here, which means we can miss a done_head update.
	 */

	cpu = atomic_xchg(&data->lock, -1);
	WARN_ON_ONCE(cpu != smp_processor_id());

	/*
	 * Therefore we have to validate we did not indeed do so.
	 */
	if (unlikely(atomic_read(&data->done_head))) {
		/*
		 * Since we had it locked, we can lock it again.
		 */
		while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
			cpu_relax();

		goto again;
	}

	if (atomic_xchg(&data->wakeup, 0))
		perf_output_wakeup(handle);
out:
	local_irq_restore(handle->flags);
}

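/*
 * Reserve 'size' bytes in the counter's mmap buffer by advancing
 * data->head with a cmpxchg() loop; the matching perf_output_end()
 * publishes the new head and emits any pending wakeup.
 */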
static int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_counter *counter, unsigned int size,
			     int nmi, int overflow)
{
	struct perf_mmap_data *data;
	unsigned int offset, head;

	/*
	 * For inherited counters we send all the output towards the parent.
	 */
	if (counter->parent)
		counter = counter->parent;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (!data)
		goto out;

	handle->data	 = data;
	handle->counter	 = counter;
	handle->nmi	 = nmi;
	handle->overflow = overflow;

	if (!data->nr_pages)
		goto fail;

	perf_output_lock(handle);

	do {
		offset = head = atomic_read(&data->head);
		head += size;
	} while (atomic_cmpxchg(&data->head, offset, head) != offset);

	handle->offset	= offset;
	handle->head	= head;

	if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
		atomic_set(&data->wakeup, 1);

	return 0;

fail:
	perf_output_wakeup(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}

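/*
 * Copy a chunk of output into the data pages, wrapping around the
 * power-of-two sized buffer via the pages mask.
 */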
static void perf_output_copy(struct perf_output_handle *handle,
			     void *buf, unsigned int len)
{
	unsigned int pages_mask;
	unsigned int offset;
	unsigned int size;
	void **pages;

	offset		= handle->offset;
	pages_mask	= handle->data->nr_pages - 1;
	pages		= handle->data->data_pages;

	do {
		unsigned int page_offset;
		int nr;

		nr	    = (offset >> PAGE_SHIFT) & pages_mask;
		page_offset = offset & (PAGE_SIZE - 1);
		size	    = min_t(unsigned int, PAGE_SIZE - page_offset, len);

		memcpy(pages[nr] + page_offset, buf, size);

		len	    -= size;
		buf	    += size;
		offset	    += size;
	} while (len);

	handle->offset = offset;

	/*
	 * Check we didn't copy past our reservation window, taking the
	 * possible unsigned int wrap into account.
	 */
	WARN_ON_ONCE(((int)(handle->head - handle->offset)) < 0);
}

#define perf_output_put(handle, x) \
	perf_output_copy((handle), &(x), sizeof(x))

static void perf_output_end(struct perf_output_handle *handle)
{
	struct perf_counter *counter = handle->counter;
	struct perf_mmap_data *data = handle->data;

	int wakeup_events = counter->hw_event.wakeup_events;

	if (handle->overflow && wakeup_events) {
		int events = atomic_inc_return(&data->events);
		if (events >= wakeup_events) {
			atomic_sub(wakeup_events, &data->events);
			atomic_set(&data->wakeup, 1);
		}
	}

	perf_output_unlock(handle);
	rcu_read_unlock();
}

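/*
 * Write one overflow event record, assembling only the fields that
 * are selected in the counter's record_type bitmask.
 */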
static void perf_counter_output(struct perf_counter *counter,
				int nmi, struct pt_regs *regs, u64 addr)
{
	int ret;
	u64 record_type = counter->hw_event.record_type;
	struct perf_output_handle handle;
	struct perf_event_header header;
	u64 ip;
	struct {
		u32 pid, tid;
	} tid_entry;
	struct {
		u64 event;
		u64 counter;
	} group_entry;
	struct perf_callchain_entry *callchain = NULL;
	int callchain_size = 0;
	u64 time;
	struct {
		u32 cpu, reserved;
	} cpu_entry;

	header.type = 0;
	header.size = sizeof(header);

	header.misc = PERF_EVENT_MISC_OVERFLOW;
	header.misc |= perf_misc_flags(regs);

	if (record_type & PERF_RECORD_IP) {
		ip = perf_instruction_pointer(regs);
		header.type |= PERF_RECORD_IP;
		header.size += sizeof(ip);
	}

	if (record_type & PERF_RECORD_TID) {
		/* namespace issues */
		tid_entry.pid = current->group_leader->pid;
		tid_entry.tid = current->pid;

		header.type |= PERF_RECORD_TID;
		header.size += sizeof(tid_entry);
	}

	if (record_type & PERF_RECORD_TIME) {
		/*
		 * Maybe do better on x86 and provide cpu_clock_nmi()
		 */
		time = sched_clock();

		header.type |= PERF_RECORD_TIME;
		header.size += sizeof(u64);
	}

	if (record_type & PERF_RECORD_ADDR) {
		header.type |= PERF_RECORD_ADDR;
		header.size += sizeof(u64);
	}

	if (record_type & PERF_RECORD_CONFIG) {
		header.type |= PERF_RECORD_CONFIG;
		header.size += sizeof(u64);
	}

	if (record_type & PERF_RECORD_CPU) {
		header.type |= PERF_RECORD_CPU;
		header.size += sizeof(cpu_entry);

		cpu_entry.cpu = raw_smp_processor_id();
	}

	if (record_type & PERF_RECORD_GROUP) {
		header.type |= PERF_RECORD_GROUP;
		header.size += sizeof(u64) +
			counter->nr_siblings * sizeof(group_entry);
	}

	if (record_type & PERF_RECORD_CALLCHAIN) {
		callchain = perf_callchain(regs);

		if (callchain) {
			callchain_size = (1 + callchain->nr) * sizeof(u64);

			header.type |= PERF_RECORD_CALLCHAIN;
			header.size += callchain_size;
		}
	}

	ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
	if (ret)
		return;

	perf_output_put(&handle, header);

	if (record_type & PERF_RECORD_IP)
		perf_output_put(&handle, ip);

	if (record_type & PERF_RECORD_TID)
		perf_output_put(&handle, tid_entry);

	if (record_type & PERF_RECORD_TIME)
		perf_output_put(&handle, time);

	if (record_type & PERF_RECORD_ADDR)
		perf_output_put(&handle, addr);

	if (record_type & PERF_RECORD_CONFIG)
		perf_output_put(&handle, counter->hw_event.config);

	if (record_type & PERF_RECORD_CPU)
		perf_output_put(&handle, cpu_entry);

	/*
	 * XXX PERF_RECORD_GROUP vs inherited counters seems difficult.
	 */
	if (record_type & PERF_RECORD_GROUP) {
		struct perf_counter *leader, *sub;
		u64 nr = counter->nr_siblings;

		perf_output_put(&handle, nr);

		leader = counter->group_leader;
		list_for_each_entry(sub, &leader->sibling_list, list_entry) {
			if (sub != counter)
				sub->pmu->read(sub);

			group_entry.event = sub->hw_event.config;
			group_entry.counter = atomic64_read(&sub->count);

			perf_output_put(&handle, group_entry);
		}
	}

	if (callchain)
		perf_output_copy(&handle, callchain, callchain_size);

	perf_output_end(&handle);
}

/*
 * comm tracking
 */

struct perf_comm_event {
	struct task_struct 	*task;
	char 			*comm;
	int			comm_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
	} event;
};

static void perf_counter_comm_output(struct perf_counter *counter,
				     struct perf_comm_event *comm_event)
{
	struct perf_output_handle handle;
	int size = comm_event->event.header.size;
	int ret = perf_output_begin(&handle, counter, size, 0, 0);

	if (ret)
		return;

	perf_output_put(&handle, comm_event->event);
	perf_output_copy(&handle, comm_event->comm,
				   comm_event->comm_size);
	perf_output_end(&handle);
}

static int perf_counter_comm_match(struct perf_counter *counter,
				   struct perf_comm_event *comm_event)
{
	if (counter->hw_event.comm &&
	    comm_event->event.header.type == PERF_EVENT_COMM)
		return 1;

	return 0;
}

static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
				  struct perf_comm_event *comm_event)
{
	struct perf_counter *counter;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
		if (perf_counter_comm_match(counter, comm_event))
			perf_counter_comm_output(counter, comm_event);
	}
	rcu_read_unlock();
}

static void perf_counter_comm_event(struct perf_comm_event *comm_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;
	unsigned int size;
	char *comm = comm_event->task->comm;

	size = ALIGN(strlen(comm)+1, sizeof(u64));

	comm_event->comm = comm;
	comm_event->comm_size = size;

	comm_event->event.header.size = sizeof(comm_event->event) + size;

	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
	put_cpu_var(perf_cpu_context);

	rcu_read_lock();
	/*
	 * It doesn't really matter which of the child contexts the
	 * event ends up in.
	 */
	ctx = rcu_dereference(current->perf_counter_ctxp);
	if (ctx)
		perf_counter_comm_ctx(ctx, comm_event);
	rcu_read_unlock();
}

void perf_counter_comm(struct task_struct *task)
{
	struct perf_comm_event comm_event;

	if (!atomic_read(&nr_comm_tracking))
		return;

	comm_event = (struct perf_comm_event){
		.task	= task,
		.event  = {
			.header = { .type = PERF_EVENT_COMM, },
			.pid	= task->group_leader->pid,
			.tid	= task->pid,
		},
	};

	perf_counter_comm_event(&comm_event);
}

/*
 * mmap tracking
 */

struct perf_mmap_event {
	struct file	*file;
	char		*file_name;
	int		file_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
		u64				start;
		u64				len;
		u64				pgoff;
	} event;
};

static void perf_counter_mmap_output(struct perf_counter *counter,
				     struct perf_mmap_event *mmap_event)
{
	struct perf_output_handle handle;
	int size = mmap_event->event.header.size;
	int ret = perf_output_begin(&handle, counter, size, 0, 0);

	if (ret)
		return;

	perf_output_put(&handle, mmap_event->event);
	perf_output_copy(&handle, mmap_event->file_name,
				   mmap_event->file_size);
	perf_output_end(&handle);
}

static int perf_counter_mmap_match(struct perf_counter *counter,
				   struct perf_mmap_event *mmap_event)
{
	if (counter->hw_event.mmap &&
	    mmap_event->event.header.type == PERF_EVENT_MMAP)
		return 1;

	if (counter->hw_event.munmap &&
	    mmap_event->event.header.type == PERF_EVENT_MUNMAP)
		return 1;

	return 0;
}

static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
				  struct perf_mmap_event *mmap_event)
{
	struct perf_counter *counter;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
		if (perf_counter_mmap_match(counter, mmap_event))
			perf_counter_mmap_output(counter, mmap_event);
	}
	rcu_read_unlock();
}

static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;
	struct file *file = mmap_event->file;
	unsigned int size;
	char tmp[16];
	char *buf = NULL;
	char *name;

	if (file) {
		buf = kzalloc(PATH_MAX, GFP_KERNEL);
		if (!buf) {
			name = strncpy(tmp, "//enomem", sizeof(tmp));
			goto got_name;
		}
		name = d_path(&file->f_path, buf, PATH_MAX);
		if (IS_ERR(name)) {
			name = strncpy(tmp, "//toolong", sizeof(tmp));
			goto got_name;
		}
	} else {
		name = strncpy(tmp, "//anon", sizeof(tmp));
		goto got_name;
	}

got_name:
	size = ALIGN(strlen(name)+1, sizeof(u64));

	mmap_event->file_name = name;
	mmap_event->file_size = size;

	mmap_event->event.header.size = sizeof(mmap_event->event) + size;

	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
	put_cpu_var(perf_cpu_context);

	rcu_read_lock();
	/*
	 * It doesn't really matter which of the child contexts the
	 * event ends up in.
	 */
	ctx = rcu_dereference(current->perf_counter_ctxp);
	if (ctx)
		perf_counter_mmap_ctx(ctx, mmap_event);
	rcu_read_unlock();

	kfree(buf);
}

void perf_counter_mmap(unsigned long addr, unsigned long len,
		       unsigned long pgoff, struct file *file)
{
	struct perf_mmap_event mmap_event;

	if (!atomic_read(&nr_mmap_tracking))
		return;

	mmap_event = (struct perf_mmap_event){
		.file   = file,
		.event  = {
			.header = { .type = PERF_EVENT_MMAP, },
			.pid	= current->group_leader->pid,
			.tid	= current->pid,
			.start  = addr,
			.len    = len,
			.pgoff  = pgoff,
		},
	};

	perf_counter_mmap_event(&mmap_event);
}

void perf_counter_munmap(unsigned long addr, unsigned long len,
			 unsigned long pgoff, struct file *file)
{
	struct perf_mmap_event mmap_event;

	if (!atomic_read(&nr_munmap_tracking))
		return;

	mmap_event = (struct perf_mmap_event){
		.file   = file,
		.event  = {
			.header = { .type = PERF_EVENT_MUNMAP, },
			.pid	= current->group_leader->pid,
			.tid	= current->pid,
			.start  = addr,
			.len    = len,
			.pgoff  = pgoff,
		},
	};

	perf_counter_mmap_event(&mmap_event);
}

/*
 * Log irq_period changes so that analyzing tools can re-normalize the
 * event flow.
 */

static void perf_log_period(struct perf_counter *counter, u64 period)
{
	struct perf_output_handle handle;
	int ret;

	struct {
		struct perf_event_header	header;
		u64				time;
		u64				period;
	} freq_event = {
		.header = {
			.type = PERF_EVENT_PERIOD,
			.misc = 0,
			.size = sizeof(freq_event),
		},
		.time = sched_clock(),
		.period = period,
	};

	if (counter->hw.irq_period == period)
		return;

	ret = perf_output_begin(&handle, counter, sizeof(freq_event), 0, 0);
	if (ret)
		return;

	perf_output_put(&handle, freq_event);
	perf_output_end(&handle);
}

/*
 * IRQ throttle logging
 */

static void perf_log_throttle(struct perf_counter *counter, int enable)
{
	struct perf_output_handle handle;
	int ret;

	struct {
		struct perf_event_header	header;
		u64				time;
	} throttle_event = {
		.header = {
			.type = PERF_EVENT_THROTTLE + 1,
			.misc = 0,
			.size = sizeof(throttle_event),
		},
		.time = sched_clock(),
	};

	ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
	if (ret)
		return;

	perf_output_put(&handle, throttle_event);
	perf_output_end(&handle);
}

/*
 * Generic counter overflow handling.
 */

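/*
 * Returns non-zero when the counter got throttled or hit its event
 * limit, in which case the caller should stop (or soft-disable) it.
 */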
int perf_counter_overflow(struct perf_counter *counter,
			  int nmi, struct pt_regs *regs, u64 addr)
{
	int events = atomic_read(&counter->event_limit);
	int throttle = counter->pmu->unthrottle != NULL;
	int ret = 0;

	if (!throttle) {
		counter->hw.interrupts++;
	} else if (counter->hw.interrupts != MAX_INTERRUPTS) {
		counter->hw.interrupts++;
		if (HZ*counter->hw.interrupts > (u64)sysctl_perf_counter_limit) {
			counter->hw.interrupts = MAX_INTERRUPTS;
			perf_log_throttle(counter, 0);
			ret = 1;
		}
	}

	/*
	 * XXX event_limit might not quite work as expected on inherited
	 * counters
	 */

	counter->pending_kill = POLL_IN;
	if (events && atomic_dec_and_test(&counter->event_limit)) {
		ret = 1;
		counter->pending_kill = POLL_HUP;
		if (nmi) {
			counter->pending_disable = 1;
			perf_pending_queue(&counter->pending,
					   perf_pending_counter);
		} else
			perf_counter_disable(counter);
	}

	perf_counter_output(counter, nmi, regs, addr);
	return ret;
}

/*
 * Generic software counter infrastructure
 */

static void perf_swcounter_update(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	u64 prev, now;
	s64 delta;

again:
	prev = atomic64_read(&hwc->prev_count);
	now = atomic64_read(&hwc->count);
	if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);
}

static void perf_swcounter_set_period(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->irq_period;

	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_add(period, &hwc->period_left);
	}

	atomic64_set(&hwc->prev_count, -left);
	atomic64_set(&hwc->count, -left);
}

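/*
 * hrtimer callback used by the clock based software counters: read the
 * counter, report overflow and re-arm with a minimum period of 10 usecs.
 */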
static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
{
	enum hrtimer_restart ret = HRTIMER_RESTART;
	struct perf_counter *counter;
	struct pt_regs *regs;
	u64 period;

	counter	= container_of(hrtimer, struct perf_counter, hw.hrtimer);
	counter->pmu->read(counter);

	regs = get_irq_regs();
	/*
	 * In case we exclude kernel IPs or are somehow not in interrupt
	 * context, provide the next best thing, the user IP.
	 */
	if ((counter->hw_event.exclude_kernel || !regs) &&
			!counter->hw_event.exclude_user)
		regs = task_pt_regs(current);

	if (regs) {
		if (perf_counter_overflow(counter, 0, regs, 0))
			ret = HRTIMER_NORESTART;
	}

	period = max_t(u64, 10000, counter->hw.irq_period);
	hrtimer_forward_now(hrtimer, ns_to_ktime(period));

	return ret;
}

static void perf_swcounter_overflow(struct perf_counter *counter,
				    int nmi, struct pt_regs *regs, u64 addr)
{
	perf_swcounter_update(counter);
	perf_swcounter_set_period(counter);
	if (perf_counter_overflow(counter, nmi, regs, addr))
		/* soft-disable the counter */
		;

}

static int perf_swcounter_is_counting(struct perf_counter *counter)
{
	struct perf_counter_context *ctx;
	unsigned long flags;
	int count;

	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		return 1;

	if (counter->state != PERF_COUNTER_STATE_INACTIVE)
		return 0;

	/*
	 * If the counter is inactive, it could be just because
	 * its task is scheduled out, or because it's in a group
	 * which could not go on the PMU.  We want to count in
	 * the first case but not the second.  If the context is
	 * currently active then an inactive software counter must
	 * be the second case.  If it's not currently active then
	 * we need to know whether the counter was active when the
	 * context was last active, which we can determine by
	 * comparing counter->tstamp_stopped with ctx->time.
	 *
	 * We are within an RCU read-side critical section,
	 * which protects the existence of *ctx.
	 */
	ctx = counter->ctx;
	spin_lock_irqsave(&ctx->lock, flags);
	count = 1;
	/* Re-check state now we have the lock */
	if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
	    counter->ctx->is_active ||
	    counter->tstamp_stopped < ctx->time)
		count = 0;
	spin_unlock_irqrestore(&ctx->lock, flags);
	return count;
}

static int perf_swcounter_match(struct perf_counter *counter,
				enum perf_event_types type,
				u32 event, struct pt_regs *regs)
{
	u64 event_config;

	event_config = ((u64) type << PERF_COUNTER_TYPE_SHIFT) | event;

	if (!perf_swcounter_is_counting(counter))
		return 0;

	if (counter->hw_event.config != event_config)
		return 0;

	if (counter->hw_event.exclude_user && user_mode(regs))
		return 0;

	if (counter->hw_event.exclude_kernel && !user_mode(regs))
		return 0;

	return 1;
}

static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
			       int nmi, struct pt_regs *regs, u64 addr)
{
	int neg = atomic64_add_negative(nr, &counter->hw.count);
	if (counter->hw.irq_period && !neg)
		perf_swcounter_overflow(counter, nmi, regs, addr);
}

static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
				     enum perf_event_types type, u32 event,
				     u64 nr, int nmi, struct pt_regs *regs,
				     u64 addr)
{
	struct perf_counter *counter;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
		if (perf_swcounter_match(counter, type, event, regs))
			perf_swcounter_add(counter, nr, nmi, regs, addr);
	}
	rcu_read_unlock();
}

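/*
 * Pick the recursion counter that matches the current context (NMI,
 * hardirq, softirq or process) so that nested software counter events
 * from the same context level are dropped instead of recursing.
 */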
static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
{
	if (in_nmi())
		return &cpuctx->recursion[3];

	if (in_irq())
		return &cpuctx->recursion[2];

	if (in_softirq())
		return &cpuctx->recursion[1];

	return &cpuctx->recursion[0];
}

static void __perf_swcounter_event(enum perf_event_types type, u32 event,
				   u64 nr, int nmi, struct pt_regs *regs,
				   u64 addr)
{
	struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
	int *recursion = perf_swcounter_recursion_context(cpuctx);
	struct perf_counter_context *ctx;

	if (*recursion)
		goto out;

	(*recursion)++;
	barrier();

	perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
				 nr, nmi, regs, addr);
	rcu_read_lock();
	/*
	 * It doesn't really matter which of the child contexts the
	 * event ends up in.
	 */
	ctx = rcu_dereference(current->perf_counter_ctxp);
	if (ctx)
		perf_swcounter_ctx_event(ctx, type, event, nr, nmi, regs, addr);
	rcu_read_unlock();

	barrier();
	(*recursion)--;

out:
	put_cpu_var(perf_cpu_context);
}

void
perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
{
	__perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs, addr);
}

static void perf_swcounter_read(struct perf_counter *counter)
{
	perf_swcounter_update(counter);
}

static int perf_swcounter_enable(struct perf_counter *counter)
{
	perf_swcounter_set_period(counter);
	return 0;
}

static void perf_swcounter_disable(struct perf_counter *counter)
{
	perf_swcounter_update(counter);
}

static const struct pmu perf_ops_generic = {
	.enable		= perf_swcounter_enable,
	.disable	= perf_swcounter_disable,
	.read		= perf_swcounter_read,
};

/*
 * Software counter: cpu wall time clock
 */

static void cpu_clock_perf_counter_update(struct perf_counter *counter)
{
	int cpu = raw_smp_processor_id();
	s64 prev;
	u64 now;

	now = cpu_clock(cpu);
	prev = atomic64_read(&counter->hw.prev_count);
	atomic64_set(&counter->hw.prev_count, now);
	atomic64_add(now - prev, &counter->count);
}

static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int cpu = raw_smp_processor_id();

	atomic64_set(&hwc->prev_count, cpu_clock(cpu));
	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swcounter_hrtimer;
	if (hwc->irq_period) {
		u64 period = max_t(u64, 10000, hwc->irq_period);
		__hrtimer_start_range_ns(&hwc->hrtimer,
				ns_to_ktime(period), 0,
				HRTIMER_MODE_REL, 0);
	}

	return 0;
}

static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
{
	if (counter->hw.irq_period)
		hrtimer_cancel(&counter->hw.hrtimer);
	cpu_clock_perf_counter_update(counter);
}

static void cpu_clock_perf_counter_read(struct perf_counter *counter)
{
	cpu_clock_perf_counter_update(counter);
}

static const struct pmu perf_ops_cpu_clock = {
	.enable		= cpu_clock_perf_counter_enable,
	.disable	= cpu_clock_perf_counter_disable,
	.read		= cpu_clock_perf_counter_read,
};

/*
 * Software counter: task time clock
 */

static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
{
	u64 prev;
	s64 delta;

	prev = atomic64_xchg(&counter->hw.prev_count, now);
	delta = now - prev;
	atomic64_add(delta, &counter->count);
}

static int task_clock_perf_counter_enable(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	u64 now;

	now = counter->ctx->time;

	atomic64_set(&hwc->prev_count, now);
	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swcounter_hrtimer;
	if (hwc->irq_period) {
		u64 period = max_t(u64, 10000, hwc->irq_period);
		__hrtimer_start_range_ns(&hwc->hrtimer,
				ns_to_ktime(period), 0,
				HRTIMER_MODE_REL, 0);
	}

	return 0;
}

static void task_clock_perf_counter_disable(struct perf_counter *counter)
{
	if (counter->hw.irq_period)
		hrtimer_cancel(&counter->hw.hrtimer);
	task_clock_perf_counter_update(counter, counter->ctx->time);
}

static void task_clock_perf_counter_read(struct perf_counter *counter)
{
	u64 time;

	if (!in_nmi()) {
		update_context_time(counter->ctx);
		time = counter->ctx->time;
	} else {
		u64 now = perf_clock();
		u64 delta = now - counter->ctx->timestamp;
		time = counter->ctx->time + delta;
	}

	task_clock_perf_counter_update(counter, time);
3140 3141
}

3142
static const struct pmu perf_ops_task_clock = {
I
Ingo Molnar 已提交
3143 3144 3145
	.enable		= task_clock_perf_counter_enable,
	.disable	= task_clock_perf_counter_disable,
	.read		= task_clock_perf_counter_read,
3146 3147
};

3148 3149 3150 3151
/*
 * Software counter: cpu migrations
 */

3152
static inline u64 get_cpu_migrations(struct perf_counter *counter)
3153
{
3154 3155 3156 3157 3158
	struct task_struct *curr = counter->ctx->task;

	if (curr)
		return curr->se.nr_migrations;
	return cpu_nr_migrations(smp_processor_id());
}

static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
{
	u64 prev, now;
	s64 delta;

	prev = atomic64_read(&counter->hw.prev_count);
	now = get_cpu_migrations(counter);

	atomic64_set(&counter->hw.prev_count, now);

	delta = now - prev;

	atomic64_add(delta, &counter->count);
}

static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
{
	cpu_migrations_perf_counter_update(counter);
}

static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
{
	if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
		atomic64_set(&counter->hw.prev_count,
			     get_cpu_migrations(counter));
	return 0;
}

static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
{
	cpu_migrations_perf_counter_update(counter);
}

static const struct pmu perf_ops_cpu_migrations = {
	.enable		= cpu_migrations_perf_counter_enable,
	.disable	= cpu_migrations_perf_counter_disable,
	.read		= cpu_migrations_perf_counter_read,
};

#ifdef CONFIG_EVENT_PROFILE
void perf_tpcounter_event(int event_id)
{
	struct pt_regs *regs = get_irq_regs();

	if (!regs)
		regs = task_pt_regs(current);

	__perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs, 0);
}
EXPORT_SYMBOL_GPL(perf_tpcounter_event);

extern int ftrace_profile_enable(int);
extern void ftrace_profile_disable(int);

static void tp_perf_counter_destroy(struct perf_counter *counter)
{
	ftrace_profile_disable(perf_event_id(&counter->hw_event));
}

static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
{
	int event_id = perf_event_id(&counter->hw_event);
	int ret;

	ret = ftrace_profile_enable(event_id);
	if (ret)
		return NULL;

	counter->destroy = tp_perf_counter_destroy;
	counter->hw.irq_period = counter->hw_event.irq_period;

	return &perf_ops_generic;
}
#else
static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
{
	return NULL;
}
#endif

static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
{
	const struct pmu *pmu = NULL;

	/*
	 * Software counters (currently) can't in general distinguish
	 * between user, kernel and hypervisor events.
	 * However, context switches and cpu migrations are considered
	 * to be kernel events, and page faults are never hypervisor
	 * events.
	 */
	switch (perf_event_id(&counter->hw_event)) {
	case PERF_COUNT_CPU_CLOCK:
		pmu = &perf_ops_cpu_clock;

		break;
	case PERF_COUNT_TASK_CLOCK:
		/*
		 * If the user instantiates this as a per-cpu counter,
		 * use the cpu_clock counter instead.
		 */
		if (counter->ctx->task)
			pmu = &perf_ops_task_clock;
		else
			pmu = &perf_ops_cpu_clock;

		break;
	case PERF_COUNT_PAGE_FAULTS:
	case PERF_COUNT_PAGE_FAULTS_MIN:
	case PERF_COUNT_PAGE_FAULTS_MAJ:
	case PERF_COUNT_CONTEXT_SWITCHES:
		pmu = &perf_ops_generic;
		break;
	case PERF_COUNT_CPU_MIGRATIONS:
		if (!counter->hw_event.exclude_kernel)
			pmu = &perf_ops_cpu_migrations;
		break;
	}

	return pmu;
}

/*
 * Allocate and initialize a counter structure
 */
static struct perf_counter *
perf_counter_alloc(struct perf_counter_hw_event *hw_event,
		   int cpu,
		   struct perf_counter_context *ctx,
		   struct perf_counter *group_leader,
		   gfp_t gfpflags)
{
	const struct pmu *pmu;
	struct perf_counter *counter;
	struct hw_perf_counter *hwc;
	long err;

	counter = kzalloc(sizeof(*counter), gfpflags);
	if (!counter)
		return ERR_PTR(-ENOMEM);

	/*
	 * Single counters are their own group leaders, with an
	 * empty sibling list:
	 */
	if (!group_leader)
		group_leader = counter;

	mutex_init(&counter->child_mutex);
	INIT_LIST_HEAD(&counter->child_list);

	INIT_LIST_HEAD(&counter->list_entry);
	INIT_LIST_HEAD(&counter->event_entry);
	INIT_LIST_HEAD(&counter->sibling_list);
	init_waitqueue_head(&counter->waitq);

	mutex_init(&counter->mmap_mutex);

	counter->cpu			= cpu;
	counter->hw_event		= *hw_event;
	counter->group_leader		= group_leader;
	counter->pmu			= NULL;
	counter->ctx			= ctx;
	counter->oncpu			= -1;

	counter->state = PERF_COUNTER_STATE_INACTIVE;
	if (hw_event->disabled)
		counter->state = PERF_COUNTER_STATE_OFF;

	pmu = NULL;

	hwc = &counter->hw;
	if (hw_event->freq && hw_event->irq_freq)
		hwc->irq_period = div64_u64(TICK_NSEC, hw_event->irq_freq);
	else
		hwc->irq_period = hw_event->irq_period;

	/*
	 * we currently do not support PERF_RECORD_GROUP on inherited counters
	 */
	if (hw_event->inherit && (hw_event->record_type & PERF_RECORD_GROUP))
		goto done;

	if (perf_event_raw(hw_event)) {
		pmu = hw_perf_counter_init(counter);
		goto done;
	}

	switch (perf_event_type(hw_event)) {
	case PERF_TYPE_HARDWARE:
		pmu = hw_perf_counter_init(counter);
		break;

	case PERF_TYPE_SOFTWARE:
		pmu = sw_perf_counter_init(counter);
		break;

	case PERF_TYPE_TRACEPOINT:
		pmu = tp_perf_counter_init(counter);
		break;
	}
done:
	err = 0;
	if (!pmu)
		err = -EINVAL;
	else if (IS_ERR(pmu))
		err = PTR_ERR(pmu);

	if (err) {
		kfree(counter);
		return ERR_PTR(err);
	}

	counter->pmu = pmu;

	atomic_inc(&nr_counters);
	if (counter->hw_event.mmap)
		atomic_inc(&nr_mmap_tracking);
	if (counter->hw_event.munmap)
		atomic_inc(&nr_munmap_tracking);
	if (counter->hw_event.comm)
		atomic_inc(&nr_comm_tracking);

	return counter;
}

/**
 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
 *
 * @hw_event_uptr:	event type attributes for monitoring/sampling
 * @pid:		target pid
 * @cpu:		target cpu
 * @group_fd:		group leader counter fd
 */
SYSCALL_DEFINE5(perf_counter_open,
		const struct perf_counter_hw_event __user *, hw_event_uptr,
		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
	struct perf_counter *counter, *group_leader;
	struct perf_counter_hw_event hw_event;
	struct perf_counter_context *ctx;
	struct file *counter_file = NULL;
	struct file *group_file = NULL;
	int fput_needed = 0;
	int fput_needed2 = 0;
	int ret;

	/* for future expandability... */
	if (flags)
		return -EINVAL;

	if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
		return -EFAULT;

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pid, cpu);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/*
	 * Look up the group leader (we will attach this counter to it):
	 */
	group_leader = NULL;
	if (group_fd != -1) {
		ret = -EINVAL;
		group_file = fget_light(group_fd, &fput_needed);
		if (!group_file)
			goto err_put_context;
		if (group_file->f_op != &perf_fops)
			goto err_put_context;

		group_leader = group_file->private_data;
		/*
		 * Do not allow a recursive hierarchy (this new sibling
		 * becoming part of another group-sibling):
		 */
		if (group_leader->group_leader != group_leader)
			goto err_put_context;
		/*
		 * Do not allow to attach to a group in a different
		 * task or CPU context:
		 */
		if (group_leader->ctx != ctx)
			goto err_put_context;
		/*
		 * Only a group leader can be exclusive or pinned
		 */
		if (hw_event.exclusive || hw_event.pinned)
			goto err_put_context;
	}

	counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader,
				     GFP_KERNEL);
	ret = PTR_ERR(counter);
	if (IS_ERR(counter))
		goto err_put_context;

	ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
	if (ret < 0)
		goto err_free_put_context;

	counter_file = fget_light(ret, &fput_needed2);
	if (!counter_file)
		goto err_free_put_context;

	counter->filp = counter_file;
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, counter, cpu);
	++ctx->generation;
	mutex_unlock(&ctx->mutex);

	counter->owner = current;
	get_task_struct(current);
	mutex_lock(&current->perf_counter_mutex);
	list_add_tail(&counter->owner_entry, &current->perf_counter_list);
	mutex_unlock(&current->perf_counter_mutex);

	fput_light(counter_file, fput_needed2);

out_fput:
	fput_light(group_file, fput_needed);

	return ret;

err_free_put_context:
	kfree(counter);

err_put_context:
	put_ctx(ctx);

	goto out_fput;
}

/*
 * inherit a counter from parent task to child task:
 */
static struct perf_counter *
inherit_counter(struct perf_counter *parent_counter,
	      struct task_struct *parent,
	      struct perf_counter_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_counter *group_leader,
	      struct perf_counter_context *child_ctx)
{
	struct perf_counter *child_counter;

3510 3511 3512 3513 3514 3515 3516 3517 3518
	/*
	 * Instead of creating recursive hierarchies of counters,
	 * we link inherited counters back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_counter->parent)
		parent_counter = parent_counter->parent;

	child_counter = perf_counter_alloc(&parent_counter->hw_event,
					   parent_counter->cpu, child_ctx,
					   group_leader, GFP_KERNEL);
	if (IS_ERR(child_counter))
		return child_counter;
	get_ctx(child_ctx);

	/*
	 * Make the child state follow the state of the parent counter,
	 * not its hw_event.disabled bit.  We hold the parent's mutex,
	 * so we won't race with perf_counter_{en,dis}able_family.
	 */
	if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
		child_counter->state = PERF_COUNTER_STATE_INACTIVE;
	else
		child_counter->state = PERF_COUNTER_STATE_OFF;

	/*
	 * Link it up in the child's context:
	 */
	add_counter_to_ctx(child_counter, child_ctx);

	child_counter->parent = parent_counter;
	/*
	 * inherit into child's child as well:
	 */
	child_counter->hw_event.inherit = 1;

	/*
	 * Get a reference to the parent filp - we will fput it
	 * when the child counter exits. This is safe to do because
	 * we are in the parent and we know that the filp still
	 * exists and has a nonzero count:
	 */
	atomic_long_inc(&parent_counter->filp->f_count);

	/*
	 * Link this into the parent counter's child list
	 */
	WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
	mutex_lock(&parent_counter->child_mutex);
	list_add_tail(&child_counter->child_list, &parent_counter->child_list);
	mutex_unlock(&parent_counter->child_mutex);

	return child_counter;
}

static int inherit_group(struct perf_counter *parent_counter,
	      struct task_struct *parent,
	      struct perf_counter_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_counter_context *child_ctx)
{
	struct perf_counter *leader;
	struct perf_counter *sub;
	struct perf_counter *child_ctr;

	leader = inherit_counter(parent_counter, parent, parent_ctx,
				 child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
		child_ctr = inherit_counter(sub, parent, parent_ctx,
					    child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}

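/*
 * Fold the counts and times accumulated by an exiting child counter
 * back into its parent and unlink it from the parent's child list.
 */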
static void sync_child_counter(struct perf_counter *child_counter,
			       struct perf_counter *parent_counter)
{
	u64 child_val;

	child_val = atomic64_read(&child_counter->count);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_counter->count);
	atomic64_add(child_counter->total_time_enabled,
		     &parent_counter->child_total_time_enabled);
	atomic64_add(child_counter->total_time_running,
		     &parent_counter->child_total_time_running);

	/*
	 * Remove this counter from the parent's list
	 */
	WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
	mutex_lock(&parent_counter->child_mutex);
	list_del_init(&child_counter->child_list);
	mutex_unlock(&parent_counter->child_mutex);

	/*
	 * Release the parent counter, if this was the last
	 * reference to it.
	 */
	fput(parent_counter->filp);
}

static void
__perf_counter_exit_task(struct perf_counter *child_counter,
			 struct perf_counter_context *child_ctx)
{
	struct perf_counter *parent_counter;

	update_counter_times(child_counter);
	perf_counter_remove_from_context(child_counter);

	parent_counter = child_counter->parent;
	/*
	 * It can happen that parent exits first, and has counters
	 * that are still around due to the child reference. These
	 * counters need to be zapped - but otherwise linger.
	 */
	if (parent_counter) {
		sync_child_counter(child_counter, parent_counter);
		free_counter(child_counter);
	}
}

/*
 * When a child task exits, feed back counter values to parent counters.
 */
void perf_counter_exit_task(struct task_struct *child)
{
	struct perf_counter *child_counter, *tmp;
	struct perf_counter_context *child_ctx;
	unsigned long flags;

	if (likely(!child->perf_counter_ctxp))
		return;

	local_irq_save(flags);
	/*
	 * We can't reschedule here because interrupts are disabled,
	 * and either child is current or it is a task that can't be
	 * scheduled, so we are now safe from rescheduling changing
	 * our context.
	 */
	child_ctx = child->perf_counter_ctxp;
	__perf_counter_task_sched_out(child_ctx);

	/*
	 * Take the context lock here so that if find_get_context is
	 * reading child->perf_counter_ctxp, we wait until it has
	 * incremented the context's refcount before we do put_ctx below.
	 */
	spin_lock(&child_ctx->lock);
	child->perf_counter_ctxp = NULL;
	if (child_ctx->parent_ctx) {
		/*
		 * This context is a clone; unclone it so it can't get
		 * swapped to another process while we're removing all
		 * the counters from it.
		 */
		put_ctx(child_ctx->parent_ctx);
		child_ctx->parent_ctx = NULL;
	}
	spin_unlock(&child_ctx->lock);
	local_irq_restore(flags);

	mutex_lock(&child_ctx->mutex);

again:
	list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
				 list_entry)
		__perf_counter_exit_task(child_counter, child_ctx);

	/*
	 * If the last counter was a group counter, it will have appended all
	 * its siblings to the list, but we obtained 'tmp' before that which
	 * will still point to the list head terminating the iteration.
	 */
	if (!list_empty(&child_ctx->counter_list))
		goto again;

	mutex_unlock(&child_ctx->mutex);

	put_ctx(child_ctx);
}

/*
 * free an unexposed, unused context as created by inheritance by
 * init_task below, used by fork() in case of fail.
 */
void perf_counter_free_task(struct task_struct *task)
{
	struct perf_counter_context *ctx = task->perf_counter_ctxp;
	struct perf_counter *counter, *tmp;

	if (!ctx)
		return;

	mutex_lock(&ctx->mutex);
again:
	list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) {
		struct perf_counter *parent = counter->parent;

		if (WARN_ON_ONCE(!parent))
			continue;

		mutex_lock(&parent->child_mutex);
		list_del_init(&counter->child_list);
		mutex_unlock(&parent->child_mutex);

		fput(parent->filp);

		list_del_counter(counter, ctx);
		free_counter(counter);
	}

	if (!list_empty(&ctx->counter_list))
		goto again;

	mutex_unlock(&ctx->mutex);

	put_ctx(ctx);
}

/*
 * Initialize the perf_counter context in task_struct
 */
int perf_counter_init_task(struct task_struct *child)
{
	struct perf_counter_context *child_ctx, *parent_ctx;
	struct perf_counter_context *cloned_ctx;
	struct perf_counter *counter;
	struct task_struct *parent = current;
	int inherited_all = 1;
	int ret = 0;

	child->perf_counter_ctxp = NULL;

	mutex_init(&child->perf_counter_mutex);
	INIT_LIST_HEAD(&child->perf_counter_list);

	if (likely(!parent->perf_counter_ctxp))
		return 0;

	/*
	 * This is executed from the parent task context, so inherit
	 * counters that have been marked for cloning.
	 * First allocate and initialize a context for the child.
	 */

	child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
	if (!child_ctx)
		return -ENOMEM;

	__perf_counter_init_context(child_ctx, child);
	child->perf_counter_ctxp = child_ctx;
	get_task_struct(child);

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent);

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) {
		if (counter != counter->group_leader)
			continue;

		if (!counter->hw_event.inherit) {
			inherited_all = 0;
			continue;
		}

		ret = inherit_group(counter, parent, parent_ctx,
					     child, child_ctx);
		if (ret) {
			inherited_all = 0;
			break;
		}
	}

	if (inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 * Note that if the parent is a clone, it could get
		 * uncloned at any point, but that doesn't matter
		 * because the list of counters and the generation
		 * count can't have changed since we took the mutex.
		 */
		cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);

	return ret;
}

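/*
 * Bring up the per-CPU counter context of a CPU: initialize its context
 * and, under the resource lock, recompute how many counters a single
 * task may still use on this CPU.
 */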
static void __cpuinit perf_counter_init_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = &per_cpu(perf_cpu_context, cpu);
	__perf_counter_init_context(&cpuctx->ctx, NULL);

	spin_lock(&perf_resource_lock);
	cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
	spin_unlock(&perf_resource_lock);

	hw_perf_counter_setup(cpu);
}

#ifdef CONFIG_HOTPLUG_CPU
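/*
 * CPU-offline path: __perf_counter_exit_cpu() runs on the outgoing CPU
 * (via smp_call_function_single() below) and removes every counter still
 * attached to that CPU's context.
 */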
static void __perf_counter_exit_cpu(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = &cpuctx->ctx;
	struct perf_counter *counter, *tmp;

	list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
		__perf_counter_remove_from_context(counter);
}
static void perf_counter_exit_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &cpuctx->ctx;

	mutex_lock(&ctx->mutex);
	smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
	mutex_unlock(&ctx->mutex);
}
#else
static inline void perf_counter_exit_cpu(int cpu) { }
#endif

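/*
 * CPU hotplug callback: set up the per-CPU context before a CPU comes
 * online and tear it down before it goes away.
 */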
static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		perf_counter_init_cpu(cpu);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		perf_counter_exit_cpu(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata perf_cpu_nb = {
	.notifier_call		= perf_cpu_notify,
};

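/*
 * Called at boot: initialize the context of the boot CPU by hand (the
 * notifier is only registered afterwards) and register the hotplug
 * notifier for the remaining CPUs.
 */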
void __init perf_counter_init(void)
{
	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	register_cpu_notifier(&perf_cpu_nb);
}

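/*
 * sysfs interface: "reserve_percpu" and "overcommit" are exposed as
 * writable attributes in the "perf_counters" group of the CPU sysdev
 * class; see perf_counter_sysfs_init() at the bottom of this file.
 */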
static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%d\n", perf_reserved_percpu);
}

static ssize_t
perf_set_reserve_percpu(struct sysdev_class *class,
			const char *buf,
			size_t count)
{
	struct perf_cpu_context *cpuctx;
	unsigned long val;
	int err, cpu, mpt;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > perf_max_counters)
		return -EINVAL;

	spin_lock(&perf_resource_lock);
	perf_reserved_percpu = val;
	for_each_online_cpu(cpu) {
		cpuctx = &per_cpu(perf_cpu_context, cpu);
		spin_lock_irq(&cpuctx->ctx.lock);
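		/*
		 * A task may use at most the smaller of the counters still
		 * unused on this CPU and the non-reserved share.
		 */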
		mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
			  perf_max_counters - perf_reserved_percpu);
		cpuctx->max_pertask = mpt;
		spin_unlock_irq(&cpuctx->ctx.lock);
	}
	spin_unlock(&perf_resource_lock);

	return count;
}

static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%d\n", perf_overcommit);
}

static ssize_t
perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
{
	unsigned long val;
	int err;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > 1)
		return -EINVAL;

	spin_lock(&perf_resource_lock);
	perf_overcommit = val;
	spin_unlock(&perf_resource_lock);

	return count;
}

static SYSDEV_CLASS_ATTR(
				reserve_percpu,
				0644,
				perf_show_reserve_percpu,
				perf_set_reserve_percpu
			);

static SYSDEV_CLASS_ATTR(
				overcommit,
				0644,
				perf_show_overcommit,
				perf_set_overcommit
			);

static struct attribute *perfclass_attrs[] = {
	&attr_reserve_percpu.attr,
	&attr_overcommit.attr,
	NULL
};

static struct attribute_group perfclass_attr_group = {
	.attrs			= perfclass_attrs,
	.name			= "perf_counters",
};

static int __init perf_counter_sysfs_init(void)
{
	return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
				  &perfclass_attr_group);
}
device_initcall(perf_counter_sysfs_init);