/*
 * Performance counter core code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/vmstat.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_counter.h>

#include <asm/irq_regs.h>

/*
 * Each CPU has a list of per CPU counters:
 */
DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

int perf_max_counters __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

static atomic_t nr_counters __read_mostly;
static atomic_t nr_mmap_counters __read_mostly;
static atomic_t nr_comm_counters __read_mostly;

int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
int sysctl_perf_counter_limit __read_mostly = 100000; /* max NMIs per second */

static atomic64_t perf_counter_id;

/*
 * Lock for (sysadmin-configurable) counter reservations:
 */
static DEFINE_SPINLOCK(perf_resource_lock);

/*
 * Architecture provided APIs - weak aliases:
 */
extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	return NULL;
}

void __weak hw_perf_disable(void)		{ barrier(); }
void __weak hw_perf_enable(void)		{ barrier(); }

void __weak hw_perf_counter_setup(int cpu)	{ barrier(); }

int __weak
hw_perf_group_sched_in(struct perf_counter *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx, int cpu)
{
	return 0;
}

void __weak perf_counter_print_debug(void)	{ }

static DEFINE_PER_CPU(int, disable_count);

void __perf_disable(void)
{
	__get_cpu_var(disable_count)++;
}

bool __perf_enable(void)
{
	return !--__get_cpu_var(disable_count);
}

void perf_disable(void)
{
	__perf_disable();
	hw_perf_disable();
}

void perf_enable(void)
{
	if (__perf_enable())
		hw_perf_enable();
}
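
/*
 * Note: perf_disable()/perf_enable() pairs nest via the per-cpu
 * disable_count above; hw_perf_enable() only runs once the count
 * drops back to zero.  An illustrative caller (a sketch, not taken
 * from this file):
 *
 *	perf_disable();
 *	... reprogram or inspect counters with the PMU quiesced ...
 *	perf_enable();
 *
 * stays safe even if an outer section on the same cpu has already
 * disabled the PMU.
 */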

static void get_ctx(struct perf_counter_context *ctx)
{
	atomic_inc(&ctx->refcount);
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_counter_context *ctx;

	ctx = container_of(head, struct perf_counter_context, rcu_head);
	kfree(ctx);
}

static void put_ctx(struct perf_counter_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}

/*
 * Get the perf_counter_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_counter_context *
perf_lock_task_context(struct task_struct *task, unsigned long *flags)
{
	struct perf_counter_context *ctx;

	rcu_read_lock();
 retry:
	ctx = rcu_dereference(task->perf_counter_ctxp);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_counter_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed.  Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so.  If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
			spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}
	}
	rcu_read_unlock();
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_counter_context *perf_pin_task_context(struct task_struct *task)
{
	struct perf_counter_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, &flags);
	if (ctx) {
		++ctx->pin_count;
		get_ctx(ctx);
		spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_counter_context *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	spin_unlock_irqrestore(&ctx->lock, flags);
	put_ctx(ctx);
}
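
/*
 * Illustrative usage of the pin/unpin pair above (a sketch, not a
 * verbatim caller from this file): code that must inspect a remote
 * task's context without it being swapped or freed underneath it
 * would do:
 *
 *	ctx = perf_pin_task_context(task);
 *	if (ctx) {
 *		... walk ctx->counter_list ...
 *		perf_unpin_context(ctx);
 *	}
 */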

/*
 * Add a counter to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *group_leader = counter->group_leader;

	/*
	 * Depending on whether it is a standalone or sibling counter,
	 * add it straight to the context's counter list, or to the group
	 * leader's sibling list:
	 */
	if (group_leader == counter)
		list_add_tail(&counter->list_entry, &ctx->counter_list);
	else {
		list_add_tail(&counter->list_entry, &group_leader->sibling_list);
		group_leader->nr_siblings++;
	}

	list_add_rcu(&counter->event_entry, &ctx->event_list);
	ctx->nr_counters++;
}

/*
 * Remove a counter from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *sibling, *tmp;

	if (list_empty(&counter->list_entry))
		return;
	ctx->nr_counters--;

	list_del_init(&counter->list_entry);
	list_del_rcu(&counter->event_entry);

	if (counter->group_leader != counter)
		counter->group_leader->nr_siblings--;

	/*
	 * If this was a group counter with sibling counters then
	 * upgrade the siblings to singleton counters by adding them
	 * to the context list directly:
	 */
	list_for_each_entry_safe(sibling, tmp,
				 &counter->sibling_list, list_entry) {

		list_move_tail(&sibling->list_entry, &ctx->counter_list);
		sibling->group_leader = sibling;
	}
}

static void
counter_sched_out(struct perf_counter *counter,
		  struct perf_cpu_context *cpuctx,
		  struct perf_counter_context *ctx)
{
	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter->state = PERF_COUNTER_STATE_INACTIVE;
	counter->tstamp_stopped = ctx->time;
	counter->pmu->disable(counter);
	counter->oncpu = -1;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (counter->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

static void
group_sched_out(struct perf_counter *group_counter,
		struct perf_cpu_context *cpuctx,
		struct perf_counter_context *ctx)
{
	struct perf_counter *counter;

	if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter_sched_out(group_counter, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
		counter_sched_out(counter, cpuctx, ctx);

	if (group_counter->attr.exclusive)
		cpuctx->exclusive = 0;
}

/*
 * Cross CPU call to remove a performance counter
 *
 * We disable the counter on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_counter_remove_from_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock(&ctx->lock);
	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level.
	 */
	perf_disable();

	counter_sched_out(counter, cpuctx, ctx);

	list_del_counter(counter, ctx);

	if (!ctx->task) {
		/*
		 * Allow more per task counters with respect to the
		 * reservation:
		 */
		cpuctx->max_pertask =
			min(perf_max_counters - ctx->nr_counters,
			    perf_max_counters - perf_reserved_percpu);
	}

	perf_enable();
	spin_unlock(&ctx->lock);
}


/*
 * Remove the counter from a task's (or a CPU's) list of counters.
 *
 * Must be called with ctx->mutex held.
 *
 * CPU counters are removed with a smp call. For task counters we only
 * call when the task is on a CPU.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid.  This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_counter_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_counter_remove_from_context(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(counter->cpu,
					 __perf_counter_remove_from_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_counter_remove_from_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can remove the counter safely, if the call above did not
	 * succeed.
	 */
	if (!list_empty(&counter->list_entry)) {
		list_del_counter(counter, ctx);
	}
	spin_unlock_irq(&ctx->lock);
}

static inline u64 perf_clock(void)
{
	return cpu_clock(smp_processor_id());
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_counter_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

/*
 * Update the total_time_enabled and total_time_running fields for a counter.
 */
static void update_counter_times(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	u64 run_end;

	if (counter->state < PERF_COUNTER_STATE_INACTIVE)
		return;

	counter->total_time_enabled = ctx->time - counter->tstamp_enabled;

	if (counter->state == PERF_COUNTER_STATE_INACTIVE)
		run_end = counter->tstamp_stopped;
	else
		run_end = ctx->time;

	counter->total_time_running = run_end - counter->tstamp_running;
}

/*
 * Update total_time_enabled and total_time_running for all counters in a group.
 */
static void update_group_times(struct perf_counter *leader)
{
	struct perf_counter *counter;

	update_counter_times(leader);
	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		update_counter_times(counter);
}
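
/*
 * Worked example of the accounting above: a counter enabled at
 * ctx->time == 100 and scheduled out at 150, with the context clock
 * now at 180, reports total_time_enabled = 180 - 100 = 80 and
 * total_time_running = 150 - 100 = 50; userspace can scale the raw
 * count by enabled/running to estimate the unmultiplexed value.
 */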

/*
 * Cross CPU call to disable a performance counter
 */
static void __perf_counter_disable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock(&ctx->lock);

	/*
	 * If the counter is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
		update_context_time(ctx);
		update_counter_times(counter);
		if (counter == counter->group_leader)
			group_sched_out(counter, cpuctx, ctx);
		else
			counter_sched_out(counter, cpuctx, ctx);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock(&ctx->lock);
}

/*
 * Disable a counter.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_counter_for_each_child or perf_counter_for_each because they
 * hold the top-level counter's child_mutex, so any descendant that
 * goes to exit will block in sync_child_counter.
 * When called from perf_pending_counter it's OK because counter->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_counter_task_sched_out for this context.
 */
static void perf_counter_disable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_disable,
					 counter, 1);
		return;
	}

 retry:
	task_oncpu_function_call(task, __perf_counter_disable, counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the counter is still active, we need to retry the cross-call.
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
		update_counter_times(counter);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock_irq(&ctx->lock);
}

static int
counter_sched_in(struct perf_counter *counter,
		 struct perf_cpu_context *cpuctx,
		 struct perf_counter_context *ctx,
		 int cpu)
{
	if (counter->state <= PERF_COUNTER_STATE_OFF)
		return 0;

	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (counter->pmu->enable(counter)) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->oncpu = -1;
		return -EAGAIN;
	}

	counter->tstamp_running += ctx->time - counter->tstamp_stopped;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (counter->attr.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}

static int
group_sched_in(struct perf_counter *group_counter,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx,
	       int cpu)
{
	struct perf_counter *counter, *partial_group;
	int ret;

	if (group_counter->state == PERF_COUNTER_STATE_OFF)
		return 0;

	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
	if (ret)
		return ret < 0 ? ret : 0;

	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
		return -EAGAIN;

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
			partial_group = counter;
			goto group_error;
		}
	}

	return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter == partial_group)
			break;
		counter_sched_out(counter, cpuctx, ctx);
	}
	counter_sched_out(group_counter, cpuctx, ctx);

	return -EAGAIN;
}
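
/*
 * Group scheduling is all-or-nothing: a group with e.g. a cycles
 * counter and a cache-miss counter either gets both on the PMU or
 * neither, so the two counts always cover exactly the same stretch
 * of execution; the unwind loop above restores the pre-call state on
 * a partial failure.
 */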

/*
 * Return 1 for a group consisting entirely of software counters,
 * 0 if the group contains any hardware counters.
 */
static int is_software_only_group(struct perf_counter *leader)
{
	struct perf_counter *counter;

	if (!is_software_counter(leader))
		return 0;

	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		if (!is_software_counter(counter))
			return 0;

	return 1;
}

/*
 * Work out whether we can put this counter group on the CPU now.
 */
static int group_can_go_on(struct perf_counter *counter,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software counters can always go on.
	 */
	if (is_software_only_group(counter))
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * counters can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * counters on the CPU, it can't go on.
	 */
	if (counter->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

static void add_counter_to_ctx(struct perf_counter *counter,
			       struct perf_counter_context *ctx)
{
	list_add_counter(counter, ctx);
	counter->tstamp_enabled = ctx->time;
	counter->tstamp_running = ctx->time;
	counter->tstamp_stopped = ctx->time;
}

/*
 * Cross CPU call to install and enable a performance counter
 *
 * Must be called with ctx->mutex held
 */
static void __perf_install_in_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	int cpu = smp_processor_id();
	int err;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 * Or possibly this is the right context but it isn't
	 * on this cpu because it had no counters.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level. NOP for non NMI based counters.
	 */
	perf_disable();

	add_counter_to_ctx(counter, ctx);

	/*
	 * Don't put the counter on if it is disabled or if
	 * it is in a group and the group isn't on.
	 */
	if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
	    (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
		goto unlock;

	/*
	 * An exclusive counter can't go on if there are already active
	 * hardware counters, and no hardware counter can go on if there
	 * is already an exclusive counter on.
	 */
	if (!group_can_go_on(counter, cpuctx, 1))
		err = -EEXIST;
	else
		err = counter_sched_in(counter, cpuctx, ctx, cpu);

	if (err) {
		/*
		 * This counter couldn't go on.  If it is in a group
		 * then we have to pull the whole group off.
		 * If the counter group is pinned then put it in error state.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_COUNTER_STATE_ERROR;
		}
	}

	if (!err && !ctx->task && cpuctx->max_pertask)
		cpuctx->max_pertask--;

 unlock:
	perf_enable();

	spin_unlock(&ctx->lock);
}

/*
 * Attach a performance counter to a context
 *
 * First we add the counter to the list with the hardware enable bit
 * in counter->hw_config cleared.
 *
 * If the counter is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 *
 * Must be called with ctx->mutex held.
 */
static void
perf_install_in_context(struct perf_counter_context *ctx,
			struct perf_counter *counter,
			int cpu)
{
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are installed via an smp call and
		 * the install is always successful.
		 */
		smp_call_function_single(cpu, __perf_install_in_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_install_in_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * we need to retry the smp call.
	 */
	if (ctx->is_active && list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can add the counter safely, if the call above did not
	 * succeed.
	 */
	if (list_empty(&counter->list_entry))
		add_counter_to_ctx(counter, ctx);
	spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to enable a performance counter
 */
static void __perf_counter_enable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	int err;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto unlock;
	counter->state = PERF_COUNTER_STATE_INACTIVE;
	counter->tstamp_enabled = ctx->time - counter->total_time_enabled;

	/*
	 * If the counter is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(counter, cpuctx, 1)) {
		err = -EEXIST;
	} else {
		perf_disable();
		if (counter == leader)
			err = group_sched_in(counter, cpuctx, ctx,
					     smp_processor_id());
		else
			err = counter_sched_in(counter, cpuctx, ctx,
					       smp_processor_id());
		perf_enable();
	}

	if (err) {
		/*
		 * If this counter can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_COUNTER_STATE_ERROR;
		}
	}

 unlock:
	spin_unlock(&ctx->lock);
}

/*
 * Enable a counter.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_counter_for_each_child or perf_counter_for_each as described
 * for perf_counter_disable.
 */
static void perf_counter_enable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_enable,
					 counter, 1);
		return;
	}

	spin_lock_irq(&ctx->lock);
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto out;

	/*
	 * If the counter is in error state, clear that first.
	 * That way, if we see the counter in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (counter->state == PERF_COUNTER_STATE_ERROR)
		counter->state = PERF_COUNTER_STATE_OFF;

 retry:
	spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_counter_enable, counter);

	spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the counter is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
		goto retry;

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_OFF) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->tstamp_enabled =
			ctx->time - counter->total_time_enabled;
	}
 out:
	spin_unlock_irq(&ctx->lock);
}

static int perf_counter_refresh(struct perf_counter *counter, int refresh)
{
	/*
	 * not supported on inherited counters
	 */
	if (counter->attr.inherit)
		return -EINVAL;

	atomic_add(refresh, &counter->event_limit);
	perf_counter_enable(counter);

	return 0;
}
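
/*
 * Illustrative use of the refresh above from userspace (assuming an
 * fd from sys_perf_counter_open on a counter with an event limit):
 *
 *	ioctl(fd, PERF_COUNTER_IOC_REFRESH, n);
 *
 * arms the counter for n more overflow events; when event_limit
 * reaches zero the counter disables itself until refreshed again.
 */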

void __perf_counter_sched_out(struct perf_counter_context *ctx,
			      struct perf_cpu_context *cpuctx)
{
	struct perf_counter *counter;

	spin_lock(&ctx->lock);
	ctx->is_active = 0;
	if (likely(!ctx->nr_counters))
		goto out;
	update_context_time(ctx);

	perf_disable();
	if (ctx->nr_active) {
		list_for_each_entry(counter, &ctx->counter_list, list_entry) {
			if (counter != counter->group_leader)
				counter_sched_out(counter, cpuctx, ctx);
			else
				group_sched_out(counter, cpuctx, ctx);
		}
	}
	perf_enable();
 out:
	spin_unlock(&ctx->lock);
}

/*
 * Test whether two contexts are equivalent, i.e. whether they
 * have both been cloned from the same version of the same context
 * and they both have the same number of enabled counters.
 * If the number of enabled counters is the same, then the set
 * of enabled counters should be the same, because these are both
 * inherited contexts, therefore we can't access individual counters
 * in them directly with an fd; we can only enable/disable all
 * counters via prctl, or enable/disable all counters in a family
 * via ioctl, which will have the same effect on both contexts.
 */
static int context_equiv(struct perf_counter_context *ctx1,
			 struct perf_counter_context *ctx2)
{
	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
		&& ctx1->parent_gen == ctx2->parent_gen
		&& !ctx1->pin_count && !ctx2->pin_count;
}

/*
 * Called from scheduler to remove the counters of the current task,
 * with interrupts disabled.
 *
 * We stop each counter and update the counter value in counter->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of counter _before_
 * accessing the counter control register. If a NMI hits, then it will
 * not restart the counter.
 */
void perf_counter_task_sched_out(struct task_struct *task,
				 struct task_struct *next, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = task->perf_counter_ctxp;
	struct perf_counter_context *next_ctx;
	struct perf_counter_context *parent;
	struct pt_regs *regs;
	int do_switch = 1;

	regs = task_pt_regs(task);
	perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs, 0);

	if (likely(!ctx || !cpuctx->task_ctx))
		return;

	update_context_time(ctx);

	rcu_read_lock();
	parent = rcu_dereference(ctx->parent_ctx);
	next_ctx = next->perf_counter_ctxp;
	if (parent && next_ctx &&
	    rcu_dereference(next_ctx->parent_ctx) == parent) {
		/*
		 * Looks like the two contexts are clones, so we might be
		 * able to optimize the context switch.  We lock both
		 * contexts and check that they are clones under the
		 * lock (including re-checking that neither has been
		 * uncloned in the meantime).  It doesn't matter which
		 * order we take the locks because no other cpu could
		 * be trying to lock both of these tasks.
		 */
		spin_lock(&ctx->lock);
		spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
		if (context_equiv(ctx, next_ctx)) {
			/*
			 * XXX do we need a memory barrier of sorts
			 * wrt to rcu_dereference() of perf_counter_ctxp
			 */
			task->perf_counter_ctxp = next_ctx;
			next->perf_counter_ctxp = ctx;
			ctx->task = next;
			next_ctx->task = task;
			do_switch = 0;
		}
		spin_unlock(&next_ctx->lock);
		spin_unlock(&ctx->lock);
	}
	rcu_read_unlock();

	if (do_switch) {
		__perf_counter_sched_out(ctx, cpuctx);
		cpuctx->task_ctx = NULL;
	}
}

/*
 * Called with IRQs disabled
 */
static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);

	if (!cpuctx->task_ctx)
		return;

	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
		return;

	__perf_counter_sched_out(ctx, cpuctx);
	cpuctx->task_ctx = NULL;
}

/*
 * Called with IRQs disabled
 */
static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
{
	__perf_counter_sched_out(&cpuctx->ctx, cpuctx);
}

static void
__perf_counter_sched_in(struct perf_counter_context *ctx,
			struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter *counter;
	int can_add_hw = 1;

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	if (likely(!ctx->nr_counters))
		goto out;

	ctx->timestamp = perf_clock();

	perf_disable();

	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
		    !counter->attr.pinned)
			continue;
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (counter != counter->group_leader)
			counter_sched_in(counter, cpuctx, ctx, cpu);
		else {
			if (group_can_go_on(counter, cpuctx, 1))
				group_sched_in(counter, cpuctx, ctx, cpu);
		}

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
			update_group_times(counter);
			counter->state = PERF_COUNTER_STATE_ERROR;
		}
	}

	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		/*
		 * Ignore counters in OFF or ERROR state, and
		 * ignore pinned counters since we did them already.
		 */
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
		    counter->attr.pinned)
			continue;

		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of counters:
		 */
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (counter != counter->group_leader) {
			if (counter_sched_in(counter, cpuctx, ctx, cpu))
				can_add_hw = 0;
		} else {
			if (group_can_go_on(counter, cpuctx, can_add_hw)) {
				if (group_sched_in(counter, cpuctx, ctx, cpu))
					can_add_hw = 0;
			}
		}
	}
	perf_enable();
 out:
	spin_unlock(&ctx->lock);
}

/*
 * Called from scheduler to add the counters of the current task
 * with interrupts disabled.
 *
 * We restore the counter value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of counter _before_
 * accessing the counter control register. If a NMI hits, then it will
 * keep the counter running.
 */
void perf_counter_task_sched_in(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = task->perf_counter_ctxp;

	if (likely(!ctx))
		return;
	if (cpuctx->task_ctx == ctx)
		return;
	__perf_counter_sched_in(ctx, cpuctx, cpu);
	cpuctx->task_ctx = ctx;
}

static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter_context *ctx = &cpuctx->ctx;

	__perf_counter_sched_in(ctx, cpuctx, cpu);
}

#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_counter *counter, int enable);
static void perf_log_period(struct perf_counter *counter, u64 period);

static void perf_adjust_freq(struct perf_counter_context *ctx)
{
	struct perf_counter *counter;
	struct hw_perf_counter *hwc;
	u64 interrupts, sample_period;
	u64 events, period, freq;
	s64 delta;

	spin_lock(&ctx->lock);
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state != PERF_COUNTER_STATE_ACTIVE)
			continue;

		hwc = &counter->hw;

		interrupts = hwc->interrupts;
		hwc->interrupts = 0;

		if (interrupts == MAX_INTERRUPTS) {
			perf_log_throttle(counter, 1);
			counter->pmu->unthrottle(counter);
			interrupts = 2*sysctl_perf_counter_limit/HZ;
		}

		if (!counter->attr.freq || !counter->attr.sample_freq)
			continue;

		if (counter->attr.sample_freq < HZ) {
			freq = counter->attr.sample_freq;

			hwc->freq_count += freq;
			hwc->freq_interrupts += interrupts;

			if (hwc->freq_count < HZ)
				continue;

			interrupts = hwc->freq_interrupts;
			hwc->freq_interrupts = 0;
			hwc->freq_count -= HZ;
		} else
			freq = HZ;

		events = freq * interrupts * hwc->sample_period;
		period = div64_u64(events, counter->attr.sample_freq);

		delta = (s64)(1 + period - hwc->sample_period);
		delta >>= 1;

		sample_period = hwc->sample_period + delta;

		if (!sample_period)
			sample_period = 1;

		perf_log_period(counter, sample_period);

		hwc->sample_period = sample_period;
	}
	spin_unlock(&ctx->lock);
}
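
/*
 * Worked example of the adjustment above (illustrative numbers): with
 * HZ == 1000, one interrupt in the last tick, sample_period == 10000
 * and a requested sample_freq of 4000, we estimate
 * events = 1000 * 1 * 10000 = 10000000 per second, so the ideal
 * period = 10000000 / 4000 = 2500.  Then
 * delta = (1 + 2500 - 10000) >> 1 == -3750, giving a new sample_period
 * of 6250; each tick moves roughly half way toward the ideal period.
 */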

/*
 * Round-robin a context's counters:
 */
static void rotate_ctx(struct perf_counter_context *ctx)
{
	struct perf_counter *counter;

	if (!ctx->nr_counters)
		return;

	spin_lock(&ctx->lock);
	/*
	 * Rotate the first entry last (works just fine for group counters too):
	 */
	perf_disable();
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		list_move_tail(&counter->list_entry, &ctx->counter_list);
		break;
	}
	perf_enable();

	spin_unlock(&ctx->lock);
}

void perf_counter_task_tick(struct task_struct *curr, int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;

	if (!atomic_read(&nr_counters))
		return;

	cpuctx = &per_cpu(perf_cpu_context, cpu);
	ctx = curr->perf_counter_ctxp;

	perf_adjust_freq(&cpuctx->ctx);
	if (ctx)
		perf_adjust_freq(ctx);

	perf_counter_cpu_sched_out(cpuctx);
	if (ctx)
		__perf_counter_task_sched_out(ctx);

	rotate_ctx(&cpuctx->ctx);
	if (ctx)
		rotate_ctx(ctx);

	perf_counter_cpu_sched_in(cpuctx, cpu);
	if (ctx)
		perf_counter_task_sched_in(curr, cpu);
}

/*
 * Cross CPU call to read the hardware counter
 */
static void __read(void *info)
{
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	unsigned long flags;

	local_irq_save(flags);
	if (ctx->is_active)
		update_context_time(ctx);
	counter->pmu->read(counter);
	update_counter_times(counter);
	local_irq_restore(flags);
}

static u64 perf_counter_read(struct perf_counter *counter)
{
	/*
	 * If counter is enabled and currently active on a CPU, update the
	 * value in the counter structure:
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		smp_call_function_single(counter->oncpu,
					 __read, counter, 1);
	} else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
		update_counter_times(counter);
	}

	return atomic64_read(&counter->count);
}

/*
 * Initialize the perf_counter context in a task_struct:
 */
static void
__perf_counter_init_context(struct perf_counter_context *ctx,
			    struct task_struct *task)
{
	memset(ctx, 0, sizeof(*ctx));
	spin_lock_init(&ctx->lock);
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->counter_list);
	INIT_LIST_HEAD(&ctx->event_list);
	atomic_set(&ctx->refcount, 1);
	ctx->task = task;
}

static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
{
	struct perf_counter_context *parent_ctx;
	struct perf_counter_context *ctx;
	struct perf_cpu_context *cpuctx;
	struct task_struct *task;
	unsigned long flags;
	int err;

	/*
	 * If cpu is not a wildcard then this is a percpu counter:
	 */
	if (cpu != -1) {
		/* Must be root to operate on a CPU counter: */
		if (sysctl_perf_counter_priv && !capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);

		if (cpu < 0 || cpu > num_possible_cpus())
			return ERR_PTR(-EINVAL);

		/*
		 * We could be clever and allow to attach a counter to an
		 * offline CPU and activate it when the CPU comes up, but
		 * that's for later.
		 */
		if (!cpu_isset(cpu, cpu_online_map))
			return ERR_PTR(-ENODEV);

		cpuctx = &per_cpu(perf_cpu_context, cpu);
		ctx = &cpuctx->ctx;
		get_ctx(ctx);

		return ctx;
	}

	rcu_read_lock();
	if (!pid)
		task = current;
	else
		task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	if (!task)
		return ERR_PTR(-ESRCH);

	/*
	 * Can't attach counters to a dying task.
	 */
	err = -ESRCH;
	if (task->flags & PF_EXITING)
		goto errout;

	/* Reuse ptrace permission checks for now. */
	err = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto errout;

 retry:
	ctx = perf_lock_task_context(task, &flags);
	if (ctx) {
		parent_ctx = ctx->parent_ctx;
		if (parent_ctx) {
			put_ctx(parent_ctx);
			ctx->parent_ctx = NULL;		/* no longer a clone */
		}
		/*
		 * Get an extra reference before dropping the lock so that
		 * this context won't get freed if the task exits.
		 */
		get_ctx(ctx);
		spin_unlock_irqrestore(&ctx->lock, flags);
	}

	if (!ctx) {
		ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
		err = -ENOMEM;
		if (!ctx)
			goto errout;
		__perf_counter_init_context(ctx, task);
		get_ctx(ctx);
		if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) {
			/*
			 * We raced with some other task; use
			 * the context they set.
			 */
			kfree(ctx);
			goto retry;
		}
		get_task_struct(task);
	}

	put_task_struct(task);
	return ctx;

 errout:
	put_task_struct(task);
	return ERR_PTR(err);
}

static void free_counter_rcu(struct rcu_head *head)
{
	struct perf_counter *counter;

	counter = container_of(head, struct perf_counter, rcu_head);
	if (counter->ns)
		put_pid_ns(counter->ns);
	kfree(counter);
}

static void perf_pending_sync(struct perf_counter *counter);

static void free_counter(struct perf_counter *counter)
{
	perf_pending_sync(counter);

	atomic_dec(&nr_counters);
	if (counter->attr.mmap)
		atomic_dec(&nr_mmap_counters);
	if (counter->attr.comm)
		atomic_dec(&nr_comm_counters);

	if (counter->destroy)
		counter->destroy(counter);

	put_ctx(counter->ctx);
	call_rcu(&counter->rcu_head, free_counter_rcu);
}

/*
 * Called when the last reference to the file is gone.
 */
static int perf_release(struct inode *inode, struct file *file)
{
	struct perf_counter *counter = file->private_data;
	struct perf_counter_context *ctx = counter->ctx;

	file->private_data = NULL;

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_counter_remove_from_context(counter);
	mutex_unlock(&ctx->mutex);

	mutex_lock(&counter->owner->perf_counter_mutex);
	list_del_init(&counter->owner_entry);
	mutex_unlock(&counter->owner->perf_counter_mutex);
	put_task_struct(counter->owner);

	free_counter(counter);

	return 0;
}

/*
 * Read the performance counter - simple non blocking version for now
 */
static ssize_t
perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
{
	u64 values[3];
	int n;

	/*
	 * Return end-of-file for a read on a counter that is in
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled on to the CPU at some point).
	 */
	if (counter->state == PERF_COUNTER_STATE_ERROR)
		return 0;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	mutex_lock(&counter->child_mutex);
	values[0] = perf_counter_read(counter);
	n = 1;
	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = counter->total_time_enabled +
			atomic64_read(&counter->child_total_time_enabled);
	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = counter->total_time_running +
			atomic64_read(&counter->child_total_time_running);
	if (counter->attr.read_format & PERF_FORMAT_ID)
		values[n++] = counter->id;
	mutex_unlock(&counter->child_mutex);

	if (count < n * sizeof(u64))
		return -EINVAL;
	count = n * sizeof(u64);

	if (copy_to_user(buf, values, count))
		return -EFAULT;

	return count;
}
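
/*
 * The read() buffer layout produced above, in order: the counter
 * value, then, if requested via attr.read_format, TOTAL_TIME_ENABLED,
 * TOTAL_TIME_RUNNING and ID, each as a u64.  A minimal (illustrative)
 * userspace reader:
 *
 *	u64 values[4];
 *	read(fd, values, sizeof(values));
 */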

static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct perf_counter *counter = file->private_data;

	return perf_read_hw(counter, buf, count);
}

static unsigned int perf_poll(struct file *file, poll_table *wait)
{
	struct perf_counter *counter = file->private_data;
	struct perf_mmap_data *data;
	unsigned int events = POLLHUP;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (data)
		events = atomic_xchg(&data->poll, 0);
	rcu_read_unlock();

	poll_wait(file, &counter->waitq, wait);

	return events;
}

static void perf_counter_reset(struct perf_counter *counter)
{
	(void)perf_counter_read(counter);
	atomic64_set(&counter->count, 0);
	perf_counter_update_userpage(counter);
}

static void perf_counter_for_each_sibling(struct perf_counter *counter,
					  void (*func)(struct perf_counter *))
{
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *sibling;

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	counter = counter->group_leader;

	func(counter);
	list_for_each_entry(sibling, &counter->sibling_list, list_entry)
		func(sibling);
	mutex_unlock(&ctx->mutex);
}

/*
 * Holding the top-level counter's child_mutex means that any
 * descendant process that has inherited this counter will block
 * in sync_child_counter if it goes to exit, thus satisfying the
 * task existence requirements of perf_counter_enable/disable.
 */
static void perf_counter_for_each_child(struct perf_counter *counter,
					void (*func)(struct perf_counter *))
{
	struct perf_counter *child;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	mutex_lock(&counter->child_mutex);
	func(counter);
	list_for_each_entry(child, &counter->child_list, child_list)
		func(child);
	mutex_unlock(&counter->child_mutex);
}

static void perf_counter_for_each(struct perf_counter *counter,
				  void (*func)(struct perf_counter *))
{
	struct perf_counter *child;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	mutex_lock(&counter->child_mutex);
	perf_counter_for_each_sibling(counter, func);
	list_for_each_entry(child, &counter->child_list, child_list)
		perf_counter_for_each_sibling(child, func);
	mutex_unlock(&counter->child_mutex);
}

static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
{
	struct perf_counter_context *ctx = counter->ctx;
	unsigned long size;
	int ret = 0;
	u64 value;

	if (!counter->attr.sample_period)
		return -EINVAL;

	size = copy_from_user(&value, arg, sizeof(value));
	if (size != sizeof(value))
		return -EFAULT;

	if (!value)
		return -EINVAL;

	spin_lock_irq(&ctx->lock);
	if (counter->attr.freq) {
		if (value > sysctl_perf_counter_limit) {
			ret = -EINVAL;
			goto unlock;
		}

		counter->attr.sample_freq = value;
	} else {
		counter->attr.sample_period = value;
		counter->hw.sample_period = value;

		perf_log_period(counter, value);
	}
unlock:
	spin_unlock_irq(&ctx->lock);

	return ret;
}

static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct perf_counter *counter = file->private_data;
	void (*func)(struct perf_counter *);
	u32 flags = arg;

	switch (cmd) {
	case PERF_COUNTER_IOC_ENABLE:
		func = perf_counter_enable;
		break;
	case PERF_COUNTER_IOC_DISABLE:
		func = perf_counter_disable;
		break;
	case PERF_COUNTER_IOC_RESET:
		func = perf_counter_reset;
		break;

	case PERF_COUNTER_IOC_REFRESH:
		return perf_counter_refresh(counter, arg);

	case PERF_COUNTER_IOC_PERIOD:
		return perf_counter_period(counter, (u64 __user *)arg);

	default:
		return -ENOTTY;
	}

	if (flags & PERF_IOC_FLAG_GROUP)
		perf_counter_for_each(counter, func);
	else
		perf_counter_for_each_child(counter, func);

	return 0;
}
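
/*
 * Illustrative userspace usage of the ioctls above (fd from
 * sys_perf_counter_open):
 *
 *	ioctl(fd, PERF_COUNTER_IOC_DISABLE, 0);
 *	ioctl(fd, PERF_COUNTER_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 *
 * Without PERF_IOC_FLAG_GROUP the operation applies to this counter
 * (and its inherited children); with it, the operation extends to
 * every sibling in the counter's group as well.
 */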

int perf_counter_task_enable(void)
{
	struct perf_counter *counter;

	mutex_lock(&current->perf_counter_mutex);
	list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
		perf_counter_for_each_child(counter, perf_counter_enable);
	mutex_unlock(&current->perf_counter_mutex);

	return 0;
}

int perf_counter_task_disable(void)
{
	struct perf_counter *counter;

	mutex_lock(&current->perf_counter_mutex);
	list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
		perf_counter_for_each_child(counter, perf_counter_disable);
	mutex_unlock(&current->perf_counter_mutex);

	return 0;
}

/*
 * Callers need to ensure there can be no nesting of this function, otherwise
 * the seqlock logic goes bad. We can not serialize this because the arch
 * code calls this from NMI context.
 */
void perf_counter_update_userpage(struct perf_counter *counter)
{
	struct perf_counter_mmap_page *userpg;
	struct perf_mmap_data *data;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (!data)
		goto unlock;

	userpg = data->user_page;

	/*
	 * Disable preemption so as to not let the corresponding user-space
	 * spin too long if we get preempted.
	 */
	preempt_disable();
	++userpg->lock;
	barrier();
	userpg->index = counter->hw.idx;
	userpg->offset = atomic64_read(&counter->count);
	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		userpg->offset -= atomic64_read(&counter->hw.prev_count);

	barrier();
	++userpg->lock;
	preempt_enable();
unlock:
	rcu_read_unlock();
}
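
/*
 * A sketch of the matching userspace read side (assuming pc points at
 * the mmap()ed first page): the lock field acts as a seqcount, so a
 * reader retries until it sees the same value before and after:
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		index = pc->index;
 *		offset = pc->offset;
 *		barrier();
 *	} while (pc->lock != seq);
 */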

static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct perf_counter *counter = vma->vm_file->private_data;
	struct perf_mmap_data *data;
	int ret = VM_FAULT_SIGBUS;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (!data)
		goto unlock;

	if (vmf->pgoff == 0) {
		vmf->page = virt_to_page(data->user_page);
	} else {
		int nr = vmf->pgoff - 1;

		if ((unsigned)nr > data->nr_pages)
			goto unlock;

		vmf->page = virt_to_page(data->data_pages[nr]);
	}
	get_page(vmf->page);
	ret = 0;
unlock:
	rcu_read_unlock();

	return ret;
}

static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
{
	struct perf_mmap_data *data;
	unsigned long size;
	int i;

	WARN_ON(atomic_read(&counter->mmap_count));

	size = sizeof(struct perf_mmap_data);
	size += nr_pages * sizeof(void *);

	data = kzalloc(size, GFP_KERNEL);
	if (!data)
		goto fail;

	data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
	if (!data->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
		if (!data->data_pages[i])
			goto fail_data_pages;
	}

	data->nr_pages = nr_pages;
	atomic_set(&data->lock, -1);

	rcu_assign_pointer(counter->data, data);

	return 0;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)data->data_pages[i]);

	free_page((unsigned long)data->user_page);

fail_user_page:
	kfree(data);

fail:
	return -ENOMEM;
}

static void __perf_mmap_data_free(struct rcu_head *rcu_head)
{
	struct perf_mmap_data *data;
	int i;

	data = container_of(rcu_head, struct perf_mmap_data, rcu_head);

	free_page((unsigned long)data->user_page);
	for (i = 0; i < data->nr_pages; i++)
		free_page((unsigned long)data->data_pages[i]);
	kfree(data);
}

static void perf_mmap_data_free(struct perf_counter *counter)
{
	struct perf_mmap_data *data = counter->data;

	WARN_ON(atomic_read(&counter->mmap_count));

	rcu_assign_pointer(counter->data, NULL);
	call_rcu(&data->rcu_head, __perf_mmap_data_free);
}

static void perf_mmap_open(struct vm_area_struct *vma)
{
	struct perf_counter *counter = vma->vm_file->private_data;

	atomic_inc(&counter->mmap_count);
}

static void perf_mmap_close(struct vm_area_struct *vma)
{
	struct perf_counter *counter = vma->vm_file->private_data;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) {
		struct user_struct *user = current_user();

		atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
		vma->vm_mm->locked_vm -= counter->data->nr_locked;
		perf_mmap_data_free(counter);
		mutex_unlock(&counter->mmap_mutex);
	}
}

static struct vm_operations_struct perf_mmap_vmops = {
	.open  = perf_mmap_open,
	.close = perf_mmap_close,
	.fault = perf_mmap_fault,
};

static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct perf_counter *counter = file->private_data;
	unsigned long user_locked, user_lock_limit;
	struct user_struct *user = current_user();
	unsigned long locked, lock_limit;
	unsigned long vma_size;
	unsigned long nr_pages;
	long user_extra, extra;
	int ret = 0;

	if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
		return -EINVAL;

	vma_size = vma->vm_end - vma->vm_start;
	nr_pages = (vma_size / PAGE_SIZE) - 1;

	/*
	 * If we have data pages ensure they're a power-of-two number, so we
	 * can do bitmasks instead of modulo.
	 */
	if (nr_pages != 0 && !is_power_of_2(nr_pages))
		return -EINVAL;

	if (vma_size != PAGE_SIZE * (1 + nr_pages))
		return -EINVAL;

	if (vma->vm_pgoff != 0)
		return -EINVAL;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	mutex_lock(&counter->mmap_mutex);
	if (atomic_inc_not_zero(&counter->mmap_count)) {
		if (nr_pages != counter->data->nr_pages)
			ret = -EINVAL;
		goto unlock;
	}

	user_extra = nr_pages + 1;
	user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);

	/*
	 * Increase the limit linearly with more CPUs:
	 */
	user_lock_limit *= num_online_cpus();

	user_locked = atomic_long_read(&user->locked_vm) + user_extra;

	extra = 0;
	if (user_locked > user_lock_limit)
		extra = user_locked - user_lock_limit;

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;
	locked = vma->vm_mm->locked_vm + extra;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -EPERM;
		goto unlock;
	}

	WARN_ON(counter->data);
	ret = perf_mmap_data_alloc(counter, nr_pages);
	if (ret)
		goto unlock;

	atomic_set(&counter->mmap_count, 1);
	atomic_long_add(user_extra, &user->locked_vm);
	vma->vm_mm->locked_vm += extra;
	counter->data->nr_locked = extra;
unlock:
	mutex_unlock(&counter->mmap_mutex);

	vma->vm_flags &= ~VM_MAYWRITE;
	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &perf_mmap_vmops;

	return ret;
}
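
/*
 * Illustrative sketch (not kernel code): the layout enforced above means
 * user-space maps one control page plus a power-of-two number of data
 * pages, read-only and at file offset 0, e.g.:
 *
 *	size_t pgsz = sysconf(_SC_PAGESIZE);
 *	void *base = mmap(NULL, (1 + 8) * pgsz, PROT_READ,
 *			  MAP_SHARED, counter_fd, 0);
 *
 * where "counter_fd" is assumed to be a file descriptor returned by
 * sys_perf_counter_open().
 */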

static int perf_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct perf_counter *counter = filp->private_data;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &counter->fasync);
	mutex_unlock(&inode->i_mutex);

	if (retval < 0)
		return retval;

	return 0;
}

static const struct file_operations perf_fops = {
	.release		= perf_release,
	.read			= perf_read,
	.poll			= perf_poll,
	.unlocked_ioctl		= perf_ioctl,
	.compat_ioctl		= perf_ioctl,
	.mmap			= perf_mmap,
	.fasync			= perf_fasync,
};

/*
 * Perf counter wakeup
 *
 * If there's data, ensure we set the poll() state and publish everything
 * to user-space before waking everybody up.
 */

void perf_counter_wakeup(struct perf_counter *counter)
{
	wake_up_all(&counter->waitq);

	if (counter->pending_kill) {
		kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
		counter->pending_kill = 0;
	}
}

/*
 * Pending wakeups
 *
 * Handle the case where we need to wake up from NMI (or rq->lock) context.
 *
 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
 * single linked list and use cmpxchg() to add entries locklessly.
 */

static void perf_pending_counter(struct perf_pending_entry *entry)
{
	struct perf_counter *counter = container_of(entry,
			struct perf_counter, pending);

	if (counter->pending_disable) {
		counter->pending_disable = 0;
		perf_counter_disable(counter);
	}

	if (counter->pending_wakeup) {
		counter->pending_wakeup = 0;
		perf_counter_wakeup(counter);
	}
}

#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)

static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
	PENDING_TAIL,
};

static void perf_pending_queue(struct perf_pending_entry *entry,
			       void (*func)(struct perf_pending_entry *))
{
	struct perf_pending_entry **head;

	if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
		return;

	entry->func = func;

	head = &get_cpu_var(perf_pending_head);

	do {
		entry->next = *head;
	} while (cmpxchg(head, entry->next, entry) != entry->next);

	set_perf_counter_pending();

	put_cpu_var(perf_pending_head);
}

static int __perf_pending_run(void)
{
	struct perf_pending_entry *list;
	int nr = 0;

	list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
	while (list != PENDING_TAIL) {
		void (*func)(struct perf_pending_entry *);
		struct perf_pending_entry *entry = list;

		list = list->next;

		func = entry->func;
		entry->next = NULL;
		/*
		 * Ensure we observe the unqueue before we issue the wakeup,
		 * so that we won't be waiting forever.
		 * -- see perf_not_pending().
		 */
		smp_wmb();

		func(entry);
		nr++;
	}

	return nr;
}

static inline int perf_not_pending(struct perf_counter *counter)
{
	/*
	 * If we flush on whatever cpu we run, there is a chance we don't
	 * need to wait.
	 */
	get_cpu();
	__perf_pending_run();
	put_cpu();

	/*
	 * Ensure we see the proper queue state before going to sleep
	 * so that we do not miss the wakeup. -- see __perf_pending_run()
	 */
	smp_rmb();
	return counter->pending.next == NULL;
}

static void perf_pending_sync(struct perf_counter *counter)
{
	wait_event(counter->waitq, perf_not_pending(counter));
}

void perf_counter_do_pending(void)
{
	__perf_pending_run();
}

/*
 * Callchain support -- arch specific
 */

__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	return NULL;
}

/*
 * Output
 */

struct perf_output_handle {
	struct perf_counter	*counter;
	struct perf_mmap_data	*data;
	unsigned long		head;
	unsigned long		offset;
	int			nmi;
	int			overflow;
	int			locked;
	unsigned long		flags;
};

static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->data->poll, POLL_IN);

	if (handle->nmi) {
		handle->counter->pending_wakeup = 1;
		perf_pending_queue(&handle->counter->pending,
				   perf_pending_counter);
	} else
		perf_counter_wakeup(handle->counter);
}

/*
 * Curious locking construct.
 *
 * We need to ensure a later event doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * What we do is serialize between CPUs so we only have to deal with NMI
 * nesting on a single CPU.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_lock(struct perf_output_handle *handle)
{
	struct perf_mmap_data *data = handle->data;
	int cpu;

	handle->locked = 0;

	local_irq_save(handle->flags);
	cpu = smp_processor_id();

	if (in_nmi() && atomic_read(&data->lock) == cpu)
		return;

	while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
		cpu_relax();

	handle->locked = 1;
}

static void perf_output_unlock(struct perf_output_handle *handle)
{
	struct perf_mmap_data *data = handle->data;
	unsigned long head;
	int cpu;

	data->done_head = data->head;

	if (!handle->locked)
		goto out;

again:
	/*
	 * The xchg implies a full barrier that ensures all writes are done
	 * before we publish the new head, matched by a rmb() in userspace when
	 * reading this position.
	 */
	while ((head = atomic_long_xchg(&data->done_head, 0)))
		data->user_page->data_head = head;

	/*
	 * NMI can happen here, which means we can miss a done_head update.
	 */

	cpu = atomic_xchg(&data->lock, -1);
	WARN_ON_ONCE(cpu != smp_processor_id());

	/*
	 * Therefore we have to validate we did not indeed do so.
	 */
	if (unlikely(atomic_long_read(&data->done_head))) {
		/*
		 * Since we had it locked, we can lock it again.
		 */
		while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
			cpu_relax();

		goto again;
	}

	if (atomic_xchg(&data->wakeup, 0))
		perf_output_wakeup(handle);
out:
	local_irq_restore(handle->flags);
}
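
/*
 * Illustrative sketch (not kernel code) of the user-space rmb() pairing
 * mentioned above, with "pc" assumed to point at the mapped control page:
 *
 *	u64 head = pc->data_head;
 *	rmb();
 *	... read the freshly published bytes up to "head" ...
 *
 * The rmb() keeps the data reads from being reordered before the load of
 * data_head, matching the full barrier implied by the xchg() above.
 */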

static int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_counter *counter, unsigned int size,
			     int nmi, int overflow)
{
	struct perf_mmap_data *data;
	unsigned int offset, head;

	/*
	 * For inherited counters we send all the output towards the parent.
	 */
	if (counter->parent)
		counter = counter->parent;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (!data)
		goto out;

	handle->data	 = data;
	handle->counter	 = counter;
	handle->nmi	 = nmi;
	handle->overflow = overflow;

	if (!data->nr_pages)
		goto fail;

	perf_output_lock(handle);

	do {
		offset = head = atomic_long_read(&data->head);
		head += size;
	} while (atomic_long_cmpxchg(&data->head, offset, head) != offset);

	handle->offset	= offset;
	handle->head	= head;

	if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
		atomic_set(&data->wakeup, 1);

	return 0;

fail:
	perf_output_wakeup(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}
static void perf_output_copy(struct perf_output_handle *handle,
			     const void *buf, unsigned int len)
{
	unsigned int pages_mask;
	unsigned int offset;
	unsigned int size;
	void **pages;

	offset		= handle->offset;
	pages_mask	= handle->data->nr_pages - 1;
	pages		= handle->data->data_pages;

	do {
		unsigned int page_offset;
		int nr;

		nr	    = (offset >> PAGE_SHIFT) & pages_mask;
		page_offset = offset & (PAGE_SIZE - 1);
		size	    = min_t(unsigned int, PAGE_SIZE - page_offset, len);

		memcpy(pages[nr] + page_offset, buf, size);

		len	    -= size;
		buf	    += size;
		offset	    += size;
	} while (len);

	handle->offset = offset;

	/*
	 * Check we didn't copy past our reservation window, taking the
	 * possible unsigned int wrap into account.
	 */
	WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
}

#define perf_output_put(handle, x) \
	perf_output_copy((handle), &(x), sizeof(x))

static void perf_output_end(struct perf_output_handle *handle)
{
	struct perf_counter *counter = handle->counter;
	struct perf_mmap_data *data = handle->data;

	int wakeup_events = counter->attr.wakeup_events;

	if (handle->overflow && wakeup_events) {
		int events = atomic_inc_return(&data->events);
		if (events >= wakeup_events) {
			atomic_sub(wakeup_events, &data->events);
			atomic_set(&data->wakeup, 1);
		}
	}

	perf_output_unlock(handle);
	rcu_read_unlock();
}

static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p)
{
	/*
	 * only top level counters have the pid namespace they were created in
	 */
	if (counter->parent)
		counter = counter->parent;

	return task_tgid_nr_ns(p, counter->ns);
}

static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
{
	/*
	 * only top level counters have the pid namespace they were created in
	 */
	if (counter->parent)
		counter = counter->parent;

	return task_pid_nr_ns(p, counter->ns);
}

static void perf_counter_output(struct perf_counter *counter,
				int nmi, struct pt_regs *regs, u64 addr)
{
	int ret;
	u64 sample_type = counter->attr.sample_type;
	struct perf_output_handle handle;
	struct perf_event_header header;
	u64 ip;
	struct {
		u32 pid, tid;
	} tid_entry;
	struct {
		u64 id;
		u64 counter;
	} group_entry;
	struct perf_callchain_entry *callchain = NULL;
	int callchain_size = 0;
	u64 time;
	struct {
		u32 cpu, reserved;
	} cpu_entry;

	header.type = 0;
	header.size = sizeof(header);

	header.misc = PERF_EVENT_MISC_OVERFLOW;
	header.misc |= perf_misc_flags(regs);

	if (sample_type & PERF_SAMPLE_IP) {
		ip = perf_instruction_pointer(regs);
		header.type |= PERF_SAMPLE_IP;
		header.size += sizeof(ip);
	}

	if (sample_type & PERF_SAMPLE_TID) {
		/* namespace issues */
		tid_entry.pid = perf_counter_pid(counter, current);
		tid_entry.tid = perf_counter_tid(counter, current);

		header.type |= PERF_SAMPLE_TID;
		header.size += sizeof(tid_entry);
	}

	if (sample_type & PERF_SAMPLE_TIME) {
		/*
		 * Maybe do better on x86 and provide cpu_clock_nmi()
		 */
		time = sched_clock();

		header.type |= PERF_SAMPLE_TIME;
		header.size += sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_ADDR) {
		header.type |= PERF_SAMPLE_ADDR;
		header.size += sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_ID) {
		header.type |= PERF_SAMPLE_ID;
		header.size += sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_CPU) {
		header.type |= PERF_SAMPLE_CPU;
		header.size += sizeof(cpu_entry);

		cpu_entry.cpu = raw_smp_processor_id();
	}

	if (sample_type & PERF_SAMPLE_PERIOD) {
		header.type |= PERF_SAMPLE_PERIOD;
		header.size += sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_GROUP) {
		header.type |= PERF_SAMPLE_GROUP;
		header.size += sizeof(u64) +
			counter->nr_siblings * sizeof(group_entry);
	}

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		callchain = perf_callchain(regs);

		if (callchain) {
			callchain_size = (1 + callchain->nr) * sizeof(u64);

			header.type |= PERF_SAMPLE_CALLCHAIN;
			header.size += callchain_size;
		}
	}

	ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
	if (ret)
		return;

	perf_output_put(&handle, header);

	if (sample_type & PERF_SAMPLE_IP)
		perf_output_put(&handle, ip);

	if (sample_type & PERF_SAMPLE_TID)
		perf_output_put(&handle, tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		perf_output_put(&handle, time);

	if (sample_type & PERF_SAMPLE_ADDR)
		perf_output_put(&handle, addr);

	if (sample_type & PERF_SAMPLE_ID)
		perf_output_put(&handle, counter->id);

	if (sample_type & PERF_SAMPLE_CPU)
		perf_output_put(&handle, cpu_entry);

	if (sample_type & PERF_SAMPLE_PERIOD)
		perf_output_put(&handle, counter->hw.sample_period);

	/*
	 * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
	 */
	if (sample_type & PERF_SAMPLE_GROUP) {
		struct perf_counter *leader, *sub;
		u64 nr = counter->nr_siblings;

		perf_output_put(&handle, nr);

		leader = counter->group_leader;
		list_for_each_entry(sub, &leader->sibling_list, list_entry) {
			if (sub != counter)
				sub->pmu->read(sub);

			group_entry.id = sub->id;
			group_entry.counter = atomic64_read(&sub->count);

			perf_output_put(&handle, group_entry);
		}
	}

	if (callchain)
		perf_output_copy(&handle, callchain, callchain_size);

	perf_output_end(&handle);
}

/*
 * fork tracking
 */

struct perf_fork_event {
	struct task_struct	*task;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				ppid;
	} event;
};

static void perf_counter_fork_output(struct perf_counter *counter,
				     struct perf_fork_event *fork_event)
{
	struct perf_output_handle handle;
	int size = fork_event->event.header.size;
	struct task_struct *task = fork_event->task;
	int ret = perf_output_begin(&handle, counter, size, 0, 0);

	if (ret)
		return;

	fork_event->event.pid = perf_counter_pid(counter, task);
	fork_event->event.ppid = perf_counter_pid(counter, task->real_parent);

	perf_output_put(&handle, fork_event->event);
	perf_output_end(&handle);
}

static int perf_counter_fork_match(struct perf_counter *counter)
{
	if (counter->attr.comm || counter->attr.mmap)
		return 1;

	return 0;
}

static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
				  struct perf_fork_event *fork_event)
{
	struct perf_counter *counter;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
		if (perf_counter_fork_match(counter))
			perf_counter_fork_output(counter, fork_event);
	}
	rcu_read_unlock();
}

static void perf_counter_fork_event(struct perf_fork_event *fork_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;

	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_counter_fork_ctx(&cpuctx->ctx, fork_event);
	put_cpu_var(perf_cpu_context);

	rcu_read_lock();
	/*
	 * doesn't really matter which of the child contexts the
	 * events end up in.
	 */
	ctx = rcu_dereference(current->perf_counter_ctxp);
	if (ctx)
		perf_counter_fork_ctx(ctx, fork_event);
	rcu_read_unlock();
}

void perf_counter_fork(struct task_struct *task)
{
	struct perf_fork_event fork_event;

	if (!atomic_read(&nr_comm_counters) &&
	    !atomic_read(&nr_mmap_counters))
		return;

	fork_event = (struct perf_fork_event){
		.task	= task,
		.event  = {
			.header = {
				.type = PERF_EVENT_FORK,
				.size = sizeof(fork_event.event),
			},
		},
	};

	perf_counter_fork_event(&fork_event);
}

/*
 * comm tracking
 */

struct perf_comm_event {
	struct task_struct	*task;
	char			*comm;
	int			comm_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
	} event;
};

static void perf_counter_comm_output(struct perf_counter *counter,
				     struct perf_comm_event *comm_event)
{
	struct perf_output_handle handle;
	int size = comm_event->event.header.size;
	int ret = perf_output_begin(&handle, counter, size, 0, 0);

	if (ret)
		return;

	comm_event->event.pid = perf_counter_pid(counter, comm_event->task);
	comm_event->event.tid = perf_counter_tid(counter, comm_event->task);

	perf_output_put(&handle, comm_event->event);
	perf_output_copy(&handle, comm_event->comm,
				   comm_event->comm_size);
	perf_output_end(&handle);
}

static int perf_counter_comm_match(struct perf_counter *counter)
{
	if (counter->attr.comm)
		return 1;

	return 0;
}

static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
				  struct perf_comm_event *comm_event)
{
	struct perf_counter *counter;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
		if (perf_counter_comm_match(counter))
			perf_counter_comm_output(counter, comm_event);
	}
	rcu_read_unlock();
}

static void perf_counter_comm_event(struct perf_comm_event *comm_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;
	unsigned int size;
	char *comm = comm_event->task->comm;

	size = ALIGN(strlen(comm)+1, sizeof(u64));

	comm_event->comm = comm;
	comm_event->comm_size = size;

	comm_event->event.header.size = sizeof(comm_event->event) + size;

	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
	put_cpu_var(perf_cpu_context);

	rcu_read_lock();
	/*
	 * doesn't really matter which of the child contexts the
	 * events end up in.
	 */
	ctx = rcu_dereference(current->perf_counter_ctxp);
	if (ctx)
		perf_counter_comm_ctx(ctx, comm_event);
	rcu_read_unlock();
}

void perf_counter_comm(struct task_struct *task)
{
	struct perf_comm_event comm_event;

	if (!atomic_read(&nr_comm_counters))
		return;

	comm_event = (struct perf_comm_event){
		.task	= task,
		.event  = {
			.header = { .type = PERF_EVENT_COMM, },
		},
	};

	perf_counter_comm_event(&comm_event);
}

/*
 * mmap tracking
 */

struct perf_mmap_event {
	struct vm_area_struct	*vma;

	const char		*file_name;
	int			file_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
		u64				start;
		u64				len;
		u64				pgoff;
	} event;
};

static void perf_counter_mmap_output(struct perf_counter *counter,
				     struct perf_mmap_event *mmap_event)
{
	struct perf_output_handle handle;
	int size = mmap_event->event.header.size;
	int ret = perf_output_begin(&handle, counter, size, 0, 0);

	if (ret)
		return;

	mmap_event->event.pid = perf_counter_pid(counter, current);
	mmap_event->event.tid = perf_counter_tid(counter, current);

	perf_output_put(&handle, mmap_event->event);
	perf_output_copy(&handle, mmap_event->file_name,
				   mmap_event->file_size);
	perf_output_end(&handle);
}

static int perf_counter_mmap_match(struct perf_counter *counter,
				   struct perf_mmap_event *mmap_event)
{
	if (counter->attr.mmap)
		return 1;

	return 0;
}

static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
				  struct perf_mmap_event *mmap_event)
{
	struct perf_counter *counter;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
		if (perf_counter_mmap_match(counter, mmap_event))
			perf_counter_mmap_output(counter, mmap_event);
	}
	rcu_read_unlock();
}

static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;
	struct vm_area_struct *vma = mmap_event->vma;
	struct file *file = vma->vm_file;
	unsigned int size;
	char tmp[16];
	char *buf = NULL;
	const char *name;

	if (file) {
		buf = kzalloc(PATH_MAX, GFP_KERNEL);
		if (!buf) {
			name = strncpy(tmp, "//enomem", sizeof(tmp));
			goto got_name;
		}
		name = d_path(&file->f_path, buf, PATH_MAX);
		if (IS_ERR(name)) {
			name = strncpy(tmp, "//toolong", sizeof(tmp));
			goto got_name;
		}
	} else {
		name = arch_vma_name(mmap_event->vma);
		if (name)
			goto got_name;

		if (!vma->vm_mm) {
			name = strncpy(tmp, "[vdso]", sizeof(tmp));
			goto got_name;
		}

		name = strncpy(tmp, "//anon", sizeof(tmp));
		goto got_name;
	}

got_name:
	size = ALIGN(strlen(name)+1, sizeof(u64));

	mmap_event->file_name = name;
	mmap_event->file_size = size;

	mmap_event->event.header.size = sizeof(mmap_event->event) + size;

	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
	put_cpu_var(perf_cpu_context);

	rcu_read_lock();
	/*
	 * doesn't really matter which of the child contexts the
	 * events end up in.
	 */
	ctx = rcu_dereference(current->perf_counter_ctxp);
	if (ctx)
		perf_counter_mmap_ctx(ctx, mmap_event);
	rcu_read_unlock();

	kfree(buf);
}

void __perf_counter_mmap(struct vm_area_struct *vma)
{
	struct perf_mmap_event mmap_event;

	if (!atomic_read(&nr_mmap_counters))
		return;

	mmap_event = (struct perf_mmap_event){
		.vma	= vma,
		.event  = {
			.header = { .type = PERF_EVENT_MMAP, },
			.start  = vma->vm_start,
			.len    = vma->vm_end - vma->vm_start,
			.pgoff  = vma->vm_pgoff,
		},
	};

	perf_counter_mmap_event(&mmap_event);
}

/*
 * Log sample_period changes so that analyzing tools can re-normalize the
 * event flow.
 */

static void perf_log_period(struct perf_counter *counter, u64 period)
{
	struct perf_output_handle handle;
	int ret;

	struct {
		struct perf_event_header	header;
		u64				time;
		u64				id;
		u64				period;
	} freq_event = {
		.header = {
			.type = PERF_EVENT_PERIOD,
			.misc = 0,
			.size = sizeof(freq_event),
		},
		.time = sched_clock(),
		.id = counter->id,
		.period = period,
	};

	if (counter->hw.sample_period == period)
		return;

	ret = perf_output_begin(&handle, counter, sizeof(freq_event), 0, 0);
	if (ret)
		return;

	perf_output_put(&handle, freq_event);
	perf_output_end(&handle);
}

/*
 * IRQ throttle logging
 */

static void perf_log_throttle(struct perf_counter *counter, int enable)
{
	struct perf_output_handle handle;
	int ret;

	struct {
		struct perf_event_header	header;
		u64				time;
	} throttle_event = {
		.header = {
			.type = PERF_EVENT_THROTTLE + 1,
			.misc = 0,
			.size = sizeof(throttle_event),
		},
		.time = sched_clock(),
	};

	ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
	if (ret)
		return;

	perf_output_put(&handle, throttle_event);
	perf_output_end(&handle);
}

/*
 * Generic counter overflow handling.
 */

int perf_counter_overflow(struct perf_counter *counter,
			  int nmi, struct pt_regs *regs, u64 addr)
{
	int events = atomic_read(&counter->event_limit);
	int throttle = counter->pmu->unthrottle != NULL;
	int ret = 0;

	if (!throttle) {
		counter->hw.interrupts++;
	} else {
		if (counter->hw.interrupts != MAX_INTERRUPTS) {
			counter->hw.interrupts++;
			if (HZ*counter->hw.interrupts > (u64)sysctl_perf_counter_limit) {
				counter->hw.interrupts = MAX_INTERRUPTS;
				perf_log_throttle(counter, 0);
				ret = 1;
			}
		} else {
			/*
			 * Keep re-disabling the counter even though we
			 * disabled it on the previous pass - just in case
			 * we raced with a sched-in and the counter got
			 * enabled again:
			 */
			ret = 1;
		}
	}

	/*
	 * XXX event_limit might not quite work as expected on inherited
	 * counters
	 */

	counter->pending_kill = POLL_IN;
	if (events && atomic_dec_and_test(&counter->event_limit)) {
		ret = 1;
		counter->pending_kill = POLL_HUP;
		if (nmi) {
			counter->pending_disable = 1;
			perf_pending_queue(&counter->pending,
					   perf_pending_counter);
		} else
			perf_counter_disable(counter);
	}

	perf_counter_output(counter, nmi, regs, addr);
	return ret;
}
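
/*
 * Worked example of the throttle test above, assuming HZ == 1000 and the
 * default sysctl_perf_counter_limit of 100000: interrupt number 101 makes
 * 1000 * 101 > 100000 true, so the counter is marked MAX_INTERRUPTS, a
 * throttle event is logged, and the handler returns 1 so the interrupt
 * source stays disabled until unthrottling.
 */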

/*
 * Generic software counter infrastructure
 */

static void perf_swcounter_update(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	u64 prev, now;
	s64 delta;

again:
	prev = atomic64_read(&hwc->prev_count);
	now = atomic64_read(&hwc->count);
	if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);
}

static void perf_swcounter_set_period(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;

	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_add(period, &hwc->period_left);
	}

	atomic64_set(&hwc->prev_count, -left);
	atomic64_set(&hwc->count, -left);
}
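
/*
 * Worked example of the period logic above: with sample_period == 100 and
 * period_left == 30 after an update, neither branch fires and the counter
 * restarts at -30, i.e. 30 events remain before the next overflow.  With
 * period_left == -20 (we overshot), the second branch sets it to 80, so
 * the overshoot is credited against the next period.
 */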

static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
{
	enum hrtimer_restart ret = HRTIMER_RESTART;
	struct perf_counter *counter;
	struct pt_regs *regs;
	u64 period;

	counter	= container_of(hrtimer, struct perf_counter, hw.hrtimer);
	counter->pmu->read(counter);

	regs = get_irq_regs();
	/*
	 * In case we exclude kernel IPs or are somehow not in interrupt
	 * context, provide the next best thing, the user IP.
	 */
	if ((counter->attr.exclude_kernel || !regs) &&
			!counter->attr.exclude_user)
		regs = task_pt_regs(current);

	if (regs) {
		if (perf_counter_overflow(counter, 0, regs, 0))
			ret = HRTIMER_NORESTART;
	}

	period = max_t(u64, 10000, counter->hw.sample_period);
	hrtimer_forward_now(hrtimer, ns_to_ktime(period));

	return ret;
}

static void perf_swcounter_overflow(struct perf_counter *counter,
				    int nmi, struct pt_regs *regs, u64 addr)
{
	perf_swcounter_update(counter);
	perf_swcounter_set_period(counter);
	if (perf_counter_overflow(counter, nmi, regs, addr))
		/* soft-disable the counter */
		;
}

static int perf_swcounter_is_counting(struct perf_counter *counter)
{
	struct perf_counter_context *ctx;
	unsigned long flags;
	int count;

	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		return 1;

	if (counter->state != PERF_COUNTER_STATE_INACTIVE)
		return 0;

	/*
	 * If the counter is inactive, it could be just because
	 * its task is scheduled out, or because it's in a group
	 * which could not go on the PMU.  We want to count in
	 * the first case but not the second.  If the context is
	 * currently active then an inactive software counter must
	 * be the second case.  If it's not currently active then
	 * we need to know whether the counter was active when the
	 * context was last active, which we can determine by
	 * comparing counter->tstamp_stopped with ctx->time.
	 *
	 * We are within an RCU read-side critical section,
	 * which protects the existence of *ctx.
	 */
	ctx = counter->ctx;
	spin_lock_irqsave(&ctx->lock, flags);
	count = 1;
	/* Re-check state now we have the lock */
	if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
	    counter->ctx->is_active ||
	    counter->tstamp_stopped < ctx->time)
		count = 0;
	spin_unlock_irqrestore(&ctx->lock, flags);
	return count;
}

static int perf_swcounter_match(struct perf_counter *counter,
				enum perf_event_types type,
				u32 event, struct pt_regs *regs)
{
	if (!perf_swcounter_is_counting(counter))
		return 0;

	if (counter->attr.type != type)
		return 0;
	if (counter->attr.config != event)
		return 0;

	if (regs) {
		if (counter->attr.exclude_user && user_mode(regs))
			return 0;

		if (counter->attr.exclude_kernel && !user_mode(regs))
			return 0;
	}

	return 1;
}

static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
			       int nmi, struct pt_regs *regs, u64 addr)
{
	int neg = atomic64_add_negative(nr, &counter->hw.count);

	if (counter->hw.sample_period && !neg && regs)
		perf_swcounter_overflow(counter, nmi, regs, addr);
}

static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
				     enum perf_event_types type, u32 event,
				     u64 nr, int nmi, struct pt_regs *regs,
				     u64 addr)
{
	struct perf_counter *counter;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
		if (perf_swcounter_match(counter, type, event, regs))
			perf_swcounter_add(counter, nr, nmi, regs, addr);
	}
	rcu_read_unlock();
}

static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
{
	if (in_nmi())
		return &cpuctx->recursion[3];

	if (in_irq())
		return &cpuctx->recursion[2];

	if (in_softirq())
		return &cpuctx->recursion[1];

	return &cpuctx->recursion[0];
}

static void __perf_swcounter_event(enum perf_event_types type, u32 event,
				   u64 nr, int nmi, struct pt_regs *regs,
				   u64 addr)
{
	struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
	int *recursion = perf_swcounter_recursion_context(cpuctx);
	struct perf_counter_context *ctx;

	if (*recursion)
		goto out;

	(*recursion)++;
	barrier();

	perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
				 nr, nmi, regs, addr);
	rcu_read_lock();
	/*
	 * doesn't really matter which of the child contexts the
	 * events end up in.
	 */
	ctx = rcu_dereference(current->perf_counter_ctxp);
	if (ctx)
		perf_swcounter_ctx_event(ctx, type, event, nr, nmi, regs, addr);
	rcu_read_unlock();

	barrier();
	(*recursion)--;

out:
	put_cpu_var(perf_cpu_context);
}

void
perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
{
	__perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs, addr);
}

static void perf_swcounter_read(struct perf_counter *counter)
{
	perf_swcounter_update(counter);
}

static int perf_swcounter_enable(struct perf_counter *counter)
{
	perf_swcounter_set_period(counter);
	return 0;
}

static void perf_swcounter_disable(struct perf_counter *counter)
{
	perf_swcounter_update(counter);
}

static const struct pmu perf_ops_generic = {
	.enable		= perf_swcounter_enable,
	.disable	= perf_swcounter_disable,
	.read		= perf_swcounter_read,
};

/*
 * Software counter: cpu wall time clock
 */

static void cpu_clock_perf_counter_update(struct perf_counter *counter)
{
	int cpu = raw_smp_processor_id();
	s64 prev;
	u64 now;

	now = cpu_clock(cpu);
	prev = atomic64_read(&counter->hw.prev_count);
	atomic64_set(&counter->hw.prev_count, now);
	atomic64_add(now - prev, &counter->count);
}

static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int cpu = raw_smp_processor_id();

	atomic64_set(&hwc->prev_count, cpu_clock(cpu));
	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swcounter_hrtimer;
	if (hwc->sample_period) {
		u64 period = max_t(u64, 10000, hwc->sample_period);
		__hrtimer_start_range_ns(&hwc->hrtimer,
				ns_to_ktime(period), 0,
				HRTIMER_MODE_REL, 0);
	}

	return 0;
}

static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
{
	if (counter->hw.sample_period)
		hrtimer_cancel(&counter->hw.hrtimer);
	cpu_clock_perf_counter_update(counter);
}

static void cpu_clock_perf_counter_read(struct perf_counter *counter)
{
	cpu_clock_perf_counter_update(counter);
}

static const struct pmu perf_ops_cpu_clock = {
	.enable		= cpu_clock_perf_counter_enable,
	.disable	= cpu_clock_perf_counter_disable,
	.read		= cpu_clock_perf_counter_read,
};

/*
 * Software counter: task time clock
 */

static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
{
	u64 prev;
	s64 delta;

	prev = atomic64_xchg(&counter->hw.prev_count, now);
	delta = now - prev;
	atomic64_add(delta, &counter->count);
}

static int task_clock_perf_counter_enable(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	u64 now;

	now = counter->ctx->time;

	atomic64_set(&hwc->prev_count, now);
	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swcounter_hrtimer;
	if (hwc->sample_period) {
		u64 period = max_t(u64, 10000, hwc->sample_period);
		__hrtimer_start_range_ns(&hwc->hrtimer,
				ns_to_ktime(period), 0,
				HRTIMER_MODE_REL, 0);
	}

	return 0;
}

static void task_clock_perf_counter_disable(struct perf_counter *counter)
{
	if (counter->hw.sample_period)
		hrtimer_cancel(&counter->hw.hrtimer);
	task_clock_perf_counter_update(counter, counter->ctx->time);
}

static void task_clock_perf_counter_read(struct perf_counter *counter)
{
	u64 time;

	if (!in_nmi()) {
		update_context_time(counter->ctx);
		time = counter->ctx->time;
	} else {
		u64 now = perf_clock();
		u64 delta = now - counter->ctx->timestamp;
		time = counter->ctx->time + delta;
	}

	task_clock_perf_counter_update(counter, time);
}

static const struct pmu perf_ops_task_clock = {
	.enable		= task_clock_perf_counter_enable,
	.disable	= task_clock_perf_counter_disable,
	.read		= task_clock_perf_counter_read,
};

/*
 * Software counter: cpu migrations
 */
void perf_counter_task_migration(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx;

	perf_swcounter_ctx_event(&cpuctx->ctx, PERF_TYPE_SOFTWARE,
				 PERF_COUNT_CPU_MIGRATIONS,
				 1, 1, NULL, 0);

	ctx = perf_pin_task_context(task);
	if (ctx) {
		perf_swcounter_ctx_event(ctx, PERF_TYPE_SOFTWARE,
					 PERF_COUNT_CPU_MIGRATIONS,
					 1, 1, NULL, 0);
		perf_unpin_context(ctx);
	}
}

#ifdef CONFIG_EVENT_PROFILE
void perf_tpcounter_event(int event_id)
{
	struct pt_regs *regs = get_irq_regs();

	if (!regs)
		regs = task_pt_regs(current);

	__perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs, 0);
}
EXPORT_SYMBOL_GPL(perf_tpcounter_event);

extern int ftrace_profile_enable(int);
extern void ftrace_profile_disable(int);

static void tp_perf_counter_destroy(struct perf_counter *counter)
{
	ftrace_profile_disable(perf_event_id(&counter->attr));
}

static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
{
	int event_id = perf_event_id(&counter->attr);
	int ret;

	ret = ftrace_profile_enable(event_id);
	if (ret)
		return NULL;

	counter->destroy = tp_perf_counter_destroy;
	counter->hw.sample_period = counter->attr.sample_period;

	return &perf_ops_generic;
}
#else
static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
{
	return NULL;
}
#endif

static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
{
	const struct pmu *pmu = NULL;

	/*
	 * Software counters (currently) can't in general distinguish
	 * between user, kernel and hypervisor events.
	 * However, context switches and cpu migrations are considered
	 * to be kernel events, and page faults are never hypervisor
	 * events.
	 */
	switch (counter->attr.config) {
	case PERF_COUNT_CPU_CLOCK:
		pmu = &perf_ops_cpu_clock;

		break;
	case PERF_COUNT_TASK_CLOCK:
		/*
		 * If the user instantiates this as a per-cpu counter,
		 * use the cpu_clock counter instead.
		 */
		if (counter->ctx->task)
			pmu = &perf_ops_task_clock;
		else
			pmu = &perf_ops_cpu_clock;

		break;
	case PERF_COUNT_PAGE_FAULTS:
	case PERF_COUNT_PAGE_FAULTS_MIN:
	case PERF_COUNT_PAGE_FAULTS_MAJ:
	case PERF_COUNT_CONTEXT_SWITCHES:
	case PERF_COUNT_CPU_MIGRATIONS:
		pmu = &perf_ops_generic;
		break;
	}

	return pmu;
}

/*
 * Allocate and initialize a counter structure
 */
static struct perf_counter *
perf_counter_alloc(struct perf_counter_attr *attr,
		   int cpu,
		   struct perf_counter_context *ctx,
		   struct perf_counter *group_leader,
		   gfp_t gfpflags)
{
	const struct pmu *pmu;
	struct perf_counter *counter;
	struct hw_perf_counter *hwc;
	long err;

	counter = kzalloc(sizeof(*counter), gfpflags);
	if (!counter)
		return ERR_PTR(-ENOMEM);

	/*
	 * Single counters are their own group leaders, with an
	 * empty sibling list:
	 */
	if (!group_leader)
		group_leader = counter;

	mutex_init(&counter->child_mutex);
	INIT_LIST_HEAD(&counter->child_list);

	INIT_LIST_HEAD(&counter->list_entry);
	INIT_LIST_HEAD(&counter->event_entry);
	INIT_LIST_HEAD(&counter->sibling_list);
	init_waitqueue_head(&counter->waitq);

	mutex_init(&counter->mmap_mutex);

	counter->cpu		= cpu;
	counter->attr		= *attr;
	counter->group_leader	= group_leader;
	counter->pmu		= NULL;
	counter->ctx		= ctx;
	counter->oncpu		= -1;

	counter->ns		= get_pid_ns(current->nsproxy->pid_ns);
	counter->id		= atomic64_inc_return(&perf_counter_id);

	counter->state		= PERF_COUNTER_STATE_INACTIVE;

	if (attr->disabled)
		counter->state = PERF_COUNTER_STATE_OFF;

	pmu = NULL;

	hwc = &counter->hw;
	if (attr->freq && attr->sample_freq)
		hwc->sample_period = div64_u64(TICK_NSEC, attr->sample_freq);
	else
		hwc->sample_period = attr->sample_period;

	/*
	 * we currently do not support PERF_SAMPLE_GROUP on inherited counters
	 */
	if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
		goto done;

	if (attr->type == PERF_TYPE_RAW) {
		pmu = hw_perf_counter_init(counter);
		goto done;
	}

	switch (attr->type) {
	case PERF_TYPE_HARDWARE:
		pmu = hw_perf_counter_init(counter);
		break;

	case PERF_TYPE_SOFTWARE:
		pmu = sw_perf_counter_init(counter);
		break;

	case PERF_TYPE_TRACEPOINT:
		pmu = tp_perf_counter_init(counter);
		break;
	}
done:
	err = 0;
	if (!pmu)
		err = -EINVAL;
	else if (IS_ERR(pmu))
		err = PTR_ERR(pmu);

	if (err) {
		if (counter->ns)
			put_pid_ns(counter->ns);
		kfree(counter);
		return ERR_PTR(err);
	}

	counter->pmu = pmu;

	atomic_inc(&nr_counters);
	if (counter->attr.mmap)
		atomic_inc(&nr_mmap_counters);
	if (counter->attr.comm)
		atomic_inc(&nr_comm_counters);

	return counter;
}

/**
 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
 *
 * @attr_uptr:	event type attributes for monitoring/sampling
 * @pid:		target pid
 * @cpu:		target cpu
 * @group_fd:		group leader counter fd
 */
SYSCALL_DEFINE5(perf_counter_open,
		const struct perf_counter_attr __user *, attr_uptr,
		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
	struct perf_counter *counter, *group_leader;
	struct perf_counter_attr attr;
	struct perf_counter_context *ctx;
	struct file *counter_file = NULL;
	struct file *group_file = NULL;
	int fput_needed = 0;
	int fput_needed2 = 0;
	int ret;

	/* for future expandability... */
	if (flags)
		return -EINVAL;

	if (copy_from_user(&attr, attr_uptr, sizeof(attr)) != 0)
		return -EFAULT;

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pid, cpu);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/*
	 * Look up the group leader (we will attach this counter to it):
	 */
	group_leader = NULL;
	if (group_fd != -1) {
		ret = -EINVAL;
		group_file = fget_light(group_fd, &fput_needed);
		if (!group_file)
			goto err_put_context;
		if (group_file->f_op != &perf_fops)
			goto err_put_context;

		group_leader = group_file->private_data;
		/*
		 * Do not allow a recursive hierarchy (this new sibling
		 * becoming part of another group-sibling):
		 */
		if (group_leader->group_leader != group_leader)
			goto err_put_context;
		/*
		 * Do not allow attaching to a group in a different
		 * task or CPU context:
		 */
		if (group_leader->ctx != ctx)
			goto err_put_context;
		/*
		 * Only a group leader can be exclusive or pinned
		 */
		if (attr.exclusive || attr.pinned)
			goto err_put_context;
	}

	counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
				     GFP_KERNEL);
	ret = PTR_ERR(counter);
	if (IS_ERR(counter))
		goto err_put_context;

	ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
	if (ret < 0)
		goto err_free_put_context;

	counter_file = fget_light(ret, &fput_needed2);
	if (!counter_file)
		goto err_free_put_context;

	counter->filp = counter_file;
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, counter, cpu);
	++ctx->generation;
	mutex_unlock(&ctx->mutex);

	counter->owner = current;
	get_task_struct(current);
	mutex_lock(&current->perf_counter_mutex);
	list_add_tail(&counter->owner_entry, &current->perf_counter_list);
	mutex_unlock(&current->perf_counter_mutex);

	fput_light(counter_file, fput_needed2);

out_fput:
	fput_light(group_file, fput_needed);

	return ret;

err_free_put_context:
	kfree(counter);

err_put_context:
	put_ctx(ctx);

	goto out_fput;
}
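
/*
 * Illustrative sketch (not kernel code): there is no libc wrapper for
 * this syscall, so a user-space caller would go through syscall(2);
 * "__NR_perf_counter_open" is the arch-specific syscall number:
 *
 *	struct perf_counter_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_CPU_CYCLES,
 *	};
 *	int fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
 *
 * with pid 0 (the calling task), cpu -1 (any cpu), group_fd -1 (no group)
 * and flags 0.  The returned fd can then be read(), poll()ed or mmap()ed.
 */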

/*
 * inherit a counter from parent task to child task:
 */
static struct perf_counter *
inherit_counter(struct perf_counter *parent_counter,
	      struct task_struct *parent,
	      struct perf_counter_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_counter *group_leader,
	      struct perf_counter_context *child_ctx)
{
	struct perf_counter *child_counter;

	/*
	 * Instead of creating recursive hierarchies of counters,
	 * we link inherited counters back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_counter->parent)
		parent_counter = parent_counter->parent;

	child_counter = perf_counter_alloc(&parent_counter->attr,
					   parent_counter->cpu, child_ctx,
					   group_leader, GFP_KERNEL);
	if (IS_ERR(child_counter))
		return child_counter;
	get_ctx(child_ctx);

	/*
	 * Make the child state follow the state of the parent counter,
	 * not its attr.disabled bit.  We hold the parent's mutex,
	 * so we won't race with perf_counter_{en, dis}able_family.
	 */
	if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
		child_counter->state = PERF_COUNTER_STATE_INACTIVE;
	else
		child_counter->state = PERF_COUNTER_STATE_OFF;

	/*
	 * Link it up in the child's context:
	 */
	add_counter_to_ctx(child_counter, child_ctx);

	child_counter->parent = parent_counter;
	/*
	 * inherit into child's child as well:
	 */
	child_counter->attr.inherit = 1;

	/*
	 * Get a reference to the parent filp - we will fput it
	 * when the child counter exits. This is safe to do because
	 * we are in the parent and we know that the filp still
	 * exists and has a nonzero count:
	 */
	atomic_long_inc(&parent_counter->filp->f_count);

	/*
	 * Link this into the parent counter's child list
	 */
	WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
	mutex_lock(&parent_counter->child_mutex);
	list_add_tail(&child_counter->child_list, &parent_counter->child_list);
	mutex_unlock(&parent_counter->child_mutex);

	return child_counter;
}

static int inherit_group(struct perf_counter *parent_counter,
	      struct task_struct *parent,
	      struct perf_counter_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_counter_context *child_ctx)
{
	struct perf_counter *leader;
	struct perf_counter *sub;
	struct perf_counter *child_ctr;

	leader = inherit_counter(parent_counter, parent, parent_ctx,
				 child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
		child_ctr = inherit_counter(sub, parent, parent_ctx,
					    child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}

static void sync_child_counter(struct perf_counter *child_counter,
			       struct perf_counter *parent_counter)
{
	u64 child_val;

	child_val = atomic64_read(&child_counter->count);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_counter->count);
	atomic64_add(child_counter->total_time_enabled,
		     &parent_counter->child_total_time_enabled);
	atomic64_add(child_counter->total_time_running,
		     &parent_counter->child_total_time_running);

	/*
	 * Remove this counter from the parent's list
	 */
	WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
	mutex_lock(&parent_counter->child_mutex);
	list_del_init(&child_counter->child_list);
	mutex_unlock(&parent_counter->child_mutex);

	/*
	 * Release the parent counter, if this was the last
	 * reference to it.
	 */
	fput(parent_counter->filp);
}

static void
__perf_counter_exit_task(struct perf_counter *child_counter,
			 struct perf_counter_context *child_ctx)
{
	struct perf_counter *parent_counter;

	update_counter_times(child_counter);
	perf_counter_remove_from_context(child_counter);

	parent_counter = child_counter->parent;
	/*
	 * It can happen that the parent exits first, and has counters
	 * that are still around due to the child reference. These
	 * counters need to be zapped - but otherwise linger.
	 */
	if (parent_counter) {
		sync_child_counter(child_counter, parent_counter);
		free_counter(child_counter);
	}
}

/*
3795
 * When a child task exits, feed back counter values to parent counters.
3796 3797 3798 3799 3800
 */
void perf_counter_exit_task(struct task_struct *child)
{
	struct perf_counter *child_counter, *tmp;
	struct perf_counter_context *child_ctx;
3801
	unsigned long flags;
3802

3803
	if (likely(!child->perf_counter_ctxp))
3804 3805
		return;

3806
	local_irq_save(flags);
3807 3808 3809 3810 3811 3812 3813
	/*
	 * We can't reschedule here because interrupts are disabled,
	 * and either child is current or it is a task that can't be
	 * scheduled, so we are now safe from rescheduling changing
	 * our context.
	 */
	child_ctx = child->perf_counter_ctxp;
	__perf_counter_task_sched_out(child_ctx);

	/*
	 * Take the context lock here so that if find_get_context is
	 * reading child->perf_counter_ctxp, we wait until it has
	 * incremented the context's refcount before we do put_ctx below.
	 */
	spin_lock(&child_ctx->lock);
	child->perf_counter_ctxp = NULL;
	if (child_ctx->parent_ctx) {
		/*
		 * This context is a clone; unclone it so it can't get
		 * swapped to another process while we're removing all
		 * the counters from it.
		 */
		put_ctx(child_ctx->parent_ctx);
		child_ctx->parent_ctx = NULL;
	}
	spin_unlock(&child_ctx->lock);
	local_irq_restore(flags);

	mutex_lock(&child_ctx->mutex);

again:
	list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
				 list_entry)
		__perf_counter_exit_task(child_counter, child_ctx);

	/*
	 * If the last counter was a group counter, it will have appended all
	 * its siblings to the list, but we obtained 'tmp' before that which
	 * will still point to the list head terminating the iteration.
	 */
	if (!list_empty(&child_ctx->counter_list))
		goto again;

	mutex_unlock(&child_ctx->mutex);

	put_ctx(child_ctx);
}

/*
 * Free an unexposed, unused context, as created by the inheritance
 * code in perf_counter_init_task() below; used by fork() in the
 * error case.
 */
void perf_counter_free_task(struct task_struct *task)
{
	struct perf_counter_context *ctx = task->perf_counter_ctxp;
	struct perf_counter *counter, *tmp;

	if (!ctx)
		return;

	mutex_lock(&ctx->mutex);
again:
	list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) {
		struct perf_counter *parent = counter->parent;

		if (WARN_ON_ONCE(!parent))
			continue;

		mutex_lock(&parent->child_mutex);
		list_del_init(&counter->child_list);
		mutex_unlock(&parent->child_mutex);

		fput(parent->filp);

		list_del_counter(counter, ctx);
		free_counter(counter);
	}

	if (!list_empty(&ctx->counter_list))
		goto again;

	mutex_unlock(&ctx->mutex);

	put_ctx(ctx);
}

/*
 * Initialize the perf_counter context in task_struct
 */
int perf_counter_init_task(struct task_struct *child)
{
	struct perf_counter_context *child_ctx, *parent_ctx;
	struct perf_counter_context *cloned_ctx;
	struct perf_counter *counter;
	struct task_struct *parent = current;
	int inherited_all = 1;
	int ret = 0;

	child->perf_counter_ctxp = NULL;

	mutex_init(&child->perf_counter_mutex);
	INIT_LIST_HEAD(&child->perf_counter_list);

	if (likely(!parent->perf_counter_ctxp))
		return 0;

	/*
	 * This is executed from the parent task context, so inherit
	 * counters that have been marked for cloning.
	 * First allocate and initialize a context for the child.
	 */

	child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
	if (!child_ctx)
		return -ENOMEM;

	__perf_counter_init_context(child_ctx, child);
	child->perf_counter_ctxp = child_ctx;
	get_task_struct(child);

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent);

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) {
		if (counter != counter->group_leader)
			continue;

		if (!counter->attr.inherit) {
			inherited_all = 0;
			continue;
		}

		ret = inherit_group(counter, parent, parent_ctx,
				    child, child_ctx);
		if (ret) {
			inherited_all = 0;
			break;
		}
	}

	if (inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 * Note that if the parent is a clone, it could get
		 * uncloned at any point, but that doesn't matter
		 * because the list of counters and the generation
		 * count can't have changed since we took the mutex.
		 */
		cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);

	return ret;
}

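/*
 * Set up the per-CPU counter context and the per-CPU counter
 * reservation for a CPU that is coming online.
 */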
static void __cpuinit perf_counter_init_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = &per_cpu(perf_cpu_context, cpu);
	__perf_counter_init_context(&cpuctx->ctx, NULL);

	spin_lock(&perf_resource_lock);
	cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
	spin_unlock(&perf_resource_lock);

	hw_perf_counter_setup(cpu);
}

#ifdef CONFIG_HOTPLUG_CPU
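/*
 * Runs on the CPU that is going offline: remove all counters from its
 * per-CPU context.
 */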
static void __perf_counter_exit_cpu(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = &cpuctx->ctx;
	struct perf_counter *counter, *tmp;

	list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
		__perf_counter_remove_from_context(counter);
}

static void perf_counter_exit_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &cpuctx->ctx;

	mutex_lock(&ctx->mutex);
	smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
	mutex_unlock(&ctx->mutex);
}
#else
static inline void perf_counter_exit_cpu(int cpu) { }
#endif

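/*
 * CPU hotplug callback: set up the per-CPU context on CPU_UP_PREPARE
 * and tear it down again on CPU_DOWN_PREPARE.
 */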
static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		perf_counter_init_cpu(cpu);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		perf_counter_exit_cpu(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
static struct notifier_block __cpuinitdata perf_cpu_nb = {
	.notifier_call		= perf_cpu_notify,
	.priority		= 20,
};

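/*
 * Boot-time initialization: set up the boot CPU's context and register
 * the hotplug notifier for the remaining CPUs.
 */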
void __init perf_counter_init(void)
{
	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	register_cpu_notifier(&perf_cpu_nb);
}

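/*
 * sysfs knob: number of counters reserved for per-CPU use on each CPU.
 * With the "perf_counters" attribute group registered on the cpu sysdev
 * class below, this would typically appear as
 * /sys/devices/system/cpu/perf_counters/reserve_percpu.
 */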
static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%d\n", perf_reserved_percpu);
}

static ssize_t
perf_set_reserve_percpu(struct sysdev_class *class,
			const char *buf,
			size_t count)
{
	struct perf_cpu_context *cpuctx;
	unsigned long val;
	int err, cpu, mpt;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > perf_max_counters)
		return -EINVAL;

	spin_lock(&perf_resource_lock);
	perf_reserved_percpu = val;
	for_each_online_cpu(cpu) {
		cpuctx = &per_cpu(perf_cpu_context, cpu);
		spin_lock_irq(&cpuctx->ctx.lock);
		mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
			  perf_max_counters - perf_reserved_percpu);
		cpuctx->max_pertask = mpt;
		spin_unlock_irq(&cpuctx->ctx.lock);
	}
	spin_unlock(&perf_resource_lock);

	return count;
}

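/*
 * sysfs knob: whether the per-CPU counter reservation may be
 * overcommitted (0 or 1); typically exposed as
 * /sys/devices/system/cpu/perf_counters/overcommit.
 */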
static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%d\n", perf_overcommit);
}

static ssize_t
perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
{
	unsigned long val;
	int err;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > 1)
		return -EINVAL;

	spin_lock(&perf_resource_lock);
	perf_overcommit = val;
	spin_unlock(&perf_resource_lock);

	return count;
}

static SYSDEV_CLASS_ATTR(
				reserve_percpu,
				0644,
				perf_show_reserve_percpu,
				perf_set_reserve_percpu
			);

static SYSDEV_CLASS_ATTR(
				overcommit,
				0644,
				perf_show_overcommit,
				perf_set_overcommit
			);

static struct attribute *perfclass_attrs[] = {
	&attr_reserve_percpu.attr,
	&attr_overcommit.attr,
	NULL
};

static struct attribute_group perfclass_attr_group = {
	.attrs			= perfclass_attrs,
	.name			= "perf_counters",
};

static int __init perf_counter_sysfs_init(void)
{
	return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
				  &perfclass_attr_group);
}
device_initcall(perf_counter_sysfs_init);