/*
 * Performance counter core code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright    2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/vmstat.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_counter.h>

#include <asm/irq_regs.h>

/*
 * Each CPU has a list of per CPU counters:
 */
DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

int perf_max_counters __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

static atomic_t nr_counters __read_mostly;
static atomic_t nr_mmap_counters __read_mostly;
static atomic_t nr_comm_counters __read_mostly;

/*
 * perf counter paranoia level:
 *  0 - not paranoid
 *  1 - disallow cpu counters to unpriv
 *  2 - disallow kernel profiling to unpriv
 */
int sysctl_perf_counter_paranoid __read_mostly;

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_counter_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_counter_paranoid > 1;
}

int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */

/*
 * max perf counter sample rate
 */
int sysctl_perf_counter_sample_rate __read_mostly = 100000;

static atomic64_t perf_counter_id;

/*
 * Lock for (sysadmin-configurable) counter reservations:
 */
static DEFINE_SPINLOCK(perf_resource_lock);

/*
 * Architecture provided APIs - weak aliases:
 */
extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	return NULL;
}

void __weak hw_perf_disable(void)		{ barrier(); }
void __weak hw_perf_enable(void)		{ barrier(); }

void __weak hw_perf_counter_setup(int cpu)	{ barrier(); }

int __weak
hw_perf_group_sched_in(struct perf_counter *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx, int cpu)
{
	return 0;
}

void __weak perf_counter_print_debug(void)	{ }

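/*
 * perf_disable()/perf_enable() nest per CPU: hw_perf_enable() is only
 * called again once the per-CPU disable_count has dropped back to zero.
 */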
static DEFINE_PER_CPU(int, disable_count);

void __perf_disable(void)
{
	__get_cpu_var(disable_count)++;
}

bool __perf_enable(void)
{
	return !--__get_cpu_var(disable_count);
}

void perf_disable(void)
{
	__perf_disable();
	hw_perf_disable();
}

void perf_enable(void)
{
	if (__perf_enable())
		hw_perf_enable();
}

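/*
 * Take a reference on a context.  The context must already have a
 * non-zero refcount; the WARN_ON catches use after the last reference
 * was dropped.
 */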
static void get_ctx(struct perf_counter_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_counter_context *ctx;

	ctx = container_of(head, struct perf_counter_context, rcu_head);
	kfree(ctx);
}

static void put_ctx(struct perf_counter_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}

/*
 * Get the perf_counter_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_counter_context *
perf_lock_task_context(struct task_struct *task, unsigned long *flags)
{
	struct perf_counter_context *ctx;

	rcu_read_lock();
 retry:
	ctx = rcu_dereference(task->perf_counter_ctxp);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_counter_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed.  Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so.  If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
			spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_counter_context *perf_pin_task_context(struct task_struct *task)
{
	struct perf_counter_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, &flags);
	if (ctx) {
		++ctx->pin_count;
		spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_counter_context *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	spin_unlock_irqrestore(&ctx->lock, flags);
	put_ctx(ctx);
}

/*
 * Add a counter to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *group_leader = counter->group_leader;

	/*
	 * Depending on whether it is a standalone or sibling counter,
	 * add it straight to the context's counter list, or to the group
	 * leader's sibling list:
	 */
	if (group_leader == counter)
		list_add_tail(&counter->list_entry, &ctx->counter_list);
	else {
		list_add_tail(&counter->list_entry, &group_leader->sibling_list);
		group_leader->nr_siblings++;
	}

	list_add_rcu(&counter->event_entry, &ctx->event_list);
	ctx->nr_counters++;
	if (counter->attr.inherit_stat)
		ctx->nr_stat++;
}

/*
 * Remove a counter from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *sibling, *tmp;

	if (list_empty(&counter->list_entry))
		return;
	ctx->nr_counters--;
	if (counter->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_init(&counter->list_entry);
	list_del_rcu(&counter->event_entry);

	if (counter->group_leader != counter)
		counter->group_leader->nr_siblings--;

	/*
	 * If this was a group counter with sibling counters then
	 * upgrade the siblings to singleton counters by adding them
	 * to the context list directly:
	 */
	list_for_each_entry_safe(sibling, tmp,
				 &counter->sibling_list, list_entry) {

		list_move_tail(&sibling->list_entry, &ctx->counter_list);
		sibling->group_leader = sibling;
	}
}

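/*
 * Take a single counter off the PMU: mark it inactive, record the stop
 * timestamp and update the context's active-counter bookkeeping.
 */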
static void
counter_sched_out(struct perf_counter *counter,
		  struct perf_cpu_context *cpuctx,
		  struct perf_counter_context *ctx)
{
	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter->state = PERF_COUNTER_STATE_INACTIVE;
	counter->tstamp_stopped = ctx->time;
	counter->pmu->disable(counter);
	counter->oncpu = -1;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (counter->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

static void
group_sched_out(struct perf_counter *group_counter,
		struct perf_cpu_context *cpuctx,
		struct perf_counter_context *ctx)
{
	struct perf_counter *counter;

	if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter_sched_out(group_counter, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
		counter_sched_out(counter, cpuctx, ctx);

	if (group_counter->attr.exclusive)
		cpuctx->exclusive = 0;
}

/*
 * Cross CPU call to remove a performance counter
 *
 * We disable the counter on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_counter_remove_from_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock(&ctx->lock);
	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level.
	 */
	perf_disable();

	counter_sched_out(counter, cpuctx, ctx);

	list_del_counter(counter, ctx);

	if (!ctx->task) {
		/*
		 * Allow more per task counters with respect to the
		 * reservation:
		 */
		cpuctx->max_pertask =
			min(perf_max_counters - ctx->nr_counters,
			    perf_max_counters - perf_reserved_percpu);
	}

	perf_enable();
	spin_unlock(&ctx->lock);
}


/*
 * Remove the counter from a task's (or a CPU's) list of counters.
 *
 * Must be called with ctx->mutex held.
 *
 * CPU counters are removed with a smp call. For task counters we only
 * call when the task is on a CPU.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid.  This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_counter_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_counter_remove_from_context(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(counter->cpu,
					 __perf_counter_remove_from_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_counter_remove_from_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can remove the counter safely, if the call above did not
	 * succeed.
	 */
	if (!list_empty(&counter->list_entry)) {
		list_del_counter(counter, ctx);
	}
	spin_unlock_irq(&ctx->lock);
}

static inline u64 perf_clock(void)
{
	return cpu_clock(smp_processor_id());
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_counter_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

/*
 * Update the total_time_enabled and total_time_running fields for a counter.
 */
static void update_counter_times(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	u64 run_end;

	if (counter->state < PERF_COUNTER_STATE_INACTIVE)
		return;

	counter->total_time_enabled = ctx->time - counter->tstamp_enabled;

	if (counter->state == PERF_COUNTER_STATE_INACTIVE)
		run_end = counter->tstamp_stopped;
	else
		run_end = ctx->time;

	counter->total_time_running = run_end - counter->tstamp_running;
}

/*
 * Update total_time_enabled and total_time_running for all counters in a group.
 */
static void update_group_times(struct perf_counter *leader)
{
	struct perf_counter *counter;

	update_counter_times(leader);
	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		update_counter_times(counter);
}

/*
 * Cross CPU call to disable a performance counter
 */
static void __perf_counter_disable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock(&ctx->lock);

	/*
	 * If the counter is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
		update_context_time(ctx);
		update_counter_times(counter);
		if (counter == counter->group_leader)
			group_sched_out(counter, cpuctx, ctx);
		else
			counter_sched_out(counter, cpuctx, ctx);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock(&ctx->lock);
}

/*
 * Disable a counter.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_counter_for_each_child or perf_counter_for_each because they
 * hold the top-level counter's child_mutex, so any descendant that
 * goes to exit will block in sync_child_counter.
 * When called from perf_pending_counter it's OK because counter->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_counter_task_sched_out for this context.
 */
static void perf_counter_disable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_disable,
					 counter, 1);
		return;
	}

 retry:
	task_oncpu_function_call(task, __perf_counter_disable, counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the counter is still active, we need to retry the cross-call.
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
		update_counter_times(counter);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock_irq(&ctx->lock);
}

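/*
 * Put a single counter on the PMU.  Returns -EAGAIN and leaves the
 * counter inactive if the hardware refuses to take it.
 */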
static int
counter_sched_in(struct perf_counter *counter,
		 struct perf_cpu_context *cpuctx,
		 struct perf_counter_context *ctx,
		 int cpu)
{
	if (counter->state <= PERF_COUNTER_STATE_OFF)
		return 0;

	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (counter->pmu->enable(counter)) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->oncpu = -1;
		return -EAGAIN;
	}

	counter->tstamp_running += ctx->time - counter->tstamp_stopped;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (counter->attr.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}

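/*
 * Schedule a counter group onto the PMU as a unit: either all members
 * go on, or the partially scheduled group is backed out again.
 */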
static int
group_sched_in(struct perf_counter *group_counter,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx,
	       int cpu)
{
	struct perf_counter *counter, *partial_group;
	int ret;

	if (group_counter->state == PERF_COUNTER_STATE_OFF)
		return 0;

	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
	if (ret)
		return ret < 0 ? ret : 0;

	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
		return -EAGAIN;

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
			partial_group = counter;
			goto group_error;
		}
	}

	return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter == partial_group)
			break;
		counter_sched_out(counter, cpuctx, ctx);
	}
	counter_sched_out(group_counter, cpuctx, ctx);

	return -EAGAIN;
}

/*
 * Return 1 for a group consisting entirely of software counters,
 * 0 if the group contains any hardware counters.
 */
static int is_software_only_group(struct perf_counter *leader)
{
	struct perf_counter *counter;

	if (!is_software_counter(leader))
		return 0;

	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		if (!is_software_counter(counter))
			return 0;

	return 1;
}

/*
 * Work out whether we can put this counter group on the CPU now.
 */
static int group_can_go_on(struct perf_counter *counter,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software counters can always go on.
	 */
	if (is_software_only_group(counter))
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * counters can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * counters on the CPU, it can't go on.
	 */
	if (counter->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}

static void add_counter_to_ctx(struct perf_counter *counter,
			       struct perf_counter_context *ctx)
{
	list_add_counter(counter, ctx);
	counter->tstamp_enabled = ctx->time;
	counter->tstamp_running = ctx->time;
	counter->tstamp_stopped = ctx->time;
}

/*
 * Cross CPU call to install and enable a performance counter
 *
 * Must be called with ctx->mutex held
 */
static void __perf_install_in_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	int cpu = smp_processor_id();
	int err;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 * Or possibly this is the right context but it isn't
	 * on this cpu because it had no counters.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level. NOP for non NMI based counters.
	 */
	perf_disable();

	add_counter_to_ctx(counter, ctx);

	/*
	 * Don't put the counter on if it is disabled or if
	 * it is in a group and the group isn't on.
	 */
	if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
	    (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
		goto unlock;

	/*
	 * An exclusive counter can't go on if there are already active
	 * hardware counters, and no hardware counter can go on if there
	 * is already an exclusive counter on.
	 */
	if (!group_can_go_on(counter, cpuctx, 1))
		err = -EEXIST;
	else
		err = counter_sched_in(counter, cpuctx, ctx, cpu);

	if (err) {
		/*
		 * This counter couldn't go on.  If it is in a group
		 * then we have to pull the whole group off.
		 * If the counter group is pinned then put it in error state.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_COUNTER_STATE_ERROR;
		}
	}

	if (!err && !ctx->task && cpuctx->max_pertask)
		cpuctx->max_pertask--;

 unlock:
	perf_enable();

	spin_unlock(&ctx->lock);
}

/*
 * Attach a performance counter to a context
 *
 * First we add the counter to the list with the hardware enable bit
 * in counter->hw_config cleared.
 *
 * If the counter is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 *
 * Must be called with ctx->mutex held.
 */
static void
perf_install_in_context(struct perf_counter_context *ctx,
			struct perf_counter *counter,
			int cpu)
{
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are installed via an smp call and
		 * the install is always successful.
		 */
		smp_call_function_single(cpu, __perf_install_in_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_install_in_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * we need to retry the smp call.
	 */
	if (ctx->is_active && list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can add the counter safely, if the call above did not
	 * succeed.
	 */
	if (list_empty(&counter->list_entry))
		add_counter_to_ctx(counter, ctx);
	spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to enable a performance counter
 */
static void __perf_counter_enable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	int err;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto unlock;
	counter->state = PERF_COUNTER_STATE_INACTIVE;
	counter->tstamp_enabled = ctx->time - counter->total_time_enabled;

	/*
	 * If the counter is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(counter, cpuctx, 1)) {
		err = -EEXIST;
	} else {
		perf_disable();
		if (counter == leader)
			err = group_sched_in(counter, cpuctx, ctx,
					     smp_processor_id());
		else
			err = counter_sched_in(counter, cpuctx, ctx,
					       smp_processor_id());
		perf_enable();
	}

	if (err) {
		/*
		 * If this counter can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_COUNTER_STATE_ERROR;
		}
	}

 unlock:
	spin_unlock(&ctx->lock);
}

/*
 * Enable a counter.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_counter_for_each_child or perf_counter_for_each as described
 * for perf_counter_disable.
 */
static void perf_counter_enable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_enable,
					 counter, 1);
		return;
	}

	spin_lock_irq(&ctx->lock);
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto out;

	/*
	 * If the counter is in error state, clear that first.
	 * That way, if we see the counter in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (counter->state == PERF_COUNTER_STATE_ERROR)
		counter->state = PERF_COUNTER_STATE_OFF;

 retry:
	spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_counter_enable, counter);

	spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the counter is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
		goto retry;

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_OFF) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->tstamp_enabled =
			ctx->time - counter->total_time_enabled;
	}
 out:
	spin_unlock_irq(&ctx->lock);
}

static int perf_counter_refresh(struct perf_counter *counter, int refresh)
{
	/*
	 * not supported on inherited counters
	 */
	if (counter->attr.inherit)
		return -EINVAL;

	atomic_add(refresh, &counter->event_limit);
	perf_counter_enable(counter);

	return 0;
}

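/*
 * Deschedule every active counter in the context and mark the context
 * inactive.
 */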
void __perf_counter_sched_out(struct perf_counter_context *ctx,
			      struct perf_cpu_context *cpuctx)
{
	struct perf_counter *counter;

	spin_lock(&ctx->lock);
	ctx->is_active = 0;
	if (likely(!ctx->nr_counters))
		goto out;
	update_context_time(ctx);

	perf_disable();
	if (ctx->nr_active) {
		list_for_each_entry(counter, &ctx->counter_list, list_entry) {
			if (counter != counter->group_leader)
				counter_sched_out(counter, cpuctx, ctx);
			else
				group_sched_out(counter, cpuctx, ctx);
		}
	}
	perf_enable();
 out:
	spin_unlock(&ctx->lock);
}

/*
 * Test whether two contexts are equivalent, i.e. whether they
 * have both been cloned from the same version of the same context
 * and they both have the same number of enabled counters.
 * If the number of enabled counters is the same, then the set
 * of enabled counters should be the same, because these are both
 * inherited contexts, therefore we can't access individual counters
 * in them directly with an fd; we can only enable/disable all
 * counters via prctl, or enable/disable all counters in a family
 * via ioctl, which will have the same effect on both contexts.
 */
static int context_equiv(struct perf_counter_context *ctx1,
			 struct perf_counter_context *ctx2)
{
	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
		&& ctx1->parent_gen == ctx2->parent_gen
		&& !ctx1->pin_count && !ctx2->pin_count;
}

static void __perf_counter_read(void *counter);

static void __perf_counter_sync_stat(struct perf_counter *counter,
				     struct perf_counter *next_counter)
{
	u64 value;

	if (!counter->attr.inherit_stat)
		return;

	/*
	 * Update the counter value, we cannot use perf_counter_read()
	 * because we're in the middle of a context switch and have IRQs
	 * disabled, which upsets smp_call_function_single(), however
	 * we know the counter must be on the current CPU, therefore we
	 * don't need to use it.
	 */
	switch (counter->state) {
	case PERF_COUNTER_STATE_ACTIVE:
		__perf_counter_read(counter);
		break;

	case PERF_COUNTER_STATE_INACTIVE:
		update_counter_times(counter);
		break;

	default:
		break;
	}

	/*
	 * In order to keep per-task stats reliable we need to flip the counter
	 * values when we flip the contexts.
	 */
	value = atomic64_read(&next_counter->count);
	value = atomic64_xchg(&counter->count, value);
	atomic64_set(&next_counter->count, value);

	/*
	 * XXX also sync time_enabled and time_running ?
	 */
}

#define list_next_entry(pos, member) \
	list_entry(pos->member.next, typeof(*pos), member)

static void perf_counter_sync_stat(struct perf_counter_context *ctx,
				   struct perf_counter_context *next_ctx)
{
	struct perf_counter *counter, *next_counter;

	if (!ctx->nr_stat)
		return;

	counter = list_first_entry(&ctx->event_list,
				   struct perf_counter, event_entry);

	next_counter = list_first_entry(&next_ctx->event_list,
					struct perf_counter, event_entry);

	while (&counter->event_entry != &ctx->event_list &&
	       &next_counter->event_entry != &next_ctx->event_list) {

		__perf_counter_sync_stat(counter, next_counter);

		counter = list_next_entry(counter, event_entry);
		next_counter = list_next_entry(next_counter, event_entry);
	}
}

/*
 * Called from scheduler to remove the counters of the current task,
 * with interrupts disabled.
 *
 * We stop each counter and update the counter value in counter->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of counter _before_
 * accessing the counter control register. If an NMI hits, then it will
 * not restart the counter.
 */
void perf_counter_task_sched_out(struct task_struct *task,
				 struct task_struct *next, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = task->perf_counter_ctxp;
	struct perf_counter_context *next_ctx;
	struct perf_counter_context *parent;
	struct pt_regs *regs;
	int do_switch = 1;

	regs = task_pt_regs(task);
	perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);

	if (likely(!ctx || !cpuctx->task_ctx))
		return;

	update_context_time(ctx);

	rcu_read_lock();
	parent = rcu_dereference(ctx->parent_ctx);
	next_ctx = next->perf_counter_ctxp;
	if (parent && next_ctx &&
	    rcu_dereference(next_ctx->parent_ctx) == parent) {
		/*
		 * Looks like the two contexts are clones, so we might be
		 * able to optimize the context switch.  We lock both
		 * contexts and check that they are clones under the
		 * lock (including re-checking that neither has been
		 * uncloned in the meantime).  It doesn't matter which
		 * order we take the locks because no other cpu could
		 * be trying to lock both of these tasks.
		 */
		spin_lock(&ctx->lock);
		spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
		if (context_equiv(ctx, next_ctx)) {
			/*
			 * XXX do we need a memory barrier of sorts
			 * wrt to rcu_dereference() of perf_counter_ctxp
			 */
			task->perf_counter_ctxp = next_ctx;
			next->perf_counter_ctxp = ctx;
			ctx->task = next;
			next_ctx->task = task;
			do_switch = 0;

			perf_counter_sync_stat(ctx, next_ctx);
		}
		spin_unlock(&next_ctx->lock);
		spin_unlock(&ctx->lock);
	}
	rcu_read_unlock();

	if (do_switch) {
		__perf_counter_sched_out(ctx, cpuctx);
		cpuctx->task_ctx = NULL;
	}
}

/*
 * Called with IRQs disabled
 */
static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);

	if (!cpuctx->task_ctx)
		return;

	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
		return;

	__perf_counter_sched_out(ctx, cpuctx);
	cpuctx->task_ctx = NULL;
}

/*
 * Called with IRQs disabled
 */
static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
{
	__perf_counter_sched_out(&cpuctx->ctx, cpuctx);
}

static void
__perf_counter_sched_in(struct perf_counter_context *ctx,
			struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter *counter;
	int can_add_hw = 1;

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	if (likely(!ctx->nr_counters))
		goto out;

	ctx->timestamp = perf_clock();

	perf_disable();

	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
1199
		    !counter->attr.pinned)
1200 1201 1202 1203
			continue;
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (counter != counter->group_leader)
			counter_sched_in(counter, cpuctx, ctx, cpu);
		else {
			if (group_can_go_on(counter, cpuctx, 1))
				group_sched_in(counter, cpuctx, ctx, cpu);
		}

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
			update_group_times(counter);
			counter->state = PERF_COUNTER_STATE_ERROR;
		}
	}

	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		/*
		 * Ignore counters in OFF or ERROR state, and
		 * ignore pinned counters since we did them already.
		 */
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
		    counter->attr.pinned)
			continue;

		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of counters:
		 */
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (counter != counter->group_leader) {
			if (counter_sched_in(counter, cpuctx, ctx, cpu))
				can_add_hw = 0;
		} else {
			if (group_can_go_on(counter, cpuctx, can_add_hw)) {
				if (group_sched_in(counter, cpuctx, ctx, cpu))
					can_add_hw = 0;
			}
		}
	}
	perf_enable();
 out:
	spin_unlock(&ctx->lock);
}

/*
 * Called from scheduler to add the counters of the current task
 * with interrupts disabled.
 *
 * We restore the counter value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of counter _before_
 * accessing the counter control register. If an NMI hits, then it will
 * keep the counter running.
 */
void perf_counter_task_sched_in(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = task->perf_counter_ctxp;

	if (likely(!ctx))
		return;
	if (cpuctx->task_ctx == ctx)
		return;
	__perf_counter_sched_in(ctx, cpuctx, cpu);
	cpuctx->task_ctx = ctx;
}

static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter_context *ctx = &cpuctx->ctx;

	__perf_counter_sched_in(ctx, cpuctx, cpu);
}

#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_counter *counter, int enable);
static void perf_log_period(struct perf_counter *counter, u64 period);

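/*
 * Adjust a counter's sample period so that the observed interrupt rate
 * tracks attr.sample_freq; the change is low-pass filtered to avoid
 * wild swings.
 */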
static void perf_adjust_period(struct perf_counter *counter, u64 events)
{
	struct hw_perf_counter *hwc = &counter->hw;
	u64 period, sample_period;
	s64 delta;

	events *= hwc->sample_period;
	period = div64_u64(events, counter->attr.sample_freq);

	delta = (s64)(period - hwc->sample_period);
	delta = (delta + 7) / 8; /* low pass filter */

	sample_period = hwc->sample_period + delta;

	if (!sample_period)
		sample_period = 1;

	perf_log_period(counter, sample_period);

	hwc->sample_period = sample_period;
}

static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
{
	struct perf_counter *counter;
	struct hw_perf_counter *hwc;
	u64 interrupts, freq;

	spin_lock(&ctx->lock);
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state != PERF_COUNTER_STATE_ACTIVE)
			continue;

		hwc = &counter->hw;

		interrupts = hwc->interrupts;
		hwc->interrupts = 0;

		/*
		 * unthrottle counters on the tick
		 */
		if (interrupts == MAX_INTERRUPTS) {
			perf_log_throttle(counter, 1);
			counter->pmu->unthrottle(counter);
			interrupts = 2*sysctl_perf_counter_sample_rate/HZ;
		}

		if (!counter->attr.freq || !counter->attr.sample_freq)
			continue;

		/*
		 * if the specified freq < HZ then we need to skip ticks
		 */
		if (counter->attr.sample_freq < HZ) {
			freq = counter->attr.sample_freq;

			hwc->freq_count += freq;
			hwc->freq_interrupts += interrupts;

			if (hwc->freq_count < HZ)
				continue;

			interrupts = hwc->freq_interrupts;
			hwc->freq_interrupts = 0;
			hwc->freq_count -= HZ;
		} else
			freq = HZ;

		perf_adjust_period(counter, freq * interrupts);

		/*
		 * In order to avoid being stalled by an (accidental) huge
		 * sample period, force reset the sample period if we didn't
		 * get any events in this freq period.
		 */
		if (!interrupts) {
			perf_disable();
			counter->pmu->disable(counter);
			atomic64_set(&hwc->period_left, 0);
			counter->pmu->enable(counter);
			perf_enable();
		}
	}
	spin_unlock(&ctx->lock);
}

/*
 * Round-robin a context's counters:
 */
static void rotate_ctx(struct perf_counter_context *ctx)
{
	struct perf_counter *counter;

	if (!ctx->nr_counters)
		return;

	spin_lock(&ctx->lock);
	/*
	 * Rotate the first entry last (works just fine for group counters too):
	 */
	perf_disable();
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		list_move_tail(&counter->list_entry, &ctx->counter_list);
		break;
	}
	perf_enable();

	spin_unlock(&ctx->lock);
}

void perf_counter_task_tick(struct task_struct *curr, int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;

	if (!atomic_read(&nr_counters))
		return;

	cpuctx = &per_cpu(perf_cpu_context, cpu);
	ctx = curr->perf_counter_ctxp;

	perf_ctx_adjust_freq(&cpuctx->ctx);
	if (ctx)
		perf_ctx_adjust_freq(ctx);

	perf_counter_cpu_sched_out(cpuctx);
	if (ctx)
		__perf_counter_task_sched_out(ctx);

	rotate_ctx(&cpuctx->ctx);
	if (ctx)
		rotate_ctx(ctx);

	perf_counter_cpu_sched_in(cpuctx, cpu);
	if (ctx)
		perf_counter_task_sched_in(curr, cpu);
}

/*
 * Cross CPU call to read the hardware counter
 */
static void __perf_counter_read(void *info)
{
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	unsigned long flags;

	local_irq_save(flags);
	if (ctx->is_active)
		update_context_time(ctx);
	counter->pmu->read(counter);
	update_counter_times(counter);
	local_irq_restore(flags);
}

static u64 perf_counter_read(struct perf_counter *counter)
{
	/*
	 * If counter is enabled and currently active on a CPU, update the
	 * value in the counter structure:
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		smp_call_function_single(counter->oncpu,
					 __perf_counter_read, counter, 1);
	} else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
		update_counter_times(counter);
	}

	return atomic64_read(&counter->count);
}

/*
 * Initialize the perf_counter context in a task_struct:
 */
static void
__perf_counter_init_context(struct perf_counter_context *ctx,
			    struct task_struct *task)
{
	memset(ctx, 0, sizeof(*ctx));
	spin_lock_init(&ctx->lock);
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->counter_list);
	INIT_LIST_HEAD(&ctx->event_list);
	atomic_set(&ctx->refcount, 1);
	ctx->task = task;
}

static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
{
	struct perf_counter_context *parent_ctx;
	struct perf_counter_context *ctx;
	struct perf_cpu_context *cpuctx;
	struct task_struct *task;
	unsigned long flags;
	int err;

	/*
	 * If cpu is not a wildcard then this is a percpu counter:
	 */
	if (cpu != -1) {
		/* Must be root to operate on a CPU counter: */
		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);

		if (cpu < 0 || cpu > num_possible_cpus())
			return ERR_PTR(-EINVAL);

		/*
		 * We could be clever and allow to attach a counter to an
		 * offline CPU and activate it when the CPU comes up, but
		 * that's for later.
		 */
		if (!cpu_isset(cpu, cpu_online_map))
			return ERR_PTR(-ENODEV);

		cpuctx = &per_cpu(perf_cpu_context, cpu);
		ctx = &cpuctx->ctx;
		get_ctx(ctx);

		return ctx;
	}

	rcu_read_lock();
	if (!pid)
		task = current;
	else
		task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	if (!task)
		return ERR_PTR(-ESRCH);

	/*
	 * Can't attach counters to a dying task.
	 */
	err = -ESRCH;
	if (task->flags & PF_EXITING)
		goto errout;

	/* Reuse ptrace permission checks for now. */
	err = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto errout;

 retry:
	ctx = perf_lock_task_context(task, &flags);
	if (ctx) {
		parent_ctx = ctx->parent_ctx;
		if (parent_ctx) {
			put_ctx(parent_ctx);
			ctx->parent_ctx = NULL;		/* no longer a clone */
		}
		spin_unlock_irqrestore(&ctx->lock, flags);
	}

	if (!ctx) {
		ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
		err = -ENOMEM;
		if (!ctx)
			goto errout;
		__perf_counter_init_context(ctx, task);
		get_ctx(ctx);
		if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) {
			/*
			 * We raced with some other task; use
			 * the context they set.
			 */
			kfree(ctx);
			goto retry;
		}
		get_task_struct(task);
	}

	put_task_struct(task);
	return ctx;

 errout:
	put_task_struct(task);
	return ERR_PTR(err);
}

static void free_counter_rcu(struct rcu_head *head)
{
	struct perf_counter *counter;

	counter = container_of(head, struct perf_counter, rcu_head);
	if (counter->ns)
		put_pid_ns(counter->ns);
	kfree(counter);
}

static void perf_pending_sync(struct perf_counter *counter);

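/*
 * Final teardown of a counter: drop the global accounting, run its
 * destroy callback and free the structure after an RCU grace period.
 */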
static void free_counter(struct perf_counter *counter)
{
	perf_pending_sync(counter);

	if (!counter->parent) {
		atomic_dec(&nr_counters);
		if (counter->attr.mmap)
			atomic_dec(&nr_mmap_counters);
		if (counter->attr.comm)
			atomic_dec(&nr_comm_counters);
	}

	if (counter->destroy)
		counter->destroy(counter);

	put_ctx(counter->ctx);
	call_rcu(&counter->rcu_head, free_counter_rcu);
}

/*
 * Called when the last reference to the file is gone.
 */
static int perf_release(struct inode *inode, struct file *file)
{
	struct perf_counter *counter = file->private_data;
	struct perf_counter_context *ctx = counter->ctx;

	file->private_data = NULL;

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_counter_remove_from_context(counter);
	mutex_unlock(&ctx->mutex);

	mutex_lock(&counter->owner->perf_counter_mutex);
	list_del_init(&counter->owner_entry);
	mutex_unlock(&counter->owner->perf_counter_mutex);
	put_task_struct(counter->owner);

	free_counter(counter);

	return 0;
}

/*
 * Read the performance counter - simple non blocking version for now
 */
static ssize_t
perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
{
	u64 values[4];
	int n;

	/*
	 * Return end-of-file for a read on a counter that is in
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled on to the CPU at some point).
	 */
	if (counter->state == PERF_COUNTER_STATE_ERROR)
		return 0;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	mutex_lock(&counter->child_mutex);
	values[0] = perf_counter_read(counter);
	n = 1;
	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = counter->total_time_enabled +
			atomic64_read(&counter->child_total_time_enabled);
	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = counter->total_time_running +
			atomic64_read(&counter->child_total_time_running);
	if (counter->attr.read_format & PERF_FORMAT_ID)
		values[n++] = counter->id;
	mutex_unlock(&counter->child_mutex);

	if (count < n * sizeof(u64))
		return -EINVAL;
	count = n * sizeof(u64);

	if (copy_to_user(buf, values, count))
		return -EFAULT;

	return count;
T

static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct perf_counter *counter = file->private_data;

	return perf_read_hw(counter, buf, count);
}

static unsigned int perf_poll(struct file *file, poll_table *wait)
{
	struct perf_counter *counter = file->private_data;
	struct perf_mmap_data *data;
	unsigned int events = POLLHUP;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (data)
		events = atomic_xchg(&data->poll, 0);
	rcu_read_unlock();

	poll_wait(file, &counter->waitq, wait);

	return events;
}

static void perf_counter_reset(struct perf_counter *counter)
{
	(void)perf_counter_read(counter);
	atomic64_set(&counter->count, 0);
	perf_counter_update_userpage(counter);
}

/*
 * Holding the top-level counter's child_mutex means that any
 * descendant process that has inherited this counter will block
 * in sync_child_counter if it goes to exit, thus satisfying the
 * task existence requirements of perf_counter_enable/disable.
 */
static void perf_counter_for_each_child(struct perf_counter *counter,
					void (*func)(struct perf_counter *))
{
	struct perf_counter *child;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	mutex_lock(&counter->child_mutex);
	func(counter);
	list_for_each_entry(child, &counter->child_list, child_list)
		func(child);
	mutex_unlock(&counter->child_mutex);
}

static void perf_counter_for_each(struct perf_counter *counter,
				  void (*func)(struct perf_counter *))
{
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *sibling;

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	counter = counter->group_leader;

	perf_counter_for_each_child(counter, func);
	func(counter);
	list_for_each_entry(sibling, &counter->sibling_list, list_entry)
		perf_counter_for_each_child(counter, func);
	mutex_unlock(&ctx->mutex);
}

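/*
 * Update a counter's sample period (or sample frequency, for freq
 * counters) from user space via the PERF_COUNTER_IOC_PERIOD ioctl.
 */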
static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
{
	struct perf_counter_context *ctx = counter->ctx;
	unsigned long size;
	int ret = 0;
	u64 value;

	if (!counter->attr.sample_period)
		return -EINVAL;

	size = copy_from_user(&value, arg, sizeof(value));
	if (size != sizeof(value))
		return -EFAULT;

	if (!value)
		return -EINVAL;

	spin_lock_irq(&ctx->lock);
	if (counter->attr.freq) {
		if (value > sysctl_perf_counter_sample_rate) {
			ret = -EINVAL;
			goto unlock;
		}

		counter->attr.sample_freq = value;
	} else {
		perf_log_period(counter, value);

		counter->attr.sample_period = value;
		counter->hw.sample_period = value;
	}
unlock:
	spin_unlock_irq(&ctx->lock);

	return ret;
}

static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct perf_counter *counter = file->private_data;
	void (*func)(struct perf_counter *);
	u32 flags = arg;

	switch (cmd) {
	case PERF_COUNTER_IOC_ENABLE:
		func = perf_counter_enable;
		break;
	case PERF_COUNTER_IOC_DISABLE:
		func = perf_counter_disable;
		break;
	case PERF_COUNTER_IOC_RESET:
		func = perf_counter_reset;
		break;

	case PERF_COUNTER_IOC_REFRESH:
		return perf_counter_refresh(counter, arg);

	case PERF_COUNTER_IOC_PERIOD:
		return perf_counter_period(counter, (u64 __user *)arg);

	default:
		return -ENOTTY;
	}

	if (flags & PERF_IOC_FLAG_GROUP)
		perf_counter_for_each(counter, func);
	else
		perf_counter_for_each_child(counter, func);

	return 0;
}

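/*
 * Enable (respectively disable, below) every counter owned by the
 * current task, including inherited children.
 */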
int perf_counter_task_enable(void)
{
	struct perf_counter *counter;

	mutex_lock(&current->perf_counter_mutex);
	list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
		perf_counter_for_each_child(counter, perf_counter_enable);
	mutex_unlock(&current->perf_counter_mutex);

	return 0;
}

int perf_counter_task_disable(void)
{
	struct perf_counter *counter;

	mutex_lock(&current->perf_counter_mutex);
	list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
		perf_counter_for_each_child(counter, perf_counter_disable);
	mutex_unlock(&current->perf_counter_mutex);

	return 0;
}

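/*
 * Hardware counter index as exposed through the mmap control page;
 * 0 means the counter is not currently scheduled on the PMU.
 */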
static int perf_counter_index(struct perf_counter *counter)
{
	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
		return 0;

	return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET;
}

/*
 * Callers need to ensure there can be no nesting of this function, otherwise
 * the seqlock logic goes bad. We can not serialize this because the arch
 * code calls this from NMI context.
 */
void perf_counter_update_userpage(struct perf_counter *counter)
{
	struct perf_counter_mmap_page *userpg;
1848
	struct perf_mmap_data *data;
1849 1850 1851 1852 1853 1854 1855

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (!data)
		goto unlock;

	userpg = data->user_page;
1856

1857 1858 1859 1860 1861
	/*
	 * Disable preemption so as to not let the corresponding user-space
	 * spin too long if we get preempted.
	 */
	preempt_disable();
1862
	++userpg->lock;
1863
	barrier();
1864
	userpg->index = perf_counter_index(counter);
1865 1866 1867
	userpg->offset = atomic64_read(&counter->count);
	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		userpg->offset -= atomic64_read(&counter->hw.prev_count);
1868

1869 1870 1871 1872 1873 1874
	userpg->time_enabled = counter->total_time_enabled +
			atomic64_read(&counter->child_total_time_enabled);

	userpg->time_running = counter->total_time_running +
			atomic64_read(&counter->child_total_time_running);

1875
	barrier();
1876
	++userpg->lock;
1877
	preempt_enable();
1878
unlock:
1879
	rcu_read_unlock();
1880 1881 1882 1883 1884
}
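
/*
 * Matching user-space read loop (a sketch, not kernel code): readers retry
 * whenever ->lock changes across the read, mirroring the two increments and
 * barriers above.  pmc_read() stands in for whatever RDPMC-style primitive
 * the architecture offers and is illustrative only:
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		idx = pc->index;
 *		count = pc->offset;
 *		if (idx)
 *			count += pmc_read(idx - 1);
 *		barrier();
 *	} while (pc->lock != seq);
 */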

static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct perf_counter *counter = vma->vm_file->private_data;
	struct perf_mmap_data *data;
	int ret = VM_FAULT_SIGBUS;

	if (vmf->flags & FAULT_FLAG_MKWRITE) {
		if (vmf->pgoff == 0)
			ret = 0;
		return ret;
	}

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (!data)
		goto unlock;

	if (vmf->pgoff == 0) {
		vmf->page = virt_to_page(data->user_page);
	} else {
		int nr = vmf->pgoff - 1;

		if ((unsigned)nr > data->nr_pages)
			goto unlock;

		if (vmf->flags & FAULT_FLAG_WRITE)
			goto unlock;

		vmf->page = virt_to_page(data->data_pages[nr]);
	}

	get_page(vmf->page);
	vmf->page->mapping = vma->vm_file->f_mapping;
	vmf->page->index   = vmf->pgoff;

	ret = 0;
unlock:
	rcu_read_unlock();

	return ret;
}

static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
{
	struct perf_mmap_data *data;
	unsigned long size;
	int i;

	WARN_ON(atomic_read(&counter->mmap_count));

	size = sizeof(struct perf_mmap_data);
	size += nr_pages * sizeof(void *);

	data = kzalloc(size, GFP_KERNEL);
	if (!data)
		goto fail;

	data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
	if (!data->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
		if (!data->data_pages[i])
			goto fail_data_pages;
	}

	data->nr_pages = nr_pages;
	atomic_set(&data->lock, -1);

	rcu_assign_pointer(counter->data, data);

	return 0;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)data->data_pages[i]);

	free_page((unsigned long)data->user_page);

fail_user_page:
	kfree(data);

fail:
	return -ENOMEM;
}

static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page(addr);

	page->mapping = NULL;
	__free_page(page);
}

static void __perf_mmap_data_free(struct rcu_head *rcu_head)
{
	struct perf_mmap_data *data;
	int i;

	data = container_of(rcu_head, struct perf_mmap_data, rcu_head);

	perf_mmap_free_page((unsigned long)data->user_page);
	for (i = 0; i < data->nr_pages; i++)
		perf_mmap_free_page((unsigned long)data->data_pages[i]);

	kfree(data);
}

static void perf_mmap_data_free(struct perf_counter *counter)
{
	struct perf_mmap_data *data = counter->data;

	WARN_ON(atomic_read(&counter->mmap_count));

	rcu_assign_pointer(counter->data, NULL);
	call_rcu(&data->rcu_head, __perf_mmap_data_free);
}

static void perf_mmap_open(struct vm_area_struct *vma)
{
	struct perf_counter *counter = vma->vm_file->private_data;

	atomic_inc(&counter->mmap_count);
}

static void perf_mmap_close(struct vm_area_struct *vma)
{
	struct perf_counter *counter = vma->vm_file->private_data;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) {
		struct user_struct *user = current_user();

		atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
		vma->vm_mm->locked_vm -= counter->data->nr_locked;
		perf_mmap_data_free(counter);
		mutex_unlock(&counter->mmap_mutex);
	}
}

static struct vm_operations_struct perf_mmap_vmops = {
	.open		= perf_mmap_open,
	.close		= perf_mmap_close,
	.fault		= perf_mmap_fault,
	.page_mkwrite	= perf_mmap_fault,
};

static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct perf_counter *counter = file->private_data;
	unsigned long user_locked, user_lock_limit;
	struct user_struct *user = current_user();
	unsigned long locked, lock_limit;
	unsigned long vma_size;
	unsigned long nr_pages;
	long user_extra, extra;
	int ret = 0;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma_size = vma->vm_end - vma->vm_start;
	nr_pages = (vma_size / PAGE_SIZE) - 1;

	/*
	 * If we have data pages ensure they're a power-of-two number, so we
	 * can do bitmasks instead of modulo.
	 */
	if (nr_pages != 0 && !is_power_of_2(nr_pages))
		return -EINVAL;

	if (vma_size != PAGE_SIZE * (1 + nr_pages))
		return -EINVAL;

	if (vma->vm_pgoff != 0)
		return -EINVAL;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	mutex_lock(&counter->mmap_mutex);
	if (atomic_inc_not_zero(&counter->mmap_count)) {
		if (nr_pages != counter->data->nr_pages)
			ret = -EINVAL;
		goto unlock;
	}

	user_extra = nr_pages + 1;
	user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);

	/*
	 * Increase the limit linearly with more CPUs:
	 */
	user_lock_limit *= num_online_cpus();

	user_locked = atomic_long_read(&user->locked_vm) + user_extra;

	extra = 0;
	if (user_locked > user_lock_limit)
		extra = user_locked - user_lock_limit;

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;
	locked = vma->vm_mm->locked_vm + extra;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -EPERM;
		goto unlock;
	}

	WARN_ON(counter->data);
	ret = perf_mmap_data_alloc(counter, nr_pages);
	if (ret)
		goto unlock;

	atomic_set(&counter->mmap_count, 1);
	atomic_long_add(user_extra, &user->locked_vm);
	vma->vm_mm->locked_vm += extra;
	counter->data->nr_locked = extra;
	if (vma->vm_flags & VM_WRITE)
		counter->data->writable = 1;

unlock:
	mutex_unlock(&counter->mmap_mutex);

	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &perf_mmap_vmops;

	return ret;
}
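
/*
 * Layout enforced by perf_mmap() above: one metadata page followed by a
 * power-of-two number of data pages.  A hypothetical user-space caller would
 * therefore size the mapping as (1 + 2^n) pages, e.g.:
 *
 *	size_t len = (1 + 128) * page_size;	// 128 data pages
 *	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *
 * Mapping with PROT_WRITE is what sets data->writable and thereby enables
 * the data_tail flow control checked in perf_output_space().
 */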

static int perf_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct perf_counter *counter = filp->private_data;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &counter->fasync);
	mutex_unlock(&inode->i_mutex);

	if (retval < 0)
		return retval;

	return 0;
}

static const struct file_operations perf_fops = {
	.release		= perf_release,
	.read			= perf_read,
	.poll			= perf_poll,
	.unlocked_ioctl		= perf_ioctl,
	.compat_ioctl		= perf_ioctl,
	.mmap			= perf_mmap,
	.fasync			= perf_fasync,
};

/*
 * Perf counter wakeup
 *
 * If there's data, ensure we set the poll() state and publish everything
 * to user-space before waking everybody up.
 */

void perf_counter_wakeup(struct perf_counter *counter)
{
	wake_up_all(&counter->waitq);

	if (counter->pending_kill) {
		kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
		counter->pending_kill = 0;
	}
}

/*
 * Pending wakeups
 *
 * Handle the case where we need to wake up from NMI (or rq->lock) context.
 *
 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
 * single linked list and use cmpxchg() to add entries lockless.
 */

static void perf_pending_counter(struct perf_pending_entry *entry)
{
	struct perf_counter *counter = container_of(entry,
			struct perf_counter, pending);

	if (counter->pending_disable) {
		counter->pending_disable = 0;
		perf_counter_disable(counter);
	}

	if (counter->pending_wakeup) {
		counter->pending_wakeup = 0;
		perf_counter_wakeup(counter);
	}
}

#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)

static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
	PENDING_TAIL,
};

static void perf_pending_queue(struct perf_pending_entry *entry,
			       void (*func)(struct perf_pending_entry *))
{
	struct perf_pending_entry **head;

	if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
		return;

	entry->func = func;

	head = &get_cpu_var(perf_pending_head);

	do {
		entry->next = *head;
	} while (cmpxchg(head, entry->next, entry) != entry->next);

	set_perf_counter_pending();

	put_cpu_var(perf_pending_head);
}

static int __perf_pending_run(void)
{
	struct perf_pending_entry *list;
	int nr = 0;

	list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
	while (list != PENDING_TAIL) {
		void (*func)(struct perf_pending_entry *);
		struct perf_pending_entry *entry = list;

		list = list->next;

		func = entry->func;
		entry->next = NULL;
		/*
		 * Ensure we observe the unqueue before we issue the wakeup,
		 * so that we won't be waiting forever.
		 * -- see perf_not_pending().
		 */
		smp_wmb();

		func(entry);
		nr++;
	}

	return nr;
}

static inline int perf_not_pending(struct perf_counter *counter)
{
	/*
	 * If we flush on whatever cpu we run, there is a chance we don't
	 * need to wait.
	 */
	get_cpu();
	__perf_pending_run();
	put_cpu();

	/*
	 * Ensure we see the proper queue state before going to sleep
	 * so that we do not miss the wakeup. -- see perf_pending_handle()
	 */
	smp_rmb();
	return counter->pending.next == NULL;
}

static void perf_pending_sync(struct perf_counter *counter)
{
	wait_event(counter->waitq, perf_not_pending(counter));
}

void perf_counter_do_pending(void)
{
	__perf_pending_run();
}

/*
 * Callchain support -- arch specific
 */

__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	return NULL;
}

/*
 * Output
 */

struct perf_output_handle {
	struct perf_counter	*counter;
	struct perf_mmap_data	*data;
	unsigned long		head;
	unsigned long		offset;
	int			nmi;
	int			sample;
	int			locked;
	unsigned long		flags;
};

static bool perf_output_space(struct perf_mmap_data *data,
			      unsigned int offset, unsigned int head)
{
	unsigned long tail;
	unsigned long mask;

	if (!data->writable)
		return true;

	mask = (data->nr_pages << PAGE_SHIFT) - 1;
	/*
	 * Userspace could choose to issue an mb() before updating the tail
	 * pointer, so that all reads are completed before the write is
	 * issued.
	 */
	tail = ACCESS_ONCE(data->user_page->data_tail);
	smp_rmb();

	offset = (offset - tail) & mask;
	head   = (head   - tail) & mask;

	if ((int)(head - offset) < 0)
		return false;

	return true;
}
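
/*
 * The consumer half of the check above, as a user-space sketch (illustrative
 * only): once the reader has copied records out of the buffer it publishes
 * its position so the kernel can tell how much room is left:
 *
 *	mb();				// finish reading the records
 *	pc->data_tail = tail;		// then release the space
 *
 * When the mapping is not writable, data->writable stays 0 and the kernel
 * simply overwrites old data instead of accounting lost records.
 */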

static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->data->poll, POLL_IN);

	if (handle->nmi) {
		handle->counter->pending_wakeup = 1;
		perf_pending_queue(&handle->counter->pending,
				   perf_pending_counter);
	} else
		perf_counter_wakeup(handle->counter);
}

/*
 * Curious locking construct.
 *
 * We need to ensure a later event doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * What we do is serialize between CPUs so we only have to deal with NMI
 * nesting on a single CPU.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_lock(struct perf_output_handle *handle)
{
	struct perf_mmap_data *data = handle->data;
	int cpu;

	handle->locked = 0;

	local_irq_save(handle->flags);
	cpu = smp_processor_id();

	if (in_nmi() && atomic_read(&data->lock) == cpu)
		return;

	while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
		cpu_relax();

	handle->locked = 1;
}

static void perf_output_unlock(struct perf_output_handle *handle)
{
	struct perf_mmap_data *data = handle->data;
2361 2362
	unsigned long head;
	int cpu;
2363

2364
	data->done_head = data->head;
2365 2366 2367 2368 2369 2370 2371 2372 2373 2374

	if (!handle->locked)
		goto out;

again:
	/*
	 * The xchg implies a full barrier that ensures all writes are done
	 * before we publish the new head, matched by a rmb() in userspace when
	 * reading this position.
	 */
2375
	while ((head = atomic_long_xchg(&data->done_head, 0)))
2376 2377 2378
		data->user_page->data_head = head;

	/*
2379
	 * NMI can happen here, which means we can miss a done_head update.
2380 2381
	 */

2382
	cpu = atomic_xchg(&data->lock, -1);
2383 2384 2385 2386 2387
	WARN_ON_ONCE(cpu != smp_processor_id());

	/*
	 * Therefore we have to validate we did not indeed do so.
	 */
2388
	if (unlikely(atomic_long_read(&data->done_head))) {
2389 2390 2391
		/*
		 * Since we had it locked, we can lock it again.
		 */
2392
		while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2393 2394 2395 2396 2397
			cpu_relax();

		goto again;
	}

2398
	if (atomic_xchg(&data->wakeup, 0))
2399 2400 2401 2402 2403
		perf_output_wakeup(handle);
out:
	local_irq_restore(handle->flags);
}

static void perf_output_copy(struct perf_output_handle *handle,
			     const void *buf, unsigned int len)
{
	unsigned int pages_mask;
	unsigned int offset;
	unsigned int size;
	void **pages;

	offset		= handle->offset;
	pages_mask	= handle->data->nr_pages - 1;
	pages		= handle->data->data_pages;

	do {
		unsigned int page_offset;
		int nr;

		nr	    = (offset >> PAGE_SHIFT) & pages_mask;
		page_offset = offset & (PAGE_SIZE - 1);
		size	    = min_t(unsigned int, PAGE_SIZE - page_offset, len);

		memcpy(pages[nr] + page_offset, buf, size);

		len	    -= size;
		buf	    += size;
		offset	    += size;
	} while (len);

	handle->offset = offset;

	/*
	 * Check we didn't copy past our reservation window, taking the
	 * possible unsigned int wrap into account.
	 */
	WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
}

#define perf_output_put(handle, x) \
	perf_output_copy((handle), &(x), sizeof(x))

2443
static int perf_output_begin(struct perf_output_handle *handle,
2444
			     struct perf_counter *counter, unsigned int size,
2445
			     int nmi, int sample)
2446
{
2447
	struct perf_mmap_data *data;
2448
	unsigned int offset, head;
2449 2450 2451 2452 2453 2454
	int have_lost;
	struct {
		struct perf_event_header header;
		u64			 id;
		u64			 lost;
	} lost_event;
2455

2456 2457 2458 2459 2460 2461
	/*
	 * For inherited counters we send all the output towards the parent.
	 */
	if (counter->parent)
		counter = counter->parent;

2462 2463 2464 2465 2466
	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (!data)
		goto out;

2467 2468 2469 2470
	handle->data	= data;
	handle->counter	= counter;
	handle->nmi	= nmi;
	handle->sample	= sample;
2471

2472
	if (!data->nr_pages)
2473
		goto fail;
2474

2475 2476 2477 2478
	have_lost = atomic_read(&data->lost);
	if (have_lost)
		size += sizeof(lost_event);

2479 2480
	perf_output_lock(handle);

2481
	do {
2482
		offset = head = atomic_long_read(&data->head);
		head += size;
2484 2485
		if (unlikely(!perf_output_space(data, offset, head)))
			goto fail;
2486
	} while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
2487

2488
	handle->offset	= offset;
2489
	handle->head	= head;
2490 2491 2492

	if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
		atomic_set(&data->wakeup, 1);
2493

2494 2495 2496 2497 2498 2499 2500 2501 2502 2503
	if (have_lost) {
		lost_event.header.type = PERF_EVENT_LOST;
		lost_event.header.misc = 0;
		lost_event.header.size = sizeof(lost_event);
		lost_event.id          = counter->id;
		lost_event.lost        = atomic_xchg(&data->lost, 0);

		perf_output_put(handle, lost_event);
	}

2504
	return 0;
2505

2506
fail:
2507 2508
	atomic_inc(&data->lost);
	perf_output_unlock(handle);
2509 2510
out:
	rcu_read_unlock();
2511

2512 2513
	return -ENOSPC;
}
2514

2515
static void perf_output_end(struct perf_output_handle *handle)
2516
{
2517 2518 2519
	struct perf_counter *counter = handle->counter;
	struct perf_mmap_data *data = handle->data;

2520
	int wakeup_events = counter->attr.wakeup_events;

2522
	if (handle->sample && wakeup_events) {
2523
		int events = atomic_inc_return(&data->events);
		if (events >= wakeup_events) {
2525
			atomic_sub(wakeup_events, &data->events);
2526
			atomic_set(&data->wakeup, 1);
		}
2528 2529 2530
	}

	perf_output_unlock(handle);
2531
	rcu_read_unlock();
2532 2533
}

2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555
static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p)
{
	/*
	 * only top level counters have the pid namespace they were created in
	 */
	if (counter->parent)
		counter = counter->parent;

	return task_tgid_nr_ns(p, counter->ns);
}

static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
{
	/*
	 * only top level counters have the pid namespace they were created in
	 */
	if (counter->parent)
		counter = counter->parent;

	return task_pid_nr_ns(p, counter->ns);
}

2556 2557
static void perf_counter_output(struct perf_counter *counter, int nmi,
				struct perf_sample_data *data)
2558
{
2559
	int ret;
2560
	u64 sample_type = counter->attr.sample_type;
2561 2562 2563
	struct perf_output_handle handle;
	struct perf_event_header header;
	u64 ip;
	struct {
2565
		u32 pid, tid;
2566
	} tid_entry;
2567
	struct {
2568
		u64 id;
2569 2570
		u64 counter;
	} group_entry;
2571 2572
	struct perf_callchain_entry *callchain = NULL;
	int callchain_size = 0;
	u64 time;
2574 2575 2576
	struct {
		u32 cpu, reserved;
	} cpu_entry;
2577

2578
	header.type = PERF_EVENT_SAMPLE;
2579
	header.size = sizeof(header);
2580

2581
	header.misc = 0;
2582
	header.misc |= perf_misc_flags(data->regs);
2583

2584
	if (sample_type & PERF_SAMPLE_IP) {
2585
		ip = perf_instruction_pointer(data->regs);
2586 2587
		header.size += sizeof(ip);
	}
2588

2589
	if (sample_type & PERF_SAMPLE_TID) {
2590
		/* namespace issues */
2591 2592
		tid_entry.pid = perf_counter_pid(counter, current);
		tid_entry.tid = perf_counter_tid(counter, current);
2593 2594 2595 2596

		header.size += sizeof(tid_entry);
	}

2597
	if (sample_type & PERF_SAMPLE_TIME) {
2598 2599 2600 2601 2602 2603 2604 2605
		/*
		 * Maybe do better on x86 and provide cpu_clock_nmi()
		 */
		time = sched_clock();

		header.size += sizeof(u64);
	}

2606
	if (sample_type & PERF_SAMPLE_ADDR)
2607 2608
		header.size += sizeof(u64);

2609
	if (sample_type & PERF_SAMPLE_ID)
2610 2611
		header.size += sizeof(u64);

2612
	if (sample_type & PERF_SAMPLE_CPU) {
2613 2614 2615 2616 2617
		header.size += sizeof(cpu_entry);

		cpu_entry.cpu = raw_smp_processor_id();
	}

2618
	if (sample_type & PERF_SAMPLE_PERIOD)
2619 2620
		header.size += sizeof(u64);

2621
	if (sample_type & PERF_SAMPLE_GROUP) {
2622 2623 2624 2625
		header.size += sizeof(u64) +
			counter->nr_siblings * sizeof(group_entry);
	}

2626
	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
2627
		callchain = perf_callchain(data->regs);
2628 2629

		if (callchain) {
2630
			callchain_size = (1 + callchain->nr) * sizeof(u64);
2631
			header.size += callchain_size;
2632 2633
		} else
			header.size += sizeof(u64);
2634 2635
	}

2636
	ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
2637 2638
	if (ret)
		return;
2639

2640
	perf_output_put(&handle, header);

2642
	if (sample_type & PERF_SAMPLE_IP)
2643
		perf_output_put(&handle, ip);

2645
	if (sample_type & PERF_SAMPLE_TID)
2646
		perf_output_put(&handle, tid_entry);

2648
	if (sample_type & PERF_SAMPLE_TIME)
2649 2650
		perf_output_put(&handle, time);

2651
	if (sample_type & PERF_SAMPLE_ADDR)
2652
		perf_output_put(&handle, data->addr);
2653

2654 2655
	if (sample_type & PERF_SAMPLE_ID)
		perf_output_put(&handle, counter->id);
2656

2657
	if (sample_type & PERF_SAMPLE_CPU)
2658 2659
		perf_output_put(&handle, cpu_entry);

2660
	if (sample_type & PERF_SAMPLE_PERIOD)
2661
		perf_output_put(&handle, data->period);
2662

2663
	/*
2664
	 * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
2665
	 */
2666
	if (sample_type & PERF_SAMPLE_GROUP) {
2667 2668
		struct perf_counter *leader, *sub;
		u64 nr = counter->nr_siblings;

2670
		perf_output_put(&handle, nr);
2671

2672 2673 2674
		leader = counter->group_leader;
		list_for_each_entry(sub, &leader->sibling_list, list_entry) {
			if (sub != counter)
2675
				sub->pmu->read(sub);
2676

2677
			group_entry.id = sub->id;
2678
			group_entry.counter = atomic64_read(&sub->count);
2679

2680 2681
			perf_output_put(&handle, group_entry);
		}
2682
	}

2684 2685 2686 2687 2688 2689 2690 2691
	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		if (callchain)
			perf_output_copy(&handle, callchain, callchain_size);
		else {
			u64 nr = 0;
			perf_output_put(&handle, nr);
		}
	}
2692

2693
	perf_output_end(&handle);
2694 2695
}
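
/*
 * Resulting PERF_EVENT_SAMPLE layout, in the order written above; each field
 * is present only when the corresponding bit is set in attr.sample_type:
 *
 *	struct perf_event_header	header;
 *	u64				ip;		  // PERF_SAMPLE_IP
 *	u32				pid, tid;	  // PERF_SAMPLE_TID
 *	u64				time;		  // PERF_SAMPLE_TIME
 *	u64				addr;		  // PERF_SAMPLE_ADDR
 *	u64				id;		  // PERF_SAMPLE_ID
 *	u32				cpu, reserved;	  // PERF_SAMPLE_CPU
 *	u64				period;		  // PERF_SAMPLE_PERIOD
 *	u64 nr; { u64 id, val; }	cnt[nr];	  // PERF_SAMPLE_GROUP
 *	u64 nr; u64			ips[nr];	  // PERF_SAMPLE_CALLCHAIN
 *
 * Consumers should trust header.size rather than recomputing this by hand.
 */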

2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755
/*
 * read event
 */

struct perf_read_event {
	struct perf_event_header	header;

	u32				pid;
	u32				tid;
	u64				value;
	u64				format[3];
};

static void
perf_counter_read_event(struct perf_counter *counter,
			struct task_struct *task)
{
	struct perf_output_handle handle;
	struct perf_read_event event = {
		.header = {
			.type = PERF_EVENT_READ,
			.misc = 0,
			.size = sizeof(event) - sizeof(event.format),
		},
		.pid = perf_counter_pid(counter, task),
		.tid = perf_counter_tid(counter, task),
		.value = atomic64_read(&counter->count),
	};
	int ret, i = 0;

	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
		event.header.size += sizeof(u64);
		event.format[i++] = counter->total_time_enabled;
	}

	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
		event.header.size += sizeof(u64);
		event.format[i++] = counter->total_time_running;
	}

	if (counter->attr.read_format & PERF_FORMAT_ID) {
		u64 id;

		event.header.size += sizeof(u64);
		if (counter->parent)
			id = counter->parent->id;
		else
			id = counter->id;

		event.format[i++] = id;
	}

	ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
	if (ret)
		return;

	perf_output_copy(&handle, &event, event.header.size);
	perf_output_end(&handle);
}

/*
 * fork tracking
 */

struct perf_fork_event {
	struct task_struct	*task;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				ppid;
	} event;
};

static void perf_counter_fork_output(struct perf_counter *counter,
				     struct perf_fork_event *fork_event)
{
	struct perf_output_handle handle;
	int size = fork_event->event.header.size;
	struct task_struct *task = fork_event->task;
	int ret = perf_output_begin(&handle, counter, size, 0, 0);

	if (ret)
		return;

	fork_event->event.pid = perf_counter_pid(counter, task);
	fork_event->event.ppid = perf_counter_pid(counter, task->real_parent);

	perf_output_put(&handle, fork_event->event);
	perf_output_end(&handle);
}

static int perf_counter_fork_match(struct perf_counter *counter)
{
	if (counter->attr.comm || counter->attr.mmap)
		return 1;

	return 0;
}

static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
				  struct perf_fork_event *fork_event)
{
	struct perf_counter *counter;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
		if (perf_counter_fork_match(counter))
			perf_counter_fork_output(counter, fork_event);
	}
	rcu_read_unlock();
}

static void perf_counter_fork_event(struct perf_fork_event *fork_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;

	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_counter_fork_ctx(&cpuctx->ctx, fork_event);
	put_cpu_var(perf_cpu_context);

	rcu_read_lock();
	/*
	 * It doesn't really matter which of the child contexts the
	 * event ends up in.
	 */
	ctx = rcu_dereference(current->perf_counter_ctxp);
	if (ctx)
		perf_counter_fork_ctx(ctx, fork_event);
	rcu_read_unlock();
}

void perf_counter_fork(struct task_struct *task)
{
	struct perf_fork_event fork_event;

	if (!atomic_read(&nr_comm_counters) &&
	    !atomic_read(&nr_mmap_counters))
		return;

	fork_event = (struct perf_fork_event){
		.task	= task,
		.event  = {
			.header = {
				.type = PERF_EVENT_FORK,
				.size = sizeof(fork_event.event),
			},
		},
	};

	perf_counter_fork_event(&fork_event);
}

/*
 * comm tracking
 */

struct perf_comm_event {
	struct task_struct	*task;
	char			*comm;
	int			comm_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
	} event;
};

static void perf_counter_comm_output(struct perf_counter *counter,
				     struct perf_comm_event *comm_event)
{
	struct perf_output_handle handle;
	int size = comm_event->event.header.size;
	int ret = perf_output_begin(&handle, counter, size, 0, 0);

	if (ret)
		return;

	comm_event->event.pid = perf_counter_pid(counter, comm_event->task);
	comm_event->event.tid = perf_counter_tid(counter, comm_event->task);

	perf_output_put(&handle, comm_event->event);
	perf_output_copy(&handle, comm_event->comm,
				   comm_event->comm_size);
	perf_output_end(&handle);
}

static int perf_counter_comm_match(struct perf_counter *counter)
{
	if (counter->attr.comm)
		return 1;

	return 0;
}

static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
				  struct perf_comm_event *comm_event)
{
	struct perf_counter *counter;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
P
		if (perf_counter_comm_match(counter))
	}
	rcu_read_unlock();
}

static void perf_counter_comm_event(struct perf_comm_event *comm_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;
	unsigned int size;
	char *comm = comm_event->task->comm;

	size = ALIGN(strlen(comm)+1, sizeof(u64));

	comm_event->comm = comm;
	comm_event->comm_size = size;

	comm_event->event.header.size = sizeof(comm_event->event) + size;

	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
	put_cpu_var(perf_cpu_context);
2931 2932 2933 2934 2935 2936 2937 2938 2939 2940

	rcu_read_lock();
	/*
	 * It doesn't really matter which of the child contexts the
	 * event ends up in.
	 */
	ctx = rcu_dereference(current->perf_counter_ctxp);
	if (ctx)
		perf_counter_comm_ctx(ctx, comm_event);
	rcu_read_unlock();
}

void perf_counter_comm(struct task_struct *task)
{
	struct perf_comm_event comm_event;

	if (!atomic_read(&nr_comm_counters))
		return;

	comm_event = (struct perf_comm_event){
		.task	= task,
		.event  = {
			.header = { .type = PERF_EVENT_COMM, },
		},
	};

	perf_counter_comm_event(&comm_event);
}

/*
 * mmap tracking
 */

struct perf_mmap_event {
	struct vm_area_struct	*vma;

	const char		*file_name;
	int			file_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
		u64				start;
		u64				len;
		u64				pgoff;
	} event;
};

static void perf_counter_mmap_output(struct perf_counter *counter,
				     struct perf_mmap_event *mmap_event)
{
	struct perf_output_handle handle;
	int size = mmap_event->event.header.size;
	int ret = perf_output_begin(&handle, counter, size, 0, 0);

	if (ret)
		return;

	mmap_event->event.pid = perf_counter_pid(counter, current);
	mmap_event->event.tid = perf_counter_tid(counter, current);

	perf_output_put(&handle, mmap_event->event);
	perf_output_copy(&handle, mmap_event->file_name,
				   mmap_event->file_size);
	perf_output_end(&handle);
}

static int perf_counter_mmap_match(struct perf_counter *counter,
				   struct perf_mmap_event *mmap_event)
{
	if (counter->attr.mmap)
		return 1;

	return 0;
}

static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
				  struct perf_mmap_event *mmap_event)
{
	struct perf_counter *counter;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
		if (perf_counter_mmap_match(counter, mmap_event))
			perf_counter_mmap_output(counter, mmap_event);
	}
	rcu_read_unlock();
}

static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;
	struct vm_area_struct *vma = mmap_event->vma;
	struct file *file = vma->vm_file;
	unsigned int size;
	char tmp[16];
	char *buf = NULL;
	const char *name;

	if (file) {
		buf = kzalloc(PATH_MAX, GFP_KERNEL);
		if (!buf) {
			name = strncpy(tmp, "//enomem", sizeof(tmp));
			goto got_name;
		}
		name = d_path(&file->f_path, buf, PATH_MAX);
		if (IS_ERR(name)) {
			name = strncpy(tmp, "//toolong", sizeof(tmp));
			goto got_name;
		}
	} else {
		name = arch_vma_name(mmap_event->vma);
		if (name)
			goto got_name;

		if (!vma->vm_mm) {
			name = strncpy(tmp, "[vdso]", sizeof(tmp));
			goto got_name;
		}

		name = strncpy(tmp, "//anon", sizeof(tmp));
		goto got_name;
	}

got_name:
	size = ALIGN(strlen(name)+1, sizeof(u64));

	mmap_event->file_name = name;
	mmap_event->file_size = size;

	mmap_event->event.header.size = sizeof(mmap_event->event) + size;

	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
	put_cpu_var(perf_cpu_context);

	rcu_read_lock();
	/*
	 * It doesn't really matter which of the child contexts the
	 * event ends up in.
	 */
	ctx = rcu_dereference(current->perf_counter_ctxp);
	if (ctx)
		perf_counter_mmap_ctx(ctx, mmap_event);
	rcu_read_unlock();

	kfree(buf);
}

void __perf_counter_mmap(struct vm_area_struct *vma)
{
	struct perf_mmap_event mmap_event;

	if (!atomic_read(&nr_mmap_counters))
		return;

	mmap_event = (struct perf_mmap_event){
		.vma	= vma,
		.event  = {
			.header = { .type = PERF_EVENT_MMAP, },
			.start  = vma->vm_start,
			.len    = vma->vm_end - vma->vm_start,
			.pgoff  = vma->vm_pgoff,
		},
	};

	perf_counter_mmap_event(&mmap_event);
}

/*
 * Log sample_period changes so that analyzing tools can re-normalize the
 * event flow.
 */

struct freq_event {
	struct perf_event_header	header;
	u64				time;
	u64				id;
	u64				period;
};

3118 3119 3120
static void perf_log_period(struct perf_counter *counter, u64 period)
{
	struct perf_output_handle handle;
3121
	struct freq_event event;
3122 3123
	int ret;

3124 3125 3126 3127 3128 3129 3130
	if (counter->hw.sample_period == period)
		return;

	if (counter->attr.sample_type & PERF_SAMPLE_PERIOD)
		return;

	event = (struct freq_event) {
3131 3132 3133
		.header = {
			.type = PERF_EVENT_PERIOD,
			.misc = 0,
3134
			.size = sizeof(event),
3135 3136
		},
		.time = sched_clock(),
3137
		.id = counter->id,
3138 3139 3140
		.period = period,
	};

3141
	ret = perf_output_begin(&handle, counter, sizeof(event), 1, 0);
3142 3143 3144
	if (ret)
		return;

3145
	perf_output_put(&handle, event);
3146 3147 3148
	perf_output_end(&handle);
}

3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160
/*
 * IRQ throttle logging
 */

static void perf_log_throttle(struct perf_counter *counter, int enable)
{
	struct perf_output_handle handle;
	int ret;

	struct {
		struct perf_event_header	header;
		u64				time;
3161
		u64				id;
3162 3163 3164 3165 3166 3167
	} throttle_event = {
		.header = {
			.type = PERF_EVENT_THROTTLE + 1,
			.misc = 0,
			.size = sizeof(throttle_event),
		},
3168 3169
		.time	= sched_clock(),
		.id	= counter->id,
3170 3171
	};

	ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
3173 3174 3175 3176 3177 3178 3179
	if (ret)
		return;

	perf_output_put(&handle, throttle_event);
	perf_output_end(&handle);
}

3180
/*
3181
 * Generic counter overflow handling, sampling.
3182 3183
 */

3184 3185
int perf_counter_overflow(struct perf_counter *counter, int nmi,
			  struct perf_sample_data *data)
3186
{
3187
	int events = atomic_read(&counter->event_limit);
3188
	int throttle = counter->pmu->unthrottle != NULL;
3189
	struct hw_perf_counter *hwc = &counter->hw;
3190 3191
	int ret = 0;

3192
	if (!throttle) {
3193
		hwc->interrupts++;
3194
	} else {
3195 3196
		if (hwc->interrupts != MAX_INTERRUPTS) {
			hwc->interrupts++;
3197 3198
			if (HZ * hwc->interrupts >
					(u64)sysctl_perf_counter_sample_rate) {
3199
				hwc->interrupts = MAX_INTERRUPTS;
3200 3201 3202 3203 3204 3205 3206 3207 3208
				perf_log_throttle(counter, 0);
				ret = 1;
			}
		} else {
			/*
			 * Keep re-disabling the counter even though we disabled
			 * it on the previous pass - just in case we raced with a
			 * sched-in and the counter got enabled again:
			 */
3209 3210 3211
			ret = 1;
		}
	}
3212

3213 3214 3215 3216 3217 3218 3219 3220 3221 3222
	if (counter->attr.freq) {
		u64 now = sched_clock();
		s64 delta = now - hwc->freq_stamp;

		hwc->freq_stamp = now;

		if (delta > 0 && delta < TICK_NSEC)
			perf_adjust_period(counter, NSEC_PER_SEC / (int)delta);
	}

3223 3224 3225 3226 3227
	/*
	 * XXX event_limit might not quite work as expected on inherited
	 * counters
	 */

3228
	counter->pending_kill = POLL_IN;
3229 3230
	if (events && atomic_dec_and_test(&counter->event_limit)) {
		ret = 1;
3231
		counter->pending_kill = POLL_HUP;
3232 3233 3234 3235 3236 3237 3238 3239
		if (nmi) {
			counter->pending_disable = 1;
			perf_pending_queue(&counter->pending,
					   perf_pending_counter);
		} else
			perf_counter_disable(counter);
	}

3240
	perf_counter_output(counter, nmi, data);
3241
	return ret;
3242 3243
}

3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269
/*
 * Generic software counter infrastructure
 */

static void perf_swcounter_update(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	u64 prev, now;
	s64 delta;

again:
	prev = atomic64_read(&hwc->prev_count);
	now = atomic64_read(&hwc->count);
	if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);
}

static void perf_swcounter_set_period(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;

	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_add(period, &hwc->period_left);
		hwc->last_period = period;
	}

	atomic64_set(&hwc->prev_count, -left);
	atomic64_set(&hwc->count, -left);
}
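
/*
 * Worked example of the arithmetic above: with sample_period = 1000 and
 * period_left = -250 (the previous period overshot by 250 events), the
 * second branch moves period_left to 750 and seeds prev_count/count with
 * -750, so the counter goes non-negative - and overflows - after another
 * 750 events, keeping the long-run rate at one sample per 1000 events.
 */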

3288 3289
static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
{
3290
	enum hrtimer_restart ret = HRTIMER_RESTART;
3291
	struct perf_sample_data data;
3292
	struct perf_counter *counter;
3293
	u64 period;
3294 3295

	counter	= container_of(hrtimer, struct perf_counter, hw.hrtimer);
3296
	counter->pmu->read(counter);
3297

3298 3299
	data.addr = 0;
	data.regs = get_irq_regs();
3300 3301 3302 3303
	/*
	 * In case we exclude kernel IPs or are somehow not in interrupt
	 * context, provide the next best thing, the user IP.
	 */
3304
	if ((counter->attr.exclude_kernel || !data.regs) &&
3305
			!counter->attr.exclude_user)
3306
		data.regs = task_pt_regs(current);
3307

3308 3309
	if (data.regs) {
		if (perf_counter_overflow(counter, 0, &data))
3310 3311
			ret = HRTIMER_NORESTART;
	}
3312

3313
	period = max_t(u64, 10000, counter->hw.sample_period);
3314
	hrtimer_forward_now(hrtimer, ns_to_ktime(period));
3315

3316
	return ret;
3317 3318 3319
}

static void perf_swcounter_overflow(struct perf_counter *counter,
3320
				    int nmi, struct perf_sample_data *data)
3321
{
3322
	data->period = counter->hw.last_period;
3323

3324 3325
	perf_swcounter_update(counter);
	perf_swcounter_set_period(counter);
3326
	if (perf_counter_overflow(counter, nmi, data))
3327 3328
		/* soft-disable the counter */
		;
3329 3330
}

3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368
static int perf_swcounter_is_counting(struct perf_counter *counter)
{
	struct perf_counter_context *ctx;
	unsigned long flags;
	int count;

	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		return 1;

	if (counter->state != PERF_COUNTER_STATE_INACTIVE)
		return 0;

	/*
	 * If the counter is inactive, it could be just because
	 * its task is scheduled out, or because it's in a group
	 * which could not go on the PMU.  We want to count in
	 * the first case but not the second.  If the context is
	 * currently active then an inactive software counter must
	 * be the second case.  If it's not currently active then
	 * we need to know whether the counter was active when the
	 * context was last active, which we can determine by
	 * comparing counter->tstamp_stopped with ctx->time.
	 *
	 * We are within an RCU read-side critical section,
	 * which protects the existence of *ctx.
	 */
	ctx = counter->ctx;
	spin_lock_irqsave(&ctx->lock, flags);
	count = 1;
	/* Re-check state now we have the lock */
	if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
	    counter->ctx->is_active ||
	    counter->tstamp_stopped < ctx->time)
		count = 0;
	spin_unlock_irqrestore(&ctx->lock, flags);
	return count;
}

static int perf_swcounter_match(struct perf_counter *counter,
				enum perf_type_id type,
				u32 event, struct pt_regs *regs)
{
3373
	if (!perf_swcounter_is_counting(counter))
3374 3375
		return 0;

3376 3377 3378
	if (counter->attr.type != type)
		return 0;
	if (counter->attr.config != event)
3379 3380
		return 0;

3381
	if (regs) {
3382
		if (counter->attr.exclude_user && user_mode(regs))
3383
			return 0;
3384

3385
		if (counter->attr.exclude_kernel && !user_mode(regs))
3386 3387
			return 0;
	}
3388 3389 3390 3391

	return 1;
}

3392
static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
3393
			       int nmi, struct perf_sample_data *data)
3394 3395
{
	int neg = atomic64_add_negative(nr, &counter->hw.count);
3396

3397 3398
	if (counter->hw.sample_period && !neg && data->regs)
		perf_swcounter_overflow(counter, nmi, data);
3399 3400
}

3401
static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
3402 3403 3404
				     enum perf_type_id type,
				     u32 event, u64 nr, int nmi,
				     struct perf_sample_data *data)
3405 3406 3407
{
	struct perf_counter *counter;

3408
	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3409 3410
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
3413 3414
		if (perf_swcounter_match(counter, type, event, data->regs))
			perf_swcounter_add(counter, nr, nmi, data);
3415
	}
	rcu_read_unlock();
3417 3418
}

static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
{
	if (in_nmi())
		return &cpuctx->recursion[3];

	if (in_irq())
		return &cpuctx->recursion[2];

	if (in_softirq())
		return &cpuctx->recursion[1];

	return &cpuctx->recursion[0];
}

3433 3434 3435
static void do_perf_swcounter_event(enum perf_type_id type, u32 event,
				    u64 nr, int nmi,
				    struct perf_sample_data *data)
3436 3437
{
	struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
	int *recursion = perf_swcounter_recursion_context(cpuctx);
3439
	struct perf_counter_context *ctx;

	if (*recursion)
		goto out;

	(*recursion)++;
	barrier();
3446

3447
	perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
3448
				 nr, nmi, data);
3449 3450 3451 3452 3453 3454 3455
	rcu_read_lock();
	/*
	 * It doesn't really matter which of the child contexts the
	 * event ends up in.
	 */
	ctx = rcu_dereference(current->perf_counter_ctxp);
	if (ctx)
3456
		perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data);
3457
	rcu_read_unlock();
3458

	barrier();
	(*recursion)--;

out:
3463 3464 3465
	put_cpu_var(perf_cpu_context);
}

3466 3467
void __perf_swcounter_event(u32 event, u64 nr, int nmi,
			    struct pt_regs *regs, u64 addr)
3468
{
3469 3470 3471 3472 3473 3474
	struct perf_sample_data data = {
		.regs = regs,
		.addr = addr,
	};

	do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, &data);
3475 3476
}

3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492
static void perf_swcounter_read(struct perf_counter *counter)
{
	perf_swcounter_update(counter);
}

static int perf_swcounter_enable(struct perf_counter *counter)
{
	perf_swcounter_set_period(counter);
	return 0;
}

static void perf_swcounter_disable(struct perf_counter *counter)
{
	perf_swcounter_update(counter);
}

3493
static const struct pmu perf_ops_generic = {
3494 3495 3496 3497 3498
	.enable		= perf_swcounter_enable,
	.disable	= perf_swcounter_disable,
	.read		= perf_swcounter_read,
};

3499 3500 3501 3502
/*
 * Software counter: cpu wall time clock
 */

3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514
static void cpu_clock_perf_counter_update(struct perf_counter *counter)
{
	int cpu = raw_smp_processor_id();
	s64 prev;
	u64 now;

	now = cpu_clock(cpu);
	prev = atomic64_read(&counter->hw.prev_count);
	atomic64_set(&counter->hw.prev_count, now);
	atomic64_add(now - prev, &counter->count);
}

3515 3516 3517 3518 3519 3520
static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int cpu = raw_smp_processor_id();

	atomic64_set(&hwc->prev_count, cpu_clock(cpu));
3521 3522
	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swcounter_hrtimer;
3523 3524
	if (hwc->sample_period) {
		u64 period = max_t(u64, 10000, hwc->sample_period);
3525
		__hrtimer_start_range_ns(&hwc->hrtimer,
3526
				ns_to_ktime(period), 0,
3527 3528 3529 3530 3531 3532
				HRTIMER_MODE_REL, 0);
	}

	return 0;
}

3533 3534
static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
{
3535
	if (counter->hw.sample_period)
3536
		hrtimer_cancel(&counter->hw.hrtimer);
3537
	cpu_clock_perf_counter_update(counter);
3538 3539 3540 3541
}

static void cpu_clock_perf_counter_read(struct perf_counter *counter)
{
3542
	cpu_clock_perf_counter_update(counter);
3543 3544
}

3545
static const struct pmu perf_ops_cpu_clock = {
	.enable		= cpu_clock_perf_counter_enable,
	.disable	= cpu_clock_perf_counter_disable,
	.read		= cpu_clock_perf_counter_read,
3549 3550
};

3551 3552 3553 3554
/*
 * Software counter: task time clock
 */

3555
static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
I
Ingo Molnar 已提交
3556
{
3557
	u64 prev;
I
Ingo Molnar 已提交
3558 3559
	s64 delta;

3560
	prev = atomic64_xchg(&counter->hw.prev_count, now);
I
Ingo Molnar 已提交
3561 3562
	delta = now - prev;
	atomic64_add(delta, &counter->count);
3563 3564
}

3565
static int task_clock_perf_counter_enable(struct perf_counter *counter)
I
Ingo Molnar 已提交
3566
{
3567
	struct hw_perf_counter *hwc = &counter->hw;
3568 3569 3570
	u64 now;

	now = counter->ctx->time;
3571

3572
	atomic64_set(&hwc->prev_count, now);
3573 3574
	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swcounter_hrtimer;
3575 3576
	if (hwc->sample_period) {
		u64 period = max_t(u64, 10000, hwc->sample_period);
3577
		__hrtimer_start_range_ns(&hwc->hrtimer,
3578
				ns_to_ktime(period), 0,
3579 3580
				HRTIMER_MODE_REL, 0);
	}
3581 3582

	return 0;
}

static void task_clock_perf_counter_disable(struct perf_counter *counter)
3586
{
3587
	if (counter->hw.sample_period)
3588
		hrtimer_cancel(&counter->hw.hrtimer);
3589 3590
	task_clock_perf_counter_update(counter, counter->ctx->time);

3591
}
I
Ingo Molnar 已提交
3592

3593 3594
static void task_clock_perf_counter_read(struct perf_counter *counter)
{
3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606
	u64 time;

	if (!in_nmi()) {
		update_context_time(counter->ctx);
		time = counter->ctx->time;
	} else {
		u64 now = perf_clock();
		u64 delta = now - counter->ctx->timestamp;
		time = counter->ctx->time + delta;
	}

	task_clock_perf_counter_update(counter, time);
3607 3608
}

3609
static const struct pmu perf_ops_task_clock = {
	.enable		= task_clock_perf_counter_enable,
	.disable	= task_clock_perf_counter_disable,
	.read		= task_clock_perf_counter_read,
3613 3614
};

3615 3616 3617
#ifdef CONFIG_EVENT_PROFILE
void perf_tpcounter_event(int event_id)
{
	struct perf_sample_data data = {
		.regs = get_irq_regs(),
		.addr = 0,
	};

	if (!data.regs)
		data.regs = task_pt_regs(current);

	do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, &data);
}
EXPORT_SYMBOL_GPL(perf_tpcounter_event);

extern int ftrace_profile_enable(int);
extern void ftrace_profile_disable(int);

static void tp_perf_counter_destroy(struct perf_counter *counter)
{
3635
	ftrace_profile_disable(perf_event_id(&counter->attr));
3636 3637
}

3638
static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
3639
{
3640
	int event_id = perf_event_id(&counter->attr);
3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651
	int ret;

	ret = ftrace_profile_enable(event_id);
	if (ret)
		return NULL;

	counter->destroy = tp_perf_counter_destroy;

	return &perf_ops_generic;
}
#else
3652
static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
3653 3654 3655 3656 3657
{
	return NULL;
}
#endif

3658 3659 3660 3661 3662 3663
atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];

static void sw_perf_counter_destroy(struct perf_counter *counter)
{
	u64 event = counter->attr.config;

3664 3665
	WARN_ON(counter->parent);

3666 3667 3668
	atomic_dec(&perf_swcounter_enabled[event]);
}

3669
static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
3670
{
3671
	const struct pmu *pmu = NULL;
3672
	u64 event = counter->attr.config;
3673

3674 3675 3676 3677 3678 3679 3680
	/*
	 * Software counters (currently) can't in general distinguish
	 * between user, kernel and hypervisor events.
	 * However, context switches and cpu migrations are considered
	 * to be kernel events, and page faults are never hypervisor
	 * events.
	 */
3681
	switch (event) {
3682
	case PERF_COUNT_SW_CPU_CLOCK:
3683
		pmu = &perf_ops_cpu_clock;
3684

3685
		break;
3686
	case PERF_COUNT_SW_TASK_CLOCK:
3687 3688 3689 3690 3691
		/*
		 * If the user instantiates this as a per-cpu counter,
		 * use the cpu_clock counter instead.
		 */
		if (counter->ctx->task)
3692
			pmu = &perf_ops_task_clock;
3693
		else
3694
			pmu = &perf_ops_cpu_clock;
3695

3696
		break;
3697 3698 3699 3700 3701
	case PERF_COUNT_SW_PAGE_FAULTS:
	case PERF_COUNT_SW_PAGE_FAULTS_MIN:
	case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
	case PERF_COUNT_SW_CONTEXT_SWITCHES:
	case PERF_COUNT_SW_CPU_MIGRATIONS:
3702 3703 3704 3705
		if (!counter->parent) {
			atomic_inc(&perf_swcounter_enabled[event]);
			counter->destroy = sw_perf_counter_destroy;
		}
3706
		pmu = &perf_ops_generic;
3707
		break;
3708
	}
3709

3710
	return pmu;
3711 3712
}

/*
 * Allocate and initialize a counter structure
 */
static struct perf_counter *
3717
perf_counter_alloc(struct perf_counter_attr *attr,
3718
		   int cpu,
3719
		   struct perf_counter_context *ctx,
3720
		   struct perf_counter *group_leader,
3721
		   struct perf_counter *parent_counter,
3722
		   gfp_t gfpflags)
T
Thomas Gleixner 已提交
3723
{
3724
	const struct pmu *pmu;
I
Ingo Molnar 已提交
3725
	struct perf_counter *counter;
3726
	struct hw_perf_counter *hwc;
3727
	long err;
T
Thomas Gleixner 已提交
3728

3729
	counter = kzalloc(sizeof(*counter), gfpflags);
T
Thomas Gleixner 已提交
3730
	if (!counter)
3731
		return ERR_PTR(-ENOMEM);
T
Thomas Gleixner 已提交
3732

3733 3734 3735 3736 3737 3738 3739
	/*
	 * Single counters are their own group leaders, with an
	 * empty sibling list:
	 */
	if (!group_leader)
		group_leader = counter;

3740 3741 3742
	mutex_init(&counter->child_mutex);
	INIT_LIST_HEAD(&counter->child_list);

3743
	INIT_LIST_HEAD(&counter->list_entry);
P
Peter Zijlstra 已提交
3744
	INIT_LIST_HEAD(&counter->event_entry);
3745
	INIT_LIST_HEAD(&counter->sibling_list);
T
Thomas Gleixner 已提交
3746 3747
	init_waitqueue_head(&counter->waitq);

3748 3749
	mutex_init(&counter->mmap_mutex);

3750
	counter->cpu		= cpu;
3751
	counter->attr		= *attr;
3752 3753 3754 3755 3756
	counter->group_leader	= group_leader;
	counter->pmu		= NULL;
	counter->ctx		= ctx;
	counter->oncpu		= -1;

3757 3758
	counter->parent		= parent_counter;

3759 3760 3761 3762
	counter->ns		= get_pid_ns(current->nsproxy->pid_ns);
	counter->id		= atomic64_inc_return(&perf_counter_id);

	counter->state		= PERF_COUNTER_STATE_INACTIVE;
3763

3764
	if (attr->disabled)
3765 3766
		counter->state = PERF_COUNTER_STATE_OFF;

3767
	pmu = NULL;
3768

3769
	hwc = &counter->hw;
3770
	hwc->sample_period = attr->sample_period;
3771
	if (attr->freq && attr->sample_freq)
3772 3773 3774
		hwc->sample_period = 1;

	atomic64_set(&hwc->period_left, hwc->sample_period);
3775

3776
	/*
3777
	 * we currently do not support PERF_SAMPLE_GROUP on inherited counters
3778
	 */
3779
	if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
3780 3781
		goto done;

3782
	switch (attr->type) {
3783
	case PERF_TYPE_RAW:
3784
	case PERF_TYPE_HARDWARE:
3785
	case PERF_TYPE_HW_CACHE:
3786
		pmu = hw_perf_counter_init(counter);
3787 3788 3789
		break;

	case PERF_TYPE_SOFTWARE:
3790
		pmu = sw_perf_counter_init(counter);
3791 3792 3793
		break;

	case PERF_TYPE_TRACEPOINT:
3794
		pmu = tp_perf_counter_init(counter);
3795
		break;
3796 3797 3798

	default:
		break;
3799
	}
3800 3801
done:
	err = 0;
3802
	if (!pmu)
3803
		err = -EINVAL;
3804 3805
	else if (IS_ERR(pmu))
		err = PTR_ERR(pmu);
3806

3807
	if (err) {
3808 3809
		if (counter->ns)
			put_pid_ns(counter->ns);
I
Ingo Molnar 已提交
3810
		kfree(counter);
3811
		return ERR_PTR(err);
I
Ingo Molnar 已提交
3812
	}
3813

3814
	counter->pmu = pmu;
T
Thomas Gleixner 已提交
3815

3816 3817 3818 3819 3820 3821 3822
	if (!counter->parent) {
		atomic_inc(&nr_counters);
		if (counter->attr.mmap)
			atomic_inc(&nr_mmap_counters);
		if (counter->attr.comm)
			atomic_inc(&nr_comm_counters);
	}
3823

	return counter;
}

3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839 3840 3841 3842 3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884 3885 3886 3887 3888 3889 3890 3891 3892 3893 3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905
static int perf_copy_attr(struct perf_counter_attr __user *uattr,
			  struct perf_counter_attr *attr)
{
	int ret;
	u32 size;

	if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
		return -EFAULT;

	/*
	 * Zero the full structure, so that a short copy leaves the unread
	 * tail zeroed.
	 */
	memset(attr, 0, sizeof(*attr));

	ret = get_user(size, &uattr->size);
	if (ret)
		return ret;

	if (size > PAGE_SIZE)	/* silly large */
		goto err_size;

	if (!size)		/* abi compat */
		size = PERF_ATTR_SIZE_VER0;

	if (size < PERF_ATTR_SIZE_VER0)
		goto err_size;

	/*
	 * If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0.
	 */
	if (size > sizeof(*attr)) {
		unsigned long val;
		unsigned long __user *addr;
		unsigned long __user *end;

		addr = PTR_ALIGN((void __user *)uattr + sizeof(*attr),
				sizeof(unsigned long));
		end  = PTR_ALIGN((void __user *)uattr + size,
				sizeof(unsigned long));

		for (; addr < end; addr++) {
			ret = get_user(val, addr);
			if (ret)
				return ret;
			if (val)
				goto err_size;
		}
		/*
		 * Only copy the part of the structure we know about,
		 * the tail has been verified to be zero above.
		 */
		size = sizeof(*attr);
	}

	ret = copy_from_user(attr, uattr, size);
	if (ret)
		return -EFAULT;

	/*
	 * If the type exists, the corresponding creation will verify
	 * the attr->config.
	 */
	if (attr->type >= PERF_TYPE_MAX)
		return -EINVAL;

	if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)
		return -EINVAL;

	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
		return -EINVAL;

	if (attr->read_format & ~(PERF_FORMAT_MAX-1))
		return -EINVAL;

out:
	return ret;

err_size:
	put_user(sizeof(*attr), &uattr->size);
	ret = -E2BIG;
	goto out;
}

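/*
 * Illustrative user-space call (a sketch only; the syscall number is
 * architecture specific and the event constants shown are assumptions
 * about the uapi header, not taken from this file):
 *
 *	struct perf_counter_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size   = sizeof(attr);
 *	attr.type   = PERF_TYPE_HARDWARE;
 *	attr.config = PERF_COUNT_HW_CPU_CYCLES;
 *
 *	fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
 *
 * pid == 0 counts the calling task, cpu == -1 means "on any CPU",
 * group_fd == -1 makes this counter its own group leader, and flags
 * must currently be zero.
 */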
/**
 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
 *
 * @attr_uptr:	event type attributes for monitoring/sampling
 * @pid:		target pid
 * @cpu:		target cpu
 * @group_fd:		group leader counter fd
 */
SYSCALL_DEFINE5(perf_counter_open,
		struct perf_counter_attr __user *, attr_uptr,
		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
	struct perf_counter *counter, *group_leader;
	struct perf_counter_attr attr;
	struct perf_counter_context *ctx;
	struct file *counter_file = NULL;
	struct file *group_file = NULL;
	int fput_needed = 0;
	int fput_needed2 = 0;
	int ret;

	/* for future expandability... */
	if (flags)
		return -EINVAL;

	ret = perf_copy_attr(attr_uptr, &attr);
	if (ret)
		return ret;

	if (!attr.exclude_kernel) {
		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
	}

	if (attr.freq) {
		if (attr.sample_freq > sysctl_perf_counter_sample_rate)
			return -EINVAL;
	}

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pid, cpu);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/*
	 * Look up the group leader (we will attach this counter to it):
	 */
	group_leader = NULL;
	if (group_fd != -1) {
		ret = -EINVAL;
		group_file = fget_light(group_fd, &fput_needed);
		if (!group_file)
			goto err_put_context;
		if (group_file->f_op != &perf_fops)
			goto err_put_context;

		group_leader = group_file->private_data;
		/*
		 * Do not allow a recursive hierarchy (this new sibling
		 * becoming part of another group-sibling):
		 */
		if (group_leader->group_leader != group_leader)
			goto err_put_context;
		/*
		 * Do not allow to attach to a group in a different
		 * task or CPU context:
		 */
		if (group_leader->ctx != ctx)
			goto err_put_context;
		/*
		 * Only a group leader can be exclusive or pinned
		 */
		if (attr.exclusive || attr.pinned)
			goto err_put_context;
	}

	counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
				     NULL, GFP_KERNEL);
	ret = PTR_ERR(counter);
	if (IS_ERR(counter))
		goto err_put_context;

	ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
	if (ret < 0)
		goto err_free_put_context;

	counter_file = fget_light(ret, &fput_needed2);
	if (!counter_file)
		goto err_free_put_context;

	counter->filp = counter_file;
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, counter, cpu);
	++ctx->generation;
	mutex_unlock(&ctx->mutex);

	counter->owner = current;
	get_task_struct(current);
	mutex_lock(&current->perf_counter_mutex);
	list_add_tail(&counter->owner_entry, &current->perf_counter_list);
	mutex_unlock(&current->perf_counter_mutex);

	fput_light(counter_file, fput_needed2);

out_fput:
	fput_light(group_file, fput_needed);

	return ret;

err_free_put_context:
	kfree(counter);

err_put_context:
	put_ctx(ctx);

	goto out_fput;
}

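/*
 * Counter inheritance across fork(): perf_counter_init_task() below walks
 * the parent's context and clones every counter that has attr.inherit set
 * into the child, via inherit_group()/inherit_counter().  When the child
 * exits, perf_counter_exit_task() folds the child's counts back into the
 * parent counters.
 */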
/*
 * inherit a counter from parent task to child task:
 */
static struct perf_counter *
inherit_counter(struct perf_counter *parent_counter,
	      struct task_struct *parent,
	      struct perf_counter_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_counter *group_leader,
	      struct perf_counter_context *child_ctx)
{
	struct perf_counter *child_counter;

	/*
	 * Instead of creating recursive hierarchies of counters,
	 * we link inherited counters back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_counter->parent)
		parent_counter = parent_counter->parent;

	child_counter = perf_counter_alloc(&parent_counter->attr,
					   parent_counter->cpu, child_ctx,
					   group_leader, parent_counter,
					   GFP_KERNEL);
	if (IS_ERR(child_counter))
		return child_counter;
	get_ctx(child_ctx);

	/*
	 * Make the child state follow the state of the parent counter,
	 * not its attr.disabled bit.  We hold the parent's mutex,
	 * so we won't race with perf_counter_{en, dis}able_family.
	 */
	if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
		child_counter->state = PERF_COUNTER_STATE_INACTIVE;
	else
		child_counter->state = PERF_COUNTER_STATE_OFF;

	if (parent_counter->attr.freq)
		child_counter->hw.sample_period = parent_counter->hw.sample_period;

	/*
	 * Link it up in the child's context:
	 */
	add_counter_to_ctx(child_counter, child_ctx);

	/*
	 * Get a reference to the parent filp - we will fput it
	 * when the child counter exits. This is safe to do because
	 * we are in the parent and we know that the filp still
	 * exists and has a nonzero count:
	 */
	atomic_long_inc(&parent_counter->filp->f_count);

	/*
	 * Link this into the parent counter's child list
	 */
	WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
	mutex_lock(&parent_counter->child_mutex);
	list_add_tail(&child_counter->child_list, &parent_counter->child_list);
	mutex_unlock(&parent_counter->child_mutex);

	return child_counter;
}

static int inherit_group(struct perf_counter *parent_counter,
	      struct task_struct *parent,
	      struct perf_counter_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_counter_context *child_ctx)
{
	struct perf_counter *leader;
	struct perf_counter *sub;
	struct perf_counter *child_ctr;

	leader = inherit_counter(parent_counter, parent, parent_ctx,
				 child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
		child_ctr = inherit_counter(sub, parent, parent_ctx,
					    child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}

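/*
 * Fold a dying child counter back into its parent: add the child's count
 * and enabled/running times to the parent's totals, unlink it from the
 * parent's child_list, and drop the filp reference taken in
 * inherit_counter().
 */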
static void sync_child_counter(struct perf_counter *child_counter,
			       struct task_struct *child)
{
	struct perf_counter *parent_counter = child_counter->parent;
	u64 child_val;

	if (child_counter->attr.inherit_stat)
		perf_counter_read_event(child_counter, child);

	child_val = atomic64_read(&child_counter->count);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_counter->count);
	atomic64_add(child_counter->total_time_enabled,
		     &parent_counter->child_total_time_enabled);
	atomic64_add(child_counter->total_time_running,
		     &parent_counter->child_total_time_running);

	/*
	 * Remove this counter from the parent's list
	 */
	WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
	mutex_lock(&parent_counter->child_mutex);
	list_del_init(&child_counter->child_list);
	mutex_unlock(&parent_counter->child_mutex);

	/*
	 * Release the parent counter, if this was the last
	 * reference to it.
	 */
	fput(parent_counter->filp);
}

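/*
 * Detach one counter from the exiting child's context.  Inherited counters
 * are synced into their parent and freed here; counters without a parent
 * merely get removed and live on until their file descriptor is released.
 */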
static void
__perf_counter_exit_task(struct perf_counter *child_counter,
			 struct perf_counter_context *child_ctx,
			 struct task_struct *child)
{
	struct perf_counter *parent_counter;

	update_counter_times(child_counter);
	perf_counter_remove_from_context(child_counter);

	parent_counter = child_counter->parent;
	/*
	 * It can happen that parent exits first, and has counters
	 * that are still around due to the child reference. These
	 * counters need to be zapped - but otherwise linger.
	 */
	if (parent_counter) {
		sync_child_counter(child_counter, child);
		free_counter(child_counter);
	}
}

/*
 * When a child task exits, feed back counter values to parent counters.
 */
void perf_counter_exit_task(struct task_struct *child)
{
	struct perf_counter *child_counter, *tmp;
	struct perf_counter_context *child_ctx;
	unsigned long flags;

	if (likely(!child->perf_counter_ctxp))
		return;

	local_irq_save(flags);
	/*
	 * We can't reschedule here because interrupts are disabled,
	 * and either child is current or it is a task that can't be
	 * scheduled, so we are now safe from rescheduling changing
	 * our context.
	 */
	child_ctx = child->perf_counter_ctxp;
	__perf_counter_task_sched_out(child_ctx);

	/*
	 * Take the context lock here so that if find_get_context is
	 * reading child->perf_counter_ctxp, we wait until it has
	 * incremented the context's refcount before we do put_ctx below.
	 */
	spin_lock(&child_ctx->lock);
	child->perf_counter_ctxp = NULL;
	if (child_ctx->parent_ctx) {
		/*
		 * This context is a clone; unclone it so it can't get
		 * swapped to another process while we're removing all
		 * the counters from it.
		 */
		put_ctx(child_ctx->parent_ctx);
		child_ctx->parent_ctx = NULL;
	}
	spin_unlock(&child_ctx->lock);
	local_irq_restore(flags);

	/*
	 * We can recurse on the same lock type through:
	 *
	 *   __perf_counter_exit_task()
	 *     sync_child_counter()
	 *       fput(parent_counter->filp)
	 *         perf_release()
	 *           mutex_lock(&ctx->mutex)
	 *
	 * But since it's the parent context it won't be the same instance.
	 */
	mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);

again:
	list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
				 list_entry)
		__perf_counter_exit_task(child_counter, child_ctx, child);

	/*
	 * If the last counter was a group counter, it will have appended all
	 * its siblings to the list, but we obtained 'tmp' before that which
	 * will still point to the list head terminating the iteration.
	 */
	if (!list_empty(&child_ctx->counter_list))
		goto again;

	mutex_unlock(&child_ctx->mutex);

	put_ctx(child_ctx);
}

/*
 * free an unexposed, unused context as created by inheritance by
 * perf_counter_init_task() below, used by fork() in case of failure.
 */
void perf_counter_free_task(struct task_struct *task)
{
	struct perf_counter_context *ctx = task->perf_counter_ctxp;
	struct perf_counter *counter, *tmp;

	if (!ctx)
		return;

	mutex_lock(&ctx->mutex);
again:
	list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) {
		struct perf_counter *parent = counter->parent;

		if (WARN_ON_ONCE(!parent))
			continue;

		mutex_lock(&parent->child_mutex);
		list_del_init(&counter->child_list);
		mutex_unlock(&parent->child_mutex);

		fput(parent->filp);

		list_del_counter(counter, ctx);
		free_counter(counter);
	}

	if (!list_empty(&ctx->counter_list))
		goto again;

	mutex_unlock(&ctx->mutex);

	put_ctx(ctx);
}

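/*
 * Called from the fork path: set up the child's perf_counter context and
 * clone each of the parent's inheritable counters into it.  If every
 * counter could be inherited, the child context is marked as a clone of
 * the parent's (parent_ctx/parent_gen), which other parts of this file
 * rely on when deciding whether two contexts are equivalent.
 */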
/*
 * Initialize the perf_counter context in task_struct
 */
int perf_counter_init_task(struct task_struct *child)
{
	struct perf_counter_context *child_ctx, *parent_ctx;
	struct perf_counter_context *cloned_ctx;
	struct perf_counter *counter;
	struct task_struct *parent = current;
	int inherited_all = 1;
	int ret = 0;

	child->perf_counter_ctxp = NULL;

	mutex_init(&child->perf_counter_mutex);
	INIT_LIST_HEAD(&child->perf_counter_list);

	if (likely(!parent->perf_counter_ctxp))
		return 0;

	/*
	 * This is executed from the parent task context, so inherit
	 * counters that have been marked for cloning.
	 * First allocate and initialize a context for the child.
	 */

	child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
	if (!child_ctx)
		return -ENOMEM;

	__perf_counter_init_context(child_ctx, child);
	child->perf_counter_ctxp = child_ctx;
	get_task_struct(child);

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent);

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) {
		if (counter != counter->group_leader)
			continue;

		if (!counter->attr.inherit) {
			inherited_all = 0;
			continue;
		}

		ret = inherit_group(counter, parent, parent_ctx,
					     child, child_ctx);
		if (ret) {
			inherited_all = 0;
			break;
		}
	}

	if (inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 * Note that if the parent is a clone, it could get
		 * uncloned at any point, but that doesn't matter
		 * because the list of counters and the generation
		 * count can't have changed since we took the mutex.
		 */
		cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);

	return ret;
}

static void __cpuinit perf_counter_init_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = &per_cpu(perf_cpu_context, cpu);
	__perf_counter_init_context(&cpuctx->ctx, NULL);

	spin_lock(&perf_resource_lock);
	cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
	spin_unlock(&perf_resource_lock);

	hw_perf_counter_setup(cpu);
}

#ifdef CONFIG_HOTPLUG_CPU
static void __perf_counter_exit_cpu(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = &cpuctx->ctx;
	struct perf_counter *counter, *tmp;

	list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
		__perf_counter_remove_from_context(counter);
}
static void perf_counter_exit_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &cpuctx->ctx;

	mutex_lock(&ctx->mutex);
	smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
	mutex_unlock(&ctx->mutex);
}
#else
static inline void perf_counter_exit_cpu(int cpu) { }
#endif

static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		perf_counter_init_cpu(cpu);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		perf_counter_exit_cpu(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
static struct notifier_block __cpuinitdata perf_cpu_nb = {
	.notifier_call		= perf_cpu_notify,
	.priority		= 20,
};

void __init perf_counter_init(void)
{
	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	register_cpu_notifier(&perf_cpu_nb);
}

static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%d\n", perf_reserved_percpu);
}

static ssize_t
perf_set_reserve_percpu(struct sysdev_class *class,
			const char *buf,
			size_t count)
{
	struct perf_cpu_context *cpuctx;
	unsigned long val;
	int err, cpu, mpt;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > perf_max_counters)
		return -EINVAL;

	spin_lock(&perf_resource_lock);
	perf_reserved_percpu = val;
	for_each_online_cpu(cpu) {
		cpuctx = &per_cpu(perf_cpu_context, cpu);
		spin_lock_irq(&cpuctx->ctx.lock);
		mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
			  perf_max_counters - perf_reserved_percpu);
		cpuctx->max_pertask = mpt;
		spin_unlock_irq(&cpuctx->ctx.lock);
	}
	spin_unlock(&perf_resource_lock);

	return count;
}

static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%d\n", perf_overcommit);
}

static ssize_t
perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
{
	unsigned long val;
	int err;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > 1)
		return -EINVAL;


	spin_lock(&perf_resource_lock);
	perf_overcommit = val;
	spin_unlock(&perf_resource_lock);

	return count;
}

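/*
 * Expose the reservation knobs via the cpu sysdev class.  With the group
 * name used below they should appear as
 * /sys/devices/system/cpu/perf_counters/{reserve_percpu,overcommit}
 * (path shown for illustration).
 */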
static SYSDEV_CLASS_ATTR(
				reserve_percpu,
				0644,
				perf_show_reserve_percpu,
				perf_set_reserve_percpu
			);

static SYSDEV_CLASS_ATTR(
				overcommit,
				0644,
				perf_show_overcommit,
				perf_set_overcommit
			);

static struct attribute *perfclass_attrs[] = {
	&attr_reserve_percpu.attr,
	&attr_overcommit.attr,
	NULL
};

static struct attribute_group perfclass_attr_group = {
	.attrs			= perfclass_attrs,
	.name			= "perf_counters",
};

static int __init perf_counter_sysfs_init(void)
{
	return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
				  &perfclass_attr_group);
}
device_initcall(perf_counter_sysfs_init);