/*
 * Performance counter core code
 *
 *  Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/sysfs.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/perf_counter.h>

/*
 * Each CPU has a list of per CPU counters:
 */
DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

int perf_max_counters __read_mostly;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

/*
 * Mutex for (sysadmin-configurable) counter reservations:
 */
static DEFINE_MUTEX(perf_resource_mutex);

/*
 * Architecture provided APIs - weak aliases:
 */
extern __weak struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
{
	return ERR_PTR(-EINVAL);
}

void __weak hw_perf_disable_all(void)	 { }
void __weak hw_perf_enable_all(void)	 { }
void __weak hw_perf_counter_setup(void)	 { }

#if BITS_PER_LONG == 64

/*
 * Read the cached value in counter->count, safe against cross-CPU / NMI
 * modifications. 64 bit version - no complications.
 */
static inline u64 perf_counter_read_safe(struct perf_counter *counter)
{
	return (u64) atomic64_read(&counter->count);
}

#else

/*
 * Read the cached value in counter->count, safe against cross-CPU / NMI
 * modifications. 32 bit version: the two halves are read separately and the
 * read is retried if the high word changed in between, so a torn 64-bit
 * update is never observed.
 */
static u64 perf_counter_read_safe(struct perf_counter *counter)
{
	u32 cntl, cnth;

	local_irq_disable();
	do {
		cnth = atomic_read(&counter->count32[1]);
		cntl = atomic_read(&counter->count32[0]);
	} while (cnth != atomic_read(&counter->count32[1]));

	local_irq_enable();

	return cntl | ((u64) cnth) << 32;
}

#endif

static void
list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *group_leader = counter->group_leader;

	/*
	 * Depending on whether it is a standalone or sibling counter,
	 * add it straight to the context's counter list, or to the group
	 * leader's sibling list:
	 */
	if (counter->group_leader == counter)
		list_add_tail(&counter->list_entry, &ctx->counter_list);
	else
		list_add_tail(&counter->list_entry, &group_leader->sibling_list);
}

static void
list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *sibling, *tmp;

	list_del_init(&counter->list_entry);

	/*
	 * If this was a group counter with sibling counters then
	 * upgrade the siblings to singleton counters by adding them
	 * to the context list directly:
	 */
	list_for_each_entry_safe(sibling, tmp,
				 &counter->sibling_list, list_entry) {

		list_del_init(&sibling->list_entry);
		list_add_tail(&sibling->list_entry, &ctx->counter_list);
		WARN_ON_ONCE(!sibling->group_leader);
		WARN_ON_ONCE(sibling->group_leader == sibling);
		sibling->group_leader = sibling;
	}
}

/*
 * Cross CPU call to remove a performance counter
 *
 * We disable the counter on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_counter_remove_from_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock(&ctx->lock);

	if (counter->active) {
		counter->hw_ops->hw_perf_counter_disable(counter);
		counter->active = 0;
		ctx->nr_active--;
		cpuctx->active_oncpu--;
		counter->task = NULL;
	}
	ctx->nr_counters--;

	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level. NOP for non NMI based counters.
	 */
	hw_perf_disable_all();
	list_del_counter(counter, ctx);
	hw_perf_enable_all();

	if (!ctx->task) {
		/*
		 * Allow more per task counters with respect to the
		 * reservation:
		 */
		cpuctx->max_pertask =
			min(perf_max_counters - ctx->nr_counters,
			    perf_max_counters - perf_reserved_percpu);
	}

	spin_unlock(&ctx->lock);
}


/*
 * Remove the counter from a task's (or a CPU's) list of counters.
 *
 * Must be called with counter->mutex held.
 *
 * CPU counters are removed with a smp call. For task counters we only
 * call when the task is on a CPU.
 */
static void perf_counter_remove_from_context(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(counter->cpu,
					 __perf_counter_remove_from_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_counter_remove_from_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can remove the counter safely if the call above did not
	 * succeed.
	 */
	if (!list_empty(&counter->list_entry)) {
		ctx->nr_counters--;
		list_del_counter(counter, ctx);
		counter->task = NULL;
	}
	spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to install and enable a performance counter
 */
static void __perf_install_in_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	int cpu = smp_processor_id();

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	spin_lock(&ctx->lock);

	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level. NOP for non NMI based counters.
	 */
	hw_perf_disable_all();
	list_add_counter(counter, ctx);
	hw_perf_enable_all();

	ctx->nr_counters++;

	if (cpuctx->active_oncpu < perf_max_counters) {
		counter->hw_ops->hw_perf_counter_enable(counter);
		counter->active = 1;
		counter->oncpu = cpu;
		ctx->nr_active++;
		cpuctx->active_oncpu++;
	}

	if (!ctx->task && cpuctx->max_pertask)
		cpuctx->max_pertask--;

	spin_unlock(&ctx->lock);
}

/*
 * Attach a performance counter to a context
 *
 * First we add the counter to the list with the hardware enable bit
 * in counter->hw_config cleared.
 *
 * If the counter is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 */
static void
perf_install_in_context(struct perf_counter_context *ctx,
			struct perf_counter *counter,
			int cpu)
{
	struct task_struct *task = ctx->task;

	counter->ctx = ctx;
	if (!task) {
		/*
		 * Per cpu counters are installed via an smp call and
		 * the install is always successful.
		 */
		smp_call_function_single(cpu, __perf_install_in_context,
					 counter, 1);
		return;
	}

	counter->task = task;
retry:
	task_oncpu_function_call(task, __perf_install_in_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active and the counter has not been added
	 * we need to retry the smp call.
	 */
	if (ctx->nr_active && list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents this context from being scheduled in, so we
	 * can add the counter safely if the call above did not
	 * succeed.
	 */
	if (list_empty(&counter->list_entry)) {
		list_add_counter(counter, ctx);
		ctx->nr_counters++;
	}
	spin_unlock_irq(&ctx->lock);
}

static void
counter_sched_out(struct perf_counter *counter,
		  struct perf_cpu_context *cpuctx,
		  struct perf_counter_context *ctx)
{
	if (!counter->active)
		return;

	counter->hw_ops->hw_perf_counter_disable(counter);
	counter->active	=  0;
	counter->oncpu	= -1;

	cpuctx->active_oncpu--;
	ctx->nr_active--;
}

static void
group_sched_out(struct perf_counter *group_counter,
		struct perf_cpu_context *cpuctx,
		struct perf_counter_context *ctx)
{
	struct perf_counter *counter;

	counter_sched_out(group_counter, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
		counter_sched_out(counter, cpuctx, ctx);
}

/*
 * Called from scheduler to remove the counters of the current task,
 * with interrupts disabled.
 *
 * We stop each counter and update the counter value in counter->count.
 *
 * This does not protect us against NMI, but hw_perf_counter_disable()
 * sets the disabled bit in the control field of counter _before_
 * accessing the counter control register. If a NMI hits, then it will
 * not restart the counter.
 */
void perf_counter_task_sched_out(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &task->perf_counter_ctx;
	struct perf_counter *counter;

	if (likely(!cpuctx->task_ctx))
		return;

	spin_lock(&ctx->lock);
	if (ctx->nr_active) {
		list_for_each_entry(counter, &ctx->counter_list, list_entry)
			group_sched_out(counter, cpuctx, ctx);
	}
	spin_unlock(&ctx->lock);
	cpuctx->task_ctx = NULL;
}

static void
counter_sched_in(struct perf_counter *counter,
		 struct perf_cpu_context *cpuctx,
		 struct perf_counter_context *ctx,
		 int cpu)
{
	counter->hw_ops->hw_perf_counter_enable(counter);
	counter->active = 1;
	counter->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */

	cpuctx->active_oncpu++;
	ctx->nr_active++;
}

static void
group_sched_in(struct perf_counter *group_counter,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx,
	       int cpu)
{
	struct perf_counter *counter;

	counter_sched_in(group_counter, cpuctx, ctx, cpu);

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
		counter_sched_in(counter, cpuctx, ctx, cpu);
}

/*
 * Called from scheduler to add the counters of the current task
 * with interrupts disabled.
 *
 * We restore the counter value and then enable it.
 *
 * This does not protect us against NMI, but hw_perf_counter_enable()
 * sets the enabled bit in the control field of counter _before_
 * accessing the counter control register. If a NMI hits, then it will
 * keep the counter running.
 */
void perf_counter_task_sched_in(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &task->perf_counter_ctx;
	struct perf_counter *counter;

	if (likely(!ctx->nr_counters))
		return;

	spin_lock(&ctx->lock);
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (ctx->nr_active == cpuctx->max_pertask)
			break;

		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of counters:
		 */
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		group_sched_in(counter, cpuctx, ctx, cpu);
	}
	spin_unlock(&ctx->lock);

	cpuctx->task_ctx = ctx;
}

void perf_counter_task_tick(struct task_struct *curr, int cpu)
{
	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
	struct perf_counter *counter;

	if (likely(!ctx->nr_counters))
		return;

	perf_counter_task_sched_out(curr, cpu);

	spin_lock(&ctx->lock);

	/*
	 * Rotate the first entry last (works just fine for group counters too):
	 */
	hw_perf_disable_all();
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		list_del(&counter->list_entry);
		list_add_tail(&counter->list_entry, &ctx->counter_list);
		break;
	}
	hw_perf_enable_all();

	spin_unlock(&ctx->lock);

	perf_counter_task_sched_in(curr, cpu);
}

/*
 * Initialize the perf_counter context in a task_struct:
 */
static void
__perf_counter_init_context(struct perf_counter_context *ctx,
			    struct task_struct *task)
{
	spin_lock_init(&ctx->lock);
	INIT_LIST_HEAD(&ctx->counter_list);
	ctx->nr_counters	= 0;
	ctx->task		= task;
}
/*
 * Initialize the perf_counter context in task_struct
 */
void perf_counter_init_task(struct task_struct *task)
{
	__perf_counter_init_context(&task->perf_counter_ctx, task);
}

/*
 * Cross CPU call to read the hardware counter
 */
static void __hw_perf_counter_read(void *info)
{
	struct perf_counter *counter = info;

	counter->hw_ops->hw_perf_counter_read(counter);
}

static u64 perf_counter_read(struct perf_counter *counter)
{
	/*
	 * If counter is enabled and currently active on a CPU, update the
	 * value in the counter structure:
	 */
	if (counter->active) {
		smp_call_function_single(counter->oncpu,
					 __hw_perf_counter_read, counter, 1);
	}

	return perf_counter_read_safe(counter);
}

/*
 * Cross CPU call to switch performance data pointers
 */
static void __perf_switch_irq_data(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_data *oldirqdata = counter->irqdata;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task) {
		if (cpuctx->task_ctx != ctx)
			return;
		spin_lock(&ctx->lock);
	}

	/* Change the pointer in an NMI-safe way */
	atomic_long_set((atomic_long_t *)&counter->irqdata,
			(unsigned long) counter->usrdata);
	counter->usrdata = oldirqdata;

	if (ctx->task)
		spin_unlock(&ctx->lock);
}

static struct perf_data *perf_switch_irq_data(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_data *oldirqdata = counter->irqdata;
	struct task_struct *task = ctx->task;

	if (!task) {
		smp_call_function_single(counter->cpu,
					 __perf_switch_irq_data,
					 counter, 1);
		return counter->usrdata;
	}

retry:
	spin_lock_irq(&ctx->lock);
	if (!counter->active) {
		counter->irqdata = counter->usrdata;
		counter->usrdata = oldirqdata;
		spin_unlock_irq(&ctx->lock);
		return oldirqdata;
	}
	spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_switch_irq_data, counter);
	/* Might have failed, because task was scheduled out */
	if (counter->irqdata == oldirqdata)
		goto retry;

	return counter->usrdata;
}

static void put_context(struct perf_counter_context *ctx)
{
	if (ctx->task)
		put_task_struct(ctx->task);
}

static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;
	struct task_struct *task;

	/*
	 * If cpu is not a wildcard then this is a percpu counter:
	 */
	if (cpu != -1) {
		/* Must be root to operate on a CPU counter: */
		if (!capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);

		if (cpu < 0 || cpu > num_possible_cpus())
			return ERR_PTR(-EINVAL);

		/*
		 * We could be clever and allow to attach a counter to an
		 * offline CPU and activate it when the CPU comes up, but
		 * that's for later.
		 */
		if (!cpu_isset(cpu, cpu_online_map))
			return ERR_PTR(-ENODEV);

		cpuctx = &per_cpu(perf_cpu_context, cpu);
		ctx = &cpuctx->ctx;

		WARN_ON_ONCE(ctx->task);
		return ctx;
	}

	rcu_read_lock();
	if (!pid)
		task = current;
	else
		task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	if (!task)
		return ERR_PTR(-ESRCH);

	ctx = &task->perf_counter_ctx;
	ctx->task = task;

	/* Reuse ptrace permission checks for now. */
	if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
		put_context(ctx);
		return ERR_PTR(-EACCES);
	}

	return ctx;
}

/*
 * Called when the last reference to the file is gone.
 */
static int perf_release(struct inode *inode, struct file *file)
{
	struct perf_counter *counter = file->private_data;
	struct perf_counter_context *ctx = counter->ctx;

	file->private_data = NULL;

	mutex_lock(&counter->mutex);

	perf_counter_remove_from_context(counter);
	put_context(ctx);

	mutex_unlock(&counter->mutex);

	kfree(counter);

	return 0;
}

/*
 * Read the performance counter - simple non blocking version for now
 */
static ssize_t
perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
{
	u64 cntval;

	if (count != sizeof(cntval))
		return -EINVAL;

	mutex_lock(&counter->mutex);
	cntval = perf_counter_read(counter);
	mutex_unlock(&counter->mutex);

	return put_user(cntval, (u64 __user *) buf) ? -EFAULT : sizeof(cntval);
}

static ssize_t
perf_copy_usrdata(struct perf_data *usrdata, char __user *buf, size_t count)
{
	if (!usrdata->len)
		return 0;

	count = min(count, (size_t)usrdata->len);
	if (copy_to_user(buf, usrdata->data + usrdata->rd_idx, count))
		return -EFAULT;

	/* Adjust the counters */
	usrdata->len -= count;
	if (!usrdata->len)
		usrdata->rd_idx = 0;
	else
		usrdata->rd_idx += count;

	return count;
}

static ssize_t
perf_read_irq_data(struct perf_counter	*counter,
		   char __user		*buf,
		   size_t		count,
		   int			nonblocking)
{
	struct perf_data *irqdata, *usrdata;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t res;

	irqdata = counter->irqdata;
	usrdata = counter->usrdata;

	if (usrdata->len + irqdata->len >= count)
		goto read_pending;

	if (nonblocking)
		return -EAGAIN;

	spin_lock_irq(&counter->waitq.lock);
	__add_wait_queue(&counter->waitq, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (usrdata->len + irqdata->len >= count)
			break;

		if (signal_pending(current))
			break;

		spin_unlock_irq(&counter->waitq.lock);
		schedule();
		spin_lock_irq(&counter->waitq.lock);
	}
	__remove_wait_queue(&counter->waitq, &wait);
	__set_current_state(TASK_RUNNING);
	spin_unlock_irq(&counter->waitq.lock);

	if (usrdata->len + irqdata->len < count)
		return -ERESTARTSYS;
read_pending:
	mutex_lock(&counter->mutex);

	/* Drain pending data first: */
	res = perf_copy_usrdata(usrdata, buf, count);
	if (res < 0 || res == count)
		goto out;

	/* Switch irq buffer: */
	usrdata = perf_switch_irq_data(counter);
	if (perf_copy_usrdata(usrdata, buf + res, count - res) < 0) {
		if (!res)
			res = -EFAULT;
	} else {
		res = count;
	}
out:
	mutex_unlock(&counter->mutex);

	return res;
}

static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct perf_counter *counter = file->private_data;

	switch (counter->hw_event.record_type) {
	case PERF_RECORD_SIMPLE:
		return perf_read_hw(counter, buf, count);

	case PERF_RECORD_IRQ:
	case PERF_RECORD_GROUP:
		return perf_read_irq_data(counter, buf, count,
					  file->f_flags & O_NONBLOCK);
	}
	return -EINVAL;
}

static unsigned int perf_poll(struct file *file, poll_table *wait)
{
	struct perf_counter *counter = file->private_data;
	unsigned int events = 0;
	unsigned long flags;

	poll_wait(file, &counter->waitq, wait);

	spin_lock_irqsave(&counter->waitq.lock, flags);
	if (counter->usrdata->len || counter->irqdata->len)
		events |= POLLIN;
	spin_unlock_irqrestore(&counter->waitq.lock, flags);

	return events;
}

static const struct file_operations perf_fops = {
	.release		= perf_release,
	.read			= perf_read,
	.poll			= perf_poll,
};
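
/*
 * Illustrative userspace sketch (not part of this file; "counter_fd", "buf"
 * and "len" are hypothetical and stand for an fd returned by
 * sys_perf_counter_open() for a counter whose record_type is
 * PERF_RECORD_IRQ or PERF_RECORD_GROUP): pending sample data can be waited
 * for with poll() and drained with read(), which map to perf_poll() and
 * perf_read_irq_data() above.
 *
 *	struct pollfd pfd = { .fd = counter_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		len = read(counter_fd, buf, sizeof(buf));
 */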

/*
 * Allocate and initialize a counter structure
 */
static struct perf_counter *
perf_counter_alloc(struct perf_counter_hw_event *hw_event,
		   int cpu,
		   struct perf_counter *group_leader)
{
	struct hw_perf_counter_ops *hw_ops;
	struct perf_counter *counter;

	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
	if (!counter)
		return NULL;

	/*
	 * Single counters are their own group leaders, with an
	 * empty sibling list:
	 */
	if (!group_leader)
		group_leader = counter;

	mutex_init(&counter->mutex);
	INIT_LIST_HEAD(&counter->list_entry);
	INIT_LIST_HEAD(&counter->sibling_list);
	init_waitqueue_head(&counter->waitq);

	counter->irqdata		= &counter->data[0];
	counter->usrdata		= &counter->data[1];
	counter->cpu			= cpu;
	counter->hw_event		= *hw_event;
	counter->wakeup_pending		= 0;
	counter->group_leader		= group_leader;
	counter->hw_ops			= NULL;

	hw_ops = hw_perf_counter_init(counter);
	if (!hw_ops) {
		kfree(counter);
		return NULL;
	}
	counter->hw_ops = hw_ops;

	return counter;
}

/**
 * sys_perf_counter_open - open a performance counter, associate it with a task/cpu
 *
 * @hw_event_uptr:	event type attributes for monitoring/sampling
 * @pid:		target pid
 * @cpu:		target cpu
 * @group_fd:		group leader counter fd
 */
asmlinkage int sys_perf_counter_open(

	struct perf_counter_hw_event	*hw_event_uptr		__user,
	pid_t				pid,
	int				cpu,
	int				group_fd)

{
	struct perf_counter *counter, *group_leader;
	struct perf_counter_hw_event hw_event;
	struct perf_counter_context *ctx;
	struct file *group_file = NULL;
	int fput_needed = 0;
	int ret;

	if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
		return -EFAULT;

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pid, cpu);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/*
	 * Look up the group leader (we will attach this counter to it):
	 */
	group_leader = NULL;
	if (group_fd != -1) {
		ret = -EINVAL;
		group_file = fget_light(group_fd, &fput_needed);
		if (!group_file)
			goto err_put_context;
		if (group_file->f_op != &perf_fops)
			goto err_put_context;

		group_leader = group_file->private_data;
		/*
		 * Do not allow a recursive hierarchy (this new sibling
		 * becoming part of another group-sibling):
		 */
		if (group_leader->group_leader != group_leader)
			goto err_put_context;
		/*
		 * Do not allow attaching to a group in a different
		 * task or CPU context:
		 */
		if (group_leader->ctx != ctx)
			goto err_put_context;
	}

	ret = -ENOMEM;
	counter = perf_counter_alloc(&hw_event, cpu, group_leader);
	if (!counter)
		goto err_put_context;

	perf_install_in_context(ctx, counter, cpu);

	ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
	if (ret < 0)
		goto err_remove_free_put_context;

out_fput:
	fput_light(group_file, fput_needed);

	return ret;

err_remove_free_put_context:
	mutex_lock(&counter->mutex);
	perf_counter_remove_from_context(counter);
	mutex_unlock(&counter->mutex);
	kfree(counter);

err_put_context:
	put_context(ctx);

	goto out_fput;
}
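
/*
 * Illustrative userspace usage (not part of this file; assumes a wrapper
 * for the new syscall and zero-initialization of the remaining
 * perf_counter_hw_event fields defined in <linux/perf_counter.h>):
 * pid 0 selects the current task, cpu -1 means "any CPU", and group_fd -1
 * creates a new group leader; siblings are added by passing the leader's
 * fd as group_fd. For PERF_RECORD_SIMPLE counters, read() must be passed
 * exactly sizeof(u64), see perf_read_hw() above.
 *
 *	struct perf_counter_hw_event hw_event;
 *	u64 count;
 *	int fd;
 *
 *	memset(&hw_event, 0, sizeof(hw_event));
 *	hw_event.record_type = PERF_RECORD_SIMPLE;
 *
 *	fd = sys_perf_counter_open(&hw_event, 0, -1, -1);
 *	if (fd >= 0 && read(fd, &count, sizeof(count)) == sizeof(count))
 *		printf("count: %llu\n", (unsigned long long) count);
 */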

static void __cpuinit perf_counter_init_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = &per_cpu(perf_cpu_context, cpu);
	__perf_counter_init_context(&cpuctx->ctx, NULL);

	mutex_lock(&perf_resource_mutex);
	cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
	mutex_unlock(&perf_resource_mutex);

	hw_perf_counter_setup();
}

#ifdef CONFIG_HOTPLUG_CPU
static void __perf_counter_exit_cpu(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = &cpuctx->ctx;
	struct perf_counter *counter, *tmp;

	list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
		__perf_counter_remove_from_context(counter);

}
static void perf_counter_exit_cpu(int cpu)
{
	smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
}
#else
static inline void perf_counter_exit_cpu(int cpu) { }
#endif

static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		perf_counter_init_cpu(cpu);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		perf_counter_exit_cpu(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata perf_cpu_nb = {
	.notifier_call		= perf_cpu_notify,
};

static int __init perf_counter_init(void)
{
	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	register_cpu_notifier(&perf_cpu_nb);

	return 0;
}
early_initcall(perf_counter_init);

static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%d\n", perf_reserved_percpu);
}

static ssize_t
perf_set_reserve_percpu(struct sysdev_class *class,
			const char *buf,
			size_t count)
{
	struct perf_cpu_context *cpuctx;
	unsigned long val;
	int err, cpu, mpt;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > perf_max_counters)
		return -EINVAL;

	mutex_lock(&perf_resource_mutex);
	perf_reserved_percpu = val;
	for_each_online_cpu(cpu) {
		cpuctx = &per_cpu(perf_cpu_context, cpu);
		spin_lock_irq(&cpuctx->ctx.lock);
		mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
			  perf_max_counters - perf_reserved_percpu);
		cpuctx->max_pertask = mpt;
		spin_unlock_irq(&cpuctx->ctx.lock);
	}
	mutex_unlock(&perf_resource_mutex);

	return count;
}

static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%d\n", perf_overcommit);
}

static ssize_t
perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
{
	unsigned long val;
	int err;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > 1)
		return -EINVAL;

	mutex_lock(&perf_resource_mutex);
	perf_overcommit = val;
	mutex_unlock(&perf_resource_mutex);

	return count;
}

static SYSDEV_CLASS_ATTR(
				reserve_percpu,
				0644,
				perf_show_reserve_percpu,
				perf_set_reserve_percpu
			);

static SYSDEV_CLASS_ATTR(
				overcommit,
				0644,
				perf_show_overcommit,
				perf_set_overcommit
			);

static struct attribute *perfclass_attrs[] = {
	&attr_reserve_percpu.attr,
	&attr_overcommit.attr,
	NULL
};

static struct attribute_group perfclass_attr_group = {
	.attrs			= perfclass_attrs,
	.name			= "perf_counters",
};
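
/*
 * With the attribute group above, the two knobs are expected to appear
 * under the CPU sysdev class, typically
 * /sys/devices/system/cpu/perf_counters/; illustrative shell usage,
 * assuming that mount point:
 *
 *	cat /sys/devices/system/cpu/perf_counters/reserve_percpu
 *	echo 4 > /sys/devices/system/cpu/perf_counters/reserve_percpu
 *	echo 0 > /sys/devices/system/cpu/perf_counters/overcommit
 */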

static int __init perf_counter_sysfs_init(void)
{
	return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
				  &perfclass_attr_group);
}
device_initcall(perf_counter_sysfs_init);