trace_clock.c

/*
 * tracing clocks
 *
 *  Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *  -   local: CPU-local trace clock
 *  -  medium: scalable global clock with some jitter
 *  -  global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
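
/*
 * Illustrative sketch (not code from this file): the tracing core maps
 * clock names to these functions through a table; assuming the layout
 * used by kernel/trace/trace.c of the same era, the wiring looks
 * roughly like:
 *
 *	static struct {
 *		u64 (*func)(void);
 *		const char *name;
 *	} trace_clocks[] = {
 *		{ trace_clock_local,	"local" },
 *		{ trace_clock_global,	"global" },
 *	};
 */
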
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

#include "trace.h"

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
	u64 clock;
	int resched;

	/*
	 * sched_clock() is an architecture implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
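	/*
	 * ftrace_preempt_disable() instead of preempt_disable(): tracing
	 * can happen from within the scheduler, where a plain
	 * preempt_enable() on the way out could recurse into schedule().
	 */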
	resched = ftrace_preempt_disable();
	clock = sched_clock();
	ftrace_preempt_enable(resched);

	return clock;
}

/*
 * trace_clock(): 'in-between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on cpu_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
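	/*
	 * raw_smp_processor_id(): this may run preemptible; a stale CPU
	 * id just reads a neighbouring CPU's clock, which stays within
	 * the ~1 jiffy jitter bound described above.
	 */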
	return cpu_clock(raw_smp_processor_id());
}


/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

/*
 * Keep prev_time and lock in the same cacheline: both are touched on
 * every locked update, so sharing a line avoids an extra cacheline
 * bounce. An arch_spinlock_t is used rather than a normal spinlock
 * because the ordinary spinlock code is itself instrumented, and
 * taking one here could recurse back into the tracer.
 */
static struct {
	u64 prev_time;
	arch_spinlock_t lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
	{
		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
	};

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

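	/*
	 * Keep interrupts disabled for the whole section so that an
	 * interrupt on this CPU cannot deadlock against
	 * trace_clock_struct.lock taken below.
	 */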
	local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = cpu_clock(this_cpu);
	/*
	 * If in an NMI context then don't risk lockups and return the
	 * cpu_clock() time:
	 */
	if (unlikely(in_nmi()))
		goto out;

	arch_spin_lock(&trace_clock_struct.lock);

	/*
	 * TODO: if this happens often then maybe we should reset
	 * my_scd->clock to prev_time+1, to make sure
	 * we start ticking with the local clock from now on?
	 */
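	/*
	 * The (s64) cast turns a reading that fell behind prev_time into
	 * a negative delta; the comparison also stays correct across u64
	 * wraparound.
	 */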
	if ((s64)(now - trace_clock_struct.prev_time) < 0)
		now = trace_clock_struct.prev_time + 1;

	trace_clock_struct.prev_time = now;

	arch_spin_unlock(&trace_clock_struct.lock);

 out:
	local_irq_restore(flags);

	return now;
}
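
/*
 * Usage sketch (hypothetical caller, not part of this file): an event
 * writer stamps records with whichever variant the tracer selected:
 *
 *	u64 ts;
 *
 *	ts = trace_clock_local();	fastest, per-CPU ordering only
 *	ts = trace_clock_global();	globally ordered, higher cost
 */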