Commit 3daeb4da authored by Ingo Molnar

Merge branch 'tip/tracing/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/urgent
@@ -620,12 +620,6 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
 	kfree(cpu_buffer);
 }
 
-/*
- * Causes compile errors if the struct buffer_page gets bigger
- * than the struct page.
- */
-extern int ring_buffer_page_too_big(void);
-
 #ifdef CONFIG_HOTPLUG_CPU
 static int rb_cpu_notify(struct notifier_block *self,
 			 unsigned long action, void *hcpu);
@@ -648,11 +642,6 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 	int bsize;
 	int cpu;
 
-	/* Paranoid! Optimizes out when all is well */
-	if (sizeof(struct buffer_page) > sizeof(struct page))
-		ring_buffer_page_too_big();
-
 	/* keep it in its own cache line */
 	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
 			 GFP_KERNEL);
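The block removed above relied on a link-time trick: ring_buffer_page_too_big() is declared but never defined, so the call only survives (and breaks the final link) if the compiler cannot prove the size comparison false. The same guarantee is more commonly written with the kernel's BUILD_BUG_ON() macro; a minimal sketch, not part of this commit (struct buffer_page is private to ring_buffer.c, so the assertion would have to live there, e.g. at the top of __ring_buffer_alloc()):

	/*
	 * Sketch only, not from this commit: equivalent compile-time
	 * check.  The build fails if struct buffer_page ever outgrows
	 * struct page.
	 */
	BUILD_BUG_ON(sizeof(struct buffer_page) > sizeof(struct page));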
@@ -668,8 +657,8 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 	buffer->reader_lock_key = key;
 
 	/* need at least two pages */
-	if (buffer->pages == 1)
-		buffer->pages++;
+	if (buffer->pages < 2)
+		buffer->pages = 2;
 
 	/*
 	 * In case of non-hotplug cpu, if the ring-buffer is allocated
@@ -1013,7 +1002,7 @@ rb_event_index(struct ring_buffer_event *event)
 {
 	unsigned long addr = (unsigned long)event;
 
-	return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
+	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
 }
 
 static inline int
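The rb_event_index() change is cosmetic rather than behavioural: assuming the macro definitions already in ring_buffer.c, the old and new return expressions compute the same offset.

	/*
	 * Assuming the definitions in ring_buffer.c:
	 *   #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
	 *   #define BUF_PAGE_SIZE     (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
	 * it follows that
	 *   PAGE_SIZE - BUF_PAGE_SIZE == BUF_PAGE_HDR_SIZE
	 * so the new form subtracts the header size directly instead of
	 * re-deriving it from PAGE_SIZE.
	 */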
@@ -1334,9 +1323,6 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	/* We reserved something on the buffer */
 
-	if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
-		return NULL;
-
 	event = __rb_page_index(tail_page, tail);
 	rb_update_event(event, type, length);
@@ -2480,6 +2466,21 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
 
+static inline int rb_ok_to_lock(void)
+{
+	/*
+	 * If an NMI die dumps out the content of the ring buffer
+	 * do not grab locks. We also permanently disable the ring
+	 * buffer too. A one time deal is all you get from reading
+	 * the ring buffer from an NMI.
+	 */
+	if (likely(!in_nmi() && !oops_in_progress))
+		return 1;
+	tracing_off_permanent();
+	return 0;
+}
+
 /**
  * ring_buffer_peek - peek at the next event to be read
  * @buffer: The ring buffer to read
@@ -2495,14 +2496,20 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	struct ring_buffer_event *event;
 	unsigned long flags;
+	int dolock;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
+	dolock = rb_ok_to_lock();
 again:
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	local_irq_save(flags);
+	if (dolock)
+		spin_lock(&cpu_buffer->reader_lock);
 	event = rb_buffer_peek(buffer, cpu, ts);
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	if (dolock)
+		spin_unlock(&cpu_buffer->reader_lock);
+	local_irq_restore(flags);
 
 	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
 		cpu_relax();
@@ -2554,6 +2561,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event = NULL;
 	unsigned long flags;
+	int dolock;
+
+	dolock = rb_ok_to_lock();
 
 again:
 	/* might be called in atomic */
@@ -2563,7 +2573,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 		goto out;
 
 	cpu_buffer = buffer->buffers[cpu];
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	local_irq_save(flags);
+	if (dolock)
+		spin_lock(&cpu_buffer->reader_lock);
 
 	event = rb_buffer_peek(buffer, cpu, ts);
 	if (!event)
@@ -2572,7 +2584,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	rb_advance_reader(cpu_buffer);
 
  out_unlock:
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	if (dolock)
+		spin_unlock(&cpu_buffer->reader_lock);
+	local_irq_restore(flags);
 
  out:
 	preempt_enable();
@@ -2770,12 +2784,25 @@ EXPORT_SYMBOL_GPL(ring_buffer_reset);
 int ring_buffer_empty(struct ring_buffer *buffer)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long flags;
+	int dolock;
 	int cpu;
+	int ret;
+
+	dolock = rb_ok_to_lock();
 
 	/* yes this is racy, but if you don't like the race, lock the buffer */
 	for_each_buffer_cpu(buffer, cpu) {
 		cpu_buffer = buffer->buffers[cpu];
-		if (!rb_per_cpu_empty(cpu_buffer))
+		local_irq_save(flags);
+		if (dolock)
+			spin_lock(&cpu_buffer->reader_lock);
+		ret = rb_per_cpu_empty(cpu_buffer);
+		if (dolock)
+			spin_unlock(&cpu_buffer->reader_lock);
+		local_irq_restore(flags);
+
+		if (!ret)
 			return 0;
 	}
@@ -2791,14 +2818,23 @@ EXPORT_SYMBOL_GPL(ring_buffer_empty);
 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long flags;
+	int dolock;
 	int ret;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 1;
 
+	dolock = rb_ok_to_lock();
+
 	cpu_buffer = buffer->buffers[cpu];
+	local_irq_save(flags);
+	if (dolock)
+		spin_lock(&cpu_buffer->reader_lock);
 	ret = rb_per_cpu_empty(cpu_buffer);
+	if (dolock)
+		spin_unlock(&cpu_buffer->reader_lock);
+	local_irq_restore(flags);
 
 	return ret;
 }
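All of the reader-side hunks above share one pattern, factored around the new rb_ok_to_lock() helper: interrupts are always disabled around the read, but reader_lock is only taken outside NMI/oops context, where the interrupted code might already hold the lock and taking it again could deadlock. A condensed sketch of that pattern (example_read_side() is a hypothetical name, not part of the diff):

	/*
	 * Sketch, not from the diff: the common reader-side shape.
	 */
	static int example_read_side(struct ring_buffer_per_cpu *cpu_buffer)
	{
		unsigned long flags;
		int dolock = rb_ok_to_lock();	/* 0 in NMI/oops context */
		int ret;

		local_irq_save(flags);
		if (dolock)
			spin_lock(&cpu_buffer->reader_lock);

		ret = rb_per_cpu_empty(cpu_buffer);	/* any read-side work */

		if (dolock)
			spin_unlock(&cpu_buffer->reader_lock);
		local_irq_restore(flags);

		return ret;
	}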
@@ -203,7 +203,7 @@ static void ring_buffer_producer(void)
 	 * Hammer the buffer for 10 secs (this may
 	 * make the system stall)
 	 */
-	pr_info("Starting ring buffer hammer\n");
+	trace_printk("Starting ring buffer hammer\n");
 	do_gettimeofday(&start_tv);
 	do {
 		struct ring_buffer_event *event;
@@ -239,7 +239,7 @@ static void ring_buffer_producer(void)
 #endif
 
 	} while (end_tv.tv_sec < (start_tv.tv_sec + RUN_TIME) && !kill_test);
-	pr_info("End ring buffer hammer\n");
+	trace_printk("End ring buffer hammer\n");
 
 	if (consumer) {
 		/* Init both completions here to avoid races */
@@ -262,49 +262,50 @@ static void ring_buffer_producer(void)
 	overruns = ring_buffer_overruns(buffer);
 
 	if (kill_test)
-		pr_info("ERROR!\n");
-	pr_info("Time: %lld (usecs)\n", time);
-	pr_info("Overruns: %lld\n", overruns);
+		trace_printk("ERROR!\n");
+	trace_printk("Time: %lld (usecs)\n", time);
+	trace_printk("Overruns: %lld\n", overruns);
 	if (disable_reader)
-		pr_info("Read: (reader disabled)\n");
+		trace_printk("Read: (reader disabled)\n");
 	else
-		pr_info("Read: %ld (by %s)\n", read,
+		trace_printk("Read: %ld (by %s)\n", read,
 			read_events ? "events" : "pages");
-	pr_info("Entries: %lld\n", entries);
-	pr_info("Total: %lld\n", entries + overruns + read);
-	pr_info("Missed: %ld\n", missed);
-	pr_info("Hit: %ld\n", hit);
+	trace_printk("Entries: %lld\n", entries);
+	trace_printk("Total: %lld\n", entries + overruns + read);
+	trace_printk("Missed: %ld\n", missed);
+	trace_printk("Hit: %ld\n", hit);
 
 	/* Convert time from usecs to millisecs */
 	do_div(time, USEC_PER_MSEC);
 	if (time)
 		hit /= (long)time;
 	else
-		pr_info("TIME IS ZERO??\n");
+		trace_printk("TIME IS ZERO??\n");
 
-	pr_info("Entries per millisec: %ld\n", hit);
+	trace_printk("Entries per millisec: %ld\n", hit);
 
 	if (hit) {
 		/* Calculate the average time in nanosecs */
 		avg = NSEC_PER_MSEC / hit;
-		pr_info("%ld ns per entry\n", avg);
+		trace_printk("%ld ns per entry\n", avg);
 	}
 
 	if (missed) {
 		if (time)
 			missed /= (long)time;
 
-		pr_info("Total iterations per millisec: %ld\n", hit + missed);
+		trace_printk("Total iterations per millisec: %ld\n",
+			     hit + missed);
 
 		/* it is possible that hit + missed will overflow and be zero */
 		if (!(hit + missed)) {
-			pr_info("hit + missed overflowed and totalled zero!\n");
+			trace_printk("hit + missed overflowed and totalled zero!\n");
 			hit--; /* make it non zero */
 		}
 
 		/* Caculate the average time in nanosecs */
 		avg = NSEC_PER_MSEC / (hit + missed);
-		pr_info("%ld ns per entry\n", avg);
+		trace_printk("%ld ns per entry\n", avg);
 	}
 }
@@ -355,7 +356,7 @@ static int ring_buffer_producer_thread(void *arg)
 		ring_buffer_producer();
 
-		pr_info("Sleeping for 10 secs\n");
+		trace_printk("Sleeping for 10 secs\n");
 		set_current_state(TASK_INTERRUPTIBLE);
 		schedule_timeout(HZ * SLEEP_TIME);
 		__set_current_state(TASK_RUNNING);
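The benchmark hunks above swap pr_info() for trace_printk(). trace_printk() records into the ftrace ring buffer (read back through the tracing "trace" file) rather than the printk log, so it is much cheaper and keeps the reporting from perturbing the timings being measured. A minimal, hypothetical module sketch (names invented here, not from the diff) showing the call in isolation:

	/* Sketch only: a trace_printk() call outside the benchmark. */
	#include <linux/kernel.h>
	#include <linux/init.h>
	#include <linux/module.h>

	static int __init rb_bench_note_init(void)
	{
		/* Goes to the ftrace ring buffer, not the console log. */
		trace_printk("hello from the ftrace ring buffer\n");
		return 0;
	}
	module_init(rb_bench_note_init);

	MODULE_LICENSE("GPL");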