Commit 5a50e33c authored by Steven Rostedt, committed by Steven Rostedt

ring-buffer: Move access to commit_page up into function used

With the change to the way we process commits, a commit only happens at the
outermost level, and we no longer need to worry about a commit ending after
rb_start_commit() has been called. The code used to grab the commit page
before the tail page to prevent a possible race, but that race no longer
exists with the rb_start_commit()/rb_end_commit() interface, so the access
to cpu_buffer->commit_page can move into rb_move_tail(), the only function
that uses it.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Parent 8b2a5dac
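As a reading aid for the changelog above, here is a minimal userspace sketch of the commit-nesting idea it relies on. This is not the kernel's code: the names (demo_cpu_buffer, demo_start_commit(), demo_end_commit()) are invented for the example, and C11 atomics stand in for the kernel's per-CPU local_t operations. The point it illustrates is that only the outermost writer advances the commit page, so commit_page cannot move while a commit is in progress, which is why rb_move_tail() can safely read cpu_buffer->commit_page itself instead of being handed a snapshot taken earlier.

/* demo.c - illustration only, not kernel code */
#include <stdatomic.h>
#include <stdio.h>

struct demo_cpu_buffer {
	atomic_int committing;   /* nesting depth of writers on this "CPU" */
	atomic_int commit_page;  /* stand-in for cpu_buffer->commit_page   */
	atomic_int tail_page;    /* stand-in for cpu_buffer->tail_page     */
};

static void demo_start_commit(struct demo_cpu_buffer *b)
{
	/* Entering a (possibly nested) commit: bump the nesting count. */
	atomic_fetch_add(&b->committing, 1);
}

static void demo_end_commit(struct demo_cpu_buffer *b)
{
	/*
	 * Only the outermost writer (the one that drops the count back
	 * to zero) advances the commit page.  A nested, interrupting
	 * writer can never move commit_page out from under the outer one.
	 */
	if (atomic_fetch_sub(&b->committing, 1) == 1)
		atomic_store(&b->commit_page, atomic_load(&b->tail_page));
}

int main(void)
{
	struct demo_cpu_buffer buf = { 0 };

	demo_start_commit(&buf);            /* outer writer starts a commit */
	atomic_store(&buf.tail_page, 1);    /* outer writer reserves space  */

	demo_start_commit(&buf);            /* an "interrupt" nests         */
	atomic_store(&buf.tail_page, 2);
	demo_end_commit(&buf);              /* nested end: no advance       */

	printf("commit_page while outer commit pending: %d\n",
	       atomic_load(&buf.commit_page));          /* prints 0 */

	demo_end_commit(&buf);              /* outermost end: advance       */
	printf("commit_page after outermost commit end: %d\n",
	       atomic_load(&buf.commit_page));          /* prints 2 */
	return 0;
}

Compiled with a C11 toolchain this prints 0 while the outer commit is still pending and 2 once the outermost demo_end_commit() runs, mirroring the invariant the changelog depends on.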
@@ -1785,9 +1785,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 static struct ring_buffer_event *
 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	     unsigned long length, unsigned long tail,
-	     struct buffer_page *commit_page,
 	     struct buffer_page *tail_page, u64 *ts)
 {
+	struct buffer_page *commit_page = cpu_buffer->commit_page;
 	struct ring_buffer *buffer = cpu_buffer->buffer;
 	struct buffer_page *next_page;
 	int ret;
@@ -1890,13 +1890,10 @@ static struct ring_buffer_event *
 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		  unsigned type, unsigned long length, u64 *ts)
 {
-	struct buffer_page *tail_page, *commit_page;
+	struct buffer_page *tail_page;
 	struct ring_buffer_event *event;
 	unsigned long tail, write;
 
-	commit_page = cpu_buffer->commit_page;
-	/* we just need to protect against interrupts */
-	barrier();
 	tail_page = cpu_buffer->tail_page;
 	write = local_add_return(length, &tail_page->write);
 
@@ -1907,7 +1904,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 
 	/* See if we shot pass the end of this buffer page */
 	if (write > BUF_PAGE_SIZE)
 		return rb_move_tail(cpu_buffer, length, tail,
-				    commit_page, tail_page, ts);
+				    tail_page, ts);
 
 	/* We reserved something on the buffer */