Discussion:
[RFC][PATCH 02/11] ring-buffer: Have ring_buffer_empty() not depend on tracing stopped
Steven Rostedt
2020-03-17 21:32:24 UTC
From: "Steven Rostedt (VMware)" <***@goodmis.org>

It has been complained that when the trace file is read, tracing is
disabled, because the iterator expects that the buffer it reads is not
being updated by a writer. Several steps are needed to make the iterator
handle a concurrent writer, by testing whether things have changed as it
reads.

This step makes ring_buffer_iter_empty() expect the buffer to be changing.
Note that if the current location of the iterator is overwritten, it will
return false, as new data is being added. Note also that this means data
will be skipped.
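
Conceptually, the new check pairs with the writer like this (a sketch;
it assumes, per the comment in the code, that the cmpxchg the writer does
in rb_tail_page_update() acts as a full memory barrier):

    /* writer side (conceptual) */
    /* ... store the event data into the commit page ... */
    cmpxchg(...);                   /* implies smp_mb() when crossing pages */

    /* reader side (ring_buffer_iter_empty()) */
    commit_page = cpu_buffer->commit_page;
    commit_ts = commit_page->page->time_stamp;
    smp_rmb();                      /* pairs with the writer's mb() */
    commit = rb_page_commit(commit_page);
    smp_rmb();                      /* order the re-reads below */

    curr_commit_page = READ_ONCE(cpu_buffer->commit_page);
    curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp);
    if (curr_commit_page != commit_page || curr_commit_ts != commit_ts)
        return 0;                   /* the writer moved on: not empty */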

Signed-off-by: Steven Rostedt (VMware) <***@goodmis.org>
---
kernel/trace/ring_buffer.c | 25 +++++++++++++++++++++++--
1 file changed, 23 insertions(+), 2 deletions(-)

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 61f0e92ace99..1718520a2809 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3590,16 +3590,37 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
struct buffer_page *reader;
struct buffer_page *head_page;
struct buffer_page *commit_page;
+ struct buffer_page *curr_commit_page;
unsigned commit;
+ u64 curr_commit_ts;
+ u64 commit_ts;

cpu_buffer = iter->cpu_buffer;
-
- /* Remember, trace recording is off when iterator is in use */
reader = cpu_buffer->reader_page;
head_page = cpu_buffer->head_page;
commit_page = cpu_buffer->commit_page;
+ commit_ts = commit_page->page->time_stamp;
+
+ /*
+ * When the writer goes across pages, it issues a cmpxchg which
+ * is a mb(), which will synchronize with the rmb here.
+ * (see rb_tail_page_update())
+ */
+ smp_rmb();
commit = rb_page_commit(commit_page);
+ /* We want to make sure that the commit page doesn't change */
+ smp_rmb();
+
+ /* Make sure commit page didn't change */
+ curr_commit_page = READ_ONCE(cpu_buffer->commit_page);
+ curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp);
+
+ /* If the commit page changed, then there's more data */
+ if (curr_commit_page != commit_page ||
+ curr_commit_ts != commit_ts)
+ return 0;

+ /* Still racy, as it may return a false positive, but that's OK */
return ((iter->head_page == commit_page && iter->head == commit) ||
(iter->head_page == reader && commit_page == head_page &&
head_page->read == commit &&
--
2.25.1
Steven Rostedt
2020-03-17 21:32:23 UTC
From: "Steven Rostedt (VMware)" <***@goodmis.org>

In order to have the iterator read the buffer even while it is still being
updated, the ring buffer iterator must save each event in a separate
location outside the ring buffer, such that the copy it works with is
immutable.

There's one use case that saves off the event returned from the ring buffer
iterator, calls the iterator again to look at the next event, and then goes
back to use the first event. As the ring buffer iterator will only have a
single copy, this use case will no longer be supported.

Instead, have that use case create its own buffer to store the first
event while looking at the next event. This way, when looking at the first
event again, it won't be corrupted by the second read.
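
With this, the caller can safely look at both the current and the next
event (a sketch of the pattern trace_print_lat_context() uses below):

    /* iter->ent currently points at the first event */
    next_entry = trace_find_next_entry(iter, NULL, &next_ts);

    /*
     * trace_find_next_entry() may have copied the first event into
     * iter->temp and repointed iter->ent at that copy, so iter->ent
     * is still valid even though the iterator has moved on.
     */
    entry = iter->ent;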

Signed-off-by: Steven Rostedt (VMware) <***@goodmis.org>
---
include/linux/trace_events.h | 2 ++
kernel/trace/trace.c | 27 ++++++++++++++++++++++++++-
kernel/trace/trace_output.c | 15 ++++++---------
3 files changed, 34 insertions(+), 10 deletions(-)

diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 6c7a10a6d71e..5c6943354049 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -85,6 +85,8 @@ struct trace_iterator {
struct mutex mutex;
struct ring_buffer_iter **buffer_iter;
unsigned long iter_flags;
+ void *temp; /* temp holder */
+ unsigned int temp_size;

/* trace_seq for __print_flags() and __print_symbolic() etc. */
struct trace_seq tmp_seq;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 6b11e4e2150c..52425aaf26c2 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3466,7 +3466,31 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
int *ent_cpu, u64 *ent_ts)
{
- return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
+ /* __find_next_entry will reset ent_size */
+ int ent_size = iter->ent_size;
+ struct trace_entry *entry;
+
+ /*
+ * The __find_next_entry() may update iter->ent, making
+ * the current iter->ent pointing to stale data.
+ * Need to copy it over.
+ */
+ if (iter->ent && iter->ent != iter->temp) {
+ if (!iter->temp || iter->temp_size < iter->ent_size) {
+ kfree(iter->temp);
+ iter->temp = kmalloc(iter->ent_size, GFP_KERNEL);
+ if (!iter->temp)
+ return NULL;
+ }
+ memcpy(iter->temp, iter->ent, iter->ent_size);
+ iter->temp_size = iter->ent_size;
+ iter->ent = iter->temp;
+ }
+ entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
+ /* Put back the original ent_size */
+ iter->ent_size = ent_size;
+
+ return entry;
}

/* Find the next real entry, and increment the iterator to the next entry */
@@ -4344,6 +4368,7 @@ static int tracing_release(struct inode *inode, struct file *file)

mutex_destroy(&iter->mutex);
free_cpumask_var(iter->started);
+ kfree(iter->temp);
kfree(iter->trace);
kfree(iter->buffer_iter);
seq_release_private(inode, file);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index e25a7da79c6b..9a121e147102 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -617,22 +617,19 @@ int trace_print_context(struct trace_iterator *iter)

int trace_print_lat_context(struct trace_iterator *iter)
{
+ struct trace_entry *entry, *next_entry;
struct trace_array *tr = iter->tr;
- /* trace_find_next_entry will reset ent_size */
- int ent_size = iter->ent_size;
struct trace_seq *s = &iter->seq;
- u64 next_ts;
- struct trace_entry *entry = iter->ent,
- *next_entry = trace_find_next_entry(iter, NULL,
- &next_ts);
unsigned long verbose = (tr->trace_flags & TRACE_ITER_VERBOSE);
+ u64 next_ts;

- /* Restore the original ent_size */
- iter->ent_size = ent_size;
-
+ next_entry = trace_find_next_entry(iter, NULL, &next_ts);
if (!next_entry)
next_ts = iter->ts;

+ /* trace_find_next_entry() may change iter->ent */
+ entry = iter->ent;
+
if (verbose) {
char comm[TASK_COMM_LEN];
--
2.25.1
Steven Rostedt
2020-03-17 21:32:25 UTC
From: "Steven Rostedt (VMware)" <***@goodmis.org>

When the ring buffer was first created, the iterator followed the normal
producer/consumer operations: it had both a peek() operation, which just
returned the event at the current location, and a read(), which returned
the event at the current location and also incremented the iterator such
that the next peek() or read() would return the next event.

The only current use of ring_buffer_read() is to move the iterator to the
next location; nothing actually reads the event it returns anymore. Rename
this function after its actual use case, ring_buffer_iter_advance(), which
also adds the more meaningful "iter" part to the name. As the timestamp
returned by ring_buffer_read() was never used, there's no reason for the
new version to return it; it becomes a void function.
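
A typical iteration loop then looks like this (a sketch; locking and
error handling omitted):

    struct ring_buffer_event *event;
    u64 ts;

    while ((event = ring_buffer_iter_peek(iter, &ts))) {
        /* process the event at the current location */

        /* move to the next location; nothing is returned */
        ring_buffer_iter_advance(iter);
    }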

Signed-off-by: Steven Rostedt (VMware) <***@goodmis.org>
---
include/linux/ring_buffer.h | 3 +--
kernel/trace/ring_buffer.c | 23 ++++++-----------------
kernel/trace/trace.c | 4 ++--
kernel/trace/trace_functions_graph.c | 2 +-
4 files changed, 10 insertions(+), 22 deletions(-)

diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index df0124eabece..0ae603b79b0e 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -135,8 +135,7 @@ void ring_buffer_read_finish(struct ring_buffer_iter *iter);

struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
-struct ring_buffer_event *
-ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts);
+void ring_buffer_iter_advance(struct ring_buffer_iter *iter);
void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
int ring_buffer_iter_empty(struct ring_buffer_iter *iter);

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 1718520a2809..f57eeaa80e3e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -4318,35 +4318,24 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter)
EXPORT_SYMBOL_GPL(ring_buffer_read_finish);

/**
- * ring_buffer_read - read the next item in the ring buffer by the iterator
+ * ring_buffer_iter_advance - advance the iterator to the next location
* @iter: The ring buffer iterator
- * @ts: The time stamp of the event read.
*
- * This reads the next event in the ring buffer and increments the iterator.
+ * Move the location of the iterator such that the next read will
+ * be the next location of the iterator.
*/
-struct ring_buffer_event *
-ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
+void ring_buffer_iter_advance(struct ring_buffer_iter *iter)
{
- struct ring_buffer_event *event;
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
unsigned long flags;

raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
- again:
- event = rb_iter_peek(iter, ts);
- if (!event)
- goto out;
-
- if (event->type_len == RINGBUF_TYPE_PADDING)
- goto again;

rb_advance_iter(iter);
- out:
- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

- return event;
+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
-EXPORT_SYMBOL_GPL(ring_buffer_read);
+EXPORT_SYMBOL_GPL(ring_buffer_iter_advance);

/**
* ring_buffer_size - return the size of the ring buffer (in bytes)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 52425aaf26c2..1085b3d5d0dc 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3378,7 +3378,7 @@ static void trace_iterator_increment(struct trace_iterator *iter)

iter->idx++;
if (buf_iter)
- ring_buffer_read(buf_iter, NULL);
+ ring_buffer_iter_advance(buf_iter);
}

static struct trace_entry *
@@ -3562,7 +3562,7 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
if (ts >= iter->array_buffer->time_start)
break;
entries++;
- ring_buffer_read(buf_iter, NULL);
+ ring_buffer_iter_advance(buf_iter);
}

per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 7d71546ba00a..4a9c49c08ec9 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -482,7 +482,7 @@ get_return_for_leaf(struct trace_iterator *iter,

/* this is a leaf, now advance the iterator */
if (ring_iter)
- ring_buffer_read(ring_iter, NULL);
+ ring_buffer_iter_advance(ring_iter);

return next;
}
--
2.25.1
Steven Rostedt
2020-03-17 21:32:27 UTC
From: "Steven Rostedt (VMware)" <***@goodmis.org>

Give the ring_buffer_iter structure a place to store an event, such that
it cannot be overwritten by a writer, and have rb_iter_head_event() load
it in such a way that it returns NULL and resets the iter to the start of
the current page if a writer updated the page.
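
In short, rb_iter_head_event() becomes a read-copy-verify operation
(a condensed sketch of the code below):

    commit = rb_page_commit(iter->head_page);
    smp_rmb();                      /* pairs with the writer's mb() */
    event = __rb_page_index(iter->head_page, iter->head);
    length = rb_event_length(event);

    if (iter->head + length > commit || length > BUF_MAX_DATA_SIZE)
        goto reset;                 /* the writer corrupted the read */

    memcpy(iter->event, event, length);
    smp_rmb();
    if (iter->page_stamp != iter->head_page->page->time_stamp ||
        commit > rb_page_commit(iter->head_page))
        goto reset;                 /* the writer entered the page */

    return iter->event;             /* an immutable copy */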

Signed-off-by: Steven Rostedt (VMware) <***@goodmis.org>
---
kernel/trace/ring_buffer.c | 106 ++++++++++++++++++++++++++-----------
1 file changed, 75 insertions(+), 31 deletions(-)

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index e689bdcb53e8..3d718add73c1 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -503,11 +503,13 @@ struct trace_buffer {
struct ring_buffer_iter {
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long head;
+ unsigned long next_event;
struct buffer_page *head_page;
struct buffer_page *cache_reader_page;
unsigned long cache_read;
u64 read_stamp;
u64 page_stamp;
+ struct ring_buffer_event *event;
};

/**
@@ -1914,15 +1916,59 @@ rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->reader_page->read);
}

-static __always_inline struct ring_buffer_event *
-rb_iter_head_event(struct ring_buffer_iter *iter)
+static __always_inline unsigned rb_page_commit(struct buffer_page *bpage)
{
- return __rb_page_index(iter->head_page, iter->head);
+ return local_read(&bpage->page->commit);
}

-static __always_inline unsigned rb_page_commit(struct buffer_page *bpage)
+static struct ring_buffer_event *
+rb_iter_head_event(struct ring_buffer_iter *iter)
{
- return local_read(&bpage->page->commit);
+ struct ring_buffer_event *event;
+ struct buffer_page *iter_head_page = iter->head_page;
+ unsigned long commit;
+ unsigned length;
+
+ /*
+ * When the writer goes across pages, it issues a cmpxchg which
+ * is a mb(), which will synchronize with the rmb here.
+ * (see rb_tail_page_update() and __rb_reserve_next())
+ */
+ commit = rb_page_commit(iter_head_page);
+ smp_rmb();
+ event = __rb_page_index(iter_head_page, iter->head);
+ length = rb_event_length(event);
+
+ /*
+ * READ_ONCE() doesn't work on functions and we don't want the
+ * compiler doing any crazy optimizations with length.
+ */
+ barrier();
+
+ if ((iter->head + length) > commit || length > BUF_MAX_DATA_SIZE)
+ /* Writer corrupted the read? */
+ goto reset;
+
+ memcpy(iter->event, event, length);
+ /*
+ * If the page stamp is still the same after this rmb() then the
+ * event was safely copied without the writer entering the page.
+ */
+ smp_rmb();
+
+ /* Make sure the page didn't change since we read this */
+ if (iter->page_stamp != iter_head_page->page->time_stamp ||
+ commit > rb_page_commit(iter_head_page))
+ goto reset;
+
+ iter->next_event = iter->head + length;
+ return iter->event;
+ reset:
+ /* Reset to the beginning */
+ iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
+ iter->head = 0;
+ iter->next_event = 0;
+ return NULL;
}

/* Size is determined by what has been committed */
@@ -1962,6 +2008,7 @@ static void rb_inc_iter(struct ring_buffer_iter *iter)

iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
iter->head = 0;
+ iter->next_event = 0;
}

/*
@@ -3548,6 +3595,7 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
/* Iterator usage is expected to have record disabled */
iter->head_page = cpu_buffer->reader_page;
iter->head = cpu_buffer->reader_page->read;
+ iter->next_event = iter->head;

iter->cache_reader_page = iter->head_page;
iter->cache_read = cpu_buffer->read;
@@ -3625,7 +3673,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
return 0;

/* Still racy, as it may return a false positive, but that's OK */
- return ((iter->head_page == commit_page && iter->head == commit) ||
+ return ((iter->head_page == commit_page && iter->head >= commit) ||
(iter->head_page == reader && commit_page == head_page &&
head_page->read == commit &&
iter->head == rb_page_commit(cpu_buffer->reader_page)));
@@ -3853,15 +3901,22 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
static void rb_advance_iter(struct ring_buffer_iter *iter)
{
struct ring_buffer_per_cpu *cpu_buffer;
- struct ring_buffer_event *event;
- unsigned length;

cpu_buffer = iter->cpu_buffer;

+ /* If head == next_event then we need to jump to the next event */
+ if (iter->head == iter->next_event) {
+ /* If the event gets overwritten again, there's nothing to do */
+ if (rb_iter_head_event(iter) == NULL)
+ return;
+ }
+
+ iter->head = iter->next_event;
+
/*
* Check if we are at the end of the buffer.
*/
- if (iter->head >= rb_page_size(iter->head_page)) {
+ if (iter->next_event >= rb_page_size(iter->head_page)) {
/* discarded commits can make the page empty */
if (iter->head_page == cpu_buffer->commit_page)
return;
@@ -3869,27 +3924,7 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
return;
}

- event = rb_iter_head_event(iter);
-
- length = rb_event_length(event);
-
- /*
- * This should not be called to advance the header if we are
- * at the tail of the buffer.
- */
- if (RB_WARN_ON(cpu_buffer,
- (iter->head_page == cpu_buffer->commit_page) &&
- (iter->head + length > rb_commit_index(cpu_buffer))))
- return;
-
- rb_update_iter_read_stamp(iter, event);
-
- iter->head += length;
-
- /* check for end of page padding */
- if ((iter->head >= rb_page_size(iter->head_page)) &&
- (iter->head_page != cpu_buffer->commit_page))
- rb_inc_iter(iter);
+ rb_update_iter_read_stamp(iter, iter->event);
}

static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
@@ -4017,6 +4052,8 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
}

event = rb_iter_head_event(iter);
+ if (!event)
+ goto again;

switch (event->type_len) {
case RINGBUF_TYPE_PADDING:
@@ -4233,10 +4270,16 @@ ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return NULL;

- iter = kmalloc(sizeof(*iter), flags);
+ iter = kzalloc(sizeof(*iter), flags);
if (!iter)
return NULL;

+ iter->event = kmalloc(BUF_MAX_DATA_SIZE, flags);
+ if (!iter->event) {
+ kfree(iter);
+ return NULL;
+ }
+
cpu_buffer = buffer->buffers[cpu];

iter->cpu_buffer = cpu_buffer;
@@ -4317,6 +4360,7 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter)

atomic_dec(&cpu_buffer->record_disabled);
atomic_dec(&cpu_buffer->buffer->resize_disabled);
+ kfree(iter->event);
kfree(iter);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
--
2.25.1
Steven Rostedt
2020-03-17 21:32:32 UTC
From: "Steven Rostedt (VMware)" <***@goodmis.org>

Have the ring buffer iterator set a flag if events were dropped as it went
to peek at the next event. Have the trace file display this fact, if it
happened, with a "LOST EVENTS" message.
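
The trace code can then translate the flag into its lost_events
accounting (a sketch of the peek_next_entry() change below):

    event = ring_buffer_iter_peek(buf_iter, ts);
    if (lost_events)
        /* the count is unknown, so flag it with -1 */
        *lost_events = ring_buffer_iter_dropped(buf_iter) ?
            (unsigned long)-1 : 0;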

Signed-off-by: Steven Rostedt (VMware) <***@goodmis.org>
---
include/linux/ring_buffer.h | 1 +
kernel/trace/ring_buffer.c | 16 ++++++++++++++++
kernel/trace/trace.c | 16 ++++++++++++----
3 files changed, 29 insertions(+), 4 deletions(-)

diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 0ae603b79b0e..c76b2f3b3ac4 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -138,6 +138,7 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
void ring_buffer_iter_advance(struct ring_buffer_iter *iter);
void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
+bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter);

unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 8bafba674ec0..87bbb519505f 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -510,6 +510,7 @@ struct ring_buffer_iter {
u64 read_stamp;
u64 page_stamp;
struct ring_buffer_event *event;
+ int missed_events;
};

/**
@@ -1971,6 +1972,7 @@ rb_iter_head_event(struct ring_buffer_iter *iter)
iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
iter->head = 0;
iter->next_event = 0;
+ iter->missed_events = 1;
return NULL;
}

@@ -4174,6 +4176,20 @@ ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
return event;
}

+/** ring_buffer_iter_dropped - report if there are dropped events
+ * @iter: The ring buffer iterator
+ *
+ * Returns true if there were dropped events since the last peek.
+ */
+bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter)
+{
+ bool ret = iter->missed_events != 0;
+
+ iter->missed_events = 0;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_iter_dropped);
+
/**
* ring_buffer_iter_peek - peek at the next event to be read
* @iter: The ring buffer iterator
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a60f49c401f9..65eb38131b6d 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3388,11 +3388,15 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
struct ring_buffer_event *event;
struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);

- if (buf_iter)
+ if (buf_iter) {
event = ring_buffer_iter_peek(buf_iter, ts);
- else
+ if (lost_events)
+ *lost_events = ring_buffer_iter_dropped(buf_iter) ?
+ (unsigned long)-1 : 0;
+ } else {
event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
lost_events);
+ }

if (event) {
iter->ent_size = ring_buffer_event_length(event);
@@ -4005,8 +4009,12 @@ enum print_line_t print_trace_line(struct trace_iterator *iter)
enum print_line_t ret;

if (iter->lost_events) {
- trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
- iter->cpu, iter->lost_events);
+ if (iter->lost_events == (unsigned long)-1)
+ trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
+ iter->cpu);
+ else
+ trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
+ iter->cpu, iter->lost_events);
if (trace_seq_has_overflowed(&iter->seq))
return TRACE_TYPE_PARTIAL_LINE;
}
--
2.25.1
Steven Rostedt
2020-03-17 21:32:33 UTC
From: "Steven Rostedt (VMware)" <***@goodmis.org>

Now that reading the trace file does not temporarily stop tracing while it
is open, update the document to reflect this fact.

Signed-off-by: Steven Rostedt (VMware) <***@goodmis.org>
---
Documentation/trace/ftrace.rst | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst
index 99a0890e20ec..86e76f2a40dc 100644
--- a/Documentation/trace/ftrace.rst
+++ b/Documentation/trace/ftrace.rst
@@ -125,10 +125,13 @@ of ftrace. Here is a list of some of the key files:
trace:

This file holds the output of the trace in a human
- readable format (described below). Note, tracing is temporarily
- disabled when the file is open for reading. Once all readers
- are closed, tracing is re-enabled. Opening this file for
+ readable format (described below). Opening this file for
writing with the O_TRUNC flag clears the ring buffer content.
+ Note, this file is not a consumer. If tracing is off
+ (no tracer running, or tracing_on is zero), it will produce
+ the same output each time it is read. When tracing is on,
+ it may produce inconsistent results as it tries to read
+ the entire buffer without consuming it.

trace_pipe:

@@ -142,9 +145,7 @@ of ftrace. Here is a list of some of the key files:
will not be read again with a sequential read. The
"trace" file is static, and if the tracer is not
adding more data, it will display the same
- information every time it is read. Unlike the
- "trace" file, opening this file for reading will not
- temporarily disable tracing.
+ information every time it is read.

trace_options:
--
2.25.1
Steven Rostedt
2020-03-17 21:32:29 UTC
From: "Steven Rostedt (VMware)" <***@goodmis.org>

As it is fine to perform several "peeks" of event data in the ring buffer
via the iterator before moving it forward, do not re-read the event; just
return what was read before. Otherwise, re-reading can cause inconsistent
results, especially when testing multiple CPU buffers in order to
interleave them.
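
With this, repeated peeks before an advance are stable (a sketch):

    e1 = ring_buffer_iter_peek(iter, &ts);
    e2 = ring_buffer_iter_peek(iter, &ts);
    /* e1 == e2: the cached iter->event is returned, not re-read */

    ring_buffer_iter_advance(iter);
    e3 = ring_buffer_iter_peek(iter, &ts);  /* re-reads the buffer */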

Signed-off-by: Steven Rostedt (VMware) <***@goodmis.org>
---
kernel/trace/ring_buffer.c | 3 +++
1 file changed, 3 insertions(+)

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 475338fda969..5979327254f9 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1929,6 +1929,9 @@ rb_iter_head_event(struct ring_buffer_iter *iter)
unsigned long commit;
unsigned length;

+ if (iter->head != iter->next_event)
+ return iter->event;
+
/*
* When the writer goes across pages, it issues a cmpxchg which
* is a mb(), which will synchronize with the rmb here.
--
2.25.1
Steven Rostedt
2020-03-17 21:32:31 UTC
From: "Steven Rostedt (VMware)" <***@goodmis.org>

When opening the "trace" file, it is no longer necessary to disable tracing.

Signed-off-by: Steven Rostedt (VMware) <***@goodmis.org>
---
kernel/trace/trace.c | 8 --------
1 file changed, 8 deletions(-)

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1085b3d5d0dc..a60f49c401f9 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4261,10 +4261,6 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
if (trace_clocks[tr->clock_id].in_ns)
iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

- /* stop the trace while dumping if we are not opening "snapshot" */
- if (!iter->snapshot)
- tracing_stop_tr(tr);
-
if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
for_each_tracing_cpu(cpu) {
iter->buffer_iter[cpu] =
@@ -4358,10 +4354,6 @@ static int tracing_release(struct inode *inode, struct file *file)
if (iter->trace && iter->trace->close)
iter->trace->close(iter);

- if (!iter->snapshot)
- /* reenable tracing if it was previously enabled */
- tracing_start_tr(tr);
-
__trace_array_put(tr);

mutex_unlock(&trace_types_lock);
--
2.25.1
Steven Rostedt
2020-03-17 21:32:26 UTC
From: "Steven Rostedt (VMware)" <***@goodmis.org>

Have the ring_buffer_iter structure contain a page_stamp, such that it can
be used to see if the writer entered the page the iterator is on. When
going to a new page, the iterator will record the time stamp of that page.
When reading events, it can copy the event to an internal buffer on the
iterator (to be implemented in a later patch), then compare the page's
time stamp with its own to see if the writer entered the page. If so, it
will need to try to read the event again.
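
The check will then look like this (a sketch; the copy into the internal
buffer is added by a later patch):

    /* when the iterator moves onto a new page */
    iter->page_stamp = iter->head_page->page->time_stamp;

    /* when reading an event (later patch) */
    memcpy(iter->event, event, length);
    smp_rmb();
    if (iter->page_stamp != iter->head_page->page->time_stamp)
        goto reset;             /* the writer entered this page; retry */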

Signed-off-by: Steven Rostedt (VMware) <***@goodmis.org>
---
kernel/trace/ring_buffer.c | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index f57eeaa80e3e..e689bdcb53e8 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -507,6 +507,7 @@ struct ring_buffer_iter {
struct buffer_page *cache_reader_page;
unsigned long cache_read;
u64 read_stamp;
+ u64 page_stamp;
};

/**
@@ -1959,7 +1960,7 @@ static void rb_inc_iter(struct ring_buffer_iter *iter)
else
rb_inc_page(cpu_buffer, &iter->head_page);

- iter->read_stamp = iter->head_page->page->time_stamp;
+ iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
iter->head = 0;
}

@@ -3551,10 +3552,13 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
iter->cache_reader_page = iter->head_page;
iter->cache_read = cpu_buffer->read;

- if (iter->head)
+ if (iter->head) {
iter->read_stamp = cpu_buffer->read_stamp;
- else
+ iter->page_stamp = cpu_buffer->reader_page->page->time_stamp;
+ } else {
iter->read_stamp = iter->head_page->page->time_stamp;
+ iter->page_stamp = iter->read_stamp;
+ }
}

/**
--
2.25.1
Steven Rostedt
2020-03-17 21:32:30 UTC
From: "Steven Rostedt (VMware)" <***@goodmis.org>

Now that the iterator can handle a concurrent writer, do not disable writing
to the ring buffer when there is an iterator present.

Signed-off-by: Steven Rostedt (VMware) <***@goodmis.org>
---
kernel/trace/ring_buffer.c | 2 --
1 file changed, 2 deletions(-)

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 5979327254f9..8bafba674ec0 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -4295,7 +4295,6 @@ ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
iter->cpu_buffer = cpu_buffer;

atomic_inc(&buffer->resize_disabled);
- atomic_inc(&cpu_buffer->record_disabled);

return iter;
}
@@ -4368,7 +4367,6 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter)
rb_check_pages(cpu_buffer);
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

- atomic_dec(&cpu_buffer->record_disabled);
atomic_dec(&cpu_buffer->buffer->resize_disabled);
kfree(iter->event);
kfree(iter);
--
2.25.1
Steven Rostedt
2020-03-17 21:32:28 UTC
From: "Steven Rostedt (VMware)" <***@goodmis.org>

As the iterator will now be reading a live buffer, a read will fail and be
retried whenever the event being read is on a page that a writer crosses.
The condition in rb_iter_peek() that allows a retry to happen only three
times is therefore no longer valid. Allow rb_iter_peek() to retry more
than three times without killing the ring buffer, but only if
rb_iter_head_event() has failed at least once.

Signed-off-by: Steven Rostedt (VMware) <***@goodmis.org>
---
kernel/trace/ring_buffer.c | 13 ++++++++++---
1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 3d718add73c1..475338fda969 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -4012,6 +4012,7 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event;
int nr_loops = 0;
+ bool failed = false;

if (ts)
*ts = 0;
@@ -4038,10 +4039,14 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
* to a data event, we should never loop more than three times.
* Once for going to next page, once on time extend, and
* finally once to get the event.
- * (We never hit the following condition more than thrice).
+ * We should never hit the following condition more than thrice,
+ * unless the buffer is very small, and there's a writer
+ * that is causing the reader to fail getting an event.
*/
- if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3))
+ if (++nr_loops > 3) {
+ RB_WARN_ON(cpu_buffer, !failed);
return NULL;
+ }

if (rb_per_cpu_empty(cpu_buffer))
return NULL;
@@ -4052,8 +4057,10 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
}

event = rb_iter_head_event(iter);
- if (!event)
+ if (!event) {
+ failed = true;
goto again;
+ }

switch (event->type_len) {
case RINGBUF_TYPE_PADDING:
--
2.25.1