author    Wang Nan <[email protected]>	2017-12-04 16:51:06 +0000
committer Arnaldo Carvalho de Melo <[email protected]>	2017-12-05 15:59:37 -0300
commit    7fb4b407a1242dbc85ea3ed1be065dca8f9a6f5b (patch)
tree      456f8de768c54049f294f386ee79ab4f5cd9cc5b
parent    71f566a34986f4a86a8c546c7a36f70f0132b8a9 (diff)
perf mmap: Don't discard prev in backward mode
'perf record' can switch its output data file. The new output should only
store the data collected after switching. However, in overwrite backward mode
the new output can still contain data from before the switch, which also adds
extra overhead.

At the end of mmap_read(), the position of the processed ring buffer is saved
in md->prev. The next mmap_read() should end at md->prev if that data has not
been overwritten, which avoids processing duplicate data. However, md->prev
is discarded, so the next mmap_read() has to process the whole valid ring
buffer, which probably includes already-processed data.

Avoid calling backward_rb_find_range() when md->prev is still available.

Signed-off-by: Wang Nan <[email protected]>
Tested-by: Kan Liang <[email protected]>
Acked-by: Namhyung Kim <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Mengting Zhang <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
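
For illustration, a minimal user-space sketch of the start/end selection the
patched perf_mmap__push() performs. toy_mmap and pick_range() are hypothetical
stand-ins, not the tools/perf API; only the "backward ? head : old" selection
mirrors the patch.

/*
 * Illustrative sketch only: toy_mmap and pick_range() are hypothetical,
 * they merely mirror the start/end selection added by this patch.
 */
#include <stdio.h>

typedef unsigned long long u64;

struct toy_mmap {
	u64 mask;	/* ring buffer size - 1 */
	u64 prev;	/* where the previous read stopped */
};

/* Pick the byte range to push out, as the patched perf_mmap__push() does. */
static void pick_range(struct toy_mmap *md, u64 head, int backward,
		       u64 *start, u64 *end)
{
	u64 old = md->prev;

	/*
	 * Backward ring: new records sit between the current head and the
	 * previously saved prev, so keeping prev bounds the next read.
	 */
	*start = backward ? head : old;
	*end   = backward ? old : head;
}

int main(void)
{
	struct toy_mmap md = { .mask = 4095, .prev = 3000 };
	u64 start, end;

	/* Backward mode: head moved from 3000 down to 2000 since last read. */
	pick_range(&md, 2000, 1, &start, &end);
	printf("read [%llu, %llu), %llu bytes\n", start, end, end - start);

	/*
	 * Because prev was kept, only the 1000 new bytes are processed;
	 * with prev discarded, the old code had to rescan the whole valid
	 * ring buffer via backward_rb_find_range().
	 */
	return 0;
}

Running the sketch prints "read [2000, 3000), 1000 bytes".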
-rw-r--r--	tools/perf/util/mmap.c	33
1 file changed, 15 insertions(+), 18 deletions(-)
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 3f262e707a41..5f8cb1583e53 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -267,18 +267,6 @@ static int backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64
 	return -1;
 }
 
-static int rb_find_range(void *data, int mask, u64 head, u64 old,
-			 u64 *start, u64 *end, bool backward)
-{
-	if (!backward) {
-		*start = old;
-		*end = head;
-		return 0;
-	}
-
-	return backward_rb_find_range(data, mask, head, start, end);
-}
-
 int perf_mmap__push(struct perf_mmap *md, bool backward,
 		    void *to, int push(void *to, void *buf, size_t size))
 {
@@ -290,19 +278,28 @@ int perf_mmap__push(struct perf_mmap *md, bool backward,
 	void *buf;
 	int rc = 0;
 
-	if (rb_find_range(data, md->mask, head, old, &start, &end, backward))
-		return -1;
+	start = backward ? head : old;
+	end = backward ? old : head;
 
 	if (start == end)
 		return 0;
 
 	size = end - start;
 	if (size > (unsigned long)(md->mask) + 1) {
-		WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
+		if (!backward) {
+			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
 
-		md->prev = head;
-		perf_mmap__consume(md, backward);
-		return 0;
+			md->prev = head;
+			perf_mmap__consume(md, backward);
+			return 0;
+		}
+
+		/*
+		 * Backward ring buffer is full. We still have a chance to read
+		 * most of data from it.
+		 */
+		if (backward_rb_find_range(data, md->mask, head, &start, &end))
+			return -1;
 	}
 
 	if ((start & md->mask) + size != (end & md->mask)) {
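
For context, a simplified stand-in for what backward_rb_find_range() does
conceptually when the backward ring has wrapped: walk record headers starting
at head until unwritten space is reached or one full buffer has been covered,
and report that span as the readable range. The struct and function names
below are illustrative, not the tools/perf source.

/*
 * Simplified stand-in, not the tools/perf implementation: step over
 * header-sized records from head to find the still-readable range.
 */
#include <stdint.h>
#include <stdio.h>

struct evt_header {		/* stand-in for struct perf_event_header */
	uint32_t type;
	uint16_t misc;
	uint16_t size;
};

static int find_backward_range(unsigned char *buf, int mask, uint64_t head,
			       uint64_t *start, uint64_t *end)
{
	uint64_t evt_head = head;
	uint64_t buf_size = (uint64_t)mask + 1;
	struct evt_header *hdr = NULL;

	*start = head;				/* newest record begins at head */
	while (evt_head - head < buf_size) {
		hdr = (struct evt_header *)(buf + (evt_head & mask));
		if (hdr->size == 0) {		/* hit unwritten space */
			*end = evt_head;
			return 0;
		}
		evt_head += hdr->size;		/* step over one record */
	}
	/*
	 * Covered one full buffer; drop the record that straddles the oldest,
	 * partially overwritten data.
	 */
	if (evt_head - head > buf_size)
		evt_head -= hdr->size;
	*end = evt_head;
	return 0;
}

int main(void)
{
	uint64_t storage[8] = { 0 };		/* 64-byte ring, aligned */
	unsigned char *buf = (unsigned char *)storage;
	struct evt_header *h;
	uint64_t start, end;

	/* Two 16-byte records at head = 0; the rest is still unwritten. */
	h = (struct evt_header *)buf;		h->size = 16;
	h = (struct evt_header *)(buf + 16);	h->size = 16;

	find_backward_range(buf, 63, 0, &start, &end);
	printf("readable range: [%llu, %llu)\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}

With md->prev kept, the patched code only falls back to this full scan when
the backward ring has actually overflowed.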