perf mmap: Simplify perf_mmap__consume()

The 'overwrite' mode is already recorded in 'struct perf_mmap' itself
(map->overwrite), so it isn't necessary to pass the 'overwrite' argument
to perf_mmap__consume().  Discard it.
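
For reference, a minimal sketch of a typical consumer loop after this
change.  It assumes 'md' points to an already-mapped, non-overwrite ring
buffer; the explicit 'false' flags for perf_mmap__read_init() and
perf_mmap__read_event() mirror the call sites touched below, which still
take them at this point in the series:

	union perf_event *event;
	u64 start, end;

	/* Establish the readable [start, end) window of the ring buffer. */
	if (perf_mmap__read_init(md, false, &start, &end) < 0)
		return;

	while ((event = perf_mmap__read_event(md, false, &start, end)) != NULL) {
		/* ... process 'event' ... */
		perf_mmap__consume(md);	/* no 'overwrite' flag needed */
	}

	perf_mmap__read_done(md);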

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Suggested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/r/1520350567-80082-6-git-send-email-kan.liang@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Author:    Kan Liang <kan.liang@linux.intel.com>  2018-03-06 10:36:05 -05:00
Committer: Arnaldo Carvalho de Melo <acme@redhat.com>
parent bdec8b2f7e
commit d6ace3df43
15 changed files with 18 additions and 18 deletions

@@ -134,7 +134,7 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
comm2_time = sample.time;
}
next_event:
-perf_mmap__consume(md, false);
+perf_mmap__consume(md);
}
perf_mmap__read_done(md);
}

@@ -760,7 +760,7 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
while ((event = perf_mmap__read_event(md, false, &start, end)) != NULL) {
err = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
if (err) {
-perf_mmap__consume(md, false);
+perf_mmap__consume(md);
pr_err("Failed to parse sample\n");
return -1;
}
@@ -770,7 +770,7 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
* FIXME: Here we can't consume the event, as perf_session__queue_event will
* point to it, and it'll get possibly overwritten by the kernel.
*/
-perf_mmap__consume(md, false);
+perf_mmap__consume(md);
if (err) {
pr_err("Failed to enqueue sample: %d\n", err);

@@ -879,7 +879,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
} else
++session->evlist->stats.nr_unknown_events;
next_event:
-perf_mmap__consume(md, opts->overwrite);
+perf_mmap__consume(md);
}
perf_mmap__read_done(md);

@@ -2522,7 +2522,7 @@ again:
trace__handle_event(trace, event, &sample);
next_event:
-perf_mmap__consume(md, false);
+perf_mmap__consume(md);
if (interrupted)
goto out_disable;

@@ -420,7 +420,7 @@ static int process_events(struct machine *machine, struct perf_evlist *evlist,
while ((event = perf_mmap__read_event(md, false, &start, end)) != NULL) {
ret = process_event(machine, evlist, event, state);
-perf_mmap__consume(md, false);
+perf_mmap__consume(md);
if (ret < 0)
return ret;
}

@@ -42,7 +42,7 @@ static int find_comm(struct perf_evlist *evlist, const char *comm)
(pid_t)event->comm.tid == getpid() &&
strcmp(event->comm.comm, comm) == 0)
found += 1;
-perf_mmap__consume(md, false);
+perf_mmap__consume(md);
}
perf_mmap__read_done(md);
}

@@ -135,7 +135,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
goto out_delete_evlist;
}
nr_events[evsel->idx]++;
-perf_mmap__consume(md, false);
+perf_mmap__consume(md);
}
perf_mmap__read_done(md);

@@ -101,7 +101,7 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
++nr_events;
if (type != PERF_RECORD_SAMPLE) {
-perf_mmap__consume(md, false);
+perf_mmap__consume(md);
continue;
}

@@ -272,7 +272,7 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
++errs;
}
-perf_mmap__consume(md, false);
+perf_mmap__consume(md);
}
perf_mmap__read_done(md);
}

@@ -114,7 +114,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
total_periods += sample.period;
nr_samples++;
next_event:
-perf_mmap__consume(md, false);
+perf_mmap__consume(md);
}
perf_mmap__read_done(md);

@@ -270,7 +270,7 @@ static int process_events(struct perf_evlist *evlist,
while ((event = perf_mmap__read_event(md, false, &start, end)) != NULL) {
cnt += 1;
ret = add_event(evlist, &events, event);
-perf_mmap__consume(md, false);
+perf_mmap__consume(md);
if (ret < 0)
goto out_free_nodes;
}

@@ -120,7 +120,7 @@ retry:
if (event->header.type == PERF_RECORD_EXIT)
nr_exit++;
-perf_mmap__consume(md, false);
+perf_mmap__consume(md);
}
perf_mmap__read_done(md);

@@ -118,7 +118,7 @@ void perf_mmap__put(struct perf_mmap *map)
perf_mmap__munmap(map);
}
-void perf_mmap__consume(struct perf_mmap *map, bool overwrite __maybe_unused)
+void perf_mmap__consume(struct perf_mmap *map)
{
if (!map->overwrite) {
u64 old = map->prev;
@@ -260,7 +260,7 @@ int perf_mmap__read_init(struct perf_mmap *md, bool overwrite,
WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
md->prev = head;
-perf_mmap__consume(md, overwrite);
+perf_mmap__consume(md);
return -EAGAIN;
}
@@ -314,7 +314,7 @@ int perf_mmap__push(struct perf_mmap *md, void *to,
}
md->prev = head;
-perf_mmap__consume(md, md->overwrite);
+perf_mmap__consume(md);
out:
return rc;
}

@@ -66,7 +66,7 @@ void perf_mmap__munmap(struct perf_mmap *map);
void perf_mmap__get(struct perf_mmap *map);
void perf_mmap__put(struct perf_mmap *map);
-void perf_mmap__consume(struct perf_mmap *map, bool overwrite);
+void perf_mmap__consume(struct perf_mmap *map);
static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
{

@@ -1013,7 +1013,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
err = perf_evsel__parse_sample(evsel, event, &pevent->sample);
/* Consume the even only after we parsed it out. */
-perf_mmap__consume(md, false);
+perf_mmap__consume(md);
if (err)
return PyErr_Format(PyExc_OSError,