path: root/kernel/trace/trace_functions_graph.c
author     Rodrigo Vivi <[email protected]>  2020-02-25 09:29:58 -0800
committer  Rodrigo Vivi <[email protected]>  2020-02-25 09:39:23 -0800
commit     ff36e78fdb251b9fa65028554689806961e011eb (patch)
tree       f5af925d509224e06a10936196be6c06bcbdc6ae /kernel/trace/trace_functions_graph.c
parent     143d9c3e7b6aa2b785abba04266ed75f2b52e94a (diff)
parent     1b245ec5b685ebf8e6e5d1e6b5bcc03b6608e8b0 (diff)
Merge drm/drm-next into drm-intel-next-queued
Some DSI and VBT pending patches from Hans will apply cleanly and with
less ugly conflicts if they are rebuilt on top of other patches that
recently landed on drm-next.

Reference: https://patchwork.freedesktop.org/series/70952/
Cc: Hans de Goede <[email protected]>
Signed-off-by: Rodrigo Vivi <[email protected]>
Diffstat (limited to 'kernel/trace/trace_functions_graph.c')
-rw-r--r--  kernel/trace/trace_functions_graph.c  14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 78af97163147..7d71546ba00a 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -101,7 +101,7 @@ int __trace_graph_entry(struct trace_array *tr,
{
struct trace_event_call *call = &event_funcgraph_entry;
struct ring_buffer_event *event;
- struct ring_buffer *buffer = tr->trace_buffer.buffer;
+ struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ftrace_graph_ent_entry *entry;
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@ -171,7 +171,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
local_irq_save(flags);
cpu = raw_smp_processor_id();
- data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+ data = per_cpu_ptr(tr->array_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
pc = preempt_count();
@@ -221,7 +221,7 @@ void __trace_graph_return(struct trace_array *tr,
{
struct trace_event_call *call = &event_funcgraph_exit;
struct ring_buffer_event *event;
- struct ring_buffer *buffer = tr->trace_buffer.buffer;
+ struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ftrace_graph_ret_entry *entry;
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
@@ -252,7 +252,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
local_irq_save(flags);
cpu = raw_smp_processor_id();
- data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+ data = per_cpu_ptr(tr->array_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
pc = preempt_count();
@@ -444,9 +444,9 @@ get_return_for_leaf(struct trace_iterator *iter,
* We need to consume the current entry to see
* the next one.
*/
- ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
+ ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
NULL, NULL);
- event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
+ event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
NULL, NULL);
}
@@ -503,7 +503,7 @@ print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
{
unsigned long long usecs;
- usecs = iter->ts - iter->trace_buffer->time_start;
+ usecs = iter->ts - iter->array_buffer->time_start;
do_div(usecs, NSEC_PER_USEC);
trace_seq_printf(s, "%9llu us | ", usecs);
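
Note on the renames in the hunks above: this merge pulls in the tracing-core
changes in which struct ring_buffer was renamed to struct trace_buffer and the
trace_array member trace_buffer became array_buffer, which is why every access
in this file switches from tr->trace_buffer to tr->array_buffer. The following
is a simplified sketch of the resulting relationship, not the full definitions
(the real structures in kernel/trace/trace.h carry many more fields):

    struct trace_buffer;                            /* the ring buffer type (formerly struct ring_buffer) */

    struct array_buffer {
            struct trace_array              *tr;
            struct trace_buffer             *buffer;     /* ring buffer: tr->array_buffer.buffer          */
            struct trace_array_cpu __percpu *data;       /* per-CPU state: per_cpu_ptr(...->data, cpu)    */
            u64                             time_start;  /* base timestamp used by print_graph_rel_time() */
    };

    struct trace_array {
            struct array_buffer             array_buffer;  /* formerly: struct trace_buffer trace_buffer  */
            /* ... */
    };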