Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c | 459
1 file changed, 262 insertions, 197 deletions
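Most of this diff is a mechanical rename: the buffer descriptor embedded in each trace instance, previously struct trace_buffer, becomes struct array_buffer (and the trace_array member trace_buffer becomes array_buffer), which frees the name trace_buffer to replace struct ring_buffer as the type the ring-buffer code hands out. The relationship after the rename, as a simplified sketch (the authoritative definition lives in kernel/trace/trace.h; the field list here is an approximation):

	/* One of these is embedded in each trace_array instance. */
	struct array_buffer {
		struct trace_array		*tr;	/* owning instance */
		struct trace_buffer		*buffer; /* was: struct ring_buffer * */
		struct trace_array_cpu __percpu	*data;	/* per-CPU state */
		u64				time_start;
		int				cpu;
	};

Each instance owns one array_buffer (plus max_buffer under CONFIG_TRACER_MAX_TRACE), and each array_buffer wraps the lockless ring buffer it records into.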
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 5b6ee4aadc26..6b11e4e2150c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -162,8 +162,8 @@ union trace_eval_map_item {
 static union trace_eval_map_item *trace_eval_maps;
 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
 
-static int tracing_set_tracer(struct trace_array *tr, const char *buf);
-static void ftrace_trace_userstack(struct ring_buffer *buffer,
+int tracing_set_tracer(struct trace_array *tr, const char *buf);
+static void ftrace_trace_userstack(struct trace_buffer *buffer,
 				   unsigned long flags, int pc);
 
 #define MAX_TRACER_SIZE		100
@@ -338,7 +338,7 @@ int tracing_check_open_get_tr(struct trace_array *tr)
 }
 
 int call_filter_check_discard(struct trace_event_call *call, void *rec,
-			      struct ring_buffer *buffer,
+			      struct trace_buffer *buffer,
 			      struct ring_buffer_event *event)
 {
 	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
@@ -603,7 +603,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
 	return read;
 }
 
-static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
+static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
 {
 	u64 ts;
 
@@ -619,7 +619,7 @@ static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
 
 u64 ftrace_now(int cpu)
 {
-	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
+	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
 }
 
 /**
@@ -747,22 +747,22 @@ static inline void trace_access_lock_init(void)
 #endif
 
 #ifdef CONFIG_STACKTRACE
-static void __ftrace_trace_stack(struct ring_buffer *buffer,
+static void __ftrace_trace_stack(struct trace_buffer *buffer,
 				 unsigned long flags,
 				 int skip, int pc, struct pt_regs *regs);
 static inline void ftrace_trace_stack(struct trace_array *tr,
-				      struct ring_buffer *buffer,
+				      struct trace_buffer *buffer,
 				      unsigned long flags,
 				      int skip, int pc, struct pt_regs *regs);
 
 #else
-static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
+static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
 					unsigned long flags,
 					int skip, int pc, struct pt_regs *regs)
 {
 }
 static inline void ftrace_trace_stack(struct trace_array *tr,
-				      struct ring_buffer *buffer,
+				      struct trace_buffer *buffer,
 				      unsigned long flags,
 				      int skip, int pc, struct pt_regs *regs)
 {
@@ -780,7 +780,7 @@ trace_event_setup(struct ring_buffer_event *event,
 }
 
 static __always_inline struct ring_buffer_event *
-__trace_buffer_lock_reserve(struct ring_buffer *buffer,
+__trace_buffer_lock_reserve(struct trace_buffer *buffer,
 			  int type,
 			  unsigned long len,
 			  unsigned long flags, int pc)
@@ -796,8 +796,8 @@ __trace_buffer_lock_reserve(struct ring_buffer *buffer,
 
 void tracer_tracing_on(struct trace_array *tr)
 {
-	if (tr->trace_buffer.buffer)
-		ring_buffer_record_on(tr->trace_buffer.buffer);
+	if (tr->array_buffer.buffer)
+		ring_buffer_record_on(tr->array_buffer.buffer);
 	/*
 	 * This flag is looked at when buffers haven't been allocated
 	 * yet, or by some tracers (like irqsoff), that just want to
@@ -825,7 +825,7 @@ EXPORT_SYMBOL_GPL(tracing_on);
 
 
 static __always_inline void
-__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
+__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
 {
 	__this_cpu_write(trace_taskinfo_save, true);
 
@@ -848,7 +848,7 @@ __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *eve
 int __trace_puts(unsigned long ip, const char *str, int size)
 {
 	struct ring_buffer_event *event;
-	struct ring_buffer *buffer;
+	struct trace_buffer *buffer;
 	struct print_entry *entry;
 	unsigned long irq_flags;
 	int alloc;
@@ -865,11 +865,14 @@ int __trace_puts(unsigned long ip, const char *str, int size)
 	alloc = sizeof(*entry) + size + 2; /* possible \n added */
 
 	local_save_flags(irq_flags);
-	buffer = global_trace.trace_buffer.buffer;
+	buffer = global_trace.array_buffer.buffer;
+	ring_buffer_nest_start(buffer);
 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
 					    irq_flags, pc);
-	if (!event)
-		return 0;
+	if (!event) {
+		size = 0;
+		goto out;
+	}
 
 	entry = ring_buffer_event_data(event);
 	entry->ip = ip;
@@ -885,7 +888,8 @@ int __trace_puts(unsigned long ip, const char *str, int size)
 
 	__buffer_unlock_commit(buffer, event);
 	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
-
+ out:
+	ring_buffer_nest_end(buffer);
 	return size;
 }
 EXPORT_SYMBOL_GPL(__trace_puts);
@@ -898,10 +902,11 @@ EXPORT_SYMBOL_GPL(__trace_puts);
 int __trace_bputs(unsigned long ip, const char *str)
 {
 	struct ring_buffer_event *event;
-	struct ring_buffer *buffer;
+	struct trace_buffer *buffer;
 	struct bputs_entry *entry;
 	unsigned long irq_flags;
 	int size = sizeof(struct bputs_entry);
+	int ret = 0;
 	int pc;
 
 	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
@@ -913,11 +918,13 @@ int __trace_bputs(unsigned long ip, const char *str)
 		return 0;
 
 	local_save_flags(irq_flags);
-	buffer = global_trace.trace_buffer.buffer;
+	buffer = global_trace.array_buffer.buffer;
+
+	ring_buffer_nest_start(buffer);
 	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
 					    irq_flags, pc);
 	if (!event)
-		return 0;
+		goto out;
 
 	entry = ring_buffer_event_data(event);
 	entry->ip			= ip;
@@ -926,7 +933,10 @@ int __trace_bputs(unsigned long ip, const char *str)
 
 	__buffer_unlock_commit(buffer, event);
 	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
 
-	return 1;
+	ret = 1;
+ out:
+	ring_buffer_nest_end(buffer);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(__trace_bputs);
 
@@ -1036,9 +1046,9 @@ void *tracing_cond_snapshot_data(struct trace_array *tr)
 }
 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
 
-static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
-					struct trace_buffer *size_buf, int cpu_id);
-static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
+static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
+					struct array_buffer *size_buf, int cpu_id);
+static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
 
 int tracing_alloc_snapshot_instance(struct trace_array *tr)
 {
@@ -1048,7 +1058,7 @@ int tracing_alloc_snapshot_instance(struct trace_array *tr)
 
 		/* allocate spare buffer */
 		ret = resize_buffer_duplicate_size(&tr->max_buffer,
-				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
+				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
 		if (ret < 0)
 			return ret;
 
@@ -1251,8 +1261,8 @@ EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
 
 void tracer_tracing_off(struct trace_array *tr)
 {
-	if (tr->trace_buffer.buffer)
-		ring_buffer_record_off(tr->trace_buffer.buffer);
+	if (tr->array_buffer.buffer)
+		ring_buffer_record_off(tr->array_buffer.buffer);
 	/*
 	 * This flag is looked at when buffers haven't been allocated
 	 * yet, or by some tracers (like irqsoff), that just want to
@@ -1294,8 +1304,8 @@ void disable_trace_on_warning(void)
  */
 bool tracer_tracing_is_on(struct trace_array *tr)
 {
-	if (tr->trace_buffer.buffer)
-		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
+	if (tr->array_buffer.buffer)
+		return ring_buffer_record_is_on(tr->array_buffer.buffer);
 	return !tr->buffer_disabled;
 }
 
@@ -1590,8 +1600,8 @@ void latency_fsnotify(struct trace_array *tr)
 static void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
-	struct trace_buffer *trace_buf = &tr->trace_buffer;
-	struct trace_buffer *max_buf = &tr->max_buffer;
+	struct array_buffer *trace_buf = &tr->array_buffer;
+	struct array_buffer *max_buf = &tr->max_buffer;
 	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
 	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
 
@@ -1649,8 +1659,8 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
 
 	arch_spin_lock(&tr->max_lock);
 
-	/* Inherit the recordable setting from trace_buffer */
-	if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
+	/* Inherit the recordable setting from array_buffer */
+	if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
 		ring_buffer_record_on(tr->max_buffer.buffer);
 	else
 		ring_buffer_record_off(tr->max_buffer.buffer);
@@ -1659,7 +1669,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
 	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
 		goto out_unlock;
 #endif
-	swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
+	swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
 
 	__update_max_tr(tr, tsk, cpu);
 
@@ -1692,7 +1702,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 	arch_spin_lock(&tr->max_lock);
 
-	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
+	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
 
 	if (ret == -EBUSY) {
 		/*
@@ -1718,7 +1728,7 @@ static int wait_on_pipe(struct trace_iterator *iter, int full)
 	if (trace_buffer_iter(iter, iter->cpu_file))
 		return 0;
 
-	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
+	return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
 				full);
 }
 
@@ -1769,7 +1779,7 @@ static int run_tracer_selftest(struct tracer *type)
 	 * internal tracing to verify that everything is in order.
 	 * If we fail, we do not register this tracer.
	 */
-	tracing_reset_online_cpus(&tr->trace_buffer);
+	tracing_reset_online_cpus(&tr->array_buffer);
 
 	tr->current_trace = type;
 
@@ -1795,7 +1805,7 @@ static int run_tracer_selftest(struct tracer *type)
 		return -1;
 	}
 	/* Only reset on passing, to avoid touching corrupted buffers */
-	tracing_reset_online_cpus(&tr->trace_buffer);
+	tracing_reset_online_cpus(&tr->array_buffer);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 	if (type->use_max_tr) {
@@ -1827,6 +1837,7 @@ static __init int init_trace_selftests(void)
 
 	pr_info("Running postponed tracer tests:\n");
 
+	tracing_selftest_running = true;
 	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
 		/* This loop can take minutes when sanitizers are enabled, so
 		 * lets make sure we allow RCU processing.
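__trace_puts() and __trace_bputs() above (and trace_vbprintk()/__trace_array_vprintk() further down) now bracket their reserve/commit sequence with ring_buffer_nest_start()/ring_buffer_nest_end(). This tells the ring buffer's recursion protection that the nested write is intentional, so a trace_printk() issued from inside another event's context is not discarded as runaway recursion. The shape of the pattern, as a minimal sketch with error handling trimmed:

	buffer = global_trace.array_buffer.buffer;
	ring_buffer_nest_start(buffer);	/* permit one intentional nesting level */
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    irq_flags, pc);
	if (event) {
		/* fill in the entry, commit it, record the stack... */
		__buffer_unlock_commit(buffer, event);
	}
	ring_buffer_nest_end(buffer);	/* must pair with nest_start on every path */

That pairing requirement is why the early "return 0;" exits above become "goto out;" jumps to a label placed just before ring_buffer_nest_end().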
@@ -1849,6 +1860,7 @@ static __init int init_trace_selftests(void)
 		list_del(&p->list);
 		kfree(p);
 	}
+	tracing_selftest_running = false;
 
  out:
 	mutex_unlock(&trace_types_lock);
@@ -1962,9 +1974,9 @@ int __init register_tracer(struct tracer *type)
 	return ret;
 }
 
-static void tracing_reset_cpu(struct trace_buffer *buf, int cpu)
+static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
 {
-	struct ring_buffer *buffer = buf->buffer;
+	struct trace_buffer *buffer = buf->buffer;
 
 	if (!buffer)
 		return;
@@ -1978,9 +1990,9 @@ static void tracing_reset_cpu(struct trace_buffer *buf, int cpu)
 	ring_buffer_record_enable(buffer);
 }
 
-void tracing_reset_online_cpus(struct trace_buffer *buf)
+void tracing_reset_online_cpus(struct array_buffer *buf)
 {
-	struct ring_buffer *buffer = buf->buffer;
+	struct trace_buffer *buffer = buf->buffer;
 	int cpu;
 
 	if (!buffer)
@@ -2008,7 +2020,7 @@ void tracing_reset_all_online_cpus(void)
 		if (!tr->clear_trace)
 			continue;
 		tr->clear_trace = false;
-		tracing_reset_online_cpus(&tr->trace_buffer);
+		tracing_reset_online_cpus(&tr->array_buffer);
 #ifdef CONFIG_TRACER_MAX_TRACE
 		tracing_reset_online_cpus(&tr->max_buffer);
 #endif
@@ -2098,7 +2110,7 @@ int is_tracing_stopped(void)
  */
 void tracing_start(void)
 {
-	struct ring_buffer *buffer;
+	struct trace_buffer *buffer;
 	unsigned long flags;
 
 	if (tracing_disabled)
@@ -2117,7 +2129,7 @@ void tracing_start(void)
 	/* Prevent the buffers from switching */
 	arch_spin_lock(&global_trace.max_lock);
 
-	buffer = global_trace.trace_buffer.buffer;
+	buffer = global_trace.array_buffer.buffer;
 	if (buffer)
 		ring_buffer_record_enable(buffer);
 
@@ -2135,7 +2147,7 @@ void tracing_start(void)
 
 static void tracing_start_tr(struct trace_array *tr)
 {
-	struct ring_buffer *buffer;
+	struct trace_buffer *buffer;
 	unsigned long flags;
 
 	if (tracing_disabled)
@@ -2156,7 +2168,7 @@ static void tracing_start_tr(struct trace_array *tr)
 		goto out;
 	}
 
-	buffer = tr->trace_buffer.buffer;
+	buffer = tr->array_buffer.buffer;
 	if (buffer)
 		ring_buffer_record_enable(buffer);
 
@@ -2172,7 +2184,7 @@ static void tracing_start_tr(struct trace_array *tr)
  */
 void tracing_stop(void)
 {
-	struct ring_buffer *buffer;
+	struct trace_buffer *buffer;
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
@@ -2182,7 +2194,7 @@ void tracing_stop(void)
 	/* Prevent the buffers from switching */
 	arch_spin_lock(&global_trace.max_lock);
 
-	buffer = global_trace.trace_buffer.buffer;
+	buffer = global_trace.array_buffer.buffer;
 	if (buffer)
 		ring_buffer_record_disable(buffer);
 
@@ -2200,7 +2212,7 @@ void tracing_stop(void)
 
 static void tracing_stop_tr(struct trace_array *tr)
 {
-	struct ring_buffer *buffer;
+	struct trace_buffer *buffer;
 	unsigned long flags;
 
 	/* If global, we need to also stop the max tracer */
@@ -2211,7 +2223,7 @@ static void tracing_stop_tr(struct trace_array *tr)
 	if (tr->stop_count++)
 		goto out;
 
-	buffer = tr->trace_buffer.buffer;
+	buffer = tr->array_buffer.buffer;
 	if (buffer)
 		ring_buffer_record_disable(buffer);
 
@@ -2442,7 +2454,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
 
 struct ring_buffer_event *
-trace_buffer_lock_reserve(struct ring_buffer *buffer,
+trace_buffer_lock_reserve(struct trace_buffer *buffer,
 			  int type,
 			  unsigned long len,
 			  unsigned long flags, int pc)
@@ -2561,10 +2573,10 @@ void trace_buffered_event_disable(void)
 	preempt_enable();
 }
 
-static struct ring_buffer *temp_buffer;
+static struct trace_buffer *temp_buffer;
 
 struct ring_buffer_event *
-trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
+trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
 			  struct trace_event_file *trace_file,
 			  int type, unsigned long len,
 			  unsigned long flags, int pc)
@@ -2572,7 +2584,7 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
 	struct ring_buffer_event *entry;
 	int val;
 
-	*current_rb = trace_file->tr->trace_buffer.buffer;
+	*current_rb = trace_file->tr->array_buffer.buffer;
 
 	if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
 	     (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
@@ -2610,6 +2622,7 @@ static DEFINE_MUTEX(tracepoint_printk_mutex);
 static void output_printk(struct trace_event_buffer *fbuffer)
 {
 	struct trace_event_call *event_call;
+	struct trace_event_file *file;
 	struct trace_event *event;
 	unsigned long flags;
 	struct trace_iterator *iter = tracepoint_print_iter;
@@ -2623,6 +2636,12 @@ static void output_printk(struct trace_event_buffer *fbuffer)
 	    !event_call->event.funcs->trace)
 		return;
 
+	file = fbuffer->trace_file;
+	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
+	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
+	     !filter_match_preds(file->filter, fbuffer->entry)))
+		return;
+
 	event = &fbuffer->trace_file->event_call->event;
 
 	spin_lock_irqsave(&tracepoint_iter_lock, flags);
@@ -2673,9 +2692,9 @@ void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
 	if (static_key_false(&tracepoint_printk_key.key))
 		output_printk(fbuffer);
 
-	event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
+	event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
 				    fbuffer->event, fbuffer->entry,
-				    fbuffer->flags, fbuffer->pc);
+				    fbuffer->flags, fbuffer->pc, fbuffer->regs);
 }
 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
 
@@ -2689,7 +2708,7 @@ EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
 # define STACK_SKIP 3
 
 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
-				     struct ring_buffer *buffer,
+				     struct trace_buffer *buffer,
 				     struct ring_buffer_event *event,
 				     unsigned long flags, int pc,
 				     struct pt_regs *regs)
@@ -2710,7 +2729,7 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
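output_printk(), used when the tracepoint_printk boot parameter mirrors events to the console, previously printed every event it was handed. The hunk above makes it honor the same per-file state as the buffer path, skipping files that are soft-disabled and entries that fail an attached filter:

	file = fbuffer->trace_file;
	/* Skip events soft-disabled through the file's "enable" control. */
	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
	    /* ...and entries that do not match the file's filter, if any. */
	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
	     !filter_match_preds(file->filter, fbuffer->entry)))
		return;

The switch from event_trigger_unlock_commit() to event_trigger_unlock_commit_regs() in the same region threads fbuffer->regs through to the commit path as well.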
 */
 void
-trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
+trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
 				   struct ring_buffer_event *event)
 {
 	__buffer_unlock_commit(buffer, event);
@@ -2845,7 +2864,7 @@ trace_function(struct trace_array *tr,
 	       int pc)
 {
 	struct trace_event_call *call = &event_function;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct ftrace_entry *entry;
 
@@ -2883,7 +2902,7 @@ struct ftrace_stacks {
 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
 
-static void __ftrace_trace_stack(struct ring_buffer *buffer,
+static void __ftrace_trace_stack(struct trace_buffer *buffer,
 				 unsigned long flags,
 				 int skip, int pc, struct pt_regs *regs)
 {
@@ -2958,7 +2977,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 }
 
 static inline void ftrace_trace_stack(struct trace_array *tr,
-				      struct ring_buffer *buffer,
+				      struct trace_buffer *buffer,
 				      unsigned long flags,
 				      int skip, int pc, struct pt_regs *regs)
 {
@@ -2971,7 +2990,7 @@ static inline void ftrace_trace_stack(struct trace_array *tr,
 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
 		   int pc)
 {
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+	struct trace_buffer *buffer = tr->array_buffer.buffer;
 
 	if (rcu_is_watching()) {
 		__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
@@ -3009,7 +3028,7 @@ void trace_dump_stack(int skip)
 	/* Skip 1 to skip this function. */
 	skip++;
 #endif
-	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
+	__ftrace_trace_stack(global_trace.array_buffer.buffer,
 			     flags, skip, preempt_count(), NULL);
 }
 EXPORT_SYMBOL_GPL(trace_dump_stack);
@@ -3018,7 +3037,7 @@ EXPORT_SYMBOL_GPL(trace_dump_stack);
 static DEFINE_PER_CPU(int, user_stack_count);
 
 static void
-ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
+ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc)
 {
 	struct trace_event_call *call = &event_user_stack;
 	struct ring_buffer_event *event;
@@ -3063,7 +3082,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	preempt_enable();
 }
 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
-static void ftrace_trace_userstack(struct ring_buffer *buffer,
+static void ftrace_trace_userstack(struct trace_buffer *buffer,
 				   unsigned long flags, int pc)
 {
 }
@@ -3109,7 +3128,7 @@ static int alloc_percpu_trace_buffer(void)
 	struct trace_buffer_struct *buffers;
 
 	buffers = alloc_percpu(struct trace_buffer_struct);
-	if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
+	if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
		return -ENOMEM;
 
 	trace_percpu_buffer = buffers;
@@ -3154,7 +3173,7 @@ void trace_printk_init_buffers(void)
 	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
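The WARN()/WARN_ONCE() calls on allocation failures are converted to MEM_FAIL() here and throughout the rest of the file. MEM_FAIL() is a tracing-local helper defined in kernel/trace/trace.h; an allocation failure is not a kernel bug, so it reports via pr_err() once instead of dumping a WARN() backtrace. Roughly (an approximation, not the exact definition):

	/* Report a memory-allocation failure once, without a backtrace. */
	#define MEM_FAIL(condition, fmt, ...) ({			\
		static bool __section(.data.once) __warned;		\
		int __ret_warn_once = !!(condition);			\
									\
		if (unlikely(__ret_warn_once && !__warned)) {		\
			__warned = true;				\
			pr_err("ERROR: " fmt, ##__VA_ARGS__);		\
		}							\
		unlikely(__ret_warn_once);				\
	})

Like WARN(), it evaluates to the condition, so it can sit directly inside an if().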
	 */
-	if (global_trace.trace_buffer.buffer)
+	if (global_trace.array_buffer.buffer)
 		tracing_start_cmdline_record();
 }
 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
@@ -3188,7 +3207,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
 	struct trace_event_call *call = &event_bprint;
 	struct ring_buffer_event *event;
-	struct ring_buffer *buffer;
+	struct trace_buffer *buffer;
 	struct trace_array *tr = &global_trace;
 	struct bprint_entry *entry;
 	unsigned long flags;
@@ -3213,11 +3232,12 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
 
 	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
-		goto out;
+		goto out_put;
 
 	local_save_flags(flags);
 	size = sizeof(*entry) + sizeof(u32) * len;
-	buffer = tr->trace_buffer.buffer;
+	buffer = tr->array_buffer.buffer;
+	ring_buffer_nest_start(buffer);
 	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
 					    flags, pc);
 	if (!event)
@@ -3233,6 +3253,8 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	}
 
 out:
+	ring_buffer_nest_end(buffer);
+out_put:
 	put_trace_buf();
 
 out_nobuffer:
@@ -3245,7 +3267,7 @@ EXPORT_SYMBOL_GPL(trace_vbprintk);
 
 __printf(3, 0)
 static int
-__trace_array_vprintk(struct ring_buffer *buffer,
+__trace_array_vprintk(struct trace_buffer *buffer,
 		      unsigned long ip, const char *fmt, va_list args)
 {
 	struct trace_event_call *call = &event_print;
@@ -3275,6 +3297,7 @@ __trace_array_vprintk(struct ring_buffer *buffer,
 
 	local_save_flags(flags);
 	size = sizeof(*entry) + len + 1;
+	ring_buffer_nest_start(buffer);
 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
 					    flags, pc);
 	if (!event)
@@ -3289,6 +3312,7 @@ __trace_array_vprintk(struct ring_buffer *buffer,
 	}
 
 out:
+	ring_buffer_nest_end(buffer);
 	put_trace_buf();
 
 out_nobuffer:
@@ -3302,7 +3326,7 @@ __printf(3, 0)
 int trace_array_vprintk(struct trace_array *tr,
 			unsigned long ip, const char *fmt, va_list args)
 {
-	return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
+	return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
 }
 
 __printf(3, 0)
@@ -3326,7 +3350,7 @@ int trace_array_printk(struct trace_array *tr,
 EXPORT_SYMBOL_GPL(trace_array_printk);
 
 __printf(3, 4)
-int trace_array_printk_buf(struct ring_buffer *buffer,
+int trace_array_printk_buf(struct trace_buffer *buffer,
 			   unsigned long ip, const char *fmt, ...)
 {
 	int ret;
@@ -3367,7 +3391,7 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
 	if (buf_iter)
 		event = ring_buffer_iter_peek(buf_iter, ts);
 	else
-		event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
+		event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
 					 lost_events);
 
 	if (event) {
@@ -3382,7 +3406,7 @@ static struct trace_entry *
 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
 		  unsigned long *missing_events, u64 *ent_ts)
 {
-	struct ring_buffer *buffer = iter->trace_buffer->buffer;
+	struct trace_buffer *buffer = iter->array_buffer->buffer;
 	struct trace_entry *ent, *next = NULL;
 	unsigned long lost_events = 0, next_lost = 0;
 	int cpu_file = iter->cpu_file;
@@ -3459,7 +3483,7 @@ void *trace_find_next_entry_inc(struct trace_iterator *iter)
 
 static void trace_consume(struct trace_iterator *iter)
 {
-	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
+	ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
 			    &iter->lost_events);
 }
 
@@ -3497,7 +3521,7 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 	unsigned long entries = 0;
 	u64 ts;
 
-	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
+	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
 
 	buf_iter = trace_buffer_iter(iter, cpu);
 	if (!buf_iter)
@@ -3511,13 +3535,13 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
	 * by the timestamp being before the start of the buffer.
	 */
 	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
-		if (ts >= iter->trace_buffer->time_start)
+		if (ts >= iter->array_buffer->time_start)
 			break;
 		entries++;
 		ring_buffer_read(buf_iter, NULL);
 	}
 
-	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
+	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
 }
 
 /*
@@ -3602,7 +3626,7 @@ static void s_stop(struct seq_file *m, void *p)
 }
 
 static void
-get_total_entries_cpu(struct trace_buffer *buf, unsigned long *total,
+get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
 		      unsigned long *entries, int cpu)
 {
 	unsigned long count;
@@ -3624,7 +3648,7 @@ get_total_entries_cpu(struct trace_buffer *buf, unsigned long *total,
 }
 
 static void
-get_total_entries(struct trace_buffer *buf,
+get_total_entries(struct array_buffer *buf,
 		  unsigned long *total, unsigned long *entries)
 {
 	unsigned long t, e;
@@ -3647,7 +3671,7 @@ unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
 	if (!tr)
 		tr = &global_trace;
 
-	get_total_entries_cpu(&tr->trace_buffer, &total, &entries, cpu);
+	get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
 
 	return entries;
 }
@@ -3659,7 +3683,7 @@ unsigned long trace_total_entries(struct trace_array *tr)
 	if (!tr)
 		tr = &global_trace;
 
-	get_total_entries(&tr->trace_buffer, &total, &entries);
+	get_total_entries(&tr->array_buffer, &total, &entries);
 
 	return entries;
 }
@@ -3676,7 +3700,7 @@ static void print_lat_help_header(struct seq_file *m)
 		    "#     \\   /      |||||  \\    |   /         \n");
 }
 
-static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
+static void print_event_info(struct array_buffer *buf, struct seq_file *m)
 {
 	unsigned long total;
 	unsigned long entries;
@@ -3687,7 +3711,7 @@ static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
 	seq_puts(m, "#\n");
 }
 
-static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
+static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
 				   unsigned int flags)
 {
 	bool tgid = flags & TRACE_ITER_RECORD_TGID;
@@ -3698,7 +3722,7 @@ static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
 	seq_printf(m, "#              | |     %s    |       |         |\n",	 tgid ? "  |      " : "");
 }
 
-static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
+static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
 				       unsigned int flags)
 {
 	bool tgid = flags & TRACE_ITER_RECORD_TGID;
@@ -3720,7 +3744,7 @@ void
 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 {
 	unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
-	struct trace_buffer *buf = iter->trace_buffer;
+	struct array_buffer *buf = iter->array_buffer;
 	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
 	struct tracer *type = iter->trace;
 	unsigned long entries;
@@ -3795,7 +3819,7 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
 	    cpumask_test_cpu(iter->cpu, iter->started))
 		return;
 
-	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
+	if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
 		return;
 
 	if (cpumask_available(iter->started))
@@ -3929,7 +3953,7 @@ int trace_empty(struct trace_iterator *iter)
 			if (!ring_buffer_iter_empty(buf_iter))
 				return 0;
 		} else {
-			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
+			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
 				return 0;
 		}
 		return 1;
@@ -3941,7 +3965,7 @@ int trace_empty(struct trace_iterator *iter)
 			if (!ring_buffer_iter_empty(buf_iter))
 				return 0;
 		} else {
-			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
+			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
 				return 0;
 		}
 	}
@@ -4031,10 +4055,10 @@ void trace_default_header(struct seq_file *m)
 	} else {
 		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
 			if (trace_flags & TRACE_ITER_IRQ_INFO)
-				print_func_help_header_irq(iter->trace_buffer,
+				print_func_help_header_irq(iter->array_buffer,
 							   m, trace_flags);
 			else
-				print_func_help_header(iter->trace_buffer, m,
+				print_func_help_header(iter->array_buffer, m,
 						       trace_flags);
 		}
 	}
@@ -4192,21 +4216,21 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
 #ifdef CONFIG_TRACER_MAX_TRACE
 	/* Currently only the top directory has a snapshot */
 	if (tr->current_trace->print_max || snapshot)
-		iter->trace_buffer = &tr->max_buffer;
+		iter->array_buffer = &tr->max_buffer;
 	else
 #endif
-		iter->trace_buffer = &tr->trace_buffer;
+		iter->array_buffer = &tr->array_buffer;
 	iter->snapshot = snapshot;
 	iter->pos = -1;
 	iter->cpu_file = tracing_get_cpu(inode);
 	mutex_init(&iter->mutex);
 
 	/* Notify the tracer early; before we stop tracing. */
-	if (iter->trace && iter->trace->open)
+	if (iter->trace->open)
 		iter->trace->open(iter);
 
 	/* Annotate start of buffers if we had overruns */
-	if (ring_buffer_overruns(iter->trace_buffer->buffer))
+	if (ring_buffer_overruns(iter->array_buffer->buffer))
 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
 
 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
@@ -4220,7 +4244,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
 		for_each_tracing_cpu(cpu) {
 			iter->buffer_iter[cpu] =
-				ring_buffer_read_prepare(iter->trace_buffer->buffer,
+				ring_buffer_read_prepare(iter->array_buffer->buffer,
 							 cpu, GFP_KERNEL);
 		}
 		ring_buffer_read_prepare_sync();
@@ -4231,7 +4255,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
 	} else {
 		cpu = iter->cpu_file;
 		iter->buffer_iter[cpu] =
-			ring_buffer_read_prepare(iter->trace_buffer->buffer,
+			ring_buffer_read_prepare(iter->array_buffer->buffer,
 						 cpu, GFP_KERNEL);
 		ring_buffer_read_prepare_sync();
 		ring_buffer_read_start(iter->buffer_iter[cpu]);
@@ -4357,7 +4381,7 @@ static int tracing_open(struct inode *inode, struct file *file)
 	/* If this file was open for write, then erase contents */
 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
 		int cpu = tracing_get_cpu(inode);
-		struct trace_buffer *trace_buf = &tr->trace_buffer;
+		struct array_buffer *trace_buf = &tr->array_buffer;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 		if (tr->current_trace->print_max)
@@ -4554,20 +4578,13 @@ out_err:
 	return count;
 }
 
-static ssize_t
-tracing_cpumask_write(struct file *filp, const char __user *ubuf,
-		      size_t count, loff_t *ppos)
+int tracing_set_cpumask(struct trace_array *tr,
+			cpumask_var_t tracing_cpumask_new)
 {
-	struct trace_array *tr = file_inode(filp)->i_private;
-	cpumask_var_t tracing_cpumask_new;
-	int err, cpu;
-
-	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
-		return -ENOMEM;
+	int cpu;
 
-	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
-	if (err)
-		goto err_unlock;
+	if (!tr)
+		return -EINVAL;
 
 	local_irq_disable();
 	arch_spin_lock(&tr->max_lock);
@@ -4578,24 +4595,47 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		 */
 		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
 				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
-			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
-			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
+			atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
+			ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
 		}
 		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
 				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
-			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
-			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
+			atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
+			ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
 		}
 	}
 	arch_spin_unlock(&tr->max_lock);
 	local_irq_enable();
 
 	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
+
+	return 0;
+}
+
+static ssize_t
+tracing_cpumask_write(struct file *filp, const char __user *ubuf,
+		      size_t count, loff_t *ppos)
+{
+	struct trace_array *tr = file_inode(filp)->i_private;
+	cpumask_var_t tracing_cpumask_new;
+	int err;
+
+	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
+		return -ENOMEM;
+
+	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
+	if (err)
+		goto err_free;
+
+	err = tracing_set_cpumask(tr, tracing_cpumask_new);
+	if (err)
+		goto err_free;
+
 	free_cpumask_var(tracing_cpumask_new);
 
 	return count;
 
-err_unlock:
+err_free:
 	free_cpumask_var(tracing_cpumask_new);
 
 	return err;
@@ -4726,7 +4766,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
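tracing_cpumask_write() is split in the hunk above: the logic that applies a new mask moves into tracing_set_cpumask(), which is no longer static, and the write handler keeps only the user-copy and allocation bookkeeping. That gives in-kernel users a way to restrict an instance's tracing CPUs without going through the tracefs file. A hypothetical caller (my_tr stands in for a trace_array obtained elsewhere):

	cpumask_var_t new_mask;
	int ret;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(new_mask, cpu_online_mask);
	cpumask_clear_cpu(0, new_mask);		/* e.g. stop tracing CPU 0 */

	ret = tracing_set_cpumask(my_tr, new_mask);	/* my_tr: illustrative */
	free_cpumask_var(new_mask);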
 		ftrace_pid_follow_fork(tr, enabled);
 
 	if (mask == TRACE_ITER_OVERWRITE) {
-		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
+		ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
 #ifdef CONFIG_TRACER_MAX_TRACE
 		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
 #endif
@@ -4740,7 +4780,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
 	return 0;
 }
 
-static int trace_set_options(struct trace_array *tr, char *option)
+int trace_set_options(struct trace_array *tr, char *option)
 {
 	char *cmp;
 	int neg = 0;
@@ -5361,14 +5401,12 @@ static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
 	 * Paranoid! If ptr points to end, we don't want to increment past it.
 	 * This really should never happen.
	 */
+	(*pos)++;
 	ptr = update_eval_map(ptr);
 	if (WARN_ON_ONCE(!ptr))
 		return NULL;
 
 	ptr++;
-
-	(*pos)++;
-
 	ptr = update_eval_map(ptr);
 
 	return ptr;
@@ -5534,11 +5572,11 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf,
 
 int tracer_init(struct tracer *t, struct trace_array *tr)
 {
-	tracing_reset_online_cpus(&tr->trace_buffer);
+	tracing_reset_online_cpus(&tr->array_buffer);
 	return t->init(tr);
 }
 
-static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
+static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
 {
 	int cpu;
 
@@ -5548,8 +5586,8 @@ static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 /* resize @tr's buffer to the size of @size_tr's entries */
-static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
-					struct trace_buffer *size_buf, int cpu_id)
+static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
+					struct array_buffer *size_buf, int cpu_id)
 {
 	int cpu, ret = 0;
 
@@ -5587,10 +5625,10 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
 	ring_buffer_expanded = true;
 
 	/* May be called before buffers are initialized */
-	if (!tr->trace_buffer.buffer)
+	if (!tr->array_buffer.buffer)
 		return 0;
 
-	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
+	ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
 	if (ret < 0)
 		return ret;
 
@@ -5601,8 +5639,8 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
 
 	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
 	if (ret < 0) {
-		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
-						     &tr->trace_buffer, cpu);
+		int r = resize_buffer_duplicate_size(&tr->array_buffer,
+						     &tr->array_buffer, cpu);
 		if (r < 0) {
 			/*
 			 * AARGH! We are left with different
@@ -5633,15 +5671,15 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
 	if (cpu == RING_BUFFER_ALL_CPUS)
-		set_buffer_entries(&tr->trace_buffer, size);
+		set_buffer_entries(&tr->array_buffer, size);
 	else
-		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
+		per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
 
 	return ret;
 }
 
-static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
-					  unsigned long size, int cpu_id)
+ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
+				  unsigned long size, int cpu_id)
 {
 	int ret = size;
 
@@ -5720,7 +5758,7 @@ static void add_tracer_options(struct trace_array *tr, struct tracer *t)
 	create_trace_option_files(tr, t);
 }
 
-static int tracing_set_tracer(struct trace_array *tr, const char *buf)
+int tracing_set_tracer(struct trace_array *tr, const char *buf)
 {
 	struct tracer *t;
 #ifdef CONFIG_TRACER_MAX_TRACE
@@ -5979,7 +6017,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 
 	iter->tr = tr;
-	iter->trace_buffer = &tr->trace_buffer;
+	iter->array_buffer = &tr->array_buffer;
 	iter->cpu_file = tracing_get_cpu(inode);
 	mutex_init(&iter->mutex);
 	filp->private_data = iter;
@@ -6039,7 +6077,7 @@ trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_tabl
		 */
 		return EPOLLIN | EPOLLRDNORM;
 	else
-		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
+		return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
 					     filp, poll_table);
 }
 
@@ -6356,8 +6394,8 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
 		for_each_tracing_cpu(cpu) {
 			/* fill in the size from first enabled cpu */
 			if (size == 0)
-				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
-			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
+				size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
+			if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
 				buf_size_same = 0;
 				break;
 			}
@@ -6373,7 +6411,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
 		} else
 			r = sprintf(buf, "X\n");
 	} else
-		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
+		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
 
 	mutex_unlock(&trace_types_lock);
 
@@ -6420,7 +6458,7 @@ tracing_total_entries_read(struct file *filp, char __user *ubuf,
 	mutex_lock(&trace_types_lock);
 	for_each_tracing_cpu(cpu) {
-		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
+		size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
 		if (!ring_buffer_expanded)
 			expanded_size += trace_buf_size >> 10;
 	}
@@ -6470,7 +6508,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	struct trace_array *tr = filp->private_data;
 	struct ring_buffer_event *event;
 	enum event_trigger_type tt = ETT_NONE;
-	struct ring_buffer *buffer;
+	struct trace_buffer *buffer;
 	struct print_entry *entry;
 	unsigned long irq_flags;
 	ssize_t written;
@@ -6499,7 +6537,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	if (cnt < FAULTED_SIZE)
 		size += FAULTED_SIZE - cnt;
 
-	buffer = tr->trace_buffer.buffer;
+	buffer = tr->array_buffer.buffer;
 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
 					    irq_flags, preempt_count());
 	if (unlikely(!event))
@@ -6550,7 +6588,7 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
 {
 	struct trace_array *tr = filp->private_data;
 	struct ring_buffer_event *event;
-	struct ring_buffer *buffer;
+	struct trace_buffer *buffer;
 	struct raw_data_entry *entry;
 	unsigned long irq_flags;
 	ssize_t written;
@@ -6579,7 +6617,7 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
 	if (cnt < FAULT_SIZE_ID)
 		size += FAULT_SIZE_ID - cnt;
 
-	buffer = tr->trace_buffer.buffer;
+	buffer = tr->array_buffer.buffer;
 	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
 					    irq_flags, preempt_count());
 	if (!event)
@@ -6634,13 +6672,13 @@ int tracing_set_clock(struct trace_array *tr, const char *clockstr)
 
 	tr->clock_id = i;
 
-	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
+	ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
 
 	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
-	tracing_reset_online_cpus(&tr->trace_buffer);
+	tracing_reset_online_cpus(&tr->array_buffer);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 	if (tr->max_buffer.buffer)
@@ -6703,7 +6741,7 @@ static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
 
 	mutex_lock(&trace_types_lock);
 
-	if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer))
+	if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
 		seq_puts(m, "delta [absolute]\n");
 	else
 		seq_puts(m, "[delta] absolute\n");
@@ -6748,7 +6786,7 @@ int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
 			goto out;
 	}
 
-	ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs);
+	ring_buffer_set_time_stamp_abs(tr->array_buffer.buffer, abs);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 	if (tr->max_buffer.buffer)
@@ -6797,7 +6835,7 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file)
 		ret = 0;
 
 		iter->tr = tr;
-		iter->trace_buffer = &tr->max_buffer;
+		iter->array_buffer = &tr->max_buffer;
 		iter->cpu_file = tracing_get_cpu(inode);
 		m->private = iter;
 		file->private_data = m;
@@ -6860,7 +6898,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
 #endif
 		if (tr->allocated_snapshot)
 			ret = resize_buffer_duplicate_size(&tr->max_buffer,
-					&tr->trace_buffer, iter->cpu_file);
+					&tr->array_buffer, iter->cpu_file);
 		else
 			ret = tracing_alloc_snapshot_instance(tr);
 		if (ret < 0)
@@ -6935,7 +6973,7 @@ static int snapshot_raw_open(struct inode *inode, struct file *filp)
 	}
 
 	info->iter.snapshot = true;
-	info->iter.trace_buffer = &info->iter.tr->max_buffer;
+	info->iter.array_buffer = &info->iter.tr->max_buffer;
 
 	return ret;
 }
@@ -7310,7 +7348,7 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
 	info->iter.tr		= tr;
 	info->iter.cpu_file	= tracing_get_cpu(inode);
 	info->iter.trace	= tr->current_trace;
-	info->iter.trace_buffer = &tr->trace_buffer;
+	info->iter.array_buffer = &tr->array_buffer;
 	info->spare		= NULL;
 	/* Force reading ring buffer for first read */
 	info->read		= (unsigned int)-1;
@@ -7355,7 +7393,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 #endif
 
 	if (!info->spare) {
-		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
+		info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
 							  iter->cpu_file);
 		if (IS_ERR(info->spare)) {
 			ret = PTR_ERR(info->spare);
@@ -7373,7 +7411,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 
  again:
 	trace_access_lock(iter->cpu_file);
-	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
+	ret = ring_buffer_read_page(iter->array_buffer->buffer,
 				    &info->spare,
 				    count,
 				    iter->cpu_file, 0);
@@ -7423,7 +7461,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
 	__trace_array_put(iter->tr);
 
 	if (info->spare)
-		ring_buffer_free_read_page(iter->trace_buffer->buffer,
+		ring_buffer_free_read_page(iter->array_buffer->buffer,
 					   info->spare_cpu, info->spare);
 
 	kfree(info);
@@ -7433,7 +7471,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
 }
 
 struct buffer_ref {
-	struct ring_buffer	*buffer;
+	struct trace_buffer	*buffer;
 	void			*page;
 	int			cpu;
 	refcount_t		refcount;
@@ -7528,7 +7566,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 
  again:
 	trace_access_lock(iter->cpu_file);
-	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
+	entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
 
 	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
 		struct page *page;
@@ -7541,7 +7579,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		}
 
 		refcount_set(&ref->refcount, 1);
-		ref->buffer = iter->trace_buffer->buffer;
+		ref->buffer = iter->array_buffer->buffer;
 		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
 		if (IS_ERR(ref->page)) {
 			ret = PTR_ERR(ref->page);
@@ -7569,7 +7607,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		spd.nr_pages++;
 		*ppos += PAGE_SIZE;
 
-		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
+		entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
 	}
 
 	trace_access_unlock(iter->cpu_file);
@@ -7613,7 +7651,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 {
 	struct inode *inode = file_inode(filp);
 	struct trace_array *tr = inode->i_private;
-	struct trace_buffer *trace_buf = &tr->trace_buffer;
+	struct array_buffer *trace_buf = &tr->array_buffer;
 	int cpu = tracing_get_cpu(inode);
 	struct trace_seq *s;
 	unsigned long cnt;
@@ -7894,7 +7932,7 @@ static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
 
 	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
 
-	WARN_ONCE(!tr->percpu_dir,
+	MEM_FAIL(!tr->percpu_dir,
 		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
 
 	return tr->percpu_dir;
@@ -8215,7 +8253,7 @@ create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
 	for (cnt = 0; opts[cnt].name; cnt++) {
 		create_trace_option_file(tr, &topts[cnt], flags,
 					 &opts[cnt]);
-		WARN_ONCE(topts[cnt].entry == NULL,
+		MEM_FAIL(topts[cnt].entry == NULL,
 			  "Failed to create trace option: %s",
 			  opts[cnt].name);
 	}
@@ -8272,7 +8310,7 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
 		size_t cnt, loff_t *ppos)
 {
 	struct trace_array *tr = filp->private_data;
-	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	unsigned long val;
 	int ret;
@@ -8362,7 +8400,7 @@ static void
 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
 
 static int
-allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
+allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
 {
 	enum ring_buffer_flags rb_flags;
 
@@ -8382,8 +8420,8 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size
 	}
 
 	/* Allocate the first page for all buffers */
-	set_buffer_entries(&tr->trace_buffer,
-			   ring_buffer_size(tr->trace_buffer.buffer, 0));
+	set_buffer_entries(&tr->array_buffer,
+			   ring_buffer_size(tr->array_buffer.buffer, 0));
 
 	return 0;
 }
 
@@ -8392,18 +8430,18 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
 {
 	int ret;
 
-	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
+	ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
 	if (ret)
 		return ret;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 	ret = allocate_trace_buffer(tr, &tr->max_buffer,
 				    allocate_snapshot ? size : 1);
-	if (WARN_ON(ret)) {
-		ring_buffer_free(tr->trace_buffer.buffer);
-		tr->trace_buffer.buffer = NULL;
-		free_percpu(tr->trace_buffer.data);
-		tr->trace_buffer.data = NULL;
+	if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
+		ring_buffer_free(tr->array_buffer.buffer);
+		tr->array_buffer.buffer = NULL;
+		free_percpu(tr->array_buffer.data);
+		tr->array_buffer.data = NULL;
 		return -ENOMEM;
 	}
 	tr->allocated_snapshot = allocate_snapshot;
@@ -8417,7 +8455,7 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
 	return 0;
 }
 
-static void free_trace_buffer(struct trace_buffer *buf)
+static void free_trace_buffer(struct array_buffer *buf)
 {
 	if (buf->buffer) {
 		ring_buffer_free(buf->buffer);
@@ -8432,7 +8470,7 @@ static void free_trace_buffers(struct trace_array *tr)
 	if (!tr)
 		return;
 
-	free_trace_buffer(&tr->trace_buffer);
+	free_trace_buffer(&tr->array_buffer);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 	free_trace_buffer(&tr->max_buffer);
@@ -8463,6 +8501,34 @@ static void update_tracer_options(struct trace_array *tr)
 	mutex_unlock(&trace_types_lock);
 }
 
+/* Must have trace_types_lock held */
+struct trace_array *trace_array_find(const char *instance)
+{
+	struct trace_array *tr, *found = NULL;
+
+	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+		if (tr->name && strcmp(tr->name, instance) == 0) {
+			found = tr;
+			break;
+		}
+	}
+
+	return found;
+}
+
+struct trace_array *trace_array_find_get(const char *instance)
+{
+	struct trace_array *tr;
+
+	mutex_lock(&trace_types_lock);
+	tr = trace_array_find(instance);
+	if (tr)
+		tr->ref++;
+	mutex_unlock(&trace_types_lock);
+
+	return tr;
+}
+
 static struct trace_array *trace_array_create(const char *name)
 {
 	struct trace_array *tr;
@@ -8504,7 +8570,7 @@ static struct trace_array *trace_array_create(const char *name)
 
 	ret = event_trace_add_tracer(tr->dir, tr);
 	if (ret) {
-		tracefs_remove_recursive(tr->dir);
+		tracefs_remove(tr->dir);
 		goto out_free_tr;
 	}
 
@@ -8539,10 +8605,8 @@ static int instance_mkdir(const char *name)
 	mutex_lock(&trace_types_lock);
 
 	ret = -EEXIST;
-	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
-		if (tr->name && strcmp(tr->name, name) == 0)
-			goto out_unlock;
-	}
+	if (trace_array_find(name))
+		goto out_unlock;
 
 	tr = trace_array_create(name);
 
@@ -8564,6 +8628,10 @@ out_unlock:
  * NOTE: This function increments the reference counter associated with the
 * trace array returned. This makes sure it cannot be freed while in use.
 * Use trace_array_put() once the trace array is no longer needed.
+ * If the trace_array is to be freed, trace_array_destroy() needs to
+ * be called after the trace_array_put(), or simply let user space delete
+ * it from the tracefs instances directory. But until the
+ * trace_array_put() is called, user space can not delete it.
 *
 */
 struct trace_array *trace_array_get_by_name(const char *name)
@@ -8613,7 +8681,7 @@ static int __remove_instance(struct trace_array *tr)
 	event_trace_del_tracer(tr);
 	ftrace_clear_pids(tr);
 	ftrace_destroy_function_files(tr);
-	tracefs_remove_recursive(tr->dir);
+	tracefs_remove(tr->dir);
 	free_trace_buffers(tr);
 
 	for (i = 0; i < tr->nr_topts; i++) {
@@ -8666,12 +8734,9 @@ static int instance_rmdir(const char *name)
 	mutex_lock(&trace_types_lock);
 
 	ret = -ENODEV;
-	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
-		if (tr->name && strcmp(tr->name, name) == 0) {
-			ret = __remove_instance(tr);
-			break;
-		}
-	}
+	tr = trace_array_find(name);
+	if (tr)
+		ret = __remove_instance(tr);
 
 	mutex_unlock(&trace_types_lock);
 	mutex_unlock(&event_mutex);
@@ -8684,7 +8749,7 @@ static __init void create_trace_instances(struct dentry *d_tracer)
 	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
							 instance_mkdir,
							 instance_rmdir);
-	if (WARN_ON(!trace_instance_dir))
+	if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
 		return;
 }
 
@@ -8754,7 +8819,7 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
 #endif
 
 	if (ftrace_create_function_files(tr, d_tracer))
-		WARN(1, "Could not allocate function filter files");
+		MEM_FAIL(1, "Could not allocate function filter files");
 
 #ifdef CONFIG_TRACER_SNAPSHOT
 	trace_create_file("snapshot", 0644, d_tracer,
@@ -9036,13 +9101,13 @@ void trace_init_global_iter(struct trace_iterator *iter)
 	iter->tr = &global_trace;
 	iter->trace = iter->tr->current_trace;
 	iter->cpu_file = RING_BUFFER_ALL_CPUS;
-	iter->trace_buffer = &global_trace.trace_buffer;
+	iter->array_buffer = &global_trace.array_buffer;
 
 	if (iter->trace && iter->trace->open)
 		iter->trace->open(iter);
 
 	/* Annotate start of buffers if we had overruns */
-	if (ring_buffer_overruns(iter->trace_buffer->buffer))
+	if (ring_buffer_overruns(iter->array_buffer->buffer))
 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
 
 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
@@ -9083,7 +9148,7 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
 	trace_init_global_iter(&iter);
 
 	for_each_tracing_cpu(cpu) {
-		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
+		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
 	}
 
 	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
@@ -9151,7 +9216,7 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
 	tr->trace_flags |= old_userobj;
 
 	for_each_tracing_cpu(cpu) {
-		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
+		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
 	}
 	atomic_dec(&dump_running);
 	printk_nmi_direct_exit();
@@ -9306,8 +9371,7 @@ __init static int tracer_alloc_buffers(void)
 
 	/* TODO: make the number of buffers hot pluggable with CPUS */
 	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
-		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
-		WARN_ON(1);
+		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
 		goto out_free_savedcmd;
 	}
 
@@ -9380,7 +9444,8 @@ void __init early_trace_init(void)
 	if (tracepoint_printk) {
 		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
-		if (WARN_ON(!tracepoint_print_iter))
+		if (MEM_FAIL(!tracepoint_print_iter,
+			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
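The expanded kernel-doc above trace_array_get_by_name() spells out the instance lifetime rules for kernel users: every trace_array_get_by_name() must be balanced by trace_array_put(), and an instance created from the kernel is only freed by trace_array_destroy() after the reference is dropped. As a sketch for a module wanting a private instance (names illustrative):

	struct trace_array *tr;

	tr = trace_array_get_by_name("sample-instance"); /* created if absent */
	if (!tr)
		return -ENOMEM;

	trace_array_printk(tr, _THIS_IP_, "hello from my instance\n");

	trace_array_put(tr);		/* drop our reference first... */
	trace_array_destroy(tr);	/* ...then free the kernel-created instance */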