Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Kconfig  | 11 +----------
-rw-r--r--  kernel/trace/ftrace.c | 17 +++--------------
2 files changed, 4 insertions(+), 24 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 743647005f64..24876faac753 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -10,11 +10,6 @@ config USER_STACKTRACE_SUPPORT
 config NOP_TRACER
 	bool
 
-config HAVE_FTRACE_NMI_ENTER
-	bool
-	help
-	  See Documentation/trace/ftrace-design.rst
-
 config HAVE_FUNCTION_TRACER
 	bool
 	help
@@ -72,11 +67,6 @@ config RING_BUFFER
 	select TRACE_CLOCK
 	select IRQ_WORK
 
-config FTRACE_NMI_ENTER
-       bool
-       depends on HAVE_FTRACE_NMI_ENTER
-       default y
-
 config EVENT_TRACING
 	select CONTEXT_SWITCH_TRACER
 	select GLOB
@@ -158,6 +148,7 @@ config FUNCTION_TRACER
 	select CONTEXT_SWITCH_TRACER
 	select GLOB
 	select TASKS_RCU if PREEMPTION
+	select TASKS_RUDE_RCU
 	help
 	  Enable the kernel to trace every kernel function. This is done
 	  by using a compiler feature to insert a small, 5-byte No-Operation
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index bd030b1b9514..b5765aeea698 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -160,17 +160,6 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
 	op->saved_func(ip, parent_ip, op, regs);
 }
 
-static void ftrace_sync(struct work_struct *work)
-{
-	/*
-	 * This function is just a stub to implement a hard force
-	 * of synchronize_rcu(). This requires synchronizing
-	 * tasks even in userspace and idle.
-	 *
-	 * Yes, function tracing is rude.
-	 */
-}
-
 static void ftrace_sync_ipi(void *data)
 {
 	/* Probably not needed, but do it anyway */
@@ -256,7 +245,7 @@ static void update_ftrace_function(void)
 	 * Make sure all CPUs see this. Yes this is slow, but static
 	 * tracing is slow and nasty to have enabled.
 	 */
-	schedule_on_each_cpu(ftrace_sync);
+	synchronize_rcu_tasks_rude();
 	/* Now all cpus are using the list ops. */
 	function_trace_op = set_function_trace_op;
 	/* Make sure the function_trace_op is visible on all CPUs */
@@ -2932,7 +2921,7 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
 		 * infrastructure to do the synchronization, thus we must do it
 		 * ourselves.
 		 */
-		schedule_on_each_cpu(ftrace_sync);
+		synchronize_rcu_tasks_rude();
 
 		/*
 		 * When the kernel is preeptive, tasks can be preempted
@@ -5888,7 +5877,7 @@ ftrace_graph_release(struct inode *inode, struct file *file)
 		 * infrastructure to do the synchronization, thus we must do it
 		 * ourselves.
 		 */
-		schedule_on_each_cpu(ftrace_sync);
+		synchronize_rcu_tasks_rude();
 
 		free_ftrace_hash(old_hash);
 	}
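
For context on the pattern being replaced: the removed ftrace_sync() was an intentionally empty work function, and pushing it through every CPU with schedule_on_each_cpu() forced each CPU through the scheduler, acting as a heavyweight synchronize_rcu() that also covers code running where ordinary RCU is not watching (idle, userspace transitions). The patch swaps that open-coded idiom for a direct call into the RCU Tasks Rude flavor. The C sketch below contrasts the two idioms; it is not part of this commit, demo_sync_stub() and demo_force_sync() are hypothetical names, and it assumes a kernel built with CONFIG_TASKS_RUDE_RCU (which FUNCTION_TRACER now selects).

#include <linux/workqueue.h>
#include <linux/rcupdate.h>

/*
 * Old idiom (removed by this patch): the stub body is deliberately
 * empty. The synchronization comes from schedule_on_each_cpu()
 * blocking until this work item has executed on every online CPU,
 * forcing each of them through the scheduler.
 */
static void demo_sync_stub(struct work_struct *work)
{
}

static void demo_force_sync(bool use_rude_rcu)
{
	if (use_rude_rcu) {
		/*
		 * New idiom: let the RCU Tasks Rude flavor perform the
		 * heavy-handed synchronization directly. Requires
		 * CONFIG_TASKS_RUDE_RCU.
		 */
		synchronize_rcu_tasks_rude();
	} else {
		/* Old idiom: open-coded equivalent via workqueues. */
		schedule_on_each_cpu(demo_sync_stub);
	}
}

The net effect is the same barrier the removed ftrace_sync() comment described ("Yes, function tracing is rude."), but provided once by the RCU Tasks infrastructure instead of being re-derived at three separate call sites in ftrace.c.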