Diffstat (limited to 'kernel')
 kernel/fork.c                        | 13
 kernel/trace/ftrace_internal.h       | 22
 kernel/trace/preemptirq_delay_test.c | 12
 kernel/trace/ring_buffer.c           | 34
 4 files changed, 30 insertions(+), 51 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 8c700f881d92..48ed22774efa 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2486,11 +2486,11 @@ long do_fork(unsigned long clone_flags,
 	      int __user *child_tidptr)
 {
 	struct kernel_clone_args args = {
-		.flags		= (clone_flags & ~CSIGNAL),
+		.flags		= (lower_32_bits(clone_flags) & ~CSIGNAL),
 		.pidfd		= parent_tidptr,
 		.child_tid	= child_tidptr,
 		.parent_tid	= parent_tidptr,
-		.exit_signal	= (clone_flags & CSIGNAL),
+		.exit_signal	= (lower_32_bits(clone_flags) & CSIGNAL),
 		.stack		= stack_start,
 		.stack_size	= stack_size,
 	};
@@ -2508,8 +2508,9 @@ long do_fork(unsigned long clone_flags,
 pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 {
 	struct kernel_clone_args args = {
-		.flags		= ((flags | CLONE_VM | CLONE_UNTRACED) & ~CSIGNAL),
-		.exit_signal	= (flags & CSIGNAL),
+		.flags		= ((lower_32_bits(flags) | CLONE_VM |
+				    CLONE_UNTRACED) & ~CSIGNAL),
+		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
 		.stack		= (unsigned long)fn,
 		.stack_size	= (unsigned long)arg,
 	};
@@ -2570,11 +2571,11 @@ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
 #endif
 {
 	struct kernel_clone_args args = {
-		.flags		= (clone_flags & ~CSIGNAL),
+		.flags		= (lower_32_bits(clone_flags) & ~CSIGNAL),
 		.pidfd		= parent_tidptr,
 		.child_tid	= child_tidptr,
 		.parent_tid	= parent_tidptr,
-		.exit_signal	= (clone_flags & CSIGNAL),
+		.exit_signal	= (lower_32_bits(clone_flags) & CSIGNAL),
 		.stack		= newsp,
 		.tls		= tls,
 	};
diff --git a/kernel/trace/ftrace_internal.h b/kernel/trace/ftrace_internal.h
index 0456e0a3dab1..382775edf690 100644
--- a/kernel/trace/ftrace_internal.h
+++ b/kernel/trace/ftrace_internal.h
@@ -4,28 +4,6 @@
 
 #ifdef CONFIG_FUNCTION_TRACER
 
-/*
- * Traverse the ftrace_global_list, invoking all entries.  The reason that we
- * can use rcu_dereference_raw_check() is that elements removed from this list
- * are simply leaked, so there is no need to interact with a grace-period
- * mechanism.  The rcu_dereference_raw_check() calls are needed to handle
- * concurrent insertions into the ftrace_global_list.
- *
- * Silly Alpha and silly pointer-speculation compiler optimizations!
- */
-#define do_for_each_ftrace_op(op, list)			\
-	op = rcu_dereference_raw_check(list);			\
-	do
-
-/*
- * Optimized for just a single item in the list (as that is the normal case).
- */
-#define while_for_each_ftrace_op(op)				\
-	while (likely(op = rcu_dereference_raw_check((op)->next)) &&	\
-	       unlikely((op) != &ftrace_list_end))
-
-extern struct ftrace_ops __rcu *ftrace_ops_list;
-extern struct ftrace_ops ftrace_list_end;
 extern struct mutex ftrace_lock;
 extern struct ftrace_ops global_ops;
 
diff --git a/kernel/trace/preemptirq_delay_test.c b/kernel/trace/preemptirq_delay_test.c
index c4c86de63cf9..312d1a0ca3b6 100644
--- a/kernel/trace/preemptirq_delay_test.c
+++ b/kernel/trace/preemptirq_delay_test.c
@@ -16,6 +16,7 @@
 #include <linux/printk.h>
 #include <linux/string.h>
 #include <linux/sysfs.h>
+#include <linux/completion.h>
 
 static ulong delay = 100;
 static char test_mode[12] = "irq";
@@ -28,6 +29,8 @@ MODULE_PARM_DESC(delay, "Period in microseconds (100 us default)");
 MODULE_PARM_DESC(test_mode, "Mode of the test such as preempt, irq, or alternate (default irq)");
 MODULE_PARM_DESC(burst_size, "The size of a burst (default 1)");
 
+static struct completion done;
+
 #define MIN(x, y) ((x) < (y) ? (x) : (y))
 
 static void busy_wait(ulong time)
@@ -114,6 +117,8 @@ static int preemptirq_delay_run(void *data)
 	for (i = 0; i < s; i++)
 		(testfuncs[i])(i);
 
+	complete(&done);
+
 	set_current_state(TASK_INTERRUPTIBLE);
 	while (!kthread_should_stop()) {
 		schedule();
@@ -128,15 +133,18 @@ static int preemptirq_delay_run(void *data)
 static int preemptirq_run_test(void)
 {
 	struct task_struct *task;
-
 	char task_name[50];
 
+	init_completion(&done);
+
 	snprintf(task_name, sizeof(task_name), "%s_test", test_mode);
 	task =  kthread_run(preemptirq_delay_run, NULL, task_name);
 	if (IS_ERR(task))
 		return PTR_ERR(task);
-	if (task)
+	if (task) {
+		wait_for_completion(&done);
 		kthread_stop(task);
+	}
 
 	return 0;
 }
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 6f0b42ceeb00..b8e1ca48be50 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -193,7 +193,7 @@ rb_event_length(struct ring_buffer_event *event)
 	case RINGBUF_TYPE_DATA:
 		return rb_event_data_length(event);
 	default:
-		BUG();
+		WARN_ON_ONCE(1);
 	}
 	/* not hit */
 	return 0;
@@ -249,7 +249,7 @@ rb_event_data(struct ring_buffer_event *event)
 {
 	if (extended_time(event))
 		event = skip_time_extend(event);
-	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
+	WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
 	/* If length is in len field, then array[0] has the data */
 	if (event->type_len)
 		return (void *)&event->array[0];
@@ -3727,7 +3727,7 @@ rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
 		return;
 
 	default:
-		BUG();
+		RB_WARN_ON(cpu_buffer, 1);
 	}
 	return;
 }
@@ -3757,7 +3757,7 @@ rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
 		return;
 
 	default:
-		BUG();
+		RB_WARN_ON(iter->cpu_buffer, 1);
 	}
 	return;
 }
@@ -4020,7 +4020,7 @@ rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
 		return event;
 
 	default:
-		BUG();
+		RB_WARN_ON(cpu_buffer, 1);
 	}
 
 	return NULL;
@@ -4034,7 +4034,6 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event;
 	int nr_loops = 0;
-	bool failed = false;
 
 	if (ts)
 		*ts = 0;
@@ -4056,19 +4055,14 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 		return NULL;
 
 	/*
-	 * We repeat when a time extend is encountered or we hit
-	 * the end of the page. Since the time extend is always attached
-	 * to a data event, we should never loop more than three times.
-	 * Once for going to next page, once on time extend, and
-	 * finally once to get the event.
-	 * We should never hit the following condition more than thrice,
-	 * unless the buffer is very small, and there's a writer
-	 * that is causing the reader to fail getting an event.
+	 * As the writer can mess with what the iterator is trying
+	 * to read, just give up if we fail to get an event after
+	 * three tries. The iterator is not as reliable when reading
+	 * the ring buffer with an active write as the consumer is.
+	 * Do not warn if the three failures is reached.
 	 */
-	if (++nr_loops > 3) {
-		RB_WARN_ON(cpu_buffer, !failed);
+	if (++nr_loops > 3)
 		return NULL;
-	}
 
 	if (rb_per_cpu_empty(cpu_buffer))
 		return NULL;
@@ -4079,10 +4073,8 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	}
 
 	event = rb_iter_head_event(iter);
-	if (!event) {
-		failed = true;
+	if (!event)
 		goto again;
-	}
 
 	switch (event->type_len) {
 	case RINGBUF_TYPE_PADDING:
@@ -4117,7 +4109,7 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 		return event;
 
 	default:
-		BUG();
+		RB_WARN_ON(cpu_buffer, 1);
 	}
 
 	return NULL;
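For context, the preemptirq_delay_test change above uses the standard completion API so that the test thread is only stopped after it has finished its work. Below is a minimal, illustrative sketch of the same handshake in ordinary kernel-module style; the demo_* names are made up for illustration and are not part of the patch.

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* Illustrative only: a completion lets the starter wait for the worker. */
static struct completion demo_done;

static int demo_thread_fn(void *data)
{
	/* ... perform the actual work here ... */

	/* Signal the starter that the work is finished. */
	complete(&demo_done);

	/* Park until kthread_stop() is called, as the test thread does. */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int demo_run(void)
{
	struct task_struct *task;

	init_completion(&demo_done);

	task = kthread_run(demo_thread_fn, NULL, "demo_test");
	if (IS_ERR(task))
		return PTR_ERR(task);

	/*
	 * Wait for the work to finish before stopping the thread, so
	 * kthread_stop() cannot race with or cut short the work itself.
	 */
	wait_for_completion(&demo_done);
	kthread_stop(task);
	return 0;
}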