diff options
Diffstat (limited to 'kernel/timer.c')
-rw-r--r--  kernel/timer.c  178
1 files changed, 130 insertions, 48 deletions
| diff --git a/kernel/timer.c b/kernel/timer.c index 13dd64fe143d..b4555568b4e4 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -491,14 +491,18 @@ static inline void debug_timer_free(struct timer_list *timer)  	debug_object_free(timer, &timer_debug_descr);  } -static void __init_timer(struct timer_list *timer); +static void __init_timer(struct timer_list *timer, +			 const char *name, +			 struct lock_class_key *key); -void init_timer_on_stack(struct timer_list *timer) +void init_timer_on_stack_key(struct timer_list *timer, +			     const char *name, +			     struct lock_class_key *key)  {  	debug_object_init_on_stack(timer, &timer_debug_descr); -	__init_timer(timer); +	__init_timer(timer, name, key);  } -EXPORT_SYMBOL_GPL(init_timer_on_stack); +EXPORT_SYMBOL_GPL(init_timer_on_stack_key);  void destroy_timer_on_stack(struct timer_list *timer)  { @@ -512,7 +516,9 @@ static inline void debug_timer_activate(struct timer_list *timer) { }  static inline void debug_timer_deactivate(struct timer_list *timer) { }  #endif -static void __init_timer(struct timer_list *timer) +static void __init_timer(struct timer_list *timer, +			 const char *name, +			 struct lock_class_key *key)  {  	timer->entry.next = NULL;  	timer->base = __raw_get_cpu_var(tvec_bases); @@ -521,6 +527,7 @@ static void __init_timer(struct timer_list *timer)  	timer->start_pid = -1;  	memset(timer->start_comm, 0, TASK_COMM_LEN);  #endif +	lockdep_init_map(&timer->lockdep_map, name, key, 0);  }  /** @@ -530,19 +537,23 @@ static void __init_timer(struct timer_list *timer)   * init_timer() must be done to a timer prior calling *any* of the   * other timer functions.   
*/ -void init_timer(struct timer_list *timer) +void init_timer_key(struct timer_list *timer, +		    const char *name, +		    struct lock_class_key *key)  {  	debug_timer_init(timer); -	__init_timer(timer); +	__init_timer(timer, name, key);  } -EXPORT_SYMBOL(init_timer); +EXPORT_SYMBOL(init_timer_key); -void init_timer_deferrable(struct timer_list *timer) +void init_timer_deferrable_key(struct timer_list *timer, +			       const char *name, +			       struct lock_class_key *key)  { -	init_timer(timer); +	init_timer_key(timer, name, key);  	timer_set_deferrable(timer);  } -EXPORT_SYMBOL(init_timer_deferrable); +EXPORT_SYMBOL(init_timer_deferrable_key);  static inline void detach_timer(struct timer_list *timer,  				int clear_pending) @@ -589,11 +600,14 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,  	}  } -int __mod_timer(struct timer_list *timer, unsigned long expires) +static inline int +__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)  {  	struct tvec_base *base, *new_base;  	unsigned long flags; -	int ret = 0; +	int ret; + +	ret = 0;  	timer_stats_timer_set_start_info(timer);  	BUG_ON(!timer->function); @@ -603,6 +617,9 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)  	if (timer_pending(timer)) {  		detach_timer(timer, 0);  		ret = 1; +	} else { +		if (pending_only) +			goto out_unlock;  	}  	debug_timer_activate(timer); @@ -629,42 +646,28 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)  	timer->expires = expires;  	internal_add_timer(base, timer); + +out_unlock:  	spin_unlock_irqrestore(&base->lock, flags);  	return ret;  } -EXPORT_SYMBOL(__mod_timer); -  /** - * add_timer_on - start a timer on a particular CPU - * @timer: the timer to be added - * @cpu: the CPU to start it on + * mod_timer_pending - modify a pending timer's timeout + * @timer: the pending timer to be modified + * @expires: new timeout in jiffies   * - * This is not very scalable on SMP. 
Double adds are not possible. + * mod_timer_pending() is the same for pending timers as mod_timer(), + * but will not re-activate and modify already deleted timers. + * + * It is useful for unserialized use of timers.   */ -void add_timer_on(struct timer_list *timer, int cpu) +int mod_timer_pending(struct timer_list *timer, unsigned long expires)  { -	struct tvec_base *base = per_cpu(tvec_bases, cpu); -	unsigned long flags; - -	timer_stats_timer_set_start_info(timer); -	BUG_ON(timer_pending(timer) || !timer->function); -	spin_lock_irqsave(&base->lock, flags); -	timer_set_base(timer, base); -	debug_timer_activate(timer); -	internal_add_timer(base, timer); -	/* -	 * Check whether the other CPU is idle and needs to be -	 * triggered to reevaluate the timer wheel when nohz is -	 * active. We are protected against the other CPU fiddling -	 * with the timer by holding the timer base lock. This also -	 * makes sure that a CPU on the way to idle can not evaluate -	 * the timer wheel. -	 */ -	wake_up_idle_cpu(cpu); -	spin_unlock_irqrestore(&base->lock, flags); +	return __mod_timer(timer, expires, true);  } +EXPORT_SYMBOL(mod_timer_pending);  /**   * mod_timer - modify a timer's timeout @@ -688,9 +691,6 @@ void add_timer_on(struct timer_list *timer, int cpu)   */  int mod_timer(struct timer_list *timer, unsigned long expires)  { -	BUG_ON(!timer->function); - -	timer_stats_timer_set_start_info(timer);  	/*  	 * This is a common optimization triggered by the  	 * networking code - if the timer is re-modified @@ -699,12 +699,62 @@ int mod_timer(struct timer_list *timer, unsigned long expires)  	if (timer->expires == expires && timer_pending(timer))  		return 1; -	return __mod_timer(timer, expires); +	return __mod_timer(timer, expires, false);  } -  EXPORT_SYMBOL(mod_timer);  /** + * add_timer - start a timer + * @timer: the timer to be added + * + * The kernel will do a ->function(->data) callback from the + * timer interrupt at the ->expires point in the future. 
The + * current time is 'jiffies'. + * + * The timer's ->expires, ->function (and if the handler uses it, ->data) + * fields must be set prior calling this function. + * + * Timers with an ->expires field in the past will be executed in the next + * timer tick. + */ +void add_timer(struct timer_list *timer) +{ +	BUG_ON(timer_pending(timer)); +	mod_timer(timer, timer->expires); +} +EXPORT_SYMBOL(add_timer); + +/** + * add_timer_on - start a timer on a particular CPU + * @timer: the timer to be added + * @cpu: the CPU to start it on + * + * This is not very scalable on SMP. Double adds are not possible. + */ +void add_timer_on(struct timer_list *timer, int cpu) +{ +	struct tvec_base *base = per_cpu(tvec_bases, cpu); +	unsigned long flags; + +	timer_stats_timer_set_start_info(timer); +	BUG_ON(timer_pending(timer) || !timer->function); +	spin_lock_irqsave(&base->lock, flags); +	timer_set_base(timer, base); +	debug_timer_activate(timer); +	internal_add_timer(base, timer); +	/* +	 * Check whether the other CPU is idle and needs to be +	 * triggered to reevaluate the timer wheel when nohz is +	 * active. We are protected against the other CPU fiddling +	 * with the timer by holding the timer base lock. This also +	 * makes sure that a CPU on the way to idle can not evaluate +	 * the timer wheel. +	 */ +	wake_up_idle_cpu(cpu); +	spin_unlock_irqrestore(&base->lock, flags); +} + +/**   * del_timer - deactive a timer.   
* @timer: the timer to be deactivated   * @@ -733,7 +783,6 @@ int del_timer(struct timer_list *timer)  	return ret;  } -  EXPORT_SYMBOL(del_timer);  #ifdef CONFIG_SMP @@ -767,7 +816,6 @@ out:  	return ret;  } -  EXPORT_SYMBOL(try_to_del_timer_sync);  /** @@ -789,6 +837,15 @@ EXPORT_SYMBOL(try_to_del_timer_sync);   */  int del_timer_sync(struct timer_list *timer)  { +#ifdef CONFIG_LOCKDEP +	unsigned long flags; + +	local_irq_save(flags); +	lock_map_acquire(&timer->lockdep_map); +	lock_map_release(&timer->lockdep_map); +	local_irq_restore(flags); +#endif +  	for (;;) {  		int ret = try_to_del_timer_sync(timer);  		if (ret >= 0) @@ -796,7 +853,6 @@ int del_timer_sync(struct timer_list *timer)  		cpu_relax();  	}  } -  EXPORT_SYMBOL(del_timer_sync);  #endif @@ -861,10 +917,36 @@ static inline void __run_timers(struct tvec_base *base)  			set_running_timer(base, timer);  			detach_timer(timer, 1); +  			spin_unlock_irq(&base->lock);  			{  				int preempt_count = preempt_count(); + +#ifdef CONFIG_LOCKDEP +				/* +				 * It is permissible to free the timer from +				 * inside the function that is called from +				 * it, this we need to take into account for +				 * lockdep too. To avoid bogus "held lock +				 * freed" warnings as well as problems when +				 * looking into timer->lockdep_map, make a +				 * copy and use that here. +				 */ +				struct lockdep_map lockdep_map = +					timer->lockdep_map; +#endif +				/* +				 * Couple the lock chain with the lock chain at +				 * del_timer_sync() by acquiring the lock_map +				 * around the fn() call here and in +				 * del_timer_sync(). 
+				 */ +				lock_map_acquire(&lockdep_map); +  				fn(data); + +				lock_map_release(&lockdep_map); +  				if (preempt_count != preempt_count()) {  					printk(KERN_ERR "huh, entered %p "  					       "with preempt_count %08x, exited" @@ -1268,7 +1350,7 @@ signed long __sched schedule_timeout(signed long timeout)  	expire = timeout + jiffies;  	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current); -	__mod_timer(&timer, expire); +	__mod_timer(&timer, expire, false);  	schedule();  	del_singleshot_timer_sync(&timer); |