Diffstat (limited to 'kernel/time/timer.c')
 kernel/time/timer.c | 427 ++++++++++++++++++++++++++++++++++----------
 1 file changed, 329 insertions(+), 98 deletions(-)
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 717fcb9fb14a..63a8ce7177dd 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1017,7 +1017,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int option
 	unsigned int idx = UINT_MAX;
 	int ret = 0;
 
-	BUG_ON(!timer->function);
+	debug_assert_init(timer);
 
 	/*
 	 * This is a common optimization triggered by the networking code - if
@@ -1044,6 +1044,14 @@ __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int option
 		 * dequeue/enqueue dance.
 		 */
 		base = lock_timer_base(timer, &flags);
+		/*
+		 * Has @timer been shutdown? This needs to be evaluated
+		 * while holding base lock to prevent a race against the
+		 * shutdown code.
+		 */
+		if (!timer->function)
+			goto out_unlock;
+
 		forward_timer_base(base);
 
 		if (timer_pending(timer) && (options & MOD_TIMER_REDUCE) &&
@@ -1070,6 +1078,14 @@ __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int option
 		}
 	} else {
 		base = lock_timer_base(timer, &flags);
+		/*
+		 * Has @timer been shutdown? This needs to be evaluated
+		 * while holding base lock to prevent a race against the
+		 * shutdown code.
+		 */
+		if (!timer->function)
+			goto out_unlock;
+
 		forward_timer_base(base);
 	}
 
@@ -1083,7 +1099,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int option
 		/*
 		 * We are trying to schedule the timer on the new base.
 		 * However we can't change timer's base while it is running,
-		 * otherwise del_timer_sync() can't detect that the timer's
+		 * otherwise timer_delete_sync() can't detect that the timer's
 		 * handler yet has not finished. This also guarantees that the
 		 * timer is serialized wrt itself.
 		 */
@@ -1121,14 +1137,20 @@ out_unlock:
 }
 
 /**
- * mod_timer_pending - modify a pending timer's timeout
- * @timer: the pending timer to be modified
- * @expires: new timeout in jiffies
+ * mod_timer_pending - Modify a pending timer's timeout
+ * @timer:	The pending timer to be modified
+ * @expires:	New absolute timeout in jiffies
+ *
+ * mod_timer_pending() is the same for pending timers as mod_timer(), but
+ * will not activate inactive timers.
  *
- * mod_timer_pending() is the same for pending timers as mod_timer(),
- * but will not re-activate and modify already deleted timers.
+ * If @timer->function == NULL then the start operation is silently
+ * discarded.
  *
- * It is useful for unserialized use of timers.
+ * Return:
+ * * %0 - The timer was inactive and not modified or was in
+ *	  shutdown state and the operation was discarded
+ * * %1 - The timer was active and requeued to expire at @expires
  */
 int mod_timer_pending(struct timer_list *timer, unsigned long expires)
 {
@@ -1137,24 +1159,31 @@ int mod_timer_pending(struct timer_list *timer, unsigned long expires)
 EXPORT_SYMBOL(mod_timer_pending);
 
 /**
- * mod_timer - modify a timer's timeout
- * @timer: the timer to be modified
- * @expires: new timeout in jiffies
- *
- * mod_timer() is a more efficient way to update the expire field of an
- * active timer (if the timer is inactive it will be activated)
+ * mod_timer - Modify a timer's timeout
+ * @timer:	The timer to be modified
+ * @expires:	New absolute timeout in jiffies
  *
  * mod_timer(timer, expires) is equivalent to:
  *
  *     del_timer(timer); timer->expires = expires; add_timer(timer);
  *
+ * mod_timer() is more efficient than the above open coded sequence. In
+ * case that the timer is inactive, the del_timer() part is a NOP. The
+ * timer is in any case activated with the new expiry time @expires.
+ *
  * Note that if there are multiple unserialized concurrent users of the
  * same timer, then mod_timer() is the only safe way to modify the timeout,
  * since add_timer() cannot modify an already running timer.
  *
- * The function returns whether it has modified a pending timer or not.
- * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
- * active timer returns 1.)
+ * If @timer->function == NULL then the start operation is silently
+ * discarded. In this case the return value is 0 and meaningless.
+ *
+ * Return:
+ * * %0 - The timer was inactive and started or was in shutdown
+ *	  state and the operation was discarded
+ * * %1 - The timer was active and requeued to expire at @expires or
+ *	  the timer was active and not modified because @expires did
+ *	  not change the effective expiry time
  */
 int mod_timer(struct timer_list *timer, unsigned long expires)
 {
@@ -1165,11 +1194,22 @@ EXPORT_SYMBOL(mod_timer);
 /**
  * timer_reduce - Modify a timer's timeout if it would reduce the timeout
  * @timer:	The timer to be modified
- * @expires:	New timeout in jiffies
+ * @expires:	New absolute timeout in jiffies
  *
  * timer_reduce() is very similar to mod_timer(), except that it will only
- * modify a running timer if that would reduce the expiration time (it will
- * start a timer that isn't running).
+ * modify an enqueued timer if that would reduce the expiration time. If
+ * @timer is not enqueued it starts the timer.
+ *
+ * If @timer->function == NULL then the start operation is silently
+ * discarded.
+ *
+ * Return:
+ * * %0 - The timer was inactive and started or was in shutdown
+ *	  state and the operation was discarded
+ * * %1 - The timer was active and requeued to expire at @expires or
+ *	  the timer was active and not modified because @expires
+ *	  did not change the effective expiry time such that the
+ *	  timer would expire earlier than already scheduled
  */
 int timer_reduce(struct timer_list *timer, unsigned long expires)
 {
@@ -1178,39 +1218,51 @@ int timer_reduce(struct timer_list *timer, unsigned long expires)
 EXPORT_SYMBOL(timer_reduce);
 
 /**
- * add_timer - start a timer
- * @timer: the timer to be added
+ * add_timer - Start a timer
+ * @timer:	The timer to be started
  *
- * The kernel will do a ->function(@timer) callback from the
- * timer interrupt at the ->expires point in the future. The
- * current time is 'jiffies'.
+ * Start @timer to expire at @timer->expires in the future. @timer->expires
+ * is the absolute expiry time measured in 'jiffies'. When the timer expires
+ * timer->function(timer) will be invoked from soft interrupt context.
  *
- * The timer's ->expires, ->function fields must be set prior calling this
- * function.
+ * The @timer->expires and @timer->function fields must be set prior
+ * to calling this function.
  *
- * Timers with an ->expires field in the past will be executed in the next
- * timer tick.
+ * If @timer->function == NULL then the start operation is silently
+ * discarded.
+ *
+ * If @timer->expires is already in the past @timer will be queued to
+ * expire at the next timer tick.
+ *
+ * This can only operate on an inactive timer. Attempts to invoke this on
+ * an active timer are rejected with a warning.
 */
 void add_timer(struct timer_list *timer)
 {
-	BUG_ON(timer_pending(timer));
+	if (WARN_ON_ONCE(timer_pending(timer)))
+		return;
 	__mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING);
 }
 EXPORT_SYMBOL(add_timer);
 
 /**
- * add_timer_on - start a timer on a particular CPU
- * @timer: the timer to be added
- * @cpu: the CPU to start it on
+ * add_timer_on - Start a timer on a particular CPU
+ * @timer:	The timer to be started
+ * @cpu:	The CPU to start it on
+ *
+ * Same as add_timer() except that it starts the timer on the given CPU.
 *
- * This is not very scalable on SMP. Double adds are not possible.
+ * See add_timer() for further details.
 */
 void add_timer_on(struct timer_list *timer, int cpu)
 {
 	struct timer_base *new_base, *base;
 	unsigned long flags;
 
-	BUG_ON(timer_pending(timer) || !timer->function);
+	debug_assert_init(timer);
+
+	if (WARN_ON_ONCE(timer_pending(timer)))
+		return;
 
 	new_base = get_timer_cpu_base(timer->flags, cpu);
 
@@ -1220,6 +1272,13 @@ void add_timer_on(struct timer_list *timer, int cpu)
 	 * wrong base locked.  See lock_timer_base().
 	 */
 	base = lock_timer_base(timer, &flags);
+	/*
+	 * Has @timer been shutdown? This needs to be evaluated while
+	 * holding base lock to prevent a race against the shutdown code.
+	 */
+	if (!timer->function)
+		goto out_unlock;
+
 	if (base != new_base) {
 		timer->flags |= TIMER_MIGRATING;
 
@@ -1233,22 +1292,27 @@
 	debug_timer_activate(timer);
 	internal_add_timer(base, timer);
+out_unlock:
 	raw_spin_unlock_irqrestore(&base->lock, flags);
 }
 EXPORT_SYMBOL_GPL(add_timer_on);
 
 /**
- * del_timer - deactivate a timer.
- * @timer: the timer to be deactivated
- *
- * del_timer() deactivates a timer - this works on both active and inactive
- * timers.
- *
- * The function returns whether it has deactivated a pending timer or not.
- * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
- * active timer returns 1.)
+ * __timer_delete - Internal function: Deactivate a timer
+ * @timer:	The timer to be deactivated
+ * @shutdown:	If true, this indicates that the timer is about to be
+ *		shutdown permanently.
+ *
+ * If @shutdown is true then @timer->function is set to NULL under the
+ * timer base lock which prevents further rearming of the time. In that
+ * case any attempt to rearm @timer after this function returns will be
+ * silently ignored.
+ *
+ * Return:
+ * * %0 - The timer was not pending
+ * * %1 - The timer was pending and deactivated
 */
-int del_timer(struct timer_list *timer)
+static int __timer_delete(struct timer_list *timer, bool shutdown)
 {
 	struct timer_base *base;
 	unsigned long flags;
 	int ret = 0;
@@ -1256,24 +1320,90 @@ int del_timer(struct timer_list *timer)
 
 	debug_assert_init(timer);
 
-	if (timer_pending(timer)) {
+	/*
+	 * If @shutdown is set then the lock has to be taken whether the
+	 * timer is pending or not to protect against a concurrent rearm
+	 * which might hit between the lockless pending check and the lock
+	 * aquisition. By taking the lock it is ensured that such a newly
+	 * enqueued timer is dequeued and cannot end up with
+	 * timer->function == NULL in the expiry code.
+	 *
+	 * If timer->function is currently executed, then this makes sure
+	 * that the callback cannot requeue the timer.
+	 */
+	if (timer_pending(timer) || shutdown) {
 		base = lock_timer_base(timer, &flags);
 		ret = detach_if_pending(timer, base, true);
+		if (shutdown)
+			timer->function = NULL;
 		raw_spin_unlock_irqrestore(&base->lock, flags);
 	}
 
 	return ret;
 }
-EXPORT_SYMBOL(del_timer);
 
 /**
- * try_to_del_timer_sync - Try to deactivate a timer
- * @timer: timer to delete
+ * timer_delete - Deactivate a timer
+ * @timer:	The timer to be deactivated
+ *
+ * The function only deactivates a pending timer, but contrary to
+ * timer_delete_sync() it does not take into account whether the timer's
+ * callback function is concurrently executed on a different CPU or not.
+ * It neither prevents rearming of the timer.  If @timer can be rearmed
+ * concurrently then the return value of this function is meaningless.
+ *
+ * Return:
+ * * %0 - The timer was not pending
+ * * %1 - The timer was pending and deactivated
+ */
+int timer_delete(struct timer_list *timer)
+{
+	return __timer_delete(timer, false);
+}
+EXPORT_SYMBOL(timer_delete);
+
+/**
+ * timer_shutdown - Deactivate a timer and prevent rearming
+ * @timer:	The timer to be deactivated
 *
- * This function tries to deactivate a timer. Upon successful (ret >= 0)
- * exit the timer is not queued and the handler is not running on any CPU.
+ * The function does not wait for an eventually running timer callback on a
+ * different CPU but it prevents rearming of the timer. Any attempt to arm
+ * @timer after this function returns will be silently ignored.
+ *
+ * This function is useful for teardown code and should only be used when
+ * timer_shutdown_sync() cannot be invoked due to locking or context constraints.
+ *
+ * Return:
+ * * %0 - The timer was not pending
+ * * %1 - The timer was pending
 */
-int try_to_del_timer_sync(struct timer_list *timer)
+int timer_shutdown(struct timer_list *timer)
+{
+	return __timer_delete(timer, true);
+}
+EXPORT_SYMBOL_GPL(timer_shutdown);
+
+/**
+ * __try_to_del_timer_sync - Internal function: Try to deactivate a timer
+ * @timer:	Timer to deactivate
+ * @shutdown:	If true, this indicates that the timer is about to be
+ *		shutdown permanently.
+ *
+ * If @shutdown is true then @timer->function is set to NULL under the
+ * timer base lock which prevents further rearming of the timer. Any
+ * attempt to rearm @timer after this function returns will be silently
+ * ignored.
+ *
+ * This function cannot guarantee that the timer cannot be rearmed
+ * right after dropping the base lock if @shutdown is false. That
+ * needs to be prevented by the calling code if necessary.
+ *
+ * Return:
+ * * %0  - The timer was not pending
+ * * %1  - The timer was pending and deactivated
+ * * %-1 - The timer callback function is running on a different CPU
+ */
+static int __try_to_del_timer_sync(struct timer_list *timer, bool shutdown)
 {
 	struct timer_base *base;
 	unsigned long flags;
@@ -1285,11 +1415,34 @@ int try_to_del_timer_sync(struct timer_list *timer)
 
 	if (base->running_timer != timer)
 		ret = detach_if_pending(timer, base, true);
+	if (shutdown)
+		timer->function = NULL;
 
 	raw_spin_unlock_irqrestore(&base->lock, flags);
 
 	return ret;
 }
+
+/**
+ * try_to_del_timer_sync - Try to deactivate a timer
+ * @timer:	Timer to deactivate
+ *
+ * This function tries to deactivate a timer. On success the timer is not
+ * queued and the timer callback function is not running on any CPU.
+ *
+ * This function does not guarantee that the timer cannot be rearmed right
+ * after dropping the base lock. That needs to be prevented by the calling
+ * code if necessary.
+ *
+ * Return:
+ * * %0  - The timer was not pending
+ * * %1  - The timer was pending and deactivated
+ * * %-1 - The timer callback function is running on a different CPU
+ */
+int try_to_del_timer_sync(struct timer_list *timer)
+{
+	return __try_to_del_timer_sync(timer, false);
+}
 EXPORT_SYMBOL(try_to_del_timer_sync);
 
 #ifdef CONFIG_PREEMPT_RT
@@ -1365,44 +1518,29 @@ static inline void timer_sync_wait_running(struct timer_base *base) { }
 static inline void del_timer_wait_running(struct timer_list *timer) { }
 #endif
 
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
 /**
- * del_timer_sync - deactivate a timer and wait for the handler to finish.
- * @timer: the timer to be deactivated
- *
- * This function only differs from del_timer() on SMP: besides deactivating
- * the timer it also makes sure the handler has finished executing on other
- * CPUs.
- *
- * Synchronization rules: Callers must prevent restarting of the timer,
- * otherwise this function is meaningless. It must not be called from
- * interrupt contexts unless the timer is an irqsafe one. The caller must
- * not hold locks which would prevent completion of the timer's
- * handler. The timer's handler must not call add_timer_on(). Upon exit the
- * timer is not queued and the handler is not running on any CPU.
- *
- * Note: For !irqsafe timers, you must not hold locks that are held in
- *   interrupt context while calling this function. Even if the lock has
- *   nothing to do with the timer in question.  Here's why::
- *
- *    CPU0                             CPU1
- *    ----                             ----
- *                                     <SOFTIRQ>
- *                                       call_timer_fn();
- *                                       base->running_timer = mytimer;
- *    spin_lock_irq(somelock);
- *                                     <IRQ>
- *                                        spin_lock(somelock);
- *    del_timer_sync(mytimer);
- *    while (base->running_timer == mytimer);
- *
- * Now del_timer_sync() will never return and never release somelock.
- * The interrupt on the other CPU is waiting to grab somelock but
- * it has interrupted the softirq that CPU0 is waiting to finish.
- *
- * The function returns whether it has deactivated a pending timer or not.
+ * __timer_delete_sync - Internal function: Deactivate a timer and wait
+ *			 for the handler to finish.
+ * @timer:	The timer to be deactivated
+ * @shutdown:	If true, @timer->function will be set to NULL under the
+ *		timer base lock which prevents rearming of @timer
+ *
+ * If @shutdown is not set the timer can be rearmed later. If the timer can
+ * be rearmed concurrently, i.e. after dropping the base lock then the
+ * return value is meaningless.
+ *
+ * If @shutdown is set then @timer->function is set to NULL under timer
+ * base lock which prevents rearming of the timer. Any attempt to rearm
+ * a shutdown timer is silently ignored.
+ *
+ * If the timer should be reused after shutdown it has to be initialized
+ * again.
+ *
+ * Return:
+ * * %0	- The timer was not pending
+ * * %1	- The timer was pending and deactivated
 */
-int del_timer_sync(struct timer_list *timer)
+static int __timer_delete_sync(struct timer_list *timer, bool shutdown)
 {
 	int ret;
 
@@ -1422,7 +1560,7 @@ int del_timer_sync(struct timer_list *timer)
 	 * don't use it in hardirq context, because it
 	 * could lead to deadlock.
 	 */
-	WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
+	WARN_ON(in_hardirq() && !(timer->flags & TIMER_IRQSAFE));
 
 	/*
 	 * Must be able to sleep on PREEMPT_RT because of the slowpath in
@@ -1432,7 +1570,7 @@ int del_timer_sync(struct timer_list *timer)
 		lockdep_assert_preemption_enabled();
 
 	do {
-		ret = try_to_del_timer_sync(timer);
+		ret = __try_to_del_timer_sync(timer, shutdown);
 
 		if (unlikely(ret < 0)) {
 			del_timer_wait_running(timer);
@@ -1442,8 +1580,96 @@ int del_timer_sync(struct timer_list *timer)
 
 	return ret;
 }
-EXPORT_SYMBOL(del_timer_sync);
-#endif
+
+/**
+ * timer_delete_sync - Deactivate a timer and wait for the handler to finish.
+ * @timer:	The timer to be deactivated
+ *
+ * Synchronization rules: Callers must prevent restarting of the timer,
+ * otherwise this function is meaningless. It must not be called from
+ * interrupt contexts unless the timer is an irqsafe one. The caller must
+ * not hold locks which would prevent completion of the timer's callback
+ * function. The timer's handler must not call add_timer_on(). Upon exit
+ * the timer is not queued and the handler is not running on any CPU.
+ *
+ * For !irqsafe timers, the caller must not hold locks that are held in
+ * interrupt context. Even if the lock has nothing to do with the timer in
+ * question.  Here's why::
+ *
+ *    CPU0                             CPU1
+ *    ----                             ----
+ *                                     <SOFTIRQ>
+ *                                       call_timer_fn();
+ *                                       base->running_timer = mytimer;
+ *    spin_lock_irq(somelock);
+ *                                     <IRQ>
+ *                                        spin_lock(somelock);
+ *    timer_delete_sync(mytimer);
+ *    while (base->running_timer == mytimer);
+ *
+ * Now timer_delete_sync() will never return and never release somelock.
+ * The interrupt on the other CPU is waiting to grab somelock but it has
+ * interrupted the softirq that CPU0 is waiting to finish.
+ *
+ * This function cannot guarantee that the timer is not rearmed again by
+ * some concurrent or preempting code, right after it dropped the base
+ * lock. If there is the possibility of a concurrent rearm then the return
+ * value of the function is meaningless.
+ *
+ * If such a guarantee is needed, e.g. for teardown situations then use
+ * timer_shutdown_sync() instead.
+ *
+ * Return:
+ * * %0	- The timer was not pending
+ * * %1	- The timer was pending and deactivated
+ */
+int timer_delete_sync(struct timer_list *timer)
+{
+	return __timer_delete_sync(timer, false);
+}
+EXPORT_SYMBOL(timer_delete_sync);
+
+/**
+ * timer_shutdown_sync - Shutdown a timer and prevent rearming
+ * @timer: The timer to be shutdown
+ *
+ * When the function returns it is guaranteed that:
+ *   - @timer is not queued
+ *   - The callback function of @timer is not running
+ *   - @timer cannot be enqueued again. Any attempt to rearm
+ *     @timer is silently ignored.
+ *
+ * See timer_delete_sync() for synchronization rules.
+ *
+ * This function is useful for final teardown of an infrastructure where
+ * the timer is subject to a circular dependency problem.
+ *
+ * A common pattern for this is a timer and a workqueue where the timer can
+ * schedule work and work can arm the timer. On shutdown the workqueue must
+ * be destroyed and the timer must be prevented from rearming. Unless the
+ * code has conditionals like 'if (mything->in_shutdown)' to prevent that
+ * there is no way to get this correct with timer_delete_sync().
+ *
+ * timer_shutdown_sync() is solving the problem. The correct ordering of
+ * calls in this case is:
+ *
+ *	timer_shutdown_sync(&mything->timer);
+ *	workqueue_destroy(&mything->workqueue);
+ *
+ * After this 'mything' can be safely freed.
+ *
+ * This obviously implies that the timer is not required to be functional
+ * for the rest of the shutdown operation.
+ *
+ * Return:
+ * * %0 - The timer was not pending
+ * * %1 - The timer was pending
+ */
+int timer_shutdown_sync(struct timer_list *timer)
+{
+	return __timer_delete_sync(timer, true);
+}
+EXPORT_SYMBOL_GPL(timer_shutdown_sync);
 
 static void call_timer_fn(struct timer_list *timer,
 			  void (*fn)(struct timer_list *),
@@ -1465,8 +1691,8 @@ static void call_timer_fn(struct timer_list *timer,
 #endif
 	/*
 	 * Couple the lock chain with the lock chain at
-	 * del_timer_sync() by acquiring the lock_map around the fn()
-	 * call here and in del_timer_sync().
+	 * timer_delete_sync() by acquiring the lock_map around the fn()
+	 * call here and in timer_delete_sync().
 	 */
 	lock_map_acquire(&lockdep_map);
 
@@ -1509,6 +1735,12 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
 
 		fn = timer->function;
 
+		if (WARN_ON_ONCE(!fn)) {
+			/* Should never happen. Emphasis on should! */
+			base->running_timer = NULL;
+			continue;
+		}
+
 		if (timer->flags & TIMER_IRQSAFE) {
 			raw_spin_unlock(&base->lock);
 			call_timer_fn(timer, fn, baseclk);
@@ -1933,7 +2165,7 @@ signed long __sched schedule_timeout(signed long timeout)
 	timer_setup_on_stack(&timer.timer, process_timeout, 0);
 	__mod_timer(&timer.timer, expire, MOD_TIMER_NOTPENDING);
 	schedule();
-	del_singleshot_timer_sync(&timer.timer);
+	del_timer_sync(&timer.timer);
 
 	/* Remove the timer from the object tracker */
 	destroy_timer_on_stack(&timer.timer);
@@ -2017,8 +2249,6 @@ int timers_dead_cpu(unsigned int cpu)
 	struct timer_base *new_base;
 	int b, i;
 
-	BUG_ON(cpu_online(cpu));
-
 	for (b = 0; b < NR_BASES; b++) {
 		old_base = per_cpu_ptr(&timer_bases[b], cpu);
 		new_base = get_cpu_ptr(&timer_bases[b]);
@@ -2035,7 +2265,8 @@ int timers_dead_cpu(unsigned int cpu)
 		 */
 		forward_timer_base(new_base);
 
-		BUG_ON(old_base->running_timer);
+		WARN_ON_ONCE(old_base->running_timer);
+		old_base->running_timer = NULL;
 
 		for (i = 0; i < WHEEL_SIZE; i++)
 			migrate_timer_list(new_base, old_base->vectors + i);