diff options
Diffstat (limited to 'kernel/time/hrtimer.c')
| -rw-r--r-- | kernel/time/hrtimer.c | 62 | 
1 file changed, 40 insertions, 22 deletions
| diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 743c852e10f2..5c9d968187ae 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -546,8 +546,11 @@ static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,  }  /* - * Recomputes cpu_base::*next_timer and returns the earliest expires_next but - * does not set cpu_base::*expires_next, that is done by hrtimer_reprogram. + * Recomputes cpu_base::*next_timer and returns the earliest expires_next + * but does not set cpu_base::*expires_next, that is done by + * hrtimer[_force]_reprogram and hrtimer_interrupt only. When updating + * cpu_base::*expires_next right away, reprogramming logic would no longer + * work.   *   * When a softirq is pending, we can ignore the HRTIMER_ACTIVE_SOFT bases,   * those timers will get run whenever the softirq gets handled, at the end of @@ -588,6 +591,37 @@ __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_  	return expires_next;  } +static ktime_t hrtimer_update_next_event(struct hrtimer_cpu_base *cpu_base) +{ +	ktime_t expires_next, soft = KTIME_MAX; + +	/* +	 * If the soft interrupt has already been activated, ignore the +	 * soft bases. They will be handled in the already raised soft +	 * interrupt. +	 */ +	if (!cpu_base->softirq_activated) { +		soft = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT); +		/* +		 * Update the soft expiry time. clock_settime() might have +		 * affected it. +		 */ +		cpu_base->softirq_expires_next = soft; +	} + +	expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD); +	/* +	 * If a softirq timer is expiring first, update cpu_base->next_timer +	 * and program the hardware with the soft expiry time. 
+	 */ +	if (expires_next > soft) { +		cpu_base->next_timer = cpu_base->softirq_next_timer; +		expires_next = soft; +	} + +	return expires_next; +} +  static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)  {  	ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset; @@ -628,23 +662,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)  {  	ktime_t expires_next; -	/* -	 * Find the current next expiration time. -	 */ -	expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL); - -	if (cpu_base->next_timer && cpu_base->next_timer->is_soft) { -		/* -		 * When the softirq is activated, hrtimer has to be -		 * programmed with the first hard hrtimer because soft -		 * timer interrupt could occur too late. -		 */ -		if (cpu_base->softirq_activated) -			expires_next = __hrtimer_get_next_event(cpu_base, -								HRTIMER_ACTIVE_HARD); -		else -			cpu_base->softirq_expires_next = expires_next; -	} +	expires_next = hrtimer_update_next_event(cpu_base);  	if (skip_equal && expires_next == cpu_base->expires_next)  		return; @@ -1644,8 +1662,8 @@ retry:  	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD); -	/* Reevaluate the clock bases for the next expiry */ -	expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL); +	/* Reevaluate the clock bases for the [soft] next expiry */ +	expires_next = hrtimer_update_next_event(cpu_base);  	/*  	 * Store the new expiry value so the migration code can verify  	 * against it. @@ -1939,9 +1957,9 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,  	}  	restart = &current->restart_block; -	restart->fn = hrtimer_nanosleep_restart;  	restart->nanosleep.clockid = t.timer.base->clockid;  	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer); +	set_restart_fn(restart, hrtimer_nanosleep_restart);  out:  	destroy_hrtimer_on_stack(&t.timer);  	return ret; |