| author | Dmitry Torokhov <[email protected]> | 2023-05-01 15:20:08 -0700 | 
|---|---|---|
| committer | Dmitry Torokhov <[email protected]> | 2023-05-01 15:20:08 -0700 | 
| commit | 9a87ffc99ec8eb8d35eed7c4f816d75f5cc9662e (patch) | |
| tree | d57f3a63479a07b4e0cece029886e76e04feb984 /kernel/sched/clock.c | |
| parent | 5dc63e56a9cf8df0b59c234a505a1653f1bdf885 (diff) | |
| parent | 53bea86b5712c7491bb3dae12e271666df0a308c (diff) | |
Merge branch 'next' into for-linus
Prepare input updates for 6.4 merge window.
Diffstat (limited to 'kernel/sched/clock.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/sched/clock.c | 27 |

1 file changed, 21 insertions, 6 deletions
```diff
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index e374c0c923da..5732fa75ebab 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -93,7 +93,7 @@ struct sched_clock_data {
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
 
-notrace static inline struct sched_clock_data *this_scd(void)
+static __always_inline struct sched_clock_data *this_scd(void)
 {
 	return this_cpu_ptr(&sched_clock_data);
 }
@@ -244,12 +244,12 @@ late_initcall(sched_clock_init_late);
  * min, max except they take wrapping into account
  */
 
-notrace static inline u64 wrap_min(u64 x, u64 y)
+static __always_inline u64 wrap_min(u64 x, u64 y)
 {
 	return (s64)(x - y) < 0 ? x : y;
 }
 
-notrace static inline u64 wrap_max(u64 x, u64 y)
+static __always_inline u64 wrap_max(u64 x, u64 y)
 {
 	return (s64)(x - y) > 0 ? x : y;
 }
@@ -260,7 +260,7 @@ notrace static inline u64 wrap_max(u64 x, u64 y)
  *  - filter out backward motion
  *  - use the GTOD tick value to create a window to filter crazy TSC values
  */
-notrace static u64 sched_clock_local(struct sched_clock_data *scd)
+static __always_inline u64 sched_clock_local(struct sched_clock_data *scd)
 {
 	u64 now, clock, old_clock, min_clock, max_clock, gtod;
 	s64 delta;
@@ -287,13 +287,28 @@ again:
 	clock = wrap_max(clock, min_clock);
 	clock = wrap_min(clock, max_clock);
 
-	if (!try_cmpxchg64(&scd->clock, &old_clock, clock))
+	if (!arch_try_cmpxchg64(&scd->clock, &old_clock, clock))
 		goto again;
 
 	return clock;
 }
 
-notrace static u64 sched_clock_remote(struct sched_clock_data *scd)
+noinstr u64 local_clock(void)
+{
+	u64 clock;
+
+	if (static_branch_likely(&__sched_clock_stable))
+		return sched_clock() + __sched_clock_offset;
+
+	preempt_disable_notrace();
+	clock = sched_clock_local(this_scd());
+	preempt_enable_notrace();
+
+	return clock;
+}
+EXPORT_SYMBOL_GPL(local_clock);
+
+static notrace u64 sched_clock_remote(struct sched_clock_data *scd)
 {
 	struct sched_clock_data *my_scd = this_scd();
 	u64 this_clock, remote_clock;
```
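The wrap_min()/wrap_max() helpers in the diff compare two u64 timestamps through a signed difference, so the result stays correct even when one value has wrapped past the top of the 64-bit range. A minimal userspace sketch of the same trick (the names mirror the kernel helpers, but this is illustrative code, not the kernel's):

```c
#include <stdio.h>
#include <stdint.h>

/* Wrap-safe min/max: comparing via a signed difference (relying on
 * two's-complement wraparound, as the kernel does) stays correct when
 * one timestamp has wrapped past UINT64_MAX. */
static inline uint64_t wrap_min(uint64_t x, uint64_t y)
{
	return (int64_t)(x - y) < 0 ? x : y;
}

static inline uint64_t wrap_max(uint64_t x, uint64_t y)
{
	return (int64_t)(x - y) > 0 ? x : y;
}

int main(void)
{
	uint64_t before_wrap = UINT64_MAX - 5;	/* just below the wrap point */
	uint64_t after_wrap  = 10;		/* just past it */

	printf("naive min: %llu\n",
	       (unsigned long long)(before_wrap < after_wrap ? before_wrap : after_wrap));
	printf("wrap_min : %llu\n", (unsigned long long)wrap_min(before_wrap, after_wrap));
	printf("wrap_max : %llu\n", (unsigned long long)wrap_max(before_wrap, after_wrap));
	return 0;
}
```

A naive `<` comparison would pick the post-wrap value 10 as the minimum; the signed-difference form correctly treats it as the later of the two timestamps.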
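For context on the new export, here is a hypothetical out-of-tree module sketch (not part of the patch) that times a short section with the local_clock() interface the last hunk makes noinstr and exports; the module name and the udelay() stand-in workload are assumptions:

```c
// SPDX-License-Identifier: GPL-2.0
/* Hypothetical demo module: times a short busy-wait with local_clock(). */
#include <linux/module.h>
#include <linux/delay.h>	/* udelay() */
#include <linux/sched/clock.h>	/* local_clock() */

static int __init clockdemo_init(void)
{
	u64 t0 = local_clock();	/* nanoseconds, local-CPU view */

	udelay(100);		/* stand-in for the work being timed */

	pr_info("clockdemo: section took %llu ns\n",
		(unsigned long long)(local_clock() - t0));
	return 0;
}

static void __exit clockdemo_exit(void) { }

module_init(clockdemo_init);
module_exit(clockdemo_exit);
MODULE_LICENSE("GPL");	/* local_clock() is EXPORT_SYMBOL_GPL */
```

MODULE_LICENSE("GPL") matters here: the symbol is exported via EXPORT_SYMBOL_GPL(), so non-GPL modules cannot link against it.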