Diffstat (limited to 'arch/x86/include/asm/vgtod.h')
-rw-r--r--	arch/x86/include/asm/vgtod.h	| 79
1 file changed, 33 insertions(+), 46 deletions(-)
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index 53748541c487..913a133f8e6f 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -5,33 +5,46 @@
 #include <linux/compiler.h>
 #include <linux/clocksource.h>
 
+#include <uapi/linux/time.h>
+
 #ifdef BUILD_VDSO32_64
 typedef u64 gtod_long_t;
 #else
 typedef unsigned long gtod_long_t;
 #endif
+
+/*
+ * There is one of these objects in the vvar page for each
+ * vDSO-accelerated clockid.  For high-resolution clocks, this encodes
+ * the time corresponding to vsyscall_gtod_data.cycle_last.  For coarse
+ * clocks, this encodes the actual time.
+ *
+ * To confuse the reader, for high-resolution clocks, nsec is left-shifted
+ * by vsyscall_gtod_data.shift.
+ */
+struct vgtod_ts {
+	u64		sec;
+	u64		nsec;
+};
+
+#define VGTOD_BASES	(CLOCK_TAI + 1)
+#define VGTOD_HRES	(BIT(CLOCK_REALTIME) | BIT(CLOCK_MONOTONIC) | BIT(CLOCK_TAI))
+#define VGTOD_COARSE	(BIT(CLOCK_REALTIME_COARSE) | BIT(CLOCK_MONOTONIC_COARSE))
+
 /*
  * vsyscall_gtod_data will be accessed by 32 and 64 bit code at the same time
  * so be carefull by modifying this structure.
  */
 struct vsyscall_gtod_data {
-	unsigned seq;
-
-	int vclock_mode;
-	u64	cycle_last;
-	u64	mask;
-	u32	mult;
-	u32	shift;
-
-	/* open coded 'struct timespec' */
-	u64		wall_time_snsec;
-	gtod_long_t	wall_time_sec;
-	gtod_long_t	monotonic_time_sec;
-	u64		monotonic_time_snsec;
-	gtod_long_t	wall_time_coarse_sec;
-	gtod_long_t	wall_time_coarse_nsec;
-	gtod_long_t	monotonic_time_coarse_sec;
-	gtod_long_t	monotonic_time_coarse_nsec;
+	unsigned int	seq;
+
+	int		vclock_mode;
+	u64		cycle_last;
+	u64		mask;
+	u32		mult;
+	u32		shift;
+
+	struct vgtod_ts	basetime[VGTOD_BASES];
 
 	int		tz_minuteswest;
 	int		tz_dsttime;
@@ -44,9 +57,9 @@ static inline bool vclock_was_used(int vclock)
 	return READ_ONCE(vclocks_used) & (1 << vclock);
 }
 
-static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s)
+static inline unsigned int gtod_read_begin(const struct vsyscall_gtod_data *s)
 {
-	unsigned ret;
+	unsigned int ret;
 
 repeat:
 	ret = READ_ONCE(s->seq);
@@ -59,7 +72,7 @@ repeat:
 }
 
 static inline int gtod_read_retry(const struct vsyscall_gtod_data *s,
-					unsigned start)
+				  unsigned int start)
 {
 	smp_rmb();
 	return unlikely(s->seq != start);
@@ -77,30 +90,4 @@ static inline void gtod_write_end(struct vsyscall_gtod_data *s)
 	++s->seq;
 }
 
-#ifdef CONFIG_X86_64
-
-#define VGETCPU_CPU_MASK 0xfff
-
-static inline unsigned int __getcpu(void)
-{
-	unsigned int p;
-
-	/*
-	 * Load per CPU data from GDT.  LSL is faster than RDTSCP and
-	 * works on all CPUs.  This is volatile so that it orders
-	 * correctly wrt barrier() and to keep gcc from cleverly
-	 * hoisting it out of the calling function.
-	 *
-	 * If RDPID is available, use it.
-	 */
-	alternative_io ("lsl %[seg],%[p]",
-			".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
-			X86_FEATURE_RDPID,
-			[p] "=a" (p), [seg] "r" (__PER_CPU_SEG));
-
-	return p;
-}
-
-#endif /* CONFIG_X86_64 */
-
 #endif /* _ASM_X86_VGTOD_H */
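
For orientation, here is a minimal sketch of how a high-resolution vDSO reader would consume the new basetime[] array under the seqcount protocol defined in this header. The function name do_hres_read() and the clocksource helper vgetcyc() are illustrative assumptions, not part of this patch; the real reader lives in the vDSO sources.

static int do_hres_read(const struct vsyscall_gtod_data *gtod,
			clockid_t clk, struct timespec *ts)
{
	const struct vgtod_ts *base = &gtod->basetime[clk];
	u64 cycles, sec, ns;
	unsigned int seq;

	do {
		seq = gtod_read_begin(gtod);
		cycles = vgetcyc(gtod->vclock_mode);	/* assumed clocksource read */
		/* base->nsec is pre-shifted by gtod->shift for hres clocks */
		ns = base->nsec;
		ns += ((cycles - gtod->cycle_last) & gtod->mask) * gtod->mult;
		ns >>= gtod->shift;
		sec = base->sec;
	} while (unlikely(gtod_read_retry(gtod, seq)));

	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;
	return 0;
}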
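The VGTOD_HRES and VGTOD_COARSE masks let a reader dispatch on a clockid with a single bit test, and coarse clocks need no cycle read or shift at all. Again a hedged sketch; do_coarse_read() and the clock_read() wrapper are hypothetical names:

static void do_coarse_read(const struct vsyscall_gtod_data *gtod,
			   clockid_t clk, struct timespec *ts)
{
	const struct vgtod_ts *base = &gtod->basetime[clk];
	unsigned int seq;

	do {
		seq = gtod_read_begin(gtod);
		/* coarse bases store the actual time; nsec is not shifted */
		ts->tv_sec = base->sec;
		ts->tv_nsec = base->nsec;
	} while (unlikely(gtod_read_retry(gtod, seq)));
}

static int clock_read(const struct vsyscall_gtod_data *gtod,
		      clockid_t clk, struct timespec *ts)
{
	/* only meaningful for clk < VGTOD_BASES, i.e. the static clockids */
	if (likely(BIT(clk) & VGTOD_HRES))
		return do_hres_read(gtod, clk, ts);
	if (BIT(clk) & VGTOD_COARSE) {
		do_coarse_read(gtod, clk, ts);
		return 0;
	}
	return -1;	/* caller falls back to the real syscall */
}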
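On the write side, the pre-shifted nsec value falls out of the timekeeper naturally: tk->tkr_mono.xtime_nsec is already kept left-shifted by tk->tkr_mono.shift. A sketch loosely modeled on the update_vsyscall() path, with fill_hres_base() as an invented name and only CLOCK_REALTIME shown; it assumes <linux/timekeeper_internal.h> for struct timekeeper:

static void fill_hres_base(struct vsyscall_gtod_data *gtod,
			   const struct timekeeper *tk)
{
	struct vgtod_ts *base = &gtod->basetime[CLOCK_REALTIME];

	gtod_write_begin(gtod);
	gtod->cycle_last = tk->tkr_mono.cycle_last;
	gtod->mask	 = tk->tkr_mono.mask;
	gtod->mult	 = tk->tkr_mono.mult;
	gtod->shift	 = tk->tkr_mono.shift;
	base->sec	 = tk->xtime_sec;
	base->nsec	 = tk->tkr_mono.xtime_nsec;	/* already << shift */
	gtod_write_end(gtod);
}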