Diffstat (limited to 'include/linux/seqlock.h')
-rw-r--r--	include/linux/seqlock.h	368
1 file changed, 285 insertions(+), 83 deletions(-)
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 54bc20496392..962d9768945f 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -10,13 +10,16 @@
  *
  * Copyrights:
  * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
+ * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH
  */
 
-#include <linux/spinlock.h>
-#include <linux/preempt.h>
-#include <linux/lockdep.h>
 #include <linux/compiler.h>
 #include <linux/kcsan-checks.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
+#include <linux/preempt.h>
+#include <linux/spinlock.h>
+
 #include <asm/processor.h>
 
 /*
@@ -48,6 +51,10 @@
  * This mechanism can't be used if the protected data contains pointers,
  * as the writer can invalidate a pointer that a reader is following.
  *
+ * If the write serialization mechanism is one of the common kernel
+ * locking primitives, use a sequence counter with associated lock
+ * (seqcount_LOCKTYPE_t) instead.
+ *
  * If it's desired to automatically handle the sequence counter writer
  * serialization and non-preemptibility requirements, use a sequential
  * lock (seqlock_t) instead.
@@ -72,17 +79,18 @@ static inline void __seqcount_init(seqcount_t *s, const char *name,
 }
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define SEQCOUNT_DEP_MAP_INIT(lockname) \
-		.dep_map = { .name = #lockname } \
+
+# define SEQCOUNT_DEP_MAP_INIT(lockname)				\
+		.dep_map = { .name = #lockname }
 
 /**
  * seqcount_init() - runtime initializer for seqcount_t
  * @s: Pointer to the seqcount_t instance
  */
-# define seqcount_init(s)				\
-	do {						\
-		static struct lock_class_key __key;	\
-		__seqcount_init((s), #s, &__key);	\
+# define seqcount_init(s)						\
+	do {								\
+		static struct lock_class_key __key;			\
+		__seqcount_init((s), #s, &__key);			\
 	} while (0)
 
 static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
@@ -108,9 +116,143 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
  */
 #define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }
 
+/*
+ * Sequence counters with associated locks (seqcount_LOCKTYPE_t)
+ *
+ * A sequence counter which associates the lock used for writer
+ * serialization at initialization time. This enables lockdep to validate
+ * that the write side critical section is properly serialized.
+ *
+ * For associated locks which do not implicitly disable preemption,
+ * preemption protection is enforced in the write side function.
+ *
+ * Lockdep is never used for any of the raw write variants.
+ *
+ * See Documentation/locking/seqlock.rst
+ */
+
+#ifdef CONFIG_LOCKDEP
+#define __SEQ_LOCK(expr)	expr
+#else
+#define __SEQ_LOCK(expr)
+#endif
+
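[ Editor's note, not part of the patch: a minimal usage sketch of the "associated lock" idea, using the seqcount_spinlock_t type and seqcount_spinlock_init() helper generated further down in this patch. All struct and function names below are invented for illustration. ]

	#include <linux/seqlock.h>
	#include <linux/spinlock.h>

	/* Hypothetical statistics whose writers are serialized by ->lock. */
	struct example_stats {
		spinlock_t		lock;	/* write serialization */
		seqcount_spinlock_t	seq;	/* associated with ->lock */
		u64			packets;
		u64			bytes;
	};

	static void example_stats_init(struct example_stats *st)
	{
		spin_lock_init(&st->lock);
		seqcount_spinlock_init(&st->seq, &st->lock);
	}

With the association in place, lockdep can verify on every write_seqcount_begin(&st->seq) that st->lock is actually held.
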
+/**
+ * typedef seqcount_LOCKNAME_t - sequence counter with LOCKTYPE associated
+ * @seqcount:	The real sequence counter
+ * @lock:	Pointer to the associated spinlock
+ *
+ * A plain sequence counter with external writer synchronization by a
+ * spinlock. The spinlock is associated to the sequence count in the
+ * static initializer or init function. This enables lockdep to validate
+ * that the write side critical section is properly serialized.
+ */
+
+/**
+ * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
+ * @s:		Pointer to the seqcount_LOCKNAME_t instance
+ * @lock:	Pointer to the associated LOCKTYPE
+ */
+
+/*
+ * SEQCOUNT_LOCKTYPE() - Instantiate seqcount_LOCKNAME_t and helpers
+ * @locktype:		actual typename
+ * @lockname:		name
+ * @preemptible:	preemptibility of above locktype
+ * @lockmember:		argument for lockdep_assert_held()
+ */
+#define SEQCOUNT_LOCKTYPE(locktype, lockname, preemptible, lockmember)	\
+typedef struct seqcount_##lockname {					\
+	seqcount_t		seqcount;				\
+	__SEQ_LOCK(locktype	*lock);					\
+} seqcount_##lockname##_t;						\
+									\
+static __always_inline void						\
+seqcount_##lockname##_init(seqcount_##lockname##_t *s, locktype *lock)	\
+{									\
+	seqcount_init(&s->seqcount);					\
+	__SEQ_LOCK(s->lock = lock);					\
+}									\
+									\
+static __always_inline seqcount_t *					\
+__seqcount_##lockname##_ptr(seqcount_##lockname##_t *s)		\
+{									\
+	return &s->seqcount;						\
+}									\
+									\
+static __always_inline bool						\
+__seqcount_##lockname##_preemptible(seqcount_##lockname##_t *s)	\
+{									\
+	return preemptible;						\
+}									\
+									\
+static __always_inline void						\
+__seqcount_##lockname##_assert(seqcount_##lockname##_t *s)		\
+{									\
+	__SEQ_LOCK(lockdep_assert_held(lockmember));			\
+}
+
+/*
+ * __seqprop() for seqcount_t
+ */
+
+static inline seqcount_t *__seqcount_ptr(seqcount_t *s)
+{
+	return s;
+}
+
+static inline bool __seqcount_preemptible(seqcount_t *s)
+{
+	return false;
+}
+
+static inline void __seqcount_assert(seqcount_t *s)
+{
+	lockdep_assert_preemption_disabled();
+}
+
+SEQCOUNT_LOCKTYPE(raw_spinlock_t,	raw_spinlock,	false,	s->lock)
+SEQCOUNT_LOCKTYPE(spinlock_t,		spinlock,	false,	s->lock)
+SEQCOUNT_LOCKTYPE(rwlock_t,		rwlock,		false,	s->lock)
+SEQCOUNT_LOCKTYPE(struct mutex,		mutex,		true,	s->lock)
+SEQCOUNT_LOCKTYPE(struct ww_mutex,	ww_mutex,	true,	&s->lock->base)
+
+/**
+ * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
+ * @name:	Name of the seqcount_LOCKNAME_t instance
+ * @lock:	Pointer to the associated LOCKTYPE
+ */
+
+#define SEQCOUNT_LOCKTYPE_ZERO(seq_name, assoc_lock) {			\
+	.seqcount		= SEQCNT_ZERO(seq_name.seqcount),	\
+	__SEQ_LOCK(.lock	= (assoc_lock))				\
+}
+
+#define SEQCNT_SPINLOCK_ZERO(name, lock)	SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock)	SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+#define SEQCNT_RWLOCK_ZERO(name, lock)		SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+#define SEQCNT_MUTEX_ZERO(name, lock)		SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+#define SEQCNT_WW_MUTEX_ZERO(name, lock)	SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+
+
+#define __seqprop_case(s, lockname, prop)				\
+	seqcount_##lockname##_t: __seqcount_##lockname##_##prop((void *)(s))
+
+#define __seqprop(s, prop) _Generic(*(s),				\
+	seqcount_t:		__seqcount_##prop((void *)(s)),		\
+	__seqprop_case((s),	raw_spinlock,	prop),			\
+	__seqprop_case((s),	spinlock,	prop),			\
+	__seqprop_case((s),	rwlock,		prop),			\
+	__seqprop_case((s),	mutex,		prop),			\
+	__seqprop_case((s),	ww_mutex,	prop))
+
+#define __seqcount_ptr(s)		__seqprop(s, ptr)
+#define __seqcount_lock_preemptible(s)	__seqprop(s, preemptible)
+#define __seqcount_assert_lock_held(s)	__seqprop(s, assert)
+
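[ Editor's note, not part of the patch: the __seqprop() machinery above is plain C11 _Generic selection. The stand-alone userspace sketch below, with invented toy types, shows the same dispatch pattern; only the selected branch is evaluated, but every branch must type-check, which is why the pointer is laundered through (void *). ]

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy stand-ins for seqcount_t and seqcount_mutex_t. */
	struct toy_seqcount       { unsigned sequence; };
	struct toy_seqcount_mutex { struct toy_seqcount seqcount; };

	static bool toy_preemptible(struct toy_seqcount *s)             { return false; }
	static bool toy_mutex_preemptible(struct toy_seqcount_mutex *s) { return true; }

	/* Type-based dispatch, same shape as __seqprop(s, preemptible). */
	#define toy_lock_preemptible(s) _Generic(*(s),				\
		struct toy_seqcount:		toy_preemptible((void *)(s)),	\
		struct toy_seqcount_mutex:	toy_mutex_preemptible((void *)(s)))

	int main(void)
	{
		struct toy_seqcount plain = { 0 };
		struct toy_seqcount_mutex with_mutex = { { 0 } };

		printf("plain: %d, mutex: %d\n",
		       toy_lock_preemptible(&plain),		/* prints 0 */
		       toy_lock_preemptible(&with_mutex));	/* prints 1 */
		return 0;
	}
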
 /**
  * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
  * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
@@ -122,7 +264,10 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
  *
  * Return: count to be passed to read_seqcount_retry()
  */
-static inline unsigned __read_seqcount_begin(const seqcount_t *s)
+#define __read_seqcount_begin(s)					\
+	__read_seqcount_t_begin(__seqcount_ptr(s))
+
+static inline unsigned __read_seqcount_t_begin(const seqcount_t *s)
 {
 	unsigned ret;
 
@@ -138,32 +283,38 @@ repeat:
 /**
  * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * Return: count to be passed to read_seqcount_retry()
  */
-static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
+#define raw_read_seqcount_begin(s)					\
+	raw_read_seqcount_t_begin(__seqcount_ptr(s))
+
+static inline unsigned raw_read_seqcount_t_begin(const seqcount_t *s)
 {
-	unsigned ret = __read_seqcount_begin(s);
+	unsigned ret = __read_seqcount_t_begin(s);
 
 	smp_rmb();
 	return ret;
 }
 
 /**
  * read_seqcount_begin() - begin a seqcount_t read critical section
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * Return: count to be passed to read_seqcount_retry()
  */
-static inline unsigned read_seqcount_begin(const seqcount_t *s)
+#define read_seqcount_begin(s)						\
+	read_seqcount_t_begin(__seqcount_ptr(s))
+
+static inline unsigned read_seqcount_t_begin(const seqcount_t *s)
 {
 	seqcount_lockdep_reader_access(s);
-	return raw_read_seqcount_begin(s);
+	return raw_read_seqcount_t_begin(s);
 }
 
 /**
  * raw_read_seqcount() - read the raw seqcount_t counter value
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * raw_read_seqcount opens a read critical section of the given
  * seqcount_t, without any lockdep checking, and without checking or
@@ -172,7 +323,10 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s)
  *
  * Return: count to be passed to read_seqcount_retry()
  */
-static inline unsigned raw_read_seqcount(const seqcount_t *s)
+#define raw_read_seqcount(s)						\
+	raw_read_seqcount_t(__seqcount_ptr(s))
+
+static inline unsigned raw_read_seqcount_t(const seqcount_t *s)
 {
 	unsigned ret = READ_ONCE(s->sequence);
 
 	smp_rmb();
@@ -183,7 +337,7 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s)
 /**
  * raw_seqcount_begin() - begin a seqcount_t read critical section w/o
  *                        lockdep and w/o counter stabilization
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * raw_seqcount_begin opens a read critical section of the given
  * seqcount_t. Unlike read_seqcount_begin(), this function will not wait
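[ Editor's note, not part of the patch: the usual read-side retry loop, continuing the hypothetical example_stats structure from the earlier note. The same loop works whether ->seq is a plain seqcount_t or one of the new seqcount_LOCKTYPE_t variants, since the macros above dispatch on the type. ]

	/* Read a consistent snapshot; loops only while a writer is active. */
	static void example_stats_read(struct example_stats *st,
				       u64 *packets, u64 *bytes)
	{
		unsigned int seq;

		do {
			seq = read_seqcount_begin(&st->seq);
			*packets = st->packets;
			*bytes   = st->bytes;
		} while (read_seqcount_retry(&st->seq, seq));
	}
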
@@ -197,18 +351,21 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s)
  *
  * Return: count to be passed to read_seqcount_retry()
  */
-static inline unsigned raw_seqcount_begin(const seqcount_t *s)
+#define raw_seqcount_begin(s)						\
+	raw_seqcount_t_begin(__seqcount_ptr(s))
+
+static inline unsigned raw_seqcount_t_begin(const seqcount_t *s)
 {
 	/*
 	 * If the counter is odd, let read_seqcount_retry() fail
 	 * by decrementing the counter.
 	 */
-	return raw_read_seqcount(s) & ~1;
+	return raw_read_seqcount_t(s) & ~1;
 }
 
 /**
  * __read_seqcount_retry() - end a seqcount_t read section w/o barrier
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  * @start: count, from read_seqcount_begin()
  *
  * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
@@ -221,7 +378,10 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s)
  *
  * Return: true if a read section retry is required, else false
  */
-static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
+#define __read_seqcount_retry(s, start)					\
+	__read_seqcount_t_retry(__seqcount_ptr(s), start)
+
+static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start)
 {
 	kcsan_atomic_next(0);
 	return unlikely(READ_ONCE(s->sequence) != start);
@@ -229,7 +389,7 @@ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
 
 /**
  * read_seqcount_retry() - end a seqcount_t read critical section
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  * @start: count, from read_seqcount_begin()
  *
  * read_seqcount_retry closes the read critical section of given
@@ -238,17 +398,28 @@ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
  *
  * Return: true if a read section retry is required, else false
  */
-static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
+#define read_seqcount_retry(s, start)					\
+	read_seqcount_t_retry(__seqcount_ptr(s), start)
+
+static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start)
 {
 	smp_rmb();
-	return __read_seqcount_retry(s, start);
+	return __read_seqcount_t_retry(s, start);
 }
 
 /**
  * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  */
-static inline void raw_write_seqcount_begin(seqcount_t *s)
+#define raw_write_seqcount_begin(s)					\
+do {									\
+	if (__seqcount_lock_preemptible(s))				\
+		preempt_disable();					\
+									\
+	raw_write_seqcount_t_begin(__seqcount_ptr(s));			\
+} while (0)
+
+static inline void raw_write_seqcount_t_begin(seqcount_t *s)
 {
 	kcsan_nestable_atomic_begin();
 	s->sequence++;
@@ -257,49 +428,50 @@ static inline void raw_write_seqcount_begin(seqcount_t *s)
 /**
  * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  */
-static inline void raw_write_seqcount_end(seqcount_t *s)
+#define raw_write_seqcount_end(s)					\
+do {									\
+	raw_write_seqcount_t_end(__seqcount_ptr(s));			\
+									\
+	if (__seqcount_lock_preemptible(s))				\
+		preempt_enable();					\
+} while (0)
+
+static inline void raw_write_seqcount_t_end(seqcount_t *s)
 {
 	smp_wmb();
 	s->sequence++;
 	kcsan_nestable_atomic_end();
 }
 
-static inline void __write_seqcount_begin_nested(seqcount_t *s, int subclass)
-{
-	raw_write_seqcount_begin(s);
-	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
-}
-
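[ Editor's note, not part of the patch: a sketch of the raw write side on a plain seqcount_t, with invented names. The raw_write_seqcount_begin()/_end() variants above never use lockdep, so they remain usable where the writer serialization cannot be expressed as an associated lock; the caller is then responsible for being non-preemptible and serialized. ]

	static seqcount_t example_raw_seq = SEQCNT_ZERO(example_raw_seq);
	static u64 example_raw_value;

	/* Caller must already be non-preemptible and serialized against other
	 * writers, e.g. it runs with interrupts disabled on a single CPU. */
	static void example_raw_update(u64 v)
	{
		raw_write_seqcount_begin(&example_raw_seq);
		example_raw_value = v;
		raw_write_seqcount_end(&example_raw_seq);
	}
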
 /**
  * write_seqcount_begin_nested() - start a seqcount_t write section with
  *                                 custom lockdep nesting level
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  * @subclass: lockdep nesting level
  *
  * See Documentation/locking/lockdep-design.rst
  */
-static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
+#define write_seqcount_begin_nested(s, subclass)			\
+do {									\
+	__seqcount_assert_lock_held(s);					\
+									\
+	if (__seqcount_lock_preemptible(s))				\
+		preempt_disable();					\
+									\
+	write_seqcount_t_begin_nested(__seqcount_ptr(s), subclass);	\
+} while (0)
+
+static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass)
 {
-	lockdep_assert_preemption_disabled();
-	__write_seqcount_begin_nested(s, subclass);
-}
-
-/*
- * A write_seqcount_begin() variant w/o lockdep non-preemptibility checks.
- *
- * Use for internal seqlock.h code where it's known that preemption is
- * already disabled. For example, seqlock_t write side functions.
- */
-static inline void __write_seqcount_begin(seqcount_t *s)
-{
-	__write_seqcount_begin_nested(s, 0);
+	raw_write_seqcount_t_begin(s);
+	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
 }
 
 /**
  * write_seqcount_begin() - start a seqcount_t write side critical section
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * write_seqcount_begin opens a write side critical section of the given
  * seqcount_t.
@@ -308,26 +480,44 @@ static inline void __write_seqcount_begin(seqcount_t *s)
  * non-preemptible. If readers can be invoked from hardirq or softirq
  * context, interrupts or bottom halves must be respectively disabled.
  */
-static inline void write_seqcount_begin(seqcount_t *s)
+#define write_seqcount_begin(s)						\
+do {									\
+	__seqcount_assert_lock_held(s);					\
+									\
+	if (__seqcount_lock_preemptible(s))				\
+		preempt_disable();					\
+									\
+	write_seqcount_t_begin(__seqcount_ptr(s));			\
+} while (0)
+
+static inline void write_seqcount_t_begin(seqcount_t *s)
 {
-	write_seqcount_begin_nested(s, 0);
+	write_seqcount_t_begin_nested(s, 0);
 }
 
 /**
  * write_seqcount_end() - end a seqcount_t write side critical section
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * The write section must've been opened with write_seqcount_begin().
  */
-static inline void write_seqcount_end(seqcount_t *s)
+#define write_seqcount_end(s)						\
+do {									\
+	write_seqcount_t_end(__seqcount_ptr(s));			\
+									\
+	if (__seqcount_lock_preemptible(s))				\
+		preempt_enable();					\
+} while (0)
+
+static inline void write_seqcount_t_end(seqcount_t *s)
 {
 	seqcount_release(&s->dep_map, _RET_IP_);
-	raw_write_seqcount_end(s);
+	raw_write_seqcount_t_end(s);
 }
 
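[ Editor's note, not part of the patch: a write-side sketch with a preemptible associated lock, using invented names. For a seqcount_mutex_t, the write_seqcount_begin() macro above asserts via lockdep that the mutex is held and disables preemption for the duration of the write section; write_seqcount_end() re-enables it. ]

	struct example_cfg {
		struct mutex		lock;
		seqcount_mutex_t	seq;	/* seqcount_mutex_init(&seq, &lock) */
		int			value;
	};

	static void example_cfg_update(struct example_cfg *cfg, int value)
	{
		mutex_lock(&cfg->lock);

		write_seqcount_begin(&cfg->seq);	/* lockdep: cfg->lock held; preemption off */
		cfg->value = value;
		write_seqcount_end(&cfg->seq);		/* preemption back on */

		mutex_unlock(&cfg->lock);
	}
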
 /**
  * raw_write_seqcount_barrier() - do a seqcount_t write barrier
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * This can be used to provide an ordering guarantee instead of the usual
  * consistency guarantee. It is one wmb cheaper, because it can collapse
@@ -366,7 +556,10 @@ static inline void write_seqcount_end(seqcount_t *s)
  *		WRITE_ONCE(X, false);
  *      }
  */
-static inline void raw_write_seqcount_barrier(seqcount_t *s)
+#define raw_write_seqcount_barrier(s)					\
+	raw_write_seqcount_t_barrier(__seqcount_ptr(s))
+
+static inline void raw_write_seqcount_t_barrier(seqcount_t *s)
 {
 	kcsan_nestable_atomic_begin();
 	s->sequence++;
@@ -378,12 +571,15 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s)
 /**
  * write_seqcount_invalidate() - invalidate in-progress seqcount_t read
  *                               side operations
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * After write_seqcount_invalidate, no seqcount_t read side operations
  * will complete successfully and see data older than this.
  */
-static inline void write_seqcount_invalidate(seqcount_t *s)
+#define write_seqcount_invalidate(s)					\
+	write_seqcount_t_invalidate(__seqcount_ptr(s))
+
+static inline void write_seqcount_t_invalidate(seqcount_t *s)
 {
 	smp_wmb();
 	kcsan_nestable_atomic_begin();
@@ -393,7 +589,7 @@ static inline void write_seqcount_invalidate(seqcount_t *s)
 
 /**
  * raw_read_seqcount_latch() - pick even/odd seqcount_t latch data copy
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * Use seqcount_t latching to switch between two storage places protected
  * by a sequence counter. Doing so allows having interruptible, preemptible,
@@ -406,7 +602,10 @@ static inline void write_seqcount_invalidate(seqcount_t *s)
  * picking which data copy to read. The full counter value must then be
  * checked with read_seqcount_retry().
  */
-static inline int raw_read_seqcount_latch(seqcount_t *s)
+#define raw_read_seqcount_latch(s)					\
+	raw_read_seqcount_t_latch(__seqcount_ptr(s))
+
+static inline int raw_read_seqcount_t_latch(seqcount_t *s)
 {
 	/* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
 	int seq = READ_ONCE(s->sequence); /* ^^^ */
@@ -415,7 +614,7 @@
 
 /**
  * raw_write_seqcount_latch() - redirect readers to even/odd copy
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
  *
  * The latch technique is a multiversion concurrency control method that allows
  * queries during non-atomic modifications. If you can guarantee queries never
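[ Editor's note, not part of the patch: a condensed sketch of the latch pattern, with invented names; the canonical example lives in the raw_write_seqcount_latch() kerneldoc of this file. The writer flips readers between two copies of the data, and the reader picks a copy from the low bit of the counter and validates the full value with read_seqcount_retry(). ]

	struct my_data { u64 a, b; };		/* hypothetical payload */

	struct latch_example {
		seqcount_t	seq;
		struct my_data	data[2];	/* two copies */
	};

	static void latch_example_modify(struct latch_example *l,
					 const struct my_data *new)
	{
		raw_write_seqcount_latch(&l->seq);	/* readers now use data[1] */
		l->data[0] = *new;
		raw_write_seqcount_latch(&l->seq);	/* readers now use data[0] */
		l->data[1] = *new;
	}

	static struct my_data latch_example_read(struct latch_example *l)
	{
		struct my_data snap;
		unsigned int seq;

		do {
			seq  = raw_read_seqcount_latch(&l->seq);
			snap = l->data[seq & 1];
		} while (read_seqcount_retry(&l->seq, seq));

		return snap;
	}
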
@@ -494,7 +693,10 @@
  *	When data is a dynamic data structure; one should use regular RCU
  *	patterns to manage the lifetimes of the objects within.
  */
-static inline void raw_write_seqcount_latch(seqcount_t *s)
+#define raw_write_seqcount_latch(s)					\
+	raw_write_seqcount_t_latch(__seqcount_ptr(s))
+
+static inline void raw_write_seqcount_t_latch(seqcount_t *s)
 {
        smp_wmb();      /* prior stores before incrementing "sequence" */
        s->sequence++;
@@ -516,20 +718,20 @@ typedef struct {
 	spinlock_t lock;
 } seqlock_t;
 
-#define __SEQLOCK_UNLOCKED(lockname)			\
-	{						\
-		.seqcount = SEQCNT_ZERO(lockname),	\
-		.lock =	__SPIN_LOCK_UNLOCKED(lockname)	\
+#define __SEQLOCK_UNLOCKED(lockname)					\
+	{								\
+		.seqcount = SEQCNT_ZERO(lockname),			\
+		.lock =	__SPIN_LOCK_UNLOCKED(lockname)			\
 	}
 
 /**
  * seqlock_init() - dynamic initializer for seqlock_t
  * @sl: Pointer to the seqlock_t instance
  */
-#define seqlock_init(sl)				\
-	do {						\
-		seqcount_init(&(sl)->seqcount);		\
-		spin_lock_init(&(sl)->lock);		\
+#define seqlock_init(sl)						\
+	do {								\
+		seqcount_init(&(sl)->seqcount);				\
+		spin_lock_init(&(sl)->lock);				\
 	} while (0)
 
 /**
@@ -592,7 +794,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
 static inline void write_seqlock(seqlock_t *sl)
 {
 	spin_lock(&sl->lock);
-	__write_seqcount_begin(&sl->seqcount);
+	write_seqcount_t_begin(&sl->seqcount);
 }
 
 /**
@@ -604,7 +806,7 @@ static inline void write_seqlock(seqlock_t *sl)
  */
 static inline void write_sequnlock(seqlock_t *sl)
 {
-	write_seqcount_end(&sl->seqcount);
+	write_seqcount_t_end(&sl->seqcount);
 	spin_unlock(&sl->lock);
 }
 
@@ -618,7 +820,7 @@
 static inline void write_seqlock_bh(seqlock_t *sl)
 {
 	spin_lock_bh(&sl->lock);
-	__write_seqcount_begin(&sl->seqcount);
+	write_seqcount_t_begin(&sl->seqcount);
 }
 
 /**
@@ -631,7 +833,7 @@ static inline void write_seqlock_bh(seqlock_t *sl)
  */
 static inline void write_sequnlock_bh(seqlock_t *sl)
 {
-	write_seqcount_end(&sl->seqcount);
+	write_seqcount_t_end(&sl->seqcount);
 	spin_unlock_bh(&sl->lock);
 }
 
@@ -645,7 +847,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl)
 static inline void write_seqlock_irq(seqlock_t *sl)
 {
 	spin_lock_irq(&sl->lock);
-	__write_seqcount_begin(&sl->seqcount);
+	write_seqcount_t_begin(&sl->seqcount);
 }
 
 /**
@@ -657,7 +859,7 @@ static inline void write_seqlock_irq(seqlock_t *sl)
  */
 static inline void write_sequnlock_irq(seqlock_t *sl)
 {
-	write_seqcount_end(&sl->seqcount);
+	write_seqcount_t_end(&sl->seqcount);
 	spin_unlock_irq(&sl->lock);
 }
 
@@ -666,7 +868,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
 	unsigned long flags;
 
 	spin_lock_irqsave(&sl->lock, flags);
-	__write_seqcount_begin(&sl->seqcount);
+	write_seqcount_t_begin(&sl->seqcount);
 	return flags;
 }
 
@@ -695,13 +897,13 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
 static inline void
 write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
 {
-	write_seqcount_end(&sl->seqcount);
+	write_seqcount_t_end(&sl->seqcount);
 	spin_unlock_irqrestore(&sl->lock, flags);
 }
 
 /**
  * read_seqlock_excl() - begin a seqlock_t locking reader section
- * @sl: Pointer to seqlock_t
+ * @sl:	Pointer to seqlock_t
  *
  * read_seqlock_excl opens a seqlock_t locking reader critical section.  A
  * locking reader exclusively locks out *both* other writers *and* other
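[ Editor's note, not part of the patch: classic seqlock_t usage for comparison, with invented names. The sequential lock bundles the spinlock with the sequence counter, so the write side needs no separate locking calls. ]

	static DEFINE_SEQLOCK(example_seqlock);
	static u64 example_a, example_b;

	static void example_write(u64 a, u64 b)
	{
		write_seqlock(&example_seqlock);	/* takes ->lock, opens write section */
		example_a = a;
		example_b = b;
		write_sequnlock(&example_seqlock);
	}

	static void example_read(u64 *a, u64 *b)
	{
		unsigned int seq;

		do {
			seq = read_seqbegin(&example_seqlock);
			*a = example_a;
			*b = example_b;
		} while (read_seqretry(&example_seqlock, seq));
	}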