| author | Ben Gardon <[email protected]> | 2021-02-02 10:57:14 -0800 |
|---|---|---|
| committer | Paolo Bonzini <[email protected]> | 2021-02-04 05:27:43 -0500 |
| commit | f3d4b4b1dc1c5fb9ea17cac14133463bfe72f170 | |
| tree | e5c72ff9e9518c7817fca537fb67f09cf6fef844 /include/linux | |
| parent | a09a689a534183c48f200bc2de1ae61ae9c462ad | |
sched: Add cond_resched_rwlock
Safely rescheduling while holding a spin lock is essential for keeping
long-running kernel operations running smoothly. Add the equivalent
facility for rwlocks: cond_resched_rwlock_read() and
cond_resched_rwlock_write().
CC: Ingo Molnar <[email protected]>
CC: Will Deacon <[email protected]>
Acked-by: Peter Zijlstra <[email protected]>
Acked-by: Davidlohr Bueso <[email protected]>
Acked-by: Waiman Long <[email protected]>
Acked-by: Paolo Bonzini <[email protected]>
Signed-off-by: Ben Gardon <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
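To illustrate the intended use, here is a minimal sketch of a long-running read-side scan that yields periodically. Only cond_resched_rwlock_read() and cond_resched_rwlock_write() come from this patch; the lock, the loop, and example_scan() are hypothetical stand-ins.

```c
#include <linux/sched.h>
#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_lock);	/* hypothetical rwlock */

/* Hypothetical long-running scan of state protected by example_lock. */
static void example_scan(unsigned long nr_items)
{
	unsigned long i;

	read_lock(&example_lock);
	for (i = 0; i < nr_items; i++) {
		/* ... examine one item protected by example_lock ... */

		/*
		 * If a reschedule is pending or the lock is contended,
		 * drop example_lock, reschedule, and retake it. A nonzero
		 * return means the lock was released, so any state cached
		 * across this call must be revalidated.
		 */
		if (cond_resched_rwlock_read(&example_lock)) {
			/* revalidate cached state here */
		}
	}
	read_unlock(&example_lock);
}
```

The read and write variants mirror cond_resched_lock(): the caller keeps the lock held across the call, and the helper only drops and reacquires it when a break is actually needed.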
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/sched.h | 12 |
1 file changed, 12 insertions(+), 0 deletions(-)
```diff
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5d1378e5a040..3052d16da3cf 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1883,12 +1883,24 @@ static inline int _cond_resched(void) { return 0; }
 })
 
 extern int __cond_resched_lock(spinlock_t *lock);
+extern int __cond_resched_rwlock_read(rwlock_t *lock);
+extern int __cond_resched_rwlock_write(rwlock_t *lock);
 
 #define cond_resched_lock(lock) ({				\
 	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
 	__cond_resched_lock(lock);				\
 })
 
+#define cond_resched_rwlock_read(lock) ({			\
+	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
+	__cond_resched_rwlock_read(lock);			\
+})
+
+#define cond_resched_rwlock_write(lock) ({			\
+	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
+	__cond_resched_rwlock_write(lock);			\
+})
+
 static inline void cond_resched_rcu(void)
 {
 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
```
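The diff above adds only the declarations and wrapper macros; the slow-path bodies land in kernel/sched/core.c and are outside this (include/linux-limited) view. As context, a sketch of what the read-side slow path plausibly looks like, modeled on the existing __cond_resched_lock() — a paraphrase for illustration, not the contents of this diff:

```c
/*
 * Illustrative sketch of the read-side slow path, mirroring
 * __cond_resched_lock(); the real body is not part of this diff.
 */
int __cond_resched_rwlock_read(rwlock_t *lock)
{
	int resched = should_resched(PREEMPT_LOCK_OFFSET);
	int ret = 0;

	lockdep_assert_held_read(lock);

	/* Break out only if a reschedule is due or a writer is waiting. */
	if (rwlock_needbreak(lock) || resched) {
		read_unlock(lock);
		if (resched)
			preempt_schedule_common();
		else
			cpu_relax();
		ret = 1;
		read_lock(lock);
	}
	return ret;
}
```

The write variant would be identical except for taking and releasing the lock with write_lock()/write_unlock() and asserting write ownership. Returning whether the lock was dropped lets callers know when to revalidate state they derived while holding it.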