author		Jens Axboe <[email protected]>	2023-09-28 07:47:07 -0600
committer	Jens Axboe <[email protected]>	2023-09-28 07:47:07 -0600
commit		52e856c38761bae0cea09b25cfbb4d46cd930d45 (patch)
tree		e94c5b41e4ea4c8f95b993c6d875e2e7db78bbf8 /include
parent		73c7e7a91f041f4c2e3c0db1e727163b331c60c9 (diff)
parent		cfa92b6d52071aaa8f27d21affdcb14e7448fbc1 (diff)
Merge branch 'locking/core' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into io_uring-futex
Pull in locking/core from the tip tree, to get the futex2 dependencies from
Peter Zijlstra.

* 'locking/core' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
  locking/ww_mutex/test: Make sure we bail out instead of livelock
  locking/ww_mutex/test: Fix potential workqueue corruption
  locking/ww_mutex/test: Use prng instead of rng to avoid hangs at bootup
  futex: Add sys_futex_requeue()
  futex: Add flags2 argument to futex_requeue()
  futex: Propagate flags into get_futex_key()
  futex: Add sys_futex_wait()
  futex: FLAGS_STRICT
  futex: Add sys_futex_wake()
  futex: Validate futex value against futex size
  futex: Flag conversion
  futex: Extend the FUTEX2 flags
  futex: Clarify FUTEX2 flags
  asm-generic: ticket-lock: Optimize arch_spin_value_unlocked()
  futex/pi: Fix recursive rt_mutex waiter state
  locking/rtmutex: Add a lockdep assert to catch potential nested blocking
  locking/rtmutex: Use rt_mutex specific scheduler helpers
  sched: Provide rt_mutex specific scheduler helpers
  sched: Extract __schedule_loop()
  locking/rtmutex: Avoid unconditional slowpath for DEBUG_RT_MUTEXES
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/spinlock.h      16
-rw-r--r--  include/linux/cleanup.h             39
-rw-r--r--  include/linux/sched.h                3
-rw-r--r--  include/linux/sched/rt.h             4
-rw-r--r--  include/linux/syscalls.h            10
-rw-r--r--  include/uapi/asm-generic/unistd.h    8
-rw-r--r--  include/uapi/linux/futex.h          31
7 files changed, 97 insertions, 14 deletions
diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h
index fdfebcb050f4..90803a826ba0 100644
--- a/include/asm-generic/spinlock.h
+++ b/include/asm-generic/spinlock.h
@@ -68,11 +68,18 @@ static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
smp_store_release(ptr, (u16)val + 1);
}
+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ u32 val = lock.counter;
+
+ return ((val >> 16) == (val & 0xffff));
+}
+
static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
- u32 val = atomic_read(lock);
+ arch_spinlock_t val = READ_ONCE(*lock);
- return ((val >> 16) != (val & 0xffff));
+ return !arch_spin_value_unlocked(val);
}
static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock)
@@ -82,11 +89,6 @@ static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock)
return (s16)((val >> 16) - (val & 0xffff)) > 1;
}
-static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
-{
- return !arch_spin_is_locked(&lock);
-}
-
#include <asm/qrwlock.h>
#endif /* __ASM_GENERIC_SPINLOCK_H */
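The asm-generic ticket lock packs the next-ticket count in the upper 16 bits of
the 32-bit counter and the now-serving ticket in the lower 16 bits, so a value
is unlocked exactly when the two halves are equal; moving that test into
arch_spin_value_unlocked() lets callers check an already-loaded value without
re-reading the lock word. Below is a minimal userspace sketch of that encoding
(an illustration only, not kernel code; the struct and helper names are made up
for the example):

/* Userspace illustration only; the kernel type is arch_spinlock_t. */
#include <stdint.h>
#include <stdio.h>

struct ticket_lock { uint32_t counter; };	/* next ticket:16 | now serving:16 */

static int ticket_value_unlocked(struct ticket_lock lock)
{
	uint32_t val = lock.counter;

	/* unlocked iff the next ticket (high half) equals now-serving (low half) */
	return (val >> 16) == (val & 0xffff);
}

int main(void)
{
	struct ticket_lock unlocked = { .counter = 0x00050005 };	/* 5 == 5 */
	struct ticket_lock locked   = { .counter = 0x00060005 };	/* a ticket is outstanding */

	printf("%d %d\n", ticket_value_unlocked(unlocked),
			  ticket_value_unlocked(locked));		/* prints "1 0" */
	return 0;
}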
diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
index 53f1a7a932b0..9f1a9c455b68 100644
--- a/include/linux/cleanup.h
+++ b/include/linux/cleanup.h
@@ -7,8 +7,9 @@
/*
* DEFINE_FREE(name, type, free):
* simple helper macro that defines the required wrapper for a __free()
- * based cleanup function. @free is an expression using '_T' to access
- * the variable.
+ * based cleanup function. @free is an expression using '_T' to access the
+ * variable. @free should typically include a NULL test before calling a
+ * function, see the example below.
*
* __free(name):
* variable attribute to add a scoped based cleanup to the variable.
@@ -17,6 +18,9 @@
* like a non-atomic xchg(var, NULL), such that the cleanup function will
* be inhibited -- provided it sanely deals with a NULL value.
*
+ * NOTE: this has __must_check semantics so that it is harder to accidentally
+ * leak the resource.
+ *
* return_ptr(p):
* returns p while inhibiting the __free().
*
@@ -24,6 +28,8 @@
*
* DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
*
+ * void *alloc_obj(...)
+ * {
* struct obj *p __free(kfree) = kmalloc(...);
* if (!p)
* return NULL;
@@ -32,6 +38,24 @@
* return NULL;
*
* return_ptr(p);
+ * }
+ *
+ * NOTE: the DEFINE_FREE()'s @free expression includes a NULL test even though
+ * kfree() is fine to be called with a NULL value. This is on purpose. This way
+ * the compiler sees the end of our alloc_obj() function as:
+ *
+ * tmp = p;
+ * p = NULL;
+ * if (p)
+ * kfree(p);
+ * return tmp;
+ *
+ * And through the magic of value-propagation and dead-code-elimination, it
+ * eliminates the actual cleanup call and compiles into:
+ *
+ * return p;
+ *
+ * Without the NULL test it turns into a mess and the compiler can't help us.
*/
#define DEFINE_FREE(_name, _type, _free) \
@@ -39,8 +63,17 @@
#define __free(_name) __cleanup(__free_##_name)
+#define __get_and_null_ptr(p) \
+ ({ __auto_type __ptr = &(p); \
+ __auto_type __val = *__ptr; \
+ *__ptr = NULL; __val; })
+
+static inline __must_check
+const volatile void * __must_check_fn(const volatile void *val)
+{ return val; }
+
#define no_free_ptr(p) \
- ({ __auto_type __ptr = (p); (p) = NULL; __ptr; })
+ ((typeof(p)) __must_check_fn(__get_and_null_ptr(p)))
#define return_ptr(p) return no_free_ptr(p)
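To make the comment block above concrete, here is a userspace analogue of the
pattern (a sketch assuming GCC or Clang for __attribute__((cleanup)),
__auto_type and statement expressions; the macro bodies are simplified
stand-ins, not the kernel definitions, and the kernel's no_free_ptr()
additionally routes through the __must_check helper added above so a stolen
pointer cannot be silently dropped):

#include <stdlib.h>
#include <string.h>

#define DEFINE_FREE(name, type, free_expr) \
	static inline void __free_##name(void *p) { type _T = *(type *)p; free_expr; }

#define __free(name)	__attribute__((cleanup(__free_##name)))

/* non-atomic "steal the pointer": the variable becomes NULL, so the cleanup
 * runs on NULL and does nothing */
#define no_free_ptr(p)	({ __auto_type __ptr = (p); (p) = NULL; __ptr; })
#define return_ptr(p)	return no_free_ptr(p)

/* NULL test on purpose, for the dead-code-elimination reason explained above */
DEFINE_FREE(cfree, void *, if (_T) free(_T))

static char *dup_or_null(const char *s)
{
	char *p __free(cfree) = malloc(strlen(s) + 1);

	if (!p)
		return NULL;		/* cleanup sees NULL, nothing to free */

	strcpy(p, s);
	return_ptr(p);			/* ownership transferred to the caller */
}

int main(void)
{
	char *s = dup_or_null("hello");

	free(s);			/* caller owns the returned pointer */
	return 0;
}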
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 77f01ac385f7..67623ffd4a8e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -911,6 +911,9 @@ struct task_struct {
* ->sched_remote_wakeup gets used, so it can be in this word.
*/
unsigned sched_remote_wakeup:1;
+#ifdef CONFIG_RT_MUTEXES
+ unsigned sched_rt_mutex:1;
+#endif
/* Bit to tell LSMs we're in execve(): */
unsigned in_execve:1;
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index 994c25640e15..b2b9e6eb9683 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -30,6 +30,10 @@ static inline bool task_is_realtime(struct task_struct *tsk)
}
#ifdef CONFIG_RT_MUTEXES
+extern void rt_mutex_pre_schedule(void);
+extern void rt_mutex_schedule(void);
+extern void rt_mutex_post_schedule(void);
+
/*
* Must hold either p->pi_lock or task_rq(p)->lock.
*/
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 22bc6bc147f8..0901af60d971 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -549,6 +549,16 @@ asmlinkage long sys_set_robust_list(struct robust_list_head __user *head,
asmlinkage long sys_futex_waitv(struct futex_waitv *waiters,
unsigned int nr_futexes, unsigned int flags,
struct __kernel_timespec __user *timeout, clockid_t clockid);
+
+asmlinkage long sys_futex_wake(void __user *uaddr, unsigned long mask, int nr, unsigned int flags);
+
+asmlinkage long sys_futex_wait(void __user *uaddr, unsigned long val, unsigned long mask,
+ unsigned int flags, struct __kernel_timespec __user *timespec,
+ clockid_t clockid);
+
+asmlinkage long sys_futex_requeue(struct futex_waitv __user *waiters,
+ unsigned int flags, int nr_wake, int nr_requeue);
+
asmlinkage long sys_nanosleep(struct __kernel_timespec __user *rqtp,
struct __kernel_timespec __user *rmtp);
asmlinkage long sys_nanosleep_time32(struct old_timespec32 __user *rqtp,
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index abe087c53b4b..d9e9cd13e577 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -822,9 +822,15 @@ __SYSCALL(__NR_cachestat, sys_cachestat)
#define __NR_fchmodat2 452
__SYSCALL(__NR_fchmodat2, sys_fchmodat2)
+#define __NR_futex_wake 454
+__SYSCALL(__NR_futex_wake, sys_futex_wake)
+#define __NR_futex_wait 455
+__SYSCALL(__NR_futex_wait, sys_futex_wait)
+#define __NR_futex_requeue 456
+__SYSCALL(__NR_futex_requeue, sys_futex_requeue)
#undef __NR_syscalls
-#define __NR_syscalls 453
+#define __NR_syscalls 457
/*
* 32 bit systems traditionally used different
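Together, the syscalls.h prototypes and the unistd.h numbers above wire up the
three new futex2 entry points. The sketch below is a userspace illustration of
invoking two of them through raw syscall(2) on an architecture that uses the
asm-generic numbers; libc wrappers did not exist at the time of this merge, the
fallback constants are copied from the hunks in this patch, and the calls only
succeed on a kernel that includes them:

/* Illustration only: raw syscall(2) invocation of the new futex2 calls. */
#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>
#include <stdint.h>
#include <stdio.h>

#ifndef __NR_futex_wake			/* numbers from the unistd.h hunk above */
#define __NR_futex_wake		454
#define __NR_futex_wait		455
#endif

#ifndef FUTEX2_SIZE_U32			/* values from the futex.h hunk below */
#define FUTEX2_SIZE_U32		0x02
#define FUTEX2_PRIVATE		128	/* FUTEX_PRIVATE_FLAG */
#endif

static uint32_t futex_word;

int main(void)
{
	unsigned int flags = FUTEX2_SIZE_U32 | FUTEX2_PRIVATE;

	/* sys_futex_wake(uaddr, mask, nr, flags): wake up to one waiter
	 * whose wait mask intersects the all-ones mask */
	long woken = syscall(__NR_futex_wake, &futex_word,
			     (unsigned long)FUTEX_BITSET_MATCH_ANY, 1, flags);
	if (woken < 0)
		perror("futex_wake");	/* e.g. ENOSYS on an older kernel */

	/*
	 * A waiter would block with sys_futex_wait(uaddr, val, mask, flags,
	 * timespec, clockid) until *uaddr no longer equals val, e.g.:
	 *
	 *	syscall(__NR_futex_wait, &futex_word, 0,
	 *		(unsigned long)FUTEX_BITSET_MATCH_ANY, flags,
	 *		NULL, CLOCK_MONOTONIC);
	 */
	return 0;
}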
diff --git a/include/uapi/linux/futex.h b/include/uapi/linux/futex.h
index 71a5df8d2689..d2ee625ea189 100644
--- a/include/uapi/linux/futex.h
+++ b/include/uapi/linux/futex.h
@@ -44,10 +44,35 @@
FUTEX_PRIVATE_FLAG)
/*
- * Flags to specify the bit length of the futex word for futex2 syscalls.
- * Currently, only 32 is supported.
+ * Flags for futex2 syscalls.
+ *
+ * NOTE: these are not pure flags, they can also be seen as:
+ *
+ * union {
+ * u32 flags;
+ * struct {
+ * u32 size : 2,
+ * numa : 1,
+ * : 4,
+ * private : 1;
+ * };
+ * };
*/
-#define FUTEX_32 2
+#define FUTEX2_SIZE_U8 0x00
+#define FUTEX2_SIZE_U16 0x01
+#define FUTEX2_SIZE_U32 0x02
+#define FUTEX2_SIZE_U64 0x03
+#define FUTEX2_NUMA 0x04
+ /* 0x08 */
+ /* 0x10 */
+ /* 0x20 */
+ /* 0x40 */
+#define FUTEX2_PRIVATE FUTEX_PRIVATE_FLAG
+
+#define FUTEX2_SIZE_MASK 0x03
+
+/* do not use */
+#define FUTEX_32 FUTEX2_SIZE_U32 /* historical accident :-( */
/*
* Max numbers of elements in a futex_waitv array
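The union in the comment above is only a conceptual view of the flag layout;
actual decoding is done with the masks, since a real C bit-field layout would
be compiler and endian dependent. The sketch below (userspace illustration,
constants copied from this hunk) shows how the two size bits encode the width
of the futex word and how the other bits are tested. Per the comment removed
above, only the 32-bit size was supported at this point, which is why FUTEX_32
is kept as an alias of FUTEX2_SIZE_U32 for the existing futex_waitv() ABI:

#include <stdio.h>

#define FUTEX2_SIZE_U8		0x00
#define FUTEX2_SIZE_U16		0x01
#define FUTEX2_SIZE_U32		0x02
#define FUTEX2_SIZE_U64		0x03
#define FUTEX2_NUMA		0x04
#define FUTEX2_PRIVATE		128	/* FUTEX_PRIVATE_FLAG */
#define FUTEX2_SIZE_MASK	0x03

static unsigned int futex2_size_bytes(unsigned int flags)
{
	/* the two size bits encode log2 of the futex word size: 1/2/4/8 bytes */
	return 1u << (flags & FUTEX2_SIZE_MASK);
}

int main(void)
{
	unsigned int flags = FUTEX2_SIZE_U32 | FUTEX2_PRIVATE;

	printf("word size : %u bytes\n", futex2_size_bytes(flags));		/* 4 */
	printf("private   : %s\n", (flags & FUTEX2_PRIVATE) ? "yes" : "no");	/* yes */
	printf("numa      : %s\n", (flags & FUTEX2_NUMA) ? "yes" : "no");	/* no */
	return 0;
}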