author     Mathieu Desnoyers <[email protected]>  2019-09-19 13:37:00 -0400
committer  Ingo Molnar <[email protected]>         2019-09-25 17:42:30 +0200
commit     09554009c0cad4cb2223dd943c813c9257c6883a (patch)
tree       e66d1a7c79665dff8a7846f837402d2fa4a74eb3
parent     fc0d77387cb5ae883fd774fc559e056a8dde024c (diff)
sched/membarrier: Remove redundant check
Checking that the number of threads is 1 is redundant with checking
mm_users == 1.

No change in functionality intended.

Suggested-by: Oleg Nesterov <[email protected]>
Signed-off-by: Mathieu Desnoyers <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Chris Metcalf <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Eric W. Biederman <[email protected]>
Cc: Kirill Tkhai <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Russell King - ARM Linux admin <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
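[Editor's note on the invariant the patch relies on: each live thread that shares an address space (clone() with CLONE_VM) holds a reference counted in mm->mm_users, so observing mm_users == 1 already implies a single-threaded process, which is why the get_nr_threads(p) == 1 test adds nothing. Below is a minimal userspace sketch of that counting argument; struct mm and single_threaded() are simplified stand-ins invented for illustration, not the kernel's actual structures.]

/* Userspace model of the mm_users counting argument (illustrative only). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct mm { atomic_int mm_users; };   /* stand-in for struct mm_struct */

/* Each thread sharing the address space holds one mm_users reference,
 * so a single user implies a single-threaded process. */
static bool single_threaded(struct mm *mm)
{
	return atomic_load(&mm->mm_users) == 1;
}

int main(void)
{
	struct mm mm = { .mm_users = 1 };       /* one thread, one user   */
	printf("single-threaded: %d\n", single_threaded(&mm));  /* 1 */
	atomic_fetch_add(&mm.mm_users, 1);      /* a second thread joins  */
	printf("single-threaded: %d\n", single_threaded(&mm));  /* 0 */
	return 0;
}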
-rw-r--r--  kernel/sched/membarrier.c | 4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index d48b95fc4026..7ccbd0e19626 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -186,7 +186,7 @@ static int membarrier_register_global_expedited(void)
 		MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY)
 		return 0;
 	atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED, &mm->membarrier_state);
-	if (atomic_read(&mm->mm_users) == 1 && get_nr_threads(p) == 1) {
+	if (atomic_read(&mm->mm_users) == 1) {
 		/*
 		 * For single mm user, single threaded process, we can
 		 * simply issue a memory barrier after setting
@@ -232,7 +232,7 @@ static int membarrier_register_private_expedited(int flags)
 	if (flags & MEMBARRIER_FLAG_SYNC_CORE)
 		atomic_or(MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE,
 			  &mm->membarrier_state);
-	if (!(atomic_read(&mm->mm_users) == 1 && get_nr_threads(p) == 1)) {
+	if (atomic_read(&mm->mm_users) != 1) {
 		/*
 		 * Ensure all future scheduler executions will observe the
 		 * new thread flag state for this process.