author     Frederic Weisbecker <[email protected]>   2020-11-13 13:13:27 +0100
committer  Paul E. McKenney <[email protected]>      2021-01-06 16:24:59 -0800
commit     32aa2f4170d22f0b9fcb75ab05679ab122fae373 (patch)
tree       668df1c7f89bdfb995a220f390dc17f8468aa893
parent     e3abe959fbd57aa751bc533677a35c411cee9b16 (diff)
rcu/nocb: Process batch locally as long as offloading isn't complete
This commit makes sure to process the callbacks locally (via either
RCU_SOFTIRQ or the rcuc kthread) whenever the segcblist isn't entirely
offloaded. This ensures that callbacks are invoked one way or another
while a CPU is in the middle of a toggle operation.
Cc: Josh Triplett <[email protected]>
Cc: Steven Rostedt <[email protected]>
Cc: Mathieu Desnoyers <[email protected]>
Cc: Lai Jiangshan <[email protected]>
Cc: Joel Fernandes <[email protected]>
Cc: Neeraj Upadhyay <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Inspired-by: Paul E. McKenney <[email protected]>
Tested-by: Boqun Feng <[email protected]>
Signed-off-by: Frederic Weisbecker <[email protected]>
Signed-off-by: Paul E. McKenney <[email protected]>
-rw-r--r--  kernel/rcu/rcu_segcblist.h | 12
-rw-r--r--  kernel/rcu/tree.c          |  3
2 files changed, 14 insertions(+), 1 deletion(-)
diff --git a/kernel/rcu/rcu_segcblist.h b/kernel/rcu/rcu_segcblist.h
index 28c9a5225afc..afad6fc6311c 100644
--- a/kernel/rcu/rcu_segcblist.h
+++ b/kernel/rcu/rcu_segcblist.h
@@ -95,6 +95,18 @@ static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
 	return false;
 }
 
+static inline bool rcu_segcblist_completely_offloaded(struct rcu_segcblist *rsclp)
+{
+	int flags = SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP | SEGCBLIST_OFFLOADED;
+
+	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU)) {
+		if ((rsclp->flags & flags) == flags)
+			return true;
+	}
+
+	return false;
+}
+
 /*
  * Are all segments following the specified segment of the specified
  * rcu_segcblist structure empty of callbacks?  (The specified
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 4ef59a5416a3..ec14c017c0e3 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2700,6 +2700,7 @@ static __latent_entropy void rcu_core(void)
 	struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
 	struct rcu_node *rnp = rdp->mynode;
 	const bool offloaded = rcu_segcblist_is_offloaded(&rdp->cblist);
+	const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist);
 
 	if (cpu_is_offline(smp_processor_id()))
 		return;
@@ -2729,7 +2730,7 @@ static __latent_entropy void rcu_core(void)
 		rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
 
 	/* If there are callbacks ready, invoke them. */
-	if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist) &&
+	if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) &&
 	    likely(READ_ONCE(rcu_scheduler_fully_active)))
 		rcu_do_batch(rdp);
 
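
For readers who want to poke at the flag logic outside the kernel, below is a minimal user-space C sketch of the check this patch introduces: keep invoking callbacks locally until all three offloading-related flags are set. The flag values, the struct segcblist type, and the completely_offloaded() helper are simplified stand-ins invented for illustration; the real definitions and the rcu_segcblist_completely_offloaded()/do_batch logic live in kernel/rcu/rcu_segcblist.h and kernel/rcu/tree.c as shown in the diff above.

/*
 * Stand-alone sketch (not kernel code) of the "completely offloaded" test.
 * Flag values are arbitrary stand-ins for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

#define SEGCBLIST_OFFLOADED   (1 << 0)  /* stand-in value */
#define SEGCBLIST_KTHREAD_CB  (1 << 1)  /* stand-in value */
#define SEGCBLIST_KTHREAD_GP  (1 << 2)  /* stand-in value */

struct segcblist { int flags; };

/* Mirrors the new helper: all three flags must be set at once. */
static bool completely_offloaded(const struct segcblist *sclp)
{
	int need = SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP | SEGCBLIST_OFFLOADED;

	return (sclp->flags & need) == need;
}

int main(void)
{
	/* Mid-toggle: offloading requested, but the kthreads are not ready yet. */
	struct segcblist mid_toggle = { .flags = SEGCBLIST_OFFLOADED };
	/* Fully offloaded: kthreads now handle both callbacks and grace periods. */
	struct segcblist done = {
		.flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP,
	};

	/* As in the patched rcu_core(), do the local batch until fully offloaded. */
	printf("mid-toggle: do_batch = %d\n", !completely_offloaded(&mid_toggle));
	printf("done:       do_batch = %d\n", !completely_offloaded(&done));
	return 0;
}

Compiling and running this prints do_batch = 1 for the mid-toggle state and 0 once all three flags are set, which is the same decision the patched condition in rcu_core() makes before calling rcu_do_batch().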