author     Mel Gorman <[email protected]>            2020-11-20 09:06:27 +0000
committer  Peter Zijlstra <[email protected]>        2020-11-24 16:47:47 +0100
commit     abeae76a47005aa3f07c9be12d8076365622e25c
tree       fa6ea9e6a913665bb9eded4488a883fd85a2db85
parent     74d862b682f51e45d25b95b1ecf212428a4967b0
sched/numa: Rename nr_running and break out the magic number
This is simply a preparation patch to make the following patches easier
to read. No functional change.
Signed-off-by: Mel Gorman <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Vincent Guittot <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
-rw-r--r--   kernel/sched/fair.c | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6691e28fa3da..9d10abe00f72 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1559,7 +1559,7 @@ struct task_numa_env {
 static unsigned long cpu_load(struct rq *rq);
 static unsigned long cpu_runnable(struct rq *rq);
 static unsigned long cpu_util(int cpu);
-static inline long adjust_numa_imbalance(int imbalance, int nr_running);
+static inline long adjust_numa_imbalance(int imbalance, int dst_running);
 
 static inline enum numa_type
 numa_classify(unsigned int imbalance_pct,
@@ -8991,7 +8991,9 @@ next_group:
 	}
 }
 
-static inline long adjust_numa_imbalance(int imbalance, int nr_running)
+#define NUMA_IMBALANCE_MIN 2
+
+static inline long adjust_numa_imbalance(int imbalance, int dst_running)
 {
 	unsigned int imbalance_min;
 
@@ -8999,8 +9001,8 @@ static inline long adjust_numa_imbalance(int imbalance, int nr_running)
 	 * Allow a small imbalance based on a simple pair of communicating
 	 * tasks that remain local when the source domain is almost idle.
 	 */
-	imbalance_min = 2;
-	if (nr_running <= imbalance_min)
+	imbalance_min = NUMA_IMBALANCE_MIN;
+	if (dst_running <= imbalance_min)
 		return 0;
 
 	return imbalance;
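
For reference, the renamed helper can be exercised on its own. Below is a minimal userspace sketch: the function body and NUMA_IMBALANCE_MIN are taken from the diff above, while the includes, main() and the sample values are illustrative assumptions and not part of the patch.

#include <stdio.h>

/* Post-patch form of the helper from kernel/sched/fair.c:
 * NUMA_IMBALANCE_MIN replaces the former magic number 2 and the
 * nr_running parameter is renamed to dst_running. */
#define NUMA_IMBALANCE_MIN 2

static inline long adjust_numa_imbalance(int imbalance, int dst_running)
{
	unsigned int imbalance_min;

	/*
	 * Allow a small imbalance based on a simple pair of communicating
	 * tasks that remain local when the source domain is almost idle.
	 */
	imbalance_min = NUMA_IMBALANCE_MIN;
	if (dst_running <= imbalance_min)
		return 0;

	return imbalance;
}

int main(void)
{
	/* Illustrative values only: the imbalance is ignored while the
	 * destination runs at most NUMA_IMBALANCE_MIN tasks, and is
	 * passed through unchanged once the destination is busier. */
	printf("%ld\n", adjust_numa_imbalance(3, 2));	/* prints 0 */
	printf("%ld\n", adjust_numa_imbalance(3, 4));	/* prints 3 */
	return 0;
}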