| author | Vincent Guittot <[email protected]> | 2020-02-24 09:52:17 +0000 |
|---|---|---|
| committer | Ingo Molnar <[email protected]> | 2020-02-24 11:36:36 +0100 |
| commit | 0dacee1bfa70e171be3a12a30414c228453048d2 | |
| tree | 1820b6a3f33b1761994d96f9705a5aba5a4fe03d /include | |
| parent | fb86f5b2119245afd339280099b4e9417cc0b03a | |
sched/pelt: Remove unused runnable load average
Now that runnable_load_avg is no longer used, we can remove it to make
space for a new signal.
Signed-off-by: Vincent Guittot <[email protected]>
Signed-off-by: Mel Gorman <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Reviewed-by: "Dietmar Eggemann <[email protected]>"
Acked-by: Peter Zijlstra <[email protected]>
Cc: Juri Lelli <[email protected]>
Cc: Valentin Schneider <[email protected]>
Cc: Phil Auld <[email protected]>
Cc: Hillf Danton <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Diffstat (limited to 'include')
| -rw-r--r-- | include/linux/sched.h | 5 |
|---|---|---|

1 file changed, 1 insertion(+), 4 deletions(-)
```diff
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 04278493bf15..037eaffabc24 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -357,7 +357,7 @@ struct util_est {
 
 /*
  * The load_avg/util_avg accumulates an infinite geometric series
- * (see __update_load_avg() in kernel/sched/fair.c).
+ * (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c).
  *
  * [load_avg definition]
  *
@@ -401,11 +401,9 @@ struct util_est {
 struct sched_avg {
 	u64				last_update_time;
 	u64				load_sum;
-	u64				runnable_load_sum;
 	u32				util_sum;
 	u32				period_contrib;
 	unsigned long			load_avg;
-	unsigned long			runnable_load_avg;
 	unsigned long			util_avg;
 	struct util_est			util_est;
 } ____cacheline_aligned;
@@ -449,7 +447,6 @@ struct sched_statistics {
 struct sched_entity {
 	/* For load-balancing: */
 	struct load_weight		load;
-	unsigned long			runnable_weight;
 	struct rb_node			run_node;
 	struct list_head		group_node;
 	unsigned int			on_rq;
```
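For readers following the comment change in the first hunk: load_avg is a PELT (Per-Entity Load Tracking) signal that accumulates an infinite geometric series over roughly 1ms periods, decaying each period's contribution so that the signal has a 32-period half-life. The standalone program below is a minimal sketch of that accumulation; the floating-point arithmetic and the program structure are illustrative assumptions, not the kernel's fixed-point implementation in kernel/sched/pelt.c.

```c
/*
 * Illustrative-only sketch (plain C, not kernel code) of the
 * "infinite geometric series" that the patched comment refers to.
 * PELT divides time into ~1ms periods and decays each period's
 * contribution by y, where y^32 = 0.5 (a 32-period half-life).
 */
#include <math.h>
#include <stdio.h>

#define PELT_HALF_LIFE_PERIODS 32

int main(void)
{
	double y = pow(0.5, 1.0 / PELT_HALF_LIFE_PERIODS);
	double load_sum = 0.0;

	/*
	 * A task runnable for 100 consecutive periods:
	 * load_sum = 1 + y + y^2 + ...; each step decays the
	 * accumulated history by y and adds the new period.
	 */
	for (int p = 0; p < 100; p++)
		load_sum = load_sum * y + 1.0;

	/* The series converges to 1 / (1 - y), the saturation value. */
	printf("load_sum = %.2f, limit = %.2f\n",
	       load_sum, 1.0 / (1.0 - y));
	return 0;
}
```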