author		Peter Zijlstra <[email protected]>	2016-06-22 14:58:02 +0200
committer	Ingo Molnar <[email protected]>	2016-06-27 12:17:55 +0200
commit		8663e24d56dc1f093232783c23ea17f2a6f61c03 (patch)
tree		e9f4d49bcf7324a34c78cdd7ed38b32f9c385843
parent		3d30544f02120b884bba2a9466c87dba980e3be5 (diff)
sched/fair: Reorder cgroup creation code
A future patch needs rq->lock held _after_ we link the task_group into
the hierarchy. In order to avoid taking every rq->lock twice, reorder
things a little and create online_fair_sched_group() to be called after
we link the task_group.

All this code is still run from css_alloc(), so css_online() isn't in
fact used for this.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Konstantin Khlebnikov <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Ingo Molnar <[email protected]>
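For context, the path this patch reorders: group creation runs out of the
cgroup css_alloc() callback, which first allocates the task_group and then
onlines it. A sketch of that caller as it stood in kernel/sched/core.c at
the time (reconstructed here for illustration; not part of this patch):

static struct cgroup_subsys_state *
cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct task_group *parent = css_tg(parent_css);
	struct task_group *tg;

	if (!parent) {
		/* Early initialization for the top cgroup. */
		return &root_task_group.css;
	}

	tg = sched_create_group(parent);	/* alloc_fair_sched_group(): no rq->lock taken here anymore */
	if (IS_ERR(tg))
		return ERR_PTR(-ENOMEM);

	sched_online_group(tg, parent);		/* links tg, then calls online_fair_sched_group() */

	return &tg->css;
}

With post_init_entity_util_avg() moved out of alloc_fair_sched_group(),
each rq->lock is taken exactly once per CPU, in online_fair_sched_group(),
only after the group is linked into the hierarchy.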
-rw-r--r--	kernel/sched/core.c	2
-rw-r--r--	kernel/sched/fair.c	22
-rw-r--r--	kernel/sched/sched.h	1
3 files changed, 21 insertions, 4 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 14afa518948c..4ede4fc65653 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7717,6 +7717,8 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
 	INIT_LIST_HEAD(&tg->children);
 	list_add_rcu(&tg->siblings, &parent->children);
 	spin_unlock_irqrestore(&task_group_lock, flags);
+
+	online_fair_sched_group(tg);
 }
 
 /* rcu callback to free various structures associated with a task group */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 781788d54736..62d5e7dcc7f8 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8624,10 +8624,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 		init_cfs_rq(cfs_rq);
 		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
 		init_entity_runnable_average(se);
-
-		raw_spin_lock_irq(&rq->lock);
-		post_init_entity_util_avg(se);
-		raw_spin_unlock_irq(&rq->lock);
 	}
 
 	return 1;
@@ -8638,6 +8634,22 @@ err:
 	return 0;
 }
 
+void online_fair_sched_group(struct task_group *tg)
+{
+	struct sched_entity *se;
+	struct rq *rq;
+	int i;
+
+	for_each_possible_cpu(i) {
+		rq = cpu_rq(i);
+		se = tg->se[i];
+
+		raw_spin_lock_irq(&rq->lock);
+		post_init_entity_util_avg(se);
+		raw_spin_unlock_irq(&rq->lock);
+	}
+}
+
 void unregister_fair_sched_group(struct task_group *tg)
 {
 	unsigned long flags;
@@ -8742,6 +8754,8 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 	return 1;
 }
 
+void online_fair_sched_group(struct task_group *tg) { }
+
 void unregister_fair_sched_group(struct task_group *tg) { }
 
 #endif /* CONFIG_FAIR_GROUP_SCHED */
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 307bd0418095..28c42b789f70 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -321,6 +321,7 @@ extern int tg_nop(struct task_group *tg, void *data);
 
 extern void free_fair_sched_group(struct task_group *tg);
 extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
+extern void online_fair_sched_group(struct task_group *tg);
 extern void unregister_fair_sched_group(struct task_group *tg);
 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 			struct sched_entity *se, int cpu,