Diffstat (limited to 'kernel/cgroup/cpuset.c')
-rw-r--r--	kernel/cgroup/cpuset.c	55
1 file changed, 40 insertions, 15 deletions
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index b474289c15b8..a29c0b13706b 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -550,11 +550,15 @@ static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
 /*
  * update task's spread flag if cpuset's page/slab spread flag is set
  *
- * Call with callback_lock or cpuset_rwsem held.
+ * Call with callback_lock or cpuset_rwsem held. The check can be skipped
+ * if on default hierarchy.
  */
-static void cpuset_update_task_spread_flag(struct cpuset *cs,
+static void cpuset_update_task_spread_flags(struct cpuset *cs,
 					struct task_struct *tsk)
 {
+	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
+		return;
+
 	if (is_spread_page(cs))
 		task_set_spread_page(tsk);
 	else
@@ -2153,7 +2157,7 @@ static void update_tasks_flags(struct cpuset *cs)
 
 	css_task_iter_start(&cs->css, 0, &it);
 	while ((task = css_task_iter_next(&it)))
-		cpuset_update_task_spread_flag(cs, task);
+		cpuset_update_task_spread_flags(cs, task);
 	css_task_iter_end(&it);
 }
 
@@ -2509,12 +2513,28 @@ static void cpuset_attach(struct cgroup_taskset *tset)
 	struct cgroup_subsys_state *css;
 	struct cpuset *cs;
 	struct cpuset *oldcs = cpuset_attach_old_cs;
+	bool cpus_updated, mems_updated;
 
 	cgroup_taskset_first(tset, &css);
 	cs = css_cs(css);
 
 	lockdep_assert_cpus_held();	/* see cgroup_attach_lock() */
 	percpu_down_write(&cpuset_rwsem);
+	cpus_updated = !cpumask_equal(cs->effective_cpus,
+				      oldcs->effective_cpus);
+	mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
+
+	/*
+	 * In the default hierarchy, enabling cpuset in the child cgroups
+	 * will trigger a number of cpuset_attach() calls with no change
+	 * in effective cpus and mems. In that case, we can optimize out
+	 * by skipping the task iteration and update.
+	 */
+	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
+	    !cpus_updated && !mems_updated) {
+		cpuset_attach_nodemask_to = cs->effective_mems;
+		goto out;
+	}
 
 	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
 
@@ -2530,14 +2550,19 @@ static void cpuset_attach(struct cgroup_taskset *tset)
 		WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
 
 		cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
-		cpuset_update_task_spread_flag(cs, task);
+		cpuset_update_task_spread_flags(cs, task);
 	}
 
 	/*
 	 * Change mm for all threadgroup leaders. This is expensive and may
-	 * sleep and should be moved outside migration path proper.
+	 * sleep and should be moved outside migration path proper. Skip it
+	 * if there is no change in effective_mems and CS_MEMORY_MIGRATE is
+	 * not set.
 	 */
 	cpuset_attach_nodemask_to = cs->effective_mems;
+	if (!is_memory_migrate(cs) && !mems_updated)
+		goto out;
+
 	cgroup_taskset_for_each_leader(leader, css, tset) {
 		struct mm_struct *mm = get_task_mm(leader);
 
@@ -2560,6 +2585,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
 		}
 	}
 
+out:
 	cs->old_mems_allowed = cpuset_attach_nodemask_to;
 
 	cs->attach_in_progress--;
@@ -3046,11 +3072,15 @@ static struct cftype dfl_files[] = {
 };
 
 
-/*
- *	cpuset_css_alloc - allocate a cpuset css
- *	cgrp:	control group that the new cpuset will be part of
+/**
+ * cpuset_css_alloc - Allocate a cpuset css
+ * @parent_css: Parent css of the control group that the new cpuset will be
+ *              part of
+ * Return: cpuset css on success, -ENOMEM on failure.
+ *
+ * Allocate and initialize a new cpuset css, for non-NULL @parent_css, return
+ * top cpuset css otherwise.
  */
-
 static struct cgroup_subsys_state *
 cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
 {
@@ -3630,11 +3660,6 @@ static int cpuset_track_online_nodes(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block cpuset_track_online_nodes_nb = {
-	.notifier_call = cpuset_track_online_nodes,
-	.priority = 10,		/* ??! */
-};
-
 /**
  * cpuset_init_smp - initialize cpus_allowed
  *
@@ -3652,7 +3677,7 @@ void __init cpuset_init_smp(void)
 	cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
 	top_cpuset.effective_mems = node_states[N_MEMORY];
 
-	register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
+	hotplug_memory_notifier(cpuset_track_online_nodes, CPUSET_CALLBACK_PRI);
 
 	cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
 	BUG_ON(!cpuset_migrate_mm_wq);
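As a rough illustration of the pattern the cpuset_attach() hunks above introduce, the standalone userspace sketch below computes the "did the effective masks change" flags once and bails out before the per-task walk when nothing changed, with a second early return mirroring the mm-migration skip. This is not kernel code: the names (fake_cpuset, fake_task, fake_attach, update_one_task, NR_FAKE_*) are hypothetical, and plain boolean arrays stand in for cpumask_t/nodemask_t.

/*
 * Minimal sketch of the early-exit optimization in the patch above.
 * All identifiers here are made up for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NR_FAKE_CPUS   8
#define NR_FAKE_NODES  4
#define NR_FAKE_TASKS  3

struct fake_cpuset {
	bool effective_cpus[NR_FAKE_CPUS];	/* stand-in for cpumask_t */
	bool effective_mems[NR_FAKE_NODES];	/* stand-in for nodemask_t */
	bool memory_migrate;			/* stand-in for CS_MEMORY_MIGRATE */
};

struct fake_task {
	int id;
};

/* Stand-in for the expensive per-task update loop in cpuset_attach(). */
static void update_one_task(const struct fake_task *t)
{
	printf("updating cpus/mems/spread flags of task %d\n", t->id);
}

static void fake_attach(const struct fake_cpuset *cs,
			const struct fake_cpuset *oldcs,
			const struct fake_task *tasks, int ntasks)
{
	bool cpus_updated = memcmp(cs->effective_cpus, oldcs->effective_cpus,
				   sizeof(cs->effective_cpus)) != 0;
	bool mems_updated = memcmp(cs->effective_mems, oldcs->effective_mems,
				   sizeof(cs->effective_mems)) != 0;

	/* Fast path: nothing changed, skip the whole per-task iteration. */
	if (!cpus_updated && !mems_updated) {
		printf("no change in effective cpus/mems, skipping update\n");
		return;
	}

	for (int i = 0; i < ntasks; i++)
		update_one_task(&tasks[i]);

	/*
	 * Mirror of the second "goto out" in the patch: the costly mm
	 * migration step is skipped when memory migration is off and
	 * effective_mems did not change.
	 */
	if (!cs->memory_migrate && !mems_updated)
		return;
	printf("would migrate the mm of each threadgroup leader here\n");
}

int main(void)
{
	struct fake_cpuset oldcs = { .effective_cpus = { true, true } };
	struct fake_cpuset cs = oldcs;		/* identical masks */
	const struct fake_task tasks[NR_FAKE_TASKS] = { { 0 }, { 1 }, { 2 } };

	fake_attach(&cs, &oldcs, tasks, NR_FAKE_TASKS);	/* hits the fast path */

	cs.effective_cpus[2] = true;		/* masks now differ */
	fake_attach(&cs, &oldcs, tasks, NR_FAKE_TASKS);	/* walks the tasks */
	return 0;
}

Note that the real patch does a bit more bookkeeping on its fast path: the out: label still updates cpuset_attach_nodemask_to, cs->old_mems_allowed and attach_in_progress, which the sketch omits for brevity.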