Commit 1599a185 authored by Prateek Sood, committed by Tejun Heo
Browse files

cpuset: Make cpuset hotplug synchronous



Convert cpuset_hotplug_workfn() into synchronous call for cpu hotplug
path. For memory hotplug path it still gets queued as a work item.

Since cpuset_hotplug_workfn() can be made synchronous for cpu hotplug
path, it is not required to wait for cpuset hotplug while thawing
processes.

Signed-off-by: Prateek Sood <prsood@codeaurora.org>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent aa24163b
Loading
Loading
Loading
Loading
+0 −6
Original line number Diff line number Diff line
@@ -52,9 +52,7 @@ static inline void cpuset_dec(void)

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_wait_for_hotplug(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
@@ -167,15 +165,11 @@ static inline bool cpusets_enabled(void) { return false; }
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) { }

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_wait_for_hotplug(void) { }

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
+20 −21
Original line number Diff line number Diff line
@@ -2277,15 +2277,8 @@ retry:
	mutex_unlock(&cpuset_mutex);
}

static bool force_rebuild;

void cpuset_force_rebuild(void)
{
	force_rebuild = true;
}

/**
 * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
 * cpuset_hotplug - handle CPU/memory hotunplug for a cpuset
 *
 * This function is called after either CPU or memory configuration has
 * changed and updates cpuset accordingly.  The top_cpuset is always
@@ -2300,7 +2293,7 @@ void cpuset_force_rebuild(void)
 * Note that CPU offlining during suspend is ignored.  We don't modify
 * cpusets across suspend/resume cycles at all.
 */
static void cpuset_hotplug_workfn(struct work_struct *work)
static void cpuset_hotplug(bool use_cpu_hp_lock)
{
	static cpumask_t new_cpus;
	static nodemask_t new_mems;
@@ -2358,25 +2351,31 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
	}

	/* rebuild sched domains if cpus_allowed has changed */
	if (cpus_updated || force_rebuild) {
		force_rebuild = false;
	if (cpus_updated) {
		if (use_cpu_hp_lock)
			rebuild_sched_domains();
		else {
			/* Acquiring cpu_hotplug_lock is not required.
			 * When cpuset_hotplug() is called in hotplug path,
			 * cpu_hotplug_lock is held by the hotplug context
			 * which is waiting for cpuhp_thread_fun to indicate
			 * completion of callback.
			 */
			mutex_lock(&cpuset_mutex);
			rebuild_sched_domains_cpuslocked();
			mutex_unlock(&cpuset_mutex);
		}
	}
}

void cpuset_update_active_cpus(void)
static void cpuset_hotplug_workfn(struct work_struct *work)
{
	/*
	 * We're inside cpu hotplug critical region which usually nests
	 * inside cgroup synchronization.  Bounce actual hotplug processing
	 * to a work item to avoid reverse locking order.
	 */
	schedule_work(&cpuset_hotplug_work);
	cpuset_hotplug(true);
}

void cpuset_wait_for_hotplug(void)
void cpuset_update_active_cpus(void)
{
	flush_work(&cpuset_hotplug_work);
	cpuset_hotplug(false);
}

/*
+0 −2
Original line number Diff line number Diff line
@@ -204,8 +204,6 @@ void thaw_processes(void)
	__usermodehelper_set_disable_depth(UMH_FREEZING);
	thaw_workqueues();

	cpuset_wait_for_hotplug();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		/* No other threads should have PF_SUSPEND_TASK set */
+0 −1
Original line number Diff line number Diff line
@@ -5624,7 +5624,6 @@ static void cpuset_cpu_active(void)
		 * restore the original sched domains by considering the
		 * cpuset configurations.
		 */
		cpuset_force_rebuild();
	}
	cpuset_update_active_cpus();
}