Commit 040b9d7c authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Three fixes:

   - fix a suspend/resume cpusets bug

   - fix a !CONFIG_NUMA_BALANCING bug

   - fix a kerneldoc warning"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Fix nuisance kernel-doc warning
  sched/cpuset/pm: Fix cpuset vs. suspend-resume bugs
  sched/fair: Fix wake_affine_llc() balancing rules
parents e6328a7a 46123355
Loading
Loading
Loading
Loading
+6 −0
Original line number Diff line number Diff line
@@ -51,7 +51,9 @@ static inline void cpuset_dec(void)

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_wait_for_hotplug(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
@@ -164,11 +166,15 @@ static inline bool cpusets_enabled(void) { return false; }
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

/* No-op stub — presumably the !CONFIG_CPUSETS variant (matches the
 * surrounding stubs such as cpusets_enabled() returning false). */
static inline void cpuset_force_rebuild(void) { }

/*
 * Stub for builds without cpuset support: with no cpusets there is only
 * the single default sched domain, so just rebuild that directly.
 */
static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

/* No-op stub: without cpuset support there is no hotplug work to wait for. */
static inline void cpuset_wait_for_hotplug(void) { }

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
+15 −1
Original line number Diff line number Diff line
@@ -2275,6 +2275,13 @@ retry:
	mutex_unlock(&cpuset_mutex);
}

/*
 * When set, the next cpuset hotplug work run rebuilds the sched domains
 * even if cpus_allowed did not change (consumed and cleared in
 * cpuset_hotplug_workfn()).
 */
static bool force_rebuild;

/*
 * Request an unconditional sched-domain rebuild on the next hotplug
 * work run. Lets the resume path restore cpuset-aware domains after
 * the suspend path collapsed them to a single domain.
 */
void cpuset_force_rebuild(void)
{
	force_rebuild = true;
}

/**
 * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
 *
@@ -2349,9 +2356,11 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
	}

	/* rebuild sched domains if cpus_allowed has changed */
	if (cpus_updated)
	if (cpus_updated || force_rebuild) {
		force_rebuild = false;
		rebuild_sched_domains();
	}
}

void cpuset_update_active_cpus(void)
{
@@ -2363,6 +2372,11 @@ void cpuset_update_active_cpus(void)
	schedule_work(&cpuset_hotplug_work);
}

/*
 * Block until any pending cpuset hotplug work has finished, so callers
 * (e.g. thaw_processes() during resume) observe fully updated cpusets
 * and sched domains before unfreezing tasks.
 */
void cpuset_wait_for_hotplug(void)
{
	flush_work(&cpuset_hotplug_work);
}

/*
 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
 * Call this routine anytime after node_states[N_MEMORY] changes.
+4 −1
Original line number Diff line number Diff line
@@ -20,6 +20,7 @@
#include <linux/workqueue.h>
#include <linux/kmod.h>
#include <trace/events/power.h>
#include <linux/cpuset.h>

/*
 * Timeout for stopping processes
@@ -202,6 +203,8 @@ void thaw_processes(void)
	__usermodehelper_set_disable_depth(UMH_FREEZING);
	thaw_workqueues();

	cpuset_wait_for_hotplug();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		/* No other threads should have PF_SUSPEND_TASK set */
+3 −4
Original line number Diff line number Diff line
@@ -5556,16 +5556,15 @@ static void cpuset_cpu_active(void)
		 * operation in the resume sequence, just build a single sched
		 * domain, ignoring cpusets.
		 */
		num_cpus_frozen--;
		if (likely(num_cpus_frozen)) {
		partition_sched_domains(1, NULL, NULL);
		if (--num_cpus_frozen)
			return;
		}
		/*
		 * This is the last CPU online operation. So fall through and
		 * restore the original sched domains by considering the
		 * cpuset configurations.
		 */
		cpuset_force_rebuild();
	}
	cpuset_update_active_cpus();
}
+2 −2
Original line number Diff line number Diff line
@@ -5424,7 +5424,7 @@ wake_affine_llc(struct sched_domain *sd, struct task_struct *p,
		return false;

	/* if this cache has capacity, come here */
	if (this_stats.has_capacity && this_stats.nr_running < prev_stats.nr_running+1)
	if (this_stats.has_capacity && this_stats.nr_running+1 < prev_stats.nr_running)
		return true;

	/*
@@ -7708,7 +7708,7 @@ next_group:
 * number.
 *
 * Return: 1 when packing is required and a task should be moved to
 * this CPU.  The amount of the imbalance is returned in *imbalance.
 * this CPU.  The amount of the imbalance is returned in env->imbalance.
 *
 * @env: The load balancing environment.
 * @sds: Statistics of the sched_domain which is to be packed