Commit 9e91d61d authored by Dietmar Eggemann's avatar Dietmar Eggemann Committed by Ingo Molnar
Browse files

sched/fair: Name utilization related data and functions consistently



Use the advent of the per-entity load tracking rewrite to streamline the
naming of utilization related data and functions by using
{prefix_}util{_suffix} consistently. Moreover call both signals
({se,cfs}.avg.util_avg) utilization.

Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Dietmar Eggemann <Dietmar.Eggemann@arm.com>
Cc: Juri Lelli <Juri.Lelli@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: daniel.lezcano@linaro.org
Cc: mturquette@baylibre.com
Cc: pang.xunlei@zte.com.cn
Cc: rjw@rjwysocki.net
Cc: sgurrappadi@nvidia.com
Cc: vincent.guittot@linaro.org
Cc: yuyang.du@intel.com
Link: http://lkml.kernel.org/r/1439569394-11974-5-git-send-email-morten.rasmussen@arm.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent e3279a2e
Loading
Loading
Loading
Loading
+19 −18
Original line number Diff line number Diff line
@@ -4863,31 +4863,32 @@ done:
	return target;
}
/*
 * get_cpu_usage returns the amount of capacity of a CPU that is used by CFS
 * cpu_util returns the amount of capacity of a CPU that is used by CFS
 * tasks. The unit of the return value must be the one of capacity so we can
 * compare the usage with the capacity of the CPU that is available for CFS
 * task (ie cpu_capacity).
 * compare the utilization with the capacity of the CPU that is available for
 * CFS task (ie cpu_capacity).
 * cfs.avg.util_avg is the sum of running time of runnable tasks on a
 * CPU. It represents the amount of utilization of a CPU in the range
 * [0..SCHED_LOAD_SCALE].  The usage of a CPU can't be higher than the full
 * capacity of the CPU because it's about the running time on this CPU.
 * [0..SCHED_LOAD_SCALE]. The utilization of a CPU can't be higher than the
 * full capacity of the CPU because it's about the running time on this CPU.
 * Nevertheless, cfs.avg.util_avg can be higher than SCHED_LOAD_SCALE
 * because of unfortunate rounding in util_avg or just
 * after migrating tasks until the average stabilizes with the new running
 * time. So we need to check that the usage stays into the range
 * time. So we need to check that the utilization stays into the range
 * [0..cpu_capacity_orig] and cap if necessary.
 * Without capping the usage, a group could be seen as overloaded (CPU0 usage
 * at 121% + CPU1 usage at 80%) whereas CPU1 has 20% of available capacity
 * Without capping the utilization, a group could be seen as overloaded (CPU0
 * utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
 * available capacity.
 */
static int get_cpu_usage(int cpu)
static int cpu_util(int cpu)
{
	unsigned long usage = cpu_rq(cpu)->cfs.avg.util_avg;
	unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
	unsigned long capacity = capacity_orig_of(cpu);

	if (usage >= SCHED_LOAD_SCALE)
	if (util >= SCHED_LOAD_SCALE)
		return capacity;

	return (usage * capacity) >> SCHED_LOAD_SHIFT;
	return (util * capacity) >> SCHED_LOAD_SHIFT;
}

/*
@@ -5979,7 +5980,7 @@ struct sg_lb_stats {
	unsigned long sum_weighted_load; /* Weighted load of group's tasks */
	unsigned long load_per_task;
	unsigned long group_capacity;
	unsigned long group_usage; /* Total usage of the group */
	unsigned long group_util; /* Total utilization of the group */
	unsigned int sum_nr_running; /* Nr tasks running in the group */
	unsigned int idle_cpus;
	unsigned int group_weight;
@@ -6212,8 +6213,8 @@ static inline int sg_imbalanced(struct sched_group *group)
 * group_has_capacity returns true if the group has spare capacity that could
 * be used by some tasks.
 * We consider that a group has spare capacity if the number of task is
 * smaller than the number of CPUs or if the usage is lower than the available
 * capacity for CFS tasks.
 * smaller than the number of CPUs or if the utilization is lower than the
 * available capacity for CFS tasks.
 * For the latter, we use a threshold to stabilize the state, to take into
 * account the variance of the tasks' load and to return true if the available
 * capacity in meaningful for the load balancer.
@@ -6227,7 +6228,7 @@ group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
		return true;

	if ((sgs->group_capacity * 100) >
			(sgs->group_usage * env->sd->imbalance_pct))
			(sgs->group_util * env->sd->imbalance_pct))
		return true;

	return false;
@@ -6248,7 +6249,7 @@ group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
		return false;

	if ((sgs->group_capacity * 100) <
			(sgs->group_usage * env->sd->imbalance_pct))
			(sgs->group_util * env->sd->imbalance_pct))
		return true;

	return false;
@@ -6296,7 +6297,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
			load = source_load(i, load_idx);

		sgs->group_load += load;
		sgs->group_usage += get_cpu_usage(i);
		sgs->group_util += cpu_util(i);
		sgs->sum_nr_running += rq->cfs.h_nr_running;

		if (rq->nr_running > 1)