Commit df217913 authored by Vincent Guittot, committed by Ingo Molnar

sched/fair: Factorize attach/detach entity



Factorize post_init_entity_util_avg() and part of attach_task_cfs_rq()
into one function, attach_entity_cfs_rq().

Create a symmetric detach_entity_cfs_rq() function.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Morten.Rasmussen@arm.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bsegall@google.com
Cc: kernellwp@gmail.com
Cc: pjt@google.com
Cc: yuyang.du@intel.com
Link: http://lkml.kernel.org/r/1478598827-32372-2-git-send-email-vincent.guittot@linaro.org


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 893c5d22
1 file changed, 31 insertions(+), 22 deletions(-)
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -701,9 +701,7 @@ void init_entity_runnable_average(struct sched_entity *se)
 }
 
 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
-static int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq);
-static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force);
-static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se);
+static void attach_entity_cfs_rq(struct sched_entity *se);
 
 /*
  * With new tasks being created, their initial util_avgs are extrapolated
@@ -735,7 +733,6 @@ void post_init_entity_util_avg(struct sched_entity *se)
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 	struct sched_avg *sa = &se->avg;
 	long cap = (long)(SCHED_CAPACITY_SCALE - cfs_rq->avg.util_avg) / 2;
-	u64 now = cfs_rq_clock_task(cfs_rq);
 
 	if (cap > 0) {
 		if (cfs_rq->avg.util_avg != 0) {
@@ -763,14 +760,12 @@ void post_init_entity_util_avg(struct sched_entity *se)
 			 * such that the next switched_to_fair() has the
 			 * expected state.
 			 */
-			se->avg.last_update_time = now;
+			se->avg.last_update_time = cfs_rq_clock_task(cfs_rq);
 			return;
 		}
 	}
 
-	update_cfs_rq_load_avg(now, cfs_rq, false);
-	attach_entity_load_avg(cfs_rq, se);
-	update_tg_load_avg(cfs_rq, false);
+	attach_entity_cfs_rq(se);
 }
 
 #else /* !CONFIG_SMP */
@@ -8783,30 +8778,19 @@ static inline bool vruntime_normalized(struct task_struct *p)
 	return false;
 }
 
-static void detach_task_cfs_rq(struct task_struct *p)
+static void detach_entity_cfs_rq(struct sched_entity *se)
 {
-	struct sched_entity *se = &p->se;
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 	u64 now = cfs_rq_clock_task(cfs_rq);
 
-	if (!vruntime_normalized(p)) {
-		/*
-		 * Fix up our vruntime so that the current sleep doesn't
-		 * cause 'unlimited' sleep bonus.
-		 */
-		place_entity(cfs_rq, se, 0);
-		se->vruntime -= cfs_rq->min_vruntime;
-	}
-
 	/* Catch up with the cfs_rq and remove our load when we leave */
 	update_cfs_rq_load_avg(now, cfs_rq, false);
 	detach_entity_load_avg(cfs_rq, se);
 	update_tg_load_avg(cfs_rq, false);
 }
 
-static void attach_task_cfs_rq(struct task_struct *p)
+static void attach_entity_cfs_rq(struct sched_entity *se)
 {
-	struct sched_entity *se = &p->se;
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 	u64 now = cfs_rq_clock_task(cfs_rq);
 
@@ -8818,10 +8802,35 @@ static void attach_task_cfs_rq(struct task_struct *p)
 	se->depth = se->parent ? se->parent->depth + 1 : 0;
 #endif
 
-	/* Synchronize task with its cfs_rq */
+	/* Synchronize entity with its cfs_rq */
 	update_cfs_rq_load_avg(now, cfs_rq, false);
 	attach_entity_load_avg(cfs_rq, se);
 	update_tg_load_avg(cfs_rq, false);
+}
+
+static void detach_task_cfs_rq(struct task_struct *p)
+{
+	struct sched_entity *se = &p->se;
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+	if (!vruntime_normalized(p)) {
+		/*
+		 * Fix up our vruntime so that the current sleep doesn't
+		 * cause 'unlimited' sleep bonus.
+		 */
+		place_entity(cfs_rq, se, 0);
+		se->vruntime -= cfs_rq->min_vruntime;
+	}
+
+	detach_entity_cfs_rq(se);
+}
+
+static void attach_task_cfs_rq(struct task_struct *p)
+{
+	struct sched_entity *se = &p->se;
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+	attach_entity_cfs_rq(se);
 
 	if (!vruntime_normalized(p))
 		se->vruntime += cfs_rq->min_vruntime;
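
To make the resulting shape of the code easier to see outside the diff context, below is a minimal, compilable sketch of the factorization this patch introduces: the PELT attach/detach work lives in the entity-level helpers, and the task-level wrappers only add the vruntime (de)normalization around them. All struct layouts and helper bodies here are simplified stand-ins for the kernel's real definitions (e.g. the my_cfs_rq field, the puts() tracing, the empty place_entity(), and the dropped now/update_freq arguments are illustrative only), so treat this as a sketch of the call structure, not as kernel code.

/*
 * Minimal sketch of the post-patch call structure. All types and
 * helpers are illustrative stubs, not the kernel's real definitions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cfs_rq { uint64_t min_vruntime; };
struct sched_entity { uint64_t vruntime; struct cfs_rq *my_cfs_rq; };
struct task_struct { struct sched_entity se; };

static struct cfs_rq *cfs_rq_of(struct sched_entity *se) { return se->my_cfs_rq; }

/* Stubs standing in for the PELT helpers the patch calls. */
static void update_cfs_rq_load_avg(struct cfs_rq *cfs_rq) { (void)cfs_rq; puts("update_cfs_rq_load_avg"); }
static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { (void)cfs_rq; (void)se; puts("attach_entity_load_avg"); }
static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { (void)cfs_rq; (void)se; puts("detach_entity_load_avg"); }
static void update_tg_load_avg(struct cfs_rq *cfs_rq) { (void)cfs_rq; puts("update_tg_load_avg"); }
static bool vruntime_normalized(struct task_struct *p) { (void)p; return false; }
static void place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) { (void)cfs_rq; (void)se; }

/* Entity-level helpers: pure load tracking, usable for any sched_entity. */
static void attach_entity_cfs_rq(struct sched_entity *se)
{
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	update_cfs_rq_load_avg(cfs_rq);
	attach_entity_load_avg(cfs_rq, se);
	update_tg_load_avg(cfs_rq);
}

static void detach_entity_cfs_rq(struct sched_entity *se)
{
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	update_cfs_rq_load_avg(cfs_rq);
	detach_entity_load_avg(cfs_rq, se);
	update_tg_load_avg(cfs_rq);
}

/* Task-level wrappers: only the vruntime fixup is task-specific. */
static void detach_task_cfs_rq(struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	if (!vruntime_normalized(p)) {
		place_entity(cfs_rq, se);
		se->vruntime -= cfs_rq->min_vruntime;
	}
	detach_entity_cfs_rq(se);
}

static void attach_task_cfs_rq(struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	attach_entity_cfs_rq(se);
	if (!vruntime_normalized(p))
		se->vruntime += cfs_rq->min_vruntime;
}

int main(void)
{
	struct cfs_rq rq = { .min_vruntime = 0 };
	struct task_struct p = { .se = { .vruntime = 0, .my_cfs_rq = &rq } };

	detach_task_cfs_rq(&p);	/* e.g. a task leaving the fair class */
	attach_task_cfs_rq(&p);	/* e.g. a task (re)joining the fair class */
	return 0;
}

Running the sketch just prints the helper call sequence; the point is that the entity-level helpers carry no task_struct dependency, which is what lets post_init_entity_util_avg() share attach_entity_cfs_rq() with attach_task_cfs_rq().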