Commit 5f2a45fc authored by Peter Zijlstra

sched: Allow put_prev_task() to drop rq->lock

Currently the pick_next_task() loop is convoluted and ugly because of
how it can drop the rq->lock and needs to restart the picking.

For the RT/Deadline classes, it is put_prev_task() where we do
balancing, and we could do this before the picking loop. Make this
possible.
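
In effect, put_prev_task() grows a struct rq_flags argument through which the RT and Deadline methods may unpin, drop and re-take rq->lock. A minimal sketch of the resulting calling convention (illustrative, not verbatim core-scheduler code):

	/*
	 * rf != NULL: the class method may unpin and drop rq->lock,
	 * e.g. to pull RT/DL tasks, but must re-pin it before returning.
	 */
	prev->sched_class->put_prev_task(rq, prev, &rf);

	/*
	 * rf == NULL: balancing is suppressed; rq->lock is held
	 * throughout (see __pick_migrate_task() below).
	 */
	next->sched_class->put_prev_task(rq, next, NULL);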

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: Aaron Lu <aaron.lwe@gmail.com>
Cc: mingo@kernel.org
Cc: Phil Auld <pauld@redhat.com>
Cc: Julien Desfossez <jdesfossez@digitalocean.com>
Cc: Nishanth Aravamudan <naravamudan@digitalocean.com>
Link: https://lkml.kernel.org/r/e4519f6850477ab7f3d257062796e6425ee4ba7c.1559129225.git.vpillai@digitalocean.com
parent 5ba553ef
kernel/sched/core.c (+1 −1)
@@ -6090,7 +6090,7 @@ static struct task_struct *__pick_migrate_task(struct rq *rq)
 	for_each_class(class) {
 		next = class->pick_next_task(rq, NULL, NULL);
 		if (next) {
-			next->sched_class->put_prev_task(rq, next);
+			next->sched_class->put_prev_task(rq, next, NULL);
 			return next;
 		}
 	}
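
Note that __pick_migrate_task() passes NULL for the new argument: it runs from the CPU hot-unplug migration path, where dropping rq->lock would be unsafe, and the rf check in the RT/Deadline methods below turns the pull logic into a no-op in that case.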
kernel/sched/deadline.c (+13 −1)
@@ -1804,13 +1804,25 @@ pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	return p;
 }
 
-static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
+static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
 {
 	update_curr_dl(rq);
 
 	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
 	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_dl_task(rq, p);
+
+	if (rf && !on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
+		/*
+		 * This is OK, because current is on_cpu, which avoids it being
+		 * picked for load-balance and preemption/IRQs are still
+		 * disabled avoiding further scheduler activity on it and we've
+		 * not yet started the picking loop.
+		 */
+		rq_unpin_lock(rq, rf);
+		pull_dl_task(rq);
+		rq_repin_lock(rq, rf);
+	}
 }
 
 /*
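
The rq_unpin_lock()/rq_repin_lock() pair around pull_dl_task() releases and then restores the lockdep pin cookie, since pull_dl_task() may drop and re-acquire rq->lock (via double_lock_balance()); the comment in the hunk explains why this is safe here, before the picking loop has started.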
kernel/sched/fair.c (+1 −1)
@@ -6901,7 +6901,7 @@ idle:
 /*
  * Account for a descheduled task:
  */
-static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
+static void put_prev_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	struct sched_entity *se = &prev->se;
 	struct cfs_rq *cfs_rq;
kernel/sched/idle.c (+1 −1)
@@ -374,7 +374,7 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
 	resched_curr(rq);
 }
 
-static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
+static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 }
 
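The fair and idle classes only grow the extra parameter here, keeping the sched_class method signature uniform; rf is actually used only by the RT and Deadline implementations.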
kernel/sched/rt.c (+13 −1)
@@ -1592,7 +1592,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	return p;
 }
 
-static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
+static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
 {
 	update_curr_rt(rq);
 
@@ -1604,6 +1604,18 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 	 */
 	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
+
+	if (rf && !on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
+		/*
+		 * This is OK, because current is on_cpu, which avoids it being
+		 * picked for load-balance and preemption/IRQs are still
+		 * disabled avoiding further scheduler activity on it and we've
+		 * not yet started the picking loop.
+		 */
+		rq_unpin_lock(rq, rf);
+		pull_rt_task(rq);
+		rq_repin_lock(rq, rf);
+	}
 }
 
 #ifdef CONFIG_SMP
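
put_prev_task_rt() mirrors the Deadline change above. With the pull moved into put_prev_task() and guarded by rf, the core scheduler can do this balancing once, before entering the picking loop, rather than restarting the pick after a lock drop; wiring that up is what this patch makes possible.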