Commit 03b7fad1 authored by Peter Zijlstra
Browse files

sched: Add task_struct pointer to sched_class::set_curr_task



In preparation of further separating pick_next_task() and
set_curr_task() we have to pass the actual task into it, while there,
rename the thing to better pair with put_prev_task().

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Aaron Lu <aaron.lwe@gmail.com>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: mingo@kernel.org
Cc: Phil Auld <pauld@redhat.com>
Cc: Julien Desfossez <jdesfossez@digitalocean.com>
Cc: Nishanth Aravamudan <naravamudan@digitalocean.com>
Link: https://lkml.kernel.org/r/a96d1bcdd716db4a4c5da2fece647a1456c0ed78.1559129225.git.vpillai@digitalocean.com
parent 10e7071b
Loading
Loading
Loading
Loading
+6 −6
Original line number Original line Diff line number Diff line
@@ -1494,7 +1494,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
	if (queued)
	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
	if (running)
	if (running)
		set_curr_task(rq, p);
		set_next_task(rq, p);
}
}


/*
/*
@@ -4325,7 +4325,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
	if (queued)
	if (queued)
		enqueue_task(rq, p, queue_flag);
		enqueue_task(rq, p, queue_flag);
	if (running)
	if (running)
		set_curr_task(rq, p);
		set_next_task(rq, p);


	check_class_changed(rq, p, prev_class, oldprio);
	check_class_changed(rq, p, prev_class, oldprio);
out_unlock:
out_unlock:
@@ -4392,7 +4392,7 @@ void set_user_nice(struct task_struct *p, long nice)
			resched_curr(rq);
			resched_curr(rq);
	}
	}
	if (running)
	if (running)
		set_curr_task(rq, p);
		set_next_task(rq, p);
out_unlock:
out_unlock:
	task_rq_unlock(rq, p, &rf);
	task_rq_unlock(rq, p, &rf);
}
}
@@ -4840,7 +4840,7 @@ change:
		enqueue_task(rq, p, queue_flags);
		enqueue_task(rq, p, queue_flags);
	}
	}
	if (running)
	if (running)
		set_curr_task(rq, p);
		set_next_task(rq, p);


	check_class_changed(rq, p, prev_class, oldprio);
	check_class_changed(rq, p, prev_class, oldprio);


@@ -6042,7 +6042,7 @@ void sched_setnuma(struct task_struct *p, int nid)
	if (queued)
	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
	if (running)
	if (running)
		set_curr_task(rq, p);
		set_next_task(rq, p);
	task_rq_unlock(rq, p, &rf);
	task_rq_unlock(rq, p, &rf);
}
}
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_NUMA_BALANCING */
@@ -6919,7 +6919,7 @@ void sched_move_task(struct task_struct *tsk)
	if (queued)
	if (queued)
		enqueue_task(rq, tsk, queue_flags);
		enqueue_task(rq, tsk, queue_flags);
	if (running)
	if (running)
		set_curr_task(rq, tsk);
		set_next_task(rq, tsk);


	task_rq_unlock(rq, tsk, &rf);
	task_rq_unlock(rq, tsk, &rf);
}
}
+1 −6
Original line number Original line Diff line number Diff line
@@ -1844,11 +1844,6 @@ static void task_fork_dl(struct task_struct *p)
	 */
	 */
}
}


static void set_curr_task_dl(struct rq *rq)
{
	set_next_task_dl(rq, rq->curr);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_SMP


/* Only try algorithms three times */
/* Only try algorithms three times */
@@ -2466,6 +2461,7 @@ const struct sched_class dl_sched_class = {


	.pick_next_task		= pick_next_task_dl,
	.pick_next_task		= pick_next_task_dl,
	.put_prev_task		= put_prev_task_dl,
	.put_prev_task		= put_prev_task_dl,
	.set_next_task		= set_next_task_dl,


#ifdef CONFIG_SMP
#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_dl,
	.select_task_rq		= select_task_rq_dl,
@@ -2476,7 +2472,6 @@ const struct sched_class dl_sched_class = {
	.task_woken		= task_woken_dl,
	.task_woken		= task_woken_dl,
#endif
#endif


	.set_curr_task		= set_curr_task_dl,
	.task_tick		= task_tick_dl,
	.task_tick		= task_tick_dl,
	.task_fork              = task_fork_dl,
	.task_fork              = task_fork_dl,


+14 −3
Original line number Original line Diff line number Diff line
@@ -10150,9 +10150,19 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
 * This routine is mostly called to set cfs_rq->curr field when a task
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 * migrates between groups/classes.
 */
 */
static void set_curr_task_fair(struct rq *rq)
static void set_next_task_fair(struct rq *rq, struct task_struct *p)
{
{
	struct sched_entity *se = &rq->curr->se;
	struct sched_entity *se = &p->se;

#ifdef CONFIG_SMP
	if (task_on_rq_queued(p)) {
		/*
		 * Move the next running task to the front of the list, so our
		 * cfs_tasks list becomes MRU one.
		 */
		list_move(&se->group_node, &rq->cfs_tasks);
	}
#endif


	for_each_sched_entity(se) {
	for_each_sched_entity(se) {
		struct cfs_rq *cfs_rq = cfs_rq_of(se);
		struct cfs_rq *cfs_rq = cfs_rq_of(se);
@@ -10423,7 +10433,9 @@ const struct sched_class fair_sched_class = {
	.check_preempt_curr	= check_preempt_wakeup,
	.check_preempt_curr	= check_preempt_wakeup,


	.pick_next_task		= pick_next_task_fair,
	.pick_next_task		= pick_next_task_fair,

	.put_prev_task		= put_prev_task_fair,
	.put_prev_task		= put_prev_task_fair,
	.set_next_task          = set_next_task_fair,


#ifdef CONFIG_SMP
#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_fair,
	.select_task_rq		= select_task_rq_fair,
@@ -10436,7 +10448,6 @@ const struct sched_class fair_sched_class = {
	.set_cpus_allowed	= set_cpus_allowed_common,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif
#endif


	.set_curr_task          = set_curr_task_fair,
	.task_tick		= task_tick_fair,
	.task_tick		= task_tick_fair,
	.task_fork		= task_fork_fair,
	.task_fork		= task_fork_fair,


+15 −12
Original line number Original line Diff line number Diff line
@@ -374,14 +374,25 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
	resched_curr(rq);
	resched_curr(rq);
}
}


static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}

static void set_next_task_idle(struct rq *rq, struct task_struct *next)
{
	update_idle_core(rq);
	schedstat_inc(rq->sched_goidle);
}

static struct task_struct *
static struct task_struct *
pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
{
	struct task_struct *next = rq->idle;

	put_prev_task(rq, prev);
	put_prev_task(rq, prev);
	update_idle_core(rq);
	set_next_task_idle(rq, next);
	schedstat_inc(rq->sched_goidle);


	return rq->idle;
	return next;
}
}


/*
/*
@@ -397,10 +408,6 @@ dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
	raw_spin_lock_irq(&rq->lock);
	raw_spin_lock_irq(&rq->lock);
}
}


static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}

/*
/*
 * scheduler tick hitting a task of our scheduling class.
 * scheduler tick hitting a task of our scheduling class.
 *
 *
@@ -413,10 +420,6 @@ static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
{
}
}


static void set_curr_task_idle(struct rq *rq)
{
}

static void switched_to_idle(struct rq *rq, struct task_struct *p)
static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
{
	BUG();
	BUG();
@@ -451,13 +454,13 @@ const struct sched_class idle_sched_class = {


	.pick_next_task		= pick_next_task_idle,
	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,
	.put_prev_task		= put_prev_task_idle,
	.set_next_task          = set_next_task_idle,


#ifdef CONFIG_SMP
#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_idle,
	.select_task_rq		= select_task_rq_idle,
	.set_cpus_allowed	= set_cpus_allowed_common,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif
#endif


	.set_curr_task          = set_curr_task_idle,
	.task_tick		= task_tick_idle,
	.task_tick		= task_tick_idle,


	.get_rr_interval	= get_rr_interval_idle,
	.get_rr_interval	= get_rr_interval_idle,
+1 −6
Original line number Original line Diff line number Diff line
@@ -2354,11 +2354,6 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
	}
	}
}
}


static void set_curr_task_rt(struct rq *rq)
{
	set_next_task_rt(rq, rq->curr);
}

static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
{
{
	/*
	/*
@@ -2380,6 +2375,7 @@ const struct sched_class rt_sched_class = {


	.pick_next_task		= pick_next_task_rt,
	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,
	.put_prev_task		= put_prev_task_rt,
	.set_next_task          = set_next_task_rt,


#ifdef CONFIG_SMP
#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_rt,
	.select_task_rq		= select_task_rq_rt,
@@ -2391,7 +2387,6 @@ const struct sched_class rt_sched_class = {
	.switched_from		= switched_from_rt,
	.switched_from		= switched_from_rt,
#endif
#endif


	.set_curr_task          = set_curr_task_rt,
	.task_tick		= task_tick_rt,
	.task_tick		= task_tick_rt,


	.get_rr_interval	= get_rr_interval_rt,
	.get_rr_interval	= get_rr_interval_rt,
Loading