Commit 590d6979 authored by Steven Rostedt (VMware), committed by Peter Zijlstra
Browse files

sched: Force the address order of each sched class descriptor



In order to enable a micro-optimization in pick_next_task(), the addresses of
the sched class descriptors must be ordered to match their relative
priorities. That is:

 &idle_sched_class < &fair_sched_class < &rt_sched_class <
 &dl_sched_class < &stop_sched_class

In order to guarantee this order of the sched class descriptors, add each
one into their own data section and force the order in the linker script.

Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/157675913272.349305.8936736338884044103.stgit@localhost.localdomain
parent 87e867b4
Loading
Loading
Loading
Loading
+13 −0
Original line number Diff line number Diff line
@@ -108,6 +108,18 @@
#define SBSS_MAIN .sbss
#endif

/*
 * The order of the sched class addresses is important: the linker
 * placement below determines the relative priority of the sched
 * classes, lowest first:
 *
 *   idle < fair < rt < dl < stop
 *
 * pick_next_task() relies on being able to compare sched class
 * descriptor addresses to compare their priorities.
 */
#define SCHED_DATA				\
	*(__idle_sched_class)			\
	*(__fair_sched_class)			\
	*(__rt_sched_class)			\
	*(__dl_sched_class)			\
	*(__stop_sched_class)

/*
 * Align to a 32 byte boundary equal to the
 * alignment gcc 4.5 uses for a struct
@@ -388,6 +400,7 @@
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
		__start_rodata = .;					\
		*(.rodata) *(.rodata.*)					\
		SCHED_DATA						\
		RO_AFTER_INIT_DATA	/* Read only after init */	\
		. = ALIGN(8);						\
		__start___tracepoints_ptrs = .;				\
+2 −1
Original line number Diff line number Diff line
@@ -2479,7 +2479,8 @@ static void prio_changed_dl(struct rq *rq, struct task_struct *p,
	}
}

const struct sched_class dl_sched_class = {
const struct sched_class dl_sched_class
	__attribute__((section("__dl_sched_class"))) = {
	.next			= &rt_sched_class,
	.enqueue_task		= enqueue_task_dl,
	.dequeue_task		= dequeue_task_dl,
+2 −1
Original line number Diff line number Diff line
@@ -11122,7 +11122,8 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task
/*
 * All the scheduling class methods:
 */
const struct sched_class fair_sched_class = {
const struct sched_class fair_sched_class
	__attribute__((section("__fair_sched_class"))) = {
	.next			= &idle_sched_class,
	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
+2 −1
Original line number Diff line number Diff line
@@ -453,7 +453,8 @@ static void update_curr_idle(struct rq *rq)
/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
const struct sched_class idle_sched_class = {
const struct sched_class idle_sched_class
	__attribute__((section("__idle_sched_class"))) = {
	/* .next is NULL */
	/* no enqueue/yield_task for idle tasks */

+2 −1
Original line number Diff line number Diff line
@@ -2429,7 +2429,8 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
		return 0;
}

const struct sched_class rt_sched_class = {
const struct sched_class rt_sched_class
	__attribute__((section("__rt_sched_class"))) = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
Loading