Commit cbb104f9 authored by Linus Torvalds

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull misc scheduler fixes from Ingo Molnar:

 - Fix potential deadlock under CONFIG_DEBUG_OBJECTS=y

 - PELT metrics update ordering fix

 - uclamp logic fix

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/uclamp: Fix incorrect condition
  sched/pelt: Fix update of blocked PELT ordering
  sched/core: Avoid spurious lock dependencies
parents 6b27354c 6e1ff077
kernel/sched/core.c  +3 −2
@@ -1065,7 +1065,7 @@ uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
 	 * affecting a valid clamp bucket, the next time it's enqueued,
 	 * it will already see the updated clamp bucket value.
 	 */
-	if (!p->uclamp[clamp_id].active) {
+	if (p->uclamp[clamp_id].active) {
 		uclamp_rq_dec_id(rq, p, clamp_id);
 		uclamp_rq_inc_id(rq, p, clamp_id);
 	}
@@ -6019,10 +6019,11 @@ void init_idle(struct task_struct *idle, int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
+	__sched_fork(0, idle);
+
 	raw_spin_lock_irqsave(&idle->pi_lock, flags);
 	raw_spin_lock(&rq->lock);
 
-	__sched_fork(0, idle);
 	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();
 	idle->flags |= PF_IDLE;
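
Taken together: the first hunk fixes an inverted condition (only a task whose clamp is currently active contributes to the rq bucket and therefore needs the dec/inc refresh when its clamp value changes), and the second moves __sched_fork(), which can allocate under CONFIG_DEBUG_OBJECTS=y via hrtimer_init(), out from under pi_lock and rq->lock. Below is a minimal toy model of the uclamp invariant; it is a sketch, not kernel code, and every name in it is hypothetical:

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model (NOT kernel code; all names hypothetical) of the invariant
 * behind the uclamp fix above: a task's clamp contributes to the
 * rq-wide aggregate only while the task is accounted (active), so a
 * changed clamp value must be dec'd/inc'd only for active tasks.
 */
struct task { bool active; int clamp; };

static int rq_aggregate;	/* stand-in for the rq clamp bucket */

static void rq_dec(struct task *t) { rq_aggregate -= t->clamp; }
static void rq_inc(struct task *t) { rq_aggregate += t->clamp; }

static void update_active(struct task *t, int new_clamp)
{
	if (t->active) {	/* the fixed test; the '!' was the bug */
		rq_dec(t);	/* drop the stale contribution ... */
		t->clamp = new_clamp;
		rq_inc(t);	/* ... and re-add the fresh one */
	} else {
		t->clamp = new_clamp;	/* picked up at next enqueue */
	}
}

int main(void)
{
	struct task a = { .active = true,  .clamp = 10 };
	struct task b = { .active = false, .clamp = 10 };

	rq_inc(&a);		/* only 'a' is enqueued */
	update_active(&a, 20);
	update_active(&b, 20);
	printf("aggregate = %d\n", rq_aggregate);	/* prints 20 */
	return 0;
}

With the inverted test, an enqueued task would keep its stale contribution while a dequeued one would subtract a contribution it never made.
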
kernel/sched/fair.c  +20 −9
@@ -7547,6 +7547,19 @@ static void update_blocked_averages(int cpu)
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
 
+	/*
+	 * update_cfs_rq_load_avg() can call cpufreq_update_util(). Make sure
+	 * that RT, DL and IRQ signals have been updated before updating CFS.
+	 */
+	curr_class = rq->curr->sched_class;
+	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
+	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
+	update_irq_load_avg(rq, 0);
+
+	/* Don't need periodic decay once load/util_avg are null */
+	if (others_have_blocked(rq))
+		done = false;
+
 	/*
 	 * Iterates the task_group tree in a bottom up fashion, see
 	 * list_add_leaf_cfs_rq() for details.
@@ -7574,14 +7587,6 @@ static void update_blocked_averages(int cpu)
 			done = false;
 	}
 
-	curr_class = rq->curr->sched_class;
-	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
-	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
-	update_irq_load_avg(rq, 0);
-	/* Don't need periodic decay once load/util_avg are null */
-	if (others_have_blocked(rq))
-		done = false;
-
 	update_blocked_load_status(rq, !done);
 	rq_unlock_irqrestore(rq, &rf);
 }
@@ -7642,12 +7647,18 @@ static inline void update_blocked_averages(int cpu)
 
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
-	update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
 
+	/*
+	 * update_cfs_rq_load_avg() can call cpufreq_update_util(). Make sure
+	 * that RT, DL and IRQ signals have been updated before updating CFS.
+	 */
 	curr_class = rq->curr->sched_class;
 	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class);
 	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class);
 	update_irq_load_avg(rq, 0);
+
+	update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
+
 	update_blocked_load_status(rq, cfs_rq_has_blocked(cfs_rq) || others_have_blocked(rq));
 	rq_unlock_irqrestore(rq, &rf);
 }
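
The comment added in the hunks above carries the whole rationale: update_cfs_rq_load_avg() can end up in cpufreq_update_util(), and a schedutil-style governor derives its frequency request from the CFS, RT, DL and IRQ signals together, so updating CFS first would let that callback read stale values for the other classes. A toy sketch of the ordering constraint follows, with all names hypothetical rather than kernel API:

#include <stdio.h>

/*
 * Toy illustration (NOT kernel code; names hypothetical) of the
 * ordering enforced by the fair.c change above: the CFS update can
 * fire a frequency-selection callback that reads every class signal,
 * so the RT/DL/IRQ signals must be refreshed first.
 */
static int rt_util, dl_util, irq_util, cfs_util;

static void cpufreq_callback(void)
{
	/* a schedutil-like governor sums all class signals */
	printf("freq chosen from util = %d\n",
	       rt_util + dl_util + irq_util + cfs_util);
}

static void update_cfs(int util)
{
	cfs_util = util;
	cpufreq_callback();	/* may run as part of the CFS update */
}

static void update_rt_dl_irq(int util)
{
	rt_util = dl_util = irq_util = util;	/* no callback on this path */
}

int main(void)
{
	/* fixed order: refresh the other classes first, so the callback
	 * triggered by the CFS update sees fresh values */
	update_rt_dl_irq(3);
	update_cfs(4);		/* callback sees 3 + 3 + 3 + 4 = 13 */
	return 0;
}

The fix therefore hoists the RT/DL/IRQ updates, which per the comment trigger no frequency callback on this path, ahead of the CFS update in both variants of update_blocked_averages().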