Commit d479c5a1 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tag 'sched-core-2020-06-02' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler updates from Ingo Molnar:
 "The changes in this cycle are:

   - Optimize the task wakeup CPU selection logic, to improve
     scalability and reduce wakeup latency spikes

   - PELT enhancements

   - CFS bandwidth handling fixes

   - Optimize the wakeup path by removing rq->wake_list and replacing it
     with ->ttwu_pending

   - Optimize IPI cross-calls by making flush_smp_call_function_queue()
     process sync callbacks first.

   - Misc fixes and enhancements"

* tag 'sched-core-2020-06-02' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (36 commits)
  irq_work: Define irq_work_single() on !CONFIG_IRQ_WORK too
  sched/headers: Split out open-coded prototypes into kernel/sched/smp.h
  sched: Replace rq::wake_list
  sched: Add rq::ttwu_pending
  irq_work, smp: Allow irq_work on call_single_queue
  smp: Optimize send_call_function_single_ipi()
  smp: Move irq_work_run() out of flush_smp_call_function_queue()
  smp: Optimize flush_smp_call_function_queue()
  sched: Fix smp_call_function_single_async() usage for ILB
  sched/core: Offload wakee task activation if the wakee is descheduling
  sched/core: Optimize ttwu() spinning on p->on_cpu
  sched: Defend cfs and rt bandwidth quota against overflow
  sched/cpuacct: Fix charge cpuacct.usage_sys
  sched/fair: Replace zero-length array with flexible-array
  sched/pelt: Sync util/runnable_sum with PELT window when propagating
  sched/cpuacct: Use __this_cpu_add() instead of this_cpu_ptr()
  sched/fair: Optimize enqueue_task_fair()
  sched: Make scheduler_ipi inline
  sched: Clean up scheduler_ipi()
  sched/core: Simplify sched_init()
  ...
parents f6aee505 25de110d
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -167,7 +167,6 @@ static void pnv_smp_cpu_kill_self(void)
	/* Standard hot unplug procedure */

	idle_task_exit();
	current->active_mm = NULL; /* for sanity */
	cpu = smp_processor_id();
	DBG("CPU%d offline\n", cpu);
	generic_set_cpu_dead(cpu);
+8 −1
Original line number Diff line number Diff line
@@ -13,6 +13,8 @@
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 */

/* flags share CSD_FLAG_ space */

#define IRQ_WORK_PENDING	BIT(0)
#define IRQ_WORK_BUSY		BIT(1)

@@ -23,9 +25,12 @@

#define IRQ_WORK_CLAIMED	(IRQ_WORK_PENDING | IRQ_WORK_BUSY)

/*
 * structure shares layout with single_call_data_t.
 */
struct irq_work {
	atomic_t flags;
	struct llist_node llnode;
	atomic_t flags;
	void (*func)(struct irq_work *);
};

@@ -53,9 +58,11 @@ void irq_work_sync(struct irq_work *work);

void irq_work_run(void);
bool irq_work_needs_cpu(void);
void irq_work_single(void *arg);
#else
static inline bool irq_work_needs_cpu(void) { return false; }
static inline void irq_work_run(void) { }
static inline void irq_work_single(void *arg) { }
#endif

#endif /* _LINUX_IRQ_WORK_H */
+10 −1
Original line number Diff line number Diff line
@@ -654,6 +654,7 @@ struct task_struct {

#ifdef CONFIG_SMP
	struct llist_node		wake_entry;
	unsigned int			wake_entry_type;
	int				on_cpu;
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* Current CPU: */
@@ -1730,7 +1731,15 @@ extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
})

#ifdef CONFIG_SMP
void scheduler_ipi(void);
static __always_inline void scheduler_ipi(void)
{
	/*
	 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
	 * TIF_NEED_RESCHED remotely (for the first time) will also send
	 * this IPI.
	 */
	preempt_fold_need_resched();
}
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
+2 −0
Original line number Diff line number Diff line
@@ -49,6 +49,8 @@ static inline void mmdrop(struct mm_struct *mm)
		__mmdrop(mm);
}

void mmdrop(struct mm_struct *mm);

/*
 * This has to be called after a get_task_mm()/mmget_not_zero()
 * followed by taking the mmap_sem for writing before modifying the
+14 −15
Original line number Diff line number Diff line
@@ -11,21 +11,20 @@
 */
#ifdef CONFIG_SMP

#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
#define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
#define SD_ASYM_CPUCAPACITY	0x0040  /* Domain members have different CPU capacities */
#define SD_SHARE_CPUCAPACITY	0x0080	/* Domain members share CPU capacity */
#define SD_SHARE_POWERDOMAIN	0x0100	/* Domain members share power domain */
#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share CPU pkg resources */
#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
#define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
#define SD_NUMA			0x4000	/* cross-node balancing */
#define SD_BALANCE_NEWIDLE	0x0001	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		0x0002	/* Balance on exec */
#define SD_BALANCE_FORK		0x0004	/* Balance on fork, clone */
#define SD_BALANCE_WAKE		0x0008  /* Balance on wakeup */
#define SD_WAKE_AFFINE		0x0010	/* Wake task to waking CPU */
#define SD_ASYM_CPUCAPACITY	0x0020  /* Domain members have different CPU capacities */
#define SD_SHARE_CPUCAPACITY	0x0040	/* Domain members share CPU capacity */
#define SD_SHARE_POWERDOMAIN	0x0080	/* Domain members share power domain */
#define SD_SHARE_PKG_RESOURCES	0x0100	/* Domain members share CPU pkg resources */
#define SD_SERIALIZE		0x0200	/* Only a single load balancing instance */
#define SD_ASYM_PACKING		0x0400  /* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING	0x0800	/* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP		0x1000	/* sched_domains of this level overlap */
#define SD_NUMA			0x2000	/* cross-node balancing */

#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
Loading