Commit 52103be0 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar
Browse files

smp: Optimize flush_smp_call_function_queue()



The call_single_queue can contain two different kinds of callbacks,
synchronous and asynchronous. The current interrupt handler runs them
in-order, which means that remote CPUs that are waiting for their
synchronous call can be delayed by running asynchronous callbacks.

Rework the interrupt handler to first run the synchronous callbacks.

Signed-off-by: default avatarPeter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: default avatarIngo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20200526161907.836818381@infradead.org
parent 19a1f5ec
Loading
Loading
Loading
Loading
+23 −4
Original line number Diff line number Diff line
@@ -209,9 +209,9 @@ void generic_smp_call_function_single_interrupt(void)
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	struct llist_head *head;
	struct llist_node *entry;
	call_single_data_t *csd, *csd_next;
	struct llist_node *entry, *prev;
	struct llist_head *head;
	static bool warned;

	lockdep_assert_irqs_disabled();
@@ -235,19 +235,38 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
				csd->func);
	}

	/*
	 * First; run all SYNC callbacks, people are waiting for us.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;

		/* Do we wait until *after* callback? */
		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
			if (prev) {
				prev->next = &csd_next->llist;
			} else {
				entry = &csd_next->llist;
			}
			func(info);
			csd_unlock(csd);
		} else {
			prev = &csd->llist;
		}
	}

	/*
	 * Second; run all !SYNC callbacks.
	 */
	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;

		csd_unlock(csd);
		func(info);
	}
	}

	/*
	 * Handle irq works queued remotely by irq_work_queue_on().