Commit b388fa50 authored by Valentin Schneider, committed by Marc Zyngier
Browse files

Revert "genirq: Add fasteoi IPI flow"



handle_percpu_devid_fasteoi_ipi() has no more users, and
handle_percpu_devid_irq() can do all that it was supposed to do. Get rid of
it.

This reverts commit c5e5ec03.

Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20201109094121.29975-6-valentin.schneider@arm.com
parent a2e042e1
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -647,7 +647,6 @@ static inline int irq_set_parent(int irq, int parent_irq)
 */
extern void handle_level_irq(struct irq_desc *desc);
extern void handle_fasteoi_irq(struct irq_desc *desc);
extern void handle_percpu_devid_fasteoi_ipi(struct irq_desc *desc);
extern void handle_edge_irq(struct irq_desc *desc);
extern void handle_edge_eoi_irq(struct irq_desc *desc);
extern void handle_simple_irq(struct irq_desc *desc);
+0 −27
Original line number Diff line number Diff line
@@ -944,33 +944,6 @@ void handle_percpu_devid_irq(struct irq_desc *desc)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_fasteoi_ipi - Per CPU local IPI handler with per cpu
 *				     dev ids
 * @desc:	the interrupt description structure for this irq
 *
 * The biggest difference with the IRQ version is that the interrupt is
 * EOIed early, as the IPI could result in a context switch, and we need to
 * make sure the IPI can fire again. We also assume that the arch code has
 * registered an action. If not, we are positively doomed.
 */
void handle_percpu_devid_fasteoi_ipi(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	__kstat_incr_irqs_this_cpu(desc);

	/*
	 * EOI before invoking the action: the handler may lead to a
	 * context switch, so acknowledge now to let the IPI fire again.
	 */
	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);

	/*
	 * No NULL check on action here - per the kernel-doc above, the
	 * arch code is assumed to have registered one.
	 */
	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
	trace_irq_handler_exit(irq, action, res);
}

/**
 * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
 *				     dev ids