Commit 7e62dd91 authored by Marc Zyngier

Merge remote-tracking branch 'origin/irq/ipi-as-irq' into irq/irqchip-next



Signed-off-by: Marc Zyngier <maz@kernel.org>
parents 696966ec 8594c3b8
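
This merge pulls the irq/ipi-as-irq work into irq/irqchip-next: 32-bit ARM now selects GENERIC_IRQ_IPI and handles its IPIs as ordinary per-CPU interrupts. The private ipi_irqs[] statistics, __smp_cross_call() and the set_smp_cross_call() registration hook go away; an irqchip driver instead allocates a block of SGIs in an IPI irq_domain and passes the resulting IRQ range to the new set_smp_ipi_range(), which requests them with ipi_handler() and hides them behind IRQ_HIDDEN. A rough sketch of the driver side (not part of this diff; example_smp_init, the ipi_domain argument and the 8-SGI count are illustrative assumptions):

/*
 * Hedged sketch, not from this merge: a converted irqchip driver's
 * SMP init. example_smp_init and ipi_domain are made-up names; the
 * 8-SGI count follows the "at most 8 entries" rule in the enum below.
 */
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/numa.h>
#include <asm/smp.h>

static void example_smp_init(struct irq_domain *ipi_domain)
{
	int base_sgi;

	/* Allocate 8 SGIs as ordinary Linux per-CPU interrupts */
	base_sgi = __irq_domain_alloc_irqs(ipi_domain, -1, 8,
					   NUMA_NO_NODE, NULL,
					   false, NULL);
	if (WARN_ON(base_sgi <= 0))
		return;

	/* Hand the range to the arch code added in this merge */
	set_smp_ipi_range(base_sgi, 8);
}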
arch/arm/Kconfig: +1 −0
@@ -49,6 +49,7 @@ config ARM
	select GENERIC_ARCH_TOPOLOGY if ARM_CPU_TOPOLOGY
	select GENERIC_ATOMIC64 if CPU_V7M || CPU_V6 || !CPU_32v6K || !AEABI
	select GENERIC_CLOCKEVENTS_BROADCAST if SMP
+	select GENERIC_IRQ_IPI if SMP
	select GENERIC_CPU_AUTOPROBE
	select GENERIC_EARLY_IOREMAP
	select GENERIC_IDLE_POLL_SETUP
arch/arm/include/asm/hardirq.h: +0 −17
@@ -6,29 +6,12 @@
#include <linux/threads.h>
#include <asm/irq.h>

-/* number of IPIS _not_ including IPI_CPU_BACKTRACE */
-#define NR_IPI	7
-
typedef struct {
	unsigned int __softirq_pending;
-#ifdef CONFIG_SMP
-	unsigned int ipi_irqs[NR_IPI];
-#endif
} ____cacheline_aligned irq_cpustat_t;

#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */

-#define __inc_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)++
-#define __get_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)
-
-#ifdef CONFIG_SMP
-u64 smp_irq_stat_cpu(unsigned int cpu);
-#else
-#define smp_irq_stat_cpu(cpu)	0
-#endif
-
-#define arch_irq_stat_cpu	smp_irq_stat_cpu
-
#define __ARCH_IRQ_EXIT_IRQS_DISABLED	1

#endif /* __ASM_HARDIRQ_H */
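
With ipi_irqs[] and smp_irq_stat_cpu() gone from hardirq.h, per-IPI counts are kept by the generic IRQ core and read back with kstat_irqs_cpu(), as show_ipi_list() does in the smp.c diff below. A minimal sketch of the equivalent per-CPU sum, assuming access to the ipi_desc[] and nr_ipi added there; ipi_count_cpu is a hypothetical helper, not in the tree:

#include <linux/kernel_stat.h>
#include <linux/irqdesc.h>

/* Hypothetical helper: sum one CPU's IPI counts the post-conversion way */
static u64 ipi_count_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < nr_ipi; i++)
		sum += kstat_irqs_cpu(irq_desc_get_irq(ipi_desc[i]), cpu);

	return sum;
}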
arch/arm/include/asm/smp.h: +2 −3
@@ -39,11 +39,10 @@ void handle_IPI(int ipinr, struct pt_regs *regs);
 */
extern void smp_init_cpus(void);

-
/*
- * Provide a function to raise an IPI cross call on CPUs in callmap.
+ * Register IPI interrupts with the arch SMP code
 */
-extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));
+extern void set_smp_ipi_range(int ipi_base, int nr_ipi);

/*
 * Called from platform specific assembly code, this is the
arch/arm/kernel/irq.c: +0 −1
@@ -18,7 +18,6 @@
 *  IRQ's are in fact implemented a bit like signal handlers for the kernel.
 *  Naturally it's not a 1:1 relation, but there are similarities.
 */
-#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
arch/arm/kernel/smp.c: +90 −46
@@ -26,6 +26,7 @@
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>
+#include <linux/kernel_stat.h>

#include <linux/atomic.h>
#include <asm/bugs.h>
@@ -65,18 +66,27 @@ enum ipi_msg_type {
	IPI_CPU_STOP,
	IPI_IRQ_WORK,
	IPI_COMPLETION,
+	NR_IPI,
	/*
	 * CPU_BACKTRACE is special and not included in NR_IPI
	 * or tracable with trace_ipi_*
	 */
-	IPI_CPU_BACKTRACE,
+	IPI_CPU_BACKTRACE = NR_IPI,
	/*
	 * SGI8-15 can be reserved by secure firmware, and thus may
	 * not be usable by the kernel. Please keep the above limited
	 * to at most 8 entries.
	 */
+	MAX_IPI
};

+static int ipi_irq_base __read_mostly;
+static int nr_ipi __read_mostly = NR_IPI;
+static struct irq_desc *ipi_desc[MAX_IPI] __read_mostly;
+
+static void ipi_setup(int cpu);
+static void ipi_teardown(int cpu);
+
static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops __ro_after_init;
@@ -247,6 +257,7 @@ int __cpu_disable(void)
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);
+	ipi_teardown(cpu);

	/*
	 * OK - migrate IRQs away from this CPU
@@ -422,6 +433,8 @@ asmlinkage void secondary_start_kernel(void)

	notify_cpu_starting(cpu);

+	ipi_setup(cpu);
+
	calibrate_delay();

	smp_store_cpu_info(cpu);
@@ -500,14 +513,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
	}
}

-static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
-
-void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
-{
-	if (!__smp_cross_call)
-		__smp_cross_call = fn;
-}
-
static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_WAKEUP, "CPU wakeup interrupts"),
@@ -519,38 +524,23 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
	S(IPI_COMPLETION, "completion interrupts"),
};

-static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
-{
-	trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
-	__smp_cross_call(target, ipinr);
-}
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
+		unsigned int irq = irq_desc_get_irq(ipi_desc[i]);
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_online_cpu(cpu)
-			seq_printf(p, "%10u ",
-				   __get_irq_stat(cpu, ipi_irqs[i]));
+			seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

-u64 smp_irq_stat_cpu(unsigned int cpu)
-{
-	u64 sum = 0;
-	int i;
-
-	for (i = 0; i < NR_IPI; i++)
-		sum += __get_irq_stat(cpu, ipi_irqs[i]);
-
-	return sum;
-}
-
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
@@ -627,15 +617,12 @@ asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
	handle_IPI(ipinr, regs);
}

-void handle_IPI(int ipinr, struct pt_regs *regs)
+static void do_handle_IPI(int ipinr)
{
	unsigned int cpu = smp_processor_id();
-	struct pt_regs *old_regs = set_irq_regs(regs);

-	if ((unsigned)ipinr < NR_IPI) {
+	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
-		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
-	}

	switch (ipinr) {
	case IPI_WAKEUP:
@@ -643,9 +630,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
-		irq_enter();
		tick_receive_broadcast();
-		irq_exit();
		break;
#endif

@@ -654,36 +639,26 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
		break;

	case IPI_CALL_FUNC:
-		irq_enter();
		generic_smp_call_function_interrupt();
-		irq_exit();
		break;

	case IPI_CPU_STOP:
-		irq_enter();
		ipi_cpu_stop(cpu);
-		irq_exit();
		break;

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
-		irq_enter();
		irq_work_run();
-		irq_exit();
		break;
#endif

	case IPI_COMPLETION:
-		irq_enter();
		ipi_complete(cpu);
-		irq_exit();
		break;

	case IPI_CPU_BACKTRACE:
		printk_nmi_enter();
-		irq_enter();
-		nmi_cpu_backtrace(regs);
-		irq_exit();
+		nmi_cpu_backtrace(get_irq_regs());
		printk_nmi_exit();
		break;

@@ -695,9 +670,78 @@ void handle_IPI(int ipinr, struct pt_regs *regs)

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
+}
+
+/* Legacy version, should go away once all irqchips have been converted */
+void handle_IPI(int ipinr, struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+
+	irq_enter();
+	do_handle_IPI(ipinr);
+	irq_exit();

	set_irq_regs(old_regs);
}

+static irqreturn_t ipi_handler(int irq, void *data)
+{
+	do_handle_IPI(irq - ipi_irq_base);
+	return IRQ_HANDLED;
+}
+
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+	trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
+	__ipi_send_mask(ipi_desc[ipinr], target);
+}
+
+static void ipi_setup(int cpu)
+{
+	int i;
+
+	if (WARN_ON_ONCE(!ipi_irq_base))
+		return;
+
+	for (i = 0; i < nr_ipi; i++)
+		enable_percpu_irq(ipi_irq_base + i, 0);
+}
+
+static void ipi_teardown(int cpu)
+{
+	int i;
+
+	if (WARN_ON_ONCE(!ipi_irq_base))
+		return;
+
+	for (i = 0; i < nr_ipi; i++)
+		disable_percpu_irq(ipi_irq_base + i);
+}
+
+void __init set_smp_ipi_range(int ipi_base, int n)
+{
+	int i;
+
+	WARN_ON(n < MAX_IPI);
+	nr_ipi = min(n, MAX_IPI);
+
+	for (i = 0; i < nr_ipi; i++) {
+		int err;
+
+		err = request_percpu_irq(ipi_base + i, ipi_handler,
+					 "IPI", &irq_stat);
+		WARN_ON(err);
+
+		ipi_desc[i] = irq_to_desc(ipi_base + i);
+		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
+	}
+
+	ipi_irq_base = ipi_base;
+
+	/* Setup the boot CPU immediately */
+	ipi_setup(smp_processor_id());
+}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
@@ -805,7 +849,7 @@ core_initcall(register_cpufreq_notifier);

static void raise_nmi(cpumask_t *mask)
{
-	__smp_cross_call(mask, IPI_CPU_BACKTRACE);
+	__ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)