Commit 2f1835df authored by Linus Torvalds

Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq updates from Ingo Molnar:
 "The changes in this cycle were:

   - Remove the irq timings/variance statistics code that tried to
     predict when the next interrupt would occur, which didn't work out
     as hoped and is replaced by another mechanism.

   - This new mechanism is the 'array suffix computation' estimate,
     which is superior to the previous one as it can detect not just a
     single periodic pattern, but independent periodic patterns along a
     log-2 scale of bucketing and exponential moving average. The
     comments are longer than the code - and it works better at
     predicting various complex interrupt patterns from real-world
     devices than the previous estimate.

   - avoid IRQ-work self-IPIs on the local CPU

   - fix work-list corruption in irq_set_affinity_notifier()"

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  irq_work: Do not raise an IPI when queueing work on the local CPU
  genirq/devres: Use struct_size() in devm_kzalloc()
  genirq/timings: Add array suffix computation code
  genirq/timings: Remove variance computation code
  genirq: Prevent use-after-free and work list corruption
parents d90dcc1f 471ba0e6
+1 −2
@@ -220,9 +220,8 @@ devm_irq_alloc_generic_chip(struct device *dev, const char *name, int num_ct,
 			    irq_flow_handler_t handler)
 {
 	struct irq_chip_generic *gc;
-	unsigned long sz = sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);
 
-	gc = devm_kzalloc(dev, sz, GFP_KERNEL);
+	gc = devm_kzalloc(dev, struct_size(gc, chip_types, num_ct), GFP_KERNEL);
 	if (gc)
 		irq_init_generic_chip(gc, name, num_ct,
 				      irq_base, reg_base, handler);
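
For context: struct_size(gc, chip_types, num_ct), from <linux/overflow.h>, computes the size of a structure that ends in a flexible array member and saturates to SIZE_MAX if the multiplication or addition would overflow, so the allocation fails cleanly instead of returning an undersized buffer. A minimal user-space sketch of the idea; the types and the demo_struct_size() helper below are stand-ins, not the kernel's:

#include <stddef.h>
#include <stdint.h>

struct elem { int data; };		/* stand-in for struct irq_chip_type */
struct container {			/* stand-in for struct irq_chip_generic */
	unsigned int num_elems;
	struct elem elems[];		/* flexible array member */
};

/* Roughly what struct_size(ptr, elems, n) evaluates to: the base size plus
 * n array elements, saturating to SIZE_MAX when the math would overflow. */
static size_t demo_struct_size(size_t n)
{
	size_t base = sizeof(struct container);
	size_t elem = sizeof(struct elem);

	if (n != 0 && elem > (SIZE_MAX - base) / n)
		return SIZE_MAX;
	return base + n * elem;
}

A SIZE_MAX request simply makes devm_kzalloc() fail, and the existing if (gc) check in the hunk above already handles that case.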
+3 −1
@@ -357,8 +357,10 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
 	desc->affinity_notify = notify;
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
-	if (old_notify)
+	if (old_notify) {
 		cancel_work_sync(&old_notify->work);
+		kref_put(&old_notify->kref, old_notify->release);
+	}
 
 	return 0;
 }
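
The shape of this fix is the usual replace-then-release pattern: publish the new notifier under the descriptor lock, then, outside the lock, cancel any work the old notifier still has queued and only afterwards drop the reference that was taken when it was registered. A hedged, self-contained sketch of that pattern with made-up types (not the kernel's irq descriptor code):

#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

struct demo_notify {
	struct kref		kref;
	struct work_struct	work;
	void (*release)(struct kref *kref);
};

static struct demo_notify *demo_current;
static DEFINE_SPINLOCK(demo_lock);

static void demo_set_notifier(struct demo_notify *notify)
{
	struct demo_notify *old;
	unsigned long flags;

	if (notify)
		kref_get(&notify->kref);	/* hold a reference while registered */

	spin_lock_irqsave(&demo_lock, flags);
	old = demo_current;
	demo_current = notify;			/* publish under the lock */
	spin_unlock_irqrestore(&demo_lock, flags);

	if (old) {
		cancel_work_sync(&old->work);		/* no stale work left queued */
		kref_put(&old->kref, old->release);	/* drop the registration ref */
	}
}

Cancelling before the kref_put() matters: if the reference were dropped first, a still-queued work item could run on, or corrupt the work list through, freed memory.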
+363 −159
(changes to this file collapsed: preview size limit exceeded)
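
The collapsed file carries the new estimator described in the pull message above. Since its diff is not visible here, the following is only a toy illustration (not the kernel's actual code) of the two ingredients Ingo mentions: intervals grouped on a log-2 scale of buckets, each tracked with an exponential moving average. All names are hypothetical:

#include <stdint.h>

#define DEMO_NR_BUCKETS	16	/* log2 buckets by interval magnitude */
#define DEMO_EMA_SHIFT	3	/* new sample weighted 1/8 into the average */

static uint64_t demo_ema[DEMO_NR_BUCKETS];	/* per-bucket average interval */
static uint64_t demo_hits[DEMO_NR_BUCKETS];	/* samples seen per bucket */

/* Crude log-2 bucketing: index by the position of the highest set bit. */
static unsigned int demo_bucket(uint64_t interval_ns)
{
	unsigned int idx = 0;

	while (interval_ns >>= 1)
		idx++;
	return idx < DEMO_NR_BUCKETS ? idx : DEMO_NR_BUCKETS - 1;
}

/* Record one observed inter-interrupt interval. */
static void demo_record(uint64_t interval_ns)
{
	unsigned int idx = demo_bucket(interval_ns);

	if (!demo_hits[idx])
		demo_ema[idx] = interval_ns;
	else
		demo_ema[idx] = demo_ema[idx] - (demo_ema[idx] >> DEMO_EMA_SHIFT)
				+ (interval_ns >> DEMO_EMA_SHIFT);
	demo_hits[idx]++;
}

/* Predict the next interval from the busiest bucket, so independent periodic
 * sources landing in different buckets do not drag one global estimate around. */
static uint64_t demo_predict(void)
{
	unsigned int i, best = 0;

	for (i = 1; i < DEMO_NR_BUCKETS; i++)
		if (demo_hits[i] > demo_hits[best])
			best = i;
	return demo_hits[best] ? demo_ema[best] : 0;
}

The actual kernel code is considerably more involved (hence the comments being longer than the code, as the pull message notes); the point of the sketch is only why per-bucket averages can track several periodic patterns where a single global variance cannot.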

+42 −33
@@ -56,6 +56,36 @@ void __weak arch_irq_work_raise(void)
 	 */
 }
 
+/* Enqueue on current CPU, work must already be claimed and preempt disabled */
+static void __irq_work_queue_local(struct irq_work *work)
+{
+	/* If the work is "lazy", handle it from next tick if any */
+	if (work->flags & IRQ_WORK_LAZY) {
+		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
+		    tick_nohz_tick_stopped())
+			arch_irq_work_raise();
+	} else {
+		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
+			arch_irq_work_raise();
+	}
+}
+
+/* Enqueue the irq work @work on the current CPU */
+bool irq_work_queue(struct irq_work *work)
+{
+	/* Only queue if not already pending */
+	if (!irq_work_claim(work))
+		return false;
+
+	/* Queue the entry and raise the IPI if needed. */
+	preempt_disable();
+	__irq_work_queue_local(work);
+	preempt_enable();
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(irq_work_queue);
+
 /*
  * Enqueue the irq_work @work on @cpu unless it's already pending
  * somewhere.
@@ -64,53 +94,32 @@ void __weak arch_irq_work_raise(void)
  */
 bool irq_work_queue_on(struct irq_work *work, int cpu)
 {
+#ifndef CONFIG_SMP
+	return irq_work_queue(work);
+
+#else /* CONFIG_SMP: */
 	/* All work should have been flushed before going offline */
 	WARN_ON_ONCE(cpu_is_offline(cpu));
 
-#ifdef CONFIG_SMP
-
-	/* Arch remote IPI send/receive backend aren't NMI safe */
-	WARN_ON_ONCE(in_nmi());
-
 	/* Only queue if not already pending */
 	if (!irq_work_claim(work))
 		return false;
 
-	if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
-		arch_send_call_function_single_ipi(cpu);
-
-#else /* #ifdef CONFIG_SMP */
-	irq_work_queue(work);
-#endif /* #else #ifdef CONFIG_SMP */
-
-	return true;
-}
-
-/* Enqueue the irq work @work on the current CPU */
-bool irq_work_queue(struct irq_work *work)
-{
-	/* Only queue if not already pending */
-	if (!irq_work_claim(work))
-		return false;
-
-	/* Queue the entry and raise the IPI if needed. */
 	preempt_disable();
-
-	/* If the work is "lazy", handle it from next tick if any */
-	if (work->flags & IRQ_WORK_LAZY) {
-		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
-		    tick_nohz_tick_stopped())
-			arch_irq_work_raise();
+	if (cpu != smp_processor_id()) {
+		/* Arch remote IPI send/receive backend aren't NMI safe */
+		WARN_ON_ONCE(in_nmi());
+		if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
+			arch_send_call_function_single_ipi(cpu);
 	} else {
-		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
-			arch_irq_work_raise();
+		__irq_work_queue_local(work);
 	}
-
 	preempt_enable();
 
 	return true;
+#endif /* CONFIG_SMP */
 }
-EXPORT_SYMBOL_GPL(irq_work_queue);
+EXPORT_SYMBOL_GPL(irq_work_queue_on);
 
 bool irq_work_needs_cpu(void)
 {
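
To see what callers gain from the irq_work change: irq_work_queue_on() now routes the cpu == smp_processor_id() case through __irq_work_queue_local(), the same path irq_work_queue() uses, instead of unconditionally sending arch_send_call_function_single_ipi() to the current CPU. A hypothetical caller (not part of this commit) that queues work onto whichever CPU owns a per-CPU structure might look like this:

#include <linux/irq_work.h>
#include <linux/smp.h>

struct demo_queue {
	int		owner_cpu;
	struct irq_work	cleanup;
};

static void demo_cleanup_fn(struct irq_work *work)
{
	/* runs on the owner CPU from irq_work context */
}

static void demo_queue_init(struct demo_queue *q, int cpu)
{
	q->owner_cpu = cpu;
	init_irq_work(&q->cleanup, demo_cleanup_fn);
}

static void demo_queue_kick(struct demo_queue *q)
{
	/*
	 * No-op if the work is already pending. When owner_cpu happens to be
	 * the current CPU, this no longer raises a self-IPI; the work lands
	 * on the local raised list and is flushed like any other locally
	 * queued irq_work.
	 */
	irq_work_queue_on(&q->cleanup, q->owner_cpu);
}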