Commit 9413cd77 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tag 'irq-urgent-2020-07-19' of...

Merge tag 'irq-urgent-2020-07-19' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into master

Pull irq fixes from Thomas Gleixner:
 "Two fixes for the interrupt subsystem:

   - Make the handling of the firmware node consistent and do not free
     the node after the domain has been created successfully. The core
     code stores a pointer to it which can lead to a use after free or
     double free.

     This used to "work" because the pointer was not stored when the
     initial code was written, but at some point later it was required
     to store it. Of course nobody noticed that the existing users break
     that way.

   - Handle affinity setting on inactive interrupts correctly when
     hierarchical irq domains are enabled.

     When interrupts are inactive with the modern hierarchical irqdomain
     design, the interrupt chips are not necessarily in a state where
     affinity changes can be handled. The legacy irq chip design allowed
     this because interrupts are immediately fully initialized at
     allocation time. X86 has a hacky workaround for this, but other
     implementations do not.

     This caused a malfunction on GIC-V3. Instead of playing whack-a-mole
     to find all affected drivers, change the core code to store the
     requested affinity setting and then establish it when the interrupt
     is allocated, which makes the X86 hack go away"

* tag 'irq-urgent-2020-07-19' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  genirq/affinity: Handle affinity setting on inactive interrupts correctly
  irqdomain/treewide: Keep firmware node unconditionally allocated
parents ce20d7bf baedb87d
Loading
Loading
Loading
Loading
+3 −2
Original line number Diff line number Diff line
@@ -627,9 +627,10 @@ static int bridge_probe(struct platform_device *pdev)
		return -ENOMEM;
	domain = irq_domain_create_hierarchy(parent, 0, 8, fn,
					     &bridge_domain_ops, NULL);
	if (!domain) {
		irq_domain_free_fwnode(fn);
	if (!domain)
		return -ENOMEM;
	}

	pci_set_flags(PCI_PROBE_ONLY);

+5 −5
Original line number Diff line number Diff line
@@ -2316,12 +2316,12 @@ static int mp_irqdomain_create(int ioapic)
	ip->irqdomain = irq_domain_create_linear(fn, hwirqs, cfg->ops,
						 (void *)(long)ioapic);

	if (!ip->irqdomain) {
		/* Release fw handle if it was allocated above */
		if (!cfg->dev)
			irq_domain_free_fwnode(fn);

	if (!ip->irqdomain)
		return -ENOMEM;
	}

	ip->irqdomain->parent = parent;

+12 −6
Original line number Diff line number Diff line
@@ -263,13 +263,14 @@ void __init arch_init_msi_domain(struct irq_domain *parent)
		msi_default_domain =
			pci_msi_create_irq_domain(fn, &pci_msi_domain_info,
						  parent);
		irq_domain_free_fwnode(fn);
	}
	if (!msi_default_domain)
	if (!msi_default_domain) {
		irq_domain_free_fwnode(fn);
		pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n");
	else
	} else {
		msi_default_domain->flags |= IRQ_DOMAIN_MSI_NOMASK_QUIRK;
	}
}

#ifdef CONFIG_IRQ_REMAP
static struct irq_chip pci_msi_ir_controller = {
@@ -301,6 +302,7 @@ struct irq_domain *arch_create_remap_msi_irq_domain(struct irq_domain *parent,
	if (!fn)
		return NULL;
	d = pci_msi_create_irq_domain(fn, &pci_msi_ir_domain_info, parent);
	if (!d)
		irq_domain_free_fwnode(fn);
	return d;
}
@@ -364,6 +366,7 @@ static struct irq_domain *dmar_get_irq_domain(void)
	if (fn) {
		dmar_domain = msi_create_irq_domain(fn, &dmar_msi_domain_info,
						    x86_vector_domain);
		if (!dmar_domain)
			irq_domain_free_fwnode(fn);
	}
out:
@@ -489,7 +492,10 @@ struct irq_domain *hpet_create_irq_domain(int hpet_id)
	}

	d = msi_create_irq_domain(fn, domain_info, parent);
	if (!d) {
		irq_domain_free_fwnode(fn);
		kfree(domain_info);
	}
	return d;
}

+5 −18
Original line number Diff line number Diff line
@@ -446,12 +446,10 @@ static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
	trace_vector_activate(irqd->irq, apicd->is_managed,
			      apicd->can_reserve, reserve);

	/* Nothing to do for fixed assigned vectors */
	if (!apicd->can_reserve && !apicd->is_managed)
		return 0;

	raw_spin_lock_irqsave(&vector_lock, flags);
	if (reserve || irqd_is_managed_and_shutdown(irqd))
	if (!apicd->can_reserve && !apicd->is_managed)
		assign_irq_vector_any_locked(irqd);
	else if (reserve || irqd_is_managed_and_shutdown(irqd))
		vector_assign_managed_shutdown(irqd);
	else if (apicd->is_managed)
		ret = activate_managed(irqd);
@@ -709,7 +707,6 @@ int __init arch_early_irq_init(void)
	x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
						   NULL);
	BUG_ON(x86_vector_domain == NULL);
	irq_domain_free_fwnode(fn);
	irq_set_default_host(x86_vector_domain);

	arch_init_msi_domain(x86_vector_domain);
@@ -775,20 +772,10 @@ void lapic_offline(void)
static int apic_set_affinity(struct irq_data *irqd,
			     const struct cpumask *dest, bool force)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int err;

	/*
	 * Core code can call here for inactive interrupts. For inactive
	 * interrupts which use managed or reservation mode there is no
	 * point in going through the vector assignment right now as the
	 * activation will assign a vector which fits the destination
	 * cpumask. Let the core code store the destination mask and be
	 * done with it.
	 */
	if (!irqd_is_activated(irqd) &&
	    (apicd->is_managed || apicd->can_reserve))
		return IRQ_SET_MASK_OK;
	if (WARN_ON_ONCE(!irqd_is_activated(irqd)))
		return -EIO;

	raw_spin_lock(&vector_lock);
	cpumask_and(vector_searchmask, dest, cpu_online_mask);
+2 −1
Original line number Diff line number Diff line
@@ -167,9 +167,10 @@ static struct irq_domain *uv_get_irq_domain(void)
		goto out;

	uv_domain = irq_domain_create_tree(fn, &uv_domain_ops, NULL);
	irq_domain_free_fwnode(fn);
	if (uv_domain)
		uv_domain->parent = x86_vector_domain;
	else
		irq_domain_free_fwnode(fn);
out:
	mutex_unlock(&uv_lock);

Loading