Commit cbbfb0ae authored by Linus Torvalds

Merge branch 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 apic updates from Ingo Molnar:
 "Improve the spreading of managed IRQs at allocation time"

* 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  irq/matrix: Spread managed interrupts on allocation
  irq/matrix: Split out the CPU selection code into a helper
parents 42f52e1c 76f99ae5
arch/x86/kernel/apic/vector.c  (+4 −5)
@@ -313,14 +313,13 @@ assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
 	struct apic_chip_data *apicd = apic_chip_data(irqd);
 	int vector, cpu;
 
-	cpumask_and(vector_searchmask, vector_searchmask, affmsk);
-	cpu = cpumask_first(vector_searchmask);
-	if (cpu >= nr_cpu_ids)
-		return -EINVAL;
+	cpumask_and(vector_searchmask, dest, affmsk);
+
 	/* set_affinity might call here for nothing */
 	if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
 		return 0;
-	vector = irq_matrix_alloc_managed(vector_matrix, cpu);
+	vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask,
+					  &cpu);
 	trace_vector_alloc_managed(irqd->irq, vector, vector);
 	if (vector < 0)
 		return vector;
include/linux/irq.h  (+2 −1)
@@ -1151,7 +1151,8 @@ void irq_matrix_offline(struct irq_matrix *m);
 void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace);
 int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
 void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
-int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu);
+int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
+				unsigned int *mapped_cpu);
 void irq_matrix_reserve(struct irq_matrix *m);
 void irq_matrix_remove_reserved(struct irq_matrix *m);
 int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
kernel/irq/matrix.c  (+52 −30)
@@ -124,6 +124,27 @@ static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm,
 	return area;
 }
 
+/* Find the best CPU which has the lowest vector allocation count */
+static unsigned int matrix_find_best_cpu(struct irq_matrix *m,
+					const struct cpumask *msk)
+{
+	unsigned int cpu, best_cpu, maxavl = 0;
+	struct cpumap *cm;
+
+	best_cpu = UINT_MAX;
+
+	for_each_cpu(cpu, msk) {
+		cm = per_cpu_ptr(m->maps, cpu);
+
+		if (!cm->online || cm->available <= maxavl)
+			continue;
+
+		best_cpu = cpu;
+		maxavl = cm->available;
+	}
+	return best_cpu;
+}
+
 /**
  * irq_matrix_assign_system - Assign system wide entry in the matrix
  * @m:		Matrix pointer
@@ -239,11 +260,21 @@ void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)
  * @m:		Matrix pointer
  * @cpu:	On which CPU the interrupt should be allocated
  */
-int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
+int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
+			     unsigned int *mapped_cpu)
 {
-	struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
-	unsigned int bit, end = m->alloc_end;
+	unsigned int bit, cpu, end = m->alloc_end;
+	struct cpumap *cm;
+
+	if (cpumask_empty(msk))
+		return -EINVAL;
+
+	cpu = matrix_find_best_cpu(m, msk);
+	if (cpu == UINT_MAX)
+		return -ENOSPC;
 
+	cm = per_cpu_ptr(m->maps, cpu);
+	end = m->alloc_end;
 	/* Get managed bit which are not allocated */
 	bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
 	bit = find_first_bit(m->scratch_map, end);
@@ -252,6 +283,7 @@ int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
 	set_bit(bit, cm->alloc_map);
 	cm->allocated++;
 	m->total_allocated++;
+	*mapped_cpu = cpu;
 	trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
 	return bit;
 }
@@ -322,37 +354,27 @@ void irq_matrix_remove_reserved(struct irq_matrix *m)
 int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
 		     bool reserved, unsigned int *mapped_cpu)
 {
-	unsigned int cpu, best_cpu, maxavl = 0;
+	unsigned int cpu, bit;
 	struct cpumap *cm;
-	unsigned int bit;
-
-	best_cpu = UINT_MAX;
-	for_each_cpu(cpu, msk) {
-		cm = per_cpu_ptr(m->maps, cpu);
-
-		if (!cm->online || cm->available <= maxavl)
-			continue;
 
-		best_cpu = cpu;
-		maxavl = cm->available;
-	}
+	cpu = matrix_find_best_cpu(m, msk);
+	if (cpu == UINT_MAX)
+		return -ENOSPC;
 
-	if (maxavl) {
-		cm = per_cpu_ptr(m->maps, best_cpu);
-		bit = matrix_alloc_area(m, cm, 1, false);
-		if (bit < m->alloc_end) {
-			cm->allocated++;
-			cm->available--;
-			m->total_allocated++;
-			m->global_available--;
-			if (reserved)
-				m->global_reserved--;
-			*mapped_cpu = best_cpu;
-			trace_irq_matrix_alloc(bit, best_cpu, m, cm);
-			return bit;
-		}
-	}
-	return -ENOSPC;
+	cm = per_cpu_ptr(m->maps, cpu);
+	bit = matrix_alloc_area(m, cm, 1, false);
+	if (bit >= m->alloc_end)
+		return -ENOSPC;
+	cm->allocated++;
+	cm->available--;
+	m->total_allocated++;
+	m->global_available--;
+	if (reserved)
+		m->global_reserved--;
+	*mapped_cpu = cpu;
+	trace_irq_matrix_alloc(bit, cpu, m, cm);
+	return bit;
 }
 
 /**