Commit d6c88a50 authored by Thomas Gleixner, committed by Ingo Molnar
Browse files

genirq: revert dynarray



Revert the dynarray changes. They need more thought and polishing.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent ee32c973
Loading
Loading
Loading
Loading
+0 −4
Original line number Diff line number Diff line
@@ -102,7 +102,3 @@ config HAVE_CLK
	help
	  The <linux/clk.h> calls support software clock gating and
	  thus are a key power management tool on many systems.

config HAVE_DYN_ARRAY
	def_bool n
+0 −1
Original line number Diff line number Diff line
@@ -33,7 +33,6 @@ config X86
	select HAVE_ARCH_TRACEHOOK
	select HAVE_GENERIC_DMA_COHERENT if X86_32
	select HAVE_EFFICIENT_UNALIGNED_ACCESS
	select HAVE_DYN_ARRAY

config ARCH_DEFCONFIG
	string
+74 −125
Original line number Diff line number Diff line
@@ -107,7 +107,6 @@ static int __init parse_noapic(char *str)
}
early_param("noapic", parse_noapic);

struct irq_cfg;
struct irq_pin_list;
struct irq_cfg {
	unsigned int irq;
@@ -120,7 +119,7 @@ struct irq_cfg {
};

/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
static struct irq_cfg irq_cfg_legacy[] __initdata = {
static struct irq_cfg irq_cfgx[NR_IRQS] = {
	[0]  = { .irq =  0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR,  },
	[1]  = { .irq =  1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR,  },
	[2]  = { .irq =  2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR,  },
@@ -139,48 +138,26 @@ static struct irq_cfg irq_cfg_legacy[] __initdata = {
	[15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
};

static struct irq_cfg irq_cfg_init = { .irq =  -1U, };

/* Reset one irq_cfg slot to the template value (irq = -1U marks it unused). */
static void init_one_irq_cfg(struct irq_cfg *cfg)
{
	memcpy(cfg, &irq_cfg_init, sizeof(struct irq_cfg));
}

static struct irq_cfg *irq_cfgx;

static void __init init_work(void *data)
{
	struct dyn_array *da = data;
	struct irq_cfg *cfg;
	int legacy_count;
	int i;

	cfg = *da->name;

	memcpy(cfg, irq_cfg_legacy, sizeof(irq_cfg_legacy));

	legacy_count = ARRAY_SIZE(irq_cfg_legacy);
	for (i = legacy_count; i < *da->nr; i++)
		init_one_irq_cfg(&cfg[i]);
}

#define for_each_irq_cfg(irq, cfg)		\
	for (irq = 0, cfg = &irq_cfgx[irq]; irq < nr_irqs; irq++, cfg = &irq_cfgx[irq])
	for (irq = 0, cfg = irq_cfgx; irq < nr_irqs; irq++, cfg++)

DEFINE_DYN_ARRAY(irq_cfgx, sizeof(struct irq_cfg), nr_irqs, PAGE_SIZE, init_work);

struct irq_cfg *irq_cfg(unsigned int irq)
static struct irq_cfg *irq_cfg(unsigned int irq)
{
	if (irq < nr_irqs)
		return &irq_cfgx[irq];

	return NULL;
	return irq < nr_irqs ? irq_cfgx + irq : NULL;
}
struct irq_cfg *irq_cfg_alloc(unsigned int irq)

/*
 * With the static irq_cfgx[NR_IRQS] array there is nothing to allocate:
 * "allocation" is just a bounds-checked lookup via irq_cfg(). Returns
 * NULL when irq >= nr_irqs, same as irq_cfg().
 */
static struct irq_cfg *irq_cfg_alloc(unsigned int irq)
{
	return irq_cfg(irq);
}

/*
 * Rough estimation of how many shared IRQs there are, can be changed
 * anytime.
 */
#define MAX_PLUS_SHARED_IRQS NR_IRQS
#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)

/*
 * This is performance-critical, we want to do it O(1)
 *
@@ -193,59 +170,29 @@ struct irq_pin_list {
	struct irq_pin_list *next;
};

static struct irq_pin_list *irq_2_pin_head;
/* fill one page ? */
static int nr_irq_2_pin = 0x100;
static struct irq_pin_list irq_2_pin_head[PIN_MAP_SIZE];
static struct irq_pin_list *irq_2_pin_ptr;
static void __init irq_2_pin_init_work(void *data)

static void __init irq_2_pin_init(void)
{
	struct dyn_array *da = data;
	struct irq_pin_list *pin;
	struct irq_pin_list *pin = irq_2_pin_head;
	int i;

	pin = *da->name;

	for (i = 1; i < *da->nr; i++)
	for (i = 1; i < PIN_MAP_SIZE; i++)
		pin[i-1].next = &pin[i];

	irq_2_pin_ptr = &pin[0];
}
DEFINE_DYN_ARRAY(irq_2_pin_head, sizeof(struct irq_pin_list), nr_irq_2_pin, PAGE_SIZE, irq_2_pin_init_work);

static struct irq_pin_list *get_one_free_irq_2_pin(void)
{
	struct irq_pin_list *pin;
	int i;

	pin = irq_2_pin_ptr;

	if (pin) {
		irq_2_pin_ptr = pin->next;
		pin->next = NULL;
		return pin;
	}

	/*
	 *  we run out of pre-allocate ones, allocate more
	 */
	printk(KERN_DEBUG "try to get more irq_2_pin %d\n", nr_irq_2_pin);

	if (after_bootmem)
		pin = kzalloc(sizeof(struct irq_pin_list)*nr_irq_2_pin,
				 GFP_ATOMIC);
	else
		pin = __alloc_bootmem_nopanic(sizeof(struct irq_pin_list) *
				nr_irq_2_pin, PAGE_SIZE, 0);
	struct irq_pin_list *pin = irq_2_pin_ptr;

	if (!pin)
		panic("can not get more irq_2_pin\n");

	for (i = 1; i < nr_irq_2_pin; i++)
		pin[i-1].next = &pin[i];

	irq_2_pin_ptr = pin->next;
	pin->next = NULL;

	return pin;
}

@@ -284,6 +231,7 @@ static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned i
static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);

	if (sis_apic_bug)
		writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
@@ -3885,9 +3833,10 @@ static struct resource * __init ioapic_setup_resources(void)
void __init ioapic_init_mappings(void)
{
	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
	int i;
	struct resource *ioapic_res;
	int i;

	irq_2_pin_init();
	ioapic_res = ioapic_setup_resources();
	for (i = 0; i < nr_ioapics; i++) {
		if (smp_found_config) {
+2 −6
Original line number Diff line number Diff line
@@ -140,7 +140,7 @@ static void __init setup_cpu_pda_map(void)
 */
void __init setup_per_cpu_areas(void)
{
	ssize_t size, old_size, da_size;
	ssize_t size, old_size;
	char *ptr;
	int cpu;
	unsigned long align = 1;
@@ -150,9 +150,8 @@ void __init setup_per_cpu_areas(void)

	/* Copy section for each CPU (we discard the original) */
	old_size = PERCPU_ENOUGH_ROOM;
	da_size = per_cpu_dyn_array_size(&align);
	align = max_t(unsigned long, PAGE_SIZE, align);
	size = roundup(old_size + da_size, align);
	size = roundup(old_size, align);
	printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
			  size);

@@ -182,9 +181,6 @@ void __init setup_per_cpu_areas(void)
#endif
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);

		per_cpu_alloc_dyn_array(cpu, ptr + old_size);

	}

	printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n",
+1 −1
Original line number Diff line number Diff line
@@ -633,7 +633,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
	/*
	 * handle this 'virtual interrupt' as a Cobalt one now.
	 */
	kstat_irqs_this_cpu(desc)++;
	kstat_incr_irqs_this_cpu(realirq, desc);

	if (likely(desc->action != NULL))
		handle_IRQ_event(realirq, desc->action);
Loading