Commit e0e86b11 authored by Linus Torvalds
Browse files

Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull SMP/hotplug updates from Thomas Gleixner:
 "A small set of updates for SMP and CPU hotplug:

   - Abort disabling secondary CPUs in the freezer when a wakeup is
     pending instead of evaluating it only after all CPUs have been
     offlined.

   - Remove the shared annotation for the strict per CPU cfd_data in the
     smp function call core code.

   - Remove the return values of smp_call_function() and on_each_cpu()
     as they are unconditionally 0. Fixup the few callers which actually
     bothered to check the return value"

* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  smp: Remove smp_call_function() and on_each_cpu() return values
  smp: Do not mark call_function_data as shared
  cpu/hotplug: Abort disabling secondary CPUs if wakeup is pending
  cpu/hotplug: Fix notify_cpu_starting() reference in bringup_wait_for_ap()
parents 568521d0 caa75932
Loading
Loading
Loading
Loading
+5 −14
Original line number Diff line number Diff line
@@ -614,8 +614,7 @@ void
smp_imb(void)
{
	/* Must wait other processors to flush their icache before continue. */
	if (on_each_cpu(ipi_imb, NULL, 1))
		printk(KERN_CRIT "smp_imb: timed out\n");
	on_each_cpu(ipi_imb, NULL, 1);
}
EXPORT_SYMBOL(smp_imb);

@@ -630,9 +629,7 @@ flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) {
		printk(KERN_CRIT "flush_tlb_all: timed out\n");
	}
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
@@ -667,9 +664,7 @@ flush_tlb_mm(struct mm_struct *mm)
		}
	}

	if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}
	smp_call_function(ipi_flush_tlb_mm, mm, 1);

	preempt_enable();
}
@@ -720,9 +715,7 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
	data.mm = mm;
	data.addr = addr;

	if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}
	smp_call_function(ipi_flush_tlb_page, &data, 1);

	preempt_enable();
}
@@ -772,9 +765,7 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
		}
	}

	if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
		printk(KERN_CRIT "flush_icache_page: timed out\n");
	}
	smp_call_function(ipi_flush_icache_page, mm, 1);

	preempt_enable();
}
+3 −3
Original line number Diff line number Diff line
@@ -65,7 +65,7 @@ op_axp_setup(void)
	model->reg_setup(&reg, ctr, &sys);

	/* Configure the registers on all cpus.  */
	(void)smp_call_function(model->cpu_setup, &reg, 1);
	smp_call_function(model->cpu_setup, &reg, 1);
	model->cpu_setup(&reg);
	return 0;
}
@@ -86,7 +86,7 @@ op_axp_cpu_start(void *dummy)
static int
op_axp_start(void)
{
	(void)smp_call_function(op_axp_cpu_start, NULL, 1);
	smp_call_function(op_axp_cpu_start, NULL, 1);
	op_axp_cpu_start(NULL);
	return 0;
}
@@ -101,7 +101,7 @@ op_axp_cpu_stop(void *dummy)
static void
op_axp_stop(void)
{
	(void)smp_call_function(op_axp_cpu_stop, NULL, 1);
	smp_call_function(op_axp_cpu_stop, NULL, 1);
	op_axp_cpu_stop(NULL);
}

+2 −4
Original line number Diff line number Diff line
@@ -539,16 +539,14 @@ static void bL_switcher_trace_trigger_cpu(void *__always_unused info)

int bL_switcher_trace_trigger(void)
{
	int ret;

	preempt_disable();

	bL_switcher_trace_trigger_cpu(NULL);
	ret = smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true);
	smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true);

	preempt_enable();

	return ret;
	return 0;
}
EXPORT_SYMBOL_GPL(bL_switcher_trace_trigger);

+2 −10
Original line number Diff line number Diff line
@@ -6390,11 +6390,7 @@ pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
	}

	/* save the current system wide pmu states */
	ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
	if (ret) {
		DPRINT(("on_each_cpu() failed: %d\n", ret));
		goto cleanup_reserve;
	}
	on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);

	/* officially change to the alternate interrupt handler */
	pfm_alt_intr_handler = hdl;
@@ -6421,7 +6417,6 @@ int
pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
{
	int i;
	int ret;

	if (hdl == NULL) return -EINVAL;

@@ -6435,10 +6430,7 @@ pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)

	pfm_alt_intr_handler = NULL;

	ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
	if (ret) {
		DPRINT(("on_each_cpu() failed: %d\n", ret));
	}
	on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);

	for_each_online_cpu(i) {
		pfm_unreserve_session(NULL, 1, i);
+4 −4
Original line number Diff line number Diff line
@@ -121,8 +121,8 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
		atomic_set(&uc_pool->status, 0);
		status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
		if (status || atomic_read(&uc_pool->status))
		smp_call_function(uncached_ipi_visibility, uc_pool, 1);
		if (atomic_read(&uc_pool->status))
			goto failed;
	} else if (status != PAL_VISIBILITY_OK)
		goto failed;
@@ -143,8 +143,8 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
	if (status != PAL_STATUS_SUCCESS)
		goto failed;
	atomic_set(&uc_pool->status, 0);
	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
	if (status || atomic_read(&uc_pool->status))
	smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
	if (atomic_read(&uc_pool->status))
		goto failed;

	/*
Loading