Commit 5389e239 authored by David S. Miller
Browse files

Merge branch 'sparc64-Use-low-latency-path-to-resume-idle-cpu'



Vijay Kumar says:

====================
sparc64: Use low latency path to resume idle cpu

CPU_POKE is a low latency path to resume the target cpu if suspended
using CPU_YIELD. Use CPU_POKE to resume cpu if supported by hypervisor.

	     Hackbench results (lower is better):
Number of
Process:		w/o fix		with fix
1  			0.012		 0.010
10			0.021		 0.019
100			0.151		 0.148

Changelog:
v2:
  - Fixed comments and spacing (2/2)
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 99274b81 8536e02e
Loading
Loading
Loading
Loading
+18 −0
Original line number Diff line number Diff line
@@ -298,6 +298,24 @@ unsigned long sun4v_cpu_stop(unsigned long cpuid);
unsigned long sun4v_cpu_yield(void);
#endif

/* cpu_poke()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_CPU_POKE
 * ARG0:	cpuid
 * RET0:	status
 * ERRORS:	ENOCPU		cpuid refers to a CPU that does not exist
 *		EINVAL		cpuid is current CPU
 *
 * Poke CPU cpuid. If the target CPU is currently suspended having
 * invoked the cpu-yield service, that vCPU will be resumed.
 * Poke interrupts may only be sent to valid, non-local CPUs.
 * It is not legal to poke the current vCPU.
 */
#define HV_FAST_CPU_POKE                0x13

#ifndef __ASSEMBLY__
unsigned long sun4v_cpu_poke(unsigned long cpuid);
#endif

/* cpu_qconf()
 * TRAP:	HV_FAST_TRAP
 * FUNCTION:	HV_FAST_CPU_QCONF
+5 −0
Original line number Diff line number Diff line
@@ -33,6 +33,9 @@
DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
extern cpumask_t cpu_core_map[NR_CPUS];

/* CPU poke (fast idle-resume) support.
 * smp_init_cpu_poke():	one-time setup of cpu_poke support
 *			(presumably probes hypervisor support — confirm in smp.c).
 * scheduler_poke():	called after cpu_yield returns in the idle loop to
 *			explicitly run scheduler_ipi() when the wakeup was
 *			delivered via cpu_poke rather than a normal IPI.
 */
void smp_init_cpu_poke(void);
void scheduler_poke(void);

void arch_send_call_function_single_ipi(int cpu);
void arch_send_call_function_ipi_mask(const struct cpumask *mask);

@@ -74,6 +77,8 @@ void __cpu_die(unsigned int cpu);
/* Uniprocessor build: the SMP-only hooks compile away to no-ops. */
#define smp_fetch_global_regs() do { } while (0)
#define smp_fetch_global_pmu() do { } while (0)
#define smp_fill_in_cpu_possible_map() do { } while (0)
#define smp_init_cpu_poke() do { } while (0)
#define scheduler_poke() do { } while (0)

#endif /* !(CONFIG_SMP) */

+1 −1
Original line number Diff line number Diff line
@@ -189,7 +189,7 @@ void __init sun4v_hvapi_init(void)

	group = HV_GRP_CORE;
	major = 1;
	minor = 1;
	minor = 6;
	if (sun4v_hvapi_register(group, major, &minor))
		goto bad;

+11 −0
Original line number Diff line number Diff line
@@ -106,6 +106,17 @@ ENTRY(sun4v_cpu_yield)
	 nop
ENDPROC(sun4v_cpu_yield)

	/* sun4v_cpu_poke() - thin wrapper around the HV_FAST_CPU_POKE
	 * hypervisor fast trap.
	 *
	 * %o0: cpuid (target CPU; per the API, must not be the local vCPU)
	 *
	 * returns %o0:	status (hypervisor status code; ENOCPU/EINVAL on error
	 *		per the cpu_poke API description)
	 */
ENTRY(sun4v_cpu_poke)
	mov     HV_FAST_CPU_POKE, %o5	/* function number goes in %o5 */
	ta      HV_FAST_TRAP		/* trap into the hypervisor */
	retl
	 nop				/* branch delay slot */
ENDPROC(sun4v_cpu_poke)

	/* %o0:	type
	 * %o1:	queue paddr
	 * %o2:	num queue entries
+6 −1
Original line number Diff line number Diff line
@@ -77,8 +77,13 @@ void arch_cpu_idle(void)
			: "=&r" (pstate)
			: "i" (PSTATE_IE));

		if (!need_resched() && !cpu_is_offline(smp_processor_id()))
		if (!need_resched() && !cpu_is_offline(smp_processor_id())) {
			sun4v_cpu_yield();
			/* If resumed by cpu_poke then we need to explicitly
			 * call scheduler_ipi().
			 */
			scheduler_poke();
		}

		/* Re-enable interrupts. */
		__asm__ __volatile__(
Loading