Commit 164c220f authored by David S. Miller
Browse files

[SPARC64]: Fix hypervisor call arg passing.



Function goes in %o5, args go in %o0 --> %o5.

Signed-off-by: David S. Miller <davem@davemloft.net>
parent dedacf62
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -863,10 +863,10 @@ void init_irqwork_curcpu(void)

static void __cpuinit init_one_mondo(unsigned long *pa_ptr, unsigned long type)
{
	register unsigned long func __asm__("%o0");
	register unsigned long arg0 __asm__("%o1");
	register unsigned long arg1 __asm__("%o2");
	register unsigned long arg2 __asm__("%o3");
	register unsigned long func __asm__("%o5");
	register unsigned long arg0 __asm__("%o0");
	register unsigned long arg1 __asm__("%o1");
	register unsigned long arg2 __asm__("%o2");
	unsigned long page = get_zeroed_page(GFP_ATOMIC);

	if (!page) {
+8 −8
Original line number Diff line number Diff line
@@ -572,10 +572,10 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t
	retries = 0;
	cnt = init_cpu_list(cpu_list, mask);
	do {
		register unsigned long func __asm__("%o0");
		register unsigned long arg0 __asm__("%o1");
		register unsigned long arg1 __asm__("%o2");
		register unsigned long arg2 __asm__("%o3");
		register unsigned long func __asm__("%o5");
		register unsigned long arg0 __asm__("%o0");
		register unsigned long arg1 __asm__("%o1");
		register unsigned long arg2 __asm__("%o2");

		func = HV_FAST_CPU_MONDO_SEND;
		arg0 = cnt;
@@ -624,10 +624,10 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t
		int retries = 0;

		do {
			register unsigned long func __asm__("%o0");
			register unsigned long arg0 __asm__("%o1");
			register unsigned long arg1 __asm__("%o2");
			register unsigned long arg2 __asm__("%o3");
			register unsigned long func __asm__("%o5");
			register unsigned long arg0 __asm__("%o0");
			register unsigned long arg1 __asm__("%o1");
			register unsigned long arg2 __asm__("%o2");

			cpu_list[0] = i;
			func = HV_FAST_CPU_MONDO_SEND;
+28 −28
Original line number Diff line number Diff line
@@ -265,20 +265,20 @@ do_unlock:
	 nop

niagara_lock_tlb:
	mov		HV_FAST_MMU_MAP_PERM_ADDR, %o0
	sethi		%hi(KERNBASE), %o1
	clr		%o2
	sethi		%hi(kern_locked_tte_data), %o3
	ldx		[%o3 + %lo(kern_locked_tte_data)], %o3
	mov		HV_MMU_IMMU, %o4
	mov		HV_FAST_MMU_MAP_PERM_ADDR, %o5
	sethi		%hi(KERNBASE), %o0
	clr		%o1
	sethi		%hi(kern_locked_tte_data), %o2
	ldx		[%o2 + %lo(kern_locked_tte_data)], %o2
	mov		HV_MMU_IMMU, %o3
	ta		HV_FAST_TRAP

	mov		HV_FAST_MMU_MAP_PERM_ADDR, %o0
	sethi		%hi(KERNBASE), %o1
	clr		%o2
	sethi		%hi(kern_locked_tte_data), %o3
	ldx		[%o3 + %lo(kern_locked_tte_data)], %o3
	mov		HV_MMU_DMMU, %o4
	mov		HV_FAST_MMU_MAP_PERM_ADDR, %o5
	sethi		%hi(KERNBASE), %o0
	clr		%o1
	sethi		%hi(kern_locked_tte_data), %o2
	ldx		[%o2 + %lo(kern_locked_tte_data)], %o2
	mov		HV_MMU_DMMU, %o3
	ta		HV_FAST_TRAP

	sethi		%hi(bigkernel), %g2
@@ -286,24 +286,24 @@ niagara_lock_tlb:
	brz,pt		%g2, after_lock_tlb
	 nop

	mov		HV_FAST_MMU_MAP_PERM_ADDR, %o0
	sethi		%hi(KERNBASE + 0x400000), %o1
	clr		%o2
	sethi		%hi(kern_locked_tte_data), %o3
	ldx		[%o3 + %lo(kern_locked_tte_data)], %o3
	sethi		%hi(0x400000), %o4
	add		%o3, %o4, %o3
	mov		HV_MMU_IMMU, %o4
	mov		HV_FAST_MMU_MAP_PERM_ADDR, %o5
	sethi		%hi(KERNBASE + 0x400000), %o0
	clr		%o1
	sethi		%hi(kern_locked_tte_data), %o2
	ldx		[%o2 + %lo(kern_locked_tte_data)], %o2
	sethi		%hi(0x400000), %o3
	add		%o2, %o3, %o2
	mov		HV_MMU_IMMU, %o3
	ta		HV_FAST_TRAP

	mov		HV_FAST_MMU_MAP_PERM_ADDR, %o0
	sethi		%hi(KERNBASE + 0x400000), %o1
	clr		%o2
	sethi		%hi(kern_locked_tte_data), %o3
	ldx		[%o3 + %lo(kern_locked_tte_data)], %o3
	sethi		%hi(0x400000), %o4
	add		%o3, %o4, %o3
	mov		HV_MMU_DMMU, %o4
	mov		HV_FAST_MMU_MAP_PERM_ADDR, %o5
	sethi		%hi(KERNBASE + 0x400000), %o0
	clr		%o1
	sethi		%hi(kern_locked_tte_data), %o2
	ldx		[%o2 + %lo(kern_locked_tte_data)], %o2
	sethi		%hi(0x400000), %o3
	add		%o2, %o3, %o2
	mov		HV_MMU_DMMU, %o3
	ta		HV_FAST_TRAP

after_lock_tlb:
+3 −3
Original line number Diff line number Diff line
@@ -266,9 +266,9 @@ __tsb_context_switch:
	mov	SCRATCHPAD_UTSBREG2, %g1
	stxa	%g2, [%g1] ASI_SCRATCHPAD

	mov	HV_FAST_MMU_TSB_CTXNON0, %o0
	mov	1, %o1
	mov	%o4, %o2
	mov	HV_FAST_MMU_TSB_CTXNON0, %o5
	mov	1, %o0
	mov	%o4, %o1
	ta	HV_FAST_TRAP

	ba,pt	%xcc, 9f
+10 −10
Original line number Diff line number Diff line
@@ -518,11 +518,11 @@ static void __init hypervisor_tlb_lock(unsigned long vaddr,
				       unsigned long pte,
				       unsigned long mmu)
{
	register unsigned long func asm("%o0");
	register unsigned long arg0 asm("%o1");
	register unsigned long arg1 asm("%o2");
	register unsigned long arg2 asm("%o3");
	register unsigned long arg3 asm("%o4");
	register unsigned long func asm("%o5");
	register unsigned long arg0 asm("%o0");
	register unsigned long arg1 asm("%o1");
	register unsigned long arg2 asm("%o2");
	register unsigned long arg3 asm("%o3");

	func = HV_FAST_MMU_MAP_PERM_ADDR;
	arg0 = vaddr;
@@ -1112,18 +1112,18 @@ static void __init tsb_phys_patch(void)
/* Register this cpu's fault status area with the hypervisor.  */
void __cpuinit sun4v_register_fault_status(void)
{
	register unsigned long func asm("%o5");
	register unsigned long arg0 asm("%o0");
	register unsigned long arg1 asm("%o1");
	int cpu = hard_smp_processor_id();
	struct trap_per_cpu *tb = &trap_block[cpu];
	unsigned long pa;

	pa = kern_base + ((unsigned long) tb - KERNBASE);
	arg0 = HV_FAST_MMU_FAULT_AREA_CONF;
	arg1 = pa;
	func = HV_FAST_MMU_FAULT_AREA_CONF;
	arg0 = pa;
	__asm__ __volatile__("ta	%4"
			     : "=&r" (arg0), "=&r" (arg1)
			     : "0" (arg0), "1" (arg1),
			     : "=&r" (func), "=&r" (arg0)
			     : "0" (func), "1" (arg0),
			       "i" (HV_FAST_TRAP));
}