Commit d6d5f19e authored by Max Filippov
Browse files

xtensa: abstract 'entry' and 'retw' in assembly code



Provide abi_entry, abi_entry_default, abi_ret and abi_ret_default macros
that allocate aligned stack frame in windowed and call0 ABIs.
Provide XTENSA_SPILL_STACK_RESERVE macro that specifies required stack
frame size when register spilling is involved.
Replace all uses of 'entry' and 'retw' with the above macros.
This makes most of the xtensa assembly code ready for XEA3 and call0 ABI.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
parent 831c4f3d
Loading
Loading
Loading
Loading
+46 −0
Original line number Diff line number Diff line
@@ -191,4 +191,50 @@
#endif
	.endm

/*
 * ABI-abstraction macros: abi_entry/abi_ret open and close a function's
 * stack frame in a way that works for both the windowed and the call0
 * Xtensa ABIs, so the same assembly source can be built for either.
 * frame_size is the number of bytes of local stack space the function
 * needs; it is rounded up to XTENSA_STACK_ALIGNMENT here.
 */

/* Stack frames are kept 16-byte aligned. */
#define XTENSA_STACK_ALIGNMENT		16

#if defined(__XTENSA_WINDOWED_ABI__)
/*
 * Windowed ABI: 'entry' itself allocates the frame, so abi_entry folds
 * the caller-requested frame_size (aligned up) into the entry operand on
 * top of a fixed base reserve.
 */
#define XTENSA_FRAME_SIZE_RESERVE	16
/* Extra frame space to reserve when register windows may be spilled. */
#define XTENSA_SPILL_STACK_RESERVE	32

#define abi_entry(frame_size) \
	entry sp, (XTENSA_FRAME_SIZE_RESERVE + \
		   (((frame_size) + XTENSA_STACK_ALIGNMENT - 1) & \
		    -XTENSA_STACK_ALIGNMENT))
#define abi_entry_default abi_entry(0)

/* 'retw' unwinds the whole frame itself, so frame_size is unused here. */
#define abi_ret(frame_size) retw
#define abi_ret_default retw

#elif defined(__XTENSA_CALL0_ABI__)

/* Call0 ABI has no register windows, hence nothing to spill. */
#define XTENSA_SPILL_STACK_RESERVE	0

#define abi_entry(frame_size) __abi_entry (frame_size)

	/*
	 * Call0 ABI: allocate the (aligned-up) frame by moving sp
	 * explicitly; skip the addi entirely when frame_size is 0.
	 */
	.macro	__abi_entry frame_size
	.ifgt \frame_size
	addi sp, sp, -(((\frame_size) + XTENSA_STACK_ALIGNMENT - 1) & \
		       -XTENSA_STACK_ALIGNMENT)
	.endif
	.endm

#define abi_entry_default

#define abi_ret(frame_size) __abi_ret (frame_size)

	/*
	 * Deallocate the frame with the same rounded size used by
	 * __abi_entry, then return with plain 'ret'.
	 */
	.macro	__abi_ret frame_size
	.ifgt \frame_size
	addi sp, sp, (((\frame_size) + XTENSA_STACK_ALIGNMENT - 1) & \
		      -XTENSA_STACK_ALIGNMENT)
	.endif
	ret
	.endm

#define abi_ret_default ret

#else
#error Unsupported Xtensa ABI
#endif

#endif /* _XTENSA_ASMMACRO_H */
+5 −2
Original line number Diff line number Diff line
@@ -121,7 +121,9 @@

ENTRY(coprocessor_flush)

	entry	a1, 32
	/* reserve 4 bytes on stack to save a0 */
	abi_entry(4)

	s32i	a0, a1, 0
	movi	a0, .Lsave_cp_regs_jump_table
	addx8	a3, a3, a0
@@ -131,7 +133,8 @@ ENTRY(coprocessor_flush)
	beqz	a3, 1f
	callx0	a3
1:	l32i	a0, a1, 0
	retw

	abi_ret(4)

ENDPROC(coprocessor_flush)

+6 −5
Original line number Diff line number Diff line
@@ -1842,7 +1842,8 @@ ENDPROC(fast_store_prohibited)

ENTRY(system_call)

	entry	a1, 32
	/* reserve 4 bytes on stack for function parameter */
	abi_entry(4)

	/* regs->syscall = regs->areg[2] */

@@ -1892,7 +1893,7 @@ ENTRY(system_call)

	s32i	a6, a2, PT_AREG2
	bnez	a3, 1f
	retw
	abi_ret(4)

1:
	l32i	a4, a1, 4
@@ -1901,7 +1902,7 @@ ENTRY(system_call)
	mov	a6, a2
	call4	do_syscall_trace_leave
	s32i	a3, a2, PT_SYSCALL
	retw
	abi_ret(4)

ENDPROC(system_call)

@@ -1952,7 +1953,7 @@ ENDPROC(system_call)

ENTRY(_switch_to)

	entry	a1, 48
	abi_entry(XTENSA_SPILL_STACK_RESERVE)

	mov	a11, a3			# and 'next' (a3)

@@ -2013,7 +2014,7 @@ ENTRY(_switch_to)
	wsr	a14, ps
	rsync

	retw
	abi_ret(XTENSA_SPILL_STACK_RESERVE)

ENDPROC(_switch_to)

+6 −5
Original line number Diff line number Diff line
@@ -11,6 +11,7 @@
 */

#include <linux/linkage.h>
#include <asm/asmmacro.h>
#include <asm/ftrace.h>

/*
@@ -21,13 +22,13 @@

ENTRY(_mcount)

	entry	a1, 16
	abi_entry_default

	movi	a4, ftrace_trace_function
	l32i	a4, a4, 0
	movi	a3, ftrace_stub
	bne	a3, a4, 1f
	retw
	abi_ret_default

1: 	xor	a7, a2, a1
	movi	a3, 0x3fffffff
@@ -40,11 +41,11 @@ ENTRY(_mcount)
	addi	a6, a6, -MCOUNT_INSN_SIZE
	callx4	a4

	retw
	abi_ret_default

ENDPROC(_mcount)

ENTRY(ftrace_stub)
	entry	a1, 16
	retw
	abi_entry_default
	abi_ret_default
ENDPROC(ftrace_stub)
+6 −6
Original line number Diff line number Diff line
@@ -43,7 +43,7 @@ ENTRY(csum_partial)
	 * Experiments with Ethernet and SLIP connections show that buf
	 * is aligned on either a 2-byte or 4-byte boundary.
	 */
	entry	sp, 32
	abi_entry_default
	extui	a5, a2, 0, 2
	bnez	a5, 8f		/* branch if 2-byte aligned */
	/* Fall-through on common case, 4-byte alignment */
@@ -107,7 +107,7 @@ ENTRY(csum_partial)
	ONES_ADD(a4, a6)
7:
	mov	a2, a4
	retw
	abi_ret_default

	/* uncommon case, buf is 2-byte aligned */
8:
@@ -195,7 +195,7 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,

ENTRY(csum_partial_copy_generic)

	entry	sp, 32
	abi_entry_default
	mov	a12, a3
	mov	a11, a4
	or	a10, a2, a3
@@ -316,7 +316,7 @@ EX(11f) s8i a9, a3, 0
	ONES_ADD(a5, a9)
8:
	mov	a2, a5
	retw
	abi_ret_default

5:
	/* Control branch to here when either src or dst is odd.  We
@@ -383,12 +383,12 @@ ENDPROC(csum_partial_copy_generic)
	blt	a12, a11, .Leloop
#endif
2:
	retw
	abi_ret_default

11:
	movi	a2, -EFAULT
	s32i	a2, a7, 0	/* dst_err_ptr */
	movi	a2, 0
	retw
	abi_ret_default

.previous
Loading