Commit 78762b0e authored by Jiri Slaby, committed by Borislav Petkov
Browse files

x86/asm/32: Add ENDs to some functions and relabel with SYM_CODE_*



All these are functions which are invoked from elsewhere but they are
not typical C functions. So annotate them using the new SYM_CODE_START.
All these were not balanced with any END, so mark their ends by
SYM_CODE_END, appropriately.

Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com> [xen bits]
Reviewed-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com> [hibernate]
Cc: Andy Lutomirski <luto@kernel.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Len Brown <len.brown@intel.com>
Cc: linux-arch@vger.kernel.org
Cc: linux-pm@vger.kernel.org
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Pingfan Liu <kernelfans@gmail.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: x86-ml <x86@kernel.org>
Cc: xen-devel@lists.xenproject.org
Link: https://lkml.kernel.org/r/20191011115108.12392-26-jslaby@suse.cz
parent 6dcc5627
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -847,9 +847,10 @@ SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
 */
ENTRY(xen_sysenter_target)
SYM_CODE_START(xen_sysenter_target)
	addl	$5*4, %esp			/* remove xen-provided frame */
	jmp	.Lsysenter_past_esp
SYM_CODE_END(xen_sysenter_target)
#endif

/*
+4 −3
Original line number Diff line number Diff line
@@ -9,8 +9,7 @@
	.code32
	ALIGN

ENTRY(wakeup_pmode_return)
wakeup_pmode_return:
SYM_CODE_START(wakeup_pmode_return)
	movw	$__KERNEL_DS, %ax
	movw	%ax, %ss
	movw	%ax, %fs
@@ -39,6 +38,7 @@ wakeup_pmode_return:
	# jump to place where we left off
	movl	saved_eip, %eax
	jmp	*%eax
SYM_CODE_END(wakeup_pmode_return)

bogus_magic:
	jmp	bogus_magic
@@ -72,7 +72,7 @@ restore_registers:
	popfl
	ret

ENTRY(do_suspend_lowlevel)
SYM_CODE_START(do_suspend_lowlevel)
	call	save_processor_state
	call	save_registers
	pushl	$3
@@ -87,6 +87,7 @@ ret_point:
	call	restore_registers
	call	restore_processor_state
	ret
SYM_CODE_END(do_suspend_lowlevel)

.data
ALIGN
+2 −1
Original line number Diff line number Diff line
@@ -89,7 +89,7 @@ WEAK(ftrace_stub)
	ret
END(ftrace_caller)

ENTRY(ftrace_regs_caller)
SYM_CODE_START(ftrace_regs_caller)
	/*
	 * We're here from an mcount/fentry CALL, and the stack frame looks like:
	 *
@@ -163,6 +163,7 @@ SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
	popl	%eax

	jmp	.Lftrace_ret
SYM_CODE_END(ftrace_regs_caller)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
+2 −1
Original line number Diff line number Diff line
@@ -64,7 +64,7 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
 * can.
 */
__HEAD
ENTRY(startup_32)
SYM_CODE_START(startup_32)
	movl pa(initial_stack),%ecx
	
	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
@@ -172,6 +172,7 @@ num_subarch_entries = (. - subarch_entries) / 4
#else
	jmp .Ldefault_entry
#endif /* CONFIG_PARAVIRT */
SYM_CODE_END(startup_32)

#ifdef CONFIG_HOTPLUG_CPU
/*
+4 −2
Original line number Diff line number Diff line
@@ -35,7 +35,7 @@ ENTRY(swsusp_arch_suspend)
	ret
ENDPROC(swsusp_arch_suspend)

ENTRY(restore_image)
SYM_CODE_START(restore_image)
	/* prepare to jump to the image kernel */
	movl	restore_jump_address, %ebx
	movl	restore_cr3, %ebp
@@ -45,9 +45,10 @@ ENTRY(restore_image)
	/* jump to relocated restore code */
	movl	relocated_restore_code, %eax
	jmpl	*%eax
SYM_CODE_END(restore_image)

/* code below has been relocated to a safe page */
ENTRY(core_restore_code)
SYM_CODE_START(core_restore_code)
	movl	temp_pgt, %eax
	movl	%eax, %cr3

@@ -77,6 +78,7 @@ copy_loop:

done:
	jmpl	*%ebx
SYM_CODE_END(core_restore_code)

	/* code below belongs to the image kernel */
	.align PAGE_SIZE
Loading