Commit 4aec216b authored by Jiri Slaby, committed by Borislav Petkov
Browse files

x86/asm/64: Add ENDs to some functions and relabel with SYM_CODE_*



All these are functions which are invoked from elsewhere but they are
not typical C functions. So annotate them using the new SYM_CODE_START.
None of these were balanced with a matching END annotation, so mark their
ends with SYM_CODE_END appropriately too.

Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com> [xen bits]
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com> [power mgmt]
Cc: Andy Shevchenko <andy@infradead.org>
Cc: Cao jin <caoj.fnst@cn.fujitsu.com>
Cc: Darren Hart <dvhart@infradead.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: linux-arch@vger.kernel.org
Cc: linux-pm@vger.kernel.org
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Pingfan Liu <kernelfans@gmail.com>
Cc: platform-driver-x86@vger.kernel.org
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Wei Huang <wei@redhat.com>
Cc: x86-ml <x86@kernel.org>
Cc: xen-devel@lists.xenproject.org
Cc: Xiaoyao Li <xiaoyao.li@linux.intel.com>
Link: https://lkml.kernel.org/r/20191011115108.12392-23-jslaby@suse.cz
parent f13ad88a
Loading
Loading
Loading
Loading
+4 −2
Original line number Diff line number Diff line
@@ -250,7 +250,7 @@ ENDPROC(efi32_stub_entry)

	.code64
	.org 0x200
ENTRY(startup_64)
SYM_CODE_START(startup_64)
	/*
	 * 64bit entry is 0x200 and it is ABI so immutable!
	 * We come here either from startup_32 or directly from a
@@ -442,6 +442,7 @@ trampoline_return:
 */
	leaq	.Lrelocated(%rbx), %rax
	jmp	*%rax
SYM_CODE_END(startup_64)

#ifdef CONFIG_EFI_STUB

@@ -571,7 +572,7 @@ SYM_FUNC_END(.Lrelocated)
 * ECX contains the base address of the trampoline memory.
 * Non zero RDX means trampoline needs to enable 5-level paging.
 */
ENTRY(trampoline_32bit_src)
SYM_CODE_START(trampoline_32bit_src)
	/* Set up data and stack segments */
	movl	$__KERNEL_DS, %eax
	movl	%eax, %ds
@@ -634,6 +635,7 @@ ENTRY(trampoline_32bit_src)
	movl	%eax, %cr0

	lret
SYM_CODE_END(trampoline_32bit_src)

	.code64
SYM_FUNC_START_LOCAL_NOALIGN(.Lpaging_enabled)
+2 −1
Original line number Diff line number Diff line
@@ -90,7 +90,7 @@ restore_registers:

	ret

ENTRY(do_olpc_suspend_lowlevel)
SYM_CODE_START(do_olpc_suspend_lowlevel)
	call	save_processor_state
	call	save_registers

@@ -110,6 +110,7 @@ ret_point:
	call	restore_registers
	call	restore_processor_state
	ret
SYM_CODE_END(do_olpc_suspend_lowlevel)

.data
saved_gdt:             .long   0,0
+4 −2
Original line number Diff line number Diff line
@@ -52,7 +52,7 @@ ENTRY(swsusp_arch_suspend)
	ret
ENDPROC(swsusp_arch_suspend)

ENTRY(restore_image)
SYM_CODE_START(restore_image)
	/* prepare to jump to the image kernel */
	movq	restore_jump_address(%rip), %r8
	movq	restore_cr3(%rip), %r9
@@ -67,9 +67,10 @@ ENTRY(restore_image)
	/* jump to relocated restore code */
	movq	relocated_restore_code(%rip), %rcx
	jmpq	*%rcx
SYM_CODE_END(restore_image)

	/* code below has been relocated to a safe page */
ENTRY(core_restore_code)
SYM_CODE_START(core_restore_code)
	/* switch to temporary page tables */
	movq	%rax, %cr3
	/* flush TLB */
@@ -97,6 +98,7 @@ ENTRY(core_restore_code)
.Ldone:
	/* jump to the restore_registers address from the image header */
	jmpq	*%r8
SYM_CODE_END(core_restore_code)

	 /* code below belongs to the image kernel */
	.align PAGE_SIZE
+2 −1
Original line number Diff line number Diff line
@@ -19,7 +19,7 @@
 */
	.section ".text32", "ax"
	.code32
ENTRY(machine_real_restart_asm)
SYM_CODE_START(machine_real_restart_asm)

#ifdef CONFIG_X86_64
	/* Switch to trampoline GDT as it is guaranteed < 4 GiB */
@@ -63,6 +63,7 @@ SYM_INNER_LABEL(machine_real_restart_paging_off, SYM_L_GLOBAL)
	movl	%ecx, %gs
	movl	%ecx, %ss
	ljmpw	$8, $1f
SYM_CODE_END(machine_real_restart_asm)

/*
 * This is 16-bit protected mode code to disable paging and the cache,
+7 −3
Original line number Diff line number Diff line
@@ -38,7 +38,7 @@
	.code16

	.balign	PAGE_SIZE
ENTRY(trampoline_start)
SYM_CODE_START(trampoline_start)
	cli			# We should be safe anyway
	wbinvd

@@ -78,12 +78,14 @@ ENTRY(trampoline_start)
no_longmode:
	hlt
	jmp no_longmode
SYM_CODE_END(trampoline_start)

#include "../kernel/verify_cpu.S"

	.section ".text32","ax"
	.code32
	.balign 4
ENTRY(startup_32)
SYM_CODE_START(startup_32)
	movl	%edx, %ss
	addl	$pa_real_mode_base, %esp
	movl	%edx, %ds
@@ -137,13 +139,15 @@ ENTRY(startup_32)
	 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
	 */
	ljmpl	$__KERNEL_CS, $pa_startup_64
SYM_CODE_END(startup_32)

	.section ".text64","ax"
	.code64
	.balign 4
ENTRY(startup_64)
SYM_CODE_START(startup_64)
	# Now jump into the kernel using virtual addresses
	jmpq	*tr_start(%rip)
SYM_CODE_END(startup_64)

	.section ".rodata","a"
	# Duplicate the global descriptor table
Loading