Commit b40d68d5 authored by H. Peter Anvin

x86, boot: stylistic cleanups for boot/compressed/head_64.S



Clean up style issues in arch/x86/boot/compressed/head_64.S.  This
file had a lot fewer style issues than its 32-bit cousin, but the ones
it has are worth fixing, especially since it makes the two files more
similar.

[ Impact: cleanup, no object code change ]

Signed-off-by: H. Peter Anvin <hpa@zytor.com>
parent 5f64ec64
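The cleanup is mostly a matter of converting the remaining old-style comment blocks to the kernel's usual multi-line comment layout, plus a couple of small mechanical changes visible in the diff below. A minimal before/after sketch of that comment convention (the wording here is illustrative, not text taken from the patch):

	/* old style: text starts on the opening line
	 * and the closing marker shares the last text line */

	/*
	 * Preferred style: the opening and closing markers sit on
	 * lines of their own, and each text line begins with an asterisk.
	 */

The same convention is what the diff below applies throughout the file.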
arch/x86/boot/compressed/head_64.S  +31 −21
@@ -37,8 +37,10 @@
 	.code32
 ENTRY(startup_32)
 	cld
-	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
-	 * us to not reload segments */
+	/*
+	 * Test KEEP_SEGMENTS flag to see if the bootloader is asking
+	 * us to not reload segments
+	 */
 	testb $(1<<6), BP_loadflags(%esi)
 	jnz 1f
 
@@ -49,7 +51,8 @@ ENTRY(startup_32)
 	movl	%eax, %ss
 1:
 
-/* Calculate the delta between where we were compiled to run
+/*
+ * Calculate the delta between where we were compiled to run
  * at and where we were actually loaded at.  This can only be done
  * with a short local call on x86.  Nothing  else will tell us what
  * address we are running at.  The reserved chunk of the real-mode
@@ -70,10 +73,11 @@ ENTRY(startup_32)
 	testl	%eax, %eax
 	jnz	no_longmode
 
-/* Compute the delta between where we were compiled to run at
+/*
+ * Compute the delta between where we were compiled to run at
  * and where the code will actually run at.
- */
-/* %ebp contains the address we are loaded at by the boot loader and %ebx
+ *
+ * %ebp contains the address we are loaded at by the boot loader and %ebx
  * contains the address where we should move the kernel image temporarily
  * for safe in-place decompression.
  */
@@ -155,7 +159,8 @@ ENTRY(startup_32)
 	btsl	$_EFER_LME, %eax
 	wrmsr
 
-	/* Setup for the jump to 64bit mode
+	/*
+	 * Setup for the jump to 64bit mode
 	 *
 	 * When the jump is performend we will be in long mode but
 	 * in 32bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1
@@ -184,7 +189,8 @@ no_longmode:
 
 #include "../../kernel/verify_cpu_64.S"
 
-	/* Be careful here startup_64 needs to be at a predictable
+	/*
+	 * Be careful here startup_64 needs to be at a predictable
 	 * address so I can export it in an ELF header.  Bootloaders
 	 * should look at the ELF header to find this address, as
 	 * it may change in the future.
@@ -192,7 +198,8 @@ no_longmode:
 	.code64
 	.org 0x200
 ENTRY(startup_64)
-	/* We come here either from startup_32 or directly from a
+	/*
+	 * We come here either from startup_32 or directly from a
 	 * 64bit bootloader.  If we come here from a bootloader we depend on
 	 * an identity mapped page table being provied that maps our
 	 * entire text+data+bss and hopefully all of memory.
@@ -209,7 +216,8 @@ ENTRY(startup_64)
 	movl    $0x20, %eax
 	ltr	%ax
 
-	/* Compute the decompressed kernel start address.  It is where
+	/*
+	 * Compute the decompressed kernel start address.  It is where
 	 * we were loaded at aligned to a 2M boundary. %rbp contains the
 	 * decompressed kernel start address.
 	 *
@@ -241,7 +249,8 @@ ENTRY(startup_64)
 	addq	$(32768 + 18 + 4095), %rbx
 	andq	$~4095, %rbx
 
-/* Copy the compressed kernel to the end of our buffer
+/*
+ * Copy the compressed kernel to the end of our buffer
  * where decompression in place becomes safe.
  */
 	leaq	_end_before_pgt(%rip), %r8
@@ -260,7 +269,7 @@ ENTRY(startup_64)
 	leaq	relocated(%rbx), %rax
 	jmp	*%rax
 
-.section ".text"
+	.text
 relocated:
 
 /*
@@ -271,8 +280,7 @@ relocated:
 	leaq    _end_before_pgt(%rbx), %rcx
 	subq	%rdi, %rcx
 	cld
-	rep
-	stosb
+	rep	stosb
 
 	/* Setup the stack */
 	leaq	boot_stack_end(%rip), %rsp
@@ -311,8 +319,10 @@ gdt:
 	.quad   0x0000000000000000	/* TS continued */
 gdt_end:
 
+/*
+ * Stack and heap for uncompression
+ */
 	.bss
-/* Stack and heap for uncompression */
 	.balign 4
 boot_heap:
 	.fill BOOT_HEAP_SIZE, 1, 0