Commit 9f459fad authored by Linus Torvalds
Browse files

Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: Fix build with older binutils and consolidate linker script
  x86: Fix an incorrect argument of reserve_bootmem()
  x86: add vmlinux.lds to targets in arch/x86/boot/compressed/Makefile
  xen: rearrange things to fix stackprotector
  x86: make sure load_percpu_segment has no stackprotector
  i386: Fix section mismatches for init code with !HOTPLUG_CPU
  x86, pat: Allow ISA memory range uncacheable mapping requests
parents e9cab24c c62e4320
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -4,7 +4,7 @@
# create a compressed vmlinux image from the original vmlinux
#

targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma head_$(BITS).o misc.o piggy.o
targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma head_$(BITS).o misc.o piggy.o

KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
+10 −2
Original line number Diff line number Diff line
@@ -2,6 +2,7 @@
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/e820.h>

#include <asm/pgtable_types.h>

@@ -269,9 +270,16 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(unsigned long flags,
static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 unsigned long flags,
					 unsigned long new_flags)
{
	/*
	 * PAT type is always WB for ISA. So no need to check.
	 */
	if (is_ISA_range(paddr, paddr + size - 1))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
+4 −0
Original line number Diff line number Diff line
@@ -7,6 +7,10 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_common.o = -pg
endif

# Make sure load_percpu_segment has no stackprotector
nostackp := $(call cc-option, -fno-stack-protector)
CFLAGS_common.o		:= $(nostackp)

obj-y			:= intel_cacheinfo.o addon_cpuid_features.o
obj-y			+= proc.o capflags.o powerflags.o common.o
obj-y			+= vmware.o hypervisor.o
+1 −7
Original line number Diff line number Diff line
@@ -261,9 +261,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
 * which will be freed later
 */

#ifndef CONFIG_HOTPLUG_CPU
.section .init.text,"ax",@progbits
#endif
__CPUINIT

#ifdef CONFIG_SMP
ENTRY(startup_32_smp)
@@ -602,11 +600,7 @@ ignore_int:
#endif
	iret

#ifndef CONFIG_HOTPLUG_CPU
	__CPUINITDATA
#else
	__REFDATA
#endif
.align 4
ENTRY(initial_code)
	.long i386_start_kernel
+47 −79
Original line number Diff line number Diff line
@@ -46,11 +46,10 @@ PHDRS {
	data PT_LOAD FLAGS(7);          /* RWE */
#ifdef CONFIG_X86_64
	user PT_LOAD FLAGS(7);          /* RWE */
	data.init PT_LOAD FLAGS(7);     /* RWE */
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(7);        /* RWE */
#endif
	data.init2 PT_LOAD FLAGS(7);    /* RWE */
	init PT_LOAD FLAGS(7);          /* RWE */
#endif
	note PT_NOTE FLAGS(0);          /* ___ */
}
@@ -103,65 +102,43 @@ SECTIONS
		__stop___ex_table = .;
	} :text = 0x9090

	RODATA
	RO_DATA(PAGE_SIZE)

	/* Data */
	. = ALIGN(PAGE_SIZE);
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;
		DATA_DATA
		CONSTRUCTORS
	} :data

		/* init_task */
		INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_X86_32
		/* 32 bit has nosave before _edata */
	. = ALIGN(PAGE_SIZE);
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		__nosave_begin = .;
		*(.data.nosave)
		. = ALIGN(PAGE_SIZE);
		__nosave_end = .;
	}
		NOSAVE_DATA
#endif

	. = ALIGN(PAGE_SIZE);
	.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
		*(.data.page_aligned)
		PAGE_ALIGNED_DATA(PAGE_SIZE)
		*(.data.idt)
	}

#ifdef CONFIG_X86_32
	. = ALIGN(32);
#else
	. = ALIGN(PAGE_SIZE);
	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
#endif
	.data.cacheline_aligned :
		AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
		*(.data.cacheline_aligned)
	}
		CACHELINE_ALIGNED_DATA(CONFIG_X86_L1_CACHE_BYTES)

		DATA_DATA
		CONSTRUCTORS

		/* rarely changed data like cpu maps */
#ifdef CONFIG_X86_32
	. = ALIGN(32);
#else
	. = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES);
#endif
	.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
		*(.data.read_mostly)
		READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)

		/* End of data section */
		_edata = .;
	}
	} :data

#ifdef CONFIG_X86_64

#define VSYSCALL_ADDR (-10*1024*1024)
#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.read_mostly) + \
                            SIZEOF(.data.read_mostly) + 4095) & ~(4095))
#define VSYSCALL_VIRT_ADDR ((ADDR(.data.read_mostly) + \
                            SIZEOF(.data.read_mostly) + 4095) & ~(4095))
#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data) + SIZEOF(.data) + \
                            PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define VSYSCALL_VIRT_ADDR ((ADDR(.data) + SIZEOF(.data) + \
                            PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

#define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)
@@ -227,35 +204,29 @@ SECTIONS

#endif /* CONFIG_X86_64 */

	/* init_task */
	. = ALIGN(THREAD_SIZE);
	.data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
		*(.data.init_task)
	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
	}
#ifdef CONFIG_X86_64
	 :data.init
#endif

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * smp_locks might be freed after init
	 * start/end must be page aligned
	 * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - .init.text - should
	 * start another segment - init.
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		__smp_locks_end = .;
		. = ALIGN(PAGE_SIZE);
	}
	PERCPU_VADDR(0, :percpu)
#endif

	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
		_sinittext = .;
		INIT_TEXT
		_einittext = .;
	}
#ifdef CONFIG_X86_64
	:init
#endif

	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
		INIT_DATA
@@ -326,17 +297,7 @@ SECTIONS
	}
#endif

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - __data_nosave - should
	 * start another section data.init2.  Also, pda should be at the head of
	 * percpu area.  Preallocate it and define the percpu offset symbol
	 * so that it can be accessed as a percpu variable.
	 */
	. = ALIGN(PAGE_SIZE);
	PERCPU_VADDR(0, :percpu)
#else
#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
	PERCPU(PAGE_SIZE)
#endif

@@ -347,15 +308,22 @@ SECTIONS
		__init_end = .;
	}

#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
	/*
	 * smp_locks might be freed after init
	 * start/end must be page aligned
	 */
	. = ALIGN(PAGE_SIZE);
		__nosave_begin = .;
		*(.data.nosave)
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		__smp_locks_end = .;
		. = ALIGN(PAGE_SIZE);
		__nosave_end = .;
	} :data.init2
	/* use another section data.init2, see PERCPU_VADDR() above */
	}

#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}
#endif

	/* BSS */
Loading