Commit 5c51bdbe authored by Yinghai Lu, committed by H. Peter Anvin
Browse files

x86, mm: Merge alloc_low_page between 64bit and 32bit



They are almost the same, except the 64-bit version needs to handle the after_bootmem case.

Add mm_internal.h so that alloc_low_page() is accessible only
from arch/x86/mm/init*.c.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1353123563-3103-25-git-send-email-yinghai@kernel.org


Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent 868bf4d6
Loading
Loading
Loading
Loading
+34 −0
Original line number Diff line number Diff line
@@ -17,10 +17,44 @@
#include <asm/proto.h>
#include <asm/dma.h>		/* for MAX_DMA_PFN */

#include "mm_internal.h"

unsigned long __initdata pgt_buf_start;
unsigned long __meminitdata pgt_buf_end;
unsigned long __meminitdata pgt_buf_top;

/*
 * Allocate one zeroed page for early page-table construction.
 *
 * Shared by the 32-bit and 64-bit init paths.  On 64-bit, once
 * after_bootmem is set the normal page allocator is used; before that
 * (and always on 32-bit) pages come from the pgt_buf_* window, falling
 * back to memblock when the window is exhausted.
 *
 * Returns the virtual address of a zeroed PAGE_SIZE page.  Panics on
 * failure — there is no recovery path this early in boot.
 */
__ref void *alloc_low_page(void)
{
	unsigned long pfn;
	void *adr;

#ifdef CONFIG_X86_64
	/* Late (post-bootmem) path: the buddy allocator is available. */
	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);

		return adr;
	}
#endif

	if ((pgt_buf_end + 1) >= pgt_buf_top) {
		/*
		 * pgt_buf window exhausted: reserve a fresh page from
		 * memblock.  NOTE(review): the [min_pfn_mapped,
		 * max_pfn_mapped) bounds presumably keep the page inside
		 * the already-mapped range so __va() below is valid —
		 * confirm against the callers that maintain these globals.
		 */
		unsigned long ret;
		if (min_pfn_mapped >= max_pfn_mapped)
			panic("alloc_low_page: ran out of memory");
		ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT,
					max_pfn_mapped << PAGE_SHIFT,
					PAGE_SIZE, PAGE_SIZE);
		if (!ret)
			panic("alloc_low_page: can not alloc memory");
		memblock_reserve(ret, PAGE_SIZE);
		pfn = ret >> PAGE_SHIFT;
	} else
		pfn = pgt_buf_end++;	/* take the next page from the buffer */

	adr = __va(pfn * PAGE_SIZE);
	clear_page(adr);	/* callers expect a zeroed page */
	return adr;
}

/* need 4 4k for initial PMD_SIZE, 4k for 0-ISA_END_ADDRESS */
#define INIT_PGT_BUF_SIZE	(5 * PAGE_SIZE)
RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
+2 −24
Original line number Diff line number Diff line
@@ -53,36 +53,14 @@
#include <asm/page_types.h>
#include <asm/init.h>

#include "mm_internal.h"

unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

bool __read_mostly __vmalloc_start_set = false;

/*
 * 32-bit-only early page-table page allocator (this is the duplicate
 * being removed in favour of the shared version in init.c).
 *
 * Hands out zeroed pages from the pgt_buf_* window, reserving a fresh
 * page from memblock when the window runs out.  Panics on failure.
 */
static __init void *alloc_low_page(void)
{
	unsigned long pfn;
	void *adr;

	if ((pgt_buf_end + 1) >= pgt_buf_top) {
		/*
		 * Buffer exhausted — fall back to memblock.  The
		 * min/max_pfn_mapped bounds presumably keep the page in
		 * the already-mapped range so __va() works — TODO confirm.
		 */
		unsigned long ret;
		if (min_pfn_mapped >= max_pfn_mapped)
			panic("alloc_low_page: ran out of memory");
		ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT,
					max_pfn_mapped << PAGE_SHIFT,
					PAGE_SIZE, PAGE_SIZE);
		if (!ret)
			panic("alloc_low_page: can not alloc memory");
		memblock_reserve(ret, PAGE_SIZE);
		pfn = ret >> PAGE_SHIFT;
	} else
		pfn = pgt_buf_end++;	/* next free page in the buffer */

	adr = __va(pfn * PAGE_SIZE);
	clear_page(adr);
	return adr;
}

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
+2 −30
Original line number Diff line number Diff line
@@ -54,6 +54,8 @@
#include <asm/uv/uv.h>
#include <asm/setup.h>

#include "mm_internal.h"

static int __init parse_direct_gbpages_off(char *arg)
{
	direct_gbpages = 0;
@@ -314,36 +316,6 @@ void __init cleanup_highmap(void)
	}
}

/*
 * 64-bit-only early page-table page allocator (this is the duplicate
 * being removed in favour of the shared version in init.c).
 *
 * After bootmem is finished, uses the normal page allocator; before
 * that, hands out zeroed pages from the pgt_buf_* window, reserving a
 * fresh page from memblock when the window runs out.  Panics on
 * failure.
 */
static __ref void *alloc_low_page(void)
{
	unsigned long pfn;
	void *adr;

	/* Late path: buddy allocator is up, return a zeroed page. */
	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);

		return adr;
	}

	if ((pgt_buf_end + 1) >= pgt_buf_top) {
		/*
		 * Buffer exhausted — fall back to memblock.  The
		 * min/max_pfn_mapped bounds presumably keep the page in
		 * the already-mapped range so __va() works — TODO confirm.
		 */
		unsigned long ret;
		if (min_pfn_mapped >= max_pfn_mapped)
			panic("alloc_low_page: ran out of memory");
		ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT,
					max_pfn_mapped << PAGE_SHIFT,
					PAGE_SIZE, PAGE_SIZE);
		if (!ret)
			panic("alloc_low_page: can not alloc memory");
		memblock_reserve(ret, PAGE_SIZE);
		pfn = ret >> PAGE_SHIFT;
	} else
		pfn = pgt_buf_end++;	/* next free page in the buffer */

	adr = __va(pfn * PAGE_SIZE);
	clear_page(adr);
	return adr;
}

static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
	      pgprot_t prot)
+6 −0
Original line number Diff line number Diff line
#ifndef __X86_MM_INTERNAL_H
#define __X86_MM_INTERNAL_H

/*
 * Declarations shared only between the arch/x86/mm/init*.c translation
 * units; deliberately not exposed in a public asm/ header.
 */
void *alloc_low_page(void);

#endif	/* __X86_MM_INTERNAL_H */