Commit c14fa0b6 authored by Yinghai Lu, committed by H. Peter Anvin

x86, mm: Find early page table buffer together



We should not look for the early page table buffer space in every call of init_memory_mapping(). Do it once, in init_mem_mapping(), for the whole range instead.

At the same time, move early_memtest() down so it runs after everything is mapped, and drop the now-unnecessary after_bootmem checks.
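
For orientation, here is a condensed sketch of the resulting flow. The names match the patch below, but the bodies and the 64-bit over-4G branch are elided, so read this as an illustration rather than the literal code:

void __init init_mem_mapping(void)
{
	probe_page_size_mask();

	/* Find buffer space once, sized for the whole range to be mapped. */
#ifdef CONFIG_X86_64
	find_early_table_space(0, max_pfn << PAGE_SHIFT);
#else
	find_early_table_space(0, max_low_pfn << PAGE_SHIFT);
#endif

	/* init_memory_mapping() itself no longer looks for table space. */
	max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn << PAGE_SHIFT);
	max_pfn_mapped = max_low_pfn_mapped;
	/* ... 64-bit also maps the range above 4G here ... */

	/* Moved down: test only what has actually been mapped. */
	early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
}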

-v2: fix early_memtest() on 32-bit by passing max_pfn_mapped instead.
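
Why max_pfn_mapped: on a 32-bit HIGHMEM machine max_pfn can be well above max_low_pfn, and only lowmem is direct-mapped at this point, so the memtest range must be clamped to what is actually mapped. A worked example with hypothetical numbers:

	/* Hypothetical 32-bit box: 8 GiB RAM, 896 MiB lowmem.          */
	/*   max_pfn        = 0x200000  (8 GiB in 4 KiB pages)          */
	/*   max_low_pfn    = 0x38000   (896 MiB, direct-mapped)        */
	/*   max_pfn_mapped = max_low_pfn after init_mem_mapping()      */
	/* Testing up to max_pfn << PAGE_SHIFT would touch pages with   */
	/* no direct mapping; max_pfn_mapped << PAGE_SHIFT stays inside */
	/* it on both 32-bit and 64-bit.                                */
	early_memtest(0, max_pfn_mapped << PAGE_SHIFT);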

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1353123563-3103-8-git-send-email-yinghai@kernel.org


Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent 84f1ae30
arch/x86/mm/init.c  +34 −32
@@ -274,16 +274,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	memset(mr, 0, sizeof(mr));
 	nr_range = split_mem_range(mr, 0, start, end);
 
-	/*
-	 * Find space for the kernel direct mapping tables.
-	 *
-	 * Later we should allocate these tables in the local node of the
-	 * memory mapped. Unfortunately this is done currently before the
-	 * nodes are discovered.
-	 */
-	if (!after_bootmem)
-		find_early_table_space(start, end);
-
 	for (i = 0; i < nr_range; i++)
 		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
 						   mr[i].page_size_mask);
@@ -296,6 +286,36 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 
 	__flush_tlb_all();
 
+	return ret >> PAGE_SHIFT;
+}
+
+void __init init_mem_mapping(void)
+{
+	probe_page_size_mask();
+
+	/*
+	 * Find space for the kernel direct mapping tables.
+	 *
+	 * Later we should allocate these tables in the local node of the
+	 * memory mapped. Unfortunately this is done currently before the
+	 * nodes are discovered.
+	 */
+#ifdef CONFIG_X86_64
+	find_early_table_space(0, max_pfn<<PAGE_SHIFT);
+#else
+	find_early_table_space(0, max_low_pfn<<PAGE_SHIFT);
+#endif
+	max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
+	max_pfn_mapped = max_low_pfn_mapped;
+
+#ifdef CONFIG_X86_64
+	if (max_pfn > max_low_pfn) {
+		max_pfn_mapped = init_memory_mapping(1UL<<32,
+						     max_pfn<<PAGE_SHIFT);
+		/* can we preseve max_low_pfn ?*/
+		max_low_pfn = max_pfn;
+	}
+#endif
 	/*
 	 * Reserve the kernel pagetable pages we used (pgt_buf_start -
 	 * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top)
@@ -311,32 +331,14 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	 * RO all the pagetable pages, including the ones that are beyond
 	 * pgt_buf_end at that time.
 	 */
-	if (!after_bootmem && pgt_buf_end > pgt_buf_start)
+	if (pgt_buf_end > pgt_buf_start)
 		x86_init.mapping.pagetable_reserve(PFN_PHYS(pgt_buf_start),
 				PFN_PHYS(pgt_buf_end));
 
-	if (!after_bootmem)
-		early_memtest(start, end);
+	/* stop the wrong using */
+	pgt_buf_top = 0;
 
-	return ret >> PAGE_SHIFT;
-}
-
-void __init init_mem_mapping(void)
-{
-	probe_page_size_mask();
-
-	/* max_pfn_mapped is updated here */
-	max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
-	max_pfn_mapped = max_low_pfn_mapped;
-
-#ifdef CONFIG_X86_64
-	if (max_pfn > max_low_pfn) {
-		max_pfn_mapped = init_memory_mapping(1UL<<32,
-						     max_pfn<<PAGE_SHIFT);
-		/* can we preseve max_low_pfn ?*/
-		max_low_pfn = max_pfn;
-	}
-#endif
+	early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
 }
 
 /*
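
A note on the added pgt_buf_top = 0 line (the in-tree comment reads "stop the wrong using"): once the early buffer has been reserved or freed, nothing may allocate from it any more. A simplified sketch of the guard this arms follows; the real alloc_low_page() differs in detail, so treat it as an assumption about the mechanism rather than the exact code:

/* Simplified illustration only, not the kernel's exact allocator. */
static void *alloc_low_page(void)
{
	unsigned long pfn = pgt_buf_end++;

	/*
	 * With pgt_buf_top forced to 0 this check always fires, so a
	 * late caller panics instead of silently reusing pagetable
	 * pages that were already reserved or handed back.
	 */
	if (pfn >= pgt_buf_top)
		panic("alloc_low_page: ran out of early pagetable space");

	return __va(pfn << PAGE_SHIFT);
}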