Commit cbba6579 authored by Pekka Enberg, committed by Ingo Molnar
Browse files

x86: unify kernel_physical_mapping_init() call in init_memory_mapping()



Impact: cleanup

The 64-bit version of init_memory_mapping() uses the last mapped
address returned from kernel_physical_mapping_init() whereas the
32-bit version doesn't. This patch adds relevant ifdefs to both
versions of the function to reduce the diff between them.

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <1236257708-27269-8-git-send-email-penberg@cs.helsinki.fi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent c464573c
Loading
Loading
Loading
Loading
+9 −1
Original line number Diff line number Diff line
@@ -929,6 +929,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
	unsigned long page_size_mask = 0;
	unsigned long start_pfn, end_pfn;
	unsigned long pos;
	unsigned long ret;

	struct map_range mr[NR_RANGE_MR];
	int nr_range, i;
@@ -1040,11 +1041,18 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
	if (!after_bootmem)
		find_early_table_space(end, use_pse, use_gbpages);

#ifdef CONFIG_X86_32
	for (i = 0; i < nr_range; i++)
		kernel_physical_mapping_init(
				mr[i].start >> PAGE_SHIFT,
				mr[i].end >> PAGE_SHIFT,
				mr[i].page_size_mask == (1<<PG_LEVEL_2M));
	ret = end;
#else /* CONFIG_X86_64 */
	for (i = 0; i < nr_range; i++)
		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
						   mr[i].page_size_mask);
#endif

	early_ioremap_page_table_range_init();

@@ -1059,7 +1067,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
	if (!after_bootmem)
		early_memtest(start, end);

	return end >> PAGE_SHIFT;
	return ret >> PAGE_SHIFT;
}


+13 −8
Original line number Diff line number Diff line
@@ -686,10 +686,10 @@ static int save_mr(struct map_range *mr, int nr_range,
unsigned long __init_refok init_memory_mapping(unsigned long start,
					       unsigned long end)
{
	unsigned long last_map_addr = 0;
	unsigned long page_size_mask = 0;
	unsigned long start_pfn, end_pfn;
	unsigned long pos;
	unsigned long ret;

	struct map_range mr[NR_RANGE_MR];
	int nr_range, i;
@@ -819,10 +819,18 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
	if (!after_bootmem)
		find_early_table_space(end, use_pse, use_gbpages);

#ifdef CONFIG_X86_32
	for (i = 0; i < nr_range; i++)
		last_map_addr = kernel_physical_mapping_init(
					mr[i].start, mr[i].end,
		kernel_physical_mapping_init(
				mr[i].start >> PAGE_SHIFT,
				mr[i].end >> PAGE_SHIFT,
				mr[i].page_size_mask == (1<<PG_LEVEL_2M));
	ret = end;
#else /* CONFIG_X86_64 */
	for (i = 0; i < nr_range; i++)
		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
						   mr[i].page_size_mask);
#endif

	if (!after_bootmem)
		mmu_cr4_features = read_cr4();
@@ -832,13 +840,10 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
		reserve_early(table_start << PAGE_SHIFT,
				 table_end << PAGE_SHIFT, "PGTABLE");

	printk(KERN_INFO "last_map_addr: %lx end: %lx\n",
			 last_map_addr, end);

	if (!after_bootmem)
		early_memtest(start, end);

	return last_map_addr >> PAGE_SHIFT;
	return ret >> PAGE_SHIFT;
}

#ifndef CONFIG_NUMA