Commit 6f80b68e authored by Yinghai Lu, committed by H. Peter Anvin

x86, mm, Xen: Remove mapping_pagetable_reserve()



The page table area is now pre-mapped after
	x86, mm: setup page table in top-down
	x86, mm: Remove early_memremap workaround for page table accessing on 64bit

mapping_pagetable_reserve is not used anymore, so remove it.

Also remove the write-protect operation in mask_rw_pte(): the modified
alloc_low_page() now always returns pages that are already mapped, and
xen_alloc_pte_init(), xen_alloc_pmd_init(), etc. mark the page RO before
hooking it into the pagetable automatically.
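
For reference, a minimal sketch of why the 64-bit check can go away. The
simplified mask_rw_pte() below is exactly what this patch leaves behind;
the xen_alloc_pte_init() body is condensed for illustration (the real hook
in arch/x86/xen/mmu.c also pins the page), so treat it as a sketch rather
than the exact code:

/*
 * After this patch the 64-bit mask_rw_pte() is a plain pass-through:
 * pages from alloc_low_page() are already mapped, and the Xen alloc
 * hooks write-protect page table pages before hooking them in.
 */
static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
{
	return pte;
}

/*
 * Condensed illustration of the alloc-time hook: the page handed in is
 * already mapped RW, so it only needs to be made RO (the real
 * xen_alloc_pte_init() additionally pins it via pin_pagetable_pfn()).
 */
static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
{
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}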

-v2: add changelog about mask_rw_pte() from Stefano.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1353123563-3103-27-git-send-email-yinghai@kernel.org


Cc: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent 9985b4c6
+0 −1
@@ -301,7 +301,6 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 /* Install a pte for a particular vaddr in kernel space. */
 void set_pte_vaddr(unsigned long vaddr, pte_t pte);
 
-extern void native_pagetable_reserve(u64 start, u64 end);
 #ifdef CONFIG_X86_32
 extern void native_pagetable_init(void);
 #else
+0 −12
@@ -68,17 +68,6 @@ struct x86_init_oem {
 	void (*banner)(void);
 };
 
-/**
- * struct x86_init_mapping - platform specific initial kernel pagetable setup
- * @pagetable_reserve:	reserve a range of addresses for kernel pagetable usage
- *
- * For more details on the purpose of this hook, look in
- * init_memory_mapping and the commit that added it.
- */
-struct x86_init_mapping {
-	void (*pagetable_reserve)(u64 start, u64 end);
-};
-
 /**
  * struct x86_init_paging - platform specific paging functions
  * @pagetable_init:	platform specific paging initialization call to setup
@@ -136,7 +125,6 @@ struct x86_init_ops {
 	struct x86_init_mpparse		mpparse;
 	struct x86_init_irqs		irqs;
 	struct x86_init_oem		oem;
-	struct x86_init_mapping		mapping;
 	struct x86_init_paging		paging;
 	struct x86_init_timers		timers;
 	struct x86_init_iommu		iommu;
+0 −4
@@ -62,10 +62,6 @@ struct x86_init_ops x86_init __initdata = {
 		.banner			= default_banner,
 	},
 
-	.mapping = {
-		.pagetable_reserve		= native_pagetable_reserve,
-	},
-
 	.paging = {
 		.pagetable_init		= native_pagetable_init,
 	},
+0 −4
@@ -112,10 +112,6 @@ static void __init probe_page_size_mask(void)
 		__supported_pte_mask |= _PAGE_GLOBAL;
 	}
 }
-void __init native_pagetable_reserve(u64 start, u64 end)
-{
-	memblock_reserve(start, end - start);
-}
 
 #ifdef CONFIG_X86_32
 #define NR_RANGE_MR 3
+0 −28
@@ -1178,20 +1178,6 @@ static void xen_exit_mmap(struct mm_struct *mm)
 
 static void xen_post_allocator_init(void);
 
-static __init void xen_mapping_pagetable_reserve(u64 start, u64 end)
-{
-	/* reserve the range used */
-	native_pagetable_reserve(start, end);
-
-	/* set as RW the rest */
-	printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end,
-			PFN_PHYS(pgt_buf_top));
-	while (end < PFN_PHYS(pgt_buf_top)) {
-		make_lowmem_page_readwrite(__va(end));
-		end += PAGE_SIZE;
-	}
-}
-
 #ifdef CONFIG_X86_64
 static void __init xen_cleanhighmap(unsigned long vaddr,
 				    unsigned long vaddr_end)
@@ -1503,19 +1489,6 @@ static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
 #else /* CONFIG_X86_64 */
 static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
 {
-	unsigned long pfn = pte_pfn(pte);
-
-	/*
-	 * If the new pfn is within the range of the newly allocated
-	 * kernel pagetable, and it isn't being mapped into an
-	 * early_ioremap fixmap slot as a freshly allocated page, make sure
-	 * it is RO.
-	 */
-	if (((!is_early_ioremap_ptep(ptep) &&
-			pfn >= pgt_buf_start && pfn < pgt_buf_top)) ||
-			(is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1)))
-		pte = pte_wrprotect(pte);
-
 	return pte;
 }
 #endif /* CONFIG_X86_64 */
@@ -2197,7 +2170,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
 
 void __init xen_init_mmu_ops(void)
 {
-	x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve;
 	x86_init.paging.pagetable_init = xen_pagetable_init;
 	pv_mmu_ops = xen_mmu_ops;