Commit ef9285f6 authored by Peter Zijlstra, committed by Geert Uytterhoeven
Browse files

m68k: mm: Improve kernel_page_table()



With the PTE-tables now only being 256 bytes, allocating a full page
for them is a giant waste. Start by improving the boot time allocator
such that init_mm initialization will at least have optimal memory
density.

Many thanks to Will Deacon for his help with debugging and with ferreting
out lost information on these dusty MMUs.

Notes:

 - _TABLE_MASK is reduced to account for the shorter (256 byte)
   alignment of pte-tables, per the manual, table entries should only
   ever have state in the low 4 bits (Used,WrProt,Desc1,Desc0) so it is
   still longer than strictly required. (Thanks Will!!!)

 - Also use kernel_page_table() for the 020/030 zero_pgtable case and
   consequently remove the zero_pgtable init hack (will fix up later).

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Greg Ungerer <gerg@linux-m68k.org>
Tested-by: Michael Schmitz <schmitzmic@gmail.com>
Tested-by: Greg Ungerer <gerg@linux-m68k.org>
Link: https://lore.kernel.org/r/20200131125403.768263973@infradead.org


Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
parent ef22d8ab
Loading
Loading
Loading
Loading
+12 −1
Original line number Diff line number Diff line
@@ -23,7 +23,18 @@
#define _DESCTYPE_MASK	0x003

#define _CACHEMASK040	(~0x060)

/*
 * Currently set to the minimum alignment of table pointers (256 bytes).
 * The hardware only uses the low 4 bits for state:
 *
 *    3 - Used
 *    2 - Write Protected
 *  0,1 - Descriptor Type
 *
 * and has the rest of the bits reserved.
 */
#define _TABLE_MASK	(0xffffff00)

#define _PAGE_TABLE	(_PAGE_SHORT)
#define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NOCACHE)
+0 −5
Original line number Diff line number Diff line
@@ -42,7 +42,6 @@ EXPORT_SYMBOL(empty_zero_page);

#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
/* Early-boot pointer-table bookkeeping (Motorola MMU only). */
extern void init_pointer_table(unsigned long ptable);
/* NOTE: the old `extern pmd_t *zero_pgtable;` declaration is gone — the
 * zero-page mapping now uses kernel_page_table() like everything else. */
#endif

#ifdef CONFIG_MMU
@@ -135,10 +134,6 @@ static inline void init_pointer_tables(void)
		if (pud_present(*pud))
			init_pointer_table(pgd_page_vaddr(kernel_pg_dir[i]));
	}

	/* insert also pointer table that we used to unmap the zero page */
	if (zero_pgtable)
		init_pointer_table((unsigned long)zero_pgtable);
#endif
}

+29 −22
Original line number Diff line number Diff line
@@ -174,27 +174,35 @@ extern __initdata unsigned long m68k_init_mapped_size;

extern unsigned long availmem;

/* Current bump pointer into a page-sized slab of PTE tables. */
static pte_t *last_pte_table __initdata = NULL;

/*
 * Boot-time allocator for kernel PTE tables.
 *
 * PTE tables here are only 256 bytes (PTRS_PER_PTE entries), so handing
 * out a full page per table would waste most of it.  Instead, 256-byte
 * tables are carved sequentially out of a page-sized slab: whenever
 * last_pte_table lands on a page boundary (including the very first
 * call, since NULL is page-aligned), a fresh low-memory page is
 * allocated, zeroed, and passed through mmu_page_ctor(); otherwise the
 * next table comes from the current slab.
 *
 * Returns a pointer to a zeroed, ctor'd PTE table.  Panics on
 * allocation failure — this runs during early boot, so there is no
 * recovery path anyway.
 */
static pte_t * __init kernel_page_table(void)
{
	pte_t *pte_table = last_pte_table;

	if (((unsigned long)last_pte_table & ~PAGE_MASK) == 0) {
		/* Slab exhausted (or first call): start a new page. */
		pte_table = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
		if (!pte_table) {
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
					__func__, PAGE_SIZE, PAGE_SIZE);
		}

		clear_page(pte_table);
		mmu_page_ctor(pte_table);

		last_pte_table = pte_table;
	}

	/* Advance past the table we are handing out. */
	last_pte_table += PTRS_PER_PTE;

	return pte_table;
}

static pmd_t *last_pgtable __initdata = NULL;
pmd_t *zero_pgtable __initdata = NULL;
static pmd_t *last_pmd_table __initdata = NULL;

static pmd_t * __init kernel_ptr_table(void)
{
	if (!last_pgtable) {
	if (!last_pmd_table) {
		unsigned long pmd, last;
		int i;

@@ -213,25 +221,25 @@ static pmd_t * __init kernel_ptr_table(void)
				last = pmd;
		}

		last_pgtable = (pmd_t *)last;
		last_pmd_table = (pmd_t *)last;
#ifdef DEBUG
		printk("kernel_ptr_init: %p\n", last_pgtable);
		printk("kernel_ptr_init: %p\n", last_pmd_table);
#endif
	}

	last_pgtable += PTRS_PER_PMD;
	if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
		last_pgtable = (pmd_t *)memblock_alloc_low(PAGE_SIZE,
	last_pmd_table += PTRS_PER_PMD;
	if (((unsigned long)last_pmd_table & ~PAGE_MASK) == 0) {
		last_pmd_table = (pmd_t *)memblock_alloc_low(PAGE_SIZE,
							   PAGE_SIZE);
		if (!last_pgtable)
		if (!last_pmd_table)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		clear_page(last_pgtable);
		mmu_page_ctor(last_pgtable);
		clear_page(last_pmd_table);
		mmu_page_ctor(last_pmd_table);
	}

	return last_pgtable;
	return last_pmd_table;
}

static void __init map_node(int node)
@@ -294,8 +302,7 @@ static void __init map_node(int node)
#ifdef DEBUG
				printk ("[zero map]");
#endif
				zero_pgtable = kernel_ptr_table();
				pte_dir = (pte_t *)zero_pgtable;
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);

				pte_val(*pte_dir++) = 0;