Commit 69a1593a authored by Christophe Leroy's avatar Christophe Leroy Committed by Michael Ellerman
Browse files

powerpc/32s: Set up the early hash table at all times.



At the time being, an early hash table is set up when
CONFIG_KASAN is selected.

There is nothing wrong with setting up such an early hash table
all the time, even if it is not used. This is a statically
allocated 256 kB table which lies in the init data section.

This makes the code simpler and may in the future allow setting
up early IO mappings with fixmap instead of hard-coding BATs.

Put create_hpte() and flush_hash_pages() in the .ref.text section
in order to avoid warning for the reference to early_hash[]. This
reference is removed by MMU_init_hw_patch() before init memory is
freed.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/b8f8101c368b8a6451844a58d7bd7d83c14cf2aa.1601566529.git.christophe.leroy@csgroup.eu
parent 63f9d9df
Loading
Loading
Loading
Loading
+5 −8
Original line number Diff line number Diff line
@@ -155,9 +155,9 @@ __after_mmu_off:

	bl	initial_bats
	bl	load_segment_registers
#ifdef CONFIG_KASAN
BEGIN_MMU_FTR_SECTION
	bl	early_hash_table
#endif
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#if defined(CONFIG_BOOTX_TEXT)
	bl	setup_disp_bat
#endif
@@ -936,7 +936,6 @@ _ENTRY(__restore_cpu_setup)
 * Load stuff into the MMU.  Intended to be called with
 * IR=0 and DR=0.
 */
#ifdef CONFIG_KASAN
early_hash_table:
	sync			/* Force all PTE updates to finish */
	isync
@@ -947,8 +946,10 @@ early_hash_table:
	lis	r6, early_hash - PAGE_OFFSET@h
	ori	r6, r6, 3	/* 256kB table */
	mtspr	SPRN_SDR1, r6
	lis	r6, early_hash@h
	lis	r3, Hash@ha
	stw	r6, Hash@l(r3)
	blr
#endif

load_up_mmu:
	sync			/* Force all PTE updates to finish */
@@ -1037,11 +1038,7 @@ start_here:
	bl	machine_init
	bl	__save_cpu_setup
	bl	MMU_init
#ifdef CONFIG_KASAN
BEGIN_MMU_FTR_SECTION
	bl	MMU_init_hw_patch
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#endif

/*
 * Go back to running unmapped so we can load up new values
+7 −2
Original line number Diff line number Diff line
@@ -15,6 +15,7 @@
 */

#include <linux/pgtable.h>
#include <linux/init.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/cputable.h>
@@ -284,9 +285,9 @@ _ASM_NOKPROBE_SYMBOL(add_hash_page)
 *
 * For speed, 4 of the instructions get patched once the size and
 * physical address of the hash table are known.  These definitions
 * of Hash_base and Hash_bits below are just an example.
 * of Hash_base and Hash_bits below are for the early hash table.
 */
Hash_base = 0xc0180000
Hash_base = early_hash
Hash_bits = 12				/* e.g. 256kB hash table */
Hash_msk = (((1 << Hash_bits) - 1) * 64)

@@ -307,6 +308,7 @@ Hash_msk = (((1 << Hash_bits) - 1) * 64)
#define HASH_LEFT	31-(LG_PTEG_SIZE+Hash_bits-1)
#define HASH_RIGHT	31-LG_PTEG_SIZE

__REF
_GLOBAL(create_hpte)
	/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
	rlwinm	r8,r5,32-9,30,30	/* _PAGE_RW -> PP msb */
@@ -473,6 +475,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)

	sync		/* make sure pte updates get to memory */
	blr
	.previous
_ASM_NOKPROBE_SYMBOL(create_hpte)

	.section .bss
@@ -493,6 +496,7 @@ htab_hash_searches:
 *
 * We assume that there is a hash table in use (Hash != 0).
 */
__REF
_GLOBAL(flush_hash_pages)
	/*
	 * We disable interrupts here, even on UP, because we want
@@ -626,6 +630,7 @@ _GLOBAL(flush_hash_pages)
19:	mtmsr	r10
	isync
	blr
	.previous
EXPORT_SYMBOL(flush_hash_pages)
_ASM_NOKPROBE_SYMBOL(flush_hash_pages)

+5 −9
Original line number Diff line number Diff line
@@ -31,6 +31,8 @@

#include <mm/mmu_decl.h>

u8 __initdata early_hash[SZ_256K] __aligned(SZ_256K) = {0};

struct hash_pte *Hash;
static unsigned long Hash_size, Hash_mask;
unsigned long _SDR1;
@@ -395,15 +397,6 @@ void __init MMU_init_hw(void)
	hash_mb2 = hash_mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;
	if (lg_n_hpteg > 16)
		hash_mb2 = 16 - LG_HPTEG_SIZE;

	/*
	 * When KASAN is selected, there is already an early temporary hash
	 * table and the switch to the final hash table is done later.
	 */
	if (IS_ENABLED(CONFIG_KASAN))
		return;

	MMU_init_hw_patch();
}

void __init MMU_init_hw_patch(void)
@@ -411,6 +404,9 @@ void __init MMU_init_hw_patch(void)
	unsigned int hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
	unsigned int hash = (unsigned int)Hash - PAGE_OFFSET;

	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return;

	if (ppc_md.progress)
		ppc_md.progress("hash:patch", 0x345);
	if (ppc_md.progress)
+0 −19
Original line number Diff line number Diff line
@@ -174,22 +174,6 @@ void __init kasan_late_init(void)
		kasan_unmap_early_shadow_vmalloc();
}

#ifdef CONFIG_PPC_BOOK3S_32
u8 __initdata early_hash[256 << 10] __aligned(256 << 10) = {0};

static void __init kasan_early_hash_table(void)
{
	unsigned int hash = __pa(early_hash);

	modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
	modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16);

	Hash = (struct hash_pte *)early_hash;
}
#else
static void __init kasan_early_hash_table(void) {}
#endif

void __init kasan_early_init(void)
{
	unsigned long addr = KASAN_SHADOW_START;
@@ -205,7 +189,4 @@ void __init kasan_early_init(void)
		next = pgd_addr_end(addr, end);
		pmd_populate_kernel(&init_mm, pmd, kasan_early_shadow_pte);
	} while (pmd++, addr = next, addr != end);

	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
		kasan_early_hash_table();
}