Commit 3d4247fc authored by Christophe Leroy, committed by Michael Ellerman

powerpc/32: Add support of KASAN_VMALLOC



Add support of KASAN_VMALLOC on PPC32.

To allow this, the early shadow covering the VMALLOC space needs to
be removed once the high_memory variable is set and before memblock
is freed.
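
As a rough illustration (not part of the patch): generic KASAN maps an
address to its shadow with kasan_mem_to_shadow(), and during early boot
every shadow PTE for this range points at the zeroed
kasan_early_shadow_page, which is why the unmapping loop below only
clears PTEs whose physical address still matches that page. The
KASAN_SHADOW_OFFSET and VMALLOC_START values here are made up for the
example:

#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT        3               /* 1 shadow byte per 8 bytes */
#define KASAN_SHADOW_OFFSET             0xe0000000UL    /* hypothetical placement */

/* Same arithmetic as kasan_mem_to_shadow() in include/linux/kasan.h. */
static unsigned long mem_to_shadow(unsigned long addr)
{
        return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
        unsigned long vmalloc_start = 0xf1000000UL;     /* hypothetical */

        printf("shadow of %#lx starts at %#lx\n",
               vmalloc_start, mem_to_shadow(vmalloc_start));
        return 0;
}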

The VMALLOC area also needs to be aligned so that its boundaries are
covered by a full shadow page.
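
A minimal, self-contained sketch of why the patch rounds VMALLOC_END
down with _ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT):
assuming a 4 KiB PAGE_SIZE and a page-aligned KASAN_SHADOW_OFFSET, one
shadow page covers 32 KiB of address space, so the alignment guarantees
that the shadow of the VMALLOC end lands exactly on a page boundary
(the ioremap_bot value is made up):

#include <assert.h>

#define PAGE_SIZE                       4096UL
#define KASAN_SHADOW_SCALE_SHIFT        3

int main(void)
{
        /* One page of shadow covers PAGE_SIZE << 3 = 32 KiB of memory. */
        unsigned long covered = PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT;
        unsigned long ioremap_bot = 0xff001234UL;       /* hypothetical */

        /* Equivalent of _ALIGN_DOWN(ioremap_bot, covered) for a power-of-two size. */
        unsigned long vmalloc_end = ioremap_bot & ~(covered - 1);

        /* The shadow of VMALLOC_END now ends on a full page boundary. */
        assert((vmalloc_end >> KASAN_SHADOW_SCALE_SHIFT) % PAGE_SIZE == 0);
        return 0;
}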

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/031dec5487bde9b2181c8b3c9800e1879cf98c1a.1579024426.git.christophe.leroy@c-s.fr
parent 0f9aee0c
+1 −0
@@ -173,6 +173,7 @@ config PPC
	select HAVE_ARCH_HUGE_VMAP		if PPC_BOOK3S_64 && PPC_RADIX_MMU
	select HAVE_ARCH_JUMP_LABEL
	select HAVE_ARCH_KASAN			if PPC32
	select HAVE_ARCH_KASAN_VMALLOC		if PPC32
	select HAVE_ARCH_KGDB
	select HAVE_ARCH_MMAP_RND_BITS
	select HAVE_ARCH_MMAP_RND_COMPAT_BITS	if COMPAT
+5 −0
@@ -193,7 +193,12 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif

#ifdef CONFIG_KASAN_VMALLOC
#define VMALLOC_END	_ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END	ioremap_bot
#endif

#ifndef __ASSEMBLY__
#include <linux/sched.h>
+2 −0
@@ -31,9 +31,11 @@
void kasan_early_init(void);
void kasan_mmu_init(void);
void kasan_init(void);
void kasan_late_init(void);
#else
static inline void kasan_init(void) { }
static inline void kasan_mmu_init(void) { }
static inline void kasan_late_init(void) { }
#endif

#endif /* __ASSEMBLY */
+5 −0
@@ -114,7 +114,12 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif

#ifdef CONFIG_KASAN_VMALLOC
#define VMALLOC_END	_ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END	ioremap_bot
#endif

/*
 * Bits in a linux-style PTE.  These match the bits in the
+32 −1
@@ -129,6 +129,31 @@ static void __init kasan_remap_early_shadow_ro(void)
	flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END);
}

static void __init kasan_unmap_early_shadow_vmalloc(void)
{
	unsigned long k_start = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_START);
	unsigned long k_end = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_END);
	unsigned long k_cur;
	phys_addr_t pa = __pa(kasan_early_shadow_page);

	if (!early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
		int ret = kasan_init_shadow_page_tables(k_start, k_end);

		if (ret)
			panic("kasan: kasan_init_shadow_page_tables() failed");
	}
	for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
		pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
		pte_t *ptep = pte_offset_kernel(pmd, k_cur);

		if ((pte_val(*ptep) & PTE_RPN_MASK) != pa)
			continue;

		__set_pte_at(&init_mm, k_cur, ptep, __pte(0), 0);
	}
	flush_tlb_kernel_range(k_start, k_end);
}

void __init kasan_mmu_init(void)
{
	int ret;
@@ -165,7 +190,13 @@ void __init kasan_init(void)
	pr_info("KASAN init done\n");
}

#ifdef CONFIG_MODULES
void __init kasan_late_init(void)
{
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_unmap_early_shadow_vmalloc();
}

#if defined(CONFIG_MODULES) && !defined(CONFIG_KASAN_VMALLOC)
void *module_alloc(unsigned long size)
{
	void *base;