Commit 9577dd74 authored by Andrey Konovalov, committed by Linus Torvalds
Browse files

kasan: rename kasan_zero_page to kasan_early_shadow_page

With tag based KASAN mode the early shadow value is 0xff and not 0x00, so
this patch renames kasan_zero_(page|pte|pmd|pud|p4d) to
kasan_early_shadow_(page|pte|pmd|pud|p4d) to avoid confusion.

Link: http://lkml.kernel.org/r/3fed313280ebf4f88645f5b89ccbc066d320e177.1544099024.git.andreyknvl@google.com


Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Suggested-by: Mark Rutland <mark.rutland@arm.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b2f557ea
Loading
Loading
Loading
Loading
+24 −19
Original line number Diff line number Diff line
@@ -47,7 +47,8 @@ static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
				      bool early)
{
	if (pmd_none(READ_ONCE(*pmdp))) {
		phys_addr_t pte_phys = early ? __pa_symbol(kasan_zero_pte)
		phys_addr_t pte_phys = early ?
				__pa_symbol(kasan_early_shadow_pte)
					: kasan_alloc_zeroed_page(node);
		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
	}
@@ -60,7 +61,8 @@ static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
				      bool early)
{
	if (pud_none(READ_ONCE(*pudp))) {
		phys_addr_t pmd_phys = early ? __pa_symbol(kasan_zero_pmd)
		phys_addr_t pmd_phys = early ?
				__pa_symbol(kasan_early_shadow_pmd)
					: kasan_alloc_zeroed_page(node);
		__pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE);
	}
@@ -72,7 +74,8 @@ static pud_t *__init kasan_pud_offset(pgd_t *pgdp, unsigned long addr, int node,
				      bool early)
{
	if (pgd_none(READ_ONCE(*pgdp))) {
		phys_addr_t pud_phys = early ? __pa_symbol(kasan_zero_pud)
		phys_addr_t pud_phys = early ?
				__pa_symbol(kasan_early_shadow_pud)
					: kasan_alloc_zeroed_page(node);
		__pgd_populate(pgdp, pud_phys, PMD_TYPE_TABLE);
	}
@@ -87,7 +90,8 @@ static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
	pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);

	do {
		phys_addr_t page_phys = early ? __pa_symbol(kasan_zero_page)
		phys_addr_t page_phys = early ?
				__pa_symbol(kasan_early_shadow_page)
					: kasan_alloc_zeroed_page(node);
		next = addr + PAGE_SIZE;
		set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
@@ -205,13 +209,13 @@ void __init kasan_init(void)
	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
			   early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));

	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
				    (void *)mod_shadow_start);
	kasan_populate_zero_shadow((void *)kimg_shadow_end,
	kasan_populate_early_shadow((void *)kimg_shadow_end,
				    kasan_mem_to_shadow((void *)PAGE_OFFSET));

	if (kimg_shadow_start > mod_shadow_end)
		kasan_populate_zero_shadow((void *)mod_shadow_end,
		kasan_populate_early_shadow((void *)mod_shadow_end,
					    (void *)kimg_shadow_start);

	for_each_memblock(memory, reg) {
@@ -227,14 +231,15 @@ void __init kasan_init(void)
	}

	/*
	 * KAsan may reuse the contents of kasan_zero_pte directly, so we
	 * should make sure that it maps the zero page read-only.
	 * KAsan may reuse the contents of kasan_early_shadow_pte directly,
	 * so we should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_zero_pte[i],
			pfn_pte(sym_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
		set_pte(&kasan_early_shadow_pte[i],
			pfn_pte(sym_to_pfn(kasan_early_shadow_page),
				PAGE_KERNEL_RO));

	memset(kasan_zero_page, 0, PAGE_SIZE);
	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));

	/* At this point kasan is fully initialized. Enable error messages */
+9 −8
Original line number Diff line number Diff line
@@ -111,11 +111,12 @@ static void note_page(struct seq_file *m, struct pg_state *st,
}

#ifdef CONFIG_KASAN
static void note_kasan_zero_page(struct seq_file *m, struct pg_state *st)
static void note_kasan_early_shadow_page(struct seq_file *m,
						struct pg_state *st)
{
	unsigned int prot;

	prot = pte_val(*kasan_zero_pte) &
	prot = pte_val(*kasan_early_shadow_pte) &
		(_PAGE_PROTECT | _PAGE_INVALID | _PAGE_NOEXEC);
	note_page(m, st, prot, 4);
}
@@ -154,8 +155,8 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
	int i;

#ifdef CONFIG_KASAN
	if ((pud_val(*pud) & PAGE_MASK) == __pa(kasan_zero_pmd)) {
		note_kasan_zero_page(m, st);
	if ((pud_val(*pud) & PAGE_MASK) == __pa(kasan_early_shadow_pmd)) {
		note_kasan_early_shadow_page(m, st);
		return;
	}
#endif
@@ -185,8 +186,8 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
	int i;

#ifdef CONFIG_KASAN
	if ((p4d_val(*p4d) & PAGE_MASK) == __pa(kasan_zero_pud)) {
		note_kasan_zero_page(m, st);
	if ((p4d_val(*p4d) & PAGE_MASK) == __pa(kasan_early_shadow_pud)) {
		note_kasan_early_shadow_page(m, st);
		return;
	}
#endif
@@ -215,8 +216,8 @@ static void walk_p4d_level(struct seq_file *m, struct pg_state *st,
	int i;

#ifdef CONFIG_KASAN
	if ((pgd_val(*pgd) & PAGE_MASK) == __pa(kasan_zero_p4d)) {
		note_kasan_zero_page(m, st);
	if ((pgd_val(*pgd) & PAGE_MASK) == __pa(kasan_early_shadow_p4d)) {
		note_kasan_early_shadow_page(m, st);
		return;
	}
#endif
+20 −13
Original line number Diff line number Diff line
@@ -107,7 +107,8 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PGDIR_SIZE) &&
			    end - address >= PGDIR_SIZE) {
				pgd_populate(&init_mm, pg_dir, kasan_zero_p4d);
				pgd_populate(&init_mm, pg_dir,
						kasan_early_shadow_p4d);
				address = (address + PGDIR_SIZE) & PGDIR_MASK;
				continue;
			}
@@ -120,7 +121,8 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, P4D_SIZE) &&
			    end - address >= P4D_SIZE) {
				p4d_populate(&init_mm, p4_dir, kasan_zero_pud);
				p4d_populate(&init_mm, p4_dir,
						kasan_early_shadow_pud);
				address = (address + P4D_SIZE) & P4D_MASK;
				continue;
			}
@@ -133,7 +135,8 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PUD_SIZE) &&
			    end - address >= PUD_SIZE) {
				pud_populate(&init_mm, pu_dir, kasan_zero_pmd);
				pud_populate(&init_mm, pu_dir,
						kasan_early_shadow_pmd);
				address = (address + PUD_SIZE) & PUD_MASK;
				continue;
			}
@@ -146,7 +149,8 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PMD_SIZE) &&
			    end - address >= PMD_SIZE) {
				pmd_populate(&init_mm, pm_dir, kasan_zero_pte);
				pmd_populate(&init_mm, pm_dir,
						kasan_early_shadow_pte);
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
@@ -188,7 +192,7 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
				pte_val(*pt_dir) = __pa(page) | pgt_prot;
				break;
			case POPULATE_ZERO_SHADOW:
				page = kasan_zero_page;
				page = kasan_early_shadow_page;
				pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
				break;
			}
@@ -256,14 +260,14 @@ void __init kasan_early_init(void)
	unsigned long vmax;
	unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
	pte_t pte_z;
	pmd_t pmd_z = __pmd(__pa(kasan_zero_pte) | _SEGMENT_ENTRY);
	pud_t pud_z = __pud(__pa(kasan_zero_pmd) | _REGION3_ENTRY);
	p4d_t p4d_z = __p4d(__pa(kasan_zero_pud) | _REGION2_ENTRY);
	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);

	kasan_early_detect_facilities();
	if (!has_nx)
		pgt_prot &= ~_PAGE_NOEXEC;
	pte_z = __pte(__pa(kasan_zero_page) | pgt_prot);
	pte_z = __pte(__pa(kasan_early_shadow_page) | pgt_prot);

	memsize = get_mem_detect_end();
	if (!memsize)
@@ -292,10 +296,13 @@ void __init kasan_early_init(void)
	}

	/* init kasan zero shadow */
	crst_table_init((unsigned long *)kasan_zero_p4d, p4d_val(p4d_z));
	crst_table_init((unsigned long *)kasan_zero_pud, pud_val(pud_z));
	crst_table_init((unsigned long *)kasan_zero_pmd, pmd_val(pmd_z));
	memset64((u64 *)kasan_zero_pte, pte_val(pte_z), PTRS_PER_PTE);
	crst_table_init((unsigned long *)kasan_early_shadow_p4d,
				p4d_val(p4d_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pud,
				pud_val(pud_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pmd,
				pmd_val(pmd_z));
	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);

	shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;
	pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
+6 −5
Original line number Diff line number Diff line
@@ -377,7 +377,7 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,

/*
 * This is an optimization for KASAN=y case. Since all kasan page tables
 * eventually point to the kasan_zero_page we could call note_page()
 * eventually point to the kasan_early_shadow_page we could call note_page()
 * right away without walking through lower level page tables. This saves
 * us dozens of seconds (minutes for 5-level config) while checking for
 * W+X mapping or reading kernel_page_tables debugfs file.
@@ -385,10 +385,11 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
				void *pt)
{
	if (__pa(pt) == __pa(kasan_zero_pmd) ||
	    (pgtable_l5_enabled() && __pa(pt) == __pa(kasan_zero_p4d)) ||
	    __pa(pt) == __pa(kasan_zero_pud)) {
		pgprotval_t prot = pte_flags(kasan_zero_pte[0]);
	if (__pa(pt) == __pa(kasan_early_shadow_pmd) ||
	    (pgtable_l5_enabled() &&
			__pa(pt) == __pa(kasan_early_shadow_p4d)) ||
	    __pa(pt) == __pa(kasan_early_shadow_pud)) {
		pgprotval_t prot = pte_flags(kasan_early_shadow_pte[0]);
		note_page(m, st, __pgprot(prot), 0, 5);
		return true;
	}
+29 −26
Original line number Diff line number Diff line
@@ -211,7 +211,8 @@ static void __init kasan_early_p4d_populate(pgd_t *pgd,
	unsigned long next;

	if (pgd_none(*pgd)) {
		pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_zero_p4d));
		pgd_entry = __pgd(_KERNPG_TABLE |
					__pa_nodebug(kasan_early_shadow_p4d));
		set_pgd(pgd, pgd_entry);
	}

@@ -222,7 +223,8 @@ static void __init kasan_early_p4d_populate(pgd_t *pgd,
		if (!p4d_none(*p4d))
			continue;

		p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_zero_pud));
		p4d_entry = __p4d(_KERNPG_TABLE |
					__pa_nodebug(kasan_early_shadow_pud));
		set_p4d(p4d, p4d_entry);
	} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
}
@@ -261,10 +263,11 @@ static struct notifier_block kasan_die_notifier = {
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL | _PAGE_ENC;
	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
	p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;
	pteval_t pte_val = __pa_nodebug(kasan_early_shadow_page) |
				__PAGE_KERNEL | _PAGE_ENC;
	pmdval_t pmd_val = __pa_nodebug(kasan_early_shadow_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_early_shadow_pmd) | _KERNPG_TABLE;
	p4dval_t p4d_val = __pa_nodebug(kasan_early_shadow_pud) | _KERNPG_TABLE;

	/* Mask out unsupported __PAGE_KERNEL bits: */
	pte_val &= __default_kernel_pte_mask;
@@ -273,16 +276,16 @@ void __init kasan_early_init(void)
	p4d_val &= __default_kernel_pte_mask;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_zero_pte[i] = __pte(pte_val);
		kasan_early_shadow_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_zero_pmd[i] = __pmd(pmd_val);
		kasan_early_shadow_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_zero_pud[i] = __pud(pud_val);
		kasan_early_shadow_pud[i] = __pud(pud_val);

	for (i = 0; pgtable_l5_enabled() && i < PTRS_PER_P4D; i++)
		kasan_zero_p4d[i] = __p4d(p4d_val);
		kasan_early_shadow_p4d[i] = __p4d(p4d_val);

	kasan_map_early_shadow(early_top_pgt);
	kasan_map_early_shadow(init_top_pgt);
@@ -326,7 +329,7 @@ void __init kasan_init(void)

	clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);

	kasan_populate_zero_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
	kasan_populate_early_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

	for (i = 0; i < E820_MAX_ENTRIES; i++) {
@@ -338,41 +341,41 @@ void __init kasan_init(void)

	shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
	shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
	shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin,
						PAGE_SIZE);
	shadow_cpu_entry_begin = (void *)round_down(
			(unsigned long)shadow_cpu_entry_begin, PAGE_SIZE);

	shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
					CPU_ENTRY_AREA_MAP_SIZE);
	shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
	shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end,
					PAGE_SIZE);
	shadow_cpu_entry_end = (void *)round_up(
			(unsigned long)shadow_cpu_entry_end, PAGE_SIZE);

	kasan_populate_zero_shadow(
	kasan_populate_early_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		shadow_cpu_entry_begin);

	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
			      (unsigned long)shadow_cpu_entry_end, 0);

	kasan_populate_zero_shadow(shadow_cpu_entry_end,
	kasan_populate_early_shadow(shadow_cpu_entry_end,
			kasan_mem_to_shadow((void *)__START_KERNEL_map));

	kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
			      (unsigned long)kasan_mem_to_shadow(_end),
			      early_pfn_to_nid(__pa(_stext)));

	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)MODULES_END),
					(void *)KASAN_SHADOW_END);

	load_cr3(init_top_pgt);
	__flush_tlb_all();

	/*
	 * kasan_zero_page has been used as early shadow memory, thus it may
	 * contain some garbage. Now we can clear and write protect it, since
	 * after the TLB flush no one should write to it.
	 * kasan_early_shadow_page has been used as early shadow memory, thus
	 * it may contain some garbage. Now we can clear and write protect it,
	 * since after the TLB flush no one should write to it.
	 */
	memset(kasan_zero_page, 0, PAGE_SIZE);
	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte;
		pgprot_t prot;
@@ -380,8 +383,8 @@ void __init kasan_init(void)
		prot = __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC);
		pgprot_val(prot) &= __default_kernel_pte_mask;

		pte = __pte(__pa(kasan_zero_page) | pgprot_val(prot));
		set_pte(&kasan_zero_pte[i], pte);
		pte = __pte(__pa(kasan_early_shadow_page) | pgprot_val(prot));
		set_pte(&kasan_early_shadow_pte[i], pte);
	}
	/* Flush TLBs again to be sure that write protection applied. */
	__flush_tlb_all();
Loading