Commit c1b4ec85 authored by Linus Torvalds
Pull x86 mm updates from Ingo Molnar:
 "Do not sync vmalloc/ioremap mappings on x86-64 kernels.

  Hopefully now without the bugs!"

* tag 'x86-mm-2020-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm/64: Update comment in preallocate_vmalloc_pages()
  x86/mm/64: Do not sync vmalloc/ioremap mappings
parents b85cac57 7a27ef5e
Loading
Loading
Loading
Loading
+0 −2
Original line number Diff line number Diff line
@@ -159,6 +159,4 @@ extern unsigned int ptrs_per_p4d;

/* First PGD slot of the kernel half of the address space — presumably the
 * kernel mappings occupy the upper half of the PGD page; TODO confirm
 * against the full pgtable_64_types.h. */
#define PGD_KERNEL_START	((PAGE_SIZE / 2) / sizeof(pgd_t))

/*
 * Page-table levels whose kernel-range modifications must be propagated
 * to all top-level page tables (consumed by the generic vmalloc/ioremap
 * sync code): the PGD level when 5-level paging is enabled, otherwise
 * the P4D level (which is then folded into the PGD).
 */
#define ARCH_PAGE_TABLE_SYNC_MASK	(pgtable_l5_enabled() ?	PGTBL_PGD_MODIFIED : PGTBL_P4D_MODIFIED)

#endif /* _ASM_X86_PGTABLE_64_DEFS_H */
+10 −10
Original line number Diff line number Diff line
@@ -217,11 +217,6 @@ static void sync_global_pgds(unsigned long start, unsigned long end)
		sync_global_pgds_l4(start, end);
}

/*
 * arch_sync_kernel_mappings - propagate kernel page-table updates in
 * [start, end) to every top-level page table, by forwarding the range
 * to sync_global_pgds() (defined earlier in this file).
 *
 * NOTE(review): per the commit message this is the hook invoked when
 * vmalloc/ioremap mappings change — verify against the generic caller
 * in mm/, which gates invocation on ARCH_PAGE_TABLE_SYNC_MASK.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
	sync_global_pgds(start, end);
}

/*
 * NOTE: This function is marked __ref because it calls __init function
 * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
@@ -1257,14 +1252,19 @@ static void __init preallocate_vmalloc_pages(void)
		if (!p4d)
			goto failed;

		/*
		 * With 5-level paging the P4D level is not folded. So the PGDs
		 * are now populated and there is no need to walk down to the
		 * PUD level.
		 */
		if (pgtable_l5_enabled())
			continue;

		/*
		 * The goal here is to allocate all possibly required
		 * hardware page tables pointed to by the top hardware
		 * level.
		 *
		 * On 4-level systems, the P4D layer is folded away and
		 * the above code does no preallocation.  Below, go down
		 * to the pud _software_ level to ensure the second
		 * hardware level is allocated on 4-level systems too.
		 */
		lvl = "pud";
		pud = pud_alloc(&init_mm, p4d, addr);
		if (!pud)