Commit 73f693c3 authored by Joerg Roedel, committed by Linus Torvalds
Browse files

mm: remove vmalloc_sync_(un)mappings()



These functions are not needed anymore because the vmalloc and ioremap
mappings are now synchronized when they are created or torn down.

Remove all callers and function definitions.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Tested-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Acked-by: Andy Lutomirski <luto@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H . Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Link: http://lkml.kernel.org/r/20200515140023.25469-7-joro@8bytes.org


Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 86cf69f1
Loading
Loading
Loading
Loading
+0 −37
Original line number Diff line number Diff line
@@ -214,26 +214,6 @@ void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
	}
}

/*
 * Propagate vmalloc-area page-table entries into all page tables.
 *
 * Nothing to do when the kernel PMD is shared (SHARED_KERNEL_PMD);
 * otherwise delegate to arch_sync_kernel_mappings() for the whole
 * VMALLOC_START..VMALLOC_END range.
 */
static void vmalloc_sync(void)
{
	if (SHARED_KERNEL_PMD)
		return;

	arch_sync_kernel_mappings(VMALLOC_START, VMALLOC_END);
}

/* Sync newly created vmalloc mappings into all page tables. */
void vmalloc_sync_mappings(void)
{
	vmalloc_sync();
}

/*
 * Sync vmalloc unmappings into all page tables.  On 32-bit this is
 * the same operation as the mapping case, so reuse vmalloc_sync().
 */
void vmalloc_sync_unmappings(void)
{
	vmalloc_sync();
}

/*
 * 32-bit:
 *
@@ -336,23 +316,6 @@ out:

#else /* CONFIG_X86_64: */

/* 64-bit variant: propagate new vmalloc-area mappings to every PGD. */
void vmalloc_sync_mappings(void)
{
	/*
	 * 64-bit mappings might allocate new p4d/pud pages
	 * that need to be propagated to all tasks' PGDs.
	 */
	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
}

/* 64-bit variant: intentionally a no-op (see comment below). */
void vmalloc_sync_unmappings(void)
{
	/*
	 * Unmappings never allocate or free p4d/pud pages.
	 * No work is required here.
	 */
}

/*
 * 64-bit:
 *
+0 −6
Original line number Diff line number Diff line
@@ -167,12 +167,6 @@ int ghes_estatus_pool_init(int num_ghes)
	if (!addr)
		goto err_pool_alloc;

	/*
	 * New allocation must be visible in all pgd before it can be found by
	 * an NMI allocating from the pool.
	 */
	vmalloc_sync_mappings();

	rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
	if (rc)
		goto err_pool_add;
+0 −2
Original line number Diff line number Diff line
@@ -130,8 +130,6 @@ extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
							unsigned long pgoff);
void vmalloc_sync_mappings(void);
void vmalloc_sync_unmappings(void);

/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
+0 −1
Original line number Diff line number Diff line
@@ -519,7 +519,6 @@ NOKPROBE_SYMBOL(notify_die);

/*
 * Add @nb to the die notifier chain.
 *
 * vmalloc_sync_mappings() is called first so that any vmalloc'd
 * notifier state is visible in all page tables before the chain can
 * fire — NOTE(review): presumably because die notifiers may run in
 * fault/NMI context where a lazy vmalloc fault cannot be taken;
 * confirm against the arch fault handler.
 *
 * Returns the result of atomic_notifier_chain_register() (0 on
 * success).
 */
int register_die_notifier(struct notifier_block *nb)
{
	vmalloc_sync_mappings();
	return atomic_notifier_chain_register(&die_chain, nb);
}
EXPORT_SYMBOL_GPL(register_die_notifier);
+0 −12
Original line number Diff line number Diff line
@@ -8527,18 +8527,6 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
	allocate_snapshot = false;
#endif

	/*
	 * Because of some magic with the way alloc_percpu() works on
	 * x86_64, we need to synchronize the pgd of all the tables,
	 * otherwise the trace events that happen in x86_64 page fault
	 * handlers can't cope with accessing the chance that a
	 * alloc_percpu()'d memory might be touched in the page fault trace
	 * event. Oh, and we need to audit all other alloc_percpu() and vmalloc()
	 * calls in tracing, because something might get triggered within a
	 * page fault trace event!
	 */
	vmalloc_sync_mappings();

	return 0;
}

Loading