Commit b521c43f authored by Christoph Hellwig, committed by Linus Torvalds

mm: remove vmap_page_range_noflush and vunmap_page_range



These have non-static aliases called map_kernel_range_noflush and
unmap_kernel_range_noflush that differ only slightly in calling
convention: they take addr + size instead of an end address.
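
At a typical call site the convention change looks like this (a sketch
distilled from the hunks below, not an extra hunk of this patch):

	/* before: static, end-based helper */
	vunmap_page_range(addr, addr + size);

	/* after: non-static, size-based helper */
	unmap_kernel_range_noflush(addr, size);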

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Airlie <airlied@linux.ie>
Cc: Gao Xiang <xiang@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Haiyang Zhang <haiyangz@microsoft.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Laura Abbott <labbott@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Michael Kelley <mikelley@microsoft.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Sakari Ailus <sakari.ailus@linux.intel.com>
Cc: Stephen Hemminger <sthemmin@microsoft.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Wei Liu <wei.liu@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mackerras <paulus@ozlabs.org>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Link: http://lkml.kernel.org/r/20200414131348.444715-14-hch@lst.de


Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 78a0e8c4
mm/vmalloc.c +40 −58
@@ -128,10 +128,24 @@ static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
 	} while (p4d++, addr = next, addr != end);
 }
 
-static void vunmap_page_range(unsigned long addr, unsigned long end)
+/**
+ * unmap_kernel_range_noflush - unmap kernel VM area
+ * @addr: start of the VM area to unmap
+ * @size: size of the VM area to unmap
+ *
+ * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size specify
+ * should have been allocated using get_vm_area() and its friends.
+ *
+ * NOTE:
+ * This function does NOT do any cache flushing.  The caller is responsible
+ * for calling flush_cache_vunmap() on to-be-mapped areas before calling this
+ * function and flush_tlb_kernel_range() after.
+ */
+void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
 {
-	pgd_t *pgd;
+	unsigned long end = addr + size;
 	unsigned long next;
+	pgd_t *pgd;
 
 	BUG_ON(addr >= end);
 	pgd = pgd_offset_k(addr);
@@ -220,18 +234,30 @@ static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
 	return 0;
 }
 
-/*
- * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
- * will have pfns corresponding to the "pages" array.
- *
- * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
- */
-static int vmap_page_range_noflush(unsigned long start, unsigned long end,
-				   pgprot_t prot, struct page **pages)
+/**
+ * map_kernel_range_noflush - map kernel VM area with the specified pages
+ * @addr: start of the VM area to map
+ * @size: size of the VM area to map
+ * @prot: page protection flags to use
+ * @pages: pages to map
+ *
+ * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size specify should
+ * have been allocated using get_vm_area() and its friends.
+ *
+ * NOTE:
+ * This function does NOT do any cache flushing.  The caller is responsible for
+ * calling flush_cache_vmap() on to-be-mapped areas before calling this
+ * function.
+ *
+ * RETURNS:
+ * The number of pages mapped on success, -errno on failure.
+ */
+int map_kernel_range_noflush(unsigned long addr, unsigned long size,
+			     pgprot_t prot, struct page **pages)
 {
-	pgd_t *pgd;
+	unsigned long end = addr + size;
 	unsigned long next;
-	unsigned long addr = start;
+	pgd_t *pgd;
 	int err = 0;
 	int nr = 0;
 
@@ -252,7 +278,7 @@ static int vmap_page_range(unsigned long start, unsigned long end,
 {
 	int ret;
 
-	ret = vmap_page_range_noflush(start, end, prot, pages);
+	ret = map_kernel_range_noflush(start, end - start, prot, pages);
 	flush_cache_vmap(start, end);
 	return ret;
 }
@@ -1227,7 +1253,7 @@ EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
  */
 static void unmap_vmap_area(struct vmap_area *va)
 {
-	vunmap_page_range(va->va_start, va->va_end);
+	unmap_kernel_range_noflush(va->va_start, va->va_end - va->va_start);
 }
 
 /*
@@ -1687,7 +1713,7 @@ static void vb_free(unsigned long addr, unsigned long size)
 	rcu_read_unlock();
 	BUG_ON(!vb);
 
-	vunmap_page_range(addr, addr + size);
+	unmap_kernel_range_noflush(addr, size);
 
 	if (debug_pagealloc_enabled_static())
 		flush_tlb_kernel_range(addr, addr + size);
@@ -1985,50 +2011,6 @@ void __init vmalloc_init(void)
 	vmap_initialized = true;
 }
 
-/**
- * map_kernel_range_noflush - map kernel VM area with the specified pages
- * @addr: start of the VM area to map
- * @size: size of the VM area to map
- * @prot: page protection flags to use
- * @pages: pages to map
- *
- * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size
- * specify should have been allocated using get_vm_area() and its
- * friends.
- *
- * NOTE:
- * This function does NOT do any cache flushing.  The caller is
- * responsible for calling flush_cache_vmap() on to-be-mapped areas
- * before calling this function.
- *
- * RETURNS:
- * The number of pages mapped on success, -errno on failure.
- */
-int map_kernel_range_noflush(unsigned long addr, unsigned long size,
-			     pgprot_t prot, struct page **pages)
-{
-	return vmap_page_range_noflush(addr, addr + size, prot, pages);
-}
-
-/**
- * unmap_kernel_range_noflush - unmap kernel VM area
- * @addr: start of the VM area to unmap
- * @size: size of the VM area to unmap
- *
- * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size
- * specify should have been allocated using get_vm_area() and its
- * friends.
- *
- * NOTE:
- * This function does NOT do any cache flushing.  The caller is
- * responsible for calling flush_cache_vunmap() on to-be-mapped areas
- * before calling this function and flush_tlb_kernel_range() after.
- */
-void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
-{
-	vunmap_page_range(addr, addr + size);
-}
-
 /**
  * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
  * @addr: start of the VM area to unmap
@@ -2042,7 +2024,7 @@ void unmap_kernel_range(unsigned long addr, unsigned long size)
 	unsigned long end = addr + size;
 
 	flush_cache_vunmap(addr, end);
-	vunmap_page_range(addr, end);
+	unmap_kernel_range_noflush(addr, size);
 	flush_tlb_kernel_range(addr, end);
 }
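
The in-tree callers above also show the flush protocol the new kernel-doc
spells out: flush caches after mapping, and flush caches before plus the
TLB after unmapping, since the _noflush variants do neither themselves.
As a hedged sketch of a hypothetical out-of-tree caller (not part of this
commit; nr_pages and pages are assumed to be prepared elsewhere, error
handling abbreviated):

	struct vm_struct *area;
	unsigned long addr, size;
	int ret;

	/* Reserve a kernel VM area, as the kernel-doc requires. */
	area = get_vm_area(nr_pages << PAGE_SHIFT, VM_MAP);
	if (!area)
		return -ENOMEM;
	addr = (unsigned long)area->addr;
	size = get_vm_area_size(area);

	/* Map the pages; the helper does no cache flushing itself. */
	ret = map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages);
	if (ret < 0)
		return ret;
	flush_cache_vmap(addr, addr + size);

	/* ... use the mapping ... */

	/* Tear down: flush caches, unmap, then flush the TLB. */
	flush_cache_vunmap(addr, addr + size);
	unmap_kernel_range_noflush(addr, size);
	flush_tlb_kernel_range(addr, addr + size);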