Commit 580a586c authored by Peter Zijlstra, committed by Linus Torvalds
Browse files

asm-generic/tlb: rename HAVE_MMU_GATHER_NO_GATHER

Towards a more consistent naming scheme.

Link: http://lkml.kernel.org/r/20200116064531.483522-9-aneesh.kumar@linux.ibm.com


Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3af4bd03
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -402,7 +402,7 @@ config MMU_GATHER_PAGE_SIZE
config MMU_GATHER_NO_RANGE
	bool

-config HAVE_MMU_GATHER_NO_GATHER
+config MMU_GATHER_NO_GATHER
	bool

config ARCH_HAVE_NMI_SAFE_CMPXCHG
+1 −1
Original line number Diff line number Diff line
@@ -163,7 +163,7 @@ config S390
	select HAVE_PERF_USER_STACK_DUMP
	select HAVE_MEMBLOCK_NODE_MAP
	select HAVE_MEMBLOCK_PHYS_MAP
-	select HAVE_MMU_GATHER_NO_GATHER
+	select MMU_GATHER_NO_GATHER
	select HAVE_MOD_ARCH_SPECIFIC
	select HAVE_NOP_MCOUNT
	select HAVE_OPROFILE
+12 −2
Original line number Diff line number Diff line
@@ -143,6 +143,16 @@
 *  MMU_GATHER_NO_RANGE
 *
 *  Use this if your architecture lacks an efficient flush_tlb_range().
+ *
+ *  MMU_GATHER_NO_GATHER
+ *
+ *  If the option is set the mmu_gather will not track individual pages for
+ *  delayed page free anymore. A platform that enables the option needs to
+ *  provide its own implementation of the __tlb_remove_page_size() function to
+ *  free pages.
+ *
+ *  This is useful if your architecture already flushes TLB entries in the
+ *  various ptep_get_and_clear() functions.
 */

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
@@ -202,7 +212,7 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */


-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
@@ -277,7 +287,7 @@ struct mmu_gather {

	unsigned int		batch_count;

-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];
+5 −5
Original line number Diff line number Diff line
@@ -11,7 +11,7 @@
#include <asm/pgalloc.h>
#include <asm/tlb.h>

-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#ifndef CONFIG_MMU_GATHER_NO_GATHER

static bool tlb_next_batch(struct mmu_gather *tlb)
{
@@ -89,7 +89,7 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
	return false;
}

-#endif /* HAVE_MMU_GATHER_NO_GATHER */
+#endif /* MMU_GATHER_NO_GATHER */

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE

@@ -180,7 +180,7 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
	tlb_table_flush(tlb);
#endif
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb_batch_pages_flush(tlb);
#endif
}
@@ -211,7 +211,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
	/* Is it from 0 to ~0? */
	tlb->fullmm     = !(start | (end+1));

-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb->need_flush_all = 0;
	tlb->local.next = NULL;
	tlb->local.nr   = 0;
@@ -271,7 +271,7 @@ void tlb_finish_mmu(struct mmu_gather *tlb,

	tlb_flush_mmu(tlb);

-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb_batch_list_free(tlb);
#endif
	dec_tlb_flush_pending(tlb->mm);