Commit a31acd3e authored by Peter Zijlstra

x86/mm: Page size aware flush_tlb_mm_range()



Use the new tlb_get_unmap_shift() to determine the stride of the
INVLPG loop.
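
For illustration, a sketch of the idea (identifiers taken from the patch below,
this loop itself is not new code): with a stride shift, the partial-flush loop
walks the range at the mapping's granularity,

	/* stride_shift is PAGE_SHIFT (12) for 4K mappings, PMD_SHIFT (21) for 2M */
	for (addr = start; addr < end; addr += 1UL << stride_shift)
		__flush_tlb_one_user(addr);

so a region backed by 2M mappings costs one INVLPG per 2M page instead of 512.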

Cc: Nick Piggin <npiggin@gmail.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
parent a5b966ae
arch/x86/include/asm/tlb.h  +14 −7
@@ -6,16 +6,23 @@
 #define tlb_end_vma(tlb, vma) do { } while (0)
 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
 
-#define tlb_flush(tlb)							\
-{									\
-	if (!tlb->fullmm && !tlb->need_flush_all) 			\
-		flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL);	\
-	else								\
-		flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL);	\
-}
+static inline void tlb_flush(struct mmu_gather *tlb);
 
 #include <asm-generic/tlb.h>
 
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+	unsigned long start = 0UL, end = TLB_FLUSH_ALL;
+	unsigned int stride_shift = tlb_get_unmap_shift(tlb);
+
+	if (!tlb->fullmm && !tlb->need_flush_all) {
+		start = tlb->start;
+		end = tlb->end;
+	}
+
+	flush_tlb_mm_range(tlb->mm, start, end, stride_shift);
+}
+
 /*
  * While x86 architecture in general requires an IPI to perform TLB
  * shootdown, enablement code for several hypervisors overrides
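
tlb_get_unmap_shift() itself comes from a companion asm-generic/tlb.h change and
is not part of this diff. As a hedged sketch (the cleared_ptes/pmds/puds/p4ds
fields are assumptions based on that companion change, not shown here), it is
expected to report the smallest page-table level that was actually unmapped:

	static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
	{
		/* The smallest granule cleared wins: one 4K PTE forces a 4K stride. */
		if (tlb->cleared_ptes)
			return PAGE_SHIFT;
		if (tlb->cleared_pmds)
			return PMD_SHIFT;
		if (tlb->cleared_puds)
			return PUD_SHIFT;
		if (tlb->cleared_p4ds)
			return P4D_SHIFT;

		return PAGE_SHIFT;
	}

With that, the new tlb_flush() above flushes at the finest granularity that was
actually unmapped, falling back to a 4K stride whenever ordinary PTEs were cleared.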
arch/x86/include/asm/tlbflush.h  +8 −4
@@ -547,6 +547,7 @@ struct flush_tlb_info {
 	unsigned long		start;
 	unsigned long		end;
 	u64			new_tlb_gen;
+	unsigned int		stride_shift;
 };
 
 #define local_flush_tlb() __flush_tlb()
@@ -554,16 +555,19 @@ struct flush_tlb_info {
 #define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
 
 #define flush_tlb_range(vma, start, end)				\
-		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
+	flush_tlb_mm_range((vma)->vm_mm, start, end,			\
+			   ((vma)->vm_flags & VM_HUGETLB)		\
+				? huge_page_shift(hstate_vma(vma))	\
+				: PAGE_SHIFT)
 
 extern void flush_tlb_all(void);
 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
-				unsigned long end, unsigned long vmflag);
+				unsigned long end, unsigned int stride_shift);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
 {
-	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
+	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT);
 }
 
 void native_flush_tlb_others(const struct cpumask *cpumask,
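
A usage sketch of the new flush_tlb_range() (the VMA and range here are
hypothetical, not from the patch): for a hugetlb VMA backed by 2 MiB pages,
huge_page_shift(hstate_vma(vma)) is 21, so flushing a 64 MiB range costs
(64 MiB >> 21) = 32 invalidations instead of the 16384 a PAGE_SHIFT stride
would need:

	/* hypothetical 2 MiB hugetlb VMA; flush its first 64 MiB */
	flush_tlb_range(vma, vma->vm_start, vma->vm_start + (64UL << 20));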
arch/x86/kernel/ldt.c  +1 −1
@@ -273,7 +273,7 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
 	map_ldt_struct_to_user(mm);
 
 	va = (unsigned long)ldt_slot_va(slot);
-	flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, 0);
+	flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, PAGE_SHIFT);
 
 	ldt->slot = slot;
 	return 0;
arch/x86/kernel/vm86_32.c  +1 −1
@@ -199,7 +199,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
 	pte_unmap_unlock(pte, ptl);
 out:
 	up_write(&mm->mmap_sem);
-	flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, 0UL);
+	flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, PAGE_SHIFT);
 }
 
 
arch/x86/mm/tlb.c  +8 −9
@@ -528,17 +528,16 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
 	    f->new_tlb_gen == local_tlb_gen + 1 &&
 	    f->new_tlb_gen == mm_tlb_gen) {
 		/* Partial flush */
-		unsigned long addr;
-		unsigned long nr_pages = (f->end - f->start) >> PAGE_SHIFT;
+		unsigned long nr_invalidate = (f->end - f->start) >> f->stride_shift;
+		unsigned long addr = f->start;
 
-		addr = f->start;
 		while (addr < f->end) {
 			__flush_tlb_one_user(addr);
-			addr += PAGE_SIZE;
+			addr += 1UL << f->stride_shift;
 		}
 		if (local)
-			count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_pages);
-		trace_tlb_flush(reason, nr_pages);
+			count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_invalidate);
+		trace_tlb_flush(reason, nr_invalidate);
 	} else {
 		/* Full flush. */
 		local_flush_tlb();
@@ -623,12 +622,13 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
 
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
-				unsigned long end, unsigned long vmflag)
+				unsigned long end, unsigned int stride_shift)
 {
 	int cpu;
 
 	struct flush_tlb_info info __aligned(SMP_CACHE_BYTES) = {
 		.mm = mm,
+		.stride_shift = stride_shift,
 	};
 
 	cpu = get_cpu();
@@ -638,8 +638,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 
 	/* Should we flush just the requested range? */
 	if ((end != TLB_FLUSH_ALL) &&
-	    !(vmflag & VM_HUGETLB) &&
-	    ((end - start) >> PAGE_SHIFT) <= tlb_single_page_flush_ceiling) {
+	    ((end - start) >> stride_shift) <= tlb_single_page_flush_ceiling) {
 		info.start = start;
 		info.end = end;
 	} else {
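
A worked example of how the stride interacts with tlb_single_page_flush_ceiling
(numbers hypothetical): unmapping 64 MiB of 2 MiB pages now gives

	nr_invalidate = (64UL << 20) >> 21;	/* = 32, <= 33: ranged flush     */
	/* previously: (64UL << 20) >> 12 = 16384, > 33: full TLB flush        */

so the ranged path issues 32 INVLPGs, whereas before this patch the same unmap
computed 16384 invalidations (and hugetlb ranges were excluded via VM_HUGETLB
anyway), always falling back to a full TLB flush.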