Commit 7900757c authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/hash64: Restrict page table lookup using init_mm with __flush_hash_table_range



This is only used with init_mm currently. Walking init_mm is much simpler
because we don't need to handle concurrent page table updates the way we do
for other mm contexts.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200505071729.54912-5-aneesh.kumar@linux.ibm.com
parent ec4abf1e
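
For context, the rationale above is that kernel page tables in init_mm are not torn down concurrently, so an init_mm-only lookup does not need the THP handling (the is_thp out-parameter) that find_current_mm_pte() provides. Below is a minimal sketch of such a helper, in the spirit of the find_init_mm_pte() used in the diff and assuming the existing __find_linux_pte() walker; it is illustrative only, not necessarily the exact upstream implementation.

static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned int *hshift)
{
	/*
	 * Sketch: walk only the kernel (init_mm) page tables. Kernel
	 * mappings are not freed concurrently, so no THP out-parameter
	 * is needed and NULL is passed for is_thp.
	 */
	return __find_linux_pte(init_mm.pgd, ea, NULL, hshift);
}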
+1 −2
@@ -113,8 +113,7 @@ static inline void hash__flush_tlb_kernel_range(unsigned long start,
struct mmu_gather;
extern void hash__tlb_flush(struct mmu_gather *tlb);
/* Private function for use by PCI IO mapping code */
-extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
-				     unsigned long end);
+extern void __flush_hash_table_range(unsigned long start, unsigned long end);
extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
				unsigned long addr);
#endif /*  _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H */
+1 −1
@@ -100,7 +100,7 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
			 pci_name(bus->self));

#ifdef CONFIG_PPC_BOOK3S_64
-		__flush_hash_table_range(&init_mm, res->start + _IO_BASE,
+		__flush_hash_table_range(res->start + _IO_BASE,
					 res->end + _IO_BASE + 1);
#endif
		return 0;
+3 −13
@@ -176,7 +176,6 @@ void hash__tlb_flush(struct mmu_gather *tlb)
 *                            from the hash table (and the TLB). But keeps
 *                            the linux PTEs intact.
 *
- * @mm		: mm_struct of the target address space (generally init_mm)
 * @start	: starting address
 * @end         : ending address (not included in the flush)
 *
@@ -189,17 +188,14 @@ void hash__tlb_flush(struct mmu_gather *tlb)
 * Because of that usage pattern, it is implemented for small size rather
 * than speed.
 */
-void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
-			      unsigned long end)
+void __flush_hash_table_range(unsigned long start, unsigned long end)
{
	bool is_thp;
	int hugepage_shift;
	unsigned long flags;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);

-	BUG_ON(!mm->pgd);
-
	/*
	 * Note: Normally, we should only ever use a batch within a
@@ -212,21 +208,15 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	for (; start < end; start += PAGE_SIZE) {
-		pte_t *ptep = find_current_mm_pte(mm->pgd, start, &is_thp,
-						  &hugepage_shift);
+		pte_t *ptep = find_init_mm_pte(start, &hugepage_shift);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
-		if (is_thp)
-			trace_hugepage_invalidate(start, pte);
		if (!(pte & H_PAGE_HASHPTE))
			continue;
-		if (unlikely(is_thp))
-			hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte);
-		else
-			hpte_need_flush(mm, start, ptep, pte, hugepage_shift);
+		hpte_need_flush(&init_mm, start, ptep, pte, hugepage_shift);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
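
With the mm argument gone, a caller that needs to drop hash entries for a kernel virtual range (as the PCI IO unmap path above does) now passes only the address range. A small usage sketch; vaddr and size are hypothetical placeholders, not values from this commit:

	/* Hypothetical: flush hash entries covering an ioremapped range. */
	unsigned long start = (unsigned long)vaddr;	/* vaddr: placeholder */
	__flush_hash_table_range(start, start + size);	/* size: placeholder */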