Commit 488ae6a2 authored by Steven Price, committed by Linus Torvalds

mm: pagewalk: allow walking without vma

Since commit 48684a65 ("mm: pagewalk: fix misbehavior of walk_page_range for
vma(VM_PFNMAP)"), walk_page_range() will report any kernel area as a hole,
because it lacks a vma.

This means each arch has had to re-implement page table walking when it is
needed, for example in the per-arch ptdump walker.

Remove the requirement to have a vma in the generic code and add a new
function walk_page_range_novma() which ignores the VMAs and simply walks
the page tables.
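
As a rough illustration (not part of this patch), a caller could use the new
interface as sketched below, e.g. to count the present leaf PTEs in a range of
the kernel page tables.  The count_kernel_ptes()/count_pte_entry() names are
made up for the example; only walk_page_range_novma(), struct mm_walk_ops and
init_mm come from the kernel:

/* Hypothetical example only: count present leaf PTEs in a kernel range. */
#include <linux/pagewalk.h>
#include <linux/mm.h>

static int count_pte_entry(pte_t *pte, unsigned long addr,
			   unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;

	return 0;
}

static const struct mm_walk_ops count_ops = {
	.pte_entry	= count_pte_entry,
};

static unsigned long count_kernel_ptes(unsigned long start, unsigned long end)
{
	unsigned long count = 0;

	/* The walk asserts that mmap_sem is held, even for init_mm. */
	down_read(&init_mm.mmap_sem);
	walk_page_range_novma(&init_mm, start, end, &count_ops, &count);
	up_read(&init_mm.mmap_sem);

	return count;
}

Note that without a vma the walk never splits huge mappings, so PMD/PUD leaf
entries are only visible to pmd_entry()/pud_entry() callbacks and would not be
counted by the sketch above.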

Link: http://lkml.kernel.org/r/20191218162402.45610-13-steven.price@arm.com
Signed-off-by: Steven Price <steven.price@arm.com>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexandre Ghiti <alex@ghiti.fr>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Hogan <jhogan@kernel.org>
Cc: James Morse <james.morse@arm.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: "Liang, Kan" <kan.liang@linux.intel.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Burton <paul.burton@mips.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Will Deacon <will@kernel.org>
Cc: Zong Li <zong.li@sifive.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3afc4236
include/linux/pagewalk.h  +5 −0
@@ -73,6 +73,7 @@ enum page_walk_action {
  * @mm:		mm_struct representing the target process of page table walk
  * @vma:	vma currently walked (NULL if walking outside vmas)
  * @action:	next action to perform (see enum page_walk_action)
+ * @no_vma:	walk ignoring vmas (vma will always be NULL)
  * @private:	private data for callbacks' usage
  *
  * (see the comment on walk_page_range() for more details)
@@ -82,12 +83,16 @@ struct mm_walk {
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;
 	enum page_walk_action action;
+	bool no_vma;
 	void *private;
 };
 
 int walk_page_range(struct mm_struct *mm, unsigned long start,
 		unsigned long end, const struct mm_walk_ops *ops,
 		void *private);
+int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
+			  unsigned long end, const struct mm_walk_ops *ops,
+			  void *private);
 int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
 		void *private);
 int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
mm/pagewalk.c  +32 −8
@@ -39,7 +39,7 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 	do {
 again:
 		next = pmd_addr_end(addr, end);
-		if (pmd_none(*pmd) || !walk->vma) {
+		if (pmd_none(*pmd) || (!walk->vma && !walk->no_vma)) {
 			if (ops->pte_hole)
 				err = ops->pte_hole(addr, next, walk);
 			if (err)
@@ -65,13 +65,16 @@ again:
 		 * Check this here so we only break down trans_huge
 		 * pages when we _need_ to
 		 */
-		if (walk->action == ACTION_CONTINUE ||
+		if ((!walk->vma && (pmd_leaf(*pmd) || !pmd_present(*pmd))) ||
+		    walk->action == ACTION_CONTINUE ||
 		    !(ops->pte_entry))
 			continue;
 
-		split_huge_pmd(walk->vma, pmd, addr);
-		if (pmd_trans_unstable(pmd))
-			goto again;
+		if (walk->vma) {
+			split_huge_pmd(walk->vma, pmd, addr);
+			if (pmd_trans_unstable(pmd))
+				goto again;
+		}
 
 		err = walk_pte_range(pmd, addr, next, walk);
 		if (err)
@@ -93,7 +96,7 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
 	do {
  again:
 		next = pud_addr_end(addr, end);
-		if (pud_none(*pud) || !walk->vma) {
+		if (pud_none(*pud) || (!walk->vma && !walk->no_vma)) {
 			if (ops->pte_hole)
 				err = ops->pte_hole(addr, next, walk);
 			if (err)
@@ -111,10 +114,12 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
 		if (walk->action == ACTION_AGAIN)
 			goto again;
 
-		if (walk->action == ACTION_CONTINUE ||
+		if ((!walk->vma && (pud_leaf(*pud) || !pud_present(*pud))) ||
+		    walk->action == ACTION_CONTINUE ||
 		    !(ops->pmd_entry || ops->pte_entry))
 			continue;
 
-		split_huge_pud(walk->vma, pud, addr);
+		if (walk->vma)
+			split_huge_pud(walk->vma, pud, addr);
 		if (pud_none(*pud))
 			goto again;
@@ -389,6 +394,25 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
 	return err;
 }
 
+int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
+			  unsigned long end, const struct mm_walk_ops *ops,
+			  void *private)
+{
+	struct mm_walk walk = {
+		.ops		= ops,
+		.mm		= mm,
+		.private	= private,
+		.no_vma		= true
+	};
+
+	if (start >= end || !walk.mm)
+		return -EINVAL;
+
+	lockdep_assert_held(&walk.mm->mmap_sem);
+
+	return __walk_page_range(start, end, &walk);
+}
+
 int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
 		void *private)
 {
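
Illustrative note (not from the patch itself): since there is no vma to pass
to split_huge_pmd()/split_huge_pud(), the no-vma walk neither splits nor
descends into leaf or non-present PMD/PUD entries; they are only reported to
the pmd_entry()/pud_entry() callbacks.  A hypothetical callback for kernel
block mappings might look like this (show_pmd_entry() is a made-up name):

#include <linux/pagewalk.h>
#include <linux/printk.h>

/* Hypothetical pmd_entry() callback for a walk_page_range_novma() walk. */
static int show_pmd_entry(pmd_t *pmd, unsigned long addr,
			  unsigned long next, struct mm_walk *walk)
{
	if (pmd_leaf(*pmd))
		pr_info("PMD block mapping: %#lx-%#lx\n", addr, next);

	/*
	 * Returning 0 lets non-leaf entries be walked down to the PTE
	 * level when a pte_entry() callback is also installed.
	 */
	return 0;
}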