Commit c74df32c authored by Hugh Dickins; committed by Linus Torvalds

[PATCH] mm: ptd_alloc takes ptlock

Second step in pushing down the page_table_lock.  Remove the temporary
bridging hack from __pud_alloc, __pmd_alloc, __pte_alloc: expect callers not
to hold page_table_lock, whether it's on init_mm or a user mm; take
page_table_lock internally to check if a racing task already allocated.
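
As a sketch of that internal check (simplified from mm/memory.c as of this series; the mm->nr_ptes and page-state accounting are omitted here):

	int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
	{
		struct page *new = pte_alloc_one(mm, address);
		if (!new)
			return -ENOMEM;

		spin_lock(&mm->page_table_lock);
		if (pmd_present(*pmd))		/* raced: another task populated it */
			pte_free(new);
		else
			pmd_populate(mm, pmd, new);
		spin_unlock(&mm->page_table_lock);
		return 0;
	}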

Convert their callers from common code.  But avoid coming back to change them
again later: instead of moving the spin_lock(&mm->page_table_lock) down,
switch over to new macros pte_alloc_map_lock and pte_unmap_unlock, which
encapsulate the mapping+locking and unlocking+unmapping together, and in the
end may use alternatives to the mm page_table_lock itself.
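
Each conversion has the same shape; roughly (error paths elided, not taken verbatim from any one call site):

	spinlock_t *ptl;
	pte_t *pte;

	/* before: caller locked, then mapped */
	spin_lock(&mm->page_table_lock);
	pte = pte_alloc_map(mm, pmd, addr);
	...
	pte_unmap(pte);
	spin_unlock(&mm->page_table_lock);

	/* after: map+lock and unlock+unmap in one step each */
	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;
	...
	pte_unmap_unlock(pte, ptl);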

These callers all hold mmap_sem (some exclusively, some not), so at no level
can a page table be whipped away from beneath them; and pte_alloc uses the
"atomic" pmd_present to test whether it needs to allocate.  It appears that on
all arches we can safely descend without page_table_lock.
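
Illustratively, a walker holding mmap_sem can now descend lockless until the pte level (a sketch of the usual already-populated walk, using the standard *_none_or_clear_bad guards):

	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return;
	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		return;
	pmd = pmd_offset(pud, addr);
	if (pmd_none_or_clear_bad(pmd))
		return;
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	/* ... examine or modify *pte under ptl ... */
	pte_unmap_unlock(pte, ptl);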

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 1bb3630e
fs/exec.c  +5 −9
@@ -309,25 +309,24 @@ void install_arg_page(struct vm_area_struct *vma,
 	pud_t * pud;
 	pmd_t * pmd;
 	pte_t * pte;
+	spinlock_t *ptl;
 
 	if (unlikely(anon_vma_prepare(vma)))
-		goto out_sig;
+		goto out;
 
 	flush_dcache_page(page);
 	pgd = pgd_offset(mm, address);
-
-	spin_lock(&mm->page_table_lock);
 	pud = pud_alloc(mm, pgd, address);
 	if (!pud)
 		goto out;
 	pmd = pmd_alloc(mm, pud, address);
 	if (!pmd)
 		goto out;
-	pte = pte_alloc_map(mm, pmd, address);
+	pte = pte_alloc_map_lock(mm, pmd, address, &ptl);
 	if (!pte)
 		goto out;
 	if (!pte_none(*pte)) {
-		pte_unmap(pte);
+		pte_unmap_unlock(pte, ptl);
 		goto out;
 	}
 	inc_mm_counter(mm, anon_rss);
@@ -335,14 +334,11 @@ void install_arg_page(struct vm_area_struct *vma,
 	set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte(
 					page, vma->vm_page_prot))));
 	page_add_anon_rmap(page, vma, address);
-	pte_unmap(pte);
-	spin_unlock(&mm->page_table_lock);
+	pte_unmap_unlock(pte, ptl);
 
 	/* no need for flush_tlb */
 	return;
 out:
-	spin_unlock(&mm->page_table_lock);
-out_sig:
 	__free_page(page);
 	force_sig(SIGKILL, current);
 }
include/linux/mm.h  +18 −0
@@ -779,10 +779,28 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 }
 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
 
+#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
+({							\
+	spinlock_t *__ptl = &(mm)->page_table_lock;	\
+	pte_t *__pte = pte_offset_map(pmd, address);	\
+	*(ptlp) = __ptl;				\
+	spin_lock(__ptl);				\
+	__pte;						\
+})
+
+#define pte_unmap_unlock(pte, ptl)	do {		\
+	spin_unlock(ptl);				\
+	pte_unmap(pte);					\
+} while (0)
+
 #define pte_alloc_map(mm, pmd, address)			\
 	((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
 		NULL: pte_offset_map(pmd, address))
 
+#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
+	((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
+		NULL: pte_offset_map_lock(mm, pmd, address, ptlp))
+
 #define pte_alloc_kernel(pmd, address)			\
 	((unlikely(!pmd_present(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
 		NULL: pte_offset_kernel(pmd, address))
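
For a feel of how these macros read in a pte-range walker (a sketch in the style of the mprotect/mremap walkers, not part of this hunk):

	spinlock_t *ptl;
	pte_t *pte;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		/* inspect or modify *pte while ptl is held */
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);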
kernel/fork.c  +0 −2
@@ -255,7 +255,6 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 		/*
 		 * Link in the new vma and copy the page table entries.
 		 */
-		spin_lock(&mm->page_table_lock);
 		*pprev = tmp;
 		pprev = &tmp->vm_next;
 
@@ -265,7 +264,6 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 
 		mm->map_count++;
 		retval = copy_page_range(mm, oldmm, tmp);
-		spin_unlock(&mm->page_table_lock);
 
 		if (tmp->vm_ops && tmp->vm_ops->open)
 			tmp->vm_ops->open(tmp);
mm/fremap.c  +18 −30
@@ -63,23 +63,20 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	pud_t *pud;
 	pgd_t *pgd;
 	pte_t pte_val;
+	spinlock_t *ptl;
 
 	BUG_ON(vma->vm_flags & VM_RESERVED);
 
 	pgd = pgd_offset(mm, addr);
-	spin_lock(&mm->page_table_lock);
-
 	pud = pud_alloc(mm, pgd, addr);
 	if (!pud)
-		goto err_unlock;
-
+		goto out;
 	pmd = pmd_alloc(mm, pud, addr);
 	if (!pmd)
-		goto err_unlock;
-
-	pte = pte_alloc_map(mm, pmd, addr);
+		goto out;
+	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
 	if (!pte)
-		goto err_unlock;
+		goto out;
 
 	/*
 	 * This page may have been truncated. Tell the
@@ -89,10 +86,10 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	inode = vma->vm_file->f_mapping->host;
 	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 	if (!page->mapping || page->index >= size)
-		goto err_unlock;
+		goto unlock;
 	err = -ENOMEM;
 	if (page_mapcount(page) > INT_MAX/2)
-		goto err_unlock;
+		goto unlock;
 
 	if (pte_none(*pte) || !zap_pte(mm, vma, addr, pte))
 		inc_mm_counter(mm, file_rss);
@@ -101,17 +98,15 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	set_pte_at(mm, addr, pte, mk_pte(page, prot));
 	page_add_file_rmap(page);
 	pte_val = *pte;
-	pte_unmap(pte);
 	update_mmu_cache(vma, addr, pte_val);
 
 	err = 0;
-err_unlock:
-	spin_unlock(&mm->page_table_lock);
+unlock:
+	pte_unmap_unlock(pte, ptl);
 out:
 	return err;
 }
 EXPORT_SYMBOL(install_page);
 
 
 /*
  * Install a file pte to a given virtual memory address, release any
  * previously existing mapping.
@@ -125,23 +120,20 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 	pud_t *pud;
 	pgd_t *pgd;
 	pte_t pte_val;
+	spinlock_t *ptl;
 
 	BUG_ON(vma->vm_flags & VM_RESERVED);
 
 	pgd = pgd_offset(mm, addr);
-	spin_lock(&mm->page_table_lock);
-
 	pud = pud_alloc(mm, pgd, addr);
 	if (!pud)
-		goto err_unlock;
-
+		goto out;
 	pmd = pmd_alloc(mm, pud, addr);
 	if (!pmd)
-		goto err_unlock;
-
-	pte = pte_alloc_map(mm, pmd, addr);
+		goto out;
+	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
 	if (!pte)
-		goto err_unlock;
+		goto out;
 
 	if (!pte_none(*pte) && zap_pte(mm, vma, addr, pte)) {
 		update_hiwater_rss(mm);
@@ -150,17 +142,13 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
 	pte_val = *pte;
-	pte_unmap(pte);
 	update_mmu_cache(vma, addr, pte_val);
-	spin_unlock(&mm->page_table_lock);
-	return 0;
-
-err_unlock:
-	spin_unlock(&mm->page_table_lock);
+	pte_unmap_unlock(pte, ptl);
+	err = 0;
 out:
 	return err;
 }
 
 
 /***
  * sys_remap_file_pages - remap arbitrary pages of a shared backing store
  *                        file within an existing vma.
mm/hugetlb.c  +8 −4
@@ -277,12 +277,15 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 	unsigned long addr;
 
 	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
+		src_pte = huge_pte_offset(src, addr);
+		if (!src_pte)
+			continue;
 		dst_pte = huge_pte_alloc(dst, addr);
 		if (!dst_pte)
 			goto nomem;
+		spin_lock(&dst->page_table_lock);
 		spin_lock(&src->page_table_lock);
-		src_pte = huge_pte_offset(src, addr);
-		if (src_pte && !pte_none(*src_pte)) {
+		if (!pte_none(*src_pte)) {
 			entry = *src_pte;
 			ptepage = pte_page(entry);
 			get_page(ptepage);
@@ -290,6 +293,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 			set_huge_pte_at(dst, addr, dst_pte, entry);
 		}
 		spin_unlock(&src->page_table_lock);
+		spin_unlock(&dst->page_table_lock);
 	}
 	return 0;
 
@@ -354,7 +358,6 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
 
 	hugetlb_prefault_arch_hook(mm);
 
-	spin_lock(&mm->page_table_lock);
 	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
 		unsigned long idx;
 		pte_t *pte = huge_pte_alloc(mm, addr);
@@ -389,11 +392,12 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
 				goto out;
 			}
 		}
+		spin_lock(&mm->page_table_lock);
 		add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE);
 		set_huge_pte_at(mm, addr, pte, make_huge_pte(vma, page));
+		spin_unlock(&mm->page_table_lock);
 	}
 out:
-	spin_unlock(&mm->page_table_lock);
 	return ret;
 }
