Commit 1f947a7a authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge fixes from Andrew Morton:
 "6 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm: proc: smaps_rollup: fix pss_locked calculation
  Rename include/{uapi => }/asm-generic/shmparam.h really
  Revert "mm: use early_pfn_to_nid in page_ext_init"
  mm/gup: fix gup_pmd_range() for dax
  Revert "mm: slowly shrink slabs with a relatively small number of objects"
  Revert "mm: don't reclaim inodes with many attached pages"
parents 991b9eb4 27dd768e
fs/inode.c +2 −5
@@ -730,11 +730,8 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
 		return LRU_REMOVED;
 	}
 
-	/*
-	 * Recently referenced inodes and inodes with many attached pages
-	 * get one more pass.
-	 */
-	if (inode->i_state & I_REFERENCED || inode->i_data.nrpages > 1) {
+	/* recently referenced inodes get one more pass */
+	if (inode->i_state & I_REFERENCED) {
 		inode->i_state &= ~I_REFERENCED;
 		spin_unlock(&inode->i_lock);
 		return LRU_ROTATE;
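
The revert restores plain second-chance aging: only the I_REFERENCED bit buys an inode one more trip around the LRU, and the attached-page count no longer matters. A minimal user-space sketch of that second-chance pattern, with hypothetical stand-in types rather than the kernel's real ones:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's LRU walk results. */
enum lru_status { LRU_REMOVED, LRU_ROTATE };

struct item {
	bool referenced;	/* analogue of inode->i_state & I_REFERENCED */
};

/*
 * Second-chance isolation: a referenced item loses its bit and is
 * rotated to the list tail; an unreferenced item is reclaimed.
 */
static enum lru_status isolate(struct item *it)
{
	if (it->referenced) {
		it->referenced = false;	/* consume the reference bit */
		return LRU_ROTATE;	/* one more pass around the list */
	}
	return LRU_REMOVED;
}

int main(void)
{
	struct item it = { .referenced = true };

	/* First pass rotates, second pass reclaims. */
	printf("%s\n", isolate(&it) == LRU_ROTATE ? "rotate" : "remove");
	printf("%s\n", isolate(&it) == LRU_ROTATE ? "rotate" : "remove");
	return 0;
}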
fs/proc/task_mmu.c +14 −8
@@ -423,7 +423,7 @@ struct mem_size_stats {
 };
 
 static void smaps_account(struct mem_size_stats *mss, struct page *page,
-		bool compound, bool young, bool dirty)
+		bool compound, bool young, bool dirty, bool locked)
 {
 	int i, nr = compound ? 1 << compound_order(page) : 1;
 	unsigned long size = nr * PAGE_SIZE;
@@ -450,24 +450,31 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
 		else
 			mss->private_clean += size;
 		mss->pss += (u64)size << PSS_SHIFT;
+		if (locked)
+			mss->pss_locked += (u64)size << PSS_SHIFT;
 		return;
 	}
 
 	for (i = 0; i < nr; i++, page++) {
 		int mapcount = page_mapcount(page);
+		unsigned long pss = (PAGE_SIZE << PSS_SHIFT);
 
 		if (mapcount >= 2) {
 			if (dirty || PageDirty(page))
 				mss->shared_dirty += PAGE_SIZE;
 			else
 				mss->shared_clean += PAGE_SIZE;
-			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
+			mss->pss += pss / mapcount;
+			if (locked)
+				mss->pss_locked += pss / mapcount;
 		} else {
 			if (dirty || PageDirty(page))
 				mss->private_dirty += PAGE_SIZE;
 			else
 				mss->private_clean += PAGE_SIZE;
-			mss->pss += PAGE_SIZE << PSS_SHIFT;
+			mss->pss += pss;
+			if (locked)
+				mss->pss_locked += pss;
 		}
 	}
 }
@@ -490,6 +497,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 {
 	struct mem_size_stats *mss = walk->private;
 	struct vm_area_struct *vma = walk->vma;
+	bool locked = !!(vma->vm_flags & VM_LOCKED);
 	struct page *page = NULL;
 
 	if (pte_present(*pte)) {
@@ -532,7 +540,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 	if (!page)
 		return;
 
-	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte));
+	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -541,6 +549,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 {
 	struct mem_size_stats *mss = walk->private;
 	struct vm_area_struct *vma = walk->vma;
+	bool locked = !!(vma->vm_flags & VM_LOCKED);
 	struct page *page;
 
 	/* FOLL_DUMP will return -EFAULT on huge zero page */
@@ -555,7 +564,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 		/* pass */;
 	else
 		VM_BUG_ON_PAGE(1, page);
-	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
+	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
 }
 #else
 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
@@ -737,11 +746,8 @@ static void smap_gather_stats(struct vm_area_struct *vma,
 		}
 	}
 #endif
-
 	/* mmap_sem is held in m_start */
 	walk_page_vma(vma, &smaps_walk);
-	if (vma->vm_flags & VM_LOCKED)
-		mss->pss_locked += mss->pss;
 }
 
 #define SEQ_PUT_DEC(str, val) \
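
Before this fix, smap_gather_stats() added the whole running mss->pss to mss->pss_locked once per VM_LOCKED vma; in smaps_rollup, where a single mem_size_stats accumulates across every vma, that repeatedly counted earlier vmas' pss as locked. The fix charges pss_locked page by page, divided by mapcount exactly as pss is. A user-space sketch of the fixed-point arithmetic, assuming a hypothetical two-page mix (PSS_SHIFT as in the kernel source):

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PSS_SHIFT 12	/* fixed-point shift, as in fs/proc/task_mmu.c */

struct page_info {
	int mapcount;	/* number of mappings of this page */
	bool locked;	/* page belongs to a VM_LOCKED vma */
};

int main(void)
{
	/* Hypothetical mix: one private locked page, one page shared 4 ways. */
	struct page_info pages[] = {
		{ .mapcount = 1, .locked = true  },
		{ .mapcount = 4, .locked = false },
	};
	uint64_t pss = 0, pss_locked = 0;

	for (size_t i = 0; i < sizeof(pages) / sizeof(pages[0]); i++) {
		/*
		 * Each mapper is charged PAGE_SIZE / mapcount, kept in
		 * PSS_SHIFT fixed point so remainders are not lost.
		 */
		uint64_t share = (uint64_t)(PAGE_SIZE << PSS_SHIFT) / pages[i].mapcount;

		pss += share;
		if (pages[i].locked)
			pss_locked += share;	/* per page, per mapcount */
	}

	/* Prints pss = 5120 bytes, pss_locked = 4096 bytes. */
	printf("pss        = %llu bytes\n", (unsigned long long)(pss >> PSS_SHIFT));
	printf("pss_locked = %llu bytes\n", (unsigned long long)(pss_locked >> PSS_SHIFT));
	return 0;
}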
init/main.c +2 −1
@@ -695,7 +695,6 @@ asmlinkage __visible void __init start_kernel(void)
 		initrd_start = 0;
 	}
 #endif
-	page_ext_init();
 	kmemleak_init();
 	setup_per_cpu_pageset();
 	numa_policy_init();
@@ -1131,6 +1130,8 @@ static noinline void __init kernel_init_freeable(void)
 	sched_init_smp();
 
 	page_alloc_init_late();
+	/* Initialize page ext after all struct pages are initialized. */
+	page_ext_init();
 
 	do_basic_setup();
 
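
The revert moves page_ext_init() back after page_alloc_init_late(), since with deferred struct page initialization not every struct page is set up while start_kernel() runs. A tiny sketch of that ordering constraint, using hypothetical names:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool all_pages_initialized;	/* set once deferred init completes */

/* Analogue of page_alloc_init_late(): finishes deferred struct pages. */
static void late_page_init(void)
{
	all_pages_initialized = true;
}

/* Analogue of page_ext_init(): walks every page, so it must run late. */
static void ext_init(void)
{
	/* Walking pages before they are initialized would read garbage. */
	assert(all_pages_initialized);
	printf("page ext initialized safely\n");
}

int main(void)
{
	late_page_init();	/* must come first */
	ext_init();
	return 0;
}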
mm/gup.c +2 −1
@@ -1674,7 +1674,8 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
 		if (!pmd_present(pmd))
 			return 0;
 
-		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
+		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
+			     pmd_devmap(pmd))) {
 			/*
 			 * NUMA hinting faults need to be handled in the GUP
 			 * slowpath for accounting purposes and so that they
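
Without the pmd_devmap() test, a DAX huge pmd fell through to the pte-level walk, which treated it as a pointer to a page table. A sketch of the underlying dispatch pattern, with hypothetical predicates in place of the real pmd_* helpers:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical pmd kinds standing in for the real predicates. */
enum pmd_kind { PMD_TABLE, PMD_TRANS_HUGE, PMD_HUGETLB, PMD_DEVMAP };

static bool pmd_is_leaf(enum pmd_kind k)
{
	/* The fix: a devmap pmd is a leaf too, never a page-table pointer. */
	return k == PMD_TRANS_HUGE || k == PMD_HUGETLB || k == PMD_DEVMAP;
}

static const char *walk(enum pmd_kind k)
{
	if (pmd_is_leaf(k))
		return "huge-page fast path";
	return "descend to pte level";	/* only valid for PMD_TABLE */
}

int main(void)
{
	/* Before the fix a devmap pmd took the pte-level branch. */
	printf("devmap pmd -> %s\n", walk(PMD_DEVMAP));
	return 0;
}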