Commit 9df41314 authored by Alex Shi, committed by Linus Torvalds

mm/compaction: do page isolation first in compaction

Currently, compaction takes the lru_lock and then does page isolation,
which works fine with pgdat->lru_lock, since any page isolation competes
for that lock.  If we want to change to a memcg lru_lock, we have to
isolate the page before taking the lru_lock; isolation then blocks the
page's memcg from changing, since memcg changes rely on page isolation
too.  Then we can safely use a per-memcg lru_lock later.

The new page isolation uses the previously introduced TestClearPageLRU()
plus pgdat lru locking, which will be changed to the memcg lru lock later.
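
In outline (condensed from the mm/compaction.c hunk below; THP handling
and rechecks elided), the order of operations in
isolate_migratepages_block() becomes:

	/* Pin the page first -- it may otherwise be freed under us. */
	if (unlikely(!get_page_unless_zero(page)))
		goto isolate_fail;

	/* Mode-dependent checks only; no LRU state is changed yet. */
	if (__isolate_lru_page_prepare(page, isolate_mode) != 0)
		goto isolate_fail_put;

	/* The isolation point: only one path can clear the flag. */
	if (!TestClearPageLRU(page))
		goto isolate_fail_put;

	/* Only now take the lru_lock (later: the per-memcg lruvec lock). */
	if (!locked)
		locked = compact_lock_irqsave(&pgdat->lru_lock, &flags, cc);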

Hugh Dickins <hughd@google.com> fixed the following bugs in an early
version of this patch:

Fix lots of crashes under compaction load: isolate_migratepages_block()
must clean up appropriately when rejecting a page, setting PageLRU again
if it had been cleared; and a put_page() after get_page_unless_zero()
cannot safely be done while holding locked_lruvec - it may turn out to be
the final put_page(), which will take an lruvec lock when PageLRU.
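
Condensed from the hunks below, the two cleanup rules look like this:

	/* Rejecting a page after its PageLRU was cleared: restore it. */
	SetPageLRU(page);
	goto isolate_fail_put;

isolate_fail_put:
	/*
	 * Drop the lru_lock before put_page(): this may be the final
	 * put, and freeing a PageLRU page itself takes an lruvec lock.
	 */
	if (locked) {
		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
		locked = false;
	}
	put_page(page);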

And move __isolate_lru_page_prepare back to after get_page_unless_zero(),
because trylock_page() is not safe to use before the page is pinned: its
setting of PG_locked can race with the page being freed or allocated
("Bad page"), and can also erase flags being set by one of those "sole
owners" of a freshly allocated page who use non-atomic __SetPageFlag().
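
To illustrate that race (a hypothetical interleaving written for this
changelog, not code from the patch):

	/* CPU A: sole owner of a freshly allocated page */
	__SetPageSwapBacked(page);	/* non-atomic RMW on page->flags */

	/* CPU B: early-version compaction, before taking a reference */
	trylock_page(page);		/* atomic set of PG_locked... */

	/*
	 * CPU A's non-atomic store can erase CPU B's PG_locked, or
	 * PG_locked can be left set on a page being freed ("Bad page").
	 * Pinning the page with get_page_unless_zero() first, as this
	 * patch does, rules out both interleavings.
	 */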

Link: https://lkml.kernel.org/r/1604566549-62481-16-git-send-email-alex.shi@linux.alibaba.com
Suggested-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>
Acked-by: Hugh Dickins <hughd@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Alexander Duyck <alexander.duyck@gmail.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: "Chen, Rong A" <rong.a.chen@intel.com>
Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Jann Horn <jannh@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mika Penttilä <mika.penttila@nextfour.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Wei Yang <richard.weiyang@gmail.com>
Cc: Yang Shi <yang.shi@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d25b5bd8
include/linux/swap.h (+1 −1)
@@ -356,7 +356,7 @@ extern void lru_cache_add_inactive_or_unevictable(struct page *page,
 extern unsigned long zone_reclaimable_pages(struct zone *zone);
 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 					gfp_t gfp_mask, nodemask_t *mask);
-extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
+extern int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode);
 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 						  unsigned long nr_pages,
 						  gfp_t gfp_mask,
mm/compaction.c (+33 −9)
@@ -890,6 +890,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		if (!valid_page && IS_ALIGNED(low_pfn, pageblock_nr_pages)) {
 			if (!cc->ignore_skip_hint && get_pageblock_skip(page)) {
 				low_pfn = end_pfn;
+				page = NULL;
 				goto isolate_abort;
 			}
 			valid_page = page;
@@ -971,6 +972,21 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
 			goto isolate_fail;
 
+		/*
+		 * Be careful not to clear PageLRU until after we're
+		 * sure the page is not being freed elsewhere -- the
+		 * page release code relies on it.
+		 */
+		if (unlikely(!get_page_unless_zero(page)))
+			goto isolate_fail;
+
+		if (__isolate_lru_page_prepare(page, isolate_mode) != 0)
+			goto isolate_fail_put;
+
+		/* Try isolate the page */
+		if (!TestClearPageLRU(page))
+			goto isolate_fail_put;
+
 		/* If we already hold the lock, we can skip some rechecking */
 		if (!locked) {
 			locked = compact_lock_irqsave(&pgdat->lru_lock,
@@ -983,10 +999,6 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 					goto isolate_abort;
 			}
 
-			/* Recheck PageLRU and PageCompound under lock */
-			if (!PageLRU(page))
-				goto isolate_fail;
-
 			/*
 			 * Page become compound since the non-locked check,
 			 * and it's on LRU. It can only be a THP so the order
@@ -994,16 +1006,13 @@
 			 */
 			if (unlikely(PageCompound(page) && !cc->alloc_contig)) {
 				low_pfn += compound_nr(page) - 1;
-				goto isolate_fail;
+				SetPageLRU(page);
+				goto isolate_fail_put;
 			}
 		}
 
 		lruvec = mem_cgroup_page_lruvec(page, pgdat);
 
-		/* Try isolate the page */
-		if (__isolate_lru_page(page, isolate_mode) != 0)
-			goto isolate_fail;
-
 		/* The whole page is taken off the LRU; skip the tail pages. */
 		if (PageCompound(page))
 			low_pfn += compound_nr(page) - 1;
@@ -1032,6 +1041,15 @@ isolate_success:
 		}
 
 		continue;
+
+isolate_fail_put:
+		/* Avoid potential deadlock in freeing page under lru_lock */
+		if (locked) {
+			spin_unlock_irqrestore(&pgdat->lru_lock, flags);
+			locked = false;
+		}
+		put_page(page);
+
 isolate_fail:
 		if (!skip_on_failure)
 			continue;
@@ -1068,9 +1086,15 @@ isolate_fail:
 	if (unlikely(low_pfn > end_pfn))
 		low_pfn = end_pfn;
 
+	page = NULL;
+
 isolate_abort:
 	if (locked)
 		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
+	if (page) {
+		SetPageLRU(page);
+		put_page(page);
+	}
 
 	/*
 	 * Updated the cached scanner pfn once the pageblock has been scanned
mm/vmscan.c (+22 −21)
@@ -1539,7 +1539,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
  *
  * returns 0 on success, -ve errno on failure.
  */
-int __isolate_lru_page(struct page *page, isolate_mode_t mode)
+int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
 {
 	int ret = -EBUSY;
 
@@ -1591,22 +1591,9 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
 	if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
 		return ret;
 
-	if (likely(get_page_unless_zero(page))) {
-		/*
-		 * Be careful not to clear PageLRU until after we're
-		 * sure the page is not being freed elsewhere -- the
-		 * page release code relies on it.
-		 */
-		if (TestClearPageLRU(page))
-			ret = 0;
-		else
-			put_page(page);
-	}
-
-	return ret;
+	return 0;
 }
 
-
 /*
  * Update LRU sizes after isolating pages. The LRU size updates must
  * be complete before mem_cgroup_update_lru_size due to a sanity check.
@@ -1686,20 +1673,34 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 		 * only when the page is being freed somewhere else.
 		 */
 		scan += nr_pages;
-		switch (__isolate_lru_page(page, mode)) {
+		switch (__isolate_lru_page_prepare(page, mode)) {
 		case 0:
+			/*
+			 * Be careful not to clear PageLRU until after we're
+			 * sure the page is not being freed elsewhere -- the
+			 * page release code relies on it.
+			 */
+			if (unlikely(!get_page_unless_zero(page)))
+				goto busy;
+
+			if (!TestClearPageLRU(page)) {
+				/*
+				 * This page may in other isolation path,
+				 * but we still hold lru_lock.
+				 */
+				put_page(page);
+				goto busy;
+			}
+
 			nr_taken += nr_pages;
 			nr_zone_taken[page_zonenum(page)] += nr_pages;
 			list_move(&page->lru, dst);
 			break;
 
-		case -EBUSY:
+		default:
+busy:
 			/* else it is being freed elsewhere */
 			list_move(&page->lru, src);
-			continue;
-
-		default:
-			BUG();
 		}
 	}