Commit 5d91f31f authored by Shakeel Butt's avatar Shakeel Butt Committed by Linus Torvalds
Browse files

mm: swap: fix vmstats for huge pages



Many of the callbacks called by pagevec_lru_move_fn() do not correctly
update the vmstats for huge pages. Fix that. Also, __pagevec_lru_add_fn()
now uses the irq-unsafe alternative to update the stat, as the irqs are
already disabled there.

Signed-off-by: Shakeel Butt <shakeelb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Link: http://lkml.kernel.org/r/20200527182916.249910-1-shakeelb@google.com


Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d483a5dd
Loading
Loading
Loading
Loading
+8 −6
Original line number Diff line number Diff line
@@ -241,7 +241,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
		del_page_from_lru_list(page, lruvec, page_lru(page));
		ClearPageActive(page);
		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
		(*pgmoved)++;
		(*pgmoved) += hpage_nr_pages(page);
	}
}

@@ -327,7 +327,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
		add_page_to_lru_list(page, lruvec, lru);
		trace_mm_lru_activate(page);

		__count_vm_event(PGACTIVATE);
		__count_vm_events(PGACTIVATE, hpage_nr_pages(page));
	}
}

@@ -529,6 +529,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
{
	int lru;
	bool active;
	int nr_pages = hpage_nr_pages(page);

	if (!PageLRU(page))
		return;
@@ -561,11 +562,11 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
		 * We move that page to the tail of the inactive list.
		 */
		add_page_to_lru_list_tail(page, lruvec, lru);
		__count_vm_event(PGROTATED);
		__count_vm_events(PGROTATED, nr_pages);
	}

	if (active)
		__count_vm_event(PGDEACTIVATE);
		__count_vm_events(PGDEACTIVATE, nr_pages);
}

static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
@@ -960,6 +961,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
{
	enum lru_list lru;
	int was_unevictable = TestClearPageUnevictable(page);
	int nr_pages = hpage_nr_pages(page);

	VM_BUG_ON_PAGE(PageLRU(page), page);

@@ -995,13 +997,13 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
	if (page_evictable(page)) {
		lru = page_lru(page);
		if (was_unevictable)
			count_vm_event(UNEVICTABLE_PGRESCUED);
			__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
	} else {
		lru = LRU_UNEVICTABLE;
		ClearPageActive(page);
		SetPageUnevictable(page);
		if (!was_unevictable)
			count_vm_event(UNEVICTABLE_PGCULLED);
			__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
	}

	add_page_to_lru_list(page, lruvec, lru);