Commit f87bccde authored by Andrey Ryabinin, committed by Linus Torvalds

mm/vmscan: remove unused lru_pages argument

Since 9092c71b ("mm: use sc->priority for slab shrink targets") the
argument 'unsigned long *lru_pages' has been passed around with no
purpose.  Remove it.
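
For context: since 9092c71b, shrink_slab() sizes its scan target from
sc->priority alone instead of scaling by LRU-page counts, which is what
left these lru_pages totals with no consumer.  A minimal sketch of that
priority-based scaling, simplified and not verbatim kernel code, where
'freeable' stands for a shrinker's count of freeable objects:

	/*
	 * Simplified sketch of the priority-based shrink target from
	 * 9092c71b (not verbatim kernel code).  Before that commit the
	 * target was scaled by a scanned/lru_pages ratio, which is why
	 * callers had to total up LRU pages at all.
	 */
	static unsigned long shrink_target(unsigned long freeable, int priority)
	{
		/*
		 * Lower numeric priority means more reclaim pressure:
		 * shift by fewer bits and scan a larger fraction of
		 * the freeable objects.
		 */
		unsigned long delta = freeable >> priority;

		return delta < freeable ? delta : freeable;
	}

Once the slab target stopped depending on LRU sizes, every lru_pages
value computed below became write-only state, hence the straight removal.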

Link: http://lkml.kernel.org/r/20190228083329.31892-4-aryabinin@virtuozzo.com
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: William Kucharski <william.kucharski@oracle.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e47b346a
mm/vmscan.c: +5 −12
@@ -2302,8 +2302,7 @@ enum scan_balance {
  * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
  */
 static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
-			   struct scan_control *sc, unsigned long *nr,
-			   unsigned long *lru_pages)
+			   struct scan_control *sc, unsigned long *nr)
 {
 	int swappiness = mem_cgroup_swappiness(memcg);
 	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
@@ -2454,7 +2453,6 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
 	fraction[1] = fp;
 	denominator = ap + fp + 1;
 out:
-	*lru_pages = 0;
 	for_each_evictable_lru(lru) {
 		int file = is_file_lru(lru);
 		unsigned long lruvec_size;
@@ -2549,7 +2547,6 @@ out:
 			BUG();
 		}
 
-		*lru_pages += lruvec_size;
 		nr[lru] = scan;
 	}
 }
@@ -2558,7 +2555,7 @@ out:
  * This is a basic per-node page freer.  Used by both kswapd and direct reclaim.
  */
 static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memcg,
-			      struct scan_control *sc, unsigned long *lru_pages)
+			      struct scan_control *sc)
 {
 	struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
 	unsigned long nr[NR_LRU_LISTS];
@@ -2570,7 +2567,7 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
 	struct blk_plug plug;
 	bool scan_adjusted;
 
-	get_scan_count(lruvec, memcg, sc, nr, lru_pages);
+	get_scan_count(lruvec, memcg, sc, nr);
 
 	/* Record the original scan target for proportional adjustments later */
 	memcpy(targets, nr, sizeof(nr));
@@ -2758,7 +2755,6 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 
 	do {
 		struct mem_cgroup *root = sc->target_mem_cgroup;
-		unsigned long node_lru_pages = 0;
 		struct mem_cgroup *memcg;
 
 		memset(&sc->nr, 0, sizeof(sc->nr));
@@ -2768,7 +2764,6 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 
 		memcg = mem_cgroup_iter(root, NULL, NULL);
 		do {
-			unsigned long lru_pages;
 			unsigned long reclaimed;
 			unsigned long scanned;
 
@@ -2805,8 +2800,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 
 			reclaimed = sc->nr_reclaimed;
 			scanned = sc->nr_scanned;
-			shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
-			node_lru_pages += lru_pages;
+			shrink_node_memcg(pgdat, memcg, sc);
 
 			shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
 					sc->priority);
@@ -3317,7 +3311,6 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
 		.reclaim_idx = MAX_NR_ZONES - 1,
 		.may_swap = !noswap,
 	};
-	unsigned long lru_pages;
 
 	WARN_ON_ONCE(!current->reclaim_state);
 
@@ -3334,7 +3327,7 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
 	 * will pick up pages from other mem cgroup's as well. We hack
 	 * the priority and make it zero.
 	 */
-	shrink_node_memcg(pgdat, memcg, &sc, &lru_pages);
+	shrink_node_memcg(pgdat, memcg, &sc);
 
 	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);