Commit a8d01437 authored by Johannes Weiner, committed by Linus Torvalds

mm: page_alloc: generalize the dirty balance reserve

The dirty balance reserve that dirty throttling has to consider is
merely memory not available to userspace allocations.  There is nothing
writeback-specific about it.  Generalize the name so that it's reusable
outside of that context.
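For illustration, here is a minimal standalone C sketch (not kernel code; dirtyable_memory() is a made-up helper and the page counts are hypothetical) of the calculation this reserve feeds into: free pages below the reserve are excluded before reclaimable file pages are counted as dirtyable.

#include <stdio.h>

/*
 * Sketch mirroring the shape of global_dirtyable_memory(): pages held
 * in the kernel reserve are not available to userspace allocations and
 * must not be counted as dirtyable.
 */
static unsigned long dirtyable_memory(unsigned long free_pages,
				      unsigned long reserve_pages,
				      unsigned long file_pages)
{
	unsigned long x = free_pages;

	/* Never underflow: subtract at most what is actually free. */
	x -= (x < reserve_pages) ? x : reserve_pages;

	/* Clean file pages can be dirtied, so they count as dirtyable. */
	return x + file_pages;
}

int main(void)
{
	/* Hypothetical counts, in pages. */
	printf("dirtyable: %lu pages\n",
	       dirtyable_memory(100000, 20000, 300000));
	return 0;
}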

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c20cd45e
include/linux/mmzone.h (+3 −3)

@@ -356,10 +356,10 @@ struct zone {
 	struct per_cpu_pageset __percpu *pageset;
 
 	/*
-	 * This is a per-zone reserve of pages that should not be
-	 * considered dirtyable memory.
+	 * This is a per-zone reserve of pages that are not available
+	 * to userspace allocations.
 	 */
-	unsigned long		dirty_balance_reserve;
+	unsigned long		totalreserve_pages;
 
 #ifndef CONFIG_SPARSEMEM
 	/*
include/linux/swap.h (+0 −1)

@@ -287,7 +287,6 @@ static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
 /* linux/mm/page_alloc.c */
 extern unsigned long totalram_pages;
 extern unsigned long totalreserve_pages;
-extern unsigned long dirty_balance_reserve;
 extern unsigned long nr_free_buffer_pages(void);
 extern unsigned long nr_free_pagecache_pages(void);
 
mm/page-writeback.c (+12 −2)

@@ -278,7 +278,12 @@ static unsigned long zone_dirtyable_memory(struct zone *zone)
 	unsigned long nr_pages;
 
 	nr_pages = zone_page_state(zone, NR_FREE_PAGES);
-	nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
+	/*
+	 * Pages reserved for the kernel should not be considered
+	 * dirtyable, to prevent a situation where reclaim has to
+	 * clean pages in order to balance the zones.
+	 */
+	nr_pages -= min(nr_pages, zone->totalreserve_pages);
 
 	nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
 	nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);
@@ -332,7 +337,12 @@ static unsigned long global_dirtyable_memory(void)
 	unsigned long x;
 
 	x = global_page_state(NR_FREE_PAGES);
-	x -= min(x, dirty_balance_reserve);
+	/*
+	 * Pages reserved for the kernel should not be considered
+	 * dirtyable, to prevent a situation where reclaim has to
+	 * clean pages in order to balance the zones.
+	 */
+	x -= min(x, totalreserve_pages);
 
 	x += global_page_state(NR_INACTIVE_FILE);
 	x += global_page_state(NR_ACTIVE_FILE);
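The value computed here is what the dirty limits are derived from. A simplified sketch of that step (dirty_thresh() is a made-up helper; the real global_dirty_limits() also handles vm_dirty_bytes and per-task adjustments):

#include <stdio.h>

/*
 * Sketch: how dirtyable memory becomes a dirty threshold.  dirty_ratio
 * plays the role of the vm_dirty_ratio sysctl, a percentage.
 */
static unsigned long dirty_thresh(unsigned long dirtyable_pages,
				  unsigned int dirty_ratio)
{
	return dirtyable_pages * dirty_ratio / 100;
}

int main(void)
{
	/* Hypothetical: 380000 dirtyable pages at a 20% ratio. */
	printf("threshold: %lu pages\n", dirty_thresh(380000, 20));
	return 0;
}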
mm/page_alloc.c (+3 −18)

@@ -114,13 +114,6 @@ static DEFINE_SPINLOCK(managed_page_count_lock);
 unsigned long totalram_pages __read_mostly;
 unsigned long totalreserve_pages __read_mostly;
 unsigned long totalcma_pages __read_mostly;
-/*
- * When calculating the number of globally allowed dirty pages, there
- * is a certain number of per-zone reserves that should not be
- * considered dirtyable memory.  This is the sum of those reserves
- * over all existing zones that contribute dirtyable memory.
- */
-unsigned long dirty_balance_reserve __read_mostly;
 
 int percpu_pagelist_fraction;
 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
@@ -5942,20 +5935,12 @@ static void calculate_totalreserve_pages(void)
 
 			if (max > zone->managed_pages)
 				max = zone->managed_pages;
+
+			zone->totalreserve_pages = max;
+
 			reserve_pages += max;
-			/*
-			 * Lowmem reserves are not available to
-			 * GFP_HIGHUSER page cache allocations and
-			 * kswapd tries to balance zones to their high
-			 * watermark.  As a result, neither should be
-			 * regarded as dirtyable memory, to prevent a
-			 * situation where reclaim has to clean pages
-			 * in order to balance the zones.
-			 */
-			zone->dirty_balance_reserve = max;
 		}
 	}
-	dirty_balance_reserve = reserve_pages;
 	totalreserve_pages = reserve_pages;
 }
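The aggregation above can be pictured with a small standalone sketch (struct zone_sketch and total_reserve() are made-up stand-ins, not kernel interfaces): each zone's reserve is capped at its managed pages, recorded per zone, and summed into the global total.

#include <stdio.h>

/*
 * Standalone sketch of the aggregation in calculate_totalreserve_pages():
 * record each zone's capped reserve and sum the per-zone values into
 * one global figure.
 */
struct zone_sketch {
	unsigned long reserve;		/* watermark plus lowmem reserve */
	unsigned long managed_pages;
	unsigned long totalreserve_pages;
};

static unsigned long total_reserve(struct zone_sketch *zones, int nr_zones)
{
	unsigned long reserve_pages = 0;
	int i;

	for (i = 0; i < nr_zones; i++) {
		unsigned long max = zones[i].reserve;

		/* A zone cannot reserve more pages than it manages. */
		if (max > zones[i].managed_pages)
			max = zones[i].managed_pages;

		zones[i].totalreserve_pages = max;
		reserve_pages += max;
	}
	return reserve_pages;
}

int main(void)
{
	/* Hypothetical zones; the second one gets capped. */
	struct zone_sketch zones[] = {
		{ .reserve = 4000,  .managed_pages = 200000 },
		{ .reserve = 90000, .managed_pages = 60000 },
	};

	printf("totalreserve_pages: %lu\n", total_reserve(zones, 2));
	return 0;
}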