Commit 21c527a3 authored by Yaowei Bai's avatar Yaowei Bai Committed by Linus Torvalds
Browse files

mm/compaction.c: add an is_via_compact_memory() helper



Introduce is_via_compact_memory() helper indicating compacting via
/proc/sys/vm/compact_memory to improve readability.

To catch this situation in __compaction_suitable, pass order as a
parameter directly instead of extracting it from struct compact_control.

This patch has no functional changes.

Signed-off-by: Yaowei Bai <bywxiaobai@163.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 29d06bbb
Loading
Loading
Loading
Loading
+14 −12
Original line number Diff line number Diff line
@@ -1197,6 +1197,15 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
	return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
}

/*
 * Compaction requested through /proc/sys/vm/compact_memory is signalled
 * by an order of -1 instead of a real allocation order.
 */
static inline bool is_via_compact_memory(int order)
{
	const int compact_memory_order = -1;

	return order == compact_memory_order;
}

static int __compact_finished(struct zone *zone, struct compact_control *cc,
			    const int migratetype)
{
@@ -1223,11 +1232,7 @@ static int __compact_finished(struct zone *zone, struct compact_control *cc,
		return COMPACT_COMPLETE;
	}

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (cc->order == -1)
	if (is_via_compact_memory(cc->order))
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
@@ -1290,11 +1295,7 @@ static unsigned long __compaction_suitable(struct zone *zone, int order,
	int fragindex;
	unsigned long watermark;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (order == -1)
	if (is_via_compact_memory(order))
		return COMPACT_CONTINUE;

	watermark = low_wmark_pages(zone);
@@ -1658,10 +1659,11 @@ static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
		 * this makes sure we compact the whole zone regardless of
		 * cached scanner positions.
		 */
		if (cc->order == -1)
		if (is_via_compact_memory(cc->order))
			__reset_isolation_suitable(zone);

		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
		if (is_via_compact_memory(cc->order) ||
				!compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		if (cc->order > 0) {