Commit f53d7ce3 authored by Johannes Weiner, committed by Linus Torvalds

mm: memcg: shorten preempt-disabled section around event checks



Only the ratelimit checks themselves have to run with preemption
disabled; the resulting actions - checking for usage thresholds,
updating the soft limit tree - can and should run with preemption
enabled.
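
The pattern generalizes: sample the cheap ratelimit decisions while
preemption is off, remember the results in locals, re-enable
preemption, then do the heavy work.  A minimal userspace sketch of
the idea in plain C - the counter, target, and helper names below
are illustrative stand-ins for the kernel's per-cpu event state,
not the kernel API:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's per-cpu event state. */
static unsigned long event_count;	/* events seen so far */
static unsigned long event_target;	/* count at which to act next */
#define EVENTS_TARGET 1024		/* ratelimit interval */

/* No-op stand-ins for the real preemption primitives. */
static void preempt_disable(void) { }
static void preempt_enable(void) { }

/*
 * The cheap part that runs with preemption disabled: check whether
 * the target expired and, if so, advance it and report true.
 */
static bool event_ratelimit(void)
{
	unsigned long val = event_count;
	unsigned long next = event_target;

	/* from time_after() in jiffies.h: wraparound-safe comparison */
	if ((long)next - (long)val < 0) {
		event_target = val + EVENTS_TARGET;
		return true;
	}
	return false;
}

/* The expensive part that runs with preemption enabled. */
static void expensive_followup(void)
{
	printf("followup work at count %lu\n", event_count);
}

static void check_events(void)
{
	bool do_followup;

	preempt_disable();
	do_followup = event_ratelimit();	/* sample decision only */
	preempt_enable();

	if (do_followup)			/* act outside the section */
		expensive_followup();
}

int main(void)
{
	while (event_count++ < 4096)
		check_events();
	return 0;
}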

Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Reported-by: Yong Zhang <yong.zhang0@gmail.com>
Tested-by: Yong Zhang <yong.zhang0@gmail.com>
Reported-by: Luis Henriques <henrix@camandro.org>
Tested-by: Luis Henriques <henrix@camandro.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e94c8a9c
mm/memcontrol.c: +35 −38
@@ -748,22 +748,15 @@ static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
 	return total;
 }
 
-static bool __memcg_event_check(struct mem_cgroup *memcg, int target)
+static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
+				       enum mem_cgroup_events_target target)
 {
 	unsigned long val, next;
 
 	val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
 	next = __this_cpu_read(memcg->stat->targets[target]);
 	/* from time_after() in jiffies.h */
-	return ((long)next - (long)val < 0);
-}
-
-static void __mem_cgroup_target_update(struct mem_cgroup *memcg, int target)
-{
-	unsigned long val, next;
-
-	val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
-
+	if ((long)next - (long)val < 0) {
 		switch (target) {
 		case MEM_CGROUP_TARGET_THRESH:
 			next = val + THRESHOLDS_EVENTS_TARGET;
@@ -775,10 +768,12 @@ static void __mem_cgroup_target_update(struct mem_cgroup *memcg, int target)
 			next = val + NUMAINFO_EVENTS_TARGET;
 			break;
 		default:
-		return;
+			break;
 		}
-
 		__this_cpu_write(memcg->stat->targets[target], next);
+		return true;
 	}
+	return false;
+}
 
 /*
@@ -789,24 +784,26 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
 {
 	preempt_disable();
 	/* threshold event is triggered in finer grain than soft limit */
-	if (unlikely(__memcg_event_check(memcg, MEM_CGROUP_TARGET_THRESH))) {
+	if (unlikely(mem_cgroup_event_ratelimit(memcg,
+						MEM_CGROUP_TARGET_THRESH))) {
+		bool do_softlimit, do_numainfo;
+
+		do_softlimit = mem_cgroup_event_ratelimit(memcg,
+						MEM_CGROUP_TARGET_SOFTLIMIT);
+#if MAX_NUMNODES > 1
+		do_numainfo = mem_cgroup_event_ratelimit(memcg,
+						MEM_CGROUP_TARGET_NUMAINFO);
+#endif
+		preempt_enable();
+
 		mem_cgroup_threshold(memcg);
-		__mem_cgroup_target_update(memcg, MEM_CGROUP_TARGET_THRESH);
-		if (unlikely(__memcg_event_check(memcg,
-			     MEM_CGROUP_TARGET_SOFTLIMIT))) {
+		if (unlikely(do_softlimit))
 			mem_cgroup_update_tree(memcg, page);
-			__mem_cgroup_target_update(memcg,
-						   MEM_CGROUP_TARGET_SOFTLIMIT);
-		}
 #if MAX_NUMNODES > 1
-		if (unlikely(__memcg_event_check(memcg,
-			MEM_CGROUP_TARGET_NUMAINFO))) {
+		if (unlikely(do_numainfo))
 			atomic_inc(&memcg->numainfo_events);
-			__mem_cgroup_target_update(memcg,
-				MEM_CGROUP_TARGET_NUMAINFO);
-		}
 #endif
-	}
+	} else
 		preempt_enable();
 }
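
Note the resulting control flow: in the common case no target has
expired, the else branch runs and preemption is re-enabled
immediately; only when the threshold target fires are the soft limit
and NUMA info decisions sampled as well, still inside the same
preempt-disabled section, before preempt_enable() and the actual
work.  Either way the function leaves with preemption enabled, and
the critical section now covers nothing but a few per-cpu reads and
writes.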