Commit c3d53200 authored by Chris Down's avatar Chris Down Committed by Linus Torvalds
Browse files

mm, memcg: prevent memory.min load/store tearing



The memory.min value can be written concurrently with reads of it, so an
unannotated plain load or store may tear and propagate the wrong value.
Annotate the accesses with READ_ONCE()/WRITE_ONCE() to prevent this.

Signed-off-by: default avatarChris Down <chris@chrisdown.name>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Acked-by: default avatarMichal Hocko <mhocko@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Roman Gushchin <guro@fb.com>
Cc: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/e809b4e6b0c1626dac6945970de06409a180ee65.1584034301.git.chris@chrisdown.name


Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent f86b810c
Loading
Loading
Loading
Loading
+3 −2
Original line number Diff line number Diff line
@@ -6389,7 +6389,7 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
		return MEMCG_PROT_NONE;

	if (parent == root) {
		memcg->memory.emin = memcg->memory.min;
		memcg->memory.emin = READ_ONCE(memcg->memory.min);
		memcg->memory.elow = memcg->memory.low;
		goto out;
	}
@@ -6397,7 +6397,8 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
	parent_usage = page_counter_read(&parent->memory);

	memcg->memory.emin = effective_protection(usage, parent_usage,
			memcg->memory.min, READ_ONCE(parent->memory.emin),
			READ_ONCE(memcg->memory.min),
			READ_ONCE(parent->memory.emin),
			atomic_long_read(&parent->memory.children_min_usage));

	memcg->memory.elow = effective_protection(usage, parent_usage,
+5 −4
Original line number Diff line number Diff line
@@ -17,14 +17,15 @@ static void propagate_protected_usage(struct page_counter *c,
				      unsigned long usage)
{
	unsigned long protected, old_protected;
	unsigned long low;
	unsigned long low, min;
	long delta;

	if (!c->parent)
		return;

	if (c->min || atomic_long_read(&c->min_usage)) {
		protected = min(usage, c->min);
	min = READ_ONCE(c->min);
	if (min || atomic_long_read(&c->min_usage)) {
		protected = min(usage, min);
		old_protected = atomic_long_xchg(&c->min_usage, protected);
		delta = protected - old_protected;
		if (delta)
@@ -207,7 +208,7 @@ void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	counter->min = nr_pages;
	WRITE_ONCE(counter->min, nr_pages);

	for (c = counter; c; c = c->parent)
		propagate_protected_usage(c, atomic_long_read(&c->usage));