Commit 272911a4 authored by Roman Gushchin, committed by Linus Torvalds

mm: memcg/slab: remove memcg_kmem_get_cache()



The memcg_kmem_get_cache() function has become trivial, so let's just
inline it into its single call point: memcg_slab_pre_alloc_hook().

This makes the code less bulky and can also help the compiler generate
better code.

Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/20200623174037.3951353-15-guro@fb.com


Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d797b7d0
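
In short, the change replaces an out-of-line call with the helper's
two-line body at its only call site. A condensed before/after of the hot
path, assembled from the hunks below (see the full diffs for context):

	/* Before: memcg_slab_pre_alloc_hook() called an exported helper. */
	cachep = memcg_kmem_get_cache(s);
	if (is_root_cache(cachep))
		return s;

	/* After: the helper's body sits directly at its single call point. */
	cachep = READ_ONCE(s->memcg_params.memcg_cache);
	if (unlikely(!cachep)) {
		/* No memcg cache yet: schedule its asynchronous creation
		 * and satisfy this allocation from the root cache. */
		queue_work(system_wq, &s->memcg_params.work);
		return s;
	}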
include/linux/memcontrol.h  +0 −2
@@ -1403,8 +1403,6 @@ static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
 }
 #endif
 
-struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
-
 #ifdef CONFIG_MEMCG_KMEM
 int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
 			unsigned int nr_pages);
mm/memcontrol.c  +1 −24
@@ -393,7 +393,7 @@ void memcg_put_cache_ids(void)
 
 /*
  * A lot of the calls to the cache allocation functions are expected to be
- * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
+ * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
  * conditional to this static branch, we'll have to allow modules that does
  * kmem_cache_alloc and the such to see this symbol as well
  */
@@ -2900,29 +2900,6 @@ static void memcg_free_cache_id(int id)
 	ida_simple_remove(&memcg_cache_ida, id);
 }
 
-/**
- * memcg_kmem_get_cache: select memcg or root cache for allocation
- * @cachep: the original global kmem cache
- *
- * Return the kmem_cache we're supposed to use for a slab allocation.
- *
- * If the cache does not exist yet, if we are the first user of it, we
- * create it asynchronously in a workqueue and let the current allocation
- * go through with the original cache.
- */
-struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
-{
-	struct kmem_cache *memcg_cachep;
-
-	memcg_cachep = READ_ONCE(cachep->memcg_params.memcg_cache);
-	if (unlikely(!memcg_cachep)) {
-		queue_work(system_wq, &cachep->memcg_params.work);
-		return cachep;
-	}
-
-	return memcg_cachep;
-}
-
 /**
  * __memcg_kmem_charge: charge a number of kernel pages to a memcg
  * @memcg: memory cgroup to charge
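
The "static branch" mentioned in the comment above is the memcg kmem
static key defined right after it in mm/memcontrol.c. A minimal sketch of
that pattern (the key's definition and export are real kernel identifiers;
the accessor is simplified, and the exact static_branch_likely()/
static_branch_unlikely() hint has varied across kernel versions):

DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);	/* modules calling kmem_cache_alloc() need it */

static inline bool memcg_kmem_enabled(void)
{
	/* Compiles to a patched jump label: near-zero cost while
	 * kmem accounting is disabled. */
	return static_branch_likely(&memcg_kmem_enabled_key);
}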
mm/slab.h  +9 −2
@@ -365,9 +365,16 @@ static inline struct kmem_cache *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
 	if (memcg_kmem_bypass())
 		return s;
 
-	cachep = memcg_kmem_get_cache(s);
-	if (is_root_cache(cachep))
+	cachep = READ_ONCE(s->memcg_params.memcg_cache);
+	if (unlikely(!cachep)) {
+		/*
+		 * If the memcg cache does not exist yet, we schedule its
+		 * asynchronous creation and let the current allocation
+		 * go through with the root cache.
+		 */
+		queue_work(system_wq, &s->memcg_params.work);
 		return s;
+	}
 
 	objcg = get_obj_cgroup_from_current();
 	if (!objcg)
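
For context, the work item queued above ends up in
memcg_create_kmem_cache(), the function the final hunk touches. A sketch
of the workqueue handler, assuming the conventional container_of()
pattern (the handler's name here is illustrative, not taken from the diff):

static void memcg_kmem_cache_create_func(struct work_struct *work)
{
	struct kmem_cache *cachep = container_of(work, struct kmem_cache,
						 memcg_params.work);

	/* Runs later in process context off system_wq; creates the
	 * per-memcg cache and publishes it (see the barrier below). */
	memcg_create_kmem_cache(cachep);
}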
mm/slab_common.c  +1 −1
@@ -570,7 +570,7 @@ void memcg_create_kmem_cache(struct kmem_cache *root_cache)
 	}
 
 	/*
-	 * Since readers won't lock (see memcg_kmem_get_cache()), we need a
+	 * Since readers won't lock (see memcg_slab_pre_alloc_hook()), we need a
 	 * barrier here to ensure nobody will see the kmem_cache partially
 	 * initialized.
 	 */