Commit 6cea1d56 authored by Roman Gushchin, committed by Linus Torvalds

mm: memcg/slab: unify SLAB and SLUB page accounting

Currently the page accounting code is duplicated in SLAB and SLUB
internals.  Let's move it into new (un)charge_slab_page helpers in
mm/slab.h.  These helpers will be responsible for statistics
(global and memcg-aware) and for memcg charging, replacing the direct
memcg_(un)charge_slab() calls.
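
For illustration, here is a condensed sketch of how both allocators now use
the new helpers (drawn from the hunks below; the surrounding code is elided):

	/*
	 * charge_slab_page() charges the page to the cache's memcg (if any)
	 * and, only on success, bumps the matching NR_SLAB_(UN)RECLAIMABLE
	 * vmstat counter by 1 << order; uncharge_slab_page() is the exact
	 * mirror, so the statistics and the memcg charge cannot drift apart.
	 */
	if (page && charge_slab_page(page, flags, order, s)) {
		/* charging failed: return the pages and fail the allocation */
		__free_pages(page, order);
		page = NULL;
	}

	/* ...and on the free path, just before the pages are returned: */
	uncharge_slab_page(page, order, s);
	__free_pages(page, order);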

Link: http://lkml.kernel.org/r/20190611231813.3148843-6-guro@fb.com
Signed-off-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Waiman Long <longman@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Andrei Vagin <avagin@gmail.com>
Cc: Qian Cai <cai@lca.pw>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 49a18eae
mm/slab.c  +3 −16
@@ -1360,7 +1360,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 								int nodeid)
 {
 	struct page *page;
-	int nr_pages;
 
 	flags |= cachep->allocflags;
 
@@ -1370,17 +1369,11 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 		return NULL;
 	}
 
-	if (memcg_charge_slab(page, flags, cachep->gfporder, cachep)) {
+	if (charge_slab_page(page, flags, cachep->gfporder, cachep)) {
 		__free_pages(page, cachep->gfporder);
 		return NULL;
 	}
 
-	nr_pages = (1 << cachep->gfporder);
-	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
-		mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, nr_pages);
-	else
-		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, nr_pages);
-
 	__SetPageSlab(page);
 	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
 	if (sk_memalloc_socks() && page_is_pfmemalloc(page))
@@ -1395,12 +1388,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 {
 	int order = cachep->gfporder;
-	unsigned long nr_freed = (1 << order);
-
-	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
-		mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, -nr_freed);
-	else
-		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, -nr_freed);
-
 	BUG_ON(!PageSlab(page));
 	__ClearPageSlabPfmemalloc(page);
@@ -1409,8 +1396,8 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 	page->mapping = NULL;
 
 	if (current->reclaim_state)
-		current->reclaim_state->reclaimed_slab += nr_freed;
-	memcg_uncharge_slab(page, order, cachep);
+		current->reclaim_state->reclaimed_slab += 1 << order;
+	uncharge_slab_page(page, order, cachep);
 	__free_pages(page, order);
 }

mm/slab.h  +25 −0
@@ -205,6 +205,12 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
 int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
 
+static inline int cache_vmstat_idx(struct kmem_cache *s)
+{
+	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
+		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE;
+}
+
 #ifdef CONFIG_MEMCG_KMEM
 
 /* List of all root caches. */
@@ -361,6 +367,25 @@ static inline struct kmem_cache *virt_to_cache(const void *obj)
 	return page->slab_cache;
 }
 
+static __always_inline int charge_slab_page(struct page *page,
+					    gfp_t gfp, int order,
+					    struct kmem_cache *s)
+{
+	int ret = memcg_charge_slab(page, gfp, order, s);
+
+	if (!ret)
+		mod_lruvec_page_state(page, cache_vmstat_idx(s), 1 << order);
+
+	return ret;
+}
+
+static __always_inline void uncharge_slab_page(struct page *page, int order,
+					       struct kmem_cache *s)
+{
+	mod_lruvec_page_state(page, cache_vmstat_idx(s), -(1 << order));
+	memcg_uncharge_slab(page, order, s);
+}
+
 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 {
 	struct kmem_cache *cachep;
mm/slub.c  +2 −12
@@ -1488,7 +1488,7 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
 	else
 		page = __alloc_pages_node(node, flags, order);
 
-	if (page && memcg_charge_slab(page, flags, order, s)) {
+	if (page && charge_slab_page(page, flags, order, s)) {
 		__free_pages(page, order);
 		page = NULL;
 	}
@@ -1681,11 +1681,6 @@ out:
 	if (!page)
 		return NULL;
 
-	mod_lruvec_page_state(page,
-		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
-		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		1 << oo_order(oo));
-
 	inc_slabs_node(s, page_to_nid(page), page->objects);
 
 	return page;
@@ -1719,18 +1714,13 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 			check_object(s, page, p, SLUB_RED_INACTIVE);
 	}
 
-	mod_lruvec_page_state(page,
-		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
-		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		-pages);
-
 	__ClearPageSlabPfmemalloc(page);
 	__ClearPageSlab(page);
 
 	page->mapping = NULL;
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
-	memcg_uncharge_slab(page, order, s);
+	uncharge_slab_page(page, order, s);
 	__free_pages(page, order);
 }