Commit 916ac052 authored by Tobin C. Harding, committed by Linus Torvalds

slub: use slab_list instead of lru

Currently we use the page->lru list to maintain lists of slabs.  The page
structure already provides a list intended for this purpose (slab_list).
Using slab_list makes the code cleaner, since we no longer overload the lru
list.

Use the slab_list instead of the lru list for maintaining lists of slabs.
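
This works because slab_list and lru share storage in struct page, so the
change costs no space and alters no layout.  An abridged sketch of the
relevant union from this series (not the full definition):

	struct page {
		unsigned long flags;
		union {
			struct {	/* Page cache and anonymous pages */
				struct list_head lru;
				/* ... */
			};
			struct {	/* slab, slob and slub */
				union {
					struct list_head slab_list;
					struct {	/* Partial pages */
						struct page *next;
						int pages;	/* Nr of pages left */
						int pobjects;	/* Approximate count */
					};
				};
				struct kmem_cache *slab_cache;
				/* ... */
			};
			/* ... */
		};
		/* ... */
	};

Because both members sit in the same union, &page->lru and &page->slab_list
name the same address; the patch only changes which name the slab code uses.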

Link: http://lkml.kernel.org/r/20190402230545.2929-6-tobin@kernel.org


Signed-off-by: Tobin C. Harding <tobin@kernel.org>
Acked-by: Christoph Lameter <cl@linux.com>
Reviewed-by: Roman Gushchin <guro@fb.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6dfd1b65
mm/slub.c +20 −20
@@ -1014,7 +1014,7 @@ static void add_full(struct kmem_cache *s,
		return;

	lockdep_assert_held(&n->list_lock);
-	list_add(&page->lru, &n->full);
+	list_add(&page->slab_list, &n->full);
}

static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
@@ -1023,7 +1023,7 @@ static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct
		return;

	lockdep_assert_held(&n->list_lock);
-	list_del(&page->lru);
+	list_del(&page->slab_list);
}

/* Tracking of the number of slabs for debugging purposes */
@@ -1764,9 +1764,9 @@ __add_partial(struct kmem_cache_node *n, struct page *page, int tail)
{
	n->nr_partial++;
	if (tail == DEACTIVATE_TO_TAIL)
-		list_add_tail(&page->lru, &n->partial);
+		list_add_tail(&page->slab_list, &n->partial);
	else
-		list_add(&page->lru, &n->partial);
+		list_add(&page->slab_list, &n->partial);
}

static inline void add_partial(struct kmem_cache_node *n,
@@ -1780,7 +1780,7 @@ static inline void remove_partial(struct kmem_cache_node *n,
					struct page *page)
{
	lockdep_assert_held(&n->list_lock);
-	list_del(&page->lru);
+	list_del(&page->slab_list);
	n->nr_partial--;
}

@@ -1854,7 +1854,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
		return NULL;

	spin_lock(&n->list_lock);
-	list_for_each_entry_safe(page, page2, &n->partial, lru) {
+	list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
		void *t;

		if (!pfmemalloc_match(page, flags))
@@ -2398,7 +2398,7 @@ static unsigned long count_partial(struct kmem_cache_node *n,
	struct page *page;

	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
+	list_for_each_entry(page, &n->partial, slab_list)
		x += get_count(page);
	spin_unlock_irqrestore(&n->list_lock, flags);
	return x;
@@ -3696,10 +3696,10 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)

	BUG_ON(irqs_disabled());
	spin_lock_irq(&n->list_lock);
-	list_for_each_entry_safe(page, h, &n->partial, lru) {
+	list_for_each_entry_safe(page, h, &n->partial, slab_list) {
		if (!page->inuse) {
			remove_partial(n, page);
-			list_add(&page->lru, &discard);
+			list_add(&page->slab_list, &discard);
		} else {
			list_slab_objects(s, page,
			"Objects remaining in %s on __kmem_cache_shutdown()");
@@ -3707,7 +3707,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
	}
	spin_unlock_irq(&n->list_lock);

-	list_for_each_entry_safe(page, h, &discard, lru)
+	list_for_each_entry_safe(page, h, &discard, slab_list)
		discard_slab(s, page);
}

@@ -3987,7 +3987,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
		 * Note that concurrent frees may occur while we hold the
		 * list_lock. page->inuse here is the upper limit.
		 */
-		list_for_each_entry_safe(page, t, &n->partial, lru) {
+		list_for_each_entry_safe(page, t, &n->partial, slab_list) {
			int free = page->objects - page->inuse;

			/* Do not reread page->inuse */
@@ -3997,10 +3997,10 @@ int __kmem_cache_shrink(struct kmem_cache *s)
			BUG_ON(free <= 0);

			if (free == page->objects) {
-				list_move(&page->lru, &discard);
+				list_move(&page->slab_list, &discard);
				n->nr_partial--;
			} else if (free <= SHRINK_PROMOTE_MAX)
-				list_move(&page->lru, promote + free - 1);
+				list_move(&page->slab_list, promote + free - 1);
		}

		/*
@@ -4013,7 +4013,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
		spin_unlock_irqrestore(&n->list_lock, flags);

		/* Release empty slabs */
-		list_for_each_entry_safe(page, t, &discard, lru)
+		list_for_each_entry_safe(page, t, &discard, slab_list)
			discard_slab(s, page);

		if (slabs_node(s, node))
@@ -4205,11 +4205,11 @@ static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
	for_each_kmem_cache_node(s, node, n) {
		struct page *p;

-		list_for_each_entry(p, &n->partial, lru)
+		list_for_each_entry(p, &n->partial, slab_list)
			p->slab_cache = s;

#ifdef CONFIG_SLUB_DEBUG
-		list_for_each_entry(p, &n->full, lru)
+		list_for_each_entry(p, &n->full, slab_list)
			p->slab_cache = s;
#endif
	}
@@ -4426,7 +4426,7 @@ static int validate_slab_node(struct kmem_cache *s,

	spin_lock_irqsave(&n->list_lock, flags);

-	list_for_each_entry(page, &n->partial, lru) {
+	list_for_each_entry(page, &n->partial, slab_list) {
		validate_slab_slab(s, page, map);
		count++;
	}
@@ -4437,7 +4437,7 @@ static int validate_slab_node(struct kmem_cache *s,
	if (!(s->flags & SLAB_STORE_USER))
		goto out;

-	list_for_each_entry(page, &n->full, lru) {
+	list_for_each_entry(page, &n->full, slab_list) {
		validate_slab_slab(s, page, map);
		count++;
	}
@@ -4633,9 +4633,9 @@ static int list_locations(struct kmem_cache *s, char *buf,
			continue;

		spin_lock_irqsave(&n->list_lock, flags);
-		list_for_each_entry(page, &n->partial, lru)
+		list_for_each_entry(page, &n->partial, slab_list)
			process_slab(&t, s, page, alloc, map);
-		list_for_each_entry(page, &n->full, lru)
+		list_for_each_entry(page, &n->full, slab_list)
			process_slab(&t, s, page, alloc, map);
		spin_unlock_irqrestore(&n->list_lock, flags);
	}
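
The conversion is mechanical because the list iterators take the name of the
list_head member as an argument and recover the enclosing struct page with
container_of().  A minimal standalone sketch of that pattern (hypothetical
userspace types, not kernel code):

	#include <stddef.h>
	#include <stdio.h>

	struct list_head { struct list_head *next, *prev; };

	/* Recover the struct that embeds a given list_head member. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct fake_page {
		struct list_head slab_list;	/* stand-in for page->slab_list */
		int id;
	};

	int main(void)
	{
		struct list_head head;
		struct fake_page p = { .id = 42 };

		/* Link p into a circular list via its slab_list member. */
		head.next = head.prev = &p.slab_list;
		p.slab_list.next = p.slab_list.prev = &head;

		/* Walk the list; only the member name ties entry to struct. */
		for (struct list_head *pos = head.next; pos != &head; pos = pos->next)
			printf("id=%d\n",
			       container_of(pos, struct fake_page, slab_list)->id);
		return 0;
	}

Built with cc -std=c99, this prints id=42; switching which member the walker
names, as this patch does throughout, is the entire change.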