Commit 16cb0ec7 authored by Tobin C. Harding, committed by Linus Torvalds

slab: use slab_list instead of lru

Currently we use the page->lru list for maintaining lists of slabs.  We
have a list in the page structure (slab_list) that can be used for this
purpose.  Doing so makes the code cleaner since we are not overloading the
lru list.

Use the slab_list instead of the lru list for maintaining lists of slabs.
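As background (not part of this patch): in struct page, slab_list is declared in a union over the same words as lru, so this rename changes no memory layout; it only makes the slab usage explicit. Below is a minimal, simplified userspace sketch of that overlay — a stand-in, not the kernel's full struct page definition:

	#include <stddef.h>

	/* Stand-in for the kernel's struct list_head. */
	struct list_head {
		struct list_head *next, *prev;
	};

	/*
	 * Simplified sketch of struct page: slab_list occupies the
	 * same storage as lru, so slab code can use a dedicated name
	 * without changing the layout other list users rely on.
	 */
	struct page {
		unsigned long flags;
		union {
			struct list_head lru;		/* page-cache / anon LRU link */
			struct list_head slab_list;	/* slab allocators' list link */
		};
	};

	/* Both names alias the same bytes. */
	_Static_assert(offsetof(struct page, lru) ==
		       offsetof(struct page, slab_list),
		       "lru and slab_list must share storage");

With that overlay, list_for_each_entry(page, &n->slabs_partial, slab_list) walks exactly the same links as the old lru-based code, which is why the diff below is a pure rename.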

Link: http://lkml.kernel.org/r/20190402230545.2929-7-tobin@kernel.org
Signed-off-by: Tobin C. Harding <tobin@kernel.org>
Acked-by: Christoph Lameter <cl@linux.com>
Reviewed-by: Roman Gushchin <guro@fb.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 916ac052
mm/slab.c +25 −24
@@ -1674,8 +1674,8 @@ static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
 {
 	struct page *page, *n;
 
-	list_for_each_entry_safe(page, n, list, lru) {
-		list_del(&page->lru);
+	list_for_each_entry_safe(page, n, list, slab_list) {
+		list_del(&page->slab_list);
 		slab_destroy(cachep, page);
 	}
 }
@@ -2231,8 +2231,8 @@ static int drain_freelist(struct kmem_cache *cache,
 			goto out;
 		}
 
-		page = list_entry(p, struct page, lru);
-		list_del(&page->lru);
+		page = list_entry(p, struct page, slab_list);
+		list_del(&page->slab_list);
 		n->free_slabs--;
 		n->total_slabs--;
 		/*
@@ -2691,13 +2691,13 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
 	if (!page)
 		return;
 
-	INIT_LIST_HEAD(&page->lru);
+	INIT_LIST_HEAD(&page->slab_list);
 	n = get_node(cachep, page_to_nid(page));
 
 	spin_lock(&n->list_lock);
 	n->total_slabs++;
 	if (!page->active) {
-		list_add_tail(&page->lru, &(n->slabs_free));
+		list_add_tail(&page->slab_list, &n->slabs_free);
 		n->free_slabs++;
 	} else
 		fixup_slab_list(cachep, n, page, &list);
@@ -2806,9 +2806,9 @@ static inline void fixup_slab_list(struct kmem_cache *cachep,
 				void **list)
 {
 	/* move slabp to correct slabp list: */
-	list_del(&page->lru);
+	list_del(&page->slab_list);
 	if (page->active == cachep->num) {
-		list_add(&page->lru, &n->slabs_full);
+		list_add(&page->slab_list, &n->slabs_full);
 		if (OBJFREELIST_SLAB(cachep)) {
 #if DEBUG
 			/* Poisoning will be done without holding the lock */
@@ -2822,7 +2822,7 @@ static inline void fixup_slab_list(struct kmem_cache *cachep,
 			page->freelist = NULL;
 		}
 	} else
-		list_add(&page->lru, &n->slabs_partial);
+		list_add(&page->slab_list, &n->slabs_partial);
 }
 
 /* Try to find non-pfmemalloc slab if needed */
@@ -2845,20 +2845,20 @@ static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
 	}
 
 	/* Move pfmemalloc slab to the end of list to speed up next search */
-	list_del(&page->lru);
+	list_del(&page->slab_list);
 	if (!page->active) {
-		list_add_tail(&page->lru, &n->slabs_free);
+		list_add_tail(&page->slab_list, &n->slabs_free);
 		n->free_slabs++;
 	} else
-		list_add_tail(&page->lru, &n->slabs_partial);
+		list_add_tail(&page->slab_list, &n->slabs_partial);
 
-	list_for_each_entry(page, &n->slabs_partial, lru) {
+	list_for_each_entry(page, &n->slabs_partial, slab_list) {
 		if (!PageSlabPfmemalloc(page))
 			return page;
 	}
 
 	n->free_touched = 1;
-	list_for_each_entry(page, &n->slabs_free, lru) {
+	list_for_each_entry(page, &n->slabs_free, slab_list) {
 		if (!PageSlabPfmemalloc(page)) {
 			n->free_slabs--;
 			return page;
@@ -2873,11 +2873,12 @@ static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
 	struct page *page;
 
 	assert_spin_locked(&n->list_lock);
-	page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
+	page = list_first_entry_or_null(&n->slabs_partial, struct page,
+					slab_list);
 	if (!page) {
 		n->free_touched = 1;
 		page = list_first_entry_or_null(&n->slabs_free, struct page,
-						lru);
+						slab_list);
 		if (page)
 			n->free_slabs--;
 	}
@@ -3378,29 +3379,29 @@ static void free_block(struct kmem_cache *cachep, void **objpp,
 		objp = objpp[i];
 
 		page = virt_to_head_page(objp);
-		list_del(&page->lru);
+		list_del(&page->slab_list);
 		check_spinlock_acquired_node(cachep, node);
 		slab_put_obj(cachep, page, objp);
 		STATS_DEC_ACTIVE(cachep);
 
 		/* fixup slab chains */
 		if (page->active == 0) {
-			list_add(&page->lru, &n->slabs_free);
+			list_add(&page->slab_list, &n->slabs_free);
 			n->free_slabs++;
 		} else {
 			/* Unconditionally move a slab to the end of the
 			 * partial list on free - maximum time for the
 			 * other objects to be freed, too.
 			 */
-			list_add_tail(&page->lru, &n->slabs_partial);
+			list_add_tail(&page->slab_list, &n->slabs_partial);
 		}
 	}
 
 	while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
 		n->free_objects -= cachep->num;
 
-		page = list_last_entry(&n->slabs_free, struct page, lru);
-		list_move(&page->lru, list);
+		page = list_last_entry(&n->slabs_free, struct page, slab_list);
+		list_move(&page->slab_list, list);
 		n->free_slabs--;
 		n->total_slabs--;
 	}
@@ -3438,7 +3439,7 @@ free_done:
 		int i = 0;
 		struct page *page;
 
-		list_for_each_entry(page, &n->slabs_free, lru) {
+		list_for_each_entry(page, &n->slabs_free, slab_list) {
 			BUG_ON(page->active);
 
 			i++;
@@ -4302,9 +4303,9 @@ static int leaks_show(struct seq_file *m, void *p)
 			check_irq_on();
 			spin_lock_irq(&n->list_lock);
 
-			list_for_each_entry(page, &n->slabs_full, lru)
+			list_for_each_entry(page, &n->slabs_full, slab_list)
 				handle_slab(x, cachep, page);
-			list_for_each_entry(page, &n->slabs_partial, lru)
+			list_for_each_entry(page, &n->slabs_partial, slab_list)
 				handle_slab(x, cachep, page);
 			spin_unlock_irq(&n->list_lock);
 		}