Commit 97d06609 authored by Christoph Lameter, committed by Pekka Enberg

mm, sl[aou]b: Common definition for boot state of the slab allocators



All allocators have some form of support for tracking their bootstrap
status.

Set up a common definition for the boot states and make all slab
allocators use that definition.

Reviewed-by: Glauber Costa <glommer@parallels.com>
Reviewed-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent 039363f3
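
To make the unified state machine concrete, here is a minimal, self-contained
userspace C model of what the commit introduces. It is a sketch, not kernel
code: the enum mirrors the new mm/slab.h, slab_is_available() mirrors
mm/slab_common.c, and toy_alloc()/main() are hypothetical stand-ins for
kmalloc() callers and the allocator init hooks.

#include <stdio.h>
#include <stdlib.h>

/* Mirror of the shared boot states from the new mm/slab.h */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for arraycache available */
	PARTIAL_L3,		/* SLAB: kmalloc size for l3 struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

static enum slab_state slab_state = DOWN;

/* Same predicate all three allocators now share via mm/slab_common.c */
static int slab_is_available(void)
{
	return slab_state >= UP;
}

/* Hypothetical caller: refuse "slab" allocations until bootstrap is done */
static void *toy_alloc(size_t size)
{
	if (!slab_is_available())
		return NULL;	/* the kernel would use a boot allocator here */
	return malloc(size);	/* stand-in for kmalloc() */
}

int main(void)
{
	printf("before init: %p\n", toy_alloc(32));	/* (nil) */
	slab_state = UP;	/* what kmem_cache_init() now does */
	printf("after init:  %p\n", toy_alloc(32));	/* a real pointer */
	slab_state = FULL;	/* what kmem_cache_init_late() now does */
	return 0;
}
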
include/linux/slab.h +0 −4
@@ -130,10 +130,6 @@ int kmem_cache_shrink(struct kmem_cache *);
 void kmem_cache_free(struct kmem_cache *, void *);
 unsigned int kmem_cache_size(struct kmem_cache *);
 
-/* Slab internal function */
-struct kmem_cache *__kmem_cache_create(const char *, size_t, size_t,
-			unsigned long,
-			void (*)(void *));
 /*
  * Please use this macro to create slab caches. Simply specify the
  * name of the structure and maybe some flags that are listed above.
mm/slab.c +14 −31
@@ -87,6 +87,7 @@
  */
 
 #include	<linux/slab.h>
+#include	"slab.h"
 #include	<linux/mm.h>
 #include	<linux/poison.h>
 #include	<linux/swap.h>
@@ -565,27 +566,6 @@ static struct kmem_cache cache_cache = {
 
 #define BAD_ALIEN_MAGIC 0x01020304ul
 
-/*
- * chicken and egg problem: delay the per-cpu array allocation
- * until the general caches are up.
- */
-static enum {
-	NONE,
-	PARTIAL_AC,
-	PARTIAL_L3,
-	EARLY,
-	LATE,
-	FULL
-} g_cpucache_up;
-
-/*
- * used by boot code to determine if it can use slab based allocator
- */
-int slab_is_available(void)
-{
-	return g_cpucache_up >= EARLY;
-}
-
 #ifdef CONFIG_LOCKDEP
 
 /*
@@ -651,7 +631,7 @@ static void init_node_lock_keys(int q)
 {
 	struct cache_sizes *s = malloc_sizes;
 
-	if (g_cpucache_up < LATE)
+	if (slab_state < UP)
 		return;
 
 	for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
@@ -1649,14 +1629,14 @@ void __init kmem_cache_init(void)
 		}
 	}
 
-	g_cpucache_up = EARLY;
+	slab_state = UP;
 }
 
 void __init kmem_cache_init_late(void)
 {
 	struct kmem_cache *cachep;
 
-	g_cpucache_up = LATE;
+	slab_state = UP;
 
 	/* Annotate slab for lockdep -- annotate the malloc caches */
 	init_lock_keys();
@@ -1668,6 +1648,9 @@ void __init kmem_cache_init_late(void)
 			BUG();
 	mutex_unlock(&cache_chain_mutex);
 
+	/* Done! */
+	slab_state = FULL;
+
 	/*
 	 * Register a cpu startup notifier callback that initializes
 	 * cpu_cache_get for all new cpus
@@ -1699,7 +1682,7 @@ static int __init cpucache_init(void)
 		start_cpu_timer(cpu);
 
 	/* Done! */
-	g_cpucache_up = FULL;
+	slab_state = FULL;
 	return 0;
 }
 __initcall(cpucache_init);
@@ -2167,10 +2150,10 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 
 static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
-	if (g_cpucache_up >= LATE)
+	if (slab_state >= FULL)
 		return enable_cpucache(cachep, gfp);
 
-	if (g_cpucache_up == NONE) {
+	if (slab_state == DOWN) {
 		/*
 		 * Note: the first kmem_cache_create must create the cache
 		 * that's used by kmalloc(24), otherwise the creation of
@@ -2185,16 +2168,16 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 		 */
 		set_up_list3s(cachep, SIZE_AC);
 		if (INDEX_AC == INDEX_L3)
-			g_cpucache_up = PARTIAL_L3;
+			slab_state = PARTIAL_L3;
 		else
-			g_cpucache_up = PARTIAL_AC;
+			slab_state = PARTIAL_ARRAYCACHE;
 	} else {
 		cachep->array[smp_processor_id()] =
 			kmalloc(sizeof(struct arraycache_init), gfp);
 
-		if (g_cpucache_up == PARTIAL_AC) {
+		if (slab_state == PARTIAL_ARRAYCACHE) {
 			set_up_list3s(cachep, SIZE_L3);
-			g_cpucache_up = PARTIAL_L3;
+			slab_state = PARTIAL_L3;
 		} else {
 			int node;
 			for_each_online_node(node) {

mm/slab.h 0 → 100644 +29 −0
@@ -0,0 +1,29 @@
+#ifndef MM_SLAB_H
+#define MM_SLAB_H
+/*
+ * Internal slab definitions
+ */
+
+/*
+ * State of the slab allocator.
+ *
+ * This is used to describe the states of the allocator during bootup.
+ * Allocators use this to gradually bootstrap themselves. Most allocators
+ * have the problem that the structures used for managing slab caches are
+ * allocated from slab caches themselves.
+ */
+enum slab_state {
+	DOWN,			/* No slab functionality yet */
+	PARTIAL,		/* SLUB: kmem_cache_node available */
+	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for arraycache available */
+	PARTIAL_L3,		/* SLAB: kmalloc size for l3 struct available */
+	UP,			/* Slab caches usable but not all extras yet */
+	FULL			/* Everything is working */
+};
+
+extern enum slab_state slab_state;
+
+struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
+	size_t align, unsigned long flags, void (*ctor)(void *));
+
+#endif
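
How the old per-allocator states map onto the shared enum follows from the
substitutions in the hunks above: for SLAB, NONE becomes DOWN, PARTIAL_AC
becomes PARTIAL_ARRAYCACHE, EARLY becomes UP, and LATE becomes UP or FULL
depending on the call site; SLOB's slob_ready flag becomes slab_state >= UP.
SLUB's PARTIAL stage is described only by the enum comment above. Because the
enumerators are declared in boot order, readiness checks reduce to integer
comparisons; a minimal sketch, assuming nothing beyond the enum itself:

#include <assert.h>

/* Declared in boot order, exactly as in mm/slab.h */
enum slab_state { DOWN, PARTIAL, PARTIAL_ARRAYCACHE, PARTIAL_L3, UP, FULL };

int main(void)
{
	/* Ordering is what lets slab_is_available() be a single compare */
	assert(PARTIAL_ARRAYCACHE < UP);	/* still bootstrapping */
	assert(PARTIAL_L3 < UP);		/* still bootstrapping */
	assert(UP >= UP && FULL >= UP);		/* slab usable */
	return 0;
}
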
mm/slab_common.c +9 −0
@@ -16,6 +16,10 @@
 #include <asm/tlbflush.h>
 #include <asm/page.h>
 
+#include "slab.h"
+
+enum slab_state slab_state;
+
 /*
  * kmem_cache_create - Create a cache.
  * @name: A string which is used in /proc/slabinfo to identify this cache.
@@ -66,3 +70,8 @@ out:
 	return s;
 }
 EXPORT_SYMBOL(kmem_cache_create);
+
+int slab_is_available(void)
+{
+	return slab_state >= UP;
+}
mm/slob.c +5 −9
@@ -59,6 +59,8 @@
 
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include "slab.h"
+
 #include <linux/mm.h>
 #include <linux/swap.h> /* struct reclaim_state */
 #include <linux/cache.h>
@@ -531,6 +533,7 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 			c->align = align;
 
 		kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
+		c->refcount = 1;
 	}
 	return c;
 }
@@ -616,19 +619,12 @@ int kmem_cache_shrink(struct kmem_cache *d)
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
-static unsigned int slob_ready __read_mostly;
-
-int slab_is_available(void)
-{
-	return slob_ready;
-}
-
 void __init kmem_cache_init(void)
 {
-	slob_ready = 1;
+	slab_state = UP;
 }
 
 void __init kmem_cache_init_late(void)
 {
-	/* Nothing to do */
+	slab_state = FULL;
 }
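
The pattern the shared slab_is_available() serves looks roughly like the
following kernel-style sketch. It is illustrative rather than taken from this
commit: boot_alloc() is a hypothetical helper, and alloc_bootmem() is the
era-appropriate boot-time fallback.

#include <linux/slab.h>
#include <linux/bootmem.h>

/* Hypothetical early-boot helper: use the slab allocator once it is up,
 * otherwise fall back to the boot-time allocator. */
static void *boot_alloc(size_t size)
{
	if (slab_is_available())
		return kmalloc(size, GFP_KERNEL);
	return alloc_bootmem(size);
}
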