Commit 69df2ac1 authored by Pekka Enberg

Merge branch 'slab/next' into slab/for-linus

parents c1be5a5b 8a965b3b
fs/proc/stat.c
+1 −1
@@ -184,7 +184,7 @@ static int show_stat(struct seq_file *p, void *v)
 
 static int stat_open(struct inode *inode, struct file *file)
 {
-	unsigned size = 1024 + 128 * num_possible_cpus();
+	size_t size = 1024 + 128 * num_possible_cpus();
 	char *buf;
 	struct seq_file *m;
 	int res;

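The one-line change above is worth a note: the computed buffer size is handed to an allocator whose parameter type is size_t, so keeping the arithmetic in size_t avoids an implicit narrowing on 64-bit builds. A minimal user-space sketch of that narrowing, with a deliberately outlandish hypothetical CPU count standing in for num_possible_cpus():

#include <stdio.h>
#include <stddef.h>

/* hypothetical stand-in for num_possible_cpus(); real machines are far smaller */
static unsigned long num_possible_cpus(void)
{
	return 40000000UL;
}

int main(void)
{
	/* 128 * 40e6 exceeds 2^32, so the unsigned int copy wraps on LP64 */
	unsigned narrow = 1024 + 128 * num_possible_cpus();
	size_t wide = 1024 + 128 * num_possible_cpus();

	printf("unsigned: %u\nsize_t:   %zu\n", narrow, wide);
	return 0;
}
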
include/linux/kmalloc_sizes.h

deleted 100644 → 0
+0 −45
@@ -1,45 +0,0 @@
-#if (PAGE_SIZE == 4096)
-	CACHE(32)
-#endif
-	CACHE(64)
-#if L1_CACHE_BYTES < 64
-	CACHE(96)
-#endif
-	CACHE(128)
-#if L1_CACHE_BYTES < 128
-	CACHE(192)
-#endif
-	CACHE(256)
-	CACHE(512)
-	CACHE(1024)
-	CACHE(2048)
-	CACHE(4096)
-	CACHE(8192)
-	CACHE(16384)
-	CACHE(32768)
-	CACHE(65536)
-	CACHE(131072)
-#if KMALLOC_MAX_SIZE >= 262144
-	CACHE(262144)
-#endif
-#if KMALLOC_MAX_SIZE >= 524288
-	CACHE(524288)
-#endif
-#if KMALLOC_MAX_SIZE >= 1048576
-	CACHE(1048576)
-#endif
-#if KMALLOC_MAX_SIZE >= 2097152
-	CACHE(2097152)
-#endif
-#if KMALLOC_MAX_SIZE >= 4194304
-	CACHE(4194304)
-#endif
-#if KMALLOC_MAX_SIZE >= 8388608
-	CACHE(8388608)
-#endif
-#if KMALLOC_MAX_SIZE >= 16777216
-	CACHE(16777216)
-#endif
-#if KMALLOC_MAX_SIZE >= 33554432
-	CACHE(33554432)
-#endif
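
For readers who have not met the pattern being deleted: kmalloc_sizes.h was an x-macro list. Each includer defined CACHE(x) to whatever it needed, included the header to stamp out one entry per general cache size, then undefined the macro. A self-contained user-space sketch of the same technique, with the size list inlined as a macro (the kernel re-included the header instead) and trimmed to four hypothetical entries:

#include <stdio.h>

/* trimmed stand-in for the deleted header's CACHE(x) list */
#define KMALLOC_SIZE_LIST \
	CACHE(32) CACHE(64) CACHE(128) CACHE(256)

/* expansion 1: a table of object sizes */
#define CACHE(x) x,
static const int cache_size[] = { KMALLOC_SIZE_LIST };
#undef CACHE

/* expansion 2: a matching table of cache names */
#define CACHE(x) "size-" #x,
static const char *cache_name[] = { KMALLOC_SIZE_LIST };
#undef CACHE

int main(void)
{
	for (unsigned i = 0; i < sizeof(cache_size) / sizeof(cache_size[0]); i++)
		printf("%s serves objects up to %d bytes\n",
		       cache_name[i], cache_size[i]);
	return 0;
}
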
include/linux/slab.h
+163 −68
@@ -94,29 +94,6 @@
 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
 				(unsigned long)ZERO_SIZE_PTR)
 
-/*
- * Common fields provided in kmem_cache by all slab allocators
- * This struct is either used directly by the allocator (SLOB)
- * or the allocator must include definitions for all fields
- * provided in kmem_cache_common in their definition of kmem_cache.
- *
- * Once we can do anonymous structs (C11 standard) we could put a
- * anonymous struct definition in these allocators so that the
- * separate allocations in the kmem_cache structure of SLAB and
- * SLUB is no longer needed.
- */
-#ifdef CONFIG_SLOB
-struct kmem_cache {
-	unsigned int object_size;/* The original size of the object */
-	unsigned int size;	/* The aligned/padded/added on size  */
-	unsigned int align;	/* Alignment as calculated */
-	unsigned long flags;	/* Active flags on the slab */
-	const char *name;	/* Slab name for sysfs */
-	int refcount;		/* Use counter */
-	void (*ctor)(void *);	/* Called on object slot creation */
-	struct list_head list;	/* List of all slab caches on the system */
-};
-#endif
 
 struct mem_cgroup;
 /*
@@ -148,7 +125,63 @@ void kmem_cache_free(struct kmem_cache *, void *);
 		(__flags), NULL)
 
 /*
- * The largest kmalloc size supported by the slab allocators is
+ * Common kmalloc functions provided by all allocators
+ */
+void * __must_check __krealloc(const void *, size_t, gfp_t);
+void * __must_check krealloc(const void *, size_t, gfp_t);
+void kfree(const void *);
+void kzfree(const void *);
+size_t ksize(const void *);
+
+/*
+ * Some archs want to perform DMA into kmalloc caches and need a guaranteed
+ * alignment larger than the alignment of a 64-bit integer.
+ * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
+ */
+#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
+#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
+#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
+#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
+#else
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#endif
+
+#ifdef CONFIG_SLOB
+/*
+ * Common fields provided in kmem_cache by all slab allocators
+ * This struct is either used directly by the allocator (SLOB)
+ * or the allocator must include definitions for all fields
+ * provided in kmem_cache_common in their definition of kmem_cache.
+ *
+ * Once we can do anonymous structs (C11 standard) we could put a
+ * anonymous struct definition in these allocators so that the
+ * separate allocations in the kmem_cache structure of SLAB and
+ * SLUB is no longer needed.
+ */
+struct kmem_cache {
+	unsigned int object_size;/* The original size of the object */
+	unsigned int size;	/* The aligned/padded/added on size  */
+	unsigned int align;	/* Alignment as calculated */
+	unsigned long flags;	/* Active flags on the slab */
+	const char *name;	/* Slab name for sysfs */
+	int refcount;		/* Use counter */
+	void (*ctor)(void *);	/* Called on object slot creation */
+	struct list_head list;	/* List of all slab caches on the system */
+};
+
+#define KMALLOC_MAX_SIZE (1UL << 30)
+
+#include <linux/slob_def.h>
+
+#else /* CONFIG_SLOB */
+
+/*
+ * Kmalloc array related definitions
+ */
+
+#ifdef CONFIG_SLAB
+/*
+ * The largest kmalloc size supported by the SLAB allocators is
  * 32 megabyte (2^25) or the maximum allocatable page order if that is
  * less than 32 MB.
  *
@@ -158,21 +191,119 @@ void kmem_cache_free(struct kmem_cache *, void *);
  */
 #define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
 				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
+#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
+#ifndef KMALLOC_SHIFT_LOW
+#define KMALLOC_SHIFT_LOW	5
+#endif
+#else
+/*
+ * SLUB allocates up to order 2 pages directly and otherwise
+ * passes the request to the page allocator.
+ */
+#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
+#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
+#ifndef KMALLOC_SHIFT_LOW
+#define KMALLOC_SHIFT_LOW	3
+#endif
+#endif
 
-#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
-#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_HIGH - PAGE_SHIFT)
+/* Maximum allocatable size */
+#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
+/* Maximum size for which we actually use a slab cache */
+#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
+/* Maximum order allocatable via the slab allocagtor */
+#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)
 
 /*
- * Some archs want to perform DMA into kmalloc caches and need a guaranteed
- * alignment larger than the alignment of a 64-bit integer.
- * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
+ * Kmalloc subsystem.
  */
-#ifdef ARCH_DMA_MINALIGN
-#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
+#ifndef KMALLOC_MIN_SIZE
+#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
+#endif
+
+extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
+#ifdef CONFIG_ZONE_DMA
+extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
+#endif
+
+/*
+ * Figure out which kmalloc slab an allocation of a certain size
+ * belongs to.
+ * 0 = zero alloc
+ * 1 =  65 .. 96 bytes
+ * 2 = 120 .. 192 bytes
+ * n = 2^(n-1) .. 2^n -1
+ */
+static __always_inline int kmalloc_index(size_t size)
+{
+	if (!size)
+		return 0;
+
+	if (size <= KMALLOC_MIN_SIZE)
+		return KMALLOC_SHIFT_LOW;
+
+	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
+		return 1;
+	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
+		return 2;
+	if (size <=          8) return 3;
+	if (size <=         16) return 4;
+	if (size <=         32) return 5;
+	if (size <=         64) return 6;
+	if (size <=        128) return 7;
+	if (size <=        256) return 8;
+	if (size <=        512) return 9;
+	if (size <=       1024) return 10;
+	if (size <=   2 * 1024) return 11;
+	if (size <=   4 * 1024) return 12;
+	if (size <=   8 * 1024) return 13;
+	if (size <=  16 * 1024) return 14;
+	if (size <=  32 * 1024) return 15;
+	if (size <=  64 * 1024) return 16;
+	if (size <= 128 * 1024) return 17;
+	if (size <= 256 * 1024) return 18;
+	if (size <= 512 * 1024) return 19;
+	if (size <= 1024 * 1024) return 20;
+	if (size <=  2 * 1024 * 1024) return 21;
+	if (size <=  4 * 1024 * 1024) return 22;
+	if (size <=  8 * 1024 * 1024) return 23;
+	if (size <=  16 * 1024 * 1024) return 24;
+	if (size <=  32 * 1024 * 1024) return 25;
+	if (size <=  64 * 1024 * 1024) return 26;
+	BUG();
+
+	/* Will never be reached. Needed because the compiler may complain */
+	return -1;
+}
+
+#ifdef CONFIG_SLAB
+#include <linux/slab_def.h>
+#elif defined(CONFIG_SLUB)
+#include <linux/slub_def.h>
 #else
-#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#error "Unknown slab allocator"
 #endif
 
+/*
+ * Determine size used for the nth kmalloc cache.
+ * return size or 0 if a kmalloc cache for that
+ * size does not exist
+ */
+static __always_inline int kmalloc_size(int n)
+{
+	if (n > 2)
+		return 1 << n;
+
+	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
+		return 96;
+
+	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
+		return 192;
+
+	return 0;
+}
+#endif /* !CONFIG_SLOB */
+
 /*
  * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
  * Intended for arches that get misalignment faults even for 64 bit integer
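
To make the slot mapping in the comment above concrete: slot n serves sizes in (2^(n-1), 2^n], with slots 1 and 2 carved out for the off-power sizes 96 and 192. Below is a user-space replica of the mapping, assuming KMALLOC_MIN_SIZE == 8 and KMALLOC_SHIFT_LOW == 3 (the defaults with no DMA alignment override); it uses the loop that the kernel version unrolls by hand because some compilers fail to constant-fold loops:

#include <stdio.h>
#include <stddef.h>

#define KMALLOC_MIN_SIZE 8
#define KMALLOC_SHIFT_LOW 3

/* user-space replica of the mapping; not the kernel function itself */
static int kmalloc_index(size_t size)
{
	if (!size)
		return 0;		/* zero alloc */
	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;
	if (size > 64 && size <= 96)
		return 1;		/* the 96-byte cache */
	if (size > 128 && size <= 192)
		return 2;		/* the 192-byte cache */
	for (int i = 3; i <= 26; i++)	/* slot i covers 2^(i-1)+1 .. 2^i */
		if (size <= (1UL << i))
			return i;
	return -1;			/* larger than any kmalloc cache */
}

int main(void)
{
	size_t sizes[] = { 8, 96, 100, 192, 200, 4096 };

	for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("kmalloc(%zu) -> kmalloc_caches[%d]\n",
		       sizes[i], kmalloc_index(sizes[i]));
	return 0;
}
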
@@ -224,42 +355,6 @@ struct seq_file;
 int cache_show(struct kmem_cache *s, struct seq_file *m);
 void print_slabinfo_header(struct seq_file *m);
 
-/*
- * Common kmalloc functions provided by all allocators
- */
-void * __must_check __krealloc(const void *, size_t, gfp_t);
-void * __must_check krealloc(const void *, size_t, gfp_t);
-void kfree(const void *);
-void kzfree(const void *);
-size_t ksize(const void *);
-
-/*
- * Allocator specific definitions. These are mainly used to establish optimized
- * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by
- * selecting the appropriate general cache at compile time.
- *
- * Allocators must define at least:
- *
- *	kmem_cache_alloc()
- *	__kmalloc()
- *	kmalloc()
- *
- * Those wishing to support NUMA must also define:
- *
- *	kmem_cache_alloc_node()
- *	kmalloc_node()
- *
- * See each allocator definition file for additional comments and
- * implementation notes.
- */
-#ifdef CONFIG_SLUB
-#include <linux/slub_def.h>
-#elif defined(CONFIG_SLOB)
-#include <linux/slob_def.h>
-#else
-#include <linux/slab_def.h>
-#endif
-
 /**
  * kmalloc_array - allocate memory for an array.
  * @n: number of elements.
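
A property the two new helpers guarantee together, though the comments only imply it: kmalloc_size(kmalloc_index(size)) is always at least size, so every request fits in the cache its index selects. A quick self-contained user-space check of that round trip, under the same KMALLOC_MIN_SIZE == 8 assumption as the sketch above:

#include <assert.h>
#include <stdio.h>
#include <stddef.h>

#define KMALLOC_MIN_SIZE 8
#define KMALLOC_SHIFT_LOW 3

static int kmalloc_index(size_t size)	/* loop form of the mapping */
{
	if (!size)
		return 0;
	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;
	if (size > 64 && size <= 96)
		return 1;
	if (size > 128 && size <= 192)
		return 2;
	for (int i = 3; i <= 26; i++)
		if (size <= (1UL << i))
			return i;
	return -1;
}

static int kmalloc_size(int n)		/* mirrors the helper added above */
{
	if (n > 2)
		return 1 << n;
	if (n == 1)
		return 96;
	if (n == 2)
		return 192;
	return 0;
}

int main(void)
{
	/* every requested size must fit in the cache its index points at */
	for (size_t size = 1; size <= 8192; size++)
		assert((size_t)kmalloc_size(kmalloc_index(size)) >= size);
	puts("round trip holds for sizes 1..8192");
	return 0;
}
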
include/linux/slab_def.h
+17 −37
@@ -11,8 +11,6 @@
  */
 
 #include <linux/init.h>
-#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
-#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
 #include <linux/compiler.h>
 
 /*
@@ -97,23 +95,13 @@ struct kmem_cache {
 	 * pointer for each node since "nodelists" uses the remainder of
 	 * available pointers.
 	 */
-	struct kmem_list3 **nodelists;
+	struct kmem_cache_node **node;
 	struct array_cache *array[NR_CPUS + MAX_NUMNODES];
 	/*
 	 * Do not add fields after array[]
 	 */
 };
 
-/* Size description struct for general caches. */
-struct cache_sizes {
-	size_t		 	cs_size;
-	struct kmem_cache	*cs_cachep;
-#ifdef CONFIG_ZONE_DMA
-	struct kmem_cache	*cs_dmacachep;
-#endif
-};
-extern struct cache_sizes malloc_sizes[];
-
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
@@ -133,26 +121,22 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 	void *ret;
 
 	if (__builtin_constant_p(size)) {
-		int i = 0;
+		int i;
 
 		if (!size)
 			return ZERO_SIZE_PTR;
 
-#define CACHE(x) \
-		if (size <= x) \
-			goto found; \
-		else \
-			i++;
-#include <linux/kmalloc_sizes.h>
-#undef CACHE
+		if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
 			return NULL;
-found:
+
+		i = kmalloc_index(size);
+
 #ifdef CONFIG_ZONE_DMA
 		if (flags & GFP_DMA)
-			cachep = malloc_sizes[i].cs_dmacachep;
+			cachep = kmalloc_dma_caches[i];
 		else
 #endif
-			cachep = malloc_sizes[i].cs_cachep;
+			cachep = kmalloc_caches[i];
 
 		ret = kmem_cache_alloc_trace(cachep, flags, size);
 
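The rewrite above keeps the same compile-time trick the deleted CACHE() walk relied on: when size is a compile-time constant, the inlined kmalloc_index() collapses to a constant and the cache is selected with no runtime search. A rough user-space illustration of that folding, with hypothetical names and a table trimmed to three slots (GCC/Clang only, and the fold depends on inlining and optimization being enabled):

#include <stdio.h>
#include <stddef.h>

static inline int kmalloc_index(size_t size)	/* trimmed to three slots */
{
	if (size <= 8)
		return 3;
	if (size <= 16)
		return 4;
	return 5;
}

static const char *kmalloc_caches[] = {
	NULL, NULL, NULL, "kmalloc-8", "kmalloc-16", "kmalloc-32",
};

static inline const char *pick_cache(size_t size)
{
	/* folds to 1 when inlined with a constant argument */
	if (__builtin_constant_p(size))
		return kmalloc_caches[kmalloc_index(size)];
	return "runtime __kmalloc() path";	/* stand-in for the fallback */
}

int main(void)
{
	/* constant argument: at -O2 the lookup is resolved at compile time */
	printf("%s\n", pick_cache(12));
	return 0;
}
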
@@ -186,26 +170,22 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 	struct kmem_cache *cachep;
 
 	if (__builtin_constant_p(size)) {
-		int i = 0;
+		int i;
 
 		if (!size)
 			return ZERO_SIZE_PTR;
 
-#define CACHE(x) \
-		if (size <= x) \
-			goto found; \
-		else \
-			i++;
-#include <linux/kmalloc_sizes.h>
-#undef CACHE
+		if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
 			return NULL;
-found:
+
+		i = kmalloc_index(size);
+
 #ifdef CONFIG_ZONE_DMA
 		if (flags & GFP_DMA)
-			cachep = malloc_sizes[i].cs_dmacachep;
+			cachep = kmalloc_dma_caches[i];
 		else
 #endif
-			cachep = malloc_sizes[i].cs_cachep;
+			cachep = kmalloc_caches[i];
 
 		return kmem_cache_alloc_node_trace(cachep, flags, node, size);
 	}
include/linux/slub_def.h
+11 −125
@@ -53,17 +53,6 @@ struct kmem_cache_cpu {
 #endif
 };
 
-struct kmem_cache_node {
-	spinlock_t list_lock;	/* Protect partial list and nr_partial */
-	unsigned long nr_partial;
-	struct list_head partial;
-#ifdef CONFIG_SLUB_DEBUG
-	atomic_long_t nr_slabs;
-	atomic_long_t total_objects;
-	struct list_head full;
-#endif
-};
-
 /*
  * Word size structure that can be atomically updated or read and that
  * contains both the order and the number of objects that a slab of the
@@ -115,111 +104,6 @@ struct kmem_cache {
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
-/*
- * Kmalloc subsystem.
- */
-#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
-#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
-#else
-#define KMALLOC_MIN_SIZE 8
-#endif
-
-#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
-
-/*
- * Maximum kmalloc object size handled by SLUB. Larger object allocations
- * are passed through to the page allocator. The page allocator "fastpath"
- * is relatively slow so we need this value sufficiently high so that
- * performance critical objects are allocated through the SLUB fastpath.
- *
- * This should be dropped to PAGE_SIZE / 2 once the page allocator
- * "fastpath" becomes competitive with the slab allocator fastpaths.
- */
-#define SLUB_MAX_SIZE (2 * PAGE_SIZE)
-
-#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
-
-#ifdef CONFIG_ZONE_DMA
-#define SLUB_DMA __GFP_DMA
-#else
-/* Disable DMA functionality */
-#define SLUB_DMA (__force gfp_t)0
-#endif
-
-/*
- * We keep the general caches in an array of slab caches that are used for
- * 2^x bytes of allocations.
- */
-extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
-
-/*
- * Sorry that the following has to be that ugly but some versions of GCC
- * have trouble with constant propagation and loops.
- */
-static __always_inline int kmalloc_index(size_t size)
-{
-	if (!size)
-		return 0;
-
-	if (size <= KMALLOC_MIN_SIZE)
-		return KMALLOC_SHIFT_LOW;
-
-	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
-		return 1;
-	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
-		return 2;
-	if (size <=          8) return 3;
-	if (size <=         16) return 4;
-	if (size <=         32) return 5;
-	if (size <=         64) return 6;
-	if (size <=        128) return 7;
-	if (size <=        256) return 8;
-	if (size <=        512) return 9;
-	if (size <=       1024) return 10;
-	if (size <=   2 * 1024) return 11;
-	if (size <=   4 * 1024) return 12;
-/*
- * The following is only needed to support architectures with a larger page
- * size than 4k. We need to support 2 * PAGE_SIZE here. So for a 64k page
- * size we would have to go up to 128k.
- */
-	if (size <=   8 * 1024) return 13;
-	if (size <=  16 * 1024) return 14;
-	if (size <=  32 * 1024) return 15;
-	if (size <=  64 * 1024) return 16;
-	if (size <= 128 * 1024) return 17;
-	if (size <= 256 * 1024) return 18;
-	if (size <= 512 * 1024) return 19;
-	if (size <= 1024 * 1024) return 20;
-	if (size <=  2 * 1024 * 1024) return 21;
-	BUG();
-	return -1; /* Will never be reached */
-
-/*
- * What we really wanted to do and cannot do because of compiler issues is:
- *	int i;
- *	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
- *		if (size <= (1 << i))
- *			return i;
- */
-}
-
-/*
- * Find the slab cache for a given combination of allocation flags and size.
- *
- * This ought to end up with a global pointer to the right cache
- * in kmalloc_caches.
- */
-static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
-{
-	int index = kmalloc_index(size);
-
-	if (index == 0)
-		return NULL;
-
-	return kmalloc_caches[index];
-}
-
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
@@ -274,16 +158,17 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
-		if (size > SLUB_MAX_SIZE)
+		if (size > KMALLOC_MAX_CACHE_SIZE)
 			return kmalloc_large(size, flags);
 
-		if (!(flags & SLUB_DMA)) {
-			struct kmem_cache *s = kmalloc_slab(size);
+		if (!(flags & GFP_DMA)) {
+			int index = kmalloc_index(size);
 
-			if (!s)
+			if (!index)
 				return ZERO_SIZE_PTR;
 
-			return kmem_cache_alloc_trace(s, flags, size);
+			return kmem_cache_alloc_trace(kmalloc_caches[index],
+					flags, size);
 		}
 	}
 	return __kmalloc(size, flags);
@@ -310,13 +195,14 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) &&
-		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
-			struct kmem_cache *s = kmalloc_slab(size);
+		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
+		int index = kmalloc_index(size);
 
-		if (!s)
+		if (!index)
 			return ZERO_SIZE_PTR;
 
-		return kmem_cache_alloc_node_trace(s, flags, node, size);
+		return kmem_cache_alloc_node_trace(kmalloc_caches[index],
+			       flags, node, size);
 	}
 	return __kmalloc_node(size, flags, node);
 }
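
Net effect on SLUB's triage: the cutoff that used to be spelled SLUB_MAX_SIZE (2 * PAGE_SIZE) is now KMALLOC_MAX_CACHE_SIZE, derived from KMALLOC_SHIFT_HIGH = PAGE_SHIFT + 1, so the boundary itself is unchanged. A small sketch of the resulting routing, assuming 4 KiB pages (the messages printed are illustrative, not kernel output):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12	/* assumed: 4 KiB pages */
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
#define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)

static void route(size_t size)
{
	if (size > KMALLOC_MAX_CACHE_SIZE)
		printf("%5zu bytes -> kmalloc_large(), i.e. the page allocator\n", size);
	else
		printf("%5zu bytes -> per-size kmalloc cache\n", size);
}

int main(void)
{
	route(4096);	/* fits a slab cache */
	route(8192);	/* boundary: still a slab cache */
	route(16384);	/* handed to the page allocator */
	return 0;
}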