Commit 87098373 authored by Christoph Lameter, committed by Linus Torvalds

slub: avoid irqoff/on in bulk allocation



Use the new ___slab_alloc() function, which can perform the allocation while
interrupts are disabled. This avoids the irq on/off sequences around the slow
path in the bulk allocator.

Signed-off-by: Christoph Lameter <cl@linux.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Alexander Duyck <alexander.h.duyck@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a380a3c7
+11 −13
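
The pattern the commit message describes is easier to see outside the diff. Below is a minimal userspace sketch, not kernel code: the names fake_irq_disable(), fake_irq_enable(), slow_refill() and free_partial() are invented for illustration. It shows the shape the patch moves to: the "interrupts disabled" state is held across the whole bulk loop, the slow path is one that tolerates running in that state (the role ___slab_alloc() plays in the real code), and every failure funnels through a single error label that releases the partially filled array.

/*
 * Minimal userspace sketch of the bulk-allocation pattern adopted by this
 * patch. Not kernel code: fake_irq_disable()/fake_irq_enable() only model
 * local_irq_disable()/local_irq_enable(), and slow_refill() stands in for a
 * slow path (like ___slab_alloc()) that may be called with irqs disabled.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

static int fake_irq_depth;			/* models the irq-off state */

static void fake_irq_disable(void) { fake_irq_depth++; }
static void fake_irq_enable(void)  { fake_irq_depth--; }

/* Slow path that is safe to run while "interrupts" are disabled. */
static void *slow_refill(size_t size)
{
	return malloc(size);			/* may return NULL */
}

static void free_partial(void **p, size_t n)
{
	for (size_t j = 0; j < n; j++)
		free(p[j]);
}

/* Allocate 'count' objects of 'size' bytes into p[]; all or nothing. */
static bool bulk_alloc(size_t size, size_t count, void **p)
{
	size_t i;

	fake_irq_disable();			/* one irq-off section for the whole loop */
	for (i = 0; i < count; i++) {
		/*
		 * The real fast path pops from the per-CPU freelist here; on a
		 * miss it now calls the slow path directly instead of enabling
		 * interrupts first and disabling them again afterwards.
		 */
		p[i] = slow_refill(size);
		if (!p[i])
			goto error;		/* single exit unwinds partial work */
	}
	fake_irq_enable();
	return true;

error:
	free_partial(p, i);			/* p[0..i-1] were allocated */
	fake_irq_enable();
	return false;
}

int main(void)
{
	void *objs[8];

	if (bulk_alloc(64, 8, objs)) {
		puts("allocated 8 objects in bulk");
		free_partial(objs, 8);
	}
	return 0;
}
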
@@ -2818,30 +2818,23 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 		void *object = c->freelist;
 
 		if (unlikely(!object)) {
-			local_irq_enable();
 			/*
 			 * Invoking slow path likely have side-effect
 			 * of re-populating per CPU c->freelist
 			 */
-			p[i] = __slab_alloc(s, flags, NUMA_NO_NODE,
+			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
 					    _RET_IP_, c);
-			if (unlikely(!p[i])) {
-				__kmem_cache_free_bulk(s, i, p);
-				return false;
-			}
-			local_irq_disable();
+			if (unlikely(!p[i]))
+				goto error;
+
 			c = this_cpu_ptr(s->cpu_slab);
 			continue; /* goto for-loop */
 		}
 
 		/* kmem_cache debug support */
 		s = slab_pre_alloc_hook(s, flags);
-		if (unlikely(!s)) {
-			__kmem_cache_free_bulk(s, i, p);
-			c->tid = next_tid(c->tid);
-			local_irq_enable();
-			return false;
-		}
+		if (unlikely(!s))
+			goto error;
 
 		c->freelist = get_freepointer(s, object);
 		p[i] = object;
@@ -2861,6 +2854,11 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	}
 
 	return true;
+
+error:
+	__kmem_cache_free_bulk(s, i, p);
+	local_irq_enable();
+	return false;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
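
For callers, the function keeps the signature shown in the hunk headers: a bool return where true means every requested object was allocated and false means none were (partial allocations are rolled back via __kmem_cache_free_bulk() in the error path). A hedged sketch of a caller follows; the cache name, object size and batch size are made up for illustration, and kmem_cache_free_bulk() is the companion bulk-free API.

/*
 * Illustrative module-style caller of the bulk API at the time of this
 * commit. The "bulk_demo" cache, its 256-byte object size and the batch
 * size are hypothetical; only the kmem_cache_* calls are real APIs.
 */
#include <linux/module.h>
#include <linux/slab.h>

#define DEMO_BATCH 16

static struct kmem_cache *demo_cache;

static int __init bulk_demo_init(void)
{
	void *objs[DEMO_BATCH];

	demo_cache = kmem_cache_create("bulk_demo", 256, 0, 0, NULL);
	if (!demo_cache)
		return -ENOMEM;

	/*
	 * At this point in history the function returns a bool: true means
	 * all DEMO_BATCH objects were allocated, false means none were
	 * (any partial allocation is undone internally).
	 */
	if (!kmem_cache_alloc_bulk(demo_cache, GFP_KERNEL, DEMO_BATCH, objs)) {
		kmem_cache_destroy(demo_cache);
		return -ENOMEM;
	}

	/* Companion bulk free of the whole batch. */
	kmem_cache_free_bulk(demo_cache, DEMO_BATCH, objs);
	return 0;
}

static void __exit bulk_demo_exit(void)
{
	kmem_cache_destroy(demo_cache);
}

module_init(bulk_demo_init);
module_exit(bulk_demo_exit);
MODULE_LICENSE("GPL");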