Commit 90e9f6a6 authored by Yu Zhao, committed by Linus Torvalds

mm/slub.c: avoid slub allocation while holding list_lock

If we are already holding list_lock, don't call kmalloc().  Otherwise
we can deadlock, because the allocation may recurse into SLUB and try
to grab the same lock.

Fix the problem by using a static bitmap, guarded by its own spinlock,
instead; the resulting caller pattern is sketched after the lockdep
splat below.

  WARNING: possible recursive locking detected
  --------------------------------------------
  mount-encrypted/4921 is trying to acquire lock:
  (&(&n->list_lock)->rlock){-.-.}, at: ___slab_alloc+0x104/0x437

  but task is already holding lock:
  (&(&n->list_lock)->rlock){-.-.}, at: __kmem_cache_shutdown+0x81/0x3cb

  other info that might help us debug this:
   Possible unsafe locking scenario:

         CPU0
         ----
    lock(&(&n->list_lock)->rlock);
    lock(&(&n->list_lock)->rlock);

   *** DEADLOCK ***
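
In outline, get_map() now hands out a single static bitmap under a
dedicated spinlock, and put_map() releases it, so nothing is allocated
while list_lock is held.  A condensed sketch of the caller pattern,
abridged from the diff below (not a standalone example):

  map = get_map(s, page);	/* takes object_map_lock, fills the map */
  for_each_object(p, s, addr, page->objects)
  	if (!test_bit(slab_index(p, s, addr), map))
  		;		/* object is in use: validate/record it */
  put_map(map);			/* drops object_map_lock */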

Link: http://lkml.kernel.org/r/20191108193958.205102-2-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
Cc: Yu Zhao <yuzhao@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 mm/slub.c | 88 ++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 47 insertions(+), 41 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -439,19 +439,38 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 }
 
 #ifdef CONFIG_SLUB_DEBUG
+static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
+static DEFINE_SPINLOCK(object_map_lock);
+
 /*
  * Determine a map of object in use on a page.
  *
  * Node listlock must be held to guarantee that the page does
  * not vanish from under us.
  */
-static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
+static unsigned long *get_map(struct kmem_cache *s, struct page *page)
 {
 	void *p;
 	void *addr = page_address(page);
 
+	VM_BUG_ON(!irqs_disabled());
+
+	spin_lock(&object_map_lock);
+
+	bitmap_zero(object_map, page->objects);
+
 	for (p = page->freelist; p; p = get_freepointer(s, p))
-		set_bit(slab_index(p, s, addr), map);
+		set_bit(slab_index(p, s, addr), object_map);
+
+	return object_map;
+}
+
+static void put_map(unsigned long *map)
+{
+	VM_BUG_ON(map != object_map);
+	lockdep_assert_held(&object_map_lock);
+
+	spin_unlock(&object_map_lock);
 }
 
 static inline unsigned int size_from_object(struct kmem_cache *s)
@@ -3675,13 +3694,12 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 #ifdef CONFIG_SLUB_DEBUG
 	void *addr = page_address(page);
 	void *p;
-	unsigned long *map = bitmap_zalloc(page->objects, GFP_ATOMIC);
-	if (!map)
-		return;
+	unsigned long *map;
+
 	slab_err(s, page, text, s->name);
 	slab_lock(page);
 
-	get_map(s, page, map);
+	map = get_map(s, page);
 	for_each_object(p, s, addr, page->objects) {
 
 		if (!test_bit(slab_index(p, s, addr), map)) {
@@ -3689,8 +3707,9 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 			print_tracking(s, p);
 		}
 	}
+	put_map(map);
+
 	slab_unlock(page);
-	bitmap_free(map);
 #endif
 }
 
@@ -4384,19 +4403,19 @@ static int count_total(struct page *page)
 #endif
 
 #ifdef CONFIG_SLUB_DEBUG
-static void validate_slab(struct kmem_cache *s, struct page *page,
-						unsigned long *map)
+static void validate_slab(struct kmem_cache *s, struct page *page)
 {
 	void *p;
 	void *addr = page_address(page);
+	unsigned long *map;
+
+	slab_lock(page);
 
 	if (!check_slab(s, page) || !on_freelist(s, page, NULL))
-		return;
+		goto unlock;
 
 	/* Now we know that a valid freelist exists */
-	bitmap_zero(map, page->objects);
-
-	get_map(s, page, map);
+	map = get_map(s, page);
 	for_each_object(p, s, addr, page->objects) {
 		u8 val = test_bit(slab_index(p, s, addr), map) ?
 			 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
@@ -4404,18 +4423,13 @@ static void validate_slab(struct kmem_cache *s, struct page *page,
 		if (!check_object(s, page, p, val))
 			break;
 	}
-}
-
-static void validate_slab_slab(struct kmem_cache *s, struct page *page,
-						unsigned long *map)
-{
-	slab_lock(page);
-	validate_slab(s, page, map);
+	put_map(map);
+unlock:
 	slab_unlock(page);
 }
 
 static int validate_slab_node(struct kmem_cache *s,
-		struct kmem_cache_node *n, unsigned long *map)
+		struct kmem_cache_node *n)
 {
 	unsigned long count = 0;
 	struct page *page;
@@ -4424,7 +4438,7 @@ static int validate_slab_node(struct kmem_cache *s,
 	spin_lock_irqsave(&n->list_lock, flags);
 
 	list_for_each_entry(page, &n->partial, slab_list) {
-		validate_slab_slab(s, page, map);
+		validate_slab(s, page);
 		count++;
 	}
 	if (count != n->nr_partial)
@@ -4435,7 +4449,7 @@ static int validate_slab_node(struct kmem_cache *s,
 		goto out;
 
 	list_for_each_entry(page, &n->full, slab_list) {
-		validate_slab_slab(s, page, map);
+		validate_slab(s, page);
 		count++;
 	}
 	if (count != atomic_long_read(&n->nr_slabs))
@@ -4452,15 +4466,11 @@ static long validate_slab_cache(struct kmem_cache *s)
 	int node;
 	unsigned long count = 0;
 	struct kmem_cache_node *n;
-	unsigned long *map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
-
-	if (!map)
-		return -ENOMEM;
 
 	flush_all(s);
 	for_each_kmem_cache_node(s, node, n)
-		count += validate_slab_node(s, n, map);
-	bitmap_free(map);
+		count += validate_slab_node(s, n);
+
 	return count;
 }
 /*
@@ -4590,18 +4600,17 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 }
 
 static void process_slab(struct loc_track *t, struct kmem_cache *s,
-		struct page *page, enum track_item alloc,
-		unsigned long *map)
+		struct page *page, enum track_item alloc)
 {
 	void *addr = page_address(page);
 	void *p;
+	unsigned long *map;
 
-	bitmap_zero(map, page->objects);
-	get_map(s, page, map);
-
+	map = get_map(s, page);
 	for_each_object(p, s, addr, page->objects)
 		if (!test_bit(slab_index(p, s, addr), map))
 			add_location(t, s, get_track(s, p, alloc));
+	put_map(map);
 }
 
 static int list_locations(struct kmem_cache *s, char *buf,
@@ -4612,11 +4621,9 @@ static int list_locations(struct kmem_cache *s, char *buf,
 	struct loc_track t = { 0, 0, NULL };
 	int node;
 	struct kmem_cache_node *n;
-	unsigned long *map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
 
-	if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
+	if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
 			     GFP_KERNEL)) {
-		bitmap_free(map);
 		return sprintf(buf, "Out of memory\n");
 	}
 	/* Push back cpu slabs */
@@ -4631,9 +4638,9 @@ static int list_locations(struct kmem_cache *s, char *buf,
 
 		spin_lock_irqsave(&n->list_lock, flags);
 		list_for_each_entry(page, &n->partial, slab_list)
-			process_slab(&t, s, page, alloc, map);
+			process_slab(&t, s, page, alloc);
 		list_for_each_entry(page, &n->full, slab_list)
-			process_slab(&t, s, page, alloc, map);
+			process_slab(&t, s, page, alloc);
 		spin_unlock_irqrestore(&n->list_lock, flags);
 	}
 
@@ -4682,7 +4689,6 @@ static int list_locations(struct kmem_cache *s, char *buf,
 	}
 
 	free_loc_track(&t);
-	bitmap_free(map);
 	if (!t.count)
 		len += sprintf(buf, "No data\n");
 	return len;