Commit 661d4f55 authored by Pavel Begunkov, committed by Jens Axboe

sbitmap: remove swap_lock



map->swap_lock protects map->cleared from concurrent modification;
however, sbitmap_deferred_clear() already drains it atomically, so it's
guaranteed not to lose bits on concurrent sbitmap_deferred_clear()
calls.

A single-threaded, tag-heavy test on top of nullblk showed a ~1.5%
throughput increase and a 3% -> 1% reduction in cycles spent in
sbitmap_get(), according to perf.
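For illustration, here is a minimal userspace sketch of the same
lock-free drain, using C11 atomics in place of the kernel's
READ_ONCE()/xchg()/cmpxchg(); the struct and function names are made
up for the example and are not kernel code:

	#include <stdatomic.h>
	#include <stdbool.h>

	struct word {
		_Atomic unsigned long word;	/* allocated bits */
		_Atomic unsigned long cleared;	/* deferred-freed bits */
	};

	/*
	 * Drain @cleared into @word without a lock: the atomic exchange
	 * hands each concurrent caller a disjoint mask, so no cleared bit
	 * can be consumed twice or dropped, and the CAS loop preserves
	 * bits set in @word concurrently.
	 */
	static bool deferred_clear(struct word *map)
	{
		unsigned long mask, val;

		if (!atomic_load_explicit(&map->cleared, memory_order_relaxed))
			return false;

		/* Take ownership of the currently deferred bits. */
		mask = atomic_exchange(&map->cleared, 0);

		/* Apply them to @word, retrying if @word changed underneath us. */
		val = atomic_load(&map->word);
		while (!atomic_compare_exchange_weak(&map->word, &val,
						     val & ~mask))
			;
		return true;
	}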

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b78beea0
include/linux/sbitmap.h +0 −5
@@ -32,11 +32,6 @@ struct sbitmap_word {
 	 * @cleared: word holding cleared bits
 	 */
 	unsigned long cleared ____cacheline_aligned_in_smp;
-
-	/**
-	 * @swap_lock: Held while swapping word <-> cleared
-	 */
-	spinlock_t swap_lock;
 } ____cacheline_aligned_in_smp;
 
 /**
lib/sbitmap.c +3 −11
@@ -15,13 +15,9 @@
 static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
 {
 	unsigned long mask, val;
-	bool ret = false;
-	unsigned long flags;
 
-	spin_lock_irqsave(&map->swap_lock, flags);
-
-	if (!map->cleared)
-		goto out_unlock;
+	if (!READ_ONCE(map->cleared))
+		return false;
 
 	/*
 	 * First get a stable cleared mask, setting the old mask to 0.
@@ -35,10 +31,7 @@ static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
 		val = map->word;
 	} while (cmpxchg(&map->word, val, val & ~mask) != val);
 
-	ret = true;
-out_unlock:
-	spin_unlock_irqrestore(&map->swap_lock, flags);
-	return ret;
+	return true;
 }
 
 int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
@@ -80,7 +73,6 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
 	for (i = 0; i < sb->map_nr; i++) {
 		sb->map[i].depth = min(depth, bits_per_word);
 		depth -= sb->map[i].depth;
-		spin_lock_init(&sb->map[i].swap_lock);
 	}
 	return 0;
 }
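As a sanity check of the no-lost-bits claim, a toy harness along the
following lines (hypothetical, not part of the commit) can race two
drainers against a thread deferring frees, and verify that every
deferred bit is eventually cleared:

	#include <assert.h>
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static _Atomic unsigned long word;	/* allocated bits */
	static _Atomic unsigned long cleared;	/* deferred-freed bits */

	static void deferred_clear(void)
	{
		unsigned long mask = atomic_exchange(&cleared, 0);
		unsigned long val = atomic_load(&word);

		while (!atomic_compare_exchange_weak(&word, &val, val & ~mask))
			;
	}

	static void *drainer(void *arg)
	{
		for (int i = 0; i < 100000; i++)
			deferred_clear();
		return NULL;
	}

	int main(void)
	{
		pthread_t a, b;

		/* "Allocate" all 64 bits, then defer-free them one at a
		 * time while two drainers race on the cleared -> word swap. */
		atomic_store(&word, ~0UL);

		pthread_create(&a, NULL, drainer, NULL);
		pthread_create(&b, NULL, drainer, NULL);
		for (int bit = 0; bit < 64; bit++)
			atomic_fetch_or(&cleared, 1UL << bit);
		pthread_join(a, NULL);
		pthread_join(b, NULL);

		deferred_clear();	/* drain anything set after the races */
		assert(atomic_load(&word) == 0);	/* no freed bit lost */
		assert(atomic_load(&cleared) == 0);
		printf("ok: all deferred bits cleared\n");
		return 0;
	}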