Commit 26b924b9 authored by Mikulas Patocka, committed by Mike Snitzer

dm cache: replace spin_lock_irqsave with spin_lock_irq

If we are in a place where it is known that interrupts are enabled,
functions spin_lock_irq/spin_unlock_irq should be used instead of
spin_lock_irqsave/spin_unlock_irqrestore.

spin_lock_irq and spin_unlock_irq are faster because they don't need to
push and pop the flags register.
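
For illustration only (not part of this commit), a minimal sketch of the
two patterns; the stats_lock spinlock and the hit counter are hypothetical:

	/* Hypothetical example; not dm-cache code. */
	static DEFINE_SPINLOCK(stats_lock);
	static unsigned long nr_hits;

	/* Caller context unknown: save, disable, then restore IRQ state. */
	static void count_hit_any_context(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&stats_lock, flags);
		nr_hits++;
		spin_unlock_irqrestore(&stats_lock, flags);
	}

	/*
	 * Caller known to run with interrupts enabled (e.g. process
	 * context): no need to save the flags.  Note that
	 * spin_unlock_irq() re-enables interrupts unconditionally, so
	 * this variant must not be used anywhere interrupts may
	 * already be disabled.
	 */
	static void count_hit_process_context(void)
	{
		spin_lock_irq(&stats_lock);
		nr_hits++;
		spin_unlock_irq(&stats_lock);
	}

The conversions below rely on each changed function being called only
with interrupts enabled.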

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 235bc861
+28 −49
@@ -74,22 +74,19 @@ static bool __iot_idle_for(struct io_tracker *iot, unsigned long jifs)
 static bool iot_idle_for(struct io_tracker *iot, unsigned long jifs)
 {
 	bool r;
-	unsigned long flags;
 
-	spin_lock_irqsave(&iot->lock, flags);
+	spin_lock_irq(&iot->lock);
 	r = __iot_idle_for(iot, jifs);
-	spin_unlock_irqrestore(&iot->lock, flags);
+	spin_unlock_irq(&iot->lock);
 
 	return r;
 }
 
 static void iot_io_begin(struct io_tracker *iot, sector_t len)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&iot->lock, flags);
+	spin_lock_irq(&iot->lock);
 	iot->in_flight += len;
-	spin_unlock_irqrestore(&iot->lock, flags);
+	spin_unlock_irq(&iot->lock);
 }
 
 static void __iot_io_end(struct io_tracker *iot, sector_t len)
@@ -172,7 +169,6 @@ static void __commit(struct work_struct *_ws)
 {
 	struct batcher *b = container_of(_ws, struct batcher, commit_work);
 	blk_status_t r;
-	unsigned long flags;
 	struct list_head work_items;
 	struct work_struct *ws, *tmp;
 	struct continuation *k;
@@ -186,12 +182,12 @@ static void __commit(struct work_struct *_ws)
 	 * We have to grab these before the commit_op to avoid a race
 	 * condition.
 	 */
-	spin_lock_irqsave(&b->lock, flags);
+	spin_lock_irq(&b->lock);
 	list_splice_init(&b->work_items, &work_items);
 	bio_list_merge(&bios, &b->bios);
 	bio_list_init(&b->bios);
 	b->commit_scheduled = false;
-	spin_unlock_irqrestore(&b->lock, flags);
+	spin_unlock_irq(&b->lock);
 
 	r = b->commit_op(b->commit_context);
 
@@ -238,13 +234,12 @@ static void async_commit(struct batcher *b)
 
 static void continue_after_commit(struct batcher *b, struct continuation *k)
 {
-	unsigned long flags;
 	bool commit_scheduled;
 
-	spin_lock_irqsave(&b->lock, flags);
+	spin_lock_irq(&b->lock);
 	commit_scheduled = b->commit_scheduled;
 	list_add_tail(&k->ws.entry, &b->work_items);
-	spin_unlock_irqrestore(&b->lock, flags);
+	spin_unlock_irq(&b->lock);
 
 	if (commit_scheduled)
 		async_commit(b);
@@ -255,13 +250,12 @@ static void continue_after_commit(struct batcher *b, struct continuation *k)
  */
 static void issue_after_commit(struct batcher *b, struct bio *bio)
 {
-       unsigned long flags;
        bool commit_scheduled;
 
-       spin_lock_irqsave(&b->lock, flags);
+       spin_lock_irq(&b->lock);
        commit_scheduled = b->commit_scheduled;
        bio_list_add(&b->bios, bio);
-       spin_unlock_irqrestore(&b->lock, flags);
+       spin_unlock_irq(&b->lock);
 
        if (commit_scheduled)
 	       async_commit(b);
@@ -273,12 +267,11 @@ static void issue_after_commit(struct batcher *b, struct bio *bio)
 static void schedule_commit(struct batcher *b)
 {
 	bool immediate;
-	unsigned long flags;
 
-	spin_lock_irqsave(&b->lock, flags);
+	spin_lock_irq(&b->lock);
 	immediate = !list_empty(&b->work_items) || !bio_list_empty(&b->bios);
 	b->commit_scheduled = true;
-	spin_unlock_irqrestore(&b->lock, flags);
+	spin_unlock_irq(&b->lock);
 
 	if (immediate)
 		async_commit(b);
@@ -630,23 +623,19 @@ static struct per_bio_data *init_per_bio_data(struct bio *bio)
 
 static void defer_bio(struct cache *cache, struct bio *bio)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&cache->lock, flags);
+	spin_lock_irq(&cache->lock);
 	bio_list_add(&cache->deferred_bios, bio);
-	spin_unlock_irqrestore(&cache->lock, flags);
+	spin_unlock_irq(&cache->lock);
 
 	wake_deferred_bio_worker(cache);
 }
 
 static void defer_bios(struct cache *cache, struct bio_list *bios)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&cache->lock, flags);
+	spin_lock_irq(&cache->lock);
 	bio_list_merge(&cache->deferred_bios, bios);
 	bio_list_init(bios);
-	spin_unlock_irqrestore(&cache->lock, flags);
+	spin_unlock_irq(&cache->lock);
 
 	wake_deferred_bio_worker(cache);
 }
@@ -756,33 +745,27 @@ static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
 
 static void set_discard(struct cache *cache, dm_dblock_t b)
 {
-	unsigned long flags;
-
 	BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks));
 	atomic_inc(&cache->stats.discard_count);
 
-	spin_lock_irqsave(&cache->lock, flags);
+	spin_lock_irq(&cache->lock);
 	set_bit(from_dblock(b), cache->discard_bitset);
-	spin_unlock_irqrestore(&cache->lock, flags);
+	spin_unlock_irq(&cache->lock);
 }
 
 static void clear_discard(struct cache *cache, dm_dblock_t b)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&cache->lock, flags);
+	spin_lock_irq(&cache->lock);
 	clear_bit(from_dblock(b), cache->discard_bitset);
-	spin_unlock_irqrestore(&cache->lock, flags);
+	spin_unlock_irq(&cache->lock);
 }
 
 static bool is_discarded(struct cache *cache, dm_dblock_t b)
 {
 	int r;
-	unsigned long flags;
 
-	spin_lock_irqsave(&cache->lock, flags);
+	spin_lock_irq(&cache->lock);
 	r = test_bit(from_dblock(b), cache->discard_bitset);
-	spin_unlock_irqrestore(&cache->lock, flags);
+	spin_unlock_irq(&cache->lock);
 
 	return r;
 }
@@ -790,12 +773,10 @@ static bool is_discarded(struct cache *cache, dm_dblock_t b)
 static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
 {
 	int r;
-	unsigned long flags;
 
-	spin_lock_irqsave(&cache->lock, flags);
+	spin_lock_irq(&cache->lock);
 	r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
 		     cache->discard_bitset);
-	spin_unlock_irqrestore(&cache->lock, flags);
+	spin_unlock_irq(&cache->lock);
 
 	return r;
 }
@@ -827,17 +808,16 @@ static void remap_to_cache(struct cache *cache, struct bio *bio,
 
 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
 {
-	unsigned long flags;
 	struct per_bio_data *pb;
 
-	spin_lock_irqsave(&cache->lock, flags);
+	spin_lock_irq(&cache->lock);
 	if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
 	    bio_op(bio) != REQ_OP_DISCARD) {
 		pb = get_per_bio_data(bio);
 		pb->tick = true;
 		cache->need_tick_bio = false;
 	}
-	spin_unlock_irqrestore(&cache->lock, flags);
+	spin_unlock_irq(&cache->lock);
 }
 
 static void __remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
@@ -1889,17 +1869,16 @@ static void process_deferred_bios(struct work_struct *ws)
 {
 	struct cache *cache = container_of(ws, struct cache, deferred_bio_worker);
 
-	unsigned long flags;
 	bool commit_needed = false;
 	struct bio_list bios;
 	struct bio *bio;
 
 	bio_list_init(&bios);
 
-	spin_lock_irqsave(&cache->lock, flags);
+	spin_lock_irq(&cache->lock);
 	bio_list_merge(&bios, &cache->deferred_bios);
 	bio_list_init(&cache->deferred_bios);
-	spin_unlock_irqrestore(&cache->lock, flags);
+	spin_unlock_irq(&cache->lock);
 
 	while ((bio = bio_list_pop(&bios))) {
 		if (bio->bi_opf & REQ_PREFLUSH)