Commit eeee2827 authored by Linus Torvalds

Merge tag 'for-5.5/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper updates from Mike Snitzer:

 - Fix DM core to disallow stacking request-based DM on partitions.

 - Fix DM raid target to properly resync raidset even if bitmap needed
   additional pages.

 - Fix DM crypt performance regression due to use of WQ_HIGHPRI for the
   IO and crypt workqueues (the flag change is sketched after this
   list).

 - Fix DM integrity metadata layout that was aligned on 128K boundary
   rather than the intended 4K boundary (removes 124K of wasted space
   for each metadata block).

 - Improve the DM thin, cache and clone targets to use spin_lock_irq
   rather than spin_lock_irqsave where possible (the locking pattern is
   sketched after this list).

 - Fix DM thin single thread performance that was lost due to needless
   workqueue wakeups.

 - Fix DM zoned target performance that was lost due to excessive
   backing device checks.

 - Add ability to trigger write failure with the DM dust test target.

 - Fix whitespace indentation in drivers/md/Kconfig.

 - Various small fixes and cleanups (e.g. use struct_size, fix an
   uninitialized variable, variable renames, etc; the struct_size()
   change is sketched after this list).
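
The WQ_HIGHPRI revert amounts to dropping one flag at workqueue
allocation time. A minimal sketch, assuming illustrative names rather
than the actual dm-crypt call sites:

	/* Before the revert: WQ_HIGHPRI let the crypt workers preempt
	 * other work on the CPU, which regressed overall IO performance. */
	wq = alloc_workqueue("kcryptd/%s",
			     WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
			     1, devname);

	/* After the revert: normal-priority workers. */
	wq = alloc_workqueue("kcryptd/%s",
			     WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
			     1, devname);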
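
The spin_lock_irq conversions in the thin, cache and clone targets all
follow the same mechanical pattern. A minimal sketch with made-up names
(struct example, defer_work_*) rather than any specific DM function:

	#include <linux/spinlock.h>
	#include <linux/list.h>

	struct example {
		spinlock_t lock;
		struct list_head work_items;
	};

	/* Before: spin_lock_irqsave() saves the current interrupt state,
	 * which is only needed when the caller may already be running
	 * with interrupts disabled. */
	static void defer_work_irqsave(struct example *e, struct list_head *w)
	{
		unsigned long flags;

		spin_lock_irqsave(&e->lock, flags);
		list_add_tail(w, &e->work_items);
		spin_unlock_irqrestore(&e->lock, flags);
	}

	/* After: these functions are only reached from process context
	 * with interrupts enabled, so unconditionally disabling and
	 * re-enabling interrupts is correct and avoids saving/restoring
	 * the flags. */
	static void defer_work_irq(struct example *e, struct list_head *w)
	{
		spin_lock_irq(&e->lock);
		list_add_tail(w, &e->work_items);
		spin_unlock_irq(&e->lock);
	}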
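
Similarly, the struct_size() cleanup replaces open-coded size arithmetic
for a flexible-array allocation with the overflow-checked helper from
<linux/overflow.h>. A sketch with a hypothetical struct standing in for
dm-stripe's real context structure:

	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct stripe_set {
		unsigned int nr;
		struct list_head entry[];	/* flexible array member */
	};

	/* Before: hand-rolled multiplication that can silently overflow. */
	ss = kmalloc(sizeof(*ss) + nr * sizeof(ss->entry[0]), GFP_KERNEL);

	/* After: struct_size() saturates to SIZE_MAX on overflow, so the
	 * allocation fails cleanly instead of being undersized. */
	ss = kmalloc(struct_size(ss, entry, nr), GFP_KERNEL);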

* tag 'for-5.5/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm: (22 commits)
  Revert "dm crypt: use WQ_HIGHPRI for the IO and crypt workqueues"
  dm: Fix Kconfig indentation
  dm thin: wakeup worker only when deferred bios exist
  dm integrity: fix excessive alignment of metadata runs
  dm raid: Remove unnecessary negation of a shift in raid10_format_to_md_layout
  dm zoned: reduce overhead of backing device checks
  dm dust: add limited write failure mode
  dm dust: change ret to r in dust_map_read and dust_map
  dm dust: change result vars to r
  dm cache: replace spin_lock_irqsave with spin_lock_irq
  dm bio prison: replace spin_lock_irqsave with spin_lock_irq
  dm thin: replace spin_lock_irqsave with spin_lock_irq
  dm clone: add bucket_lock_irq/bucket_unlock_irq helpers
  dm clone: replace spin_lock_irqsave with spin_lock_irq
  dm writecache: handle REQ_FUA
  dm writecache: fix uninitialized variable warning
  dm stripe: use struct_size() in kmalloc()
  dm raid: streamline rs_get_progress() and its raid_status() caller side
  dm raid: simplify rs_setup_recovery call chain
  dm raid: to ensure resynchronization, perform raid set grow in preresume
  ...
parents 7e5192b9 f612b213
Documentation/admin-guide/device-mapper/dm-integrity.rst  +5 −0
@@ -177,6 +177,11 @@ bitmap_flush_interval:number
 	The bitmap flush interval in milliseconds. The metadata buffers
 	are synchronized when this interval expires.
 
+fix_padding
+	Use a smaller padding of the tag area that is more
+	space-efficient. If this option is not present, large padding is
+	used - that is for compatibility with older kernels.
+
 
 The journal mode (D/J), buffer_sectors, journal_watermark, commit_time can
 be changed when reloading the target (load an inactive table and swap the
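
For context, dm-integrity options such as this are passed as optional
arguments on the target's table line. A hypothetical invocation (device,
sizes, tag size and the argument count are made up for illustration;
only fix_padding itself comes from the documentation above):

	# start sector 0, length 409600 sectors; /dev/sdb is the backing
	# device, 0 data-area offset, 32-byte tags, journal mode ('J'),
	# then 1 optional argument.
	dmsetup create integ --table \
	    "0 409600 integrity /dev/sdb 0 32 J 1 fix_padding"
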
Documentation/admin-guide/device-mapper/dm-raid.rst  +2 −0
@@ -417,3 +417,5 @@ Version History
 	deadlock/potential data corruption.  Update superblock when
 	specific devices are requested via rebuild.  Fix RAID leg
 	rebuild errors.
+ 1.15.0 Fix size extensions not being synchronized in case of new MD bitmap
+        pages allocated; also fix those not occurring after previous reductions
drivers/md/dm-bio-prison-v1.c  +10 −17
@@ -150,11 +150,10 @@ static int bio_detain(struct dm_bio_prison *prison,
 		      struct dm_bio_prison_cell **cell_result)
 {
 	int r;
-	unsigned long flags;
 
-	spin_lock_irqsave(&prison->lock, flags);
+	spin_lock_irq(&prison->lock);
 	r = __bio_detain(prison, key, inmate, cell_prealloc, cell_result);
-	spin_unlock_irqrestore(&prison->lock, flags);
+	spin_unlock_irq(&prison->lock);
 
 	return r;
 }
@@ -198,11 +197,9 @@ void dm_cell_release(struct dm_bio_prison *prison,
 		     struct dm_bio_prison_cell *cell,
 		     struct bio_list *bios)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&prison->lock, flags);
+	spin_lock_irq(&prison->lock);
 	__cell_release(prison, cell, bios);
-	spin_unlock_irqrestore(&prison->lock, flags);
+	spin_unlock_irq(&prison->lock);
 }
 EXPORT_SYMBOL_GPL(dm_cell_release);

@@ -250,12 +247,10 @@ void dm_cell_visit_release(struct dm_bio_prison *prison,
 			   void *context,
 			   struct dm_bio_prison_cell *cell)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&prison->lock, flags);
+	spin_lock_irq(&prison->lock);
 	visit_fn(context, cell);
 	rb_erase(&cell->node, &prison->cells);
-	spin_unlock_irqrestore(&prison->lock, flags);
+	spin_unlock_irq(&prison->lock);
 }
 EXPORT_SYMBOL_GPL(dm_cell_visit_release);

@@ -275,11 +270,10 @@ int dm_cell_promote_or_release(struct dm_bio_prison *prison,
 			       struct dm_bio_prison_cell *cell)
 {
 	int r;
-	unsigned long flags;
 
-	spin_lock_irqsave(&prison->lock, flags);
+	spin_lock_irq(&prison->lock);
 	r = __promote_or_release(prison, cell);
-	spin_unlock_irqrestore(&prison->lock, flags);
+	spin_unlock_irq(&prison->lock);
 
 	return r;
 }
@@ -379,10 +373,9 @@ EXPORT_SYMBOL_GPL(dm_deferred_entry_dec);
 int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
 {
 	int r = 1;
-	unsigned long flags;
 	unsigned next_entry;
 
-	spin_lock_irqsave(&ds->lock, flags);
+	spin_lock_irq(&ds->lock);
 	if ((ds->sweeper == ds->current_entry) &&
 	    !ds->entries[ds->current_entry].count)
 		r = 0;
@@ -392,7 +385,7 @@ int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
 		if (!ds->entries[next_entry].count)
 			ds->current_entry = next_entry;
 	}
-	spin_unlock_irqrestore(&ds->lock, flags);
+	spin_unlock_irq(&ds->lock);
 
 	return r;
 }
drivers/md/dm-bio-prison-v2.c  +10 −16
@@ -177,11 +177,10 @@ bool dm_cell_get_v2(struct dm_bio_prison_v2 *prison,
 		    struct dm_bio_prison_cell_v2 **cell_result)
 {
 	int r;
-	unsigned long flags;
 
-	spin_lock_irqsave(&prison->lock, flags);
+	spin_lock_irq(&prison->lock);
 	r = __get(prison, key, lock_level, inmate, cell_prealloc, cell_result);
-	spin_unlock_irqrestore(&prison->lock, flags);
+	spin_unlock_irq(&prison->lock);
 
 	return r;
 }
@@ -261,11 +260,10 @@ int dm_cell_lock_v2(struct dm_bio_prison_v2 *prison,
 		    struct dm_bio_prison_cell_v2 **cell_result)
 {
 	int r;
-	unsigned long flags;
 
-	spin_lock_irqsave(&prison->lock, flags);
+	spin_lock_irq(&prison->lock);
 	r = __lock(prison, key, lock_level, cell_prealloc, cell_result);
-	spin_unlock_irqrestore(&prison->lock, flags);
+	spin_unlock_irq(&prison->lock);
 
 	return r;
 }
@@ -285,11 +283,9 @@ void dm_cell_quiesce_v2(struct dm_bio_prison_v2 *prison,
 			struct dm_bio_prison_cell_v2 *cell,
 			struct work_struct *continuation)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&prison->lock, flags);
+	spin_lock_irq(&prison->lock);
 	__quiesce(prison, cell, continuation);
-	spin_unlock_irqrestore(&prison->lock, flags);
+	spin_unlock_irq(&prison->lock);
 }
 EXPORT_SYMBOL_GPL(dm_cell_quiesce_v2);

@@ -309,11 +305,10 @@ int dm_cell_lock_promote_v2(struct dm_bio_prison_v2 *prison,
 			    unsigned new_lock_level)
 {
 	int r;
-	unsigned long flags;
 
-	spin_lock_irqsave(&prison->lock, flags);
+	spin_lock_irq(&prison->lock);
 	r = __promote(prison, cell, new_lock_level);
-	spin_unlock_irqrestore(&prison->lock, flags);
+	spin_unlock_irq(&prison->lock);
 
 	return r;
 }
@@ -342,11 +337,10 @@ bool dm_cell_unlock_v2(struct dm_bio_prison_v2 *prison,
 		       struct bio_list *bios)
 {
 	bool r;
-	unsigned long flags;
 
-	spin_lock_irqsave(&prison->lock, flags);
+	spin_lock_irq(&prison->lock);
 	r = __unlock(prison, cell, bios);
-	spin_unlock_irqrestore(&prison->lock, flags);
+	spin_unlock_irq(&prison->lock);
 
 	return r;
 }
drivers/md/dm-cache-target.c  +28 −49
@@ -74,22 +74,19 @@ static bool __iot_idle_for(struct io_tracker *iot, unsigned long jifs)
 static bool iot_idle_for(struct io_tracker *iot, unsigned long jifs)
 {
 	bool r;
-	unsigned long flags;
 
-	spin_lock_irqsave(&iot->lock, flags);
+	spin_lock_irq(&iot->lock);
 	r = __iot_idle_for(iot, jifs);
-	spin_unlock_irqrestore(&iot->lock, flags);
+	spin_unlock_irq(&iot->lock);
 
 	return r;
 }
 
 static void iot_io_begin(struct io_tracker *iot, sector_t len)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&iot->lock, flags);
+	spin_lock_irq(&iot->lock);
 	iot->in_flight += len;
-	spin_unlock_irqrestore(&iot->lock, flags);
+	spin_unlock_irq(&iot->lock);
 }
 
 static void __iot_io_end(struct io_tracker *iot, sector_t len)
@@ -172,7 +169,6 @@ static void __commit(struct work_struct *_ws)
 {
 	struct batcher *b = container_of(_ws, struct batcher, commit_work);
 	blk_status_t r;
-	unsigned long flags;
 	struct list_head work_items;
 	struct work_struct *ws, *tmp;
 	struct continuation *k;
@@ -186,12 +182,12 @@ static void __commit(struct work_struct *_ws)
 	 * We have to grab these before the commit_op to avoid a race
 	 * condition.
 	 */
-	spin_lock_irqsave(&b->lock, flags);
+	spin_lock_irq(&b->lock);
 	list_splice_init(&b->work_items, &work_items);
 	bio_list_merge(&bios, &b->bios);
 	bio_list_init(&b->bios);
 	b->commit_scheduled = false;
-	spin_unlock_irqrestore(&b->lock, flags);
+	spin_unlock_irq(&b->lock);
 
 	r = b->commit_op(b->commit_context);
 
@@ -238,13 +234,12 @@ static void async_commit(struct batcher *b)

 static void continue_after_commit(struct batcher *b, struct continuation *k)
 {
-	unsigned long flags;
 	bool commit_scheduled;
 
-	spin_lock_irqsave(&b->lock, flags);
+	spin_lock_irq(&b->lock);
 	commit_scheduled = b->commit_scheduled;
 	list_add_tail(&k->ws.entry, &b->work_items);
-	spin_unlock_irqrestore(&b->lock, flags);
+	spin_unlock_irq(&b->lock);
 
 	if (commit_scheduled)
 		async_commit(b);
@@ -255,13 +250,12 @@ static void continue_after_commit(struct batcher *b, struct continuation *k)
  */
 static void issue_after_commit(struct batcher *b, struct bio *bio)
 {
-       unsigned long flags;
        bool commit_scheduled;
 
-       spin_lock_irqsave(&b->lock, flags);
+       spin_lock_irq(&b->lock);
        commit_scheduled = b->commit_scheduled;
        bio_list_add(&b->bios, bio);
-       spin_unlock_irqrestore(&b->lock, flags);
+       spin_unlock_irq(&b->lock);
 
        if (commit_scheduled)
 	       async_commit(b);
@@ -273,12 +267,11 @@ static void issue_after_commit(struct batcher *b, struct bio *bio)
 static void schedule_commit(struct batcher *b)
 {
 	bool immediate;
-	unsigned long flags;
 
-	spin_lock_irqsave(&b->lock, flags);
+	spin_lock_irq(&b->lock);
 	immediate = !list_empty(&b->work_items) || !bio_list_empty(&b->bios);
 	b->commit_scheduled = true;
-	spin_unlock_irqrestore(&b->lock, flags);
+	spin_unlock_irq(&b->lock);
 
 	if (immediate)
 		async_commit(b);
@@ -630,23 +623,19 @@ static struct per_bio_data *init_per_bio_data(struct bio *bio)

 static void defer_bio(struct cache *cache, struct bio *bio)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&cache->lock, flags);
+	spin_lock_irq(&cache->lock);
 	bio_list_add(&cache->deferred_bios, bio);
-	spin_unlock_irqrestore(&cache->lock, flags);
+	spin_unlock_irq(&cache->lock);
 
 	wake_deferred_bio_worker(cache);
 }
 
 static void defer_bios(struct cache *cache, struct bio_list *bios)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&cache->lock, flags);
+	spin_lock_irq(&cache->lock);
 	bio_list_merge(&cache->deferred_bios, bios);
 	bio_list_init(bios);
-	spin_unlock_irqrestore(&cache->lock, flags);
+	spin_unlock_irq(&cache->lock);
 
 	wake_deferred_bio_worker(cache);
 }
@@ -756,33 +745,27 @@ static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)

 static void set_discard(struct cache *cache, dm_dblock_t b)
 {
-	unsigned long flags;
-
 	BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks));
 	atomic_inc(&cache->stats.discard_count);
 
-	spin_lock_irqsave(&cache->lock, flags);
+	spin_lock_irq(&cache->lock);
 	set_bit(from_dblock(b), cache->discard_bitset);
-	spin_unlock_irqrestore(&cache->lock, flags);
+	spin_unlock_irq(&cache->lock);
 }
 
 static void clear_discard(struct cache *cache, dm_dblock_t b)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&cache->lock, flags);
+	spin_lock_irq(&cache->lock);
 	clear_bit(from_dblock(b), cache->discard_bitset);
-	spin_unlock_irqrestore(&cache->lock, flags);
+	spin_unlock_irq(&cache->lock);
 }
 
 static bool is_discarded(struct cache *cache, dm_dblock_t b)
 {
 	int r;
-	unsigned long flags;
-
-	spin_lock_irqsave(&cache->lock, flags);
+	spin_lock_irq(&cache->lock);
 	r = test_bit(from_dblock(b), cache->discard_bitset);
-	spin_unlock_irqrestore(&cache->lock, flags);
+	spin_unlock_irq(&cache->lock);
 
 	return r;
 }
@@ -790,12 +773,10 @@ static bool is_discarded(struct cache *cache, dm_dblock_t b)
 static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
 {
 	int r;
-	unsigned long flags;
-
-	spin_lock_irqsave(&cache->lock, flags);
+	spin_lock_irq(&cache->lock);
 	r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
 		     cache->discard_bitset);
-	spin_unlock_irqrestore(&cache->lock, flags);
+	spin_unlock_irq(&cache->lock);
 
 	return r;
 }
@@ -827,17 +808,16 @@ static void remap_to_cache(struct cache *cache, struct bio *bio,

 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
 {
-	unsigned long flags;
 	struct per_bio_data *pb;
 
-	spin_lock_irqsave(&cache->lock, flags);
+	spin_lock_irq(&cache->lock);
 	if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
 	    bio_op(bio) != REQ_OP_DISCARD) {
 		pb = get_per_bio_data(bio);
 		pb->tick = true;
 		cache->need_tick_bio = false;
 	}
-	spin_unlock_irqrestore(&cache->lock, flags);
+	spin_unlock_irq(&cache->lock);
 }
 
 static void __remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
@@ -1889,17 +1869,16 @@ static void process_deferred_bios(struct work_struct *ws)
 {
 	struct cache *cache = container_of(ws, struct cache, deferred_bio_worker);
 
-	unsigned long flags;
 	bool commit_needed = false;
 	struct bio_list bios;
 	struct bio *bio;
 
 	bio_list_init(&bios);
 
-	spin_lock_irqsave(&cache->lock, flags);
+	spin_lock_irq(&cache->lock);
 	bio_list_merge(&bios, &cache->deferred_bios);
 	bio_list_init(&cache->deferred_bios);
-	spin_unlock_irqrestore(&cache->lock, flags);
+	spin_unlock_irq(&cache->lock);
 
 	while ((bio = bio_list_pop(&bios))) {
 		if (bio->bi_opf & REQ_PREFLUSH)