Commit 2a89b99f authored by Linus Torvalds

Merge tag 'for-5.8/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 - A request-based DM fix to not use a waitqueue to wait for blk-mq IO
   completion because doing so is racy.

 - A couple more DM zoned target fixes to address issues introduced
   during the 5.8 cycle.

 - A DM core fix to use the proper interface to clean up DM's static
   flush bio (a sketch follows the shortlog below).

 - A DM core fix to prevent mm recursion during memory allocation needed
   by dm_kobject_uevent (a sketch of the noio scoping follows this list).
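
A minimal sketch of that noio scoping, assuming the standard
memalloc_noio_save()/memalloc_noio_restore() pattern; the helper name is
hypothetical, not the actual dm.c change:

	#include <linux/sched/mm.h>
	#include <linux/kobject.h>

	/* Hypothetical helper: every allocation made inside the
	 * save/restore window is implicitly GFP_NOIO, so memory reclaim
	 * triggered by the uevent's allocations cannot recurse back into
	 * this device's own I/O path. */
	static void send_uevent_noio(struct kobject *kobj)
	{
		unsigned int noio_flag;

		noio_flag = memalloc_noio_save();
		kobject_uevent(kobj, KOBJ_CHANGE);	/* allocates internally */
		memalloc_noio_restore(noio_flag);
	}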

* tag 'for-5.8/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm: use noio when sending kobject event
  dm zoned: Fix zone reclaim trigger
  dm zoned: fix unused but set variable warnings
  dm writecache: reject asynchronous pmem devices
  dm: use bio_uninit instead of bio_disassociate_blkg
  dm: do not use waitqueue for request-based DM
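
For the "dm: use bio_uninit" entry above (its dm.c hunk is not shown on
this page), a hedged sketch of the interface choice, with a hypothetical
caller name:

	#include <linux/bio.h>

	/* A statically embedded bio should be torn down with the generic
	 * bio_uninit(), which performs the full cleanup (including
	 * dropping the blkg/cgroup association), instead of calling the
	 * internal bio_disassociate_blkg() directly. */
	static void cleanup_flush_bio(struct bio *flush_bio)
	{
		bio_uninit(flush_bio);
	}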
parents ce69fb3b 6958c1c6
drivers/md/dm-rq.c  +0 −4
@@ -146,10 +146,6 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
  */
 static void rq_completed(struct mapped_device *md)
 {
-	/* nudge anyone waiting on suspend queue */
-	if (unlikely(wq_has_sleeper(&md->wait)))
-		wake_up(&md->wait);
-
 	/*
 	 * dm_put() must be at the end of this function. See the comment above
 	 */
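
This hunk removes the wake-up side of the racy handshake: blk-mq can
complete requests from contexts that race with request freeing, so the
suspend path now polls the queue's in-flight accounting instead of
sleeping on md->wait. A hedged sketch of that polling style (illustrative
names, not the exact dm code):

	#include <linux/blk-mq.h>
	#include <linux/delay.h>

	/* Wait for a request-based queue to drain by polling blk-mq's
	 * inflight counter, rather than relying on a wake_up() from the
	 * completion path. */
	static void wait_for_queue_drain(struct request_queue *q)
	{
		while (blk_mq_queue_inflight(q))
			msleep(5);
	}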
drivers/md/dm-writecache.c  +6 −0
@@ -2266,6 +2266,12 @@ invalid_optional:
 	}
 
 	if (WC_MODE_PMEM(wc)) {
+		if (!dax_synchronous(wc->ssd_dev->dax_dev)) {
+			r = -EOPNOTSUPP;
+			ti->error = "Asynchronous persistent memory not supported as pmem cache";
+			goto bad;
+		}
+
 		r = persistent_memory_claim(wc);
 		if (r) {
 			ti->error = "Unable to map persistent memory for cache";
drivers/md/dm-zoned-metadata.c  +8 −1
@@ -2217,8 +2217,15 @@ struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned int dev_idx,
 {
 	struct list_head *list;
 	struct dm_zone *zone;
-	int i = 0;
+	int i;
 
+	/* Schedule reclaim to ensure free zones are available */
+	if (!(flags & DMZ_ALLOC_RECLAIM)) {
+		for (i = 0; i < zmd->nr_devs; i++)
+			dmz_schedule_reclaim(zmd->dev[i].reclaim);
+	}
+
+	i = 0;
 again:
 	if (flags & DMZ_ALLOC_CACHE)
 		list = &zmd->unmap_cache_list;
drivers/md/dm-zoned-reclaim.c  +3 −4
@@ -456,6 +456,8 @@ static unsigned int dmz_reclaim_percentage(struct dmz_reclaim *zrc)
 		nr_zones = dmz_nr_rnd_zones(zmd, zrc->dev_idx);
 		nr_unmap = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx);
 	}
+	if (nr_unmap <= 1)
+		return 0;
 	return nr_unmap * 100 / nr_zones;
 }
 
@@ -501,7 +503,7 @@ static void dmz_reclaim_work(struct work_struct *work)
 {
 	struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work);
 	struct dmz_metadata *zmd = zrc->metadata;
-	unsigned int p_unmap, nr_unmap_rnd = 0, nr_rnd = 0;
+	unsigned int p_unmap;
 	int ret;
 
 	if (dmz_dev_is_dying(zmd))
@@ -527,9 +529,6 @@ static void dmz_reclaim_work(struct work_struct *work)
 		zrc->kc_throttle.throttle = min(75U, 100U - p_unmap / 2);
 	}
 
-	nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx);
-	nr_rnd = dmz_nr_rnd_zones(zmd, zrc->dev_idx);
-
 	DMDEBUG("(%s/%u): Reclaim (%u): %s, %u%% free zones (%u/%u cache %u/%u random)",
 		dmz_metadata_label(zmd), zrc->dev_idx,
 		zrc->kc_throttle.throttle,
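
Two things happen in this file: the nr_unmap <= 1 guard keeps
dmz_reclaim_percentage() from reporting reclaimable space when at most
one unmapped zone exists (reclaim could not usefully free anything
there), and the nr_unmap_rnd/nr_rnd locals, which were set but no longer
read, are dropped. The latter silences gcc's -Wunused-but-set-variable;
in miniature, the warning pattern looks like:

	/* Built with W=1, gcc warns that 'tmp' is set but never used. */
	static int example(void)
	{
		int tmp = 0;

		tmp = 42;	/* assigned, never read: delete the variable */
		return 0;
	}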
drivers/md/dm-zoned-target.c  +1 −9
@@ -400,15 +400,7 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
 		dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
 	struct dmz_metadata *zmd = dmz->metadata;
 	struct dm_zone *zone;
-	int i, ret;
-
-	/*
-	 * Write may trigger a zone allocation. So make sure the
-	 * allocation can succeed.
-	 */
-	if (bio_op(bio) == REQ_OP_WRITE)
-		for (i = 0; i < dmz->nr_ddevs; i++)
-			dmz_schedule_reclaim(dmz->dev[i].reclaim);
+	int ret;
 
 	dmz_lock_metadata(zmd);
 