Commit 0776aa0e authored by Mike Snitzer
Browse files

dm: ensure bio-based DM's bioset and io_pool support targets' maximum IOs



alloc_multiple_bios() assumes it can allocate the requested number of
bios but until now there was no guarantee that the mempools would be
accommodating.

Suggested-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 4a3f54d9
Loading
Loading
Loading
Loading
+7 −4
Original line number Diff line number Diff line
@@ -1079,7 +1079,8 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
{
	enum dm_queue_mode type = dm_table_get_type(t);
	unsigned per_io_data_size = 0;
	struct dm_target *tgt;
	unsigned min_pool_size = 0;
	struct dm_target *ti;
	unsigned i;

	if (unlikely(type == DM_TYPE_NONE)) {
@@ -1089,11 +1090,13 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *

	if (__table_type_bio_based(type))
		for (i = 0; i < t->num_targets; i++) {
			tgt = t->targets + i;
			per_io_data_size = max(per_io_data_size, tgt->per_io_data_size);
			ti = t->targets + i;
			per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
			min_pool_size = max(min_pool_size, ti->num_flush_bios);
		}

	t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_io_data_size);
	t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported,
					   per_io_data_size, min_pool_size);
	if (!t->mempools)
		return -ENOMEM;

+18 −10
Original line number Diff line number Diff line
@@ -1810,17 +1810,26 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
	struct dm_md_mempools *p = dm_table_get_md_mempools(t);

	if (md->bs) {
		/* The md already has necessary mempools. */
	if (dm_table_bio_based(t)) {
		/* The md may already have mempools that need changing. */
		if (md->bs) {
			/*
			 * Reload bioset because front_pad may have changed
			 * because a different table was loaded.
			 */
			bioset_free(md->bs);
			md->bs = p->bs;
			p->bs = NULL;
			md->bs = NULL;
		}
		if (md->io_pool) {
			/*
			 * Reload io_pool because pool_size may have changed
			 * because a different table was loaded.
			 */
			mempool_destroy(md->io_pool);
			md->io_pool = NULL;
		}

	} else if (md->bs) {
		/*
		 * There's no need to reload with request-based dm
		 * because the size of front_pad doesn't change.
@@ -1838,7 +1847,6 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
	p->io_pool = NULL;
	md->bs = p->bs;
	p->bs = NULL;

out:
	/* mempool bind completed, no longer need any mempools in the table */
	dm_table_free_md_mempools(t);
@@ -2727,7 +2735,8 @@ int dm_noflush_suspending(struct dm_target *ti)
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
					    unsigned integrity, unsigned per_io_data_size)
					    unsigned integrity, unsigned per_io_data_size,
					    unsigned min_pool_size)
{
	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
	unsigned int pool_size = 0;
@@ -2739,16 +2748,15 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
	switch (type) {
	case DM_TYPE_BIO_BASED:
	case DM_TYPE_DAX_BIO_BASED:
		pool_size = dm_get_reserved_bio_based_ios();
		pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
	
		pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache);
		if (!pools->io_pool)
			goto out;
		break;
	case DM_TYPE_REQUEST_BASED:
	case DM_TYPE_MQ_REQUEST_BASED:
		pool_size = dm_get_reserved_rq_based_ios();
		pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
		/* per_io_data_size is used for blk-mq pdu at queue allocation */
		break;
+2 −1
Original line number Diff line number Diff line
@@ -206,7 +206,8 @@ void dm_kcopyd_exit(void);
 * Mempool operations
 */
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
					    unsigned integrity, unsigned per_bio_data_size);
					    unsigned integrity, unsigned per_bio_data_size,
					    unsigned min_pool_size);
void dm_free_md_mempools(struct dm_md_mempools *pools);

/*