Commit 23de4a7a authored by Linus Torvalds
Pull device mapper fixes from Mike Snitzer:
 "A dm-crypt fix for a cpu hotplug crash; it switches from using per-cpu
  data to a mempool allocation, which still offers allocation with cpu
  locality since slab allocation involves no inter-cpu communication.

  A couple of dm-thin stable fixes to address "out-of-data-space" issues.

  A dm-multipath fix for a lockdep warning introduced in 3.15-rc1"

* tag 'dm-3.15-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm mpath: fix lock order inconsistency in multipath_ioctl
  dm thin: add timeout to stop out-of-data-space mode holding IO forever
  dm thin: allow metadata commit if pool is in PM_OUT_OF_DATA_SPACE mode
  dm crypt: fix cpu hotplug crash by removing per-cpu structure
parents 31a3fcab 4cdd2ad7
drivers/md/dm-crypt.c (+12 −49)
@@ -19,7 +19,6 @@
 #include <linux/crypto.h>
 #include <linux/workqueue.h>
 #include <linux/backing-dev.h>
-#include <linux/percpu.h>
 #include <linux/atomic.h>
 #include <linux/scatterlist.h>
 #include <asm/page.h>
@@ -43,6 +42,7 @@ struct convert_context {
 	struct bvec_iter iter_out;
 	sector_t cc_sector;
 	atomic_t cc_pending;
+	struct ablkcipher_request *req;
 };
 
 /*
@@ -111,15 +111,7 @@ struct iv_tcw_private {
 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
 
 /*
- * Duplicated per-CPU state for cipher.
- */
-struct crypt_cpu {
-	struct ablkcipher_request *req;
-};
-
-/*
- * The fields in here must be read only after initialization,
- * changing state should be in crypt_cpu.
+ * The fields in here must be read only after initialization.
  */
 struct crypt_config {
 	struct dm_dev *dev;
@@ -150,12 +142,6 @@ struct crypt_config {
 	sector_t iv_offset;
 	unsigned int iv_size;
 
-	/*
-	 * Duplicated per cpu state. Access through
-	 * per_cpu_ptr() only.
-	 */
-	struct crypt_cpu __percpu *cpu;
-
 	/* ESSIV: struct crypto_cipher *essiv_tfm */
 	void *iv_private;
 	struct crypto_ablkcipher **tfms;
@@ -192,11 +178,6 @@ static void clone_init(struct dm_crypt_io *, struct bio *);
 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
 static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
 
-static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
-{
-	return this_cpu_ptr(cc->cpu);
-}
-
 /*
  * Use this to access cipher attributes that are the same for each CPU.
  */
@@ -903,16 +884,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 static void crypt_alloc_req(struct crypt_config *cc,
 			    struct convert_context *ctx)
 {
-	struct crypt_cpu *this_cc = this_crypt_config(cc);
 	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
 
-	if (!this_cc->req)
-		this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
+	if (!ctx->req)
+		ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
 
-	ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
-	ablkcipher_request_set_callback(this_cc->req,
+	ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
+	ablkcipher_request_set_callback(ctx->req,
 	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
-	    kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
+	    kcryptd_async_done, dmreq_of_req(cc, ctx->req));
 }
 
 /*
@@ -921,7 +901,6 @@ static void crypt_alloc_req(struct crypt_config *cc,
 static int crypt_convert(struct crypt_config *cc,
 			 struct convert_context *ctx)
 {
-	struct crypt_cpu *this_cc = this_crypt_config(cc);
 	int r;
 
 	atomic_set(&ctx->cc_pending, 1);
@@ -932,7 +911,7 @@ static int crypt_convert(struct crypt_config *cc,
 
 		atomic_inc(&ctx->cc_pending);
 
-		r = crypt_convert_block(cc, ctx, this_cc->req);
+		r = crypt_convert_block(cc, ctx, ctx->req);
 
 		switch (r) {
 		/* async */
@@ -941,7 +920,7 @@ static int crypt_convert(struct crypt_config *cc,
 			reinit_completion(&ctx->restart);
 			/* fall through*/
 		case -EINPROGRESS:
-			this_cc->req = NULL;
+			ctx->req = NULL;
 			ctx->cc_sector++;
 			continue;
 
@@ -1040,6 +1019,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
 	io->sector = sector;
 	io->error = 0;
 	io->base_io = NULL;
+	io->ctx.req = NULL;
 	atomic_set(&io->io_pending, 0);
 
 	return io;
@@ -1065,6 +1045,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 	if (!atomic_dec_and_test(&io->io_pending))
 		return;
 
+	if (io->ctx.req)
+		mempool_free(io->ctx.req, cc->req_pool);
 	mempool_free(io, cc->io_pool);
 
 	if (likely(!base_io))
@@ -1492,8 +1474,6 @@ static int crypt_wipe_key(struct crypt_config *cc)
 static void crypt_dtr(struct dm_target *ti)
 {
 	struct crypt_config *cc = ti->private;
-	struct crypt_cpu *cpu_cc;
-	int cpu;
 
 	ti->private = NULL;
 
@@ -1505,13 +1485,6 @@ static void crypt_dtr(struct dm_target *ti)
 	if (cc->crypt_queue)
 		destroy_workqueue(cc->crypt_queue);
 
-	if (cc->cpu)
-		for_each_possible_cpu(cpu) {
-			cpu_cc = per_cpu_ptr(cc->cpu, cpu);
-			if (cpu_cc->req)
-				mempool_free(cpu_cc->req, cc->req_pool);
-		}
-
 	crypt_free_tfms(cc);
 
 	if (cc->bs)
@@ -1530,9 +1503,6 @@ static void crypt_dtr(struct dm_target *ti)
 	if (cc->dev)
 		dm_put_device(ti, cc->dev);
 
-	if (cc->cpu)
-		free_percpu(cc->cpu);
-
 	kzfree(cc->cipher);
 	kzfree(cc->cipher_string);
 
@@ -1588,13 +1558,6 @@ static int crypt_ctr_cipher(struct dm_target *ti,
 	if (tmp)
 		DMWARN("Ignoring unexpected additional cipher options");
 
-	cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
-				 __alignof__(struct crypt_cpu));
-	if (!cc->cpu) {
-		ti->error = "Cannot allocate per cpu state";
-		goto bad_mem;
-	}
-
 	/*
 	 * For compatibility with the original dm-crypt mapping format, if
 	 * only the cipher name is supplied, use cbc-plain.
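
A note on the fix: the per-cpu request pointer effectively assumed that a
kcryptd work item never migrates between CPUs, and taking a CPU offline
breaks that assumption, leaving a work item using another CPU's state.
Tying the request to the convert_context removes the shared state
altogether. Below is a minimal sketch of the resulting lifetime pattern
(hypothetical names, not the driver's actual code); slab's per-cpu caches
already make mempool_alloc() cpu-local, which is the point the pull
message makes above.

	#include <linux/mempool.h>
	#include <linux/crypto.h>

	/* One request per in-flight context, owned by that context. */
	struct sketch_ctx {
		struct ablkcipher_request *req;
	};

	static void sketch_alloc_req(struct sketch_ctx *ctx, mempool_t *req_pool)
	{
		if (!ctx->req)	/* lazy; GFP_NOIO from a mempool waits, never fails */
			ctx->req = mempool_alloc(req_pool, GFP_NOIO);
	}

	static void sketch_release_req(struct sketch_ctx *ctx, mempool_t *req_pool)
	{
		if (ctx->req)
			mempool_free(ctx->req, req_pool);
		ctx->req = NULL;
	}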
drivers/md/dm-mpath.c (+1 −1)
@@ -1566,8 +1566,8 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
 		}
 		if (m->pg_init_required)
 			__pg_init_all_paths(m);
-		spin_unlock_irqrestore(&m->lock, flags);
 		dm_table_run_md_queue_async(m->ti->table);
+		spin_unlock_irqrestore(&m->lock, flags);
 	}
 
 	return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
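
The shortlog calls this a lock order inconsistency fix: dm_table_run_md_queue_async()
takes the request queue's lock internally, and with this change multipath_ioctl()
calls it before dropping m->lock, so the pair of locks is acquired in one
consistent order. As a generic illustration (hypothetical locks and functions,
not the driver's code), lockdep warns as soon as two paths take the same two
locks in opposite orders:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(lock_a);
	static DEFINE_SPINLOCK(lock_b);

	static void path_one(void)
	{
		spin_lock(&lock_a);
		spin_lock(&lock_b);	/* establishes the order A -> B */
		spin_unlock(&lock_b);
		spin_unlock(&lock_a);
	}

	static void path_two(void)
	{
		spin_lock(&lock_b);
		spin_lock(&lock_a);	/* B -> A: lockdep reports a possible deadlock */
		spin_unlock(&lock_a);
		spin_unlock(&lock_b);
	}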
drivers/md/dm-thin.c (+22 −1)
@@ -27,6 +27,7 @@
 #define MAPPING_POOL_SIZE 1024
 #define PRISON_CELLS 1024
 #define COMMIT_PERIOD HZ
+#define NO_SPACE_TIMEOUT (HZ * 60)
 
 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
 		"A percentage of time allocated for copy on write");
@@ -175,6 +176,7 @@ struct pool {
 	struct workqueue_struct *wq;
 	struct work_struct worker;
 	struct delayed_work waker;
+	struct delayed_work no_space_timeout;
 
 	unsigned long last_commit_jiffies;
 	unsigned ref_count;
@@ -935,7 +937,7 @@ static int commit(struct pool *pool)
 {
 	int r;
 
-	if (get_pool_mode(pool) != PM_WRITE)
+	if (get_pool_mode(pool) >= PM_READ_ONLY)
 		return -EINVAL;
 
 	r = dm_pool_commit_metadata(pool->pmd);
@@ -1590,6 +1592,20 @@ static void do_waker(struct work_struct *ws)
 	queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
 }
 
+/*
+ * We're holding onto IO to allow userland time to react.  After the
+ * timeout either the pool will have been resized (and thus back in
+ * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO.
+ */
+static void do_no_space_timeout(struct work_struct *ws)
+{
+	struct pool *pool = container_of(to_delayed_work(ws), struct pool,
+					 no_space_timeout);
+
+	if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space)
+		set_pool_mode(pool, PM_READ_ONLY);
+}
+
 /*----------------------------------------------------------------*/
 
 struct noflush_work {
@@ -1715,6 +1731,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 		pool->process_discard = process_discard;
 		pool->process_prepared_mapping = process_prepared_mapping;
 		pool->process_prepared_discard = process_prepared_discard_passdown;
+
+		if (!pool->pf.error_if_no_space)
+			queue_delayed_work(pool->wq, &pool->no_space_timeout, NO_SPACE_TIMEOUT);
 		break;
 
 	case PM_WRITE:
@@ -2100,6 +2119,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 
 	INIT_WORK(&pool->worker, do_worker);
 	INIT_DELAYED_WORK(&pool->waker, do_waker);
+	INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
 	spin_lock_init(&pool->lock);
 	bio_list_init(&pool->deferred_flush_bios);
 	INIT_LIST_HEAD(&pool->prepared_mappings);
@@ -2662,6 +2682,7 @@ static void pool_postsuspend(struct dm_target *ti)
 	struct pool *pool = pt->pool;
 
 	cancel_delayed_work(&pool->waker);
+	cancel_delayed_work(&pool->no_space_timeout);
 	flush_workqueue(pool->wq);
 	(void) commit(pool);
 }
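
The timeout plumbing above is the standard delayed-work pattern: initialize
once at pool creation, arm on entering PM_OUT_OF_DATA_SPACE, and cancel on
suspend so a pool that gets resized in time never degrades. A minimal sketch
with hypothetical names, not the driver's actual code:

	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	#define SKETCH_TIMEOUT (HZ * 60)	/* one minute, like NO_SPACE_TIMEOUT */

	struct sketch_pool {
		struct workqueue_struct *wq;
		struct delayed_work timeout;
	};

	static void sketch_degrade(struct sketch_pool *p);	/* hypothetical helper */

	static void sketch_timeout_fn(struct work_struct *ws)
	{
		struct sketch_pool *p = container_of(to_delayed_work(ws),
						     struct sketch_pool, timeout);

		/* runs once, SKETCH_TIMEOUT jiffies after being queued */
		sketch_degrade(p);
	}

	static void sketch_create(struct sketch_pool *p)
	{
		INIT_DELAYED_WORK(&p->timeout, sketch_timeout_fn);
	}

	static void sketch_enter_no_space(struct sketch_pool *p)
	{
		queue_delayed_work(p->wq, &p->timeout, SKETCH_TIMEOUT);	/* arm */
	}

	static void sketch_suspend(struct sketch_pool *p)
	{
		cancel_delayed_work(&p->timeout);	/* drop a pending timer... */
		flush_workqueue(p->wq);			/* ...and wait out a running callback */
	}

Note that the driver pairs cancel_delayed_work() with flush_workqueue(): the
cancel only removes a work item that has not started, while the flush waits
for one whose timer has already fired.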