Commit 8083b1bf authored by Ard Biesheuvel, committed by Herbert Xu

crypto: xts - add support for ciphertext stealing



Add support for the missing ciphertext stealing part of the XTS-AES
specification, which permits inputs of any size >= the block size.

Cc: Pascal van Leeuwen <pvanleeuwen@verimatrix.com>
Cc: Ondrej Mosnacek <omosnace@redhat.com>
Tested-by: Milan Broz <gmazyland@gmail.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent a62084d2
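For reference, the ciphertext-stealing construction this patch implements works as follows: all preceding blocks are processed as ordinary XTS blocks; the last full plaintext block is encrypted with its tweak, the final partial block is padded with the tail of that ciphertext, the padded block is encrypted with the next tweak and emitted in the position of the last full block, and the stolen ciphertext head is emitted, truncated, as the final block. The sketch below is a minimal standalone illustration of that textbook transform, not kernel code: encrypt_block() is a hypothetical stand-in for the one-block XTS primitive c = E_K(p ^ T) ^ T (the toy definition here is NOT AES, it only makes the sketch runnable), and the tweak values t_m/t_m1 are assumed to be precomputed.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BS 16	/* XTS block size */

/*
 * Toy stand-in for the one-block XTS primitive c = E_K(p ^ T) ^ T.
 * NOT AES -- just an invertible byte permutation so the sketch runs.
 */
static void encrypt_block(uint8_t c[BS], const uint8_t p[BS],
			  const uint8_t t[BS])
{
	for (int i = 0; i < BS; i++)
		c[i] = (uint8_t)((p[i] ^ t[i]) * 5 + 1) ^ t[i];
}

/*
 * Encrypt the final full block plus 'tail' trailing bytes (0 < tail < BS)
 * with ciphertext stealing. in[] holds BS + tail plaintext bytes and
 * out[] receives BS + tail ciphertext bytes.
 */
static void xts_encrypt_tail(uint8_t *out, const uint8_t *in, size_t tail,
			     const uint8_t t_m[BS], const uint8_t t_m1[BS])
{
	uint8_t cm[BS], pad[BS];

	encrypt_block(cm, in, t_m);		  /* CC = Enc(T_m, last full block) */

	memcpy(pad, in + BS, tail);		  /* partial plaintext ...          */
	memcpy(pad + tail, cm + tail, BS - tail); /* ... padded with stolen tail    */

	encrypt_block(out, pad, t_m1);		  /* full block, written first      */
	memcpy(out + BS, cm, tail);		  /* head of CC, truncated, last    */
}

int main(void)
{
	uint8_t in[25] = "ciphertext stealing demo";
	uint8_t out[25], t_m[BS] = { 1 }, t_m1[BS] = { 2 };

	xts_encrypt_tail(out, in, sizeof(in) - BS, t_m, t_m1);

	for (size_t i = 0; i < sizeof(out); i++)
		printf("%02x", out[i]);
	printf("\n");
	return 0;
}

The patch below reaches the same layout by a different route: encrypt() and decrypt() first run the ecb(..) subrequest over all full blocks, and cts_final() then rebuilds the last block in place, reusing the already-written ciphertext of the final full block as the stolen pad. The user-visible effect is that an "xts(aes)" skcipher now accepts any request length of at least XTS_BLOCK_SIZE (16) bytes instead of requiring a multiple of it.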
+132 −20
@@ -1,8 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /* XTS: as defined in IEEE1619/D16
  *	http://grouper.ieee.org/groups/1619/email/pdf00086.pdf
- *	(sector sizes which are not a multiple of 16 bytes are,
- *	however currently unsupported)
  *
  * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
  *
@@ -34,6 +32,8 @@ struct xts_instance_ctx {

 struct rctx {
 	le128 t;
+	struct scatterlist *tail;
+	struct scatterlist sg[2];
 	struct skcipher_request subreq;
 };

@@ -84,10 +84,11 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
  * mutliple calls to the 'ecb(..)' instance, which usually would be slower than
  * just doing the gf128mul_x_ble() calls again.
  */
-static int xor_tweak(struct skcipher_request *req, bool second_pass)
+static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc)
 {
 	struct rctx *rctx = skcipher_request_ctx(req);
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
 	const int bs = XTS_BLOCK_SIZE;
 	struct skcipher_walk w;
 	le128 t = rctx->t;
@@ -109,6 +110,20 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass)
 		wdst = w.dst.virt.addr;
 
 		do {
+			if (unlikely(cts) &&
+			    w.total - w.nbytes + avail < 2 * XTS_BLOCK_SIZE) {
+				if (!enc) {
+					if (second_pass)
+						rctx->t = t;
+					gf128mul_x_ble(&t, &t);
+				}
+				le128_xor(wdst, &t, wsrc);
+				if (enc && second_pass)
+					gf128mul_x_ble(&rctx->t, &t);
+				skcipher_walk_done(&w, avail - bs);
+				return 0;
+			}
+
 			le128_xor(wdst++, &t, wsrc++);
 			gf128mul_x_ble(&t, &t);
 		} while ((avail -= bs) >= bs);
@@ -119,17 +134,71 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass)
 	return err;
 }
 
-static int xor_tweak_pre(struct skcipher_request *req)
+static int xor_tweak_pre(struct skcipher_request *req, bool enc)
 {
-	return xor_tweak(req, false);
+	return xor_tweak(req, false, enc);
 }
 
-static int xor_tweak_post(struct skcipher_request *req)
+static int xor_tweak_post(struct skcipher_request *req, bool enc)
 {
-	return xor_tweak(req, true);
+	return xor_tweak(req, true, enc);
 }
 
-static void crypt_done(struct crypto_async_request *areq, int err)
+static void cts_done(struct crypto_async_request *areq, int err)
+{
+	struct skcipher_request *req = areq->data;
+	le128 b;
+
+	if (!err) {
+		struct rctx *rctx = skcipher_request_ctx(req);
+
+		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
+		le128_xor(&b, &rctx->t, &b);
+		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
+	}
+
+	skcipher_request_complete(req, err);
+}
+
+static int cts_final(struct skcipher_request *req,
+		     int (*crypt)(struct skcipher_request *req))
+{
+	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+	int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1);
+	struct rctx *rctx = skcipher_request_ctx(req);
+	struct skcipher_request *subreq = &rctx->subreq;
+	int tail = req->cryptlen % XTS_BLOCK_SIZE;
+	le128 b[2];
+	int err;
+
+	rctx->tail = scatterwalk_ffwd(rctx->sg, req->dst,
+				      offset - XTS_BLOCK_SIZE);
+
+	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
+	memcpy(b + 1, b, tail);
+	scatterwalk_map_and_copy(b, req->src, offset, tail, 0);
+
+	le128_xor(b, &rctx->t, b);
+
+	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1);
+
+	skcipher_request_set_tfm(subreq, ctx->child);
+	skcipher_request_set_callback(subreq, req->base.flags, cts_done, req);
+	skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail,
+				   XTS_BLOCK_SIZE, NULL);
+
+	err = crypt(subreq);
+	if (err)
+		return err;
+
+	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
+	le128_xor(b, &rctx->t, b);
+	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
+
+	return 0;
+}
+
+static void encrypt_done(struct crypto_async_request *areq, int err)
 {
 	struct skcipher_request *req = areq->data;

@@ -137,47 +206,90 @@ static void crypt_done(struct crypto_async_request *areq, int err)
 		struct rctx *rctx = skcipher_request_ctx(req);
 
 		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-		err = xor_tweak_post(req);
+		err = xor_tweak_post(req, true);
+
+		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
+			err = cts_final(req, crypto_skcipher_encrypt);
+			if (err == -EINPROGRESS)
+				return;
+		}
 	}
 
 	skcipher_request_complete(req, err);
 }
 
-static void init_crypt(struct skcipher_request *req)
+static void decrypt_done(struct crypto_async_request *areq, int err)
+{
+	struct skcipher_request *req = areq->data;
+
+	if (!err) {
+		struct rctx *rctx = skcipher_request_ctx(req);
+
+		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+		err = xor_tweak_post(req, false);
+
+		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
+			err = cts_final(req, crypto_skcipher_decrypt);
+			if (err == -EINPROGRESS)
+				return;
+		}
+	}
+
+	skcipher_request_complete(req, err);
+}
+
+static int init_crypt(struct skcipher_request *req, crypto_completion_t compl)
 {
 	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
 	struct rctx *rctx = skcipher_request_ctx(req);
 	struct skcipher_request *subreq = &rctx->subreq;
 
+	if (req->cryptlen < XTS_BLOCK_SIZE)
+		return -EINVAL;
+
 	skcipher_request_set_tfm(subreq, ctx->child);
-	skcipher_request_set_callback(subreq, req->base.flags, crypt_done, req);
+	skcipher_request_set_callback(subreq, req->base.flags, compl, req);
 	skcipher_request_set_crypt(subreq, req->dst, req->dst,
-				   req->cryptlen, NULL);
+				   req->cryptlen & ~(XTS_BLOCK_SIZE - 1), NULL);
 
 	/* calculate first value of T */
 	crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);
+
+	return 0;
 }
 
 static int encrypt(struct skcipher_request *req)
 {
 	struct rctx *rctx = skcipher_request_ctx(req);
 	struct skcipher_request *subreq = &rctx->subreq;
+	int err;
 
-	init_crypt(req);
-	return xor_tweak_pre(req) ?:
+	err = init_crypt(req, encrypt_done) ?:
+	      xor_tweak_pre(req, true) ?:
 	      crypto_skcipher_encrypt(subreq) ?:
-		xor_tweak_post(req);
+	      xor_tweak_post(req, true);
+
+	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
+		return err;
+
+	return cts_final(req, crypto_skcipher_encrypt);
 }
 
 static int decrypt(struct skcipher_request *req)
 {
 	struct rctx *rctx = skcipher_request_ctx(req);
 	struct skcipher_request *subreq = &rctx->subreq;
+	int err;
 
-	init_crypt(req);
-	return xor_tweak_pre(req) ?:
+	err = init_crypt(req, decrypt_done) ?:
+	      xor_tweak_pre(req, false) ?:
 	      crypto_skcipher_decrypt(subreq) ?:
-		xor_tweak_post(req);
+	      xor_tweak_post(req, false);
+
+	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
+		return err;
+
+	return cts_final(req, crypto_skcipher_decrypt);
 }

static int init_tfm(struct crypto_skcipher *tfm)