Commit 7740bd51 authored by Eric Biggers, committed by Herbert Xu
Browse files

crypto: nx - don't abuse blkcipher_desc to pass iv around



The NX crypto driver is using 'struct blkcipher_desc' to pass the IV
around, even for AEADs (for which it creates the struct on the stack).
This is not appropriate since this structure is part of the "blkcipher"
API, which is deprecated and will be removed.

Just pass around the IV directly instead.

Signed-off-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 713b2e72
Loading
Loading
Loading
Loading
+3 −2
Original line number Diff line number Diff line
@@ -72,8 +72,9 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
	do {
		to_process = nbytes - processed;

-		rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
-				       processed, csbcpb->cpb.aes_cbc.iv);
+		rc = nx_build_sg_lists(nx_ctx, desc->info, dst, src,
+				       &to_process, processed,
+				       csbcpb->cpb.aes_cbc.iv);
		if (rc)
			goto out;

+14 −26
Original line number Diff line number Diff line
@@ -327,7 +327,7 @@ static int generate_pat(u8 *iv,
}

static int ccm_nx_decrypt(struct aead_request   *req,
-			  struct blkcipher_desc *desc,
+			  u8                    *iv,
			  unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
@@ -348,7 +348,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
				 req->src, nbytes + req->assoclen, authsize,
				 SCATTERWALK_FROM_SG);

-	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
+	rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;
@@ -367,7 +367,7 @@ static int ccm_nx_decrypt(struct aead_request *req,

		NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

-		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
+		rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
				       &to_process, processed + req->assoclen,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
@@ -381,7 +381,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
		/* for partial completion, copy following for next
		 * entry into loop...
		 */
-		memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
+		memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
			csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
@@ -405,7 +405,7 @@ out:
}

static int ccm_nx_encrypt(struct aead_request   *req,
-			  struct blkcipher_desc *desc,
+			  u8                    *iv,
			  unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
@@ -418,7 +418,7 @@ static int ccm_nx_encrypt(struct aead_request *req,

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

-	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
+	rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;
@@ -436,7 +436,7 @@ static int ccm_nx_encrypt(struct aead_request *req,

		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

-		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
+		rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
				       &to_process, processed + req->assoclen,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
@@ -450,7 +450,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
		/* for partial completion, copy following for next
		 * entry into loop...
		 */
-		memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
+		memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
			csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
@@ -481,60 +481,48 @@ static int ccm4309_aes_nx_encrypt(struct aead_request *req)
 {
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
 	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
-	struct blkcipher_desc desc;
 	u8 *iv = rctx->iv;
 
 	iv[0] = 3;
 	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
 	memcpy(iv + 4, req->iv, 8);
 
-	desc.info = iv;
-
-	return ccm_nx_encrypt(req, &desc, req->assoclen - 8);
+	return ccm_nx_encrypt(req, iv, req->assoclen - 8);
 }

static int ccm_aes_nx_encrypt(struct aead_request *req)
 {
-	struct blkcipher_desc desc;
 	int rc;
 
-	desc.info = req->iv;
-
-	rc = crypto_ccm_check_iv(desc.info);
+	rc = crypto_ccm_check_iv(req->iv);
 	if (rc)
 		return rc;
 
-	return ccm_nx_encrypt(req, &desc, req->assoclen);
+	return ccm_nx_encrypt(req, req->iv, req->assoclen);
 }

static int ccm4309_aes_nx_decrypt(struct aead_request *req)
 {
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
 	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
-	struct blkcipher_desc desc;
 	u8 *iv = rctx->iv;
 
 	iv[0] = 3;
 	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
 	memcpy(iv + 4, req->iv, 8);
 
-	desc.info = iv;
-
-	return ccm_nx_decrypt(req, &desc, req->assoclen - 8);
+	return ccm_nx_decrypt(req, iv, req->assoclen - 8);
 }

static int ccm_aes_nx_decrypt(struct aead_request *req)
 {
-	struct blkcipher_desc desc;
 	int rc;
 
-	desc.info = req->iv;
-
-	rc = crypto_ccm_check_iv(desc.info);
+	rc = crypto_ccm_check_iv(req->iv);
 	if (rc)
 		return rc;
 
-	return ccm_nx_decrypt(req, &desc, req->assoclen);
+	return ccm_nx_decrypt(req, req->iv, req->assoclen);
 }

/* tell the block cipher walk routines that this is a stream cipher by
+3 −2
Original line number Diff line number Diff line
@@ -85,8 +85,9 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
	do {
		to_process = nbytes - processed;

-		rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
-				       processed, csbcpb->cpb.aes_ctr.iv);
+		rc = nx_build_sg_lists(nx_ctx, desc->info, dst, src,
+				       &to_process, processed,
+				       csbcpb->cpb.aes_ctr.iv);
		if (rc)
			goto out;

+2 −2
Original line number Diff line number Diff line
@@ -72,7 +72,7 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
	do {
		to_process = nbytes - processed;

-		rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
+		rc = nx_build_sg_lists(nx_ctx, NULL, dst, src, &to_process,
				       processed, NULL);
		if (rc)
			goto out;
+10 −14
Original line number Diff line number Diff line
@@ -166,8 +166,7 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
	return rc;
}

-static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
-		unsigned int assoclen)
+static int gmac(struct aead_request *req, const u8 *iv, unsigned int assoclen)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx =
@@ -190,7 +189,7 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	/* Copy IV */
-	memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);
+	memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, iv, AES_BLOCK_SIZE);

	do {
		/*
@@ -240,8 +239,7 @@ out:
	return rc;
}

-static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
-		     int enc)
+static int gcm_empty(struct aead_request *req, const u8 *iv, int enc)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx =
@@ -268,7 +266,7 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
	len = AES_BLOCK_SIZE;

	/* Encrypt the counter/IV */
-	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
+	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) iv,
				 &len, nx_ctx->ap->sglen);

	if (len != AES_BLOCK_SIZE)
@@ -285,7 +283,7 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
-			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+			   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));
@@ -313,7 +311,6 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
-	struct blkcipher_desc desc;
	unsigned int nbytes = req->cryptlen;
	unsigned int processed = 0, to_process;
	unsigned long irq_flags;
@@ -321,15 +318,14 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc,

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

-	desc.info = rctx->iv;
 	/* initialize the counter */
-	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
+	*(u32 *)&rctx->iv[NX_GCM_CTR_OFFSET] = 1;

	if (nbytes == 0) {
		if (assoclen == 0)
-			rc = gcm_empty(req, &desc, enc);
+			rc = gcm_empty(req, rctx->iv, enc);
 		else
-			rc = gmac(req, &desc, assoclen);
+			rc = gmac(req, rctx->iv, assoclen);
		if (rc)
			goto out;
		else
@@ -358,7 +354,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
		to_process = nbytes - processed;

		csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
-		rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
+		rc = nx_build_sg_lists(nx_ctx, rctx->iv, req->dst,
				       req->src, &to_process,
				       processed + req->assoclen,
				       csbcpb->cpb.aes_gcm.iv_or_cnt);
@@ -377,7 +373,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
		if (rc)
			goto out;

-		memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
+		memcpy(rctx->iv, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
			csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
Loading