Commit a213e41c authored by Gilad Ben-Yossef, committed by Greg Kroah-Hartman

staging: ccree: trim long lines for readability



The ccree driver did not adhere to the kernel's max 80 chars per line limit,
making the code hard to follow. Fix this by breaking long lines and, in some
cases, moving comments to a separate line from the code.

Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 07ece8b5
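
Before the diff itself, a minimal illustrative sketch of the wrapping style the
patch applies throughout. This is a hypothetical fragment, not code from the
driver (the helper name example_map_key is invented for illustration); it shows
the three patterns used in the hunks below: block comments moved onto their own
lines, wrapped arguments aligned under the opening parenthesis, and long
assignments broken after the '='.

	#include <linux/dma-mapping.h>
	#include <linux/types.h>

	/* Hypothetical helper, shown only to illustrate the style.
	 * Comments sit on their own lines instead of trailing the code.
	 */
	static dma_addr_t example_map_key(struct device *dev, const u8 *key,
					  unsigned int keylen)
	{
		dma_addr_t key_dma_addr;

		/* Wrapped arguments line up under the opening parenthesis;
		 * when even that would overflow 80 columns, the assignment
		 * is broken after the '='.
		 */
		key_dma_addr =
			dma_map_single(dev, (void *)key, keylen,
				       DMA_TO_DEVICE);
		return key_dma_addr;
	}

Each hunk below is the same transformation applied to a real call site.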
+104 −48
@@ -100,7 +100,8 @@ static void ssi_aead_exit(struct crypto_aead *tfm)

	/* Unmap enckey buffer */
	if (ctx->enckey) {
		dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey, ctx->enckey_dma_addr);
		dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey,
				  ctx->enckey_dma_addr);
		dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
			&ctx->enckey_dma_addr);
		ctx->enckey_dma_addr = 0;
@@ -259,12 +260,17 @@ static void ssi_aead_complete(struct device *dev, void *ssi_req)
					   SSI_SG_FROM_BUF);
		}

		/* If an IV was generated, copy it back to the user provided buffer. */
		/* If an IV was generated, copy it back to the user provided
		 * buffer.
		 */
		if (areq_ctx->backup_giv) {
			if (ctx->cipher_mode == DRV_CIPHER_CTR)
				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_IV_SIZE);
				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
				       CTR_RFC3686_NONCE_SIZE,
				       CTR_RFC3686_IV_SIZE);
			else if (ctx->cipher_mode == DRV_CIPHER_CCM)
				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
				       CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
		}
	}

@@ -275,8 +281,9 @@ static int xcbc_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
{
	/* Load the AES key */
	hw_desc_init(&desc[0]);
	/* We are using for the source/user key the same buffer as for the output keys,
	 * because after this key loading it is not needed anymore
	/* We are using for the source/user key the same buffer
	 * as for the output keys, * because after this key loading it
	 * is not needed anymore
	 */
	set_din_type(&desc[0], DMA_DLLI,
		     ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
@@ -428,7 +435,8 @@ static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
 * (copy to intenral buffer or hash in case of key longer than block
 */
static int
ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
		       unsigned int keylen)
{
	dma_addr_t key_dma_addr = 0;
	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -459,7 +467,8 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keyl
	}

	if (likely(keylen != 0)) {
		key_dma_addr = dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
		key_dma_addr = dma_map_single(dev, (void *)key, keylen,
					      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, key_dma_addr))) {
			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
				key, keylen);
@@ -587,8 +596,9 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
			/* Copy nonce from last 4 bytes in CTR key to
			 *  first 4 bytes in CTR IV
			 */
			memcpy(ctx->ctr_nonce, key + ctx->auth_keylen + ctx->enc_keylen -
				CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
			memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
			       ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
			       CTR_RFC3686_NONCE_SIZE);
			/* Set CTR key size */
			ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
		}
@@ -654,7 +664,8 @@ setkey_error:
}

#if SSI_CC_HAS_AES_CCM
static int ssi_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
static int ssi_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);

@@ -856,7 +867,8 @@ ssi_aead_process_cipher_data_desc(
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     (sg_dma_address(areq_ctx->src_sgl) +
			      areq_ctx->src_offset), areq_ctx->cryptlen, NS_BIT);
			      areq_ctx->src_offset), areq_ctx->cryptlen,
			      NS_BIT);
		set_dout_dlli(&desc[idx],
			      (sg_dma_address(areq_ctx->dst_sgl) +
			       areq_ctx->dst_offset),
@@ -1256,7 +1268,8 @@ static inline void ssi_aead_hmac_authenc(
		ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
		ssi_aead_setup_cipher_desc(req, desc, seq_size);
		ssi_aead_process_digest_header_desc(req, desc, seq_size);
		ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc, seq_size);
		ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc,
						  seq_size);
		ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
		ssi_aead_process_digest_result_desc(req, desc, seq_size);
		return;
@@ -1272,14 +1285,16 @@ static inline void ssi_aead_hmac_authenc(
		ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
		/* authenc after..*/
		ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
		ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
		ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc,
						   seq_size, direct);
		ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
		ssi_aead_process_digest_result_desc(req, desc, seq_size);

	} else { /*DECRYPT*/
		/* authenc first..*/
		ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
		ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
		ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc,
						   seq_size, direct);
		ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
		/* decrypt after.. */
		ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
@@ -1310,7 +1325,8 @@ ssi_aead_xcbc_authenc(
		ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
		ssi_aead_setup_cipher_desc(req, desc, seq_size);
		ssi_aead_process_digest_header_desc(req, desc, seq_size);
		ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc, seq_size);
		ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc,
						  seq_size);
		ssi_aead_process_digest_result_desc(req, desc, seq_size);
		return;
	}
@@ -1325,12 +1341,14 @@ ssi_aead_xcbc_authenc(
		ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
		/* authenc after.. */
		ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
		ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
		ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc,
						   seq_size, direct);
		ssi_aead_process_digest_result_desc(req, desc, seq_size);
	} else { /*DECRYPT*/
		/* authenc first.. */
		ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
		ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
		ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc,
						   seq_size, direct);
		/* decrypt after..*/
		ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
		/* read the digest result with setting the completion bit
@@ -1521,7 +1539,8 @@ static inline int ssi_aead_ccm(

	/* process the cipher */
	if (req_ctx->cryptlen)
		ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, &idx);
		ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc,
						  &idx);

	/* Read temporal MAC */
	hw_desc_init(&desc[idx]);
@@ -1571,7 +1590,9 @@ static int config_ccm_adata(struct aead_request *req)
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	//unsigned int size_of_a = 0, rem_a_size = 0;
	unsigned int lp = req->iv[0];
	/* Note: The code assume that req->iv[0] already contains the value of L' of RFC3610 */
	/* Note: The code assume that req->iv[0] already contains the value
	 * of L' of RFC3610
	 */
	unsigned int l = lp + 1;  /* This is L' of RFC 3610. */
	unsigned int m = ctx->authsize;  /* This is M' of RFC 3610. */
	u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
@@ -1628,11 +1649,18 @@ static void ssi_rfc4309_ccm_process(struct aead_request *req)

	/* L' */
	memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
	areq_ctx->ctr_iv[0] = 3;  /* For RFC 4309, always use 4 bytes for message length (at most 2^32-1 bytes). */
	/* For RFC 4309, always use 4 bytes for message length
	 * (at most 2^32-1 bytes).
	 */
	areq_ctx->ctr_iv[0] = 3;

	/* In RFC 4309 there is an 11-bytes nonce+IV part, that we build here. */
	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce, CCM_BLOCK_NONCE_SIZE);
	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET,    req->iv,        CCM_BLOCK_IV_SIZE);
	/* In RFC 4309 there is an 11-bytes nonce+IV part,
	 * that we build here.
	 */
	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce,
	       CCM_BLOCK_NONCE_SIZE);
	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
	       CCM_BLOCK_IV_SIZE);
	req->iv = areq_ctx->ctr_iv;
	req->assoclen -= CCM_BLOCK_IV_SIZE;
}
@@ -1704,7 +1732,9 @@ static inline void ssi_aead_gcm_setup_ghash_desc(
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	/* Load GHASH initial STATE (which is 0). (for any hash there is an initial state) */
	/* Load GHASH initial STATE (which is 0). (for any hash there is an
	 * initial state)
	 */
	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
@@ -1855,7 +1885,8 @@ static inline int ssi_aead_gcm(
	ssi_aead_gcm_setup_gctr_desc(req, desc, seq_size);
	/* process(gctr+ghash) */
	if (req_ctx->cryptlen)
		ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, seq_size);
		ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc,
						  seq_size);
	ssi_aead_process_gcm_result_desc(req, desc, seq_size);

	return 0;
@@ -1895,13 +1926,16 @@ static inline void ssi_aead_dump_gcm(

	dump_byte_array("mac_buf", req_ctx->mac_buf, AES_BLOCK_SIZE);

	dump_byte_array("gcm_len_block", req_ctx->gcm_len_block.len_a, AES_BLOCK_SIZE);
	dump_byte_array("gcm_len_block", req_ctx->gcm_len_block.len_a,
			AES_BLOCK_SIZE);

	if (req->src && req->cryptlen)
		dump_byte_array("req->src", sg_virt(req->src), req->cryptlen + req->assoclen);
		dump_byte_array("req->src", sg_virt(req->src),
				req->cryptlen + req->assoclen);

	if (req->dst)
		dump_byte_array("req->dst", sg_virt(req->dst), req->cryptlen + ctx->authsize + req->assoclen);
		dump_byte_array("req->dst", sg_virt(req->dst),
				req->cryptlen + ctx->authsize + req->assoclen);
}
#endif

@@ -1939,10 +1973,14 @@ static int config_gcm_context(struct aead_request *req)
		memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
		temp64 = cpu_to_be64(cryptlen * 8);
		memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
	} else { //rfc4543=>  all data(AAD,IV,Plain) are considered additional data that is nothing is encrypted.
	} else {
		/* rfc4543=>  all data(AAD,IV,Plain) are considered additional
		 * data that is nothing is encrypted.
		 */
		__be64 temp64;

		temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
		temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE +
				      cryptlen) * 8);
		memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
		temp64 = 0;
		memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@@ -1957,15 +1995,18 @@ static void ssi_rfc4_gcm_process(struct aead_request *req)
	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);

	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET, ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET,    req->iv, GCM_BLOCK_RFC4_IV_SIZE);
	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
	       ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
	       GCM_BLOCK_RFC4_IV_SIZE);
	req->iv = areq_ctx->ctr_iv;
	req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
}

#endif /*SSI_CC_HAS_AES_GCM*/

static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction direct)
static int ssi_aead_process(struct aead_request *req,
			    enum drv_crypto_direction direct)
{
	int rc = 0;
	int seq_len = 0;
@@ -2006,7 +2047,8 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
		/* Build CTR IV - Copy nonce from last 4 bytes in
		 * CTR key to first 4 bytes in CTR IV
		 */
		memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce, CTR_RFC3686_NONCE_SIZE);
		memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
		       CTR_RFC3686_NONCE_SIZE);
		if (!areq_ctx->backup_giv) /*User none-generated IV*/
			memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
			       req->iv, CTR_RFC3686_IV_SIZE);
@@ -2021,7 +2063,8 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
		   (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
		areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
		if (areq_ctx->ctr_iv != req->iv) {
			memcpy(areq_ctx->ctr_iv, req->iv, crypto_aead_ivsize(tfm));
			memcpy(areq_ctx->ctr_iv, req->iv,
			       crypto_aead_ivsize(tfm));
			req->iv = areq_ctx->ctr_iv;
		}
	}  else {
@@ -2064,20 +2107,29 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
	if (areq_ctx->backup_giv) {
		/* set the DMA mapped IV address*/
		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
			ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr + CTR_RFC3686_NONCE_SIZE;
			ssi_req.ivgen_dma_addr[0] =
				areq_ctx->gen_ctx.iv_dma_addr +
				CTR_RFC3686_NONCE_SIZE;
			ssi_req.ivgen_dma_addr_len = 1;
		} else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
			/* In ccm, the IV needs to exist both inside B0 and inside the counter.
			 * It is also copied to iv_dma_addr for other reasons (like returning
			 * it to the user).
			/* In ccm, the IV needs to exist both inside B0 and
			 * inside the counter.It is also copied to iv_dma_addr
			 * for other reasons (like returning it to the user).
			 * So, using 3 (identical) IV outputs.
			 */
			ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr + CCM_BLOCK_IV_OFFSET;
			ssi_req.ivgen_dma_addr[1] = sg_dma_address(&areq_ctx->ccm_adata_sg) + CCM_B0_OFFSET          + CCM_BLOCK_IV_OFFSET;
			ssi_req.ivgen_dma_addr[2] = sg_dma_address(&areq_ctx->ccm_adata_sg) + CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
			ssi_req.ivgen_dma_addr[0] =
				areq_ctx->gen_ctx.iv_dma_addr +
				CCM_BLOCK_IV_OFFSET;
			ssi_req.ivgen_dma_addr[1] =
				sg_dma_address(&areq_ctx->ccm_adata_sg) +
				CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
			ssi_req.ivgen_dma_addr[2] =
				sg_dma_address(&areq_ctx->ccm_adata_sg) +
				CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
			ssi_req.ivgen_dma_addr_len = 3;
		} else {
			ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr;
			ssi_req.ivgen_dma_addr[0] =
				areq_ctx->gen_ctx.iv_dma_addr;
			ssi_req.ivgen_dma_addr_len = 1;
		}

@@ -2232,7 +2284,8 @@ out:

#if SSI_CC_HAS_AES_GCM

static int ssi_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
static int ssi_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
@@ -2248,7 +2301,8 @@ static int ssi_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
	return ssi_aead_setkey(tfm, key, keylen);
}

static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
@@ -2696,7 +2750,8 @@ static struct ssi_crypto_alg *ssi_aead_create_alg(

	alg = &template->template_aead;

	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->base.cra_module = THIS_MODULE;
@@ -2725,7 +2780,8 @@ int ssi_aead_free(struct ssi_drvdata *drvdata)

	if (aead_handle) {
		/* Remove registered algs */
		list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list, entry) {
		list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list,
					 entry) {
			crypto_unregister_aead(&t_alg->aead_alg);
			list_del(&t_alg->entry);
			kfree(t_alg);
+10 −5
@@ -74,17 +74,22 @@ struct aead_req_ctx {
	} gcm_len_block;

	u8 ccm_config[CCM_CONFIG_BUF_SIZE] ____cacheline_aligned;
	unsigned int hw_iv_size ____cacheline_aligned; /*HW actual size input*/
	u8 backup_mac[MAX_MAC_SIZE]; /*used to prevent cache coherence problem*/
	/* HW actual size input */
	unsigned int hw_iv_size ____cacheline_aligned;
	/* used to prevent cache coherence problem */
	u8 backup_mac[MAX_MAC_SIZE];
	u8 *backup_iv; /*store iv for generated IV flow*/
	u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/
	dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
	dma_addr_t ccm_iv0_dma_addr; /* buffer for internal ccm configurations */
	/* buffer for internal ccm configurations */
	dma_addr_t ccm_iv0_dma_addr;
	dma_addr_t icv_dma_addr; /* Phys. address of ICV */

	//used in gcm
	dma_addr_t gcm_iv_inc1_dma_addr; /* buffer for internal gcm configurations */
	dma_addr_t gcm_iv_inc2_dma_addr; /* buffer for internal gcm configurations */
	/* buffer for internal gcm configurations */
	dma_addr_t gcm_iv_inc1_dma_addr;
	/* buffer for internal gcm configurations */
	dma_addr_t gcm_iv_inc2_dma_addr;
	dma_addr_t hkey_dma_addr; /* Phys. address of hkey */
	dma_addr_t gcm_block_len_dma_addr; /* Phys. address of gcm block len */
	bool is_gcm4543;
+61 −39
@@ -112,7 +112,8 @@ static unsigned int cc_get_sgl_nents(
			nents++;
			/* get the number of bytes in the last entry */
			*lbytes = nbytes;
			nbytes -= (sg_list->length > nbytes) ? nbytes : sg_list->length;
			nbytes -= (sg_list->length > nbytes) ?
					nbytes : sg_list->length;
			sg_list = sg_next(sg_list);
		} else {
			sg_list = (struct scatterlist *)sg_page(sg_list);
@@ -433,7 +434,8 @@ ssi_aead_handle_config_buf(struct device *dev,
{
	dev_dbg(dev, " handle additional data config set to DLLI\n");
	/* create sg for the current buffer */
	sg_init_one(&areq_ctx->ccm_adata_sg, config_data, AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
	if (unlikely(dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1,
				DMA_TO_DEVICE) != 1)) {
		dev_err(dev, "dma_map_sg() config buffer failed\n");
@@ -701,7 +703,8 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    likely(req->src == req->dst)) {
		/* copy back mac from temporary location to deal with possible
		 * data memory overriding that caused by cache coherence problem.
		 * data memory overriding that caused by cache coherence
		 * problem.
		 */
		cc_copy_mac(dev, req, SSI_SG_FROM_BUF);
	}
@@ -716,7 +719,9 @@ static inline int cc_get_aead_icv_nents(
	bool *is_icv_fragmented)
{
	unsigned int icv_max_size = 0;
	unsigned int icv_required_size = authsize > last_entry_data_size ? (authsize - last_entry_data_size) : authsize;
	unsigned int icv_required_size = authsize > last_entry_data_size ?
					(authsize - last_entry_data_size) :
					authsize;
	unsigned int nents;
	unsigned int i;

@@ -735,10 +740,12 @@ static inline int cc_get_aead_icv_nents(
		icv_max_size = sgl->length;

	if (last_entry_data_size > authsize) {
		nents = 0; /* ICV attached to data in last entry (not fragmented!) */
		/* ICV attached to data in last entry (not fragmented!) */
		nents = 0;
		*is_icv_fragmented = false;
	} else if (last_entry_data_size == authsize) {
		nents = 1; /* ICV placed in whole last entry (not fragmented!) */
		/* ICV placed in whole last entry (not fragmented!) */
		nents = 1;
		*is_icv_fragmented = false;
	} else if (icv_max_size > icv_required_size) {
		nents = 1;
@@ -773,7 +780,8 @@ static inline int cc_aead_chain_iv(
		goto chain_iv_exit;
	}

	areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv, hw_iv_size,
	areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
						       hw_iv_size,
						       DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr))) {
		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
@@ -784,7 +792,8 @@ static inline int cc_aead_chain_iv(

	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
	if (do_chain && areq_ctx->plaintext_authenticate_only) {  // TODO: what about CTR?? ask Ron
	// TODO: what about CTR?? ask Ron
	if (do_chain && areq_ctx->plaintext_authenticate_only) {
		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
		unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
		unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
@@ -836,12 +845,15 @@ static inline int cc_aead_chain_assoc(
	//iterate over the sgl to see how many entries are for associated data
	//it is assumed that if we reach here , the sgl is already mapped
	sg_index = current_sg->length;
	if (sg_index > size_of_assoc) { //the first entry in the scatter list contains all the associated data
	//the first entry in the scatter list contains all the associated data
	if (sg_index > size_of_assoc) {
		mapped_nents++;
	} else {
		while (sg_index <= size_of_assoc) {
			current_sg = sg_next(current_sg);
			//if have reached the end of the sgl, then this is unexpected
			/* if have reached the end of the sgl, then this is
			 * unexpected
			 */
			if (!current_sg) {
				dev_err(dev, "reached end of sg list. unexpected\n");
				return -EINVAL;
@@ -959,8 +971,8 @@ static inline int cc_prepare_aead_data_mlli(

		if (unlikely(areq_ctx->is_icv_fragmented)) {
			/* Backup happens only when ICV is fragmented, ICV
			 * verification is made by CPU compare in order to simplify
			 * MAC verification upon request completion
			 * verification is made by CPU compare in order to
			 * simplify MAC verification upon request completion
			 */
			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
				/* In coherent platforms (e.g. ACP)
@@ -974,7 +986,8 @@ static inline int cc_prepare_aead_data_mlli(
				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
			} else {
				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
				areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
				areq_ctx->icv_dma_addr =
					areq_ctx->mac_buf_dma_addr;
			}
		} else { /* Contig. ICV */
			/*Should hanlde if the sg is not contig.*/
@@ -1076,7 +1089,8 @@ static inline int cc_aead_chain_data(
	int rc = 0;
	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
	u32 offset = 0;
	unsigned int size_for_map = req->assoclen + req->cryptlen; /*non-inplace mode*/
	/* non-inplace mode */
	unsigned int size_for_map = req->assoclen + req->cryptlen;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	u32 sg_index = 0;
	bool chained = false;
@@ -1097,7 +1111,8 @@ static inline int cc_aead_chain_data(
	if (is_gcm4543)
		size_for_map += crypto_aead_ivsize(tfm);

	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			authsize : 0;
	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
					    &src_last_bytes, &chained);
	sg_index = areq_ctx->src_sgl->length;
@@ -1125,7 +1140,8 @@ static inline int cc_aead_chain_data(

	if (req->src != req->dst) {
		size_for_map = req->assoclen + req->cryptlen;
		size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
		size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
				authsize : 0;
		if (is_gcm4543)
			size_for_map += crypto_aead_ivsize(tfm);

@@ -1276,12 +1292,13 @@ int cc_map_aead_request(
	}

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		areq_ctx->ccm_iv0_dma_addr = dma_map_single(dev,
							    (areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET),
							    AES_BLOCK_SIZE,
							    DMA_TO_DEVICE);
		areq_ctx->ccm_iv0_dma_addr =
			dma_map_single(dev, (areq_ctx->ccm_config +
					     CCM_CTR_COUNT_0_OFFSET),
				       AES_BLOCK_SIZE, DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(dev, areq_ctx->ccm_iv0_dma_addr))) {
		if (unlikely(dma_mapping_error(dev,
					       areq_ctx->ccm_iv0_dma_addr))) {
			dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE,
				(areq_ctx->ccm_config +
@@ -1304,30 +1321,31 @@ int cc_map_aead_request(
							 areq_ctx->hkey,
							 AES_BLOCK_SIZE,
							 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(dev, areq_ctx->hkey_dma_addr))) {
		if (unlikely(dma_mapping_error(dev,
					       areq_ctx->hkey_dma_addr))) {
			dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, areq_ctx->hkey);
			rc = -ENOMEM;
			goto aead_map_failure;
		}

		areq_ctx->gcm_block_len_dma_addr = dma_map_single(dev,
								  &areq_ctx->gcm_len_block,
								  AES_BLOCK_SIZE,
								  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_block_len_dma_addr))) {
		areq_ctx->gcm_block_len_dma_addr =
			dma_map_single(dev, &areq_ctx->gcm_len_block,
				       AES_BLOCK_SIZE, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev,
					       areq_ctx->gcm_block_len_dma_addr))) {
			dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
			rc = -ENOMEM;
			goto aead_map_failure;
		}

		areq_ctx->gcm_iv_inc1_dma_addr = dma_map_single(dev,
								areq_ctx->gcm_iv_inc1,
								AES_BLOCK_SIZE,
								DMA_TO_DEVICE);
		areq_ctx->gcm_iv_inc1_dma_addr =
			dma_map_single(dev, areq_ctx->gcm_iv_inc1,
				       AES_BLOCK_SIZE, DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc1_dma_addr))) {
		if (unlikely(dma_mapping_error(dev,
					       areq_ctx->gcm_iv_inc1_dma_addr))) {
			dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
			areq_ctx->gcm_iv_inc1_dma_addr = 0;
@@ -1335,12 +1353,12 @@ int cc_map_aead_request(
			goto aead_map_failure;
		}

		areq_ctx->gcm_iv_inc2_dma_addr = dma_map_single(dev,
								areq_ctx->gcm_iv_inc2,
								AES_BLOCK_SIZE,
								DMA_TO_DEVICE);
		areq_ctx->gcm_iv_inc2_dma_addr =
			dma_map_single(dev, areq_ctx->gcm_iv_inc2,
				       AES_BLOCK_SIZE, DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc2_dma_addr))) {
		if (unlikely(dma_mapping_error(dev,
					       areq_ctx->gcm_iv_inc2_dma_addr))) {
			dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
			areq_ctx->gcm_iv_inc2_dma_addr = 0;
@@ -1414,7 +1432,9 @@ int cc_map_aead_request(
			goto aead_map_failure;
	}

	/* Mlli support -start building the MLLI according to the above results */
	/* Mlli support -start building the MLLI according to the above
	 * results
	 */
	if (unlikely(
		areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI ||
		areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI)) {
@@ -1667,7 +1687,9 @@ void cc_unmap_hash_request(struct device *dev, void *ctx,
			sg_dma_len(areq_ctx->buff_sg));
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
		if (!do_revert) {
			/* clean the previous data length for update operation */
			/* clean the previous data length for update
			 * operation
			 */
			*prev_len = 0;
		} else {
			areq_ctx->buff_index ^= 1;
+47 −19

(File changed; diff collapsed: preview size limit exceeded.)

+4 −1
@@ -33,7 +33,10 @@
#define CC_CRYPTO_CIPHER_KEY_KFDE3	BIT(3)
#define CC_CRYPTO_CIPHER_DU_SIZE_512B	BIT(4)

#define CC_CRYPTO_CIPHER_KEY_KFDE_MASK (CC_CRYPTO_CIPHER_KEY_KFDE0 | CC_CRYPTO_CIPHER_KEY_KFDE1 | CC_CRYPTO_CIPHER_KEY_KFDE2 | CC_CRYPTO_CIPHER_KEY_KFDE3)
#define CC_CRYPTO_CIPHER_KEY_KFDE_MASK (CC_CRYPTO_CIPHER_KEY_KFDE0 | \
					CC_CRYPTO_CIPHER_KEY_KFDE1 | \
					CC_CRYPTO_CIPHER_KEY_KFDE2 | \
					CC_CRYPTO_CIPHER_KEY_KFDE3)

struct blkcipher_req_ctx {
	struct async_gen_req_ctx gen_ctx;
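
A brief, hedged verification note: the over-long lines this patch trims are
what the kernel's scripts/checkpatch.pl tool reports as "WARNING: line over 80
characters". Assuming the staging-era location of these files under
drivers/staging/ccree/ (the file paths are not shown in this preview; the name
below is inferred from the ssi_aead_* functions in the first set of hunks), a
spot-check after applying the patch could look like:

	./scripts/checkpatch.pl -f drivers/staging/ccree/ssi_aead.c

A run with no line-length warnings confirms the trimming is complete for that
file.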