Commit 492ddcbb authored by Gilad Ben-Yossef's avatar Gilad Ben-Yossef Committed by Greg Kroah-Hartman
Browse files

staging: ccree: remove/add (un)needed blank lines



Remove or add blank lines as needed to match coding style.

Signed-off-by: default avatarGilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: default avatarGreg Kroah-Hartman <gregkh@linuxfoundation.org>
parent e8e5110e
Loading
Loading
Loading
Loading
+5 −15
Original line number Diff line number Diff line
@@ -49,7 +49,6 @@
#define AES_CCM_RFC4309_NONCE_SIZE 3
#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE


/* Value of each ICV_CMP byte (of 8) in case of success */
#define ICV_VERIF_OK 0x01

@@ -209,7 +208,6 @@ init_failed:
	return -ENOMEM;
}


static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
{
	struct aead_request *areq = (struct aead_request *)ssi_req;
@@ -402,6 +400,7 @@ static int validate_keys_sizes(struct ssi_aead_ctx *ctx)

	return 0; /* All tests of keys sizes passed */
}

/* This function prepares the user key so it can pass to the hmac processing
 * (copy to internal buffer or hash in case the key is longer than the block)
 */
@@ -526,7 +525,6 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keyl
	return rc;
}


static int
ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
{
@@ -594,7 +592,6 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
			goto badkey;
	}


	/* STAT_PHASE_2: Create sequence */

	switch (ctx->auth_mode) {
@@ -613,7 +610,6 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
		goto badkey;
	}


	/* STAT_PHASE_3: Submit sequence to HW */

	if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
@@ -1372,6 +1368,7 @@ data_size_err:
static unsigned int format_ccm_a0(u8 *pA0Buff, u32 headerSize)
{
	unsigned int len = 0;

	if (headerSize == 0)
		return 0;

@@ -1424,7 +1421,6 @@ static inline int ssi_aead_ccm(
	unsigned int cipher_flow_mode;
	dma_addr_t mac_result;


	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
		cipher_flow_mode = AES_to_HASH_and_DOUT;
		mac_result = req_ctx->mac_buf_dma_addr;
@@ -1481,7 +1477,6 @@ static inline int ssi_aead_ccm(
	set_aes_not_hash_mode(&desc[idx]);
	idx++;


	/* process assoc data */
	if (req->assoclen > 0) {
		ssi_aead_create_assoc_desc(req, DIN_HASH, desc, &idx);
@@ -1556,6 +1551,7 @@ static int config_ccm_adata(struct aead_request *req)
				req->cryptlen :
				(req->cryptlen - ctx->authsize);
	int rc;

	memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
	memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);

@@ -1808,7 +1804,6 @@ static inline int ssi_aead_gcm(
		cipher_flow_mode = AES_to_HASH_and_DOUT;
	}


	//in RFC4543 no data to encrypt. just copy data from src to dest.
	if (req_ctx->plaintext_authenticate_only) {
		ssi_aead_process_cipher_data_desc(req, BYPASS, desc, seq_size);
@@ -1904,15 +1899,16 @@ static int config_gcm_context(struct aead_request *req)
	memcpy(req->iv + 12, &counter, 4);
	memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);


	if (!req_ctx->plaintext_authenticate_only) {
		__be64 temp64;

		temp64 = cpu_to_be64(req->assoclen * 8);
		memcpy(&req_ctx->gcm_len_block.lenA, &temp64, sizeof(temp64));
		temp64 = cpu_to_be64(cryptlen * 8);
		memcpy(&req_ctx->gcm_len_block.lenC, &temp64, 8);
	} else { //rfc4543 => all data (AAD, IV, plaintext) are considered additional data; that is, nothing is encrypted.
		__be64 temp64;

		temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
		memcpy(&req_ctx->gcm_len_block.lenA, &temp64, sizeof(temp64));
		temp64 = 0;
@@ -1934,7 +1930,6 @@ static void ssi_rfc4_gcm_process(struct aead_request *req)
	req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
}


#endif /*SSI_CC_HAS_AES_GCM*/

static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction direct)
@@ -1948,7 +1943,6 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
	struct device *dev = &ctx->drvdata->plat_dev->dev;
	struct ssi_crypto_req ssi_req = {};


	SSI_LOG_DEBUG("%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
		((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Encrypt" : "Decrypt"), ctx, req, req->iv,
		sg_virt(req->src), req->src->offset, sg_virt(req->dst), req->dst->offset, req->cryptlen);
@@ -1973,7 +1967,6 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
	areq_ctx->req_authsize = ctx->authsize;
	areq_ctx->cipher_mode = ctx->cipher_mode;


	/* STAT_PHASE_1: Map buffers */

	if (ctx->cipher_mode == DRV_CIPHER_CTR) {
@@ -2057,7 +2050,6 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
		ssi_req.ivgen_size = crypto_aead_ivsize(tfm);
	}


	/* STAT_PHASE_2: Create sequence */

	/* Load MLLI tables to SRAM if necessary */
@@ -2091,7 +2083,6 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
		goto exit;
	}


	/* STAT_PHASE_3: Lock HW and push sequence */

	rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 1);
@@ -2101,7 +2092,6 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
		ssi_buffer_mgr_unmap_aead_request(dev, req);
	}


exit:
	return rc;
}
+0 −4
Original line number Diff line number Diff line
@@ -25,13 +25,11 @@
#include <crypto/algapi.h>
#include <crypto/ctr.h>


/* mac_cmp - HW writes 8 B but all bytes hold the same value */
#define ICV_CMP_SIZE 8
#define CCM_CONFIG_BUF_SIZE (AES_BLOCK_SIZE * 3)
#define MAX_MAC_SIZE MAX(SHA256_DIGEST_SIZE, AES_BLOCK_SIZE)


/* defines for AES GCM configuration buffer */
#define GCM_BLOCK_LEN_SIZE 8

@@ -40,8 +38,6 @@
#define GCM_BLOCK_RFC4_NONCE_OFFSET	0
#define GCM_BLOCK_RFC4_NONCE_SIZE	4



/* Offsets into AES CCM configuration buffer */
#define CCM_B0_OFFSET 0
#define CCM_A0_OFFSET 16
+4 −3
Original line number Diff line number Diff line
@@ -42,7 +42,6 @@
#define GET_DMA_BUFFER_TYPE(buff_type)
#endif


enum dma_buffer_type {
	DMA_NULL_TYPE = -1,
	DMA_SGL_TYPE = 1,
@@ -80,6 +79,7 @@ static unsigned int ssi_buffer_mgr_get_sgl_nents(
	struct scatterlist *sg_list, unsigned int nbytes, u32 *lbytes, bool *is_chained)
{
	unsigned int nents = 0;

	while (nbytes != 0) {
		if (sg_is_chain(sg_list)) {
			SSI_LOG_ERR("Unexpected chained entry "
@@ -181,7 +181,6 @@ static inline int ssi_buffer_mgr_render_buff_to_mlli(
	return 0;
}


static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
	struct scatterlist *sgl, u32 sgl_data_len, u32 sglOffset, u32 *curr_nents,
	u32 **mlli_entry_pp)
@@ -322,6 +321,7 @@ ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
{
	u32 i, j;
	struct scatterlist *l_sg = sg;

	for (i = 0; i < nents; i++) {
		if (!l_sg)
			break;
@@ -441,7 +441,6 @@ ssi_aead_handle_config_buf(struct device *dev,
	return 0;
}


static inline int ssi_ahash_handle_curr_buf(struct device *dev,
					   struct ahash_req_ctx *areq_ctx,
					   u8 *curr_buff,
@@ -700,6 +699,7 @@ void ssi_buffer_mgr_unmap_aead_request(
	    likely(req->src == req->dst))
	{
		u32 size_to_skip = req->assoclen;

		if (areq_ctx->is_gcm4543)
			size_to_skip += crypto_aead_ivsize(tfm);

@@ -1027,6 +1027,7 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
			 * MAC verification upon request completion
			 */
			  u32 size_to_skip = req->assoclen;

			  if (areq_ctx->is_gcm4543)
				  size_to_skip += crypto_aead_ivsize(tfm);

+0 −1
Original line number Diff line number Diff line
@@ -26,7 +26,6 @@
#include "ssi_config.h"
#include "ssi_driver.h"


enum ssi_req_dma_buf_type {
	SSI_DMA_BUF_NULL = 0,
	SSI_DMA_BUF_DLLI,
+3 −14
Original line number Diff line number Diff line
@@ -47,6 +47,7 @@ struct cc_user_key_info {
	u8 *key;
	dma_addr_t key_dma_addr;
};

struct cc_hw_key_info {
	enum cc_hw_crypto_key key1_slot;
	enum cc_hw_crypto_key key2_slot;
@@ -67,7 +68,6 @@ struct ssi_ablkcipher_ctx {

static void ssi_ablkcipher_complete(struct device *dev, void *ssi_req, void __iomem *cc_base);


static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size) {
	switch (ctx_p->flow_mode) {
	case S_DIN_to_AES:
@@ -108,7 +108,6 @@ static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size) {
	return -EINVAL;
}


static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p, unsigned int size) {
	switch (ctx_p->flow_mode) {
	case S_DIN_to_AES:
@@ -252,7 +251,6 @@ static void ssi_blkcipher_exit(struct crypto_tfm *tfm)
	SSI_LOG_DEBUG("Free key buffer in context. key=@%p\n", ctx_p->user.key);
}


struct tdes_keys {
	u8	key1[DES_KEY_SIZE];
	u8	key2[DES_KEY_SIZE];
@@ -396,8 +394,6 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
		return -EINVAL;
	}



	/* STAT_PHASE_1: Copy key to ctx */
	dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
					max_key_buf_size, DMA_TO_DEVICE);
@@ -422,6 +418,7 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
			int key_len = keylen >> 1;
			int err;
			SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);

			desc->tfm = ctx_p->shash_tfm;

			err = crypto_shash_digest(desc, ctx_p->user.key, key_len, ctx_p->user.key + key_len);
@@ -435,7 +432,6 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
					max_key_buf_size, DMA_TO_DEVICE);
	ctx_p->keylen = keylen;


	 SSI_LOG_DEBUG("ssi_blkcipher_setkey: return safely");
	return 0;
}
@@ -598,7 +594,6 @@ static inline void ssi_blkcipher_create_multi2_setup_desc(
	set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
	(*seq_size)++;


	/* Set state */
	hw_desc_init(&desc[*seq_size]);
	set_din_type(&desc[*seq_size], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
@@ -724,7 +719,6 @@ static int ssi_blkcipher_complete(struct device *dev,

	ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);


	/* Set the inflight counter value to a local variable */
	inflight_counter =  ctx_p->drvdata->inflight_counter;
	/*Decrease the inflight counter*/
@@ -790,7 +784,6 @@ static int ssi_blkcipher_process(
	/* Setup request context */
	req_ctx->gen_ctx.op_type = direction;


	/* STAT_PHASE_1: Map buffers */

	rc = ssi_buffer_mgr_map_blkcipher_request(ctx_p->drvdata, req_ctx, ivsize, nbytes, info, src, dst);
@@ -799,7 +792,6 @@ static int ssi_blkcipher_process(
		goto exit_process;
	}


	/* STAT_PHASE_2: Create sequence */

	/* Setup processing */
@@ -878,7 +870,6 @@ static int ssi_ablkcipher_init(struct crypto_tfm *tfm)
	return ssi_blkcipher_init(tfm);
}


static int ssi_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
				const u8 *key,
				unsigned int keylen)
@@ -911,7 +902,6 @@ static int ssi_ablkcipher_decrypt(struct ablkcipher_request *req)
	return ssi_blkcipher_process(tfm, req_ctx, req->dst, req->src, req->nbytes, req->info, ivsize, (void *)req, DRV_CRYPTO_DIRECTION_DECRYPT);
}


/* DX Block cipher alg */
static struct ssi_alg_template blkcipher_algs[] = {
/* Async template */
@@ -1290,6 +1280,7 @@ int ssi_ablkcipher_free(struct ssi_drvdata *drvdata)
	struct ssi_blkcipher_handle *blkcipher_handle =
						drvdata->blkcipher_handle;
	struct device *dev;

	dev = &drvdata->plat_dev->dev;

	if (blkcipher_handle) {
@@ -1307,8 +1298,6 @@ int ssi_ablkcipher_free(struct ssi_drvdata *drvdata)
	return 0;
}



int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
{
	struct ssi_blkcipher_handle *ablkcipher_handle;
Loading