Commit 7f821f0c authored by Gilad Ben-Yossef's avatar Gilad Ben-Yossef Committed by Greg Kroah-Hartman
Browse files

staging: ccree: remove cycle count debug support



The ccree driver had support for rough performance debugging
via cycle counting which has bit rotted and can easily be
replaced with perf. Remove it from the driver.

Signed-off-by: default avatarGilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: default avatarGreg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 841d1d80
Loading
Loading
Loading
Loading
+0 −14
Original line number Diff line number Diff line
@@ -29,8 +29,6 @@
#define HW_DESC_SIZE_WORDS		6
#define HW_QUEUE_SLOTS_MAX              15 /* Max. available slots in HW queue */

#define _HW_DESC_MONITOR_KICK 0x7FFFC00

#define CC_REG_NAME(word, name) DX_DSCRPTR_QUEUE_WORD ## word ## _ ## name

#define CC_REG_LOW(word, name)  \
@@ -606,16 +604,4 @@ static inline void set_cipher_do(struct cc_hw_desc *pdesc,
				(config & HW_KEY_MASK_CIPHER_DO));
}

/*!
 * This macro sets the DIN field of a HW descriptors to star/stop monitor descriptor.
 * Used for performance measurements and debug purposes.
 *
 * \param pDesc pointer HW descriptor struct
 */
#define HW_DESC_SET_DIN_MONITOR_CNTR(pDesc)										\
	do {														\
		CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_MEASURE_CNTR, VALUE, (pDesc)->word[1], _HW_DESC_MONITOR_KICK);	\
	} while (0)


#endif /*__CC_HW_QUEUE_DEFS_H__*/
+0 −33
Original line number Diff line number Diff line
@@ -217,9 +217,6 @@ static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *c
	struct crypto_aead *tfm = crypto_aead_reqtfm(ssi_req);
	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
	int err = 0;
	DECL_CYCLE_COUNT_RESOURCES;

	START_CYCLE_COUNT();

	ssi_buffer_mgr_unmap_aead_request(dev, areq);

@@ -254,7 +251,6 @@ static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *c
		}
	}

	END_CYCLE_COUNT(STAT_OP_TYPE_GENERIC, STAT_PHASE_4);
	aead_request_complete(areq, err);
}

@@ -521,10 +517,6 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keyl
		idx++;
	}

#ifdef ENABLE_CYCLE_COUNT
	ssi_req.op_type = STAT_OP_TYPE_SETKEY;
#endif

	rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
	if (unlikely(rc != 0))
		SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
@@ -546,14 +538,12 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
	struct crypto_authenc_key_param *param;
	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
	int seq_len = 0, rc = -EINVAL;
	DECL_CYCLE_COUNT_RESOURCES;

	SSI_LOG_DEBUG("Setting key in context @%p for %s. key=%p keylen=%u\n",
		ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);

	CHECK_AND_RETURN_UPON_FIPS_ERROR();
	/* STAT_PHASE_0: Init and sanity checks */
	START_CYCLE_COUNT();

	if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
		if (!RTA_OK(rta, keylen))
@@ -592,9 +582,7 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
	if (unlikely(rc != 0))
		goto badkey;

	END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_0);
	/* STAT_PHASE_1: Copy key to ctx */
	START_CYCLE_COUNT();

	/* Get key material */
	memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
@@ -608,10 +596,8 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
			goto badkey;
	}

	END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_1);

	/* STAT_PHASE_2: Create sequence */
	START_CYCLE_COUNT();

	switch (ctx->auth_mode) {
	case DRV_HASH_SHA1:
@@ -629,15 +615,10 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
		goto badkey;
	}

	END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_2);

	/* STAT_PHASE_3: Submit sequence to HW */
	START_CYCLE_COUNT();

	if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
#ifdef ENABLE_CYCLE_COUNT
		ssi_req.op_type = STAT_OP_TYPE_SETKEY;
#endif
		rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 0);
		if (unlikely(rc != 0)) {
			SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
@@ -646,7 +627,6 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
	}

	/* Update STAT_PHASE_3 */
	END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_3);
	return rc;

badkey:
@@ -1977,7 +1957,6 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
	struct device *dev = &ctx->drvdata->plat_dev->dev;
	struct ssi_crypto_req ssi_req = {};

	DECL_CYCLE_COUNT_RESOURCES;

	SSI_LOG_DEBUG("%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
		((direct==DRV_CRYPTO_DIRECTION_ENCRYPT)?"Encrypt":"Decrypt"), ctx, req, req->iv,
@@ -1985,7 +1964,6 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
	CHECK_AND_RETURN_UPON_FIPS_ERROR();

	/* STAT_PHASE_0: Init and sanity checks */
	START_CYCLE_COUNT();

	/* Check data length according to mode */
	if (unlikely(validate_data_size(ctx, direct, req) != 0)) {
@@ -1999,19 +1977,13 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
	ssi_req.user_cb = (void *)ssi_aead_complete;
	ssi_req.user_arg = (void *)req;

#ifdef ENABLE_CYCLE_COUNT
	ssi_req.op_type = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
		STAT_OP_TYPE_DECODE : STAT_OP_TYPE_ENCODE;
#endif
	/* Setup request context */
	areq_ctx->gen_ctx.op_type = direct;
	areq_ctx->req_authsize = ctx->authsize;
	areq_ctx->cipher_mode = ctx->cipher_mode;

	END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_0);

	/* STAT_PHASE_1: Map buffers */
	START_CYCLE_COUNT();

	if (ctx->cipher_mode == DRV_CIPHER_CTR) {
		/* Build CTR IV - Copy nonce from last 4 bytes in
@@ -2095,10 +2067,8 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
		ssi_req.ivgen_size = crypto_aead_ivsize(tfm);
	}

	END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_1);

	/* STAT_PHASE_2: Create sequence */
	START_CYCLE_COUNT();

	/* Load MLLI tables to SRAM if necessary */
	ssi_aead_load_mlli_to_sram(req, desc, &seq_len);
@@ -2133,10 +2103,8 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
		goto exit;
	}

	END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_2);

	/* STAT_PHASE_3: Lock HW and push sequence */
	START_CYCLE_COUNT();

	rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 1);

@@ -2146,7 +2114,6 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
	}


	END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_3);
exit:
	return rc;
}
+0 −20
Original line number Diff line number Diff line
@@ -323,7 +323,6 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
	struct device *dev = &ctx_p->drvdata->plat_dev->dev;
	u32 tmp[DES_EXPKEY_WORDS];
	unsigned int max_key_buf_size = get_max_keysize(tfm);
	DECL_CYCLE_COUNT_RESOURCES;

	SSI_LOG_DEBUG("Setting key in context @%p for %s. keylen=%u\n",
		ctx_p, crypto_tfm_alg_name(tfm), keylen);
@@ -334,7 +333,6 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
	SSI_LOG_DEBUG("ssi_blkcipher_setkey: after FIPS check");

	/* STAT_PHASE_0: Init and sanity checks */
	START_CYCLE_COUNT();

#if SSI_CC_HAS_MULTI2
	/*last byte of key buffer is round number and should not be a part of key size*/
@@ -379,7 +377,6 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
		}

		ctx_p->keylen = keylen;
		END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_0);
		SSI_LOG_DEBUG("ssi_blkcipher_setkey: ssi_is_hw_key ret 0");

		return 0;
@@ -407,10 +404,8 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
	}


	END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_0);

	/* STAT_PHASE_1: Copy key to ctx */
	START_CYCLE_COUNT();
	dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
					max_key_buf_size, DMA_TO_DEVICE);
#if SSI_CC_HAS_MULTI2
@@ -448,7 +443,6 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
					max_key_buf_size, DMA_TO_DEVICE);
	ctx_p->keylen = keylen;

	END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_1);

	 SSI_LOG_DEBUG("ssi_blkcipher_setkey: return safely");
	return 0;
@@ -736,11 +730,8 @@ static int ssi_blkcipher_complete(struct device *dev,
{
	int completion_error = 0;
	u32 inflight_counter;
	DECL_CYCLE_COUNT_RESOURCES;

	START_CYCLE_COUNT();
	ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
	END_CYCLE_COUNT(STAT_OP_TYPE_GENERIC, STAT_PHASE_4);


	/*Set the inflight couter value to local variable*/
@@ -771,7 +762,6 @@ static int ssi_blkcipher_process(
	struct cc_hw_desc desc[MAX_ABLKCIPHER_SEQ_LEN];
	struct ssi_crypto_req ssi_req = {};
	int rc, seq_len = 0,cts_restore_flag = 0;
	DECL_CYCLE_COUNT_RESOURCES;

	SSI_LOG_DEBUG("%s areq=%p info=%p nbytes=%d\n",
		((direction==DRV_CRYPTO_DIRECTION_ENCRYPT)?"Encrypt":"Decrypt"),
@@ -779,7 +769,6 @@ static int ssi_blkcipher_process(

	CHECK_AND_RETURN_UPON_FIPS_ERROR();
	/* STAT_PHASE_0: Init and sanity checks */
	START_CYCLE_COUNT();

	/* TODO: check data length according to mode */
	if (unlikely(validate_data_size(ctx_p, nbytes))) {
@@ -811,10 +800,8 @@ static int ssi_blkcipher_process(
	/* Setup request context */
	req_ctx->gen_ctx.op_type = direction;

	END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_0);

	/* STAT_PHASE_1: Map buffers */
	START_CYCLE_COUNT();

	rc = ssi_buffer_mgr_map_blkcipher_request(ctx_p->drvdata, req_ctx, ivsize, nbytes, info, src, dst);
	if (unlikely(rc != 0)) {
@@ -822,10 +809,8 @@ static int ssi_blkcipher_process(
		goto exit_process;
	}

	END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_1);

	/* STAT_PHASE_2: Create sequence */
	START_CYCLE_COUNT();

	/* Setup processing */
#if SSI_CC_HAS_MULTI2
@@ -860,10 +845,8 @@ static int ssi_blkcipher_process(
		/* set the IV size (8/16 B long)*/
		ssi_req.ivgen_size = ivsize;
	}
	END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_2);

	/* STAT_PHASE_3: Lock HW and push sequence */
	START_CYCLE_COUNT();

	rc = send_request(ctx_p->drvdata, &ssi_req, desc, seq_len, (areq == NULL)? 0:1);
	if(areq != NULL) {
@@ -872,13 +855,10 @@ static int ssi_blkcipher_process(
			ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
		}

		END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_3);
	} else {
		if (rc != 0) {
			ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
			END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_3);
		} else {
			END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_3);
			rc = ssi_blkcipher_complete(dev, ctx_p, req_ctx, dst,
						    src, ivsize, NULL,
						    ctx_p->drvdata->cc_base);
+0 −6
Original line number Diff line number Diff line
@@ -30,15 +30,9 @@
// #define DX_DUMP_BYTES
// #define CC_DEBUG
#define ENABLE_CC_SYSFS		/* Enable sysfs interface for debugging REE driver */
//#define ENABLE_CC_CYCLE_COUNT
//#define DX_IRQ_DELAY 100000
#define DMA_BIT_MASK_LEN	48	/* was 32 bit, but for juno's sake it was enlarged to 48 bit */

#if defined ENABLE_CC_CYCLE_COUNT && defined ENABLE_CC_SYSFS
#define CC_CYCLE_COUNT
#endif


#if defined (CONFIG_ARM64)	// TODO currently only this mode was test on Juno (which is ARM64), need to enable coherent also.
#define DISABLE_COHERENT_DMA_OPS
#endif
+0 −8
Original line number Diff line number Diff line
@@ -118,10 +118,8 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
	void __iomem *cc_base = drvdata->cc_base;
	u32 irr;
	u32 imr;
	DECL_CYCLE_COUNT_RESOURCES;

	/* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
	START_CYCLE_COUNT();

	/* read the interrupt status */
	irr = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRR));
@@ -168,9 +166,6 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
		/* Just warning */
	}

	END_CYCLE_COUNT(STAT_OP_TYPE_GENERIC, STAT_PHASE_0);
	START_CYCLE_COUNT_AT(drvdata->isr_exit_cycles);

	return IRQ_HANDLED;
}

@@ -509,9 +504,6 @@ static int cc7x_remove(struct platform_device *plat_dev)
	cleanup_cc_resources(plat_dev);

	SSI_LOG(KERN_INFO, "ARM cc7x_ree device terminated\n");
#ifdef ENABLE_CYCLE_COUNT
	display_all_stat_db();
#endif

	return 0;
}
Loading