Commit 6191eb1d authored by Gilad Ben-Yossef, committed by Greg Kroah-Hartman
Browse files

staging: ccree: remove comparisons to NULL



Remove explicit comparisons to NULL in ccree driver.

Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 44c891af
Loading
Loading
Loading
Loading
+17 −17
Original line number Diff line number Diff line
@@ -98,7 +98,7 @@ static void ssi_aead_exit(struct crypto_aead *tfm)

	dev = &ctx->drvdata->plat_dev->dev;
	/* Unmap enckey buffer */
	if (ctx->enckey != NULL) {
	if (ctx->enckey) {
		dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey, ctx->enckey_dma_addr);
		SSI_LOG_DEBUG("Freed enckey DMA buffer enckey_dma_addr=0x%llX\n",
			(unsigned long long)ctx->enckey_dma_addr);
@@ -107,7 +107,7 @@ static void ssi_aead_exit(struct crypto_aead *tfm)
	}

	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authetication */
		if (ctx->auth_state.xcbc.xcbc_keys != NULL) {
		if (ctx->auth_state.xcbc.xcbc_keys) {
			dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
				ctx->auth_state.xcbc.xcbc_keys,
				ctx->auth_state.xcbc.xcbc_keys_dma_addr);
@@ -117,7 +117,7 @@ static void ssi_aead_exit(struct crypto_aead *tfm)
		ctx->auth_state.xcbc.xcbc_keys_dma_addr = 0;
		ctx->auth_state.xcbc.xcbc_keys = NULL;
	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
		if (ctx->auth_state.hmac.ipad_opad != NULL) {
		if (ctx->auth_state.hmac.ipad_opad) {
			dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
				ctx->auth_state.hmac.ipad_opad,
				ctx->auth_state.hmac.ipad_opad_dma_addr);
@@ -126,7 +126,7 @@ static void ssi_aead_exit(struct crypto_aead *tfm)
			ctx->auth_state.hmac.ipad_opad_dma_addr = 0;
			ctx->auth_state.hmac.ipad_opad = NULL;
		}
		if (ctx->auth_state.hmac.padded_authkey != NULL) {
		if (ctx->auth_state.hmac.padded_authkey) {
			dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
				ctx->auth_state.hmac.padded_authkey,
				ctx->auth_state.hmac.padded_authkey_dma_addr);
@@ -160,7 +160,7 @@ static int ssi_aead_init(struct crypto_aead *tfm)
	/* Allocate key buffer, cache line aligned */
	ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
		&ctx->enckey_dma_addr, GFP_KERNEL);
	if (ctx->enckey == NULL) {
	if (!ctx->enckey) {
		SSI_LOG_ERR("Failed allocating key buffer\n");
		goto init_failed;
	}
@@ -174,7 +174,7 @@ static int ssi_aead_init(struct crypto_aead *tfm)
		ctx->auth_state.xcbc.xcbc_keys = dma_alloc_coherent(dev,
			CC_AES_128_BIT_KEY_SIZE * 3,
			&ctx->auth_state.xcbc.xcbc_keys_dma_addr, GFP_KERNEL);
		if (ctx->auth_state.xcbc.xcbc_keys == NULL) {
		if (!ctx->auth_state.xcbc.xcbc_keys) {
			SSI_LOG_ERR("Failed allocating buffer for XCBC keys\n");
			goto init_failed;
		}
@@ -183,7 +183,7 @@ static int ssi_aead_init(struct crypto_aead *tfm)
		ctx->auth_state.hmac.ipad_opad = dma_alloc_coherent(dev,
			2 * MAX_HMAC_DIGEST_SIZE,
			&ctx->auth_state.hmac.ipad_opad_dma_addr, GFP_KERNEL);
		if (ctx->auth_state.hmac.ipad_opad == NULL) {
		if (!ctx->auth_state.hmac.ipad_opad) {
			SSI_LOG_ERR("Failed allocating IPAD/OPAD buffer\n");
			goto init_failed;
		}
@@ -193,7 +193,7 @@ static int ssi_aead_init(struct crypto_aead *tfm)
		ctx->auth_state.hmac.padded_authkey = dma_alloc_coherent(dev,
			MAX_HMAC_BLOCK_SIZE,
			&ctx->auth_state.hmac.padded_authkey_dma_addr, GFP_KERNEL);
		if (ctx->auth_state.hmac.padded_authkey == NULL) {
		if (!ctx->auth_state.hmac.padded_authkey) {
			SSI_LOG_ERR("failed to allocate padded_authkey\n");
			goto init_failed;
		}
@@ -242,7 +242,7 @@ static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *c
				areq->cryptlen + areq_ctx->dstOffset + ctx->authsize, SSI_SG_FROM_BUF);

		/* If an IV was generated, copy it back to the user provided buffer. */
		if (areq_ctx->backup_giv != NULL) {
		if (areq_ctx->backup_giv) {
			if (ctx->cipher_mode == DRV_CIPHER_CTR)
				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_IV_SIZE);
			else if (ctx->cipher_mode == DRV_CIPHER_CCM)
@@ -1848,7 +1848,7 @@ static inline void ssi_aead_dump_gcm(
	if (ctx->cipher_mode != DRV_CIPHER_GCTR)
		return;

	if (title != NULL) {
	if (title) {
		SSI_LOG_DEBUG("----------------------------------------------------------------------------------");
		SSI_LOG_DEBUG("%s\n", title);
	}
@@ -1856,7 +1856,7 @@ static inline void ssi_aead_dump_gcm(
	SSI_LOG_DEBUG("cipher_mode %d, authsize %d, enc_keylen %d, assoclen %d, cryptlen %d\n", \
				 ctx->cipher_mode, ctx->authsize, ctx->enc_keylen, req->assoclen, req_ctx->cryptlen);

	if (ctx->enckey != NULL)
	if (ctx->enckey)
		dump_byte_array("mac key", ctx->enckey, 16);

	dump_byte_array("req->iv", req->iv, AES_BLOCK_SIZE);
@@ -1871,10 +1871,10 @@ static inline void ssi_aead_dump_gcm(

	dump_byte_array("gcm_len_block", req_ctx->gcm_len_block.lenA, AES_BLOCK_SIZE);

	if (req->src != NULL && req->cryptlen)
	if (req->src && req->cryptlen)
		dump_byte_array("req->src", sg_virt(req->src), req->cryptlen + req->assoclen);

	if (req->dst != NULL)
	if (req->dst)
		dump_byte_array("req->dst", sg_virt(req->dst), req->cryptlen + ctx->authsize + req->assoclen);
}
#endif
@@ -1981,7 +1981,7 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
		 * CTR key to first 4 bytes in CTR IV
		 */
		memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce, CTR_RFC3686_NONCE_SIZE);
		if (areq_ctx->backup_giv == NULL) /*User none-generated IV*/
		if (!areq_ctx->backup_giv) /*User none-generated IV*/
			memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
				req->iv, CTR_RFC3686_IV_SIZE);
		/* Initialize counter portion of counter block */
@@ -2033,7 +2033,7 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
	}

	/* do we need to generate IV? */
	if (areq_ctx->backup_giv != NULL) {
	if (areq_ctx->backup_giv) {
		/* set the DMA mapped IV address*/
		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
			ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr + CTR_RFC3686_NONCE_SIZE;
@@ -2685,7 +2685,7 @@ int ssi_aead_free(struct ssi_drvdata *drvdata)
	struct ssi_aead_handle *aead_handle =
		(struct ssi_aead_handle *)drvdata->aead_handle;

	if (aead_handle != NULL) {
	if (aead_handle) {
		/* Remove registered algs */
		list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list, entry) {
			crypto_unregister_aead(&t_alg->aead_alg);
@@ -2707,7 +2707,7 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
	int alg;

	aead_handle = kmalloc(sizeof(struct ssi_aead_handle), GFP_KERNEL);
	if (aead_handle == NULL) {
	if (!aead_handle) {
		rc = -ENOMEM;
		goto fail0;
	}
+22 −22
Original line number Diff line number Diff line
@@ -94,7 +94,7 @@ static unsigned int ssi_buffer_mgr_get_sgl_nents(
			sg_list = sg_next(sg_list);
		} else {
			sg_list = (struct scatterlist *)sg_page(sg_list);
			if (is_chained != NULL)
			if (is_chained)
				*is_chained = true;
		}
	}
@@ -113,7 +113,7 @@ void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len)
	int sg_index = 0;

	while (sg_index <= data_len) {
		if (current_sg == NULL) {
		if (!current_sg) {
			/* reached the end of the sgl --> just return back */
			return;
		}
@@ -190,7 +190,7 @@ static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
	u32 *mlli_entry_p = *mlli_entry_pp;
	s32 rc = 0;

	for ( ; (curr_sgl != NULL) && (sgl_data_len != 0);
	for ( ; (curr_sgl) && (sgl_data_len != 0);
	      curr_sgl = sg_next(curr_sgl)) {
		u32 entry_data_len =
			(sgl_data_len > sg_dma_len(curr_sgl) - sglOffset) ?
@@ -223,7 +223,7 @@ static int ssi_buffer_mgr_generate_mlli(
	mlli_params->mlli_virt_addr = dma_pool_alloc(
			mlli_params->curr_pool, GFP_KERNEL,
			&(mlli_params->mlli_dma_addr));
	if (unlikely(mlli_params->mlli_virt_addr == NULL)) {
	if (unlikely(!mlli_params->mlli_virt_addr)) {
		SSI_LOG_ERR("dma_pool_alloc() failed\n");
		rc = -ENOMEM;
		goto build_mlli_exit;
@@ -246,7 +246,7 @@ static int ssi_buffer_mgr_generate_mlli(
			return rc;

		/* set last bit in the current table */
		if (sg_data->mlli_nents[i] != NULL) {
		if (sg_data->mlli_nents[i]) {
			/*Calculate the current MLLI table length for the
			 *length field in the descriptor
			 */
@@ -286,7 +286,7 @@ static inline void ssi_buffer_mgr_add_buffer_entry(
	sgl_data->type[index] = DMA_BUFF_TYPE;
	sgl_data->is_last[index] = is_last_entry;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index] != NULL)
	if (sgl_data->mlli_nents[index])
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}
@@ -311,7 +311,7 @@ static inline void ssi_buffer_mgr_add_scatterlist_entry(
	sgl_data->type[index] = DMA_SGL_TYPE;
	sgl_data->is_last[index] = is_last_table;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index] != NULL)
	if (sgl_data->mlli_nents[index])
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}
@@ -323,7 +323,7 @@ ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
	u32 i, j;
	struct scatterlist *l_sg = sg;
	for (i = 0; i < nents; i++) {
		if (l_sg == NULL)
		if (!l_sg)
			break;
		if (unlikely(dma_map_sg(dev, l_sg, 1, direction) != 1)) {
			SSI_LOG_ERR("dma_map_page() sg buffer failed\n");
@@ -336,7 +336,7 @@ ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
err:
	/* Restore mapped parts */
	for (j = 0; j < i; j++) {
		if (sg == NULL)
		if (!sg)
			break;
		dma_unmap_sg(dev, sg, 1, direction);
		sg = sg_next(sg);
@@ -672,7 +672,7 @@ void ssi_buffer_mgr_unmap_aead_request(
	/*In case a pool was set, a table was
	 *allocated and should be released
	 */
	if (areq_ctx->mlli_params.curr_pool != NULL) {
	if (areq_ctx->mlli_params.curr_pool) {
		SSI_LOG_DEBUG("free MLLI buffer: dma=0x%08llX virt=%pK\n",
			(unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
			areq_ctx->mlli_params.mlli_virt_addr);
@@ -731,12 +731,12 @@ static inline int ssi_buffer_mgr_get_aead_icv_nents(
	}

	for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
		if (sgl == NULL)
		if (!sgl)
			break;
		sgl = sg_next(sgl);
	}

	if (sgl != NULL)
	if (sgl)
		icv_max_size = sgl->length;

	if (last_entry_data_size > authsize) {
@@ -773,7 +773,7 @@ static inline int ssi_buffer_mgr_aead_chain_iv(
	struct device *dev = &drvdata->plat_dev->dev;
	int rc = 0;

	if (unlikely(req->iv == NULL)) {
	if (unlikely(!req->iv)) {
		areq_ctx->gen_ctx.iv_dma_addr = 0;
		goto chain_iv_exit;
	}
@@ -823,7 +823,7 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
	if (areq_ctx->is_gcm4543)
		size_of_assoc += crypto_aead_ivsize(tfm);

	if (sg_data == NULL) {
	if (!sg_data) {
		rc = -EINVAL;
		goto chain_assoc_exit;
	}
@@ -847,7 +847,7 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
		while (sg_index <= size_of_assoc) {
			current_sg = sg_next(current_sg);
			//if have reached the end of the sgl, then this is unexpected
			if (current_sg == NULL) {
			if (!current_sg) {
				SSI_LOG_ERR("reached end of sg list. unexpected\n");
				BUG();
			}
@@ -1108,7 +1108,7 @@ static inline int ssi_buffer_mgr_aead_chain_data(

	offset = size_to_skip;

	if (sg_data == NULL) {
	if (!sg_data) {
		rc = -EINVAL;
		goto chain_data_exit;
	}
@@ -1126,7 +1126,7 @@ static inline int ssi_buffer_mgr_aead_chain_data(
		offset -= areq_ctx->srcSgl->length;
		areq_ctx->srcSgl = sg_next(areq_ctx->srcSgl);
		//if have reached the end of the sgl, then this is unexpected
		if (areq_ctx->srcSgl == NULL) {
		if (!areq_ctx->srcSgl) {
			SSI_LOG_ERR("reached end of sg list. unexpected\n");
			BUG();
		}
@@ -1169,7 +1169,7 @@ static inline int ssi_buffer_mgr_aead_chain_data(
		offset -= areq_ctx->dstSgl->length;
		areq_ctx->dstSgl = sg_next(areq_ctx->dstSgl);
		//if have reached the end of the sgl, then this is unexpected
		if (areq_ctx->dstSgl == NULL) {
		if (!areq_ctx->dstSgl) {
			SSI_LOG_ERR("reached end of sg list. unexpected\n");
			BUG();
		}
@@ -1685,7 +1685,7 @@ void ssi_buffer_mgr_unmap_hash_request(
	/*In case a pool was set, a table was
	 *allocated and should be released
	 */
	if (areq_ctx->mlli_params.curr_pool != NULL) {
	if (areq_ctx->mlli_params.curr_pool) {
		SSI_LOG_DEBUG("free MLLI buffer: dma=0x%llX virt=%pK\n",
			     (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
			     areq_ctx->mlli_params.mlli_virt_addr);
@@ -1726,7 +1726,7 @@ int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata)

	buff_mgr_handle = (struct buff_mgr_handle *)
		kmalloc(sizeof(struct buff_mgr_handle), GFP_KERNEL);
	if (buff_mgr_handle == NULL)
	if (!buff_mgr_handle)
		return -ENOMEM;

	drvdata->buff_mgr_handle = buff_mgr_handle;
@@ -1737,7 +1737,7 @@ int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata)
				LLI_ENTRY_BYTE_SIZE,
				MLLI_TABLE_MIN_ALIGNMENT, 0);

	if (unlikely(buff_mgr_handle->mlli_buffs_pool == NULL))
	if (unlikely(!buff_mgr_handle->mlli_buffs_pool))
		goto error;

	return 0;
@@ -1751,7 +1751,7 @@ int ssi_buffer_mgr_fini(struct ssi_drvdata *drvdata)
{
	struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;

	if (buff_mgr_handle  != NULL) {
	if (buff_mgr_handle) {
		dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
		kfree(drvdata->buff_mgr_handle);
		drvdata->buff_mgr_handle = NULL;
+6 −6
Original line number Diff line number Diff line
@@ -653,7 +653,7 @@ ssi_blkcipher_create_data_desc(
			     nbytes, NS_BIT);
		set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
			      nbytes, NS_BIT, (!areq ? 0 : 1));
		if (areq != NULL)
		if (areq)
			set_queue_last_ind(&desc[*seq_size]);

		set_flow_mode(&desc[*seq_size], flow_mode);
@@ -702,7 +702,7 @@ ssi_blkcipher_create_data_desc(
				      req_ctx->out_mlli_nents, NS_BIT,
				      (!areq ? 0 : 1));
		}
		if (areq != NULL)
		if (areq)
			set_queue_last_ind(&desc[*seq_size]);

		set_flow_mode(&desc[*seq_size], flow_mode);
@@ -829,8 +829,8 @@ static int ssi_blkcipher_process(

	/* STAT_PHASE_3: Lock HW and push sequence */

	rc = send_request(ctx_p->drvdata, &ssi_req, desc, seq_len, (areq == NULL) ? 0 : 1);
	if (areq != NULL) {
	rc = send_request(ctx_p->drvdata, &ssi_req, desc, seq_len, (!areq) ? 0 : 1);
	if (areq) {
		if (unlikely(rc != -EINPROGRESS)) {
			/* Failed to send the request or request completed synchronously */
			ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
@@ -1292,7 +1292,7 @@ int ssi_ablkcipher_free(struct ssi_drvdata *drvdata)
	struct device *dev;
	dev = &drvdata->plat_dev->dev;

	if (blkcipher_handle != NULL) {
	if (blkcipher_handle) {
		/* Remove registered algs */
		list_for_each_entry_safe(t_alg, n,
				&blkcipher_handle->blkcipher_alg_list,
@@ -1318,7 +1318,7 @@ int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)

	ablkcipher_handle = kmalloc(sizeof(struct ssi_blkcipher_handle),
		GFP_KERNEL);
	if (ablkcipher_handle == NULL)
	if (!ablkcipher_handle)
		return -ENOMEM;

	drvdata->blkcipher_handle = ablkcipher_handle;
+10 −10
Original line number Diff line number Diff line
@@ -81,7 +81,7 @@ void dump_byte_array(const char *name, const u8 *the_array, unsigned long size)
	const u8 *cur_byte;
	char line_buf[80];

	if (the_array == NULL) {
	if (!the_array) {
		SSI_LOG_ERR("cannot dump_byte_array - NULL pointer\n");
		return;
	}
@@ -231,7 +231,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
	u32 signature_val;
	int rc = 0;

	if (unlikely(new_drvdata == NULL)) {
	if (unlikely(!new_drvdata)) {
		SSI_LOG_ERR("Failed to allocate drvdata");
		rc = -ENOMEM;
		goto init_cc_res_err;
@@ -247,7 +247,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
	/* Get device resources */
	/* First CC registers space */
	new_drvdata->res_mem = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
	if (unlikely(new_drvdata->res_mem == NULL)) {
	if (unlikely(!new_drvdata->res_mem)) {
		SSI_LOG_ERR("Failed getting IO memory resource\n");
		rc = -ENODEV;
		goto init_cc_res_err;
@@ -258,14 +258,14 @@ static int init_cc_resources(struct platform_device *plat_dev)
		(unsigned long long)new_drvdata->res_mem->end);
	/* Map registers space */
	req_mem_cc_regs = request_mem_region(new_drvdata->res_mem->start, resource_size(new_drvdata->res_mem), "arm_cc7x_regs");
	if (unlikely(req_mem_cc_regs == NULL)) {
	if (unlikely(!req_mem_cc_regs)) {
		SSI_LOG_ERR("Couldn't allocate registers memory region at "
			     "0x%08X\n", (unsigned int)new_drvdata->res_mem->start);
		rc = -EBUSY;
		goto init_cc_res_err;
	}
	cc_base = ioremap(new_drvdata->res_mem->start, resource_size(new_drvdata->res_mem));
	if (unlikely(cc_base == NULL)) {
	if (unlikely(!cc_base)) {
		SSI_LOG_ERR("ioremap[CC](0x%08X,0x%08X) failed\n",
			(unsigned int)new_drvdata->res_mem->start, (unsigned int)resource_size(new_drvdata->res_mem));
		rc = -ENOMEM;
@@ -277,7 +277,7 @@ static int init_cc_resources(struct platform_device *plat_dev)

	/* Then IRQ */
	new_drvdata->res_irq = platform_get_resource(plat_dev, IORESOURCE_IRQ, 0);
	if (unlikely(new_drvdata->res_irq == NULL)) {
	if (unlikely(!new_drvdata->res_irq)) {
		SSI_LOG_ERR("Failed getting IRQ resource\n");
		rc = -ENODEV;
		goto init_cc_res_err;
@@ -302,7 +302,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
	if (rc)
		goto init_cc_res_err;

	if (new_drvdata->plat_dev->dev.dma_mask == NULL)
	if (!new_drvdata->plat_dev->dev.dma_mask)
	{
		new_drvdata->plat_dev->dev.dma_mask = &new_drvdata->plat_dev->dev.coherent_dma_mask;
	}
@@ -408,7 +408,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
init_cc_res_err:
	SSI_LOG_ERR("Freeing CC HW resources!\n");

	if (new_drvdata != NULL) {
	if (new_drvdata) {
		ssi_aead_free(new_drvdata);
		ssi_hash_free(new_drvdata);
		ssi_ablkcipher_free(new_drvdata);
@@ -422,7 +422,7 @@ init_cc_res_err:
		ssi_sysfs_fini();
#endif

		if (req_mem_cc_regs != NULL) {
		if (req_mem_cc_regs) {
			if (irq_registered) {
				free_irq(new_drvdata->res_irq->start, new_drvdata);
				new_drvdata->res_irq = NULL;
@@ -470,7 +470,7 @@ static void cleanup_cc_resources(struct platform_device *plat_dev)
	free_irq(drvdata->res_irq->start, drvdata);
	drvdata->res_irq = NULL;

	if (drvdata->cc_base != NULL) {
	if (drvdata->cc_base) {
		iounmap(drvdata->cc_base);
		release_mem_region(drvdata->res_mem->start,
			resource_size(drvdata->res_mem));
+2 −2
Original line number Diff line number Diff line
@@ -34,7 +34,7 @@ int ssi_fips_get_state(ssi_fips_state_t *p_state)
{
	int rc = 0;

	if (p_state == NULL)
	if (!p_state)
		return -EINVAL;

	rc = ssi_fips_ext_get_state(p_state);
@@ -52,7 +52,7 @@ int ssi_fips_get_error(ssi_fips_error_t *p_err)
{
	int rc = 0;

	if (p_err == NULL)
	if (!p_err)
		return -EINVAL;

	rc = ssi_fips_ext_get_error(p_err);
Loading