Commit fb90a1c8 authored by Ayush Sawal, committed by David S. Miller
Browse files

Crypto/chcr: Calculate src and dst sg lengths separately for dma map



This patch calculates the src and dst sg lengths separately for
DMA mapping in the case of an AEAD operation.

This fixes a panic which occurs due to accessing a zero-length sg.
Panic:
[  138.173225] kernel BUG at drivers/iommu/intel-iommu.c:1184!

Signed-off-by: Ayush Sawal <ayush.sawal@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 934e36ec
Loading
Loading
Loading
Loading
+45 −18
Original line number Diff line number Diff line
@@ -2590,11 +2590,22 @@ int chcr_aead_dma_map(struct device *dev,
	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int dst_size;
	int src_len, dst_len;

	dst_size = req->assoclen + req->cryptlen + (op_type ?
	/* calculate and handle src and dst sg length separately
	 * for inplace and out-of place operations
	 */
	if (req->src == req->dst) {
		src_len = req->assoclen + req->cryptlen + (op_type ?
							0 : authsize);
	if (!req->cryptlen || !dst_size)
		dst_len = src_len;
	} else {
		src_len = req->assoclen + req->cryptlen;
		dst_len = req->assoclen + req->cryptlen + (op_type ?
							-authsize : authsize);
	}

	if (!req->cryptlen || !src_len || !dst_len)
		return 0;
	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
					DMA_BIDIRECTIONAL);
@@ -2606,19 +2617,22 @@ int chcr_aead_dma_map(struct device *dev,
		reqctx->b0_dma = 0;
	if (req->src == req->dst) {
		error = dma_map_sg(dev, req->src,
				sg_nents_for_len(req->src, dst_size),
				sg_nents_for_len(req->src, src_len),
					DMA_BIDIRECTIONAL);
		if (!error)
			goto err;
	} else {
		error = dma_map_sg(dev, req->src, sg_nents(req->src),
		error = dma_map_sg(dev, req->src,
				   sg_nents_for_len(req->src, src_len),
				   DMA_TO_DEVICE);
		if (!error)
			goto err;
		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
		error = dma_map_sg(dev, req->dst,
				   sg_nents_for_len(req->dst, dst_len),
				   DMA_FROM_DEVICE);
		if (!error) {
			dma_unmap_sg(dev, req->src, sg_nents(req->src),
			dma_unmap_sg(dev, req->src,
				     sg_nents_for_len(req->src, src_len),
				     DMA_TO_DEVICE);
			goto err;
		}
@@ -2637,23 +2651,36 @@ void chcr_aead_dma_unmap(struct device *dev,
	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int dst_size;
	int src_len, dst_len;

	dst_size = req->assoclen + req->cryptlen + (op_type ?
	/* calculate and handle src and dst sg length separately
	 * for inplace and out-of place operations
	 */
	if (req->src == req->dst) {
		src_len = req->assoclen + req->cryptlen + (op_type ?
							0 : authsize);
	if (!req->cryptlen || !dst_size)
		dst_len = src_len;
	} else {
		src_len = req->assoclen + req->cryptlen;
		dst_len = req->assoclen + req->cryptlen + (op_type ?
						-authsize : authsize);
	}

	if (!req->cryptlen || !src_len || !dst_len)
		return;

	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
					DMA_BIDIRECTIONAL);
	if (req->src == req->dst) {
		dma_unmap_sg(dev, req->src,
			     sg_nents_for_len(req->src, dst_size),
			     sg_nents_for_len(req->src, src_len),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(dev, req->src, sg_nents(req->src),
		dma_unmap_sg(dev, req->src,
			     sg_nents_for_len(req->src, src_len),
			     DMA_TO_DEVICE);
		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
		dma_unmap_sg(dev, req->dst,
			     sg_nents_for_len(req->dst, dst_len),
			     DMA_FROM_DEVICE);
	}
}