Commit 0ea9ee43 authored by Max Gurtovoy, committed by Michael S. Tsirkin
Browse files

vdpasim: protect concurrent access to iommu iotlb



Iommu iotlb can be accessed by different cores for performing IO using
multiple virt queues. Add a spinlock to synchronize iotlb accesses.

This could be easily reproduced when using more than 1 pktgen threads
to inject traffic to vdpa simulator.

Fixes: 2c53d0f6 ("vdpasim: vDPA device simulator")
Cc: stable@vger.kernel.org
Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Link: https://lore.kernel.org/r/20200731073822.13326-1-jasowang@redhat.com


Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent 6234f805
Loading
Loading
Loading
Loading
+27 −4
Original line number Diff line number Diff line
@@ -71,6 +71,8 @@ struct vdpasim {
	u32 status;
	u32 generation;
	u64 features;
	/* spinlock to synchronize iommu table */
	spinlock_t iommu_lock;
};

/* TODO: cross-endian support */
@@ -136,7 +138,9 @@ static void vdpasim_reset(struct vdpasim *vdpasim)
	for (i = 0; i < VDPASIM_VQ_NUM; i++)
		vdpasim_vq_reset(&vdpasim->vqs[i]);

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_reset(vdpasim->iommu);
	spin_unlock(&vdpasim->iommu_lock);

	vdpasim->features = 0;
	vdpasim->status = 0;
@@ -254,8 +258,10 @@ static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
	/* For simplicity, use identical mapping to avoid e.g iova
	 * allocator.
	 */
	spin_lock(&vdpasim->iommu_lock);
	ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1,
				    pa, dir_to_perm(dir));
	spin_unlock(&vdpasim->iommu_lock);
	if (ret)
		return DMA_MAPPING_ERROR;

@@ -269,8 +275,10 @@ static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(iommu, (u64)dma_addr,
			      (u64)dma_addr + size - 1);
	spin_unlock(&vdpasim->iommu_lock);
}

static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
@@ -282,9 +290,10 @@ static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
	void *addr = kmalloc(size, flag);
	int ret;

	if (!addr)
	spin_lock(&vdpasim->iommu_lock);
	if (!addr) {
		*dma_addr = DMA_MAPPING_ERROR;
	else {
	} else {
		u64 pa = virt_to_phys(addr);

		ret = vhost_iotlb_add_range(iommu, (u64)pa,
@@ -297,6 +306,7 @@ static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
		} else
			*dma_addr = (dma_addr_t)pa;
	}
	spin_unlock(&vdpasim->iommu_lock);

	return addr;
}
@@ -308,8 +318,11 @@ static void vdpasim_free_coherent(struct device *dev, size_t size,
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(iommu, (u64)dma_addr,
			      (u64)dma_addr + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	kfree(phys_to_virt((uintptr_t)dma_addr));
}

@@ -555,6 +568,7 @@ static int vdpasim_set_map(struct vdpa_device *vdpa,
	u64 start = 0ULL, last = 0ULL - 1;
	int ret;

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_reset(vdpasim->iommu);

	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
@@ -564,10 +578,12 @@ static int vdpasim_set_map(struct vdpa_device *vdpa,
		if (ret)
			goto err;
	}
	spin_unlock(&vdpasim->iommu_lock);
	return 0;

err:
	vhost_iotlb_reset(vdpasim->iommu);
	spin_unlock(&vdpasim->iommu_lock);
	return ret;
}

@@ -575,16 +591,23 @@ static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
			   u64 pa, u32 perm)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int ret;

	return vhost_iotlb_add_range(vdpasim->iommu, iova,
				     iova + size - 1, pa, perm);
	spin_lock(&vdpasim->iommu_lock);
	ret = vhost_iotlb_add_range(vdpasim->iommu, iova, iova + size - 1, pa,
				    perm);
	spin_unlock(&vdpasim->iommu_lock);

	return ret;
}

/*
 * vdpasim_dma_unmap() - remove a mapping from the simulator's iotlb.
 * @vdpa: vdpa device to operate on
 * @iova: start of the IOVA range to unmap
 * @size: length in bytes of the range
 *
 * Deletes the [iova, iova + size - 1] range from the device's iotlb
 * under iommu_lock, which serializes iotlb updates against concurrent
 * accesses from other virtqueues/cores (the point of this commit).
 *
 * Always returns 0; vhost_iotlb_del_range() has no failure mode here.
 */
static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	return 0;
}