Commit ccd59dce authored by Liu Yi L, committed by Alex Williamson
Browse files

vfio/type1: Refactor vfio_iommu_type1_ioctl()



This patch refactors vfio_iommu_type1_ioctl() to use a switch statement
instead of an if-else chain, giving each command its own helper function.

Cc: Kevin Tian <kevin.tian@intel.com>
Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Eric Auger <eric.auger@redhat.com>
Cc: Jean-Philippe Brucker <jean-philippe@linaro.org>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Suggested-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Liu Yi L <yi.l.liu@intel.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
parent 50173329
Loading
Loading
Loading
Loading
+213 −181
Original line number Diff line number Diff line
@@ -2455,6 +2455,23 @@ static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu)
	return ret;
}

/*
 * Handle VFIO_CHECK_EXTENSION: report whether this IOMMU backend
 * supports the queried extension.  Returns 1 if supported, 0 if not.
 * VFIO_DMA_CC_IOMMU additionally requires an attached iommu whose
 * domains all provide cache coherency.
 */
static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu,
					    unsigned long arg)
{
	/* The type1 flavors are unconditionally available. */
	if (arg == VFIO_TYPE1_IOMMU || arg == VFIO_TYPE1v2_IOMMU ||
	    arg == VFIO_TYPE1_NESTING_IOMMU)
		return 1;

	/*
	 * Cache coherency can only be confirmed once an iommu exists;
	 * without one we conservatively report no support.
	 */
	if (arg == VFIO_DMA_CC_IOMMU)
		return iommu ? vfio_domains_have_iommu_cache(iommu) : 0;

	/* Unknown extension. */
	return 0;
}

static int vfio_iommu_iova_add_cap(struct vfio_info_cap *caps,
		 struct vfio_iommu_type1_info_cap_iova_range *cap_iovas,
		 size_t size)
@@ -2531,27 +2548,11 @@ static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu,
	return vfio_info_add_capability(caps, &cap_mig.header, sizeof(cap_mig));
}

static long vfio_iommu_type1_ioctl(void *iommu_data,
				   unsigned int cmd, unsigned long arg)
static int vfio_iommu_type1_get_info(struct vfio_iommu *iommu,
				     unsigned long arg)
{
	struct vfio_iommu *iommu = iommu_data;
	unsigned long minsz;

	if (cmd == VFIO_CHECK_EXTENSION) {
		switch (arg) {
		case VFIO_TYPE1_IOMMU:
		case VFIO_TYPE1v2_IOMMU:
		case VFIO_TYPE1_NESTING_IOMMU:
			return 1;
		case VFIO_DMA_CC_IOMMU:
			if (!iommu)
				return 0;
			return vfio_domains_have_iommu_cache(iommu);
		default:
			return 0;
		}
	} else if (cmd == VFIO_IOMMU_GET_INFO) {
	struct vfio_iommu_type1_info info;
	unsigned long minsz;
	struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
	unsigned long capsz;
	int ret;
@@ -2608,11 +2609,14 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,

	return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
}

	} else if (cmd == VFIO_IOMMU_MAP_DMA) {
static int vfio_iommu_type1_map_dma(struct vfio_iommu *iommu,
				    unsigned long arg)
{
	struct vfio_iommu_type1_dma_map map;
		uint32_t mask = VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE;
	unsigned long minsz;
	uint32_t mask = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;

	minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

@@ -2623,10 +2627,14 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
		return -EINVAL;

	return vfio_dma_do_map(iommu, &map);
}

	} else if (cmd == VFIO_IOMMU_UNMAP_DMA) {
static int vfio_iommu_type1_unmap_dma(struct vfio_iommu *iommu,
				      unsigned long arg)
{
	struct vfio_iommu_type1_dma_unmap unmap;
	struct vfio_bitmap bitmap = { 0 };
	unsigned long minsz;
	int ret;

	minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);
@@ -2665,18 +2673,22 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,

	return copy_to_user((void __user *)arg, &unmap, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_IOMMU_DIRTY_PAGES) {
}

static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
					unsigned long arg)
{
	struct vfio_iommu_type1_dirty_bitmap dirty;
	uint32_t mask = VFIO_IOMMU_DIRTY_PAGES_FLAG_START |
			VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP |
			VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
	unsigned long minsz;
	int ret = 0;

	if (!iommu->v2)
		return -EACCES;

		minsz = offsetofend(struct vfio_iommu_type1_dirty_bitmap,
				    flags);
	minsz = offsetofend(struct vfio_iommu_type1_dirty_bitmap, flags);

	if (copy_from_user(&dirty, (void __user *)arg, minsz))
		return -EFAULT;
@@ -2708,8 +2720,7 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
		}
		mutex_unlock(&iommu->lock);
		return 0;
		} else if (dirty.flags &
				 VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP) {
	} else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP) {
		struct vfio_iommu_type1_dirty_bitmap_get range;
		unsigned long pgshift;
		size_t data_size = dirty.argsz - minsz;
@@ -2754,7 +2765,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,

		if (iommu->dirty_page_tracking)
			ret = vfio_iova_dirty_bitmap(range.bitmap.data,
						iommu, range.iova, range.size,
						     iommu, range.iova,
						     range.size,
						     range.bitmap.pgsize);
		else
			ret = -EINVAL;
@@ -2763,10 +2775,30 @@ out_unlock:

		return ret;
	}

	return -EINVAL;
}

/*
 * Top-level ioctl dispatcher for the type1 IOMMU backend: route each
 * supported command to its dedicated helper.  Unrecognized commands
 * yield -ENOTTY per ioctl convention.
 */
static long vfio_iommu_type1_ioctl(void *iommu_data,
				   unsigned int cmd, unsigned long arg)
{
	struct vfio_iommu *iommu = iommu_data;

	if (cmd == VFIO_CHECK_EXTENSION)
		return vfio_iommu_type1_check_extension(iommu, arg);
	if (cmd == VFIO_IOMMU_GET_INFO)
		return vfio_iommu_type1_get_info(iommu, arg);
	if (cmd == VFIO_IOMMU_MAP_DMA)
		return vfio_iommu_type1_map_dma(iommu, arg);
	if (cmd == VFIO_IOMMU_UNMAP_DMA)
		return vfio_iommu_type1_unmap_dma(iommu, arg);
	if (cmd == VFIO_IOMMU_DIRTY_PAGES)
		return vfio_iommu_type1_dirty_pages(iommu, arg);

	return -ENOTTY;
}

static int vfio_iommu_type1_register_notifier(void *iommu_data,
					      unsigned long *events,