Commit 57b07793 authored by Linus Torvalds
Browse files
Pull virtio updates from Michael Tsirkin:

 - IRQ bypass support for vdpa and IFC

 - MLX5 vdpa driver

 - Endianness fixes for virtio drivers

 - Misc other fixes

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (71 commits)
  vdpa/mlx5: fix up endian-ness for mtu
  vdpa: Fix pointer math bug in vdpasim_get_config()
  vdpa/mlx5: Fix pointer math in mlx5_vdpa_get_config()
  vdpa/mlx5: fix memory allocation failure checks
  vdpa/mlx5: Fix uninitialised variable in core/mr.c
  vdpa_sim: init iommu lock
  virtio_config: fix up warnings on parisc
  vdpa/mlx5: Add VDPA driver for supported mlx5 devices
  vdpa/mlx5: Add shared memory registration code
  vdpa/mlx5: Add support library for mlx5 VDPA implementation
  vdpa/mlx5: Add hardware descriptive header file
  vdpa: Modify get_vq_state() to return error code
  net/vdpa: Use struct for set/get vq state
  vdpa: remove hard coded virtq num
  vdpasim: support batch updating
  vhost-vdpa: support IOTLB batching hints
  vhost-vdpa: support get/set backend features
  vhost: generialize backend features setting/getting
  vhost-vdpa: refine ioctl pre-processing
  vDPA: dont change vq irq after DRIVER_OK
  ...
parents ce13266d 8a7c3213
Loading
Loading
Loading
Loading
+1 −1
Original line number | Diff line number | Diff line
@@ -385,7 +385,7 @@ static irqreturn_t vu_req_interrupt(int irq, void *data)
		}
		break;
	case VHOST_USER_SLAVE_IOTLB_MSG:
		/* not supported - VIRTIO_F_IOMMU_PLATFORM */
		/* not supported - VIRTIO_F_ACCESS_PLATFORM */
	case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
		/* not supported - VHOST_USER_PROTOCOL_F_HOST_NOTIFIER */
	default:
+10 −2
Original line number Diff line number Diff line
@@ -10667,11 +10667,17 @@ int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	int ret;

	irqfd->producer = prod;

	return kvm_x86_ops.update_pi_irte(irqfd->kvm,
	kvm_arch_start_assignment(irqfd->kvm);
	ret = kvm_x86_ops.update_pi_irte(irqfd->kvm,
					 prod->irq, irqfd->gsi, 1);

	if (ret)
		kvm_arch_end_assignment(irqfd->kvm);

	return ret;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
@@ -10694,6 +10700,8 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
	if (ret)
		printk(KERN_INFO "irq bypass consumer (token %p) unregistration"
		       " fails: %d\n", irqfd->consumer.token, ret);

	kvm_arch_end_assignment(irqfd->kvm);
}

int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
+23 −23
Original line number Diff line number Diff line
@@ -204,7 +204,7 @@ static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
	u32 status;
	int err;

	virtio_cread(vcrypto->vdev,
	virtio_cread_le(vcrypto->vdev,
			struct virtio_crypto_config, status, &status);

	/*
@@ -323,30 +323,30 @@ static int virtcrypto_probe(struct virtio_device *vdev)
	if (!vcrypto)
		return -ENOMEM;

	virtio_cread(vdev, struct virtio_crypto_config,
	virtio_cread_le(vdev, struct virtio_crypto_config,
			max_dataqueues, &max_data_queues);
	if (max_data_queues < 1)
		max_data_queues = 1;

	virtio_cread(vdev, struct virtio_crypto_config,
	virtio_cread_le(vdev, struct virtio_crypto_config,
			max_cipher_key_len, &max_cipher_key_len);
	virtio_cread(vdev, struct virtio_crypto_config,
	virtio_cread_le(vdev, struct virtio_crypto_config,
			max_auth_key_len, &max_auth_key_len);
	virtio_cread(vdev, struct virtio_crypto_config,
	virtio_cread_le(vdev, struct virtio_crypto_config,
			max_size, &max_size);
	virtio_cread(vdev, struct virtio_crypto_config,
	virtio_cread_le(vdev, struct virtio_crypto_config,
			crypto_services, &crypto_services);
	virtio_cread(vdev, struct virtio_crypto_config,
	virtio_cread_le(vdev, struct virtio_crypto_config,
			cipher_algo_l, &cipher_algo_l);
	virtio_cread(vdev, struct virtio_crypto_config,
	virtio_cread_le(vdev, struct virtio_crypto_config,
			cipher_algo_h, &cipher_algo_h);
	virtio_cread(vdev, struct virtio_crypto_config,
	virtio_cread_le(vdev, struct virtio_crypto_config,
			hash_algo, &hash_algo);
	virtio_cread(vdev, struct virtio_crypto_config,
	virtio_cread_le(vdev, struct virtio_crypto_config,
			mac_algo_l, &mac_algo_l);
	virtio_cread(vdev, struct virtio_crypto_config,
	virtio_cread_le(vdev, struct virtio_crypto_config,
			mac_algo_h, &mac_algo_h);
	virtio_cread(vdev, struct virtio_crypto_config,
	virtio_cread_le(vdev, struct virtio_crypto_config,
			aead_algo, &aead_algo);

	/* Add virtio crypto device to global table */
+8 −8
Original line number Diff line number Diff line
@@ -39,7 +39,7 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
	u32 events_read, events_clear = 0;

	/* read the config space */
	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			events_read, &events_read);
	if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
		if (vgdev->has_edid)
@@ -49,7 +49,7 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
		drm_helper_hpd_irq_event(vgdev->ddev);
		events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
	}
	virtio_cwrite(vgdev->vdev, struct virtio_gpu_config,
	virtio_cwrite_le(vgdev->vdev, struct virtio_gpu_config,
			 events_clear, &events_clear);
}

@@ -165,7 +165,7 @@ int virtio_gpu_init(struct drm_device *dev)
	}

	/* get display info */
	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			num_scanouts, &num_scanouts);
	vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
				    VIRTIO_GPU_MAX_SCANOUTS);
@@ -176,7 +176,7 @@ int virtio_gpu_init(struct drm_device *dev)
	}
	DRM_INFO("number of scanouts: %d\n", num_scanouts);

	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
	virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
			num_capsets, &num_capsets);
	DRM_INFO("number of cap sets: %d\n", num_capsets);

+1 −1
Original line number Diff line number Diff line
@@ -141,7 +141,7 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_mem_entry **ents,
					unsigned int *nents)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
	struct scatterlist *sg;
	int si, ret;
Loading