Commit 9313f802 authored by Linus Torvalds
Browse files
Pull virtio updates from Michael Tsirkin:
 "vhost, vdpa, and virtio cleanups and fixes

  A very quiet cycle, no new features"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  MAINTAINERS: add URL for virtio-mem
  vhost_vdpa: remove unnecessary spin_lock in vhost_vring_call
  vringh: fix __vringh_iov() when riov and wiov are different
  vdpa/mlx5: Setup driver only if VIRTIO_CONFIG_S_DRIVER_OK
  s390: virtio: PV needs VIRTIO I/O device protection
  virtio: let arch advertise guest's memory access restrictions
  vhost_vdpa: Fix duplicate included kernel.h
  vhost: reduce stack usage in log_used
  virtio-mem: Constify mem_id_table
  virtio_input: Constify id_table
  virtio-balloon: Constify id_table
  vdpa/mlx5: Fix failure to bring link up
  vdpa/mlx5: Make use of a specific 16 bit endianness API
parents 090a7d04 88a0d60c
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -18629,6 +18629,7 @@ VIRTIO MEM DRIVER
M:	David Hildenbrand <david@redhat.com>
L:	virtualization@lists.linux-foundation.org
S:	Maintained
W:	https://virtio-mem.gitlab.io/
F:	drivers/virtio/virtio_mem.c
F:	include/uapi/linux/virtio_mem.h
+1 −0
Original line number Diff line number Diff line
@@ -810,6 +810,7 @@ menu "Virtualization"
config PROTECTED_VIRTUALIZATION_GUEST
	def_bool n
	prompt "Protected virtualization guest support"
	select ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
	help
	  Select this option, if you want to be able to run this
	  kernel as a protected virtualization KVM guest.
+11 −0
Original line number Diff line number Diff line
@@ -46,6 +46,7 @@
#include <asm/kasan.h>
#include <asm/dma-mapping.h>
#include <asm/uv.h>
#include <linux/virtio_config.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);

@@ -162,6 +163,16 @@ bool force_dma_unencrypted(struct device *dev)
	return is_prot_virt_guest();
}

#ifdef CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS

/*
 * Report whether this guest restricts virtio device access to its memory.
 * Returns nonzero when running as an s390 protected-virtualization (PV)
 * guest, in which case virtio devices must honor the restricted-access
 * rules (e.g. use bounce buffers / shared pages) — TODO confirm exact
 * consumer semantics against the virtio core's use of this hook.
 */
int arch_has_restricted_virtio_memory_access(void)
{
	return is_prot_virt_guest();
}
EXPORT_SYMBOL(arch_has_restricted_virtio_memory_access);

#endif

/* protected virtualization */
static void pv_init(void)
{
+10 −2
Original line number Diff line number Diff line
@@ -1522,6 +1522,11 @@ static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
		(mvdev->actual_features & (1ULL << VIRTIO_F_VERSION_1));
}

/*
 * Convert a CPU-native 16-bit value to the virtio wire endianness for this
 * mlx5 vdpa device. Endianness is little-endian when VIRTIO_F_VERSION_1 was
 * negotiated (per mlx5_vdpa_is_little_endian()), legacy/native otherwise.
 */
static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
{
	return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val);
}

static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
{
	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
@@ -1535,8 +1540,8 @@ static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
		return err;

	ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
	ndev->config.mtu = __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev),
					     ndev->mtu);
	ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, ndev->mtu);
	ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP);
	return err;
}

@@ -1653,6 +1658,9 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_net *ndev, struct vhost_iotlb *
	if (err)
		goto err_mr;

	if (!(ndev->mvdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
		return 0;

	restore_channels_info(ndev);
	err = setup_driver(ndev);
	if (err)
+1 −8
Original line number Diff line number Diff line
@@ -22,7 +22,6 @@
#include <linux/nospec.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/kernel.h>

#include "vhost.h"

@@ -97,26 +96,20 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
		return;

	irq = ops->get_vq_irq(vdpa, qid);
	spin_lock(&vq->call_ctx.ctx_lock);
	irq_bypass_unregister_producer(&vq->call_ctx.producer);
	if (!vq->call_ctx.ctx || irq < 0) {
		spin_unlock(&vq->call_ctx.ctx_lock);
	if (!vq->call_ctx.ctx || irq < 0)
		return;
	}

	vq->call_ctx.producer.token = vq->call_ctx.ctx;
	vq->call_ctx.producer.irq = irq;
	ret = irq_bypass_register_producer(&vq->call_ctx.producer);
	spin_unlock(&vq->call_ctx.ctx_lock);
}

/*
 * Tear down the irq-bypass producer for virtqueue @qid, detaching its
 * call eventfd from the posted-interrupt fast path.
 *
 * NOTE(review): this text is rendered from a diff with +/- markers
 * stripped; the commit message says the ctx_lock spin_lock around the
 * unregister is being REMOVED, so the locked lines below are likely the
 * pre-patch version — verify against the applied tree before relying on
 * the locking shown here.
 */
static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];

	spin_lock(&vq->call_ctx.ctx_lock);
	irq_bypass_unregister_producer(&vq->call_ctx.producer);
	spin_unlock(&vq->call_ctx.ctx_lock);
}

static void vhost_vdpa_reset(struct vhost_vdpa *v)
Loading