Commit 7b5d753e authored by Jason Wang, committed by Michael S. Tsirkin

vhost: fine grain userspace memory accessors

This is used to hide the metadata addresses from the virtqueue helpers. It
will allow implementing vmap-based fast access to the metadata.

Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent 1ab5d138
+77 −17
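The pattern throughout the diff is that callers stop reaching into vq->avail, vq->used and vq->desc through vhost_get_user()/vhost_put_user() directly and instead go through one typed accessor per metadata field, so a later patch can add a fast path inside the accessors without touching any caller. Below is a minimal sketch of what such a fast path could look like in one accessor; the vq->meta_mapped flag and the vq->avail_kva pointer are hypothetical names used only for illustration and are not part of this patch:

static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
				      __virtio16 *idx)
{
	/* Hypothetical fast path: read the index through a cached
	 * kernel mapping of the avail ring (e.g. set up via vmap). */
	if (vq->meta_mapped) {
		*idx = READ_ONCE(vq->avail_kva->idx);
		return 0;
	}
	/* Fallback: the uaccess-based path this patch factors out. */
	return vhost_get_avail(vq, *idx, &vq->avail->idx);
}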
@@ -887,6 +887,34 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
 	ret; \
 })
 
+static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
+{
+	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
+			      vhost_avail_event(vq));
+}
+
+static inline int vhost_put_used(struct vhost_virtqueue *vq,
+				 struct vring_used_elem *head, int idx,
+				 int count)
+{
+	return vhost_copy_to_user(vq, vq->used->ring + idx, head,
+				  count * sizeof(*head));
+}
+
+static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
+
+{
+	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
+			      &vq->used->flags);
+}
+
+static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
+
+{
+	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
+			      &vq->used->idx);
+}
+
 #define vhost_get_user(vq, x, ptr, type)		\
 ({ \
 	int ret; \
@@ -925,6 +953,43 @@ static void vhost_dev_unlock_vqs(struct vhost_dev *d)
 		mutex_unlock(&d->vqs[i]->mutex);
 }
 
+static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
+				      __virtio16 *idx)
+{
+	return vhost_get_avail(vq, *idx, &vq->avail->idx);
+}
+
+static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
+				       __virtio16 *head, int idx)
+{
+	return vhost_get_avail(vq, *head,
+			       &vq->avail->ring[idx & (vq->num - 1)]);
+}
+
+static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
+					__virtio16 *flags)
+{
+	return vhost_get_avail(vq, *flags, &vq->avail->flags);
+}
+
+static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
+				       __virtio16 *event)
+{
+	return vhost_get_avail(vq, *event, vhost_used_event(vq));
+}
+
+static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
+				     __virtio16 *idx)
+{
+	return vhost_get_used(vq, *idx, &vq->used->idx);
+}
+
+static inline int vhost_get_desc(struct vhost_virtqueue *vq,
+				 struct vring_desc *desc, int idx)
+{
+	return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
+}
+
 static int vhost_new_umem_range(struct vhost_umem *umem,
 				u64 start, u64 size, u64 end,
 				u64 userspace_addr, int perm)
@@ -1862,8 +1927,7 @@ EXPORT_SYMBOL_GPL(vhost_log_write);
 static int vhost_update_used_flags(struct vhost_virtqueue *vq)
 {
 	void __user *used;
-	if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
-			   &vq->used->flags) < 0)
+	if (vhost_put_used_flags(vq))
 		return -EFAULT;
 	if (unlikely(vq->log_used)) {
 		/* Make sure the flag is seen before log. */
@@ -1880,8 +1944,7 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)

 static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
 {
-	if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
-			   vhost_avail_event(vq)))
+	if (vhost_put_avail_event(vq))
 		return -EFAULT;
 	if (unlikely(vq->log_used)) {
 		void __user *used;
@@ -1917,7 +1980,7 @@ int vhost_vq_init_access(struct vhost_virtqueue *vq)
 		r = -EFAULT;
 		goto err;
 	}
-	r = vhost_get_used(vq, last_used_idx, &vq->used->idx);
+	r = vhost_get_used_idx(vq, &last_used_idx);
 	if (r) {
 		vq_err(vq, "Can't access used idx at %p\n",
 		       &vq->used->idx);
@@ -2116,7 +2179,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
 	last_avail_idx = vq->last_avail_idx;
 
 	if (vq->avail_idx == vq->last_avail_idx) {
-		if (unlikely(vhost_get_avail(vq, avail_idx, &vq->avail->idx))) {
+		if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
 			vq_err(vq, "Failed to access avail idx at %p\n",
 				&vq->avail->idx);
 			return -EFAULT;
@@ -2143,8 +2206,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,

 	/* Grab the next descriptor number they're advertising, and increment
 	 * the index we've seen. */
-	if (unlikely(vhost_get_avail(vq, ring_head,
-		     &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) {
+	if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) {
 		vq_err(vq, "Failed to read head: idx %d address %p\n",
 		       last_avail_idx,
 		       &vq->avail->ring[last_avail_idx % vq->num]);
@@ -2179,8 +2241,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
 			       i, vq->num, head);
 			return -EINVAL;
 		}
-		ret = vhost_copy_from_user(vq, &desc, vq->desc + i,
-					   sizeof desc);
+		ret = vhost_get_desc(vq, &desc, i);
 		if (unlikely(ret)) {
 			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
 			       i, vq->desc + i);
@@ -2273,7 +2334,7 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,

 	start = vq->last_used_idx & (vq->num - 1);
 	used = vq->used->ring + start;
-	if (vhost_copy_to_user(vq, used, heads, count * sizeof *used)) {
+	if (vhost_put_used(vq, heads, start, count)) {
 		vq_err(vq, "Failed to write used");
 		return -EFAULT;
 	}
@@ -2315,8 +2376,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,

 	/* Make sure buffer is written before we update index. */
 	smp_wmb();
-	if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
-			   &vq->used->idx)) {
+	if (vhost_put_used_idx(vq)) {
 		vq_err(vq, "Failed to increment used idx");
 		return -EFAULT;
 	}
@@ -2349,7 +2409,7 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)

 	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
 		__virtio16 flags;
-		if (vhost_get_avail(vq, flags, &vq->avail->flags)) {
+		if (vhost_get_avail_flags(vq, &flags)) {
 			vq_err(vq, "Failed to get flags");
 			return true;
 		}
@@ -2363,7 +2423,7 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	if (unlikely(!v))
 		return true;
 
-	if (vhost_get_avail(vq, event, vhost_used_event(vq))) {
+	if (vhost_get_used_event(vq, &event)) {
 		vq_err(vq, "Failed to get used event idx");
 		return true;
 	}
@@ -2408,7 +2468,7 @@ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	if (vq->avail_idx != vq->last_avail_idx)
 		return false;
 
-	r = vhost_get_avail(vq, avail_idx, &vq->avail->idx);
+	r = vhost_get_avail_idx(vq, &avail_idx);
 	if (unlikely(r))
 		return false;
 	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
@@ -2444,7 +2504,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	/* They could have slipped one in as we were doing that: make
 	 * sure it's written, then check again. */
 	smp_mb();
-	r = vhost_get_avail(vq, avail_idx, &vq->avail->idx);
+	r = vhost_get_avail_idx(vq, &avail_idx);
 	if (r) {
 		vq_err(vq, "Failed to check avail idx at %p: %d\n",
 		       &vq->avail->idx, r);