Commit 3d3bdbc0 authored by Gerd Hoffmann

drm/virtio: rework virtio_gpu_transfer_to_host_ioctl fencing



Switch to the virtio_gpu_array_* helper workflow.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20190829103301.3539-12-kraxel@redhat.com
parent 375f156a
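The virtio_gpu_array_* helpers replace the open-coded lookup/reserve/validate sequence: the caller gathers the GEM objects a command touches into a virtio_gpu_object_array, locks their reservation objects once when the command carries a fence, and hands the array to the submission path, which keeps the objects alive until the host completes the command. A condensed sketch of the fenced workflow, using only the helpers visible in this patch (error handling elided; the full version is in the virtgpu_ioctl.c diff below):

	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;

	/* resolve the userspace handle into a one-entry, referenced array */
	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);

	/* fenced commands lock the reservation objects up front */
	virtio_gpu_array_lock_resv(objs);
	fence = virtio_gpu_fence_alloc(vgdev);

	/* the command takes ownership of objs and releases it on completion */
	virtio_gpu_cmd_transfer_to_host_3d(vgdev, vfpriv ? vfpriv->ctx_id : 0,
					   offset, args->level, &box, objs, fence);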
drivers/gpu/drm/virtio/virtgpu_drv.h +2 −2
@@ -279,10 +279,10 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
 void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
 				   uint32_t resource_id);
 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
-					struct virtio_gpu_object *bo,
 					uint64_t offset,
 					__le32 width, __le32 height,
 					__le32 x, __le32 y,
+					struct virtio_gpu_object_array *objs,
 					struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
 				   uint32_t resource_id,
@@ -329,10 +329,10 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 					  struct virtio_gpu_object_array *objs,
 					  struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
-					struct virtio_gpu_object *bo,
 					uint32_t ctx_id,
 					uint64_t offset, uint32_t level,
 					struct virtio_gpu_box *box,
+					struct virtio_gpu_object_array *objs,
 					struct virtio_gpu_fence *fence);
 void
 virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
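Both prototypes lose the struct virtio_gpu_object *bo parameter and gain the object array immediately before the fence, matching virtio_gpu_cmd_transfer_from_host_3d, which already takes an array (visible in the context lines above). The resulting 2D signature:

	void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
						uint64_t offset,
						__le32 width, __le32 height,
						__le32 x, __le32 y,
						struct virtio_gpu_object_array *objs,
						struct virtio_gpu_fence *fence);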
drivers/gpu/drm/virtio/virtgpu_ioctl.c +21 −29
@@ -383,52 +383,44 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
 	struct drm_virtgpu_3d_transfer_to_host *args = data;
-	struct ttm_operation_ctx ctx = { true, false };
-	struct drm_gem_object *gobj = NULL;
-	struct virtio_gpu_object *qobj = NULL;
+	struct virtio_gpu_object_array *objs;
 	struct virtio_gpu_fence *fence;
 	struct virtio_gpu_box box;
 	int ret;
 	u32 offset = args->offset;
 
-	gobj = drm_gem_object_lookup(file, args->bo_handle);
-	if (gobj == NULL)
+	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
+	if (objs == NULL)
 		return -ENOENT;
 
-	qobj = gem_to_virtio_gpu_obj(gobj);
-
-	ret = virtio_gpu_object_reserve(qobj);
-	if (ret)
-		goto out;
-
-	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
-	if (unlikely(ret))
-		goto out_unres;
-
 	convert_to_hw_box(&box, &args->box);
 	if (!vgdev->has_virgl_3d) {
 		virtio_gpu_cmd_transfer_to_host_2d
-			(vgdev, qobj, offset,
-			 box.w, box.h, box.x, box.y, NULL);
+			(vgdev, offset,
+			 box.w, box.h, box.x, box.y,
+			 objs, NULL);
 	} else {
+		ret = virtio_gpu_array_lock_resv(objs);
+		if (ret != 0)
+			goto err_put_free;
+
+		ret = -ENOMEM;
 		fence = virtio_gpu_fence_alloc(vgdev);
-		if (!fence) {
-			ret = -ENOMEM;
-			goto out_unres;
-		}
+		if (!fence)
+			goto err_unlock;
+
 		virtio_gpu_cmd_transfer_to_host_3d
-			(vgdev, qobj,
+			(vgdev,
 			 vfpriv ? vfpriv->ctx_id : 0, offset,
-			 args->level, &box, fence);
-		dma_resv_add_excl_fence(qobj->tbo.base.resv,
-						  &fence->f);
+			 args->level, &box, objs, fence);
 		dma_fence_put(&fence->f);
 	}
+	return 0;
 
-out_unres:
-	virtio_gpu_object_unreserve(qobj);
-out:
-	drm_gem_object_put_unlocked(gobj);
+err_unlock:
+	virtio_gpu_array_unlock_resv(objs);
+err_put_free:
+	virtio_gpu_array_put_free(objs);
 	return ret;
 }
 
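Note the reworked unwind structure: the old code fell through its labels on success, since unreserving and dropping the GEM reference were part of the normal path, whereas the new code returns 0 directly because ownership of the array has moved to the submitted command. The labels now run only on failure, undoing setup in reverse order:

err_unlock:
	virtio_gpu_array_unlock_resv(objs);	/* undo virtio_gpu_array_lock_resv() */
err_put_free:
	virtio_gpu_array_put_free(objs);	/* drop the references, free the array */
	return ret;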
drivers/gpu/drm/virtio/virtgpu_plane.c +17 −4
@@ -124,12 +124,19 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
 		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
 		handle = bo->hw_res_handle;
 		if (bo->dumb) {
+			struct virtio_gpu_object_array *objs;
+
+			objs = virtio_gpu_array_alloc(1);
+			if (!objs)
+				return;
+			virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
 			virtio_gpu_cmd_transfer_to_host_2d
-				(vgdev, bo, 0,
+				(vgdev, 0,
 				 cpu_to_le32(plane->state->src_w >> 16),
 				 cpu_to_le32(plane->state->src_h >> 16),
 				 cpu_to_le32(plane->state->src_x >> 16),
-				 cpu_to_le32(plane->state->src_y >> 16), NULL);
+				 cpu_to_le32(plane->state->src_y >> 16),
+				 objs, NULL);
 		}
 	} else {
 		handle = 0;
@@ -219,11 +226,17 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 
 	if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
 		/* new cursor -- update & wait */
+		struct virtio_gpu_object_array *objs;
+
+		objs = virtio_gpu_array_alloc(1);
+		if (!objs)
+			return;
+		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
 		virtio_gpu_cmd_transfer_to_host_2d
-			(vgdev, bo, 0,
+			(vgdev, 0,
 			 cpu_to_le32(plane->state->crtc_w),
 			 cpu_to_le32(plane->state->crtc_h),
-			 0, 0, vgfb->fence);
+			 0, 0, objs, vgfb->fence);
 		dma_fence_wait(&vgfb->fence->f, true);
 		dma_fence_put(&vgfb->fence->f);
 		vgfb->fence = NULL;
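The plane-update paths have no userspace handle to resolve, so they build the array directly from the GEM object already attached to the framebuffer; virtio_gpu_array_add_obj is expected to take its own reference on the object. The pattern common to both hunks above:

	struct virtio_gpu_object_array *objs;

	objs = virtio_gpu_array_alloc(1);	/* array sized for one entry */
	if (!objs)
		return;				/* plane updates have no error return */
	virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);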
drivers/gpu/drm/virtio/virtgpu_vq.c +7 −2
@@ -491,12 +491,13 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
 }
 
 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
-					struct virtio_gpu_object *bo,
 					uint64_t offset,
 					__le32 width, __le32 height,
 					__le32 x, __le32 y,
+					struct virtio_gpu_object_array *objs,
 					struct virtio_gpu_fence *fence)
 {
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
 	struct virtio_gpu_transfer_to_host_2d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
@@ -508,6 +509,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 	memset(cmd_p, 0, sizeof(*cmd_p));
+	vbuf->objs = objs;
 
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
 	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
@@ -900,12 +902,13 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 }
 
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
-					struct virtio_gpu_object *bo,
 					uint32_t ctx_id,
 					uint64_t offset, uint32_t level,
 					struct virtio_gpu_box *box,
+					struct virtio_gpu_object_array *objs,
 					struct virtio_gpu_fence *fence)
 {
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
 	struct virtio_gpu_transfer_host_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
@@ -918,6 +921,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 	memset(cmd_p, 0, sizeof(*cmd_p));
 
+	vbuf->objs = objs;
+
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
 	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
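On the callee side, both transfer functions now derive the target buffer object from the first array entry and attach the array to the virtio buffer. Storing the array in vbuf->objs completes the ownership hand-off: the command-completion path is then expected to drop the object references, and release the reservations for fenced commands, once the host has processed the transfer. The pattern shared by both functions:

	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;	/* released when the command completes */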