Commit a3b815f0 authored by Gerd Hoffmann

drm/virtio: add iommu support.

Use the dma mapping api and properly add iommu mappings for
objects, unless virtio is in iommu quirk mode.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Dave Airlie <airlied@redhat.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20180829122026.27012-3-kraxel@redhat.com
parent b3f13ec9
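
The change hinges on one rule: when virtio is not in iommu quirk mode, the scatter list backing an object must be mapped through the DMA API and the host must be handed sg_dma_address() results; in quirk mode the host expects guest physical addresses straight from sg_phys(). A minimal sketch of that decision, assuming kernel context (the helper name example_map_backing and its return convention are illustrative, not part of the patch):

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/scatterlist.h>
	#include <linux/virtio_config.h>

	/*
	 * Illustrative helper, not from the patch: decide how an object's
	 * backing pages become visible to the host.  Returns the number
	 * of entries to advertise, or a negative errno.
	 */
	static int example_map_backing(struct virtio_device *vdev,
				       struct sg_table *pages, uint32_t *mapped)
	{
		if (virtio_has_iommu_quirk(vdev)) {
			/* quirk mode: host takes guest physical addresses */
			*mapped = 0;
			return pages->nents;
		}

		/* create iommu mappings through the DMA API; 0 means failure */
		*mapped = dma_map_sg(vdev->dev.parent, pages->sgl,
				     pages->nents, DMA_TO_DEVICE);
		if (!*mapped)
			return -ENOMEM;

		/* device addresses now come from sg_dma_address(sg) per entry */
		return *mapped;
	}

The detach path in the patch applies the inverse in the right order: the detach command is fenced and waited on first, so the host is done with the pages before dma_unmap_sg() tears the iommu mappings down.
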
drivers/gpu/drm/virtio/virtgpu_drv.h +1 −0
@@ -57,6 +57,7 @@ struct virtio_gpu_object {
 	uint32_t hw_res_handle;
 
 	struct sg_table *pages;
+	uint32_t mapped;
 	void *vmap;
 	bool dumb;
 	struct ttm_place                placement_code;
drivers/gpu/drm/virtio/virtgpu_vq.c +37 −9
@@ -424,7 +424,8 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
 }
 
 static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
-						  uint32_t resource_id)
+						  uint32_t resource_id,
+						  struct virtio_gpu_fence **fence)
 {
 	struct virtio_gpu_resource_detach_backing *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -435,7 +436,7 @@ static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
 	cmd_p->resource_id = cpu_to_le32(resource_id);
 
-	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 }
 
 void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
@@ -849,9 +850,10 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 			     uint32_t resource_id,
 			     struct virtio_gpu_fence **fence)
 {
+	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
 	struct virtio_gpu_mem_entry *ents;
 	struct scatterlist *sg;
-	int si;
+	int si, nents;
 
 	if (!obj->pages) {
 		int ret;
@@ -861,23 +863,33 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 			return ret;
 	}
 
+	if (use_dma_api) {
+		obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
+					 obj->pages->sgl, obj->pages->nents,
+					 DMA_TO_DEVICE);
+		nents = obj->mapped;
+	} else {
+		nents = obj->pages->nents;
+	}
+
 	/* gets freed when the ring has consumed it */
-	ents = kmalloc_array(obj->pages->nents,
-			     sizeof(struct virtio_gpu_mem_entry),
+	ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
 			     GFP_KERNEL);
 	if (!ents) {
 		DRM_ERROR("failed to allocate ent list\n");
 		return -ENOMEM;
 	}
 
-	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
-		ents[si].addr = cpu_to_le64(sg_phys(sg));
+	for_each_sg(obj->pages->sgl, sg, nents, si) {
+		ents[si].addr = cpu_to_le64(use_dma_api
+					    ? sg_dma_address(sg)
+					    : sg_phys(sg));
 		ents[si].length = cpu_to_le32(sg->length);
 		ents[si].padding = 0;
 	}
 
 	virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
-					       ents, obj->pages->nents,
+					       ents, nents,
 					       fence);
 	obj->hw_res_handle = resource_id;
 	return 0;
@@ -886,7 +898,23 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
 			      struct virtio_gpu_object *obj)
 {
-	virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle);
+	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+	struct virtio_gpu_fence *fence;
+
+	if (use_dma_api && obj->mapped) {
+		/* detach backing and wait for the host process it ... */
+		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, &fence);
+		dma_fence_wait(&fence->f, true);
+		dma_fence_put(&fence->f);
+
+		/* ... then tear down iommu mappings */
+		dma_unmap_sg(vgdev->vdev->dev.parent,
+			     obj->pages->sgl, obj->mapped,
+			     DMA_TO_DEVICE);
+		obj->mapped = 0;
+	} else {
+		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
+	}
 }
 
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,