Commit f651c8b0 authored by Gurchetan Singh, committed by Gerd Hoffmann
Browse files

drm/virtio: factor out the sg_table from virtio_gpu_object



A resource will be a shmem-based resource or a (planned)
vram-based resource, so it makes sense to factor out the common fields
(resource handle, dumb).

v2: move mapped field to shmem object

Signed-off-by: Gurchetan Singh <gurchetansingh@chromium.org>
Link: http://patchwork.freedesktop.org/patch/msgid/20200305013212.130640-1-gurchetansingh@chromium.org


Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
parent ee21ec77
Loading
Loading
Loading
Loading
+9 −4
Original line number Diff line number Diff line
@@ -69,16 +69,21 @@ struct virtio_gpu_object_params {
struct virtio_gpu_object {
	struct drm_gem_shmem_object base;
	uint32_t hw_res_handle;

	struct sg_table *pages;
	uint32_t mapped;

	bool dumb;
	bool created;
};
#define gem_to_virtio_gpu_obj(gobj) \
	container_of((gobj), struct virtio_gpu_object, base.base)

struct virtio_gpu_object_shmem {
	struct virtio_gpu_object base;
	struct sg_table *pages;
	uint32_t mapped;
};

#define to_virtio_gpu_shmem(virtio_gpu_object) \
	container_of((virtio_gpu_object), struct virtio_gpu_object_shmem, base)

struct virtio_gpu_object_array {
	struct ww_acquire_ctx ticket;
	struct list_head next;
+17 −14
Original line number Diff line number Diff line
@@ -65,16 +65,17 @@ static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

	if (bo->pages) {
		if (bo->mapped) {
	if (shmem->pages) {
		if (shmem->mapped) {
			dma_unmap_sg(vgdev->vdev->dev.parent,
				     bo->pages->sgl, bo->mapped,
				     shmem->pages->sgl, shmem->mapped,
				     DMA_TO_DEVICE);
			bo->mapped = 0;
			shmem->mapped = 0;
		}
		sg_free_table(bo->pages);
		bo->pages = NULL;
		sg_free_table(shmem->pages);
		shmem->pages = NULL;
		drm_gem_shmem_unpin(&bo->base.base);
	}
	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
@@ -133,6 +134,7 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
					unsigned int *nents)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
	struct scatterlist *sg;
	int si, ret;

@@ -140,19 +142,20 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
	if (ret < 0)
		return -EINVAL;

	bo->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
	if (!bo->pages) {
	shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
	if (!shmem->pages) {
		drm_gem_shmem_unpin(&bo->base.base);
		return -EINVAL;
	}

	if (use_dma_api) {
		bo->mapped = dma_map_sg(vgdev->vdev->dev.parent,
					bo->pages->sgl, bo->pages->nents,
		shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent,
					   shmem->pages->sgl,
					   shmem->pages->nents,
					   DMA_TO_DEVICE);
		*nents = bo->mapped;
		*nents = shmem->mapped;
	} else {
		*nents = bo->pages->nents;
		*nents = shmem->pages->nents;
	}

	*ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
@@ -162,7 +165,7 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
		return -ENOMEM;
	}

	for_each_sg(bo->pages->sgl, sg, *nents, si) {
	for_each_sg(shmem->pages->sgl, sg, *nents, si) {
		(*ents)[si].addr = cpu_to_le64(use_dma_api
					       ? sg_dma_address(sg)
					       : sg_phys(sg));
+4 −2
Original line number Diff line number Diff line
@@ -600,10 +600,11 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       shmem->pages->sgl, shmem->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
@@ -1015,10 +1016,11 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       shmem->pages->sgl, shmem->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));