Commit b40be05e authored by Dave Airlie
Browse files

Merge branch 'for-5.10-drm-sg-fix' of https://github.com/mszyprow/linux into drm-next



Please pull a set of fixes for various DRM drivers that finally resolve
incorrect usage of the scatterlists (struct sg_table nents and orig_nents
entries), which causes issues when IOMMU is used.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200910080505.24456-1-m.szyprowski@samsung.com
parents 818280d5 be0704be
Loading
Loading
Loading
Loading
+6 −7
Original line number Diff line number Diff line
@@ -140,13 +140,12 @@ struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
				      enum dma_data_direction direction)
{
	struct dma_heaps_attachment *a = attachment->priv;
	struct sg_table *table;

	table = &a->table;
	struct sg_table *table = &a->table;
	int ret;

	if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
			direction))
		table = ERR_PTR(-ENOMEM);
	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
	if (ret)
		table = ERR_PTR(ret);
	return table;
}

@@ -154,7 +153,7 @@ static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *table,
				   enum dma_data_direction direction)
{
	dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

static vm_fault_t dma_heap_vm_fault(struct vm_fault *vmf)
+3 −4
Original line number Diff line number Diff line
@@ -63,10 +63,9 @@ static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
					GFP_KERNEL);
	if (ret < 0)
		goto err;
	if (!dma_map_sg(dev, sg->sgl, sg->nents, direction)) {
		ret = -EINVAL;
	ret = dma_map_sgtable(dev, sg, direction, 0);
	if (ret < 0)
		goto err;
	}
	return sg;

err:
@@ -78,7 +77,7 @@ err:
static void put_sg_table(struct device *dev, struct sg_table *sg,
			 enum dma_data_direction direction)
{
	dma_unmap_sg(dev, sg->sgl, sg->nents, direction);
	dma_unmap_sgtable(dev, sg, direction, 0);
	sg_free_table(sg);
	kfree(sg);
}
+11 −13
Original line number Diff line number Diff line
@@ -379,7 +379,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i, num;
	int i;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
@@ -395,22 +395,18 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,

		mapping = dobj->obj.filp->f_mapping;

		for_each_sg(sgt->sgl, sg, count, i) {
		for_each_sgtable_sg(sgt, sg, i) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				num = i;
			if (IS_ERR(page))
				goto release;
			}

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
			num = sgt->nents;
		if (dma_map_sgtable(attach->dev, sgt, dir, 0))
			goto release;
		}
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
@@ -418,7 +414,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
		if (dma_map_sgtable(attach->dev, sgt, dir, 0))
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
@@ -432,7 +428,8 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	return sgt;

 release:
	for_each_sg(sgt->sgl, sg, num, i)
	for_each_sgtable_sg(sgt, sg, i)
		if (sg_page(sg))
			put_page(sg_page(sg));
 free_table:
	sg_free_table(sgt);
@@ -449,11 +446,12 @@ static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
	int i;

	if (!dobj->linear)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);

	if (dobj->obj.filp) {
		struct scatterlist *sg;
		for_each_sg(sgt->sgl, sg, sgt->nents, i)

		for_each_sgtable_sg(sgt, sg, i)
			put_page(sg_page(sg));
	}

+1 −1
Original line number Diff line number Diff line
@@ -127,7 +127,7 @@ drm_clflush_sg(struct sg_table *st)
		struct sg_page_iter sg_iter;

		mb(); /*CLFLUSH is ordered only by using memory barriers*/
		for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
		for_each_sgtable_page(st, &sg_iter, 0)
			drm_clflush_page(sg_page_iter_page(&sg_iter));
		mb(); /*Make sure that all cache line entry is flushed*/

+3 −20
Original line number Diff line number Diff line
@@ -471,27 +471,10 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
{
	struct drm_gem_cma_object *cma_obj;

	if (sgt->nents != 1) {
	/* check if the entries in the sg_table are contiguous */
		dma_addr_t next_addr = sg_dma_address(sgt->sgl);
		struct scatterlist *s;
		unsigned int i;

		for_each_sg(sgt->sgl, s, sgt->nents, i) {
			/*
			 * sg_dma_address(s) is only valid for entries
			 * that have sg_dma_len(s) != 0
			 */
			if (!sg_dma_len(s))
				continue;

			if (sg_dma_address(s) != next_addr)
	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
		return ERR_PTR(-EINVAL);

			next_addr = sg_dma_address(s) + sg_dma_len(s);
		}
	}

	/* Create a CMA GEM buffer. */
	cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
	if (IS_ERR(cma_obj))
Loading