Commit 707d561f authored by Gerd Hoffmann
Browse files

drm: allow limiting the scatter list size.



Add drm_device argument to drm_prime_pages_to_sg(), so we can
call dma_max_mapping_size() to figure the segment size limit
and call into __sg_alloc_table_from_pages() with the correct
limit.

This fixes virtio-gpu with sev.  Possibly it'll fix other bugs
too given that drm seems to totally ignore segment size limits
so far ...

v2: place max_segment in drm driver not gem object.
v3: move max_segment next to the other gem fields.
v4: just use dma_max_mapping_size().

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/20200907112425.15610-2-kraxel@redhat.com
parent 04e89ff3
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -302,7 +302,8 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,

	switch (bo->tbo.mem.mem_type) {
	case TTM_PL_TT:
		sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages,
		sgt = drm_prime_pages_to_sg(obj->dev,
					    bo->tbo.ttm->pages,
					    bo->tbo.num_pages);
		if (IS_ERR(sgt))
			return sgt;
+1 −1
Original line number Diff line number Diff line
@@ -656,7 +656,7 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)

	WARN_ON(shmem->base.import_attach);

	return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

+10 −3
Original line number Diff line number Diff line
@@ -802,9 +802,11 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
 *
 * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
				       struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sg = NULL;
	size_t max_segment = 0;
	int ret;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
@@ -813,8 +815,13 @@ struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_page
		goto out;
	}

	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
				nr_pages << PAGE_SHIFT, GFP_KERNEL);
	if (dev)
		max_segment = dma_max_mapping_size(dev->dev);
	if (max_segment == 0 || max_segment > SCATTERLIST_MAX_SEGMENT)
		max_segment = SCATTERLIST_MAX_SEGMENT;
	ret = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
					  nr_pages << PAGE_SHIFT,
					  max_segment, GFP_KERNEL);
	if (ret)
		goto out;

+2 −1
Original line number Diff line number Diff line
@@ -103,7 +103,8 @@ struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
		sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
					    etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
+1 −1
Original line number Diff line number Diff line
@@ -19,7 +19,7 @@ struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
	if (WARN_ON(!etnaviv_obj->pages))  /* should have already pinned! */
		return ERR_PTR(-EINVAL);

	return drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
	return drm_prime_pages_to_sg(obj->dev, etnaviv_obj->pages, npages);
}

void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj)
Loading