Commit e20bb857 authored by Dave Airlie

Merge tag 'exynos-drm-next-for-v5.8' of git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos into drm-next

Check imported buffer mapping in a generic way
- This patch reworks the exynos_drm_gem_prime_import_sg_table() function
  so that it checks, in a generic way, whether the imported buffer has
  been mapped as contiguous, and flags the exynos gem buffer type
  accordingly (sketched just below).
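
  A minimal sketch of the generic contiguity check, mirroring the
  exynos_drm_gem_prime_import_sg_table() hunk further below; the helper
  name sgt_is_contiguous() is illustrative and not part of the patch:

	#include <linux/scatterlist.h>

	/* true if the sg_table was mapped as one contiguous DMA range */
	static bool sgt_is_contiguous(struct sg_table *sgt)
	{
		dma_addr_t next_addr = sg_dma_address(sgt->sgl);
		struct scatterlist *s;
		unsigned int i;

		for_each_sg(sgt->sgl, s, sgt->nents, i) {
			if (!sg_dma_len(s))
				break;		/* no more mapped chunks */
			if (sg_dma_address(s) != next_addr)
				return false;	/* hole between chunks */
			next_addr = sg_dma_address(s) + sg_dma_len(s);
		}
		return true;
	}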

Fixups
- Correctly drop the reference on in_bridge_node in the probe error path.
- Enable runtime power management correctly.
  . Runtime PM should be enabled before calling component_add(); see the
    sketch after this list.
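
  The ordering matters because the component's bind callback may rely on
  runtime PM already being enabled. A condensed sketch of the corrected
  probe tail; the "example" names are placeholders, only the
  pm_runtime_*() and component_add() calls are the real APIs:

	#include <linux/component.h>
	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static const struct component_ops example_component_ops;

	static int example_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		int ret;

		/* ... acquire clocks, regulators, irqs ... */

		pm_runtime_enable(dev);		/* before component_add() */

		ret = component_add(dev, &example_component_ops);
		if (ret)
			goto err_disable_runtime;

		return 0;

	err_disable_runtime:
		pm_runtime_disable(dev);	/* undo the enable on failure */
		return ret;
	}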

Cleanups
- Do not register a sysfs file "by hand"; use dev_groups instead (a
  generic sketch follows this list).
- Drop the internal 'pages' array, which is no longer needed.
- Remove dead code.
- Correct type casting.
- Drop unnecessary error messages.
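
  For the sysfs cleanup, dev_groups lets the driver core create and
  remove the attribute files itself instead of the driver registering
  them by hand in probe(). A generic sketch, where every "example" name
  is a placeholder and not taken from the patch:

	#include <linux/device.h>
	#include <linux/platform_device.h>

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%d\n", 0);	/* report some state */
	}
	static DEVICE_ATTR_RO(example);

	static struct attribute *example_attrs[] = {
		&dev_attr_example.attr,
		NULL
	};
	ATTRIBUTE_GROUPS(example);

	static struct platform_driver example_driver = {
		.driver = {
			.name = "example",
			/* the core adds/removes the files around probe/remove */
			.dev_groups = example_groups,
		},
	};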

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Inki Dae <inki.dae@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1589952785-24210-1-git-send-email-inki.dae@samsung.com
parents 6cf99161 f84e1ba3
drivers/gpu/drm/exynos/exynos_drm_drv.c +0 −1
@@ -76,7 +76,6 @@ static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
 }
 
 static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
-	.fault = exynos_drm_gem_fault,
 	.open = drm_gem_vm_open,
 	.close = drm_gem_vm_close,
 };
drivers/gpu/drm/exynos/exynos_drm_dsi.c +17 −9
@@ -212,7 +212,7 @@

 #define OLD_SCLK_MIPI_CLK_NAME "pll_clk"
 
-static char *clk_names[5] = { "bus_clk", "sclk_mipi",
+static const char *const clk_names[5] = { "bus_clk", "sclk_mipi",
 	"phyclk_mipidphy0_bitclkdiv8", "phyclk_mipidphy0_rxclkesc0",
 	"sclk_rgb_vclk_to_dsim0" };
 
@@ -1759,10 +1759,6 @@ static int exynos_dsi_probe(struct platform_device *pdev)
 	dsi->dev = dev;
 	dsi->driver_data = of_device_get_match_data(dev);
 
-	ret = exynos_dsi_parse_dt(dsi);
-	if (ret)
-		return ret;
-
 	dsi->supplies[0].supply = "vddcore";
 	dsi->supplies[1].supply = "vddio";
 	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
@@ -1809,10 +1805,8 @@ static int exynos_dsi_probe(struct platform_device *pdev)
 	}
 
 	dsi->irq = platform_get_irq(pdev, 0);
-	if (dsi->irq < 0) {
-		dev_err(dev, "failed to request dsi irq resource\n");
+	if (dsi->irq < 0)
 		return dsi->irq;
-	}
 
 	irq_set_status_flags(dsi->irq, IRQ_NOAUTOEN);
 	ret = devm_request_threaded_irq(dev, dsi->irq, NULL,
@@ -1823,11 +1817,25 @@ static int exynos_dsi_probe(struct platform_device *pdev)
 		return ret;
 	}
 
+	ret = exynos_dsi_parse_dt(dsi);
+	if (ret)
+		return ret;
+
 	platform_set_drvdata(pdev, &dsi->encoder);
 
 	pm_runtime_enable(dev);
 
-	return component_add(dev, &exynos_dsi_component_ops);
+	ret = component_add(dev, &exynos_dsi_component_ops);
+	if (ret)
+		goto err_disable_runtime;
+
+	return 0;
+
+err_disable_runtime:
+	pm_runtime_disable(dev);
+	of_node_put(dsi->in_bridge_node);
+
+	return ret;
 }
 
 static int exynos_dsi_remove(struct platform_device *pdev)
drivers/gpu/drm/exynos/exynos_drm_fbdev.c +1 −27
@@ -76,7 +76,6 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
 	struct fb_info *fbi;
 	struct drm_framebuffer *fb = helper->fb;
 	unsigned int size = fb->width * fb->height * fb->format->cpp[0];
-	unsigned int nr_pages;
 	unsigned long offset;
 
 	fbi = drm_fb_helper_alloc_fbi(helper);
@@ -90,16 +89,6 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,

 	drm_fb_helper_fill_info(fbi, helper, sizes);
 
-	nr_pages = exynos_gem->size >> PAGE_SHIFT;
-
-	exynos_gem->kvaddr = (void __iomem *) vmap(exynos_gem->pages, nr_pages,
-				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
-	if (!exynos_gem->kvaddr) {
-		DRM_DEV_ERROR(to_dma_dev(helper->dev),
-			      "failed to map pages to kernel space.\n");
-		return -EIO;
-	}
-
 	offset = fbi->var.xoffset * fb->format->cpp[0];
 	offset += fbi->var.yoffset * fb->pitches[0];

@@ -133,18 +122,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,

 	size = mode_cmd.pitches[0] * mode_cmd.height;
 
-	exynos_gem = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
-	/*
-	 * If physically contiguous memory allocation fails and if IOMMU is
-	 * supported then try to get buffer from non physically contiguous
-	 * memory area.
-	 */
-	if (IS_ERR(exynos_gem) && is_drm_iommu_supported(dev)) {
-		dev_warn(dev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
-		exynos_gem = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG,
-						   size);
-	}
-
+	exynos_gem = exynos_drm_gem_create(dev, EXYNOS_BO_WC, size, true);
 	if (IS_ERR(exynos_gem))
 		return PTR_ERR(exynos_gem);
 
@@ -229,12 +207,8 @@ err_init:
 static void exynos_drm_fbdev_destroy(struct drm_device *dev,
 				      struct drm_fb_helper *fb_helper)
 {
-	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
-	struct exynos_drm_gem *exynos_gem = exynos_fbd->exynos_gem;
 	struct drm_framebuffer *fb;
 
-	vunmap(exynos_gem->kvaddr);
-
 	/* release drm framebuffer and real buffer */
 	if (fb_helper->fb && fb_helper->fb->funcs) {
 		fb = fb_helper->fb;
drivers/gpu/drm/exynos/exynos_drm_gem.c +66 −116
@@ -17,28 +17,23 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_gem.h"
 
-static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
+static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem, bool kvmap)
 {
 	struct drm_device *dev = exynos_gem->base.dev;
-	unsigned long attr;
-	unsigned int nr_pages;
-	struct sg_table sgt;
-	int ret = -ENOMEM;
+	unsigned long attr = 0;
 
 	if (exynos_gem->dma_addr) {
 		DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "already allocated.\n");
 		return 0;
 	}
 
-	exynos_gem->dma_attrs = 0;
-
 	/*
 	 * if EXYNOS_BO_CONTIG, fully physically contiguous memory
 	 * region will be allocated else physically contiguous
 	 * as possible.
 	 */
 	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
-		exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;
+		attr |= DMA_ATTR_FORCE_CONTIGUOUS;
 
 	/*
 	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
@@ -46,61 +41,29 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
 	 */
 	if (exynos_gem->flags & EXYNOS_BO_WC ||
 			!(exynos_gem->flags & EXYNOS_BO_CACHABLE))
-		attr = DMA_ATTR_WRITE_COMBINE;
+		attr |= DMA_ATTR_WRITE_COMBINE;
 	else
-		attr = DMA_ATTR_NON_CONSISTENT;
-
-	exynos_gem->dma_attrs |= attr;
-	exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
-
-	nr_pages = exynos_gem->size >> PAGE_SHIFT;
+		attr |= DMA_ATTR_NON_CONSISTENT;
 
-	exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
-			GFP_KERNEL | __GFP_ZERO);
-	if (!exynos_gem->pages) {
-		DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate pages.\n");
-		return -ENOMEM;
-	}
+	/* FBDev emulation requires kernel mapping */
+	if (!kvmap)
+		attr |= DMA_ATTR_NO_KERNEL_MAPPING;
 
+	exynos_gem->dma_attrs = attr;
 	exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
 					     &exynos_gem->dma_addr, GFP_KERNEL,
 					     exynos_gem->dma_attrs);
 	if (!exynos_gem->cookie) {
 		DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n");
-		goto err_free;
-	}
-
-	ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
-				    exynos_gem->dma_addr, exynos_gem->size,
-				    exynos_gem->dma_attrs);
-	if (ret < 0) {
-		DRM_DEV_ERROR(to_dma_dev(dev), "failed to get sgtable.\n");
-		goto err_dma_free;
-	}
-
-	if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
-					     nr_pages)) {
-		DRM_DEV_ERROR(to_dma_dev(dev), "invalid sgtable.\n");
-		ret = -EINVAL;
-		goto err_sgt_free;
+		return -ENOMEM;
 	}
 
-	sg_free_table(&sgt);
+	if (kvmap)
+		exynos_gem->kvaddr = exynos_gem->cookie;
 
 	DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "dma_addr(0x%lx), size(0x%lx)\n",
 			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);
 
 	return 0;
-
-err_sgt_free:
-	sg_free_table(&sgt);
-err_dma_free:
-	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
-		       exynos_gem->dma_addr, exynos_gem->dma_attrs);
-err_free:
-	kvfree(exynos_gem->pages);
-
-	return ret;
 }
 
 static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
@@ -118,8 +81,6 @@ static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
 	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
 			(dma_addr_t)exynos_gem->dma_addr,
 			exynos_gem->dma_attrs);
-
-	kvfree(exynos_gem->pages);
 }
 
 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -203,7 +164,8 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,

 struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
 					     unsigned int flags,
-					     unsigned long size)
+					     unsigned long size,
+					     bool kvmap)
 {
 	struct exynos_drm_gem *exynos_gem;
 	int ret;
@@ -237,7 +199,7 @@ struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
 	/* set memory type and cache attribute from user side. */
 	exynos_gem->flags = flags;
 
-	ret = exynos_drm_alloc_buf(exynos_gem);
+	ret = exynos_drm_alloc_buf(exynos_gem, kvmap);
 	if (ret < 0) {
 		drm_gem_object_release(&exynos_gem->base);
 		kfree(exynos_gem);
@@ -254,7 +216,7 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
 	struct exynos_drm_gem *exynos_gem;
 	int ret;
 
-	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size);
+	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size, false);
 	if (IS_ERR(exynos_gem))
 		return PTR_ERR(exynos_gem);
 
@@ -365,7 +327,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
 	else
 		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;
 
-	exynos_gem = exynos_drm_gem_create(dev, flags, args->size);
+	exynos_gem = exynos_drm_gem_create(dev, flags, args->size, false);
 	if (IS_ERR(exynos_gem)) {
 		dev_warn(dev->dev, "FB allocation failed.\n");
 		return PTR_ERR(exynos_gem);
@@ -381,26 +343,6 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
 	return 0;
 }
 
-vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf)
-{
-	struct vm_area_struct *vma = vmf->vma;
-	struct drm_gem_object *obj = vma->vm_private_data;
-	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
-	unsigned long pfn;
-	pgoff_t page_offset;
-
-	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
-
-	if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
-		DRM_ERROR("invalid page offset\n");
-		return VM_FAULT_SIGBUS;
-	}
-
-	pfn = page_to_pfn(exynos_gem->pages[page_offset]);
-	return vmf_insert_mixed(vma, vmf->address,
-			__pfn_to_pfn_t(pfn, PFN_DEV));
-}
-
 static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
 				   struct vm_area_struct *vma)
 {
@@ -462,11 +404,24 @@ struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
 struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
 	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
-	int npages;
+	struct drm_device *drm_dev = obj->dev;
+	struct sg_table *sgt;
+	int ret;
 
-	npages = exynos_gem->size >> PAGE_SHIFT;
+	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt)
+		return ERR_PTR(-ENOMEM);
 
-	return drm_prime_pages_to_sg(exynos_gem->pages, npages);
+	ret = dma_get_sgtable_attrs(to_dma_dev(drm_dev), sgt, exynos_gem->cookie,
+				    exynos_gem->dma_addr, exynos_gem->size,
+				    exynos_gem->dma_attrs);
+	if (ret) {
+		DRM_ERROR("failed to get sgtable, %d\n", ret);
+		kfree(sgt);
+		return ERR_PTR(ret);
+	}
+
+	return sgt;
 }
 
 struct drm_gem_object *
@@ -475,52 +430,47 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
 				     struct sg_table *sgt)
 {
 	struct exynos_drm_gem *exynos_gem;
-	int npages;
-	int ret;
 
-	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
-	if (IS_ERR(exynos_gem)) {
-		ret = PTR_ERR(exynos_gem);
-		return ERR_PTR(ret);
-	}
-
-	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
+	if (sgt->nents < 1)
+		return ERR_PTR(-EINVAL);
 
-	npages = exynos_gem->size >> PAGE_SHIFT;
-	exynos_gem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
-	if (!exynos_gem->pages) {
-		ret = -ENOMEM;
-		goto err;
+	/*
+	 * Check if the provided buffer has been mapped as contiguous
+	 * into DMA address space.
+	 */
+	if (sgt->nents > 1) {
+		dma_addr_t next_addr = sg_dma_address(sgt->sgl);
+		struct scatterlist *s;
+		unsigned int i;
+
+		for_each_sg(sgt->sgl, s, sgt->nents, i) {
+			if (!sg_dma_len(s))
+				break;
+			if (sg_dma_address(s) != next_addr) {
+				DRM_ERROR("buffer chunks must be mapped contiguously");
+				return ERR_PTR(-EINVAL);
+			}
+			next_addr = sg_dma_address(s) + sg_dma_len(s);
+		}
 	}
 
-	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
-					       npages);
-	if (ret < 0)
-		goto err_free_large;
-
-	exynos_gem->sgt = sgt;
+	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
+	if (IS_ERR(exynos_gem))
+		return ERR_CAST(exynos_gem);
 
-	if (sgt->nents == 1) {
-		/* always physically continuous memory if sgt->nents is 1. */
-		exynos_gem->flags |= EXYNOS_BO_CONTIG;
-	} else {
-		/*
-		 * this case could be CONTIG or NONCONTIG type but for now
-		 * sets NONCONTIG.
-		 * TODO. we have to find a way that exporter can notify
-		 * the type of its own buffer to importer.
-		 */
+	/*
+	 * Buffer has been mapped as contiguous into DMA address space,
+	 * but if there is IOMMU, it can be either CONTIG or NONCONTIG.
+	 * We assume a simplified logic below:
+	 */
+	if (is_drm_iommu_supported(dev))
 		exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
-	}
+	else
+		exynos_gem->flags |= EXYNOS_BO_CONTIG;
 
+	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
+	exynos_gem->sgt = sgt;
 	return &exynos_gem->base;
-
-err_free_large:
-	kvfree(exynos_gem->pages);
-err:
-	drm_gem_object_release(&exynos_gem->base);
-	kfree(exynos_gem);
-	return ERR_PTR(ret);
 }
 
 void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
drivers/gpu/drm/exynos/exynos_drm_gem.h +4 −12
@@ -21,20 +21,15 @@
  * @base: a gem object.
  *	- a new handle to this gem object would be created
  *	by drm_gem_handle_create().
- * @buffer: a pointer to exynos_drm_gem_buffer object.
- *	- contain the information to memory region allocated
- *	by user request or at framebuffer creation.
- *	continuous memory region allocated by user request
- *	or at framebuffer creation.
  * @flags: indicate memory type to allocated buffer and cache attruibute.
  * @size: size requested from user, in bytes and this size is aligned
  *	in page unit.
  * @cookie: cookie returned by dma_alloc_attrs
- * @kvaddr: kernel virtual address to allocated memory region.
+ * @kvaddr: kernel virtual address to allocated memory region (for fbdev)
  * @dma_addr: bus address(accessed by dma) to allocated memory region.
  *	- this address could be physical address without IOMMU and
  *	device address with IOMMU.
- * @pages: Array of backing pages.
  * @dma_attrs: attrs passed dma mapping framework
  * @sgt: Imported sg_table.
  *
  * P.S. this object would be transferred to user as kms_bo.handle so
@@ -48,7 +43,6 @@ struct exynos_drm_gem {
 	void __iomem		*kvaddr;
 	dma_addr_t		dma_addr;
 	unsigned long		dma_attrs;
-	struct page		**pages;
 	struct sg_table		*sgt;
 };
 
@@ -58,7 +52,8 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem);
 /* create a new buffer with gem object */
 struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
 					     unsigned int flags,
-					     unsigned long size);
+					     unsigned long size,
+					     bool kvmap);
 
 /*
  * request gem object creation and buffer allocation as the size
@@ -101,9 +96,6 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
 			       struct drm_device *dev,
 			       struct drm_mode_create_dumb *args);
 
-/* page fault handler and mmap fault address(virtual) to physical memory. */
-vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf);
-
 /* set vm_flags and we can change the vm attribute to other one at here. */
 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 