Commit 7e9e5ead authored by Rob Clark, committed by Sean Paul

drm/vgem: fix cache synchronization on arm/arm64

drm_clflush_pages() is a no-op on arm/arm64. Instead we can use the
dma_sync API.

Fixes failures w/ vgem_test.

Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Rob Clark <robdclark@chromium.org>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20190717211542.30482-1-robdclark@gmail.com
parent dc25ace6
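
For reference, the core pattern the patch switches to looks roughly like the sketch below: wrap the pinned pages in an sg_table and clean the CPU caches with dma_sync_sg_for_device(), which is implemented on arm/arm64, unlike drm_clflush_pages(). This is an illustrative sketch only, not code from the patch; the helper name example_pin_and_flush is invented here.

/*
 * Minimal sketch (assumed helper, not part of the patch): build an
 * sg_table for already-pinned pages and clean CPU caches so a device
 * sees coherent data through the exported dma-addresses.
 */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <drm/drm_prime.h>

static struct sg_table *example_pin_and_flush(struct device *dev,
					      struct page **pages,
					      unsigned int npages)
{
	struct sg_table *sgt;

	/* Wrap the backing pages in a scatter/gather table. */
	sgt = drm_prime_pages_to_sg(pages, npages);
	if (IS_ERR(sgt))
		return sgt;

	/* Clean (writeback) the CPU caches for the whole buffer. */
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->nents,
			       DMA_BIDIRECTIONAL);
	return sgt;
}

On teardown the patch does the inverse, dma_sync_sg_for_cpu(), before freeing the table, as sync_and_unpin() in the diff below shows.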
@@ -47,10 +47,16 @@ static struct vgem_device {
 	struct platform_device *platform;
 } *vgem_device;
 
+static void sync_and_unpin(struct drm_vgem_gem_object *bo);
+static struct page **pin_and_sync(struct drm_vgem_gem_object *bo);
+
 static void vgem_gem_free_object(struct drm_gem_object *obj)
 {
 	struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
 
+	if (!obj->import_attach)
+		sync_and_unpin(vgem_obj);
+
 	kvfree(vgem_obj->pages);
 	mutex_destroy(&vgem_obj->pages_lock);
 
@@ -78,40 +84,15 @@ static vm_fault_t vgem_gem_fault(struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS;
 
 	mutex_lock(&obj->pages_lock);
+	if (!obj->pages)
+		pin_and_sync(obj);
 	if (obj->pages) {
 		get_page(obj->pages[page_offset]);
 		vmf->page = obj->pages[page_offset];
 		ret = 0;
 	}
 	mutex_unlock(&obj->pages_lock);
-	if (ret) {
-		struct page *page;
-
-		page = shmem_read_mapping_page(
-					file_inode(obj->base.filp)->i_mapping,
-					page_offset);
-		if (!IS_ERR(page)) {
-			vmf->page = page;
-			ret = 0;
-		} else switch (PTR_ERR(page)) {
-			case -ENOSPC:
-			case -ENOMEM:
-				ret = VM_FAULT_OOM;
-				break;
-			case -EBUSY:
-				ret = VM_FAULT_RETRY;
-				break;
-			case -EFAULT:
-			case -EINVAL:
-				ret = VM_FAULT_SIGBUS;
-				break;
-			default:
-				WARN_ON(PTR_ERR(page));
-				ret = VM_FAULT_SIGBUS;
-				break;
-		}
-
-	}
 	return ret;
 }

@@ -277,11 +258,33 @@ static const struct file_operations vgem_driver_fops = {
 	.release	= drm_release,
 };
 
-static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
+/* Called under pages_lock, except in free path (where it can't race): */
+static void sync_and_unpin(struct drm_vgem_gem_object *bo)
 {
-	mutex_lock(&bo->pages_lock);
-	if (bo->pages_pin_count++ == 0) {
+	struct drm_device *dev = bo->base.dev;
+
+	if (bo->table) {
+		dma_sync_sg_for_cpu(dev->dev, bo->table->sgl,
+				bo->table->nents, DMA_BIDIRECTIONAL);
+		sg_free_table(bo->table);
+		kfree(bo->table);
+		bo->table = NULL;
+	}
+
+	if (bo->pages) {
+		drm_gem_put_pages(&bo->base, bo->pages, true, true);
+		bo->pages = NULL;
+	}
+}
+
+static struct page **pin_and_sync(struct drm_vgem_gem_object *bo)
+{
+	struct drm_device *dev = bo->base.dev;
+	int npages = bo->base.size >> PAGE_SHIFT;
 	struct page **pages;
+	struct sg_table *sgt;
+
+	WARN_ON(!mutex_is_locked(&bo->pages_lock));
 
 	pages = drm_gem_get_pages(&bo->base);
 	if (IS_ERR(pages)) {
@@ -290,19 +293,58 @@ static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
 		return pages;
 	}
 
+	sgt = drm_prime_pages_to_sg(pages, npages);
+	if (IS_ERR(sgt)) {
+		dev_err(dev->dev,
+			"failed to allocate sgt: %ld\n",
+			PTR_ERR(sgt));
+		drm_gem_put_pages(&bo->base, pages, false, false);
+		return ERR_CAST(sgt);
+	}
+
+	/*
+	 * Flush the object from the CPU cache so that importers
+	 * can rely on coherent indirect access via the exported
+	 * dma-address.
+	 */
+	dma_sync_sg_for_device(dev->dev, sgt->sgl,
+			sgt->nents, DMA_BIDIRECTIONAL);
+
 	bo->pages = pages;
+	bo->table = sgt;
+
+	return pages;
+}
+
+static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
+{
+	struct page **pages;
+
+	mutex_lock(&bo->pages_lock);
+	if (bo->pages_pin_count++ == 0 && !bo->pages) {
+		pages = pin_and_sync(bo);
+	} else {
+		WARN_ON(!bo->pages);
+		pages = bo->pages;
+	}
 	mutex_unlock(&bo->pages_lock);
 
-	return bo->pages;
+	return pages;
 }
 
 static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
 {
+	/*
+	 * We shouldn't hit this for imported bo's; in the import
+	 * case we don't own the scatter-table.
+	 */
+	WARN_ON(bo->base.import_attach);
+
 	mutex_lock(&bo->pages_lock);
 	if (--bo->pages_pin_count == 0) {
-		drm_gem_put_pages(&bo->base, bo->pages, true, true);
-		bo->pages = NULL;
+		WARN_ON(!bo->table);
+		sync_and_unpin(bo);
 	}
 	mutex_unlock(&bo->pages_lock);
 }
@@ -310,18 +352,12 @@ static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
 static int vgem_prime_pin(struct drm_gem_object *obj)
 {
 	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
-	long n_pages = obj->size >> PAGE_SHIFT;
 	struct page **pages;
 
 	pages = vgem_pin_pages(bo);
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);
 
-	/* Flush the object from the CPU cache so that importers can rely
-	 * on coherent indirect access via the exported dma-address.
-	 */
-	drm_clflush_pages(pages, n_pages);
-
 	return 0;
 }
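
For completeness, the importer-side view that the export-time flush enables: a device that maps the exported buffer through the standard dma-buf API can rely on the returned dma-addresses pointing at coherent data. A hypothetical sketch, assuming a dmabuf handle and an importing dev already exist; the function name example_import_and_map is invented and nothing here is from the patch itself.

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

static int example_import_and_map(struct dma_buf *dmabuf, struct device *dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	/* Attach the importing device to the exported buffer. */
	attach = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attach))
		return PTR_ERR(attach);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, attach);
		return PTR_ERR(sgt);
	}

	/* ... program the device with sg_dma_address(sgt->sgl) ... */

	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(dmabuf, attach);
	return 0;
}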