Commit 831e9da7 authored by Tiago Vignatti, committed by Daniel Vetter

dma-buf: Remove range-based flush



This patch removes range-based information used for optimizations in
begin_cpu_access and end_cpu_access.

We don't have any users or implementations of range-based flush. There seems
to be a consensus that if we ever want something like that again (or something
even more robust using 2D/3D sub-range regions), we can use the upcoming
dma-buf sync ioctl for it.

Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Daniel Vetter <daniel.vetter@intel.com>
Signed-off-by: Tiago Vignatti <tiago.vignatti@intel.com>
Reviewed-by: Stéphane Marchesin <marcheu@chromium.org>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1450820214-12509-3-git-send-email-tiago.vignatti@intel.com
parent bfe981a0
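
For quick reference, the net effect on the two kernel CPU-access entry points,
taken directly from the hunks below:

/* Before this patch: range-based variants */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
			     enum dma_data_direction direction);
void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
			    enum dma_data_direction direction);

/* After this patch: direction only, no sub-range */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
			     enum dma_data_direction direction);
void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			    enum dma_data_direction direction);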
+8 −11
@@ -257,17 +257,15 @@ Access to a dma_buf from the kernel context involves three steps:

   Interface:
      int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
-				   size_t start, size_t len,
				   enum dma_data_direction direction)

   This allows the exporter to ensure that the memory is actually available for
   cpu access - the exporter might need to allocate or swap-in and pin the
   backing storage. The exporter also needs to ensure that cpu access is
-   coherent for the given range and access direction. The range and access
-   direction can be used by the exporter to optimize the cache flushing, i.e.
-   access outside of the range or with a different direction (read instead of
-   write) might return stale or even bogus data (e.g. when the exporter needs to
-   copy the data to temporary storage).
+   coherent for the access direction. The direction can be used by the exporter
+   to optimize the cache flushing, i.e. access with a different direction (read
+   instead of write) might return stale or even bogus data (e.g. when the
+   exporter needs to copy the data to temporary storage).

   This step might fail, e.g. in oom conditions.

@@ -322,14 +320,13 @@ Access to a dma_buf from the kernel context involves three steps:

3. Finish access

-   When the importer is done accessing the range specified in begin_cpu_access,
-   it needs to announce this to the exporter (to facilitate cache flushing and
-   unpinning of any pinned resources). The result of any dma_buf kmap calls
-   after end_cpu_access is undefined.
+   When the importer is done accessing the CPU, it needs to announce this to
+   the exporter (to facilitate cache flushing and unpinning of any pinned
+   resources). The result of any dma_buf kmap calls after end_cpu_access is
+   undefined.

   Interface:
      void dma_buf_end_cpu_access(struct dma_buf *dma_buf,
-				  size_t start, size_t len,
				  enum dma_data_direction dir);
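
   As a usage illustration (not part of the patch): a minimal sketch of the
   three-step kernel access described above, using the new rangeless
   prototypes. The function name and page index are made up for the example,
   and error handling is abbreviated.

      #include <linux/dma-buf.h>
      #include <linux/dma-direction.h>

      static int example_cpu_read(struct dma_buf *dmabuf)
      {
              void *vaddr;
              int ret;

              /* 1. Prepare: the exporter pins/swaps in the backing storage. */
              ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
              if (ret)
                      return ret;     /* may fail, e.g. under oom */

              /* 2. Access: map one page and read through the CPU mapping. */
              vaddr = dma_buf_kmap(dmabuf, 0);
              if (vaddr) {
                      /* ... consume the data at vaddr ... */
                      dma_buf_kunmap(dmabuf, 0, vaddr);
              }

              /* 3. Finish: kmap results are undefined after this point. */
              dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
              return 0;
      }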


+4 −9
@@ -539,13 +539,11 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
 * preparations. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
- * @start:	[in]	start of range for cpu access.
- * @len:	[in]	length of range for cpu access.
 * @direction:	[in]	length of range for cpu access.
 *
 * Can return negative error values, returns 0 on success.
 */
-int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
			     enum dma_data_direction direction)
{
	int ret = 0;
@@ -554,8 +552,7 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
		return -EINVAL;

	if (dmabuf->ops->begin_cpu_access)
-		ret = dmabuf->ops->begin_cpu_access(dmabuf, start,
-							len, direction);
+		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

	return ret;
}
@@ -567,19 +564,17 @@ EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
 * actions. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
- * @start:	[in]	start of range for cpu access.
- * @len:	[in]	length of range for cpu access.
 * @direction:	[in]	length of range for cpu access.
 *
 * This call must always succeed.
 */
-void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			    enum dma_data_direction direction)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->end_cpu_access)
-		dmabuf->ops->end_cpu_access(dmabuf, start, len, direction);
+		dmabuf->ops->end_cpu_access(dmabuf, direction);
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
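
A hedged sketch (not part of this patch) of the exporter side after the change:
the core code above now calls ->begin_cpu_access/->end_cpu_access without a
range, so an exporter's struct dma_buf_ops callbacks take only the buffer and
the direction. All my_* names and helpers here are hypothetical.

static int my_exporter_begin_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	struct my_buffer *buf = dmabuf->priv;	/* exporter-private state */

	/* Pin the backing storage and make the whole buffer coherent for 'direction'. */
	return my_buffer_pin_and_flush(buf, direction);
}

static void my_exporter_end_cpu_access(struct dma_buf *dmabuf,
				       enum dma_data_direction direction)
{
	struct my_buffer *buf = dmabuf->priv;

	/* Write back CPU caches if needed, then drop the pin taken in begin_cpu_access. */
	my_buffer_flush_and_unpin(buf, direction);
}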

+1 −1
@@ -196,7 +196,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
	return -EINVAL;
}

-static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
+static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
+2 −2
@@ -79,7 +79,7 @@ static void omap_gem_dmabuf_release(struct dma_buf *buffer)


static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
-		size_t start, size_t len, enum dma_data_direction dir)
+		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;
@@ -94,7 +94,7 @@ static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
}

static void omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
-		size_t start, size_t len, enum dma_data_direction dir)
+		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;
	omap_gem_put_pages(obj);
+0 −2
@@ -409,7 +409,6 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,

	if (ufb->obj->base.import_attach) {
		ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf,
-					       0, ufb->obj->base.size,
					       DMA_FROM_DEVICE);
		if (ret)
			goto unlock;
@@ -425,7 +424,6 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,

	if (ufb->obj->base.import_attach) {
		dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
-				       0, ufb->obj->base.size,
				       DMA_FROM_DEVICE);
	}
