Commit e46f468f authored by Dave Airlie
Browse files

drm/ttm: drop special pipeline accel cleanup function.



The two accel cleanup paths were mostly the same once refactored.

Just pass a bool to say if the evictions are to be pipelined.

Signed-off-by: Dave Airlie <airlied@redhat.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200917064132.148521-2-airlied@gmail.com
parent 92afce90
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -500,9 +500,9 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,

	/* Always block for VM page tables before committing the new location */
	if (bo->type == ttm_bo_type_kernel)
		r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem);
		r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
	else
		r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
		r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
	dma_fence_put(fence);
	return r;

+1 −1
Original line number Diff line number Diff line
@@ -824,7 +824,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
			if (ret == 0) {
				ret = ttm_bo_move_accel_cleanup(bo,
								&fence->base,
								evict,
								evict, false,
								new_reg);
				nouveau_fence_unref(&fence);
			}
+1 −1
Original line number Diff line number Diff line
@@ -200,7 +200,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, new_mem);
	r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false, new_mem);
	radeon_fence_unref(&fence);
	return r;
}
+32 −57
Original line number Diff line number Diff line
@@ -580,53 +580,16 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
	return 0;
}

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      struct ttm_resource *new_mem)
static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
				       struct dma_fence *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret;

	dma_resv_add_excl_fence(bo->base.resv, fence);
	if (evict)
		ret = ttm_bo_wait_free_node(bo, man->use_tt);
	else
		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
	if (ret)
		return ret;

	ttm_bo_assign_mem(bo, new_mem);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
			 struct dma_fence *fence, bool evict,
			 struct ttm_resource *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;

	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
	struct ttm_resource_manager *to = ttm_manager_type(bdev, new_mem->mem_type);

	int ret;

	dma_resv_add_excl_fence(bo->base.resv, fence);

	if (!evict) {
		ret = ttm_bo_move_to_ghost(bo, fence, to->use_tt);
		if (ret)
			return ret;
	} else if (!from->use_tt) {

	/**
	 * BO doesn't have a TTM we need to bind/unbind. Just remember
	 * this eviction and free up the allocation
	 */

	spin_lock(&from->move_lock);
	if (!from->move || dma_fence_is_later(fence, from->move)) {
		dma_fence_put(from->move);
@@ -638,23 +601,35 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,

	dma_fence_put(bo->moving);
	bo->moving = dma_fence_get(fence);
}

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret = 0;

	dma_resv_add_excl_fence(bo->base.resv, fence);
	if (!evict)
		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
	else if (!from->use_tt && pipeline)
		ttm_bo_move_pipeline_evict(bo, fence);
	else
		ret = ttm_bo_wait_free_node(bo, man->use_tt);

	} else {
		/**
		 * Last resort, wait for the move to be completed.
		 *
		 * Should never happen in practice.
		 */
		ret = ttm_bo_wait_free_node(bo, to->use_tt);
	if (ret)
		return ret;
	}

	ttm_bo_assign_mem(bo, new_mem);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_pipeline_move);
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
+2 −15
Original line number Diff line number Diff line
@@ -642,6 +642,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @pipeline: evictions are to be pipelined.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
@@ -653,21 +654,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
 */
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence, bool evict,
			      struct ttm_resource *new_mem);

/**
 * ttm_bo_pipeline_move.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Function for pipelining accelerated moves. Either free the memory
 * immediately or hang it on a temporary buffer object.
 */
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
			 struct dma_fence *fence, bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem);

/**