Commit 4251fa5f authored by Thomas Hellstrom's avatar Thomas Hellstrom
Browse files

drm/vmwgfx: Assign eviction priorities to resources



TTM provides a means to assign eviction priorities to buffer objects. This
means that all buffer objects with a lower priority will be evicted first
on memory pressure.
Use this to make sure surfaces, and in particular non-dirty surfaces, are
evicted first. Evicting shaders, cotables and contexts in particular implies
a significant performance hit on vmwgfx, so make sure these resources are
evicted last.
Some buffer objects are sub-allocated in user-space which means we can have
many resources attached to a single buffer object or resource. In that case
the buffer object is given the highest priority of the attached resources.

Signed-off-by: default avatarThomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: default avatarDeepak Rawat <drawat@vmware.com>
Reviewed-by: default avatarEmil Velikov <emil.velikov@collabora.com>
parent 561564be
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -510,6 +510,8 @@ int vmw_bo_init(struct vmw_private *dev_priv,

	acc_size = vmw_bo_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));
	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
	vmw_bo->base.priority = 3;

	INIT_LIST_HEAD(&vmw_bo->res_list);

+4 −0
Original line number Diff line number Diff line
@@ -88,6 +88,8 @@ static const struct vmw_res_func vmw_gb_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
	.type_name = "guest backed contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_context_create,
@@ -100,6 +102,8 @@ static const struct vmw_res_func vmw_dx_context_func = {
	.res_type = vmw_res_dx_context,
	.needs_backup = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
	.type_name = "dx contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_dx_context_create,
+8 −5
Original line number Diff line number Diff line
@@ -116,6 +116,8 @@ static const struct vmw_res_func vmw_cotable_func = {
	.res_type = vmw_res_cotable,
	.needs_backup = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
	.type_name = "context guest backed object tables",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_cotable_create,
@@ -307,7 +309,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;

	if (list_empty(&res->mob_head))
	if (!vmw_resource_mob_attached(res))
		return 0;

	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
@@ -453,6 +455,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
		goto out_wait;
	}

	vmw_resource_mob_detach(res);
	res->backup = buf;
	res->backup_size = new_size;
	vcotbl->size_read_back = cur_size_read_back;
@@ -467,12 +470,12 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
		res->backup = old_buf;
		res->backup_size = old_size;
		vcotbl->size_read_back = old_size_read_back;
		vmw_resource_mob_attach(res);
		goto out_wait;
	}

	vmw_resource_mob_attach(res);
	/* Let go of the old mob. */
	list_del(&res->mob_head);
	list_add_tail(&res->mob_head, &buf->res_list);
	vmw_bo_unreference(&old_buf);
	res->id = vcotbl->type;

@@ -496,7 +499,7 @@ out_wait:
 * is called before bind() in the validation sequence is instead used for two
 * things.
 * 1) Unscrub the cotable if it is scrubbed and still attached to a backup
 *    buffer, that is, if @res->mob_head is non-empty.
 *    buffer.
 * 2) Resize the cotable if needed.
 */
static int vmw_cotable_create(struct vmw_resource *res)
@@ -512,7 +515,7 @@ static int vmw_cotable_create(struct vmw_resource *res)
		new_size *= 2;

	if (likely(new_size <= res->backup_size)) {
		if (vcotbl->scrubbed && !list_empty(&res->mob_head)) {
		if (vcotbl->scrubbed && vmw_resource_mob_attached(res)) {
			ret = vmw_cotable_unscrub(res);
			if (ret)
				return ret;
+72 −0
Original line number Diff line number Diff line
@@ -86,6 +86,15 @@ struct vmw_fpriv {
	bool gb_aware; /* user-space is guest-backed aware */
};

/**
 * struct vmw_buffer_object - TTM buffer object with vmwgfx additions
 * @base: The TTM buffer object
 * @res_list: List of resources using this buffer object as a backing MOB
 * @pin_count: pin depth
 * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
 * @map: Kmap object for semi-persistent mappings
 * @res_prios: Eviction priority counts for attached resources
 */
struct vmw_buffer_object {
	struct ttm_buffer_object base;
	struct list_head res_list;
@@ -94,6 +103,7 @@ struct vmw_buffer_object {
	struct vmw_resource *dx_query_ctx;
	/* Protected by reservation */
	struct ttm_bo_kmap_obj map;
	u32 res_prios[TTM_MAX_BO_PRIORITY];
};

/**
@@ -145,6 +155,7 @@ struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;
	u32 used_prio;
	unsigned long backup_size;
	bool res_dirty;
	bool backup_dirty;
@@ -709,6 +720,19 @@ extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
void vmw_resource_mob_attach(struct vmw_resource *res);
void vmw_resource_mob_detach(struct vmw_resource *res);

/**
 * vmw_resource_mob_attached - Whether a resource currently has a mob attached
 * @res: The resource
 *
 * Attachment is tracked purely by @res->mob_head membership on the backing
 * buffer object's resource list, so an empty list head means "detached".
 *
 * Return: true if the resource has a mob attached, false otherwise.
 */
static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
{
	return !list_empty(&res->mob_head);
}

/**
 * vmw_user_resource_noref_release - release a user resource pointer looked up
@@ -787,6 +811,54 @@ static inline void vmw_user_bo_noref_release(void)
	ttm_base_object_noref_release();
}

/**
 * vmw_bo_prio_adjust - Adjust the buffer object eviction priority
 * according to attached resources
 * @vbo: The struct vmw_buffer_object
 *
 * Scans the per-priority resource counters from highest priority index
 * downwards and assigns the first (i.e. highest) in-use priority to the
 * underlying TTM buffer object. If no resources are attached, the default
 * priority 3 is used.
 */
static inline void vmw_bo_prio_adjust(struct vmw_buffer_object *vbo)
{
	int i = ARRAY_SIZE(vbo->res_prios);

	/* Walk from the highest priority down to find a non-zero count. */
	while (i--) {
		if (vbo->res_prios[i]) {
			vbo->base.priority = i;
			return;
		}
	}

	/* No attached resources: fall back to the default bo priority. */
	vbo->base.priority = 3;
}

/**
 * vmw_bo_prio_add - Notify a buffer object of a newly attached resource
 * eviction priority
 * @vbo: The struct vmw_buffer_object
 * @prio: The resource priority
 *
 * After being notified, the code assigns the highest resource eviction priority
 * to the backing buffer object (mob). The counters are only re-scanned when
 * the count for @prio transitions from zero, since otherwise the bo priority
 * cannot have changed.
 */
static inline void vmw_bo_prio_add(struct vmw_buffer_object *vbo, int prio)
{
	if (vbo->res_prios[prio]++ == 0)
		vmw_bo_prio_adjust(vbo);
}

/**
 * vmw_bo_prio_del - Notify a buffer object of a resource with a certain
 * priority being removed
 * @vbo: The struct vmw_buffer_object
 * @prio: The resource priority
 *
 * After being notified, the code assigns the highest resource eviction priority
 * to the backing buffer object (mob). The counters are only re-scanned when
 * the count for @prio drops to zero, since otherwise the bo priority cannot
 * have changed. Callers must pair each call with an earlier vmw_bo_prio_add()
 * for the same priority, or the counter underflows.
 */
static inline void vmw_bo_prio_del(struct vmw_buffer_object *vbo, int prio)
{
	if (--vbo->res_prios[prio] == 0)
		vmw_bo_prio_adjust(vbo);
}

/**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
+43 −13
Original line number Diff line number Diff line
@@ -34,6 +34,37 @@

#define VMW_RES_EVICT_ERR_COUNT 10

/**
 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
 * @res: The resource
 *
 * Caller must hold the backup buffer object's reservation lock (enforced
 * via lockdep below). The eviction priority in effect at attach time is
 * recorded in @res->used_prio so that the matching detach releases the
 * same priority count even if @res->res_dirty changes in between.
 */
void vmw_resource_mob_attach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;

	lockdep_assert_held(&backup->base.resv->lock.base);
	/* Dirty resources get their (higher-cost) dirty eviction priority. */
	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
		res->func->prio;
	list_add_tail(&res->mob_head, &backup->res_list);
	vmw_bo_prio_add(backup, res->used_prio);
}

/**
 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
 * @res: The resource
 *
 * Caller must hold the backup buffer object's reservation lock (enforced
 * via lockdep below). A no-op when the resource is not currently attached,
 * so it is safe to call unconditionally. Releases the priority count that
 * was recorded in @res->used_prio at attach time.
 */
void vmw_resource_mob_detach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;

	lockdep_assert_held(&backup->base.resv->lock.base);
	if (vmw_resource_mob_attached(res)) {
		list_del_init(&res->mob_head);
		vmw_bo_prio_del(backup, res->used_prio);
	}
}


struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
@@ -80,7 +111,7 @@ static void vmw_resource_release(struct kref *kref)
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, NULL);
		if (!list_empty(&res->mob_head) &&
		if (vmw_resource_mob_attached(res) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

@@ -89,7 +120,7 @@ static void vmw_resource_release(struct kref *kref)
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		list_del_init(&res->mob_head);
		vmw_resource_mob_detach(res);
		ttm_bo_unreserve(bo);
		vmw_bo_unreference(&res->backup);
	}
@@ -179,6 +210,7 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	res->used_prio = 3;
	if (delay_id)
		return 0;
	else
@@ -355,14 +387,14 @@ static int vmw_resource_do_validate(struct vmw_resource *res,
	}

	if (func->bind &&
	    ((func->needs_backup && list_empty(&res->mob_head) &&
	    ((func->needs_backup && !vmw_resource_mob_attached(res) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			list_add_tail(&res->mob_head, &res->backup->res_list);
			vmw_resource_mob_attach(res);
	}

	return 0;
@@ -402,15 +434,13 @@ void vmw_resource_unreserve(struct vmw_resource *res,

	if (switch_backup && new_backup != res->backup) {
		if (res->backup) {
			lockdep_assert_held(&res->backup->base.resv->lock.base);
			list_del_init(&res->mob_head);
			vmw_resource_mob_detach(res);
			vmw_bo_unreference(&res->backup);
		}

		if (new_backup) {
			res->backup = vmw_bo_reference(new_backup);
			lockdep_assert_held(&new_backup->base.resv->lock.base);
			list_add_tail(&res->mob_head, &new_backup->res_list);
			vmw_resource_mob_attach(res);
		} else {
			res->backup = NULL;
		}
@@ -469,7 +499,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && list_empty(&res->mob_head))
	if (res->func->needs_backup && !vmw_resource_mob_attached(res))
		return 0;

	backup_dirty = res->backup_dirty;
@@ -574,11 +604,11 @@ static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
		     (!func->needs_backup || vmw_resource_mob_attached(res)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		list_del_init(&res->mob_head);
		vmw_resource_mob_detach(res);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
@@ -660,7 +690,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr)
	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		list_del_init(&res->mob_head);
		WARN_ON_ONCE(vmw_resource_mob_attached(res));
		vmw_bo_unreference(&res->backup);
	}

@@ -699,7 +729,7 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
		(void) res->func->unbind(res, res->res_dirty, &val_buf);
		res->backup_dirty = true;
		res->res_dirty = false;
		list_del_init(&res->mob_head);
		vmw_resource_mob_detach(res);
	}

	(void) ttm_bo_wait(&vbo->base, false, false);
Loading