Commit 528e083d authored by Christian König, committed by Alex Deucher

drm/amdgpu: rename rmn to amn in the MMU notifier code (v2)



The "rmn" name is just a copy&paste leftover from radeon.

v2: rebase (Alex)

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent abea57d7
+70 −70
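For orientation while reading the hunks below: the patch is a purely mechanical rename of the local MMU notifier context from "rmn" to "amn". As a reading aid, here is a rough sketch of the struct amdgpu_mn fields the patch touches, reconstructed only from the member accesses visible in this diff; the authoritative definition lives in the amdgpu MMU notifier source and may have a different layout or additional members.

#include <linux/mmu_notifier.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct amdgpu_device;

/* GFX vs. HSA flavour; these two enumerators are the ones used in this patch */
enum amdgpu_mn_type {
	AMDGPU_MN_TYPE_GFX,
	AMDGPU_MN_TYPE_HSA,
};

/*
 * Reconstructed from the accesses in the hunks below (amn->adev, amn->mm,
 * amn->mn.ops, amn->lock, ...); not the authoritative definition.
 */
struct amdgpu_mn {
	/* constant after initialisation in amdgpu_mn_get() */
	struct amdgpu_device	*adev;
	struct mm_struct	*mm;
	struct mmu_notifier	mn;
	enum amdgpu_mn_type	type;

	/* only used on destruction (amdgpu_mn_destroy work item) */
	struct work_struct	work;

	/* entry in adev->mn_hash, protected by adev->mn_lock */
	struct hlist_node	node;

	/* interval tree of registered BOs, protected by lock */
	struct rw_semaphore	lock;
	struct rb_root_cached	objects;
	struct mutex		read_lock;
	atomic_t		recursion;
};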
@@ -64,7 +64,7 @@ struct amdgpu_mn_node {
 };
 
 /**
- * amdgpu_mn_destroy - destroy the rmn
+ * amdgpu_mn_destroy - destroy the amn
  *
  * @work: previously sheduled work item
  *
@@ -72,26 +72,26 @@ struct amdgpu_mn_node {
  */
 static void amdgpu_mn_destroy(struct work_struct *work)
 {
-	struct amdgpu_mn *rmn = container_of(work, struct amdgpu_mn, work);
-	struct amdgpu_device *adev = rmn->adev;
+	struct amdgpu_mn *amn = container_of(work, struct amdgpu_mn, work);
+	struct amdgpu_device *adev = amn->adev;
 	struct amdgpu_mn_node *node, *next_node;
 	struct amdgpu_bo *bo, *next_bo;
 
 	mutex_lock(&adev->mn_lock);
-	down_write(&rmn->lock);
-	hash_del(&rmn->node);
+	down_write(&amn->lock);
+	hash_del(&amn->node);
 	rbtree_postorder_for_each_entry_safe(node, next_node,
-					     &rmn->objects.rb_root, it.rb) {
+					     &amn->objects.rb_root, it.rb) {
 		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
 			bo->mn = NULL;
 			list_del_init(&bo->mn_list);
 		}
 		kfree(node);
 	}
-	up_write(&rmn->lock);
+	up_write(&amn->lock);
 	mutex_unlock(&adev->mn_lock);
-	mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
-	kfree(rmn);
+	mmu_notifier_unregister_no_release(&amn->mn, amn->mm);
+	kfree(amn);
 }
 
 /**
@@ -105,9 +105,9 @@ static void amdgpu_mn_destroy(struct work_struct *work)
 static void amdgpu_mn_release(struct mmu_notifier *mn,
 			      struct mm_struct *mm)
 {
-	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
-	INIT_WORK(&rmn->work, amdgpu_mn_destroy);
-	schedule_work(&rmn->work);
+	struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
+	INIT_WORK(&amn->work, amdgpu_mn_destroy);
+	schedule_work(&amn->work);
 }
 
 
@@ -130,31 +130,31 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn)
 }
 
 /**
- * amdgpu_mn_read_lock - take the rmn read lock
+ * amdgpu_mn_read_lock - take the amn read lock
  *
- * @rmn: our notifier
+ * @amn: our notifier
  *
- * Take the rmn read side lock.
+ * Take the amn read side lock.
  */
-static void amdgpu_mn_read_lock(struct amdgpu_mn *rmn)
+static void amdgpu_mn_read_lock(struct amdgpu_mn *amn)
 {
-	mutex_lock(&rmn->read_lock);
-	if (atomic_inc_return(&rmn->recursion) == 1)
-		down_read_non_owner(&rmn->lock);
-	mutex_unlock(&rmn->read_lock);
+	mutex_lock(&amn->read_lock);
+	if (atomic_inc_return(&amn->recursion) == 1)
+		down_read_non_owner(&amn->lock);
+	mutex_unlock(&amn->read_lock);
 }
 
 /**
- * amdgpu_mn_read_unlock - drop the rmn read lock
+ * amdgpu_mn_read_unlock - drop the amn read lock
  *
- * @rmn: our notifier
+ * @amn: our notifier
  *
- * Drop the rmn read side lock.
+ * Drop the amn read side lock.
  */
-static void amdgpu_mn_read_unlock(struct amdgpu_mn *rmn)
+static void amdgpu_mn_read_unlock(struct amdgpu_mn *amn)
 {
-	if (atomic_dec_return(&rmn->recursion) == 0)
-		up_read_non_owner(&rmn->lock);
+	if (atomic_dec_return(&amn->recursion) == 0)
+		up_read_non_owner(&amn->lock);
 }
 
 /**
@@ -202,15 +202,15 @@ static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
 						 unsigned long start,
 						 unsigned long end)
 {
-	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+	struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
 	struct interval_tree_node *it;
 
 	/* notification is exclusive, but interval is inclusive */
 	end -= 1;
 
-	amdgpu_mn_read_lock(rmn);
+	amdgpu_mn_read_lock(amn);
 
-	it = interval_tree_iter_first(&rmn->objects, start, end);
+	it = interval_tree_iter_first(&amn->objects, start, end);
 	while (it) {
 		struct amdgpu_mn_node *node;
 
@@ -238,15 +238,15 @@ static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
 						 unsigned long start,
 						 unsigned long end)
 {
-	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+	struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
 	struct interval_tree_node *it;
 
 	/* notification is exclusive, but interval is inclusive */
 	end -= 1;
 
-	amdgpu_mn_read_lock(rmn);
+	amdgpu_mn_read_lock(amn);
 
-	it = interval_tree_iter_first(&rmn->objects, start, end);
+	it = interval_tree_iter_first(&amn->objects, start, end);
 	while (it) {
 		struct amdgpu_mn_node *node;
 		struct amdgpu_bo *bo;
@@ -279,9 +279,9 @@ static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
 					   unsigned long start,
 					   unsigned long end)
 {
-	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+	struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
 
-	amdgpu_mn_read_unlock(rmn);
+	amdgpu_mn_read_unlock(amn);
 }
 
 static const struct mmu_notifier_ops amdgpu_mn_ops[] = {
@@ -315,7 +315,7 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
 				enum amdgpu_mn_type type)
 {
 	struct mm_struct *mm = current->mm;
-	struct amdgpu_mn *rmn;
+	struct amdgpu_mn *amn;
 	unsigned long key = AMDGPU_MN_KEY(mm, type);
 	int r;
 
@@ -325,41 +325,41 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
 		return ERR_PTR(-EINTR);
 	}
 
-	hash_for_each_possible(adev->mn_hash, rmn, node, key)
-		if (AMDGPU_MN_KEY(rmn->mm, rmn->type) == key)
+	hash_for_each_possible(adev->mn_hash, amn, node, key)
+		if (AMDGPU_MN_KEY(amn->mm, amn->type) == key)
 			goto release_locks;
 
-	rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
-	if (!rmn) {
-		rmn = ERR_PTR(-ENOMEM);
+	amn = kzalloc(sizeof(*amn), GFP_KERNEL);
+	if (!amn) {
+		amn = ERR_PTR(-ENOMEM);
 		goto release_locks;
 	}
 
-	rmn->adev = adev;
-	rmn->mm = mm;
-	init_rwsem(&rmn->lock);
-	rmn->type = type;
-	rmn->mn.ops = &amdgpu_mn_ops[type];
-	rmn->objects = RB_ROOT_CACHED;
-	mutex_init(&rmn->read_lock);
-	atomic_set(&rmn->recursion, 0);
+	amn->adev = adev;
+	amn->mm = mm;
+	init_rwsem(&amn->lock);
+	amn->type = type;
+	amn->mn.ops = &amdgpu_mn_ops[type];
+	amn->objects = RB_ROOT_CACHED;
+	mutex_init(&amn->read_lock);
+	atomic_set(&amn->recursion, 0);
 
-	r = __mmu_notifier_register(&rmn->mn, mm);
+	r = __mmu_notifier_register(&amn->mn, mm);
 	if (r)
-		goto free_rmn;
+		goto free_amn;
 
-	hash_add(adev->mn_hash, &rmn->node, AMDGPU_MN_KEY(mm, type));
+	hash_add(adev->mn_hash, &amn->node, AMDGPU_MN_KEY(mm, type));
 
 release_locks:
 	up_write(&mm->mmap_sem);
 	mutex_unlock(&adev->mn_lock);
 
-	return rmn;
+	return amn;
 
-free_rmn:
+free_amn:
 	up_write(&mm->mmap_sem);
 	mutex_unlock(&adev->mn_lock);
-	kfree(rmn);
+	kfree(amn);
 
 	return ERR_PTR(r);
 }
@@ -379,14 +379,14 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	enum amdgpu_mn_type type =
 		bo->kfd_bo ? AMDGPU_MN_TYPE_HSA : AMDGPU_MN_TYPE_GFX;
-	struct amdgpu_mn *rmn;
+	struct amdgpu_mn *amn;
 	struct amdgpu_mn_node *node = NULL, *new_node;
 	struct list_head bos;
 	struct interval_tree_node *it;
 
-	rmn = amdgpu_mn_get(adev, type);
-	if (IS_ERR(rmn))
-		return PTR_ERR(rmn);
+	amn = amdgpu_mn_get(adev, type);
+	if (IS_ERR(amn))
+		return PTR_ERR(amn);
 
 	new_node = kmalloc(sizeof(*new_node), GFP_KERNEL);
 	if (!new_node)
@@ -394,12 +394,12 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)

 	INIT_LIST_HEAD(&bos);
 
-	down_write(&rmn->lock);
+	down_write(&amn->lock);
 
-	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
+	while ((it = interval_tree_iter_first(&amn->objects, addr, end))) {
 		kfree(node);
 		node = container_of(it, struct amdgpu_mn_node, it);
-		interval_tree_remove(&node->it, &rmn->objects);
+		interval_tree_remove(&node->it, &amn->objects);
 		addr = min(it->start, addr);
 		end = max(it->last, end);
 		list_splice(&node->bos, &bos);
@@ -410,7 +410,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 	else
 		kfree(new_node);
 
-	bo->mn = rmn;
+	bo->mn = amn;
 
 	node->it.start = addr;
 	node->it.last = end;
@@ -418,9 +418,9 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 	list_splice(&bos, &node->bos);
 	list_add(&bo->mn_list, &node->bos);
 
-	interval_tree_insert(&node->it, &rmn->objects);
+	interval_tree_insert(&node->it, &amn->objects);
 
-	up_write(&rmn->lock);
+	up_write(&amn->lock);
 
 	return 0;
 }
@@ -435,18 +435,18 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	struct amdgpu_mn *rmn;
+	struct amdgpu_mn *amn;
 	struct list_head *head;
 
 	mutex_lock(&adev->mn_lock);
 
-	rmn = bo->mn;
-	if (rmn == NULL) {
+	amn = bo->mn;
+	if (amn == NULL) {
 		mutex_unlock(&adev->mn_lock);
 		return;
 	}
 
-	down_write(&rmn->lock);
+	down_write(&amn->lock);
 
 	/* save the next list entry for later */
 	head = bo->mn_list.next;
@@ -457,11 +457,11 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 	if (list_empty(head)) {
 		struct amdgpu_mn_node *node;
 		node = container_of(head, struct amdgpu_mn_node, bos);
-		interval_tree_remove(&node->it, &rmn->objects);
+		interval_tree_remove(&node->it, &amn->objects);
 		kfree(node);
 	}
 
-	up_write(&rmn->lock);
+	up_write(&amn->lock);
 	mutex_unlock(&adev->mn_lock);
 }
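
Behaviour is unchanged by this patch, but the one non-obvious piece of the renamed code is the recursive read lock taken around the invalidate_range_start/end callbacks. A minimal, self-contained sketch of that pattern follows; the type and function names are made up for the example and are not the driver's actual API.

#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>

/*
 * Illustrative sketch of the locking pattern in amdgpu_mn_read_lock()/
 * amdgpu_mn_read_unlock() above: the outermost notifier callback takes the
 * rwsem for reading, nested callbacks only bump the recursion counter, and
 * the last unlock releases the rwsem again.
 */
struct mn_ctx {
	struct rw_semaphore lock;	/* held for read across an invalidation; writers are excluded */
	struct mutex read_lock;		/* serialises the counter check with taking the rwsem */
	atomic_t recursion;		/* nesting depth of start/end callbacks */
};

static void mn_ctx_read_lock(struct mn_ctx *ctx)
{
	mutex_lock(&ctx->read_lock);
	/* only the outermost callback actually takes the semaphore */
	if (atomic_inc_return(&ctx->recursion) == 1)
		down_read_non_owner(&ctx->lock);
	mutex_unlock(&ctx->read_lock);
}

static void mn_ctx_read_unlock(struct mn_ctx *ctx)
{
	/* drop the semaphore when the outermost callback finishes */
	if (atomic_dec_return(&ctx->recursion) == 0)
		up_read_non_owner(&ctx->lock);
}

The non-owner rwsem variants are used presumably because, with the recursion counter shared across callers, the context that ends up releasing the semaphore is not necessarily the one that acquired it; the mutex merely ensures a caller that only increments the counter cannot proceed before the first caller has actually taken the semaphore.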