Commit ba74d0cb authored by Eric Sandeen, committed by Lachlan McIlroy
Browse files

[XFS] Unwrap mru_lock.



Un-obfuscate mru_lock, remove mutex_lock->spin_lock macros, call spin_lock
directly, remove extraneous cookie holdover from old xfs code.

SGI-PV: 970382
SGI-Modid: xfs-linux-melb:xfs-kern:29745a

Signed-off-by: Eric Sandeen <sandeen@sandeen.net>
Signed-off-by: Donald Douwsma <donaldd@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
parent 703e1f0f
Loading
Loading
Loading
Loading
+17 −17
Original line number Diff line number Diff line
@@ -245,7 +245,7 @@ _xfs_mru_cache_clear_reap_list(
		 */
		list_move(&elem->list_node, &tmp);
	}
	mutex_spinunlock(&mru->lock, 0);
	spin_unlock(&mru->lock);

	list_for_each_entry_safe(elem, next, &tmp, list_node) {

@@ -259,7 +259,7 @@ _xfs_mru_cache_clear_reap_list(
		kmem_zone_free(xfs_mru_elem_zone, elem);
	}

	mutex_spinlock(&mru->lock);
	spin_lock(&mru->lock);
}

/*
@@ -280,7 +280,7 @@ _xfs_mru_cache_reap(
	if (!mru || !mru->lists)
		return;

	mutex_spinlock(&mru->lock);
	spin_lock(&mru->lock);
	next = _xfs_mru_cache_migrate(mru, jiffies);
	_xfs_mru_cache_clear_reap_list(mru);

@@ -294,7 +294,7 @@ _xfs_mru_cache_reap(
		queue_delayed_work(xfs_mru_reap_wq, &mru->work, next);
	}

	mutex_spinunlock(&mru->lock, 0);
	spin_unlock(&mru->lock);
}

int
@@ -398,17 +398,17 @@ xfs_mru_cache_flush(
	if (!mru || !mru->lists)
		return;

	mutex_spinlock(&mru->lock);
	spin_lock(&mru->lock);
	if (mru->queued) {
		mutex_spinunlock(&mru->lock, 0);
		spin_unlock(&mru->lock);
		cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
		mutex_spinlock(&mru->lock);
		spin_lock(&mru->lock);
	}

	_xfs_mru_cache_migrate(mru, jiffies + mru->grp_count * mru->grp_time);
	_xfs_mru_cache_clear_reap_list(mru);

	mutex_spinunlock(&mru->lock, 0);
	spin_unlock(&mru->lock);
}

void
@@ -454,13 +454,13 @@ xfs_mru_cache_insert(
	elem->key = key;
	elem->value = value;

	mutex_spinlock(&mru->lock);
	spin_lock(&mru->lock);

	radix_tree_insert(&mru->store, key, elem);
	radix_tree_preload_end();
	_xfs_mru_cache_list_insert(mru, elem);

	mutex_spinunlock(&mru->lock, 0);
	spin_unlock(&mru->lock);

	return 0;
}
@@ -483,14 +483,14 @@ xfs_mru_cache_remove(
	if (!mru || !mru->lists)
		return NULL;

	mutex_spinlock(&mru->lock);
	spin_lock(&mru->lock);
	elem = radix_tree_delete(&mru->store, key);
	if (elem) {
		value = elem->value;
		list_del(&elem->list_node);
	}

	mutex_spinunlock(&mru->lock, 0);
	spin_unlock(&mru->lock);

	if (elem)
		kmem_zone_free(xfs_mru_elem_zone, elem);
@@ -540,14 +540,14 @@ xfs_mru_cache_lookup(
	if (!mru || !mru->lists)
		return NULL;

	mutex_spinlock(&mru->lock);
	spin_lock(&mru->lock);
	elem = radix_tree_lookup(&mru->store, key);
	if (elem) {
		list_del(&elem->list_node);
		_xfs_mru_cache_list_insert(mru, elem);
	}
	else
		mutex_spinunlock(&mru->lock, 0);
		spin_unlock(&mru->lock);

	return elem ? elem->value : NULL;
}
@@ -571,10 +571,10 @@ xfs_mru_cache_peek(
	if (!mru || !mru->lists)
		return NULL;

	mutex_spinlock(&mru->lock);
	spin_lock(&mru->lock);
	elem = radix_tree_lookup(&mru->store, key);
	if (!elem)
		mutex_spinunlock(&mru->lock, 0);
		spin_unlock(&mru->lock);

	return elem ? elem->value : NULL;
}
@@ -588,5 +588,5 @@ void
xfs_mru_cache_done(
	xfs_mru_cache_t	*mru)
{
	mutex_spinunlock(&mru->lock, 0);
	spin_unlock(&mru->lock);
}