Commit 3685c2a1 authored by Eric Sandeen, committed by Lachlan McIlroy
Browse files

[XFS] Unwrap XFS_SB_LOCK.



Un-obfuscate XFS_SB_LOCK, remove XFS_SB_LOCK->mutex_lock->spin_lock
macros, call spin_lock directly, remove extraneous cookie holdover from
old xfs code, and change lock type to spinlock_t.

SGI-PV: 970382
SGI-Modid: xfs-linux-melb:xfs-kern:29746a

Signed-off-by: Eric Sandeen <sandeen@sandeen.net>
Signed-off-by: Donald Douwsma <donaldd@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
parent ba74d0cb
Loading
Loading
Loading
Loading
+5 −7
Original line number Diff line number Diff line
@@ -310,7 +310,6 @@ xfs_qm_mount_quotas(
	xfs_mount_t	*mp,
	int		mfsi_flags)
{
	unsigned long	s;
	int		error = 0;
	uint		sbf;

@@ -367,13 +366,13 @@ xfs_qm_mount_quotas(

 write_changes:
	/*
	 * We actually don't have to acquire the SB_LOCK at all.
	 * We actually don't have to acquire the m_sb_lock at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	s = XFS_SB_LOCK(mp);
	spin_lock(&mp->m_sb_lock);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	XFS_SB_UNLOCK(mp, s);
	spin_unlock(&mp->m_sb_lock);

	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
@@ -1370,7 +1369,6 @@ xfs_qm_qino_alloc(
{
	xfs_trans_t	*tp;
	int		error;
	unsigned long	s;
	int		committed;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
@@ -1402,7 +1400,7 @@ xfs_qm_qino_alloc(
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	s = XFS_SB_LOCK(mp);
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
		unsigned oldv = mp->m_sb.sb_versionnum;
@@ -1429,7 +1427,7 @@ xfs_qm_qino_alloc(
		mp->m_sb.sb_uquotino = (*ip)->i_ino;
	else
		mp->m_sb.sb_gquotino = (*ip)->i_ino;
	XFS_SB_UNLOCK(mp, s);
	spin_unlock(&mp->m_sb_lock);
	xfs_mod_sb(tp, sbfields);

	if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
+8 −11
Original line number Diff line number Diff line
@@ -200,7 +200,6 @@ xfs_qm_scall_quotaoff(
	boolean_t		force)
{
	uint			dqtype;
	unsigned long	s;
	int			error;
	uint			inactivate_flags;
	xfs_qoff_logitem_t	*qoffstart;
@@ -237,9 +236,9 @@ xfs_qm_scall_quotaoff(
	if ((flags & XFS_ALL_QUOTA_ACCT) == 0) {
		mp->m_qflags &= ~(flags);

		s = XFS_SB_LOCK(mp);
		spin_lock(&mp->m_sb_lock);
		mp->m_sb.sb_qflags = mp->m_qflags;
		XFS_SB_UNLOCK(mp, s);
		spin_unlock(&mp->m_sb_lock);
		mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));

		/* XXX what to do if error ? Revert back to old vals incore ? */
@@ -415,7 +414,6 @@ xfs_qm_scall_quotaon(
	uint		flags)
{
	int		error;
	unsigned long	s;
	uint		qf;
	uint		accflags;
	__int64_t	sbflags;
@@ -468,10 +466,10 @@ xfs_qm_scall_quotaon(
	 * Change sb_qflags on disk but not incore mp->qflags
	 * if this is the root filesystem.
	 */
	s = XFS_SB_LOCK(mp);
	spin_lock(&mp->m_sb_lock);
	qf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = qf | flags;
	XFS_SB_UNLOCK(mp, s);
	spin_unlock(&mp->m_sb_lock);

	/*
	 * There's nothing to change if it's the same.
@@ -815,7 +813,6 @@ xfs_qm_log_quotaoff(
{
	xfs_trans_t	       *tp;
	int			error;
	unsigned long	s;
	xfs_qoff_logitem_t     *qoffi=NULL;
	uint			oldsbqflag=0;

@@ -832,10 +829,10 @@ xfs_qm_log_quotaoff(
	qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
	xfs_trans_log_quotaoff_item(tp, qoffi);

	s = XFS_SB_LOCK(mp);
	spin_lock(&mp->m_sb_lock);
	oldsbqflag = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
	XFS_SB_UNLOCK(mp, s);
	spin_unlock(&mp->m_sb_lock);

	xfs_mod_sb(tp, XFS_SB_QFLAGS);

@@ -854,9 +851,9 @@ error0:
		 * No one else is modifying sb_qflags, so this is OK.
		 * We still hold the quotaofflock.
		 */
		s = XFS_SB_LOCK(mp);
		spin_lock(&mp->m_sb_lock);
		mp->m_sb.sb_qflags = oldsbqflag;
		XFS_SB_UNLOCK(mp, s);
		spin_unlock(&mp->m_sb_lock);
	}
	*qoffstartp = qoffi;
	return (error);
+3 −5
Original line number Diff line number Diff line
@@ -226,17 +226,15 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
STATIC void
xfs_sbversion_add_attr2(xfs_mount_t *mp, xfs_trans_t *tp)
{
	unsigned long s;

	if ((mp->m_flags & XFS_MOUNT_ATTR2) &&
	    !(XFS_SB_VERSION_HASATTR2(&mp->m_sb))) {
		s = XFS_SB_LOCK(mp);
		spin_lock(&mp->m_sb_lock);
		if (!XFS_SB_VERSION_HASATTR2(&mp->m_sb)) {
			XFS_SB_VERSION_ADDATTR2(&mp->m_sb);
			XFS_SB_UNLOCK(mp, s);
			spin_unlock(&mp->m_sb_lock);
			xfs_mod_sb(tp, XFS_SB_VERSIONNUM | XFS_SB_FEATURES2);
		} else
			XFS_SB_UNLOCK(mp, s);
			spin_unlock(&mp->m_sb_lock);
	}
}

+3 −4
Original line number Diff line number Diff line
@@ -3956,7 +3956,6 @@ xfs_bmap_add_attrfork(
	xfs_bmap_free_t		flist;		/* freed extent records */
	xfs_mount_t		*mp;		/* mount structure */
	xfs_trans_t		*tp;		/* transaction pointer */
	unsigned long		s;		/* spinlock spl value */
	int			blks;		/* space reservation */
	int			version = 1;	/* superblock attr version */
	int			committed;	/* xaction was committed */
@@ -4053,7 +4052,7 @@ xfs_bmap_add_attrfork(
	   (!XFS_SB_VERSION_HASATTR2(&mp->m_sb) && version == 2)) {
		__int64_t sbfields = 0;

		s = XFS_SB_LOCK(mp);
		spin_lock(&mp->m_sb_lock);
		if (!XFS_SB_VERSION_HASATTR(&mp->m_sb)) {
			XFS_SB_VERSION_ADDATTR(&mp->m_sb);
			sbfields |= XFS_SB_VERSIONNUM;
@@ -4063,10 +4062,10 @@ xfs_bmap_add_attrfork(
			sbfields |= (XFS_SB_VERSIONNUM | XFS_SB_FEATURES2);
		}
		if (sbfields) {
			XFS_SB_UNLOCK(mp, s);
			spin_unlock(&mp->m_sb_lock);
			xfs_mod_sb(tp, sbfields);
		} else
			XFS_SB_UNLOCK(mp, s);
			spin_unlock(&mp->m_sb_lock);
	}
	if ((error = xfs_bmap_finish(&tp, &flist, &committed)))
		goto error2;
+5 −8
Original line number Diff line number Diff line
@@ -462,15 +462,13 @@ xfs_fs_counts(
	xfs_mount_t		*mp,
	xfs_fsop_counts_t	*cnt)
{
	unsigned long	s;

	xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT);
	s = XFS_SB_LOCK(mp);
	spin_lock(&mp->m_sb_lock);
	cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
	cnt->freertx = mp->m_sb.sb_frextents;
	cnt->freeino = mp->m_sb.sb_ifree;
	cnt->allocino = mp->m_sb.sb_icount;
	XFS_SB_UNLOCK(mp, s);
	spin_unlock(&mp->m_sb_lock);
	return 0;
}

@@ -497,7 +495,6 @@ xfs_reserve_blocks(
{
	__int64_t		lcounter, delta, fdblks_delta;
	__uint64_t		request;
	unsigned long		s;

	/* If inval is null, report current values and return */
	if (inval == (__uint64_t *)NULL) {
@@ -515,7 +512,7 @@ xfs_reserve_blocks(
	 * problem. we needto work out if we are freeing or allocation
	 * blocks first, then we can do the modification as necessary.
	 *
	 * We do this under the XFS_SB_LOCK so that if we are near
	 * We do this under the m_sb_lock so that if we are near
	 * ENOSPC, we will hold out any changes while we work out
	 * what to do. This means that the amount of free space can
	 * change while we do this, so we need to retry if we end up
@@ -526,7 +523,7 @@ xfs_reserve_blocks(
	 * enabled, disabled or even compiled in....
	 */
retry:
	s = XFS_SB_LOCK(mp);
	spin_lock(&mp->m_sb_lock);
	xfs_icsb_sync_counters_flags(mp, XFS_ICSB_SB_LOCKED);

	/*
@@ -569,7 +566,7 @@ out:
		outval->resblks = mp->m_resblks;
		outval->resblks_avail = mp->m_resblks_avail;
	}
	XFS_SB_UNLOCK(mp, s);
	spin_unlock(&mp->m_sb_lock);

	if (fdblks_delta) {
		/*
Loading