Commit 843ccf9f authored by David Sterba
Browse files

btrfs: use assertion helpers for spinning writers



Use the helpers where open coded. On non-debug builds, the warnings will
not trigger and extent_buffer::spinning_writers becomes unused and can be
moved to the appropriate section, saving a few bytes.

Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: David Sterba <dsterba@suse.com>
parent e4e9fd0f
Loading
Loading
Loading
Loading
+4 −1
Original line number Diff line number Diff line
@@ -4682,7 +4682,6 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
	atomic_set(&eb->blocking_readers, 0);
	atomic_set(&eb->blocking_writers, 0);
	atomic_set(&eb->spinning_readers, 0);
	atomic_set(&eb->spinning_writers, 0);
	eb->lock_nested = 0;
	init_waitqueue_head(&eb->write_lock_wq);
	init_waitqueue_head(&eb->read_lock_wq);
@@ -4700,6 +4699,10 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
		> MAX_INLINE_EXTENT_BUFFER_SIZE);
	BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);

#ifdef CONFIG_BTRFS_DEBUG
	atomic_set(&eb->spinning_writers, 0);
#endif

	return eb;
}

+1 −1
Original line number Diff line number Diff line
@@ -166,7 +166,6 @@ struct extent_buffer {
	atomic_t blocking_writers;
	atomic_t blocking_readers;
	atomic_t spinning_readers;
	atomic_t spinning_writers;
	short lock_nested;
	/* >= 0 if eb belongs to a log tree, -1 otherwise */
	short log_index;
@@ -185,6 +184,7 @@ struct extent_buffer {
	wait_queue_head_t read_lock_wq;
	struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
#ifdef CONFIG_BTRFS_DEBUG
	atomic_t spinning_writers;
	struct list_head leak_list;
#endif
};
+6 −10
Original line number Diff line number Diff line
@@ -64,8 +64,7 @@ void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (atomic_read(&eb->blocking_writers) == 0) {
		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
		atomic_dec(&eb->spinning_writers);
		btrfs_assert_spinning_writers_put(eb);
		btrfs_assert_tree_locked(eb);
		atomic_inc(&eb->blocking_writers);
		write_unlock(&eb->lock);
@@ -101,8 +100,7 @@ void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
		return;
	BUG_ON(atomic_read(&eb->blocking_writers) != 1);
	write_lock(&eb->lock);
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
	btrfs_assert_spinning_writers_get(eb);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_writers))
		cond_wake_up_nomb(&eb->write_lock_wq);
@@ -200,7 +198,7 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
		return 0;
	}
	atomic_inc(&eb->write_locks);
	atomic_inc(&eb->spinning_writers);
	btrfs_assert_spinning_writers_get(eb);
	eb->lock_owner = current->pid;
	return 1;
}
@@ -266,8 +264,7 @@ again:
		write_unlock(&eb->lock);
		goto again;
	}
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
	btrfs_assert_spinning_writers_get(eb);
	atomic_inc(&eb->write_locks);
	eb->lock_owner = current->pid;
}
@@ -286,14 +283,13 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
	atomic_dec(&eb->write_locks);

	if (blockers) {
		WARN_ON(atomic_read(&eb->spinning_writers));
		btrfs_assert_no_spinning_writers(eb);
		atomic_dec(&eb->blocking_writers);
		/* Use the lighter barrier after atomic */
		smp_mb__after_atomic();
		cond_wake_up_nomb(&eb->write_lock_wq);
	} else {
		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
		atomic_dec(&eb->spinning_writers);
		btrfs_assert_spinning_writers_put(eb);
		write_unlock(&eb->lock);
	}
}