Commit b272ae22 authored by David Sterba
Browse files

btrfs: drop argument tree from btrfs_lock_and_flush_ordered_range



The tree pointer can be safely read from the inode so we can drop the
redundant argument from btrfs_lock_and_flush_ordered_range.

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent ae6957eb
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -3331,7 +3331,7 @@ static inline void contiguous_readpages(struct extent_io_tree *tree,

	ASSERT(tree == &inode->io_tree);

	btrfs_lock_and_flush_ordered_range(tree, inode, start, end, NULL);
	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);

	for (index = 0; index < nr_pages; index++) {
		__do_readpage(tree, pages[index], btrfs_get_extent, em_cached,
@@ -3354,7 +3354,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,

	ASSERT(tree == &inode->io_tree);

	btrfs_lock_and_flush_ordered_range(tree, inode, start, end, NULL);
	btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);

	ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
			    bio_flags, read_flags, NULL);
+1 −1
Original line number Diff line number Diff line
@@ -1561,7 +1561,7 @@ static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
	lockend = round_up(pos + *write_bytes,
			   fs_info->sectorsize) - 1;

	btrfs_lock_and_flush_ordered_range(&inode->io_tree, inode, lockstart,
	btrfs_lock_and_flush_ordered_range(inode, lockstart,
					   lockend, NULL);

	num_bytes = lockend - lockstart + 1;
+1 −1
Original line number Diff line number Diff line
@@ -4619,7 +4619,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
	if (size <= hole_start)
		return 0;

	btrfs_lock_and_flush_ordered_range(io_tree, BTRFS_I(inode), hole_start,
	btrfs_lock_and_flush_ordered_range(BTRFS_I(inode), hole_start,
					   block_end - 1, &cached_state);
	cur_offset = hole_start;
	while (1) {
+3 −7
Original line number Diff line number Diff line
@@ -835,7 +835,6 @@ out:
 * btrfs_flush_ordered_range - Lock the passed range and ensures all pending
 * ordered extents in it are run to completion.
 *
 * @tree:         IO tree used for locking out other users of the range
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
@@ -845,8 +844,7 @@ out:
 * This function always returns with the given range locked, ensuring after it's
 * called no order extent can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
					struct btrfs_inode *inode, u64 start,
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
@@ -854,13 +852,11 @@ void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	ASSERT(tree == &inode->io_tree);

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent_bits(tree, start, end, cachedp);
		lock_extent_bits(&inode->io_tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
@@ -873,7 +869,7 @@ void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent_cached(tree, start, end, cachedp);
		unlock_extent_cached(&inode->io_tree, start, end, cachedp);
		btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}
+1 −2
Original line number Diff line number Diff line
@@ -183,8 +183,7 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len);
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			      const u64 range_start, const u64 range_len);
void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
					struct btrfs_inode *inode, u64 start,
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state);
int __init ordered_data_init(void);