Commit 728404da authored by Filipe Manana, committed by Chris Mason

Btrfs: add helper btrfs_fdatawrite_range



Add a helper so we stop duplicating the double filemap_fdatawrite_range() call
needed for inodes with async extents (compressed writes) in so many places.

Signed-off-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Chris Mason <clm@fb.com>
parent 075bdbdb
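
For reference, here is a minimal sketch (not part of this commit) of how a caller can pair the new helper with filemap_fdatawait_range() to flush and then wait on a byte range, similar to what __btrfs_direct_write() does below; the function name example_flush_range() is hypothetical and the snippet assumes the fs/btrfs context where ctree.h declares the helper:

/*
 * Hypothetical caller, for illustration only: start writeback on
 * [start, end] and wait for it.  btrfs_fdatawrite_range() internally
 * re-issues filemap_fdatawrite_range() when the inode has async
 * (compressed) extents, so the caller no longer has to test
 * BTRFS_INODE_HAS_ASYNC_EXTENT itself.
 */
static int example_flush_range(struct inode *inode, loff_t start, loff_t end)
{
	int ret;

	ret = btrfs_fdatawrite_range(inode, start, end);
	if (ret)
		return ret;

	return filemap_fdatawait_range(inode->i_mapping, start, end);
}

Keeping the async-extent retry inside the helper is the point of the change: each call site shrinks to a single line and the BTRFS_INODE_HAS_ASYNC_EXTENT test lives in one place.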
fs/btrfs/ctree.h  +1 −0
@@ -3901,6 +3901,7 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
		      struct page **pages, size_t num_pages,
		      loff_t pos, size_t write_bytes,
		      struct extent_state **cached);
+int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end);

/* tree-defrag.c */
int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
fs/btrfs/file.c  +30 −9
@@ -1676,6 +1676,7 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
				    loff_t pos)
{
	struct file *file = iocb->ki_filp;
+	struct inode *inode = file_inode(file);
	ssize_t written;
	ssize_t written_buffered;
	loff_t endbyte;
@@ -1697,13 +1698,10 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
	 * able to read what was just written.
	 */
	endbyte = pos + written_buffered - 1;
-	err = filemap_fdatawrite_range(file->f_mapping, pos, endbyte);
-	if (!err && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
-			     &BTRFS_I(file_inode(file))->runtime_flags))
-		err = filemap_fdatawrite_range(file->f_mapping, pos, endbyte);
+	err = btrfs_fdatawrite_range(inode, pos, endbyte);
	if (err)
		goto out;
-	err = filemap_fdatawait_range(file->f_mapping, pos, endbyte);
+	err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
@@ -1864,10 +1862,7 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
	int ret;

	atomic_inc(&BTRFS_I(inode)->sync_writers);
-	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
-	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
-			     &BTRFS_I(inode)->runtime_flags))
-		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
+	ret = btrfs_fdatawrite_range(inode, start, end);
	atomic_dec(&BTRFS_I(inode)->sync_writers);

	return ret;
@@ -2820,3 +2815,29 @@ int btrfs_auto_defrag_init(void)

	return 0;
}

+int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
+{
+	int ret;
+
+	/*
+	 * So with compression we will find and lock a dirty page and clear the
+	 * first one as dirty, setup an async extent, and immediately return
+	 * with the entire range locked but with nobody actually marked with
+	 * writeback.  So we can't just filemap_write_and_wait_range() and
+	 * expect it to work since it will just kick off a thread to do the
+	 * actual work.  So we need to call filemap_fdatawrite_range _again_
+	 * since it will wait on the page lock, which won't be unlocked until
+	 * after the pages have been marked as writeback and so we're good to go
+	 * from there.  We have to do this otherwise we'll miss the ordered
+	 * extents and that results in badness.  Please Josef, do not think you
+	 * know better and pull this out at some point in the future, it is
+	 * right and you are wrong.
+	 */
+	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
+	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+			     &BTRFS_I(inode)->runtime_flags))
+		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
+
+	return ret;
+}
fs/btrfs/inode.c  +1 −8
@@ -7015,14 +7015,7 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
			btrfs_put_ordered_extent(ordered);
		} else {
			/* Screw you mmap */
-			ret = filemap_fdatawrite_range(inode->i_mapping,
-						       lockstart,
-						       lockend);
-			if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
-					     &BTRFS_I(inode)->runtime_flags))
-				ret = filemap_fdatawrite_range(inode->i_mapping,
-							       lockstart,
-							       lockend);
+			ret = btrfs_fdatawrite_range(inode, lockstart, lockend);
			if (ret)
				break;
			ret = filemap_fdatawait_range(inode->i_mapping,
fs/btrfs/ordered-data.c  +2 −22
@@ -725,30 +725,10 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
-	ret = filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
+	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;
-	/*
-	 * So with compression we will find and lock a dirty page and clear the
-	 * first one as dirty, setup an async extent, and immediately return
-	 * with the entire range locked but with nobody actually marked with
-	 * writeback.  So we can't just filemap_write_and_wait_range() and
-	 * expect it to work since it will just kick off a thread to do the
-	 * actual work.  So we need to call filemap_fdatawrite_range _again_
-	 * since it will wait on the page lock, which won't be unlocked until
-	 * after the pages have been marked as writeback and so we're good to go
-	 * from there.  We have to do this otherwise we'll miss the ordered
-	 * extents and that results in badness.  Please Josef, do not think you
-	 * know better and pull this out at some point in the future, it is
-	 * right and you are wrong.
-	 */
-	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
-		     &BTRFS_I(inode)->runtime_flags)) {
-		ret = filemap_fdatawrite_range(inode->i_mapping, start,
-					       orig_end);
-		if (ret)
-			return ret;
-	}

	ret = filemap_fdatawait_range(inode->i_mapping, start, orig_end);
	if (ret)
		return ret;