Commit f49aa1de authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull btrfs fixes from David Sterba:
 "Notable highlights:

   - fixes for some long-standing bugs in fsync that were quite hard to
     catch but now finally fixed

   - some fixups to error handling paths that did not properly clean up
     (locking, memory)

   - fix to space reservation for inheriting properties"

* tag 'for-5.2-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  Btrfs: tree-checker: detect file extent items with overlapping ranges
  Btrfs: fix race between ranged fsync and writeback of adjacent ranges
  Btrfs: avoid fallback to transaction commit during fsync of files with holes
  btrfs: extent-tree: Fix a bug that btrfs is unable to add pinned bytes
  btrfs: sysfs: don't leak memory when failing add fsid
  btrfs: sysfs: Fix error path kobject memory leak
  Btrfs: do not abort transaction at btrfs_update_root() after failure to COW path
  btrfs: use the existing reserved items for our first prop for inheritance
  btrfs: don't double unlock on error in btrfs_punch_hole
  btrfs: Check the compression level before getting a workspace
parents 78e03651 4e9845ef
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -1008,6 +1008,7 @@ int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
	struct list_head *workspace;
	int ret;

	level = btrfs_compress_op[type]->set_level(level);
	workspace = get_workspace(type, level);
	ret = btrfs_compress_op[type]->compress_pages(workspace, mapping,
						      start, pages,
+8 −7
Original line number Diff line number Diff line
@@ -757,12 +757,14 @@ static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
}

static void add_pinned_bytes(struct btrfs_fs_info *fs_info,
			     struct btrfs_ref *ref)
			     struct btrfs_ref *ref, int sign)
{
	struct btrfs_space_info *space_info;
	s64 num_bytes = -ref->len;
	s64 num_bytes;
	u64 flags;

	ASSERT(sign == 1 || sign == -1);
	num_bytes = sign * ref->len;
	if (ref->type == BTRFS_REF_METADATA) {
		if (ref->tree_ref.root == BTRFS_CHUNK_TREE_OBJECTID)
			flags = BTRFS_BLOCK_GROUP_SYSTEM;
@@ -2063,7 +2065,7 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
	btrfs_ref_tree_mod(fs_info, generic_ref);

	if (ret == 0 && old_ref_mod < 0 && new_ref_mod >= 0)
		add_pinned_bytes(fs_info, generic_ref);
		add_pinned_bytes(fs_info, generic_ref, -1);

	return ret;
}
@@ -3882,8 +3884,7 @@ static int create_space_info(struct btrfs_fs_info *info, u64 flags)
				    info->space_info_kobj, "%s",
				    alloc_name(space_info->flags));
	if (ret) {
		percpu_counter_destroy(&space_info->total_bytes_pinned);
		kfree(space_info);
		kobject_put(&space_info->kobj);
		return ret;
	}

@@ -7190,7 +7191,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
	}
out:
	if (pin)
		add_pinned_bytes(fs_info, &generic_ref);
		add_pinned_bytes(fs_info, &generic_ref, 1);

	if (last_ref) {
		/*
@@ -7238,7 +7239,7 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
		btrfs_ref_tree_mod(fs_info, ref);

	if (ret == 0 && old_ref_mod >= 0 && new_ref_mod < 0)
		add_pinned_bytes(fs_info, ref);
		add_pinned_bytes(fs_info, ref, 1);

	return ret;
}
+13 −3
Original line number Diff line number Diff line
@@ -2067,6 +2067,18 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
	int ret = 0, err;
	u64 len;

	/*
	 * If the inode needs a full sync, make sure we use a full range to
	 * avoid log tree corruption, due to hole detection racing with ordered
	 * extent completion for adjacent ranges, and assertion failures during
	 * hole detection.
	 */
	if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
		     &BTRFS_I(inode)->runtime_flags)) {
		start = 0;
		end = LLONG_MAX;
	}

	/*
	 * The range length can be represented by u64, we have to do the typecasts
	 * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync()
@@ -2554,10 +2566,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)

	ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
					  &cached_state);
	if (ret) {
		inode_unlock(inode);
	if (ret)
		goto out_only_mutex;
	}

	path = btrfs_alloc_path();
	if (!path) {
+22 −8
Original line number Diff line number Diff line
@@ -332,6 +332,7 @@ static int inherit_props(struct btrfs_trans_handle *trans,
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	int i;
	bool need_reserve = false;

	if (!test_bit(BTRFS_INODE_HAS_PROPS,
		      &BTRFS_I(parent)->runtime_flags))
@@ -357,11 +358,20 @@ static int inherit_props(struct btrfs_trans_handle *trans,
		if (ret)
			continue;

		/*
		 * Currently callers should be reserving 1 item for properties,
		 * since we only have 1 property that we currently support.  If
		 * we add more in the future we need to try and reserve more
		 * space for them.  But we should also revisit how we do space
		 * reservations if we do add more properties in the future.
		 */
		if (need_reserve) {
			num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
			ret = btrfs_block_rsv_add(root, trans->block_rsv,
					num_bytes, BTRFS_RESERVE_NO_FLUSH);
			if (ret)
				return ret;
		}

		ret = btrfs_setxattr(trans, inode, h->xattr_name, value,
				     strlen(value), 0);
@@ -375,10 +385,14 @@ static int inherit_props(struct btrfs_trans_handle *trans,
					&BTRFS_I(inode)->runtime_flags);
		}

		btrfs_block_rsv_release(fs_info, trans->block_rsv, num_bytes);
		if (need_reserve) {
			btrfs_block_rsv_release(fs_info, trans->block_rsv,
					num_bytes);
			if (ret)
				return ret;
		}
		need_reserve = true;
	}

	return 0;
}
+1 −3
Original line number Diff line number Diff line
@@ -132,10 +132,8 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, key, path, 0, 1);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
	if (ret < 0)
		goto out;
	}

	if (ret > 0) {
		btrfs_crit(fs_info,
Loading