Commit 2492a606 authored by Christoph Hellwig, committed by Darrick J. Wong
Browse files

xfs: initialize iomap->flags in xfs_bmbt_to_iomap



Currently we don't overwrite the flags field in the iomap in
xfs_bmbt_to_iomap.  This works fine with 0-initialized iomaps on stack,
but is harmful once we want to be able to reuse an iomap in the
writeback code.  Replace the shared parameter with a set of initial
flags and thus ensure the flags field is always reinitialized.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
parent 7684e2c4
Loading
Loading
Loading
Loading
+18 −12
Original line number Diff line number Diff line
@@ -54,7 +54,7 @@ xfs_bmbt_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	struct xfs_bmbt_irec	*imap,
	bool			shared)
	u16			flags)
{
	struct xfs_mount	*mp = ip->i_mount;

@@ -79,12 +79,11 @@ xfs_bmbt_to_iomap(
	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
	iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
	iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip));
	iomap->flags = flags;

	if (xfs_ipincount(ip) &&
	    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
		iomap->flags |= IOMAP_F_DIRTY;
	if (shared)
		iomap->flags |= IOMAP_F_SHARED;
	return 0;
}

@@ -540,6 +539,7 @@ xfs_file_iomap_begin_delay(
	struct xfs_iext_cursor	icur, ccur;
	xfs_fsblock_t		prealloc_blocks = 0;
	bool			eof = false, cow_eof = false, shared = false;
	u16			iomap_flags = 0;
	int			whichfork = XFS_DATA_FORK;
	int			error = 0;

@@ -707,7 +707,7 @@ retry:
	 * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
	 * them out if the write happens to fail.
	 */
	iomap->flags |= IOMAP_F_NEW;
	iomap_flags |= IOMAP_F_NEW;
	trace_xfs_iomap_alloc(ip, offset, count, whichfork,
			whichfork == XFS_DATA_FORK ? &imap : &cmap);
done:
@@ -715,14 +715,17 @@ done:
		if (imap.br_startoff > offset_fsb) {
			xfs_trim_extent(&cmap, offset_fsb,
					imap.br_startoff - offset_fsb);
			error = xfs_bmbt_to_iomap(ip, iomap, &cmap, true);
			error = xfs_bmbt_to_iomap(ip, iomap, &cmap,
					IOMAP_F_SHARED);
			goto out_unlock;
		}
		/* ensure we only report blocks we have a reservation for */
		xfs_trim_extent(&imap, cmap.br_startoff, cmap.br_blockcount);
		shared = true;
	}
	error = xfs_bmbt_to_iomap(ip, iomap, &imap, shared);
	if (shared)
		iomap_flags |= IOMAP_F_SHARED;
	error = xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
@@ -930,6 +933,7 @@ xfs_file_iomap_begin(
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			nimaps = 1, error = 0;
	bool			shared = false;
	u16			iomap_flags = 0;
	unsigned		lockmode;

	if (XFS_FORCED_SHUTDOWN(mp))
@@ -1045,7 +1049,7 @@ xfs_file_iomap_begin(
	if (error)
		return error;

	iomap->flags |= IOMAP_F_NEW;
	iomap_flags |= IOMAP_F_NEW;
	trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);

out_finish:
@@ -1055,8 +1059,10 @@ out_finish:
	 * there is no other metadata changes pending or have been made here.
	 */
	if ((flags & IOMAP_WRITE) && offset + length > i_size_read(inode))
		iomap->flags |= IOMAP_F_DIRTY;
	return xfs_bmbt_to_iomap(ip, iomap, &imap, shared);
		iomap_flags |= IOMAP_F_DIRTY;
	if (shared)
		iomap_flags |= IOMAP_F_SHARED;
	return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags);

out_found:
	ASSERT(nimaps);
@@ -1200,7 +1206,7 @@ xfs_seek_iomap_begin(
		if (data_fsb < cow_fsb + cmap.br_blockcount)
			end_fsb = min(end_fsb, data_fsb);
		xfs_trim_extent(&cmap, offset_fsb, end_fsb);
		error = xfs_bmbt_to_iomap(ip, iomap, &cmap, true);
		error = xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);
		/*
		 * This is a COW extent, so we must probe the page cache
		 * because there could be dirty page cache being backed
@@ -1222,7 +1228,7 @@ xfs_seek_iomap_begin(
	imap.br_state = XFS_EXT_NORM;
done:
	xfs_trim_extent(&imap, offset_fsb, end_fsb);
	error = xfs_bmbt_to_iomap(ip, iomap, &imap, false);
	error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
@@ -1268,7 +1274,7 @@ out_unlock:
	if (error)
		return error;
	ASSERT(nimaps);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, false);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
}

const struct iomap_ops xfs_xattr_iomap_ops = {
+1 −1
Original line number Diff line number Diff line
@@ -16,7 +16,7 @@ int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t, bool);

int xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,
		struct xfs_bmbt_irec *, bool shared);
		struct xfs_bmbt_irec *, u16);
xfs_extlen_t xfs_eof_alignment(struct xfs_inode *ip, xfs_extlen_t extsize);

static inline xfs_filblks_t
+1 −1
Original line number Diff line number Diff line
@@ -178,7 +178,7 @@ xfs_fs_map_blocks(
	}
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);

	error = xfs_bmbt_to_iomap(ip, iomap, &imap, false);
	error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
	*device_generation = mp->m_generation;
	return error;
out_unlock: