Commit 9dbc1c03 authored by Linus Torvalds
Browse files

Merge tag 'xfs-5.10-fixes-3' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux

Pull xfs fixes from Darrick Wong:

 - Fix an uninitialized struct problem

 - Fix an iomap problem zeroing unwritten EOF blocks

 - Fix some clumsy error handling when writeback fails on filesystems
   with blocksize < pagesize

 - Fix a retry loop not resetting loop variables properly

 - Fix scrub flagging rtinherit inodes on a non-rt fs, since the kernel
   actually does permit that combination

 - Fix excessive page cache flushing when unsharing part of a file

* tag 'xfs-5.10-fixes-3' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux:
  xfs: only flush the unshared range in xfs_reflink_unshare
  xfs: fix scrub flagging rtinherit even if there is no rt device
  xfs: fix missing CoW blocks writeback conversion retry
  iomap: clean up writeback state logic on writepage error
  iomap: support partial page discard on writeback block mapping failure
  xfs: flush new eof page on truncate to avoid post-eof corruption
  xfs: set xefi_discard when creating a deferred agfl free log intent item
parents 6b2c4d52 46afb062
Loading
Loading
Loading
Loading
+10 −20
Original line number Diff line number Diff line
@@ -1374,6 +1374,7 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
	WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
	WARN_ON_ONCE(!PageLocked(page));
	WARN_ON_ONCE(PageWriteback(page));
	WARN_ON_ONCE(PageDirty(page));

	/*
	 * We cannot cancel the ioend directly here on error.  We may have
@@ -1382,33 +1383,22 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
	 * appropriately.
	 */
	if (unlikely(error)) {
		if (!count) {
		/*
			 * If the current page hasn't been added to ioend, it
			 * won't be affected by I/O completions and we must
			 * discard and unlock it right here.
		 * Let the filesystem know what portion of the current page
		 * failed to map. If the page wasn't added to ioend, it
		 * won't be affected by I/O completion and we must unlock it
		 * now.
		 */
		if (wpc->ops->discard_page)
				wpc->ops->discard_page(page);
			wpc->ops->discard_page(page, file_offset);
		if (!count) {
			ClearPageUptodate(page);
			unlock_page(page);
			goto done;
		}

		/*
		 * If the page was not fully cleaned, we need to ensure that the
		 * higher layers come back to it correctly.  That means we need
		 * to keep the page dirty, and for WB_SYNC_ALL writeback we need
		 * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
		 * so another attempt to write this page in this writeback sweep
		 * will be made.
		 */
		set_page_writeback_keepwrite(page);
	} else {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	}

	set_page_writeback(page);
	unlock_page(page);

	/*
+1 −0
Original line number Diff line number Diff line
@@ -2467,6 +2467,7 @@ xfs_defer_agfl_block(
	new->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno);
	new->xefi_blockcount = 1;
	new->xefi_oinfo = *oinfo;
	new->xefi_skip_discard = false;

	trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1);

+1 −1
Original line number Diff line number Diff line
@@ -52,9 +52,9 @@ struct xfs_extent_free_item
{
	xfs_fsblock_t		xefi_startblock;/* starting fs block number */
	xfs_extlen_t		xefi_blockcount;/* number of blocks in extent */
	bool			xefi_skip_discard;
	struct list_head	xefi_list;
	struct xfs_owner_info	xefi_oinfo;	/* extent owner */
	bool			xefi_skip_discard;
};

#define	XFS_BMAP_MAX_NMAP	4
+1 −2
Original line number Diff line number Diff line
@@ -121,8 +121,7 @@ xchk_inode_flags(
		goto bad;

	/* rt flags require rt device */
	if ((flags & (XFS_DIFLAG_REALTIME | XFS_DIFLAG_RTINHERIT)) &&
	    !mp->m_rtdev_targp)
	if ((flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
		goto bad;

	/* new rt bitmap flag only valid for rbmino */
+12 −8
Original line number Diff line number Diff line
@@ -346,8 +346,8 @@ xfs_map_blocks(
	ssize_t			count = i_blocksize(inode);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
	xfs_fileoff_t		cow_fsb = NULLFILEOFF;
	int			whichfork = XFS_DATA_FORK;
	xfs_fileoff_t		cow_fsb;
	int			whichfork;
	struct xfs_bmbt_irec	imap;
	struct xfs_iext_cursor	icur;
	int			retries = 0;
@@ -381,6 +381,8 @@ xfs_map_blocks(
	 * landed in a hole and we skip the block.
	 */
retry:
	cow_fsb = NULLFILEOFF;
	whichfork = XFS_DATA_FORK;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
@@ -527,13 +529,15 @@ xfs_prepare_ioend(
 */
static void
xfs_discard_page(
	struct page		*page)
	struct page		*page,
	loff_t			fileoff)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			offset = page_offset(page);
	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, offset);
	unsigned int		pageoff = offset_in_page(fileoff);
	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, fileoff);
	xfs_fileoff_t		pageoff_fsb = XFS_B_TO_FSBT(mp, pageoff);
	int			error;

	if (XFS_FORCED_SHUTDOWN(mp))
@@ -541,14 +545,14 @@ xfs_discard_page(

	xfs_alert_ratelimited(mp,
		"page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);
			page, ip->i_ino, fileoff);

	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
			i_blocks_per_page(inode, page));
			i_blocks_per_page(inode, page) - pageoff_fsb);
	if (error && !XFS_FORCED_SHUTDOWN(mp))
		xfs_alert(mp, "page discard unable to remove delalloc mapping.");
out_invalidate:
	iomap_invalidatepage(page, 0, PAGE_SIZE);
	iomap_invalidatepage(page, pageoff, PAGE_SIZE - pageoff);
}

static const struct iomap_writeback_ops xfs_writeback_ops = {
Loading