Commit 60263d58 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Darrick J. Wong
Browse files

iomap: fall back to buffered writes for invalidation failures



Failing to invalidate the page cache means data is incoherent, which is
a very bad state for the system.  Always fall back to buffered I/O
through the page cache if we can't invalidate mappings.

Signed-off-by: default avatarChristoph Hellwig <hch@lst.de>
Acked-by: default avatarDave Chinner <dchinner@redhat.com>
Reviewed-by: default avatarGoldwyn Rodrigues <rgoldwyn@suse.com>
Reviewed-by: default avatarDarrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: default avatarDarrick J. Wong <darrick.wong@oracle.com>
Acked-by: default avatarBob Peterson <rpeterso@redhat.com>
Acked-by: default avatarDamien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Theodore Ts'o <tytso@mit.edu> # for ext4
Reviewed-by: Andreas Gruenbacher <agruenba@redhat.com> # for gfs2
Reviewed-by: default avatarRitesh Harjani <riteshh@linux.ibm.com>
parent 80e543ae
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -544,6 +544,8 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
		iomap_ops = &ext4_iomap_overwrite_ops;
	ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops,
			   is_sync_kiocb(iocb) || unaligned_io || extend);
	if (ret == -ENOTBLK)
		ret = 0;

	if (extend)
		ret = ext4_handle_inode_extension(inode, offset, ret, count);
+2 −1
Original line number Diff line number Diff line
@@ -814,7 +814,8 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from)

	ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL,
			   is_sync_kiocb(iocb));

	if (ret == -ENOTBLK)
		ret = 0;
out:
	gfs2_glock_dq(&gh);
out_uninit:
+11 −5
Original line number Diff line number Diff line
@@ -10,6 +10,7 @@
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <linux/task_io_accounting_ops.h>
#include "trace.h"

#include "../internal.h"

@@ -401,6 +402,9 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
 * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
 * may be pure data writes. In that case, we still need to do a full data sync
 * completion.
 *
 * Returns -ENOTBLK in case of a page invalidation failure for
 * writes.  The caller needs to fall back to buffered I/O in this case.
 */
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
@@ -478,13 +482,15 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
	if (iov_iter_rw(iter) == WRITE) {
		/*
		 * Try to invalidate cache pages for the range we are writing.
		 * If this invalidation fails, tough, the write will still work,
		 * but racing two incompatible write paths is a pretty crazy
		 * thing to do, so we don't support it 100%.
		 * If this invalidation fails, let the caller fall back to
		 * buffered I/O.
		 */
		if (invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
				end >> PAGE_SHIFT))
			dio_warn_stale_pagecache(iocb->ki_filp);
				end >> PAGE_SHIFT)) {
			trace_iomap_dio_invalidate_fail(inode, pos, count);
			ret = -ENOTBLK;
			goto out_free_dio;
		}

		if (!wait_for_completion && !inode->i_sb->s_dio_done_wq) {
			ret = sb_init_dio_done_wq(inode->i_sb);
+1 −0
Original line number Diff line number Diff line
@@ -74,6 +74,7 @@ DEFINE_EVENT(iomap_range_class, name, \
DEFINE_RANGE_EVENT(iomap_writepage);
DEFINE_RANGE_EVENT(iomap_releasepage);
DEFINE_RANGE_EVENT(iomap_invalidatepage);
DEFINE_RANGE_EVENT(iomap_dio_invalidate_fail);

#define IOMAP_TYPE_STRINGS \
	{ IOMAP_HOLE,		"HOLE" }, \
+2 −2
Original line number Diff line number Diff line
@@ -553,8 +553,8 @@ out:
	xfs_iunlock(ip, iolock);

	/*
	 * No fallback to buffered IO on errors for XFS, direct IO will either
	 * complete fully or fail.
	 * No fallback to buffered IO after short writes for XFS, direct I/O
	 * will either complete fully or return an error.
	 */
	ASSERT(ret < 0 || ret == count);
	return ret;
Loading