Commit 5ec4fabb authored by Christoph Hellwig, committed by Alex Elder
Browse files

xfs: cleanup data end I/O handlers



Currently we have different end I/O handlers for read vs the different
types of write I/O.  But they are all very similar so we could just
use one with a few conditionals and reduce code size a lot.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Alex Elder <aelder@sgi.com>
Signed-off-by: Alex Elder <aelder@sgi.com>
parent 06342cf8
Loading
Loading
Loading
Loading
+26 −69
Original line number Diff line number Diff line
@@ -235,71 +235,36 @@ xfs_setfilesize(
}

/*
 * Buffered IO write completion for delayed allocate extents.
 * IO write completion.
 */
STATIC void
xfs_end_bio_delalloc(
xfs_end_io(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

/*
 * Buffered IO write completion for regular, written extents.
 */
STATIC void
xfs_end_bio_written(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);

	/*
 * IO write completion for unwritten extents.
 *
 * Issue transactions to convert a buffer range from unwritten
 * to written extents.
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 */
STATIC void
xfs_end_bio_unwritten(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;

	if (likely(!ioend->io_error)) {
		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
	if (ioend->io_type == IOMAP_UNWRITTEN &&
	    likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {
		int error;
			error = xfs_iomap_write_unwritten(ip, offset, size);

		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						 ioend->io_size);
		if (error)
			ioend->io_error = error;
	}
		xfs_setfilesize(ioend);
	}
	xfs_destroy_ioend(ioend);
}

	/*
 * IO read completion for regular, written extents.
	 * We might have to update the on-disk file size after extending
	 * writes.
	 */
STATIC void
xfs_end_bio_read(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend =
		container_of(work, xfs_ioend_t, io_work);

	if (ioend->io_type != IOMAP_READ)
		xfs_setfilesize(ioend);
	xfs_destroy_ioend(ioend);
}

@@ -314,10 +279,10 @@ xfs_finish_ioend(
	int		wait)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		struct workqueue_struct *wq = xfsdatad_workqueue;
		if (ioend->io_work.func == xfs_end_bio_unwritten)
			wq = xfsconvertd_workqueue;
		struct workqueue_struct *wq;

		wq = (ioend->io_type == IOMAP_UNWRITTEN) ?
			xfsconvertd_workqueue : xfsdatad_workqueue;
		queue_work(wq, &ioend->io_work);
		if (wait)
			flush_workqueue(wq);
@@ -355,15 +320,7 @@ xfs_alloc_ioend(
	ioend->io_offset = 0;
	ioend->io_size = 0;

	if (type == IOMAP_UNWRITTEN)
		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
	else if (type == IOMAP_DELAY)
		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
	else if (type == IOMAP_READ)
		INIT_WORK(&ioend->io_work, xfs_end_bio_read);
	else
		INIT_WORK(&ioend->io_work, xfs_end_bio_written);

	INIT_WORK(&ioend->io_work, xfs_end_io);
	return ioend;
}

@@ -1538,7 +1495,7 @@ xfs_end_io_direct(
		 * didn't map an unwritten extent so switch it's completion
		 * handler.
		 */
		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
		ioend->io_type = IOMAP_NEW;
		xfs_finish_ioend(ioend, 0);
	}