Commit 4a06fd26 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Alex Elder
Browse files

xfs: remove i_iocount



We now have an i_dio_count field and surrounding infrastructure to wait
for direct I/O completion instead of i_iocount, and we have never needed
the iocount waits for buffered I/O given that we only set the page uptodate
after finishing all required work.  Thus remove i_iocount, and replace
the actually needed waits with calls to inode_dio_wait.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Alex Elder <aelder@sgi.com>
parent 2b3ffd7e
Loading
Loading
Loading
Loading
+1 −38
Original line number Diff line number Diff line
@@ -38,40 +38,6 @@
#include <linux/pagevec.h>
#include <linux/writeback.h>


/*
 * Prime number of hash buckets since address is used as the key.
 */
#define NVSYNC		37
#define to_ioend_wq(v)	(&xfs_ioend_wq[((unsigned long)v) % NVSYNC])
static wait_queue_head_t xfs_ioend_wq[NVSYNC];

/*
 * Set up the hashed ioend wait queues; each inode maps to one of the
 * NVSYNC buckets via to_ioend_wq() for xfs_ioend_wait()/xfs_ioend_wake().
 */
void __init
xfs_ioend_init(void)
{
	int bucket;

	for (bucket = 0; bucket < NVSYNC; bucket++)
		init_waitqueue_head(&xfs_ioend_wq[bucket]);
}

/*
 * Sleep until every outstanding ioend against @ip has completed, that is
 * until the inode's i_iocount reference count drains to zero.
 */
void
xfs_ioend_wait(
	xfs_inode_t	*ip)
{
	wait_event(*to_ioend_wq(ip), atomic_read(&ip->i_iocount) == 0);
}

/*
 * Drop one I/O reference on @ip; if that was the last outstanding ioend,
 * wake anyone blocked in xfs_ioend_wait() on this inode's hash bucket.
 */
STATIC void
xfs_ioend_wake(
	xfs_inode_t	*ip)
{
	if (!atomic_dec_and_test(&ip->i_iocount))
		return;
	wake_up(to_ioend_wq(ip));
}

void
xfs_count_page_state(
	struct page		*page,
@@ -115,7 +81,6 @@ xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
@@ -127,7 +92,7 @@ xfs_destroy_ioend(
			aio_complete(ioend->io_iocb, ioend->io_result, 0);
		inode_dio_done(ioend->io_inode);
	}
	xfs_ioend_wake(ip);

	mempool_free(ioend, xfs_ioend_pool);
}

@@ -298,7 +263,6 @@ xfs_alloc_ioend(
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
	ioend->io_offset = 0;
	ioend->io_size = 0;
	ioend->io_iocb = NULL;
@@ -558,7 +522,6 @@ xfs_cancel_ioend(
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		xfs_ioend_wake(XFS_I(ioend->io_inode));
		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}
+0 −3
Original line number Diff line number Diff line
@@ -61,9 +61,6 @@ typedef struct xfs_ioend {
extern const struct address_space_operations xfs_address_space_operations;
extern int xfs_get_blocks(struct inode *, sector_t, struct buffer_head *, int);

extern void xfs_ioend_init(void);
extern void xfs_ioend_wait(struct xfs_inode *);

extern void xfs_count_page_state(struct page *, int *, int *);

#endif /* __XFS_AOPS_H__ */
+2 −6
Original line number Diff line number Diff line
@@ -149,10 +149,6 @@ xfs_file_fsync(

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	xfs_ioend_wait(ip);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	if (mp->m_flags & XFS_MOUNT_BARRIER) {
		/*
		 * If we have an RT and/or log subvolume we need to make sure
@@ -758,7 +754,7 @@ restart:
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. xfs_ioend_wait()).
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
@@ -821,7 +817,7 @@ xfs_file_dio_aio_write(
	 * otherwise demote the lock if we had to flush cached pages
	 */
	if (unaligned_io)
		xfs_ioend_wait(ip);
		inode_dio_wait(inode);
	else if (*iolock == XFS_IOLOCK_EXCL) {
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
		*iolock = XFS_IOLOCK_SHARED;
+0 −2
Original line number Diff line number Diff line
@@ -75,7 +75,6 @@ xfs_inode_alloc(
		return NULL;
	}

	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));
@@ -150,7 +149,6 @@ xfs_inode_free(
	}

	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));
+0 −1
Original line number Diff line number Diff line
@@ -257,7 +257,6 @@ typedef struct xfs_inode {

	xfs_fsize_t		i_size;		/* in-memory size */
	xfs_fsize_t		i_new_size;	/* size when write completes */
	atomic_t		i_iocount;	/* outstanding I/O count */

	/* VFS inode */
	struct inode		i_vnode;	/* embedded VFS inode */
Loading