Commit 9cccef95 authored by Trond Myklebust's avatar Trond Myklebust
Browse files

NFS: Clean up write code...



The addition of nfs_page_mkwrite means that we should no longer need to
create requests inside nfs_writepage().

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
parent 94387fb1
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -339,7 +339,7 @@ static int nfs_launder_page(struct page *page)
const struct address_space_operations nfs_file_aops = {
	.readpage = nfs_readpage,
	.readpages = nfs_readpages,
	.set_page_dirty = nfs_set_page_dirty,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.writepage = nfs_writepage,
	.writepages = nfs_writepages,
	.prepare_write = nfs_prepare_write,
+4 −64
Original line number Diff line number Diff line
@@ -243,10 +243,7 @@ static void nfs_end_page_writeback(struct page *page)

/*
 * Find an associated nfs write request, and prepare to flush it out
 * Returns 1 if there was no write request, or if the request was
 * already tagged by nfs_set_page_dirty.Returns 0 if the request
 * was not tagged.
 * May also return an error if the user signalled nfs_wait_on_request().
 * May return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
				struct page *page)
@@ -261,7 +258,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
		req = nfs_page_find_request_locked(page);
		if (req == NULL) {
			spin_unlock(&inode->i_lock);
			return 1;
			return 0;
		}
		if (nfs_lock_request_dontget(req))
			break;
@@ -282,7 +279,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
		spin_unlock(&inode->i_lock);
		nfs_unlock_request(req);
		nfs_pageio_complete(pgio);
		return 1;
		return 0;
	}
	if (nfs_set_page_writeback(page) != 0) {
		spin_unlock(&inode->i_lock);
@@ -290,10 +287,9 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
	}
	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
			NFS_PAGE_TAG_LOCKED);
	ret = test_bit(PG_NEED_FLUSH, &req->wb_flags);
	spin_unlock(&inode->i_lock);
	nfs_pageio_add_request(pgio, req);
	return ret;
	return 0;
}

/*
@@ -302,9 +298,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	struct nfs_pageio_descriptor mypgio, *pgio;
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	unsigned offset;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
@@ -320,28 +314,7 @@ static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc
	nfs_pageio_cond_complete(pgio, page->index);

	err = nfs_page_async_flush(pgio, page);
	if (err <= 0)
		goto out;
	err = 0;
	offset = nfs_page_length(page);
	if (!offset)
		goto out;

	nfs_pageio_cond_complete(pgio, page->index);

	ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE);
	if (ctx == NULL) {
		err = -EBADF;
		goto out;
	}
	err = nfs_writepage_setup(ctx, page, 0, offset);
	put_nfs_open_context(ctx);
	if (err != 0)
		goto out;
	err = nfs_page_async_flush(pgio, page);
	if (err > 0)
		err = 0;
out:
	if (!wbc->for_writepages)
		nfs_pageio_complete(pgio);
	return err;
@@ -395,8 +368,6 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
	}
	SetPagePrivate(req->wb_page);
	set_page_private(req->wb_page, (unsigned long)req);
	if (PageDirty(req->wb_page))
		set_bit(PG_NEED_FLUSH, &req->wb_flags);
	nfsi->npages++;
	kref_get(&req->wb_kref);
	return 0;
@@ -416,8 +387,6 @@ static void nfs_inode_remove_request(struct nfs_page *req)
	set_page_private(req->wb_page, 0);
	ClearPagePrivate(req->wb_page);
	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
	if (test_and_clear_bit(PG_NEED_FLUSH, &req->wb_flags))
		__set_page_dirty_nobuffers(req->wb_page);
	nfsi->npages--;
	if (!nfsi->npages) {
		spin_unlock(&inode->i_lock);
@@ -1477,35 +1446,6 @@ int nfs_wb_page(struct inode *inode, struct page* page)
	return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
}

/*
 * Set-dirty handler for NFS pages (removed by this commit in favour of
 * __set_page_dirty_nobuffers).
 *
 * If the page already has an outstanding nfs_page write request, mark
 * that request with PG_NEED_FLUSH instead of redirtying the page via
 * the generic helper; nfs_page_async_flush tested that bit to decide
 * whether the page still needed flushing.
 *
 * Returns non-zero if the dirty/flush state was actually changed by
 * this call, zero if it was already set (the set_page_dirty contract).
 */
int nfs_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct nfs_page *req;
	int ret;

	/* mapping/host may vanish under us (e.g. truncate race) —
	 * fall back to the raw page flag in that case. */
	if (!mapping)
		goto out_raced;
	inode = mapping->host;
	if (!inode)
		goto out_raced;
	/* i_lock guards the lookup of an existing write request. */
	spin_lock(&inode->i_lock);
	req = nfs_page_find_request_locked(page);
	if (req != NULL) {
		/* Mark any existing write requests for flushing */
		ret = !test_and_set_bit(PG_NEED_FLUSH, &req->wb_flags);
		spin_unlock(&inode->i_lock);
		/* drop the reference taken by the lookup above */
		nfs_release_request(req);
		return ret;
	}
	/* No request outstanding: defer to the generic dirtying path. */
	ret = __set_page_dirty_nobuffers(page);
	spin_unlock(&inode->i_lock);
	return ret;
out_raced:
	/* Racing with teardown: just set PG_dirty directly. */
	return !TestSetPageDirty(page);
}


int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
+0 −1
Original line number Diff line number Diff line
@@ -420,7 +420,6 @@ extern int nfs_flush_incompatible(struct file *file, struct page *page);
extern int  nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int);
extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *);
extern void nfs_writedata_release(void *);
extern int nfs_set_page_dirty(struct page *);

/*
 * Try to write back everything synchronously (but check the
+0 −1
Original line number Diff line number Diff line
@@ -30,7 +30,6 @@
#define PG_BUSY			0
#define PG_NEED_COMMIT		1
#define PG_NEED_RESCHED		2
#define PG_NEED_FLUSH		3

struct nfs_inode;
struct nfs_page {