Commit 155130a4 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Al Viro
Browse files

get rid of block_write_begin_newtrunc



Move the call to vmtruncate to get rid of excessive blocks to the callers
in preparation of the new truncate sequence and rename the non-truncating
version to block_write_begin.

While we're at it also remove several unused arguments to block_write_begin.

Signed-off-by: default avatarChristoph Hellwig <hch@lst.de>
Signed-off-by: default avatarAl Viro <viro@zeniv.linux.org.uk>
parent 6e1db88d
Loading
Loading
Loading
Loading
fs/bfs/file.c +11 −3
Original line number Diff line number Diff line
@@ -168,9 +168,17 @@ static int bfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
-	*pagep = NULL;
-	return block_write_begin(file, mapping, pos, len, flags,
-					pagep, fsdata, bfs_get_block);
+	int ret;
+
+	ret = block_write_begin(mapping, pos, len, flags, pagep,
+				bfs_get_block);
+	if (unlikely(ret)) {
+		loff_t isize = mapping->host->i_size;
+		if (pos + len > isize)
+			vmtruncate(mapping->host, isize);
+	}
+
+	return ret;
}

static sector_t bfs_bmap(struct address_space *mapping, sector_t block)
fs/block_dev.c +2 −3
Original line number Diff line number Diff line
@@ -308,9 +308,8 @@ static int blkdev_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
-	*pagep = NULL;
-	return block_write_begin_newtrunc(file, mapping, pos, len, flags,
-				pagep, fsdata, blkdev_get_block);
+	return block_write_begin(mapping, pos, len, flags, pagep,
+				 blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
fs/buffer.c +9 −52
Original line number Diff line number Diff line
@@ -1962,14 +1962,13 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len,
EXPORT_SYMBOL(__block_write_begin);

/*
- * Filesystems implementing the new truncate sequence should use the
- * _newtrunc postfix variant which won't incorrectly call vmtruncate.
  * block_write_begin takes care of the basic task of block allocation and
  * bringing partial write blocks uptodate first.
+ *
+ * The filesystem needs to handle block truncation upon failure.
  */
-int block_write_begin_newtrunc(struct file *file, struct address_space *mapping,
-			loff_t pos, unsigned len, unsigned flags,
-			struct page **pagep, void **fsdata,
-			get_block_t *get_block)
+int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
+		unsigned flags, struct page **pagep, get_block_t *get_block)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	struct page *page;
@@ -1989,44 +1988,6 @@ int block_write_begin_newtrunc(struct file *file, struct address_space *mapping,
	*pagep = page;
	return status;
}
-EXPORT_SYMBOL(block_write_begin_newtrunc);
-
-/*
- * block_write_begin takes care of the basic task of block allocation and
- * bringing partial write blocks uptodate first.
- *
- * If *pagep is not NULL, then block_write_begin uses the locked page
- * at *pagep rather than allocating its own. In this case, the page will
- * not be unlocked or deallocated on failure.
- */
-int block_write_begin(struct file *file, struct address_space *mapping,
-			loff_t pos, unsigned len, unsigned flags,
-			struct page **pagep, void **fsdata,
-			get_block_t *get_block)
-{
-	int ret;
-
-	ret = block_write_begin_newtrunc(file, mapping, pos, len, flags,
-					pagep, fsdata, get_block);
-
-	/*
-	 * prepare_write() may have instantiated a few blocks
-	 * outside i_size.  Trim these off again. Don't need
-	 * i_size_read because we hold i_mutex.
-	 *
-	 * Filesystems which pass down their own page also cannot
-	 * call into vmtruncate here because it would lead to lock
-	 * inversion problems (*pagep is locked). This is a further
-	 * example of where the old truncate sequence is inadequate.
-	 */
-	if (unlikely(ret) && *pagep == NULL) {
-		loff_t isize = mapping->host->i_size;
-		if (pos + len > isize)
-			vmtruncate(mapping->host, isize);
-	}
-
-	return ret;
-}
EXPORT_SYMBOL(block_write_begin);

int block_write_end(struct file *file, struct address_space *mapping,
@@ -2357,7 +2318,7 @@ int cont_write_begin(struct file *file, struct address_space *mapping,

	err = cont_expand_zero(file, mapping, pos, bytes);
	if (err)
-		goto out;
+		return err;

	zerofrom = *bytes & ~PAGE_CACHE_MASK;
	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
@@ -2365,11 +2326,7 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
		(*bytes)++;
	}

-	*pagep = NULL;
-	err = block_write_begin_newtrunc(file, mapping, pos, len,
-				flags, pagep, fsdata, get_block);
-out:
-	return err;
+	return block_write_begin(mapping, pos, len, flags, pagep, get_block);
}
EXPORT_SYMBOL(cont_write_begin);

@@ -2511,8 +2468,8 @@ int nobh_write_begin(struct address_space *mapping,
		unlock_page(page);
		page_cache_release(page);
		*pagep = NULL;
-		return block_write_begin_newtrunc(NULL, mapping, pos, len,
-					flags, pagep, fsdata, get_block);
+		return block_write_begin(mapping, pos, len, flags, pagep,
+					 get_block);
	}

	if (PageMappedToDisk(page))
fs/ext2/inode.c +2 −3
Original line number Diff line number Diff line
@@ -772,9 +772,8 @@ ext2_write_begin(struct file *file, struct address_space *mapping,
{
	int ret;

-	*pagep = NULL;
-	ret = block_write_begin_newtrunc(file, mapping, pos, len, flags,
-					 pagep, fsdata, ext2_get_block);
+	ret = block_write_begin(mapping, pos, len, flags, pagep,
+				ext2_get_block);
	if (ret < 0)
		ext2_write_failed(mapping, pos + len);
	return ret;
fs/minix/inode.c +10 −2
Original line number Diff line number Diff line
@@ -366,9 +366,17 @@ static int minix_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
-	*pagep = NULL;
-	return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+	int ret;
+
+	ret = block_write_begin(mapping, pos, len, flags, pagep,
 				minix_get_block);
+	if (unlikely(ret)) {
+		loff_t isize = mapping->host->i_size;
+		if (pos + len > isize)
+			vmtruncate(mapping->host, isize);
+	}
+
+	return ret;
}

static sector_t minix_bmap(struct address_space *mapping, sector_t block)
Loading