Commit ff9c745b authored by Matthew Wilcox

mm: Convert page-writeback to XArray



Includes moving mapping_tagged() to fs.h as a static inline, and
changing it to return bool.
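
A minimal caller sketch (not part of this patch; mapping_has_dirty_pages() is a
hypothetical helper) showing how the new inline, bool-returning mapping_tagged()
is used with the XArray mark constants:

	/* Hypothetical example, not in this patch: check a mapping for dirty pages. */
	static inline bool mapping_has_dirty_pages(struct address_space *mapping)
	{
		/*
		 * mapping_tagged() now wraps xa_marked(), a simple test of the
		 * XArray's mark bits, so no out-of-line call is needed.
		 */
		return mapping_tagged(mapping, PAGECACHE_TAG_DIRTY);
	}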

Signed-off-by: Matthew Wilcox <willy@infradead.org>
parent 8fa8e538
include/linux/fs.h +10 −7
@@ -483,15 +483,18 @@ struct block_device {
 	struct mutex		bd_fsfreeze_mutex;
 } __randomize_layout;
 
+/* XArray tags, for tagging dirty and writeback pages in the pagecache. */
+#define PAGECACHE_TAG_DIRTY	XA_MARK_0
+#define PAGECACHE_TAG_WRITEBACK	XA_MARK_1
+#define PAGECACHE_TAG_TOWRITE	XA_MARK_2
+
 /*
- * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
- * radix trees
+ * Returns true if any of the pages in the mapping are marked with the tag.
  */
-#define PAGECACHE_TAG_DIRTY	0
-#define PAGECACHE_TAG_WRITEBACK	1
-#define PAGECACHE_TAG_TOWRITE	2
-
-int mapping_tagged(struct address_space *mapping, int tag);
+static inline bool mapping_tagged(struct address_space *mapping, xa_mark_t tag)
+{
+	return xa_marked(&mapping->i_pages, tag);
+}
 
 static inline void i_mmap_lock_write(struct address_space *mapping)
 {
mm/page-writeback.c +26 −46
@@ -2097,34 +2097,25 @@ void __init page_writeback_init(void)
  * dirty pages in the file (thus it is important for this function to be quick
  * so that it can tag pages faster than a dirtying process can create them).
  */
-/*
- * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce the i_pages lock
- * latency.
- */
 void tag_pages_for_writeback(struct address_space *mapping,
 			     pgoff_t start, pgoff_t end)
 {
-#define WRITEBACK_TAG_BATCH 4096
-	unsigned long tagged = 0;
-	struct radix_tree_iter iter;
-	void **slot;
+	XA_STATE(xas, &mapping->i_pages, start);
+	unsigned int tagged = 0;
+	void *page;
 
-	xa_lock_irq(&mapping->i_pages);
-	radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, start,
-							PAGECACHE_TAG_DIRTY) {
-		if (iter.index > end)
-			break;
-		radix_tree_iter_tag_set(&mapping->i_pages, &iter,
-							PAGECACHE_TAG_TOWRITE);
-		tagged++;
-		if ((tagged % WRITEBACK_TAG_BATCH) != 0)
+	xas_lock_irq(&xas);
+	xas_for_each_marked(&xas, page, end, PAGECACHE_TAG_DIRTY) {
+		xas_set_mark(&xas, PAGECACHE_TAG_TOWRITE);
+		if (++tagged % XA_CHECK_SCHED)
 			continue;
-		slot = radix_tree_iter_resume(slot, &iter);
-		xa_unlock_irq(&mapping->i_pages);
+
+		xas_pause(&xas);
+		xas_unlock_irq(&xas);
 		cond_resched();
-		xa_lock_irq(&mapping->i_pages);
+		xas_lock_irq(&xas);
 	}
-	xa_unlock_irq(&mapping->i_pages);
+	xas_unlock_irq(&xas);
 }
 EXPORT_SYMBOL(tag_pages_for_writeback);
 
@@ -2164,7 +2155,7 @@ int write_cache_pages(struct address_space *mapping,
 	pgoff_t done_index;
 	int cycled;
 	int range_whole = 0;
-	int tag;
+	xa_mark_t tag;
 
 	pagevec_init(&pvec);
 	if (wbc->range_cyclic) {
@@ -2445,7 +2436,7 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
 
 /*
  * For address_spaces which do not use buffers.  Just tag the page as dirty in
- * its radix tree.
+ * the xarray.
  *
  * This is also used when a single buffer is being dirtied: we want to set the
  * page dirty in that case, but not all the buffers.  This is a "bottom-up"
@@ -2471,7 +2462,7 @@ int __set_page_dirty_nobuffers(struct page *page)
 		BUG_ON(page_mapping(page) != mapping);
 		WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
 		account_page_dirtied(page, mapping);
-		radix_tree_tag_set(&mapping->i_pages, page_index(page),
+		__xa_set_mark(&mapping->i_pages, page_index(page),
 				   PAGECACHE_TAG_DIRTY);
 		xa_unlock_irqrestore(&mapping->i_pages, flags);
 		unlock_page_memcg(page);
@@ -2634,13 +2625,13 @@ EXPORT_SYMBOL(__cancel_dirty_page);
  * Returns true if the page was previously dirty.
  *
  * This is for preparing to put the page under writeout.  We leave the page
- * tagged as dirty in the radix tree so that a concurrent write-for-sync
+ * tagged as dirty in the xarray so that a concurrent write-for-sync
  * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
  * implementation will run either set_page_writeback() or set_page_dirty(),
- * at which stage we bring the page's dirty flag and radix-tree dirty tag
+ * at which stage we bring the page's dirty flag and xarray dirty tag
  * back into sync.
  *
- * This incoherency between the page's dirty flag and radix-tree tag is
+ * This incoherency between the page's dirty flag and xarray tag is
  * unfortunate, but it only exists while the page is locked.
  */
 int clear_page_dirty_for_io(struct page *page)
@@ -2721,7 +2712,7 @@ int test_clear_page_writeback(struct page *page)
 		xa_lock_irqsave(&mapping->i_pages, flags);
 		ret = TestClearPageWriteback(page);
 		if (ret) {
-			radix_tree_tag_clear(&mapping->i_pages, page_index(page),
+			__xa_clear_mark(&mapping->i_pages, page_index(page),
 						PAGECACHE_TAG_WRITEBACK);
 			if (bdi_cap_account_writeback(bdi)) {
 				struct bdi_writeback *wb = inode_to_wb(inode);
@@ -2761,11 +2752,13 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
 
 	lock_page_memcg(page);
 	if (mapping && mapping_use_writeback_tags(mapping)) {
+		XA_STATE(xas, &mapping->i_pages, page_index(page));
 		struct inode *inode = mapping->host;
 		struct backing_dev_info *bdi = inode_to_bdi(inode);
 		unsigned long flags;
 
-		xa_lock_irqsave(&mapping->i_pages, flags);
+		xas_lock_irqsave(&xas, flags);
+		xas_load(&xas);
 		ret = TestSetPageWriteback(page);
 		if (!ret) {
 			bool on_wblist;
@@ -2773,8 +2766,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
 			on_wblist = mapping_tagged(mapping,
 						   PAGECACHE_TAG_WRITEBACK);
 
-			radix_tree_tag_set(&mapping->i_pages, page_index(page),
-						PAGECACHE_TAG_WRITEBACK);
+			xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
 			if (bdi_cap_account_writeback(bdi))
 				inc_wb_stat(inode_to_wb(inode), WB_WRITEBACK);
 
@@ -2787,12 +2779,10 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
 				sb_mark_inode_writeback(mapping->host);
 		}
 		if (!PageDirty(page))
-			radix_tree_tag_clear(&mapping->i_pages, page_index(page),
-						PAGECACHE_TAG_DIRTY);
+			xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
 		if (!keep_write)
-			radix_tree_tag_clear(&mapping->i_pages, page_index(page),
-						PAGECACHE_TAG_TOWRITE);
-		xa_unlock_irqrestore(&mapping->i_pages, flags);
+			xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
+		xas_unlock_irqrestore(&xas, flags);
 	} else {
 		ret = TestSetPageWriteback(page);
 	}
@@ -2806,16 +2796,6 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
 }
 EXPORT_SYMBOL(__test_set_page_writeback);
 
-/*
- * Return true if any of the pages in the mapping are marked with the
- * passed tag.
- */
-int mapping_tagged(struct address_space *mapping, int tag)
-{
-	return radix_tree_tagged(&mapping->i_pages, tag);
-}
-EXPORT_SYMBOL(mapping_tagged);
-
 /**
  * wait_for_stable_page() - wait for writeback to finish, if necessary.
  * @page:	The page to wait on.