Commit c5bbd451 authored by Matthew Wilcox
Browse files

dax: Reinstate RCU protection of inode



For the device-dax case, it is possible that the inode can go away
underneath us.  The rcu_read_lock() was there to prevent it from
being freed, and not (as I thought) to protect the tree.  Bring back
the rcu_read_lock() protection.  Also add a little kernel-doc; while
this function is not exported to modules, it is used from outside dax.c.

Reported-by: Dan Williams <dan.j.williams@intel.com>
Fixes: 9f32d221 ("dax: Convert dax_lock_mapping_entry to XArray")
Signed-off-by: Matthew Wilcox <willy@infradead.org>
parent 7ae2ea7d
Loading
Loading
Loading
Loading
+19 −3
Original line number Diff line number Diff line
@@ -353,16 +353,27 @@ static struct page *dax_busy_page(void *entry)
	return NULL;
}

/**
 * dax_lock_mapping_entry - Lock the DAX entry corresponding to a page
 * @page: The page whose entry we want to lock
 *
 * Context: Process context.
 * Return: %true if the entry was locked or does not need to be locked.
 */
bool dax_lock_mapping_entry(struct page *page)
{
	XA_STATE(xas, NULL, 0);
	void *entry;
	bool locked;

	/* Ensure page->mapping isn't freed while we look at it */
	rcu_read_lock();
	for (;;) {
		struct address_space *mapping = READ_ONCE(page->mapping);

		locked = false;
		if (!dax_mapping(mapping))
			return false;
			break;

		/*
		 * In the device-dax case there's no need to lock, a
@@ -371,8 +382,9 @@ bool dax_lock_mapping_entry(struct page *page)
		 * otherwise we would not have a valid pfn_to_page()
		 * translation.
		 */
		locked = true;
		if (S_ISCHR(mapping->host->i_mode))
			return true;
			break;

		xas.xa = &mapping->i_pages;
		xas_lock_irq(&xas);
@@ -383,14 +395,18 @@ bool dax_lock_mapping_entry(struct page *page)
		xas_set(&xas, page->index);
		entry = xas_load(&xas);
		if (dax_is_locked(entry)) {
			rcu_read_unlock();
			entry = get_unlocked_entry(&xas);
			xas_unlock_irq(&xas);
			rcu_read_lock();
			continue;
		}
		dax_lock_entry(&xas, entry);
		xas_unlock_irq(&xas);
		return true;
		break;
	}
	rcu_read_unlock();
	return locked;
}

void dax_unlock_mapping_entry(struct page *page)