Commit 2a1a2c1a authored by Linus Torvalds
Browse files
Pull dax fix from Dan Williams:
 "Clean up unnecessary usage of prepare_to_wait_exclusive().

  While I feel a bit silly sending a single-commit pull-request there is
  nothing else queued up for dax this cycle. This change has shipped in
  -next for multiple releases"

* tag 'dax-fix-4.21' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  dax: Use non-exclusive wait in wait_entry_unlocked()
parents 9ab97aea d8a70641
Loading
Loading
Loading
Loading
+7 −9
(diff table columns: original line number | diff line number | diff line)
@@ -246,18 +246,16 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
	ewait.wait.func = wake_exceptional_entry_func;

	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
	prepare_to_wait_exclusive(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
	/*
	 * Unlike get_unlocked_entry() there is no guarantee that this
	 * path ever successfully retrieves an unlocked entry before an
	 * inode dies. Perform a non-exclusive wait in case this path
	 * never successfully performs its own wake up.
	 */
	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
	xas_unlock_irq(xas);
	schedule();
	finish_wait(wq, &ewait.wait);

	/*
	 * Entry lock waits are exclusive. Wake up the next waiter since
	 * we aren't sure we will acquire the entry lock and thus wake
	 * the next waiter up on unlock.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, 1, &ewait.key);
}

static void put_unlocked_entry(struct xa_state *xas, void *entry)