Commit bcd7351e authored by Linus Torvalds
Browse files
Pull FS-Cache updates from David Howells:
 "This contains a number of fixes for various FS-Cache issues plus some
  cleanups.  The commits are, in order:

   1) Provide a system wait_on_atomic_t() and wake_up_atomic_t() sharing
      the bit-wait table (enhancement for #8).

   2) Don't put spin_lock() in a while-condition as spin_lock() may have
      a do {} while(0) wrapper (cleanup).

   3) Symbolically name i_mutex lock classes rather than using numbers
      in CacheFiles (cleanup).

   4) Don't sleep in page release if __GFP_FS is not set (deadlock vs
      ext4).

   5) Uninline fscache_object_init() (cleanup for #7).

   6) Wrap checks on object state (cleanup for #7).

   7) Simplify the object state machine by separating work states from
      wait states.

   8) Simplify cookie retention by objects (NULL pointer deref fix).

   9) Remove unused list_to_page() macro (cleanup).

  10) Make the remaining-pages counter in the retrieval op atomic
      (assertion failure fix).

  11) Don't use spin_is_locked() in assertions (assertion failure fix)"

* tag 'fscache-20130702' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs:
  FS-Cache: Don't use spin_is_locked() in assertions
  FS-Cache: The retrieval remaining-pages counter needs to be atomic_t
  cachefiles: remove unused macro list_to_page()
  FS-Cache: Simplify cookie retention for fscache_objects, fixing oops
  FS-Cache: Fix object state machine to have separate work and wait states
  FS-Cache: Wrap checks on object state
  FS-Cache: Uninline fscache_object_init()
  FS-Cache: Don't sleep in page release if __GFP_FS is not set
  CacheFiles: name i_mutex lock class explicitly
  fs/fscache: remove spin_lock() from the condition in while()
  Add wait_on_atomic_t() and wake_up_atomic_t()
parents 6072a93b dcfae32f
Loading
Loading
Loading
Loading
+10 −3
Original line number Diff line number Diff line
@@ -13,8 +13,6 @@
#include <linux/mount.h>
#include "internal.h"

#define list_to_page(head) (list_entry((head)->prev, struct page, lru))

struct cachefiles_lookup_data {
	struct cachefiles_xattr	*auxdata;	/* auxiliary data */
	char			*key;		/* key path */
@@ -212,20 +210,29 @@ static void cachefiles_update_object(struct fscache_object *_object)
	object = container_of(_object, struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache, struct cachefiles_cache,
			     cache);

	if (!fscache_use_cookie(_object)) {
		_leave(" [relinq]");
		return;
	}

	cookie = object->fscache.cookie;

	if (!cookie->def->get_aux) {
		fscache_unuse_cookie(_object);
		_leave(" [no aux]");
		return;
	}

	auxdata = kmalloc(2 + 512 + 3, cachefiles_gfp);
	if (!auxdata) {
		fscache_unuse_cookie(_object);
		_leave(" [nomem]");
		return;
	}

	auxlen = cookie->def->get_aux(cookie->netfs_data, auxdata->data, 511);
	fscache_unuse_cookie(_object);
	ASSERTCMP(auxlen, <, 511);

	auxdata->len = auxlen + 1;
@@ -263,7 +270,7 @@ static void cachefiles_drop_object(struct fscache_object *_object)
#endif

	/* delete retired objects */
	if (object->fscache.state == FSCACHE_OBJECT_RECYCLING &&
	if (test_bit(FSCACHE_COOKIE_RETIRED, &object->fscache.cookie->flags) &&
	    _object != cache->cache.fsdef
	    ) {
		_debug("- retire object OBJ%x", object->fscache.debug_id);
+5 −5
Original line number Diff line number Diff line
@@ -38,7 +38,7 @@ void __cachefiles_printk_object(struct cachefiles_object *object,
	printk(KERN_ERR "%sobject: OBJ%x\n",
	       prefix, object->fscache.debug_id);
	printk(KERN_ERR "%sobjstate=%s fl=%lx wbusy=%x ev=%lx[%lx]\n",
	       prefix, fscache_object_states[object->fscache.state],
	       prefix, object->fscache.state->name,
	       object->fscache.flags, work_busy(&object->fscache.work),
	       object->fscache.events, object->fscache.event_mask);
	printk(KERN_ERR "%sops=%u inp=%u exc=%u\n",
@@ -127,10 +127,10 @@ static void cachefiles_mark_object_buried(struct cachefiles_cache *cache,
found_dentry:
	kdebug("preemptive burial: OBJ%x [%s] %p",
	       object->fscache.debug_id,
	       fscache_object_states[object->fscache.state],
	       object->fscache.state->name,
	       dentry);

	if (object->fscache.state < FSCACHE_OBJECT_DYING) {
	if (fscache_object_is_live(&object->fscache)) {
		printk(KERN_ERR "\n");
		printk(KERN_ERR "CacheFiles: Error:"
		       " Can't preemptively bury live object\n");
@@ -192,7 +192,7 @@ try_again:
	/* an old object from a previous incarnation is hogging the slot - we
	 * need to wait for it to be destroyed */
wait_for_old_object:
	if (xobject->fscache.state < FSCACHE_OBJECT_DYING) {
	if (fscache_object_is_live(&object->fscache)) {
		printk(KERN_ERR "\n");
		printk(KERN_ERR "CacheFiles: Error:"
		       " Unexpected object collision\n");
@@ -836,7 +836,7 @@ static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache,
	//       dir->d_name.len, dir->d_name.len, dir->d_name.name, filename);

	/* look up the victim */
	mutex_lock_nested(&dir->d_inode->i_mutex, 1);
	mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);

	start = jiffies;
	victim = lookup_one_len(filename, dir, strlen(filename));
+2 −4
Original line number Diff line number Diff line
@@ -109,13 +109,12 @@ int cachefiles_set_object_xattr(struct cachefiles_object *object,
	struct dentry *dentry = object->dentry;
	int ret;

	ASSERT(object->fscache.cookie);
	ASSERT(dentry);

	_enter("%p,#%d", object, auxdata->len);

	/* attempt to install the cache metadata directly */
	_debug("SET %s #%u", object->fscache.cookie->def->name, auxdata->len);
	_debug("SET #%u", auxdata->len);

	ret = vfs_setxattr(dentry, cachefiles_xattr_cache,
			   &auxdata->type, auxdata->len,
@@ -138,13 +137,12 @@ int cachefiles_update_object_xattr(struct cachefiles_object *object,
	struct dentry *dentry = object->dentry;
	int ret;

	ASSERT(object->fscache.cookie);
	ASSERT(dentry);

	_enter("%p,#%d", object, auxdata->len);

	/* attempt to install the cache metadata directly */
	_debug("SET %s #%u", object->fscache.cookie->def->name, auxdata->len);
	_debug("SET #%u", auxdata->len);

	ret = vfs_setxattr(dentry, cachefiles_xattr_cache,
			   &auxdata->type, auxdata->len,
+18 −16
Original line number Diff line number Diff line
@@ -115,7 +115,7 @@ struct fscache_cache *fscache_select_cache_for_object(
				     struct fscache_object, cookie_link);

		cache = object->cache;
		if (object->state >= FSCACHE_OBJECT_DYING ||
		if (fscache_object_is_dying(object) ||
		    test_bit(FSCACHE_IOERROR, &cache->flags))
			cache = NULL;

@@ -224,8 +224,10 @@ int fscache_add_cache(struct fscache_cache *cache,
	BUG_ON(!ifsdef);

	cache->flags = 0;
	ifsdef->event_mask = ULONG_MAX & ~(1 << FSCACHE_OBJECT_EV_CLEARED);
	ifsdef->state = FSCACHE_OBJECT_ACTIVE;
	ifsdef->event_mask =
		((1 << NR_FSCACHE_OBJECT_EVENTS) - 1) &
		~(1 << FSCACHE_OBJECT_EV_CLEARED);
	__set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &ifsdef->flags);

	if (!tagname)
		tagname = cache->identifier;
@@ -330,25 +332,25 @@ static void fscache_withdraw_all_objects(struct fscache_cache *cache,
{
	struct fscache_object *object;

	while (!list_empty(&cache->object_list)) {
		spin_lock(&cache->object_list_lock);

	while (!list_empty(&cache->object_list)) {
		if (!list_empty(&cache->object_list)) {
			object = list_entry(cache->object_list.next,
					    struct fscache_object, cache_link);
			list_move_tail(&object->cache_link, dying_objects);

			_debug("withdraw %p", object->cookie);

		spin_lock(&object->lock);
		spin_unlock(&cache->object_list_lock);
		fscache_raise_event(object, FSCACHE_OBJECT_EV_WITHDRAW);
		spin_unlock(&object->lock);

		cond_resched();
		spin_lock(&cache->object_list_lock);
			/* This must be done under object_list_lock to prevent
			 * a race with fscache_drop_object().
			 */
			fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
		}

		spin_unlock(&cache->object_list_lock);
		cond_resched();
	}
}

/**
+32 −61
Original line number Diff line number Diff line
@@ -95,6 +95,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
	atomic_set(&cookie->usage, 1);
	atomic_set(&cookie->n_children, 0);

	/* We keep the active count elevated until relinquishment to prevent an
	 * attempt to wake up every time the object operations queue quiesces.
	 */
	atomic_set(&cookie->n_active, 1);

	atomic_inc(&parent->usage);
	atomic_inc(&parent->n_children);

@@ -177,7 +182,6 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)

	cookie->flags =
		(1 << FSCACHE_COOKIE_LOOKING_UP) |
		(1 << FSCACHE_COOKIE_CREATING) |
		(1 << FSCACHE_COOKIE_NO_DATA_YET);

	/* ask the cache to allocate objects for this cookie and its parent
@@ -205,7 +209,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)

	/* initiate the process of looking up all the objects in the chain
	 * (done by fscache_initialise_object()) */
	fscache_enqueue_object(object);
	fscache_raise_event(object, FSCACHE_OBJECT_EV_NEW_CHILD);

	spin_unlock(&cookie->lock);

@@ -285,7 +289,7 @@ static int fscache_alloc_object(struct fscache_cache *cache,

object_already_extant:
	ret = -ENOBUFS;
	if (object->state >= FSCACHE_OBJECT_DYING) {
	if (fscache_object_is_dead(object)) {
		spin_unlock(&cookie->lock);
		goto error;
	}
@@ -321,7 +325,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
	ret = -EEXIST;
	hlist_for_each_entry(p, &cookie->backing_objects, cookie_link) {
		if (p->cache == object->cache) {
			if (p->state >= FSCACHE_OBJECT_DYING)
			if (fscache_object_is_dying(p))
				ret = -ENOBUFS;
			goto cant_attach_object;
		}
@@ -332,7 +336,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
	hlist_for_each_entry(p, &cookie->parent->backing_objects,
			     cookie_link) {
		if (p->cache == object->cache) {
			if (p->state >= FSCACHE_OBJECT_DYING) {
			if (fscache_object_is_dying(p)) {
				ret = -ENOBUFS;
				spin_unlock(&cookie->parent->lock);
				goto cant_attach_object;
@@ -400,7 +404,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
			object = hlist_entry(cookie->backing_objects.first,
					     struct fscache_object,
					     cookie_link);
			if (object->state < FSCACHE_OBJECT_DYING)
			if (fscache_object_is_live(object))
				fscache_raise_event(
					object, FSCACHE_OBJECT_EV_INVALIDATE);
		}
@@ -467,9 +471,7 @@ EXPORT_SYMBOL(__fscache_update_cookie);
 */
void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
{
	struct fscache_cache *cache;
	struct fscache_object *object;
	unsigned long event;

	fscache_stat(&fscache_n_relinquishes);
	if (retire)
@@ -481,8 +483,11 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
		return;
	}

	_enter("%p{%s,%p},%d",
	       cookie, cookie->def->name, cookie->netfs_data, retire);
	_enter("%p{%s,%p,%d},%d",
	       cookie, cookie->def->name, cookie->netfs_data,
	       atomic_read(&cookie->n_active), retire);

	ASSERTCMP(atomic_read(&cookie->n_active), >, 0);

	if (atomic_read(&cookie->n_children) != 0) {
		printk(KERN_ERR "FS-Cache: Cookie '%s' still has children\n",
@@ -490,62 +495,28 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
		BUG();
	}

	/* wait for the cookie to finish being instantiated (or to fail) */
	if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
		fscache_stat(&fscache_n_relinquishes_waitcrt);
		wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
	}

	event = retire ? FSCACHE_OBJECT_EV_RETIRE : FSCACHE_OBJECT_EV_RELEASE;
	/* No further netfs-accessing operations on this cookie permitted */
	set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags);
	if (retire)
		set_bit(FSCACHE_COOKIE_RETIRED, &cookie->flags);

try_again:
	spin_lock(&cookie->lock);

	/* break links with all the active objects */
	while (!hlist_empty(&cookie->backing_objects)) {
		int n_reads;
		object = hlist_entry(cookie->backing_objects.first,
				     struct fscache_object,
				     cookie_link);

		_debug("RELEASE OBJ%x", object->debug_id);

		set_bit(FSCACHE_COOKIE_WAITING_ON_READS, &cookie->flags);
		n_reads = atomic_read(&object->n_reads);
		if (n_reads) {
			int n_ops = object->n_ops;
			int n_in_progress = object->n_in_progress;
			spin_unlock(&cookie->lock);
			printk(KERN_ERR "FS-Cache:"
			       " Cookie '%s' still has %d outstanding reads (%d,%d)\n",
			       cookie->def->name,
			       n_reads, n_ops, n_in_progress);
			wait_on_bit(&cookie->flags, FSCACHE_COOKIE_WAITING_ON_READS,
				    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
			printk("Wait finished\n");
			goto try_again;
	hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
		fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
	}
	spin_unlock(&cookie->lock);

		/* detach each cache object from the object cookie */
		spin_lock(&object->lock);
		hlist_del_init(&object->cookie_link);

		cache = object->cache;
		object->cookie = NULL;
		fscache_raise_event(object, event);
		spin_unlock(&object->lock);

		if (atomic_dec_and_test(&cookie->usage))
			/* the cookie refcount shouldn't be reduced to 0 yet */
			BUG();
	}
	/* Wait for cessation of activity requiring access to the netfs (when
	 * n_active reaches 0).
	 */
	if (!atomic_dec_and_test(&cookie->n_active))
		wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
				 TASK_UNINTERRUPTIBLE);

	/* detach pointers back to the netfs */
	/* Clear pointers back to the netfs */
	cookie->netfs_data	= NULL;
	cookie->def		= NULL;

	spin_unlock(&cookie->lock);
	BUG_ON(cookie->stores.rnode);

	if (cookie->parent) {
		ASSERTCMP(atomic_read(&cookie->parent->usage), >, 0);
@@ -553,7 +524,7 @@ try_again:
		atomic_dec(&cookie->parent->n_children);
	}

	/* finally dispose of the cookie */
	/* Dispose of the netfs's link to the cookie */
	ASSERTCMP(atomic_read(&cookie->usage), >, 0);
	fscache_cookie_put(cookie);

Loading