Commit a0e3cc65 authored by Andreas Gruenbacher
Browse files

gfs2: Turn gl_delete into a delayed work



This requires flushing delayed work items in gfs2_make_fs_ro (which is called
before unmounting a filesystem).

When inodes are deleted and then recreated, pending gl_delete work items would
have no effect because the inode generation number will have changed, so we can
cancel any pending gl_delete work before reusing an iopen glock.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
parent f286d627
Loading
Loading
Loading
Loading
+45 −2
Original line number Diff line number Diff line
@@ -776,11 +776,16 @@ bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation)

static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct delayed_work *dwork = to_delayed_work(work);
	struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	spin_lock(&gl->gl_lockref.lock);
	clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
	spin_unlock(&gl->gl_lockref.lock);

	/* If someone's using this glock to create a new dinode, the block must
	   have been freed by another node, then re-used, in which case our
	   iopen callback is too late after the fact. Ignore it. */
@@ -949,7 +954,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
	gl->gl_object = NULL;
	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);
	INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
@@ -1772,6 +1777,44 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
	rhashtable_walk_exit(&iter);
}

/*
 * gfs2_queue_delete_work - schedule delete work on an iopen glock
 * @gl: the iopen glock
 * @delay: delay in jiffies before the work runs
 *
 * Marks the glock as having pending delete work under the lockref spin
 * lock so that iopen_go_demote_ok can observe the queued state.
 *
 * Returns: true if the work item was newly queued, false if it was
 * already pending.
 */
bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay)
{
	bool was_queued;

	spin_lock(&gl->gl_lockref.lock);
	was_queued = queue_delayed_work(gfs2_delete_workqueue,
					&gl->gl_delete, delay);
	if (was_queued)
		set_bit(GLF_PENDING_DELETE, &gl->gl_flags);
	spin_unlock(&gl->gl_lockref.lock);

	return was_queued;
}

/*
 * gfs2_cancel_delete_work - synchronously cancel pending delete work
 * @gl: the iopen glock
 *
 * If a work item was actually cancelled, clear the pending flag and drop
 * the glock reference that was taken on the work item's behalf when it
 * was queued (see iopen_go_callback).
 */
void gfs2_cancel_delete_work(struct gfs2_glock *gl)
{
	if (cancel_delayed_work_sync(&gl->gl_delete)) {
		clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
		gfs2_glock_put(gl);
	}
}

/* Report whether delete work is currently pending on @gl. */
bool gfs2_delete_work_queued(const struct gfs2_glock *gl)
{
	return test_bit(GLF_PENDING_DELETE, &gl->gl_flags) != 0;
}

/*
 * glock_hash_walk callback: wait for any pending gl_delete work on this
 * glock to finish, then immediately queue the glock's regular work so
 * any resulting state change gets processed.
 */
static void flush_delete_work(struct gfs2_glock *gl)
{
	flush_delayed_work(&gl->gl_delete);
	gfs2_glock_queue_work(gl, 0);
}

/*
 * gfs2_flush_delete_work - flush all pending gl_delete work for @sdp
 * @sdp: the filesystem
 *
 * Walks every glock and flushes its delete work, then drains the delete
 * workqueue.  Called from gfs2_make_fs_ro before unmount so that no
 * delete work item survives the filesystem going read-only.
 */
void gfs2_flush_delete_work(struct gfs2_sbd *sdp)
{
	glock_hash_walk(flush_delete_work, sdp);
	flush_workqueue(gfs2_delete_workqueue);
}

/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
+4 −0
Original line number Diff line number Diff line
@@ -235,6 +235,10 @@ static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,

extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
extern bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay);
extern void gfs2_cancel_delete_work(struct gfs2_glock *gl);
extern bool gfs2_delete_work_queued(const struct gfs2_glock *gl);
extern void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
extern void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);
+8 −1
Original line number Diff line number Diff line
@@ -608,11 +608,17 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
		if (!queue_delayed_work(gfs2_delete_workqueue,
					&gl->gl_delete, 0))
			gl->gl_lockref.count--;
	}
}

/* An iopen glock must not be demoted while delete work is pending. */
static int iopen_go_demote_ok(const struct gfs2_glock *gl)
{
	if (gfs2_delete_work_queued(gl))
		return 0;
	return 1;
}

/**
 * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
 * @gl: glock being freed
@@ -716,6 +722,7 @@ const struct gfs2_glock_operations gfs2_freeze_glops = {
/*
 * Glock operations for iopen glocks.  go_demote_ok blocks demotion
 * while delete work is pending (see iopen_go_demote_ok above).
 */
const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_demote_ok = iopen_go_demote_ok,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
};

+3 −2
Original line number Diff line number Diff line
@@ -345,6 +345,7 @@ enum {
	GLF_OBJECT			= 14, /* Used only for tracing */
	GLF_BLOCKING			= 15,
	GLF_INODE_CREATING		= 16, /* Inode creation occurring */
	GLF_PENDING_DELETE		= 17,
	GLF_FREEING			= 18, /* Wait for glock to be freed */
};

@@ -378,8 +379,8 @@ struct gfs2_glock {
	atomic_t gl_revokes;
	struct delayed_work gl_work;
	union {
		/* For inode and iopen glocks only */
		struct work_struct gl_delete;
		/* For iopen glocks only */
		struct delayed_work gl_delete;
		/* For rgrp glocks only */
		struct {
			loff_t start;
+2 −0
Original line number Diff line number Diff line
@@ -170,6 +170,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
		error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
		if (unlikely(error))
			goto fail;
		gfs2_cancel_delete_work(ip->i_iopen_gh.gh_gl);
		glock_set_object(ip->i_iopen_gh.gh_gl, ip);
		gfs2_glock_put(io_gl);
		io_gl = NULL;
@@ -724,6 +725,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
	if (error)
		goto fail_gunlock2;

	gfs2_cancel_delete_work(ip->i_iopen_gh.gh_gl);
	glock_set_object(ip->i_iopen_gh.gh_gl, ip);
	gfs2_set_iop(inode);
	insert_inode_hash(inode);
Loading