Commit 2ecb7923 authored by Miao Xie's avatar Miao Xie Committed by Chris Mason
Browse files

Btrfs: fix unprotected ->log_batch



We forgot to protect ->log_batch when syncing a file; this patch fixes
the problem by using atomic operations. Since ->log_batch is only used
to check whether there are parallel sync operations, it is unnecessary
to reset it to 0 after the sync operation of the current log tree completes.

Signed-off-by: default avatarMiao Xie <miaox@cn.fujitsu.com>
parent 48c03c4b
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -1491,9 +1491,9 @@ struct btrfs_root {
	wait_queue_head_t log_commit_wait[2];
	atomic_t log_writers;
	atomic_t log_commit[2];
	atomic_t log_batch;
	unsigned long log_transid;
	unsigned long last_log_commit;
	unsigned long log_batch;
	pid_t log_start_pid;
	bool log_multiple_pids;

+1 −1
Original line number Diff line number Diff line
@@ -1168,8 +1168,8 @@ static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	atomic_set(&root->log_batch, 0);
	atomic_set(&root->orphan_inodes, 0);
	root->log_batch = 0;
	root->log_transid = 0;
	root->last_log_commit = 0;
	extent_io_tree_init(&root->dirty_log_pages,
+2 −2
Original line number Diff line number Diff line
@@ -1551,9 +1551,9 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
	 * ordered range does a filemape_write_and_wait_range which is why we
	 * don't do it above like other file systems.
	 */
	root->log_batch++;
	atomic_inc(&root->log_batch);
	btrfs_wait_ordered_range(inode, start, end);
	root->log_batch++;
	atomic_inc(&root->log_batch);

	/*
	 * check the transaction that last modified this inode
+5 −7
Original line number Diff line number Diff line
@@ -147,7 +147,7 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
			root->log_multiple_pids = true;
		}

		root->log_batch++;
		atomic_inc(&root->log_batch);
		atomic_inc(&root->log_writers);
		mutex_unlock(&root->log_mutex);
		return 0;
@@ -166,7 +166,7 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
			err = ret;
	}
	mutex_unlock(&root->fs_info->tree_log_mutex);
	root->log_batch++;
	atomic_inc(&root->log_batch);
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
	return err;
@@ -2036,7 +2036,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
	if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
		wait_log_commit(trans, root, root->log_transid - 1);
	while (1) {
		unsigned long batch = root->log_batch;
		int batch = atomic_read(&root->log_batch);
		/* when we're on an ssd, just kick the log commit out */
		if (!btrfs_test_opt(root, SSD) && root->log_multiple_pids) {
			mutex_unlock(&root->log_mutex);
@@ -2044,7 +2044,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
			mutex_lock(&root->log_mutex);
		}
		wait_for_writer(trans, root);
		if (batch == root->log_batch)
		if (batch == atomic_read(&root->log_batch))
			break;
	}

@@ -2073,7 +2073,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,

	btrfs_set_root_node(&log->root_item, log->node);

	root->log_batch = 0;
	root->log_transid++;
	log->log_transid = root->log_transid;
	root->log_start_pid = 0;
@@ -2086,7 +2085,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
	mutex_unlock(&root->log_mutex);

	mutex_lock(&log_root_tree->log_mutex);
	log_root_tree->log_batch++;
	atomic_inc(&log_root_tree->log_batch);
	atomic_inc(&log_root_tree->log_writers);
	mutex_unlock(&log_root_tree->log_mutex);

@@ -2156,7 +2155,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
	btrfs_set_super_log_root_level(root->fs_info->super_for_commit,
				btrfs_header_level(log_root_tree->node));

	log_root_tree->log_batch = 0;
	log_root_tree->log_transid++;
	smp_mb();