Commit d458b054 authored by Qu Wenruo's avatar Qu Wenruo Committed by Josef Bacik
Browse files

btrfs: Cleanup the "_struct" suffix in btrfs_workqueue



Since the "_struct" suffix was mainly used to distinguish the different
btrfs_work between the original and the newly created one,
there is no need to keep the suffix now that all btrfs_workers are changed
into btrfs_workqueue.

This patch also fixes some code whose style had been changed due to the
overly long "_struct" suffix.

Signed-off-by: default avatarQu Wenruo <quwenruo@cn.fujitsu.com>
Tested-by: default avatarDavid Sterba <dsterba@suse.cz>
Signed-off-by: default avatarJosef Bacik <jbacik@fb.com>
parent a046e9c8
Loading
Loading
Loading
Loading
+33 −33
Original line number Diff line number Diff line
@@ -32,7 +32,7 @@
#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)

struct __btrfs_workqueue_struct {
struct __btrfs_workqueue {
	struct workqueue_struct *normal_wq;
	/* List head pointing to ordered work list */
	struct list_head ordered_list;
@@ -49,15 +49,15 @@ struct __btrfs_workqueue_struct {
	spinlock_t thres_lock;
};

struct btrfs_workqueue_struct {
	struct __btrfs_workqueue_struct *normal;
	struct __btrfs_workqueue_struct *high;
struct btrfs_workqueue {
	struct __btrfs_workqueue *normal;
	struct __btrfs_workqueue *high;
};

static inline struct __btrfs_workqueue_struct
static inline struct __btrfs_workqueue
*__btrfs_alloc_workqueue(char *name, int flags, int max_active, int thresh)
{
	struct __btrfs_workqueue_struct *ret = kzalloc(sizeof(*ret), GFP_NOFS);
	struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);

	if (unlikely(!ret))
		return NULL;
@@ -95,14 +95,14 @@ static inline struct __btrfs_workqueue_struct
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue_struct *wq);
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);

struct btrfs_workqueue_struct *btrfs_alloc_workqueue(char *name,
struct btrfs_workqueue *btrfs_alloc_workqueue(char *name,
					      int flags,
					      int max_active,
					      int thresh)
{
	struct btrfs_workqueue_struct *ret = kzalloc(sizeof(*ret), GFP_NOFS);
	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);

	if (unlikely(!ret))
		return NULL;
@@ -131,7 +131,7 @@ struct btrfs_workqueue_struct *btrfs_alloc_workqueue(char *name,
 * This hook WILL be called in IRQ handler context,
 * so workqueue_set_max_active MUST NOT be called in this hook
 */
static inline void thresh_queue_hook(struct __btrfs_workqueue_struct *wq)
static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
{
	if (wq->thresh == NO_THRESHOLD)
		return;
@@ -143,7 +143,7 @@ static inline void thresh_queue_hook(struct __btrfs_workqueue_struct *wq)
 * This hook is called in kthread content.
 * So workqueue_set_max_active is called here.
 */
static inline void thresh_exec_hook(struct __btrfs_workqueue_struct *wq)
static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
{
	int new_max_active;
	long pending;
@@ -186,10 +186,10 @@ out:
	}
}

static void run_ordered_work(struct __btrfs_workqueue_struct *wq)
static void run_ordered_work(struct __btrfs_workqueue *wq)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work_struct *work;
	struct btrfs_work *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;

@@ -197,7 +197,7 @@ static void run_ordered_work(struct __btrfs_workqueue_struct *wq)
		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
		work = list_entry(list->next, struct btrfs_work_struct,
		work = list_entry(list->next, struct btrfs_work,
				  ordered_list);
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;
@@ -229,11 +229,11 @@ static void run_ordered_work(struct __btrfs_workqueue_struct *wq)

static void normal_work_helper(struct work_struct *arg)
{
	struct btrfs_work_struct *work;
	struct __btrfs_workqueue_struct *wq;
	struct btrfs_work *work;
	struct __btrfs_workqueue *wq;
	int need_order = 0;

	work = container_of(arg, struct btrfs_work_struct, normal_work);
	work = container_of(arg, struct btrfs_work, normal_work);
	/*
	 * We should not touch things inside work in the following cases:
	 * 1) after work->func() if it has no ordered_free
@@ -254,10 +254,10 @@ static void normal_work_helper(struct work_struct *arg)
	}
}

void btrfs_init_work(struct btrfs_work_struct *work,
		     void (*func)(struct btrfs_work_struct *),
		     void (*ordered_func)(struct btrfs_work_struct *),
		     void (*ordered_free)(struct btrfs_work_struct *))
void btrfs_init_work(struct btrfs_work *work,
		     void (*func)(struct btrfs_work *),
		     void (*ordered_func)(struct btrfs_work *),
		     void (*ordered_free)(struct btrfs_work *))
{
	work->func = func;
	work->ordered_func = ordered_func;
@@ -267,8 +267,8 @@ void btrfs_init_work(struct btrfs_work_struct *work,
	work->flags = 0;
}

static inline void __btrfs_queue_work(struct __btrfs_workqueue_struct *wq,
				      struct btrfs_work_struct *work)
static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
				      struct btrfs_work *work)
{
	unsigned long flags;

@@ -282,10 +282,10 @@ static inline void __btrfs_queue_work(struct __btrfs_workqueue_struct *wq,
	queue_work(wq->normal_wq, &work->normal_work);
}

void btrfs_queue_work(struct btrfs_workqueue_struct *wq,
		      struct btrfs_work_struct *work)
void btrfs_queue_work(struct btrfs_workqueue *wq,
		      struct btrfs_work *work)
{
	struct __btrfs_workqueue_struct *dest_wq;
	struct __btrfs_workqueue *dest_wq;

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
		dest_wq = wq->high;
@@ -295,13 +295,13 @@ void btrfs_queue_work(struct btrfs_workqueue_struct *wq,
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue_struct *wq)
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
{
	destroy_workqueue(wq->normal_wq);
	kfree(wq);
}

void btrfs_destroy_workqueue(struct btrfs_workqueue_struct *wq)
void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
	if (!wq)
		return;
@@ -310,14 +310,14 @@ void btrfs_destroy_workqueue(struct btrfs_workqueue_struct *wq)
	__btrfs_destroy_workqueue(wq->normal);
}

void btrfs_workqueue_set_max(struct btrfs_workqueue_struct *wq, int max)
void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max)
{
	wq->normal->max_active = max;
	if (wq->high)
		wq->high->max_active = max;
}

void btrfs_set_work_high_priority(struct btrfs_work_struct *work)
void btrfs_set_work_high_priority(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}
+17 −17
Original line number Diff line number Diff line
@@ -20,33 +20,33 @@
#ifndef __BTRFS_ASYNC_THREAD_
#define __BTRFS_ASYNC_THREAD_

struct btrfs_workqueue_struct;
struct btrfs_workqueue;
/* Internal use only */
struct __btrfs_workqueue_struct;
struct __btrfs_workqueue;

struct btrfs_work_struct {
	void (*func)(struct btrfs_work_struct *arg);
	void (*ordered_func)(struct btrfs_work_struct *arg);
	void (*ordered_free)(struct btrfs_work_struct *arg);
struct btrfs_work {
	void (*func)(struct btrfs_work *arg);
	void (*ordered_func)(struct btrfs_work *arg);
	void (*ordered_free)(struct btrfs_work *arg);

	/* Don't touch things below */
	struct work_struct normal_work;
	struct list_head ordered_list;
	struct __btrfs_workqueue_struct *wq;
	struct __btrfs_workqueue *wq;
	unsigned long flags;
};

struct btrfs_workqueue_struct *btrfs_alloc_workqueue(char *name,
struct btrfs_workqueue *btrfs_alloc_workqueue(char *name,
						     int flags,
						     int max_active,
						     int thresh);
void btrfs_init_work(struct btrfs_work_struct *work,
		     void (*func)(struct btrfs_work_struct *),
		     void (*ordered_func)(struct btrfs_work_struct *),
		     void (*ordered_free)(struct btrfs_work_struct *));
void btrfs_queue_work(struct btrfs_workqueue_struct *wq,
		      struct btrfs_work_struct *work);
void btrfs_destroy_workqueue(struct btrfs_workqueue_struct *wq);
void btrfs_workqueue_set_max(struct btrfs_workqueue_struct *wq, int max);
void btrfs_set_work_high_priority(struct btrfs_work_struct *work);
void btrfs_init_work(struct btrfs_work *work,
		     void (*func)(struct btrfs_work *),
		     void (*ordered_func)(struct btrfs_work *),
		     void (*ordered_free)(struct btrfs_work *));
void btrfs_queue_work(struct btrfs_workqueue *wq,
		      struct btrfs_work *work);
void btrfs_destroy_workqueue(struct btrfs_workqueue *wq);
void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max);
void btrfs_set_work_high_priority(struct btrfs_work *work);
#endif
+22 −22
Original line number Diff line number Diff line
@@ -1221,7 +1221,7 @@ struct btrfs_caching_control {
	struct list_head list;
	struct mutex mutex;
	wait_queue_head_t wait;
	struct btrfs_work_struct work;
	struct btrfs_work work;
	struct btrfs_block_group_cache *block_group;
	u64 progress;
	atomic_t count;
@@ -1504,27 +1504,27 @@ struct btrfs_fs_info {
	 * A third pool does submit_bio to avoid deadlocking with the other
	 * two
	 */
	struct btrfs_workqueue_struct *workers;
	struct btrfs_workqueue_struct *delalloc_workers;
	struct btrfs_workqueue_struct *flush_workers;
	struct btrfs_workqueue_struct *endio_workers;
	struct btrfs_workqueue_struct *endio_meta_workers;
	struct btrfs_workqueue_struct *endio_raid56_workers;
	struct btrfs_workqueue_struct *rmw_workers;
	struct btrfs_workqueue_struct *endio_meta_write_workers;
	struct btrfs_workqueue_struct *endio_write_workers;
	struct btrfs_workqueue_struct *endio_freespace_worker;
	struct btrfs_workqueue_struct *submit_workers;
	struct btrfs_workqueue_struct *caching_workers;
	struct btrfs_workqueue_struct *readahead_workers;
	struct btrfs_workqueue *workers;
	struct btrfs_workqueue *delalloc_workers;
	struct btrfs_workqueue *flush_workers;
	struct btrfs_workqueue *endio_workers;
	struct btrfs_workqueue *endio_meta_workers;
	struct btrfs_workqueue *endio_raid56_workers;
	struct btrfs_workqueue *rmw_workers;
	struct btrfs_workqueue *endio_meta_write_workers;
	struct btrfs_workqueue *endio_write_workers;
	struct btrfs_workqueue *endio_freespace_worker;
	struct btrfs_workqueue *submit_workers;
	struct btrfs_workqueue *caching_workers;
	struct btrfs_workqueue *readahead_workers;

	/*
	 * fixup workers take dirty pages that didn't properly go through
	 * the cow mechanism and make them safe to write.  It happens
	 * for the sys_munmap function call path
	 */
	struct btrfs_workqueue_struct *fixup_workers;
	struct btrfs_workqueue_struct *delayed_workers;
	struct btrfs_workqueue *fixup_workers;
	struct btrfs_workqueue *delayed_workers;
	struct task_struct *transaction_kthread;
	struct task_struct *cleaner_kthread;
	int thread_pool_size;
@@ -1604,9 +1604,9 @@ struct btrfs_fs_info {
	atomic_t scrub_cancel_req;
	wait_queue_head_t scrub_pause_wait;
	int scrub_workers_refcnt;
	struct btrfs_workqueue_struct *scrub_workers;
	struct btrfs_workqueue_struct *scrub_wr_completion_workers;
	struct btrfs_workqueue_struct *scrub_nocow_workers;
	struct btrfs_workqueue *scrub_workers;
	struct btrfs_workqueue *scrub_wr_completion_workers;
	struct btrfs_workqueue *scrub_nocow_workers;

#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	u32 check_integrity_print_mask;
@@ -1647,9 +1647,9 @@ struct btrfs_fs_info {
	/* qgroup rescan items */
	struct mutex qgroup_rescan_lock; /* protects the progress item */
	struct btrfs_key qgroup_rescan_progress;
	struct btrfs_workqueue_struct *qgroup_rescan_workers;
	struct btrfs_workqueue *qgroup_rescan_workers;
	struct completion qgroup_rescan_completion;
	struct btrfs_work_struct qgroup_rescan_work;
	struct btrfs_work qgroup_rescan_work;

	/* filesystem state */
	unsigned long fs_state;
@@ -3680,7 +3680,7 @@ struct btrfs_delalloc_work {
	int delay_iput;
	struct completion completion;
	struct list_head list;
	struct btrfs_work_struct work;
	struct btrfs_work work;
};

struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
+2 −2
Original line number Diff line number Diff line
@@ -1318,10 +1318,10 @@ void btrfs_remove_delayed_node(struct inode *inode)
struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work_struct work;
	struct btrfs_work work;
};

static void btrfs_async_run_delayed_root(struct btrfs_work_struct *work)
static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
+7 −7
Original line number Diff line number Diff line
@@ -55,7 +55,7 @@
#endif

static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work_struct *work);
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
				    int read_only);
@@ -86,7 +86,7 @@ struct end_io_wq {
	int error;
	int metadata;
	struct list_head list;
	struct btrfs_work_struct work;
	struct btrfs_work work;
};

/*
@@ -108,7 +108,7 @@ struct async_submit_bio {
	 * can't tell us where in the file the bio should go
	 */
	u64 bio_offset;
	struct btrfs_work_struct work;
	struct btrfs_work work;
	int error;
};

@@ -742,7 +742,7 @@ unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
	return 256 * limit;
}

static void run_one_async_start(struct btrfs_work_struct *work)
static void run_one_async_start(struct btrfs_work *work)
{
	struct async_submit_bio *async;
	int ret;
@@ -755,7 +755,7 @@ static void run_one_async_start(struct btrfs_work_struct *work)
		async->error = ret;
}

static void run_one_async_done(struct btrfs_work_struct *work)
static void run_one_async_done(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info;
	struct async_submit_bio *async;
@@ -782,7 +782,7 @@ static void run_one_async_done(struct btrfs_work_struct *work)
			       async->bio_offset);
}

static void run_one_async_free(struct btrfs_work_struct *work)
static void run_one_async_free(struct btrfs_work *work)
{
	struct async_submit_bio *async;

@@ -1668,7 +1668,7 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
 * called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work_struct *work)
static void end_workqueue_fn(struct btrfs_work *work)
{
	struct bio *bio;
	struct end_io_wq *end_io_wq;
Loading