Commit 6f10f7d1 authored by Coly Li, committed by Jens Axboe

bcache: style fix to replace 'unsigned' by 'unsigned int'



This patch fixes warnings reported by checkpatch.pl by replacing 'unsigned'
with 'unsigned int'.
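
For context, this is a purely stylistic change: in C, a bare 'unsigned' names the same type as 'unsigned int'; checkpatch.pl simply prefers the explicit spelling. A minimal illustrative sketch (not taken from this patch; the variable names are made up):

	/*
	 * Illustrative only -- not part of this patch. Both declarations
	 * produce the same type; only the spelling differs.
	 */
	static unsigned     old_style_count;   /* bare 'unsigned': flagged by checkpatch.pl */
	static unsigned int new_style_count;   /* explicit 'unsigned int': preferred form   */

	_Static_assert(sizeof(old_style_count) == sizeof(new_style_count),
		       "'unsigned' and 'unsigned int' are the same type");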

Signed-off-by: Coly Li <colyli@suse.de>
Reviewed-by: Shenghui Wang <shhuiw@foxmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b86d865c
+20 −16
@@ -87,8 +87,8 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
{
	struct cache *ca;
	struct bucket *b;
	unsigned next = c->nbuckets * c->sb.bucket_size / 1024;
	unsigned i;
	unsigned int next = c->nbuckets * c->sb.bucket_size / 1024;
	unsigned int i;
	int r;

	atomic_sub(sectors, &c->rescale);
@@ -169,7 +169,7 @@ static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)

#define bucket_prio(b)							\
({									\
	unsigned min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;	\
	unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;	\
									\
	(b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);	\
})
@@ -301,7 +301,7 @@ do { \

static int bch_allocator_push(struct cache *ca, long bucket)
{
	unsigned i;
	unsigned int i;

	/* Prios/gens are actually the most important reserve */
	if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
@@ -385,7 +385,7 @@ out:

/* Allocation */

long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
{
	DEFINE_WAIT(w);
	struct bucket *b;
@@ -421,7 +421,7 @@ out:
	if (expensive_debug_checks(ca->set)) {
		size_t iter;
		long i;
		unsigned j;
		unsigned int j;

		for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
			BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);
@@ -470,14 +470,14 @@ void __bch_bucket_free(struct cache *ca, struct bucket *b)

void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
	unsigned i;
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		__bch_bucket_free(PTR_CACHE(c, k, i),
				  PTR_BUCKET(c, k, i));
}

int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			   struct bkey *k, int n, bool wait)
{
	int i;
@@ -510,7 +510,7 @@ err:
	return -1;
}

int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			 struct bkey *k, int n, bool wait)
{
	int ret;
@@ -524,8 +524,8 @@ int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,

struct open_bucket {
	struct list_head	list;
	unsigned		last_write_point;
	unsigned		sectors_free;
	unsigned int		last_write_point;
	unsigned int		sectors_free;
	BKEY_PADDED(key);
};

@@ -556,7 +556,7 @@ struct open_bucket {
 */
static struct open_bucket *pick_data_bucket(struct cache_set *c,
					    const struct bkey *search,
					    unsigned write_point,
					    unsigned int write_point,
					    struct bkey *alloc)
{
	struct open_bucket *ret, *ret_task = NULL;
@@ -595,12 +595,16 @@ found:
 *
 * If s->writeback is true, will not fail.
 */
bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
		       unsigned write_point, unsigned write_prio, bool wait)
bool bch_alloc_sectors(struct cache_set *c,
		       struct bkey *k,
		       unsigned int sectors,
		       unsigned int write_point,
		       unsigned int write_prio,
		       bool wait)
{
	struct open_bucket *b;
	BKEY_PADDED(key) alloc;
	unsigned i;
	unsigned int i;

	/*
	 * We might have to allocate a new bucket, which we can't do with a
@@ -613,7 +617,7 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
	spin_lock(&c->data_bucket_lock);

	while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
		unsigned watermark = write_prio
		unsigned int watermark = write_prio
			? RESERVE_MOVINGGC
			: RESERVE_NONE;

+54 −53
@@ -252,7 +252,7 @@ struct bcache_device {
	struct kobject		kobj;

	struct cache_set	*c;
	unsigned		id;
	unsigned int		id;
#define BCACHEDEVNAME_SIZE	12
	char			name[BCACHEDEVNAME_SIZE];

@@ -264,18 +264,18 @@ struct bcache_device {
#define BCACHE_DEV_UNLINK_DONE		2
#define BCACHE_DEV_WB_RUNNING		3
#define BCACHE_DEV_RATE_DW_RUNNING	4
	unsigned		nr_stripes;
	unsigned		stripe_size;
	unsigned int		nr_stripes;
	unsigned int		stripe_size;
	atomic_t		*stripe_sectors_dirty;
	unsigned long		*full_dirty_stripes;

	struct bio_set		bio_split;

	unsigned		data_csum:1;
	unsigned int		data_csum:1;

	int (*cache_miss)(struct btree *, struct search *,
			  struct bio *, unsigned);
	int (*ioctl) (struct bcache_device *, fmode_t, unsigned, unsigned long);
			  struct bio *, unsigned int);
	int (*ioctl) (struct bcache_device *, fmode_t, unsigned int, unsigned long);
};

struct io {
@@ -284,7 +284,7 @@ struct io {
	struct list_head	lru;

	unsigned long		jiffies;
	unsigned		sequential;
	unsigned int		sequential;
	sector_t		last;
};

@@ -358,18 +358,18 @@ struct cached_dev {
	struct cache_accounting	accounting;

	/* The rest of this all shows up in sysfs */
	unsigned		sequential_cutoff;
	unsigned		readahead;
	unsigned int		sequential_cutoff;
	unsigned int		readahead;

	unsigned		io_disable:1;
	unsigned		verify:1;
	unsigned		bypass_torture_test:1;
	unsigned int		io_disable:1;
	unsigned int		verify:1;
	unsigned int		bypass_torture_test:1;

	unsigned		partial_stripes_expensive:1;
	unsigned		writeback_metadata:1;
	unsigned		writeback_running:1;
	unsigned int		partial_stripes_expensive:1;
	unsigned int		writeback_metadata:1;
	unsigned int		writeback_running:1;
	unsigned char		writeback_percent;
	unsigned		writeback_delay;
	unsigned int		writeback_delay;

	uint64_t		writeback_rate_target;
	int64_t			writeback_rate_proportional;
@@ -377,16 +377,16 @@ struct cached_dev {
	int64_t			writeback_rate_integral_scaled;
	int32_t			writeback_rate_change;

	unsigned		writeback_rate_update_seconds;
	unsigned		writeback_rate_i_term_inverse;
	unsigned		writeback_rate_p_term_inverse;
	unsigned		writeback_rate_minimum;
	unsigned int		writeback_rate_update_seconds;
	unsigned int		writeback_rate_i_term_inverse;
	unsigned int		writeback_rate_p_term_inverse;
	unsigned int		writeback_rate_minimum;

	enum stop_on_failure	stop_when_cache_set_failed;
#define DEFAULT_CACHED_DEV_ERROR_LIMIT	64
	atomic_t		io_errors;
	unsigned		error_limit;
	unsigned		offline_seconds;
	unsigned int		error_limit;
	unsigned int		offline_seconds;

	char			backing_dev_name[BDEVNAME_SIZE];
};
@@ -447,7 +447,7 @@ struct cache {
	 * until a gc finishes - otherwise we could pointlessly burn a ton of
	 * cpu
	 */
	unsigned		invalidate_needs_gc;
	unsigned int		invalidate_needs_gc;

	bool			discard; /* Get rid of? */

@@ -472,7 +472,7 @@ struct gc_stat {

	size_t			nkeys;
	uint64_t		data;	/* sectors */
	unsigned		in_use; /* percent */
	unsigned int		in_use; /* percent */
};

/*
@@ -518,7 +518,7 @@ struct cache_set {
	int			caches_loaded;

	struct bcache_device	**devices;
	unsigned		devices_max_used;
	unsigned int		devices_max_used;
	atomic_t		attached_dev_nr;
	struct list_head	cached_devs;
	uint64_t		cached_dev_sectors;
@@ -548,7 +548,7 @@ struct cache_set {
	 * Default number of pages for a new btree node - may be less than a
	 * full bucket
	 */
	unsigned		btree_pages;
	unsigned int		btree_pages;

	/*
	 * Lists of struct btrees; lru is the list for structs that have memory
@@ -571,7 +571,7 @@ struct cache_set {
	struct list_head	btree_cache_freed;

	/* Number of elements in btree_cache + btree_cache_freeable lists */
	unsigned		btree_cache_used;
	unsigned int		btree_cache_used;

	/*
	 * If we need to allocate memory for a new btree node and that
@@ -649,7 +649,7 @@ struct cache_set {
	struct mutex		verify_lock;
#endif

	unsigned		nr_uuids;
	unsigned int		nr_uuids;
	struct uuid_entry	*uuids;
	BKEY_PADDED(uuid_bucket);
	struct closure		uuid_write;
@@ -670,12 +670,12 @@ struct cache_set {
	struct journal		journal;

#define CONGESTED_MAX		1024
	unsigned		congested_last_us;
	unsigned int		congested_last_us;
	atomic_t		congested;

	/* The rest of this all shows up in sysfs */
	unsigned		congested_read_threshold_us;
	unsigned		congested_write_threshold_us;
	unsigned int		congested_read_threshold_us;
	unsigned int		congested_write_threshold_us;

	struct time_stats	btree_gc_time;
	struct time_stats	btree_split_time;
@@ -694,16 +694,16 @@ struct cache_set {
		ON_ERROR_PANIC,
	}			on_error;
#define DEFAULT_IO_ERROR_LIMIT 8
	unsigned		error_limit;
	unsigned		error_decay;
	unsigned int		error_limit;
	unsigned int		error_decay;

	unsigned short		journal_delay_ms;
	bool			expensive_debug_checks;
	unsigned		verify:1;
	unsigned		key_merging_disabled:1;
	unsigned		gc_always_rewrite:1;
	unsigned		shrinker_disabled:1;
	unsigned		copy_gc_enabled:1;
	unsigned int		verify:1;
	unsigned int		key_merging_disabled:1;
	unsigned int		gc_always_rewrite:1;
	unsigned int		shrinker_disabled:1;
	unsigned int		copy_gc_enabled:1;

#define BUCKET_HASH_BITS	12
	struct hlist_head	bucket_hash[1 << BUCKET_HASH_BITS];
@@ -712,7 +712,7 @@ struct cache_set {
};

struct bbio {
	unsigned		submit_time_us;
	unsigned int		submit_time_us;
	union {
		struct bkey	key;
		uint64_t	_pad[3];
@@ -729,10 +729,10 @@ struct bbio {

#define btree_bytes(c)		((c)->btree_pages * PAGE_SIZE)
#define btree_blocks(b)							\
	((unsigned) (KEY_SIZE(&b->key) >> (b)->c->block_bits))
	((unsigned int) (KEY_SIZE(&b->key) >> (b)->c->block_bits))

#define btree_default_blocks(c)						\
	((unsigned) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
	((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))

#define bucket_pages(c)		((c)->sb.bucket_size / PAGE_SECTORS)
#define bucket_bytes(c)		((c)->sb.bucket_size << 9)
@@ -761,21 +761,21 @@ static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)

static inline struct cache *PTR_CACHE(struct cache_set *c,
				      const struct bkey *k,
				      unsigned ptr)
				      unsigned int ptr)
{
	return c->cache[PTR_DEV(k, ptr)];
}

static inline size_t PTR_BUCKET_NR(struct cache_set *c,
				   const struct bkey *k,
				   unsigned ptr)
				   unsigned int ptr)
{
	return sector_to_bucket(c, PTR_OFFSET(k, ptr));
}

static inline struct bucket *PTR_BUCKET(struct cache_set *c,
					const struct bkey *k,
					unsigned ptr)
					unsigned int ptr)
{
	return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
}
@@ -787,13 +787,13 @@ static inline uint8_t gen_after(uint8_t a, uint8_t b)
}

static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
				unsigned i)
				unsigned int i)
{
	return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
}

static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
				 unsigned i)
				 unsigned int i)
{
	return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
}
@@ -888,7 +888,7 @@ static inline uint8_t bucket_gc_gen(struct bucket *b)
static inline void wake_up_allocators(struct cache_set *c)
{
	struct cache *ca;
	unsigned i;
	unsigned int i;

	for_each_cache(ca, c, i)
		wake_up_process(ca->alloc_thread);
@@ -933,7 +933,8 @@ void bch_bbio_free(struct bio *, struct cache_set *);
struct bio *bch_bbio_alloc(struct cache_set *);

void __bch_submit_bbio(struct bio *, struct cache_set *);
void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
void bch_submit_bbio(struct bio *, struct cache_set *,
		     struct bkey *, unsigned int);

uint8_t bch_inc_gen(struct cache *, struct bucket *);
void bch_rescale_priorities(struct cache_set *, int);
@@ -944,13 +945,13 @@ void __bch_invalidate_one_bucket(struct cache *, struct bucket *);
void __bch_bucket_free(struct cache *, struct bucket *);
void bch_bucket_free(struct cache_set *, struct bkey *);

long bch_bucket_alloc(struct cache *, unsigned, bool);
int __bch_bucket_alloc_set(struct cache_set *, unsigned,
long bch_bucket_alloc(struct cache *, unsigned int, bool);
int __bch_bucket_alloc_set(struct cache_set *, unsigned int,
			   struct bkey *, int, bool);
int bch_bucket_alloc_set(struct cache_set *, unsigned,
int bch_bucket_alloc_set(struct cache_set *, unsigned int,
			 struct bkey *, int, bool);
bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned,
		       unsigned, unsigned, bool);
bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned int,
		       unsigned int, unsigned int, bool);
bool bch_cached_dev_error(struct cached_dev *dc);

__printf(2, 3)
+60 −54
@@ -18,7 +18,7 @@

#ifdef CONFIG_BCACHE_DEBUG

void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set)
{
	struct bkey *k, *next;

@@ -26,7 +26,7 @@ void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
		next = bkey_next(k);

		printk(KERN_ERR "block %u key %u/%u: ", set,
		       (unsigned) ((u64 *) k - i->d), i->keys);
		       (unsigned int) ((u64 *) k - i->d), i->keys);

		if (b->ops->key_dump)
			b->ops->key_dump(b, k);
@@ -42,7 +42,7 @@ void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)

void bch_dump_bucket(struct btree_keys *b)
{
	unsigned i;
	unsigned int i;

	console_lock();
	for (i = 0; i <= b->nsets; i++)
@@ -53,7 +53,7 @@ void bch_dump_bucket(struct btree_keys *b)

int __bch_count_data(struct btree_keys *b)
{
	unsigned ret = 0;
	unsigned int ret = 0;
	struct btree_iter iter;
	struct bkey *k;

@@ -128,7 +128,7 @@ static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}

/* Keylists */

int __bch_keylist_realloc(struct keylist *l, unsigned u64s)
int __bch_keylist_realloc(struct keylist *l, unsigned int u64s)
{
	size_t oldsize = bch_keylist_nkeys(l);
	size_t newsize = oldsize + u64s;
@@ -180,7 +180,7 @@ void bch_keylist_pop_front(struct keylist *l)
/* Key/pointer manipulation */

void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
			      unsigned i)
			      unsigned int i)
{
	BUG_ON(i > KEY_PTRS(src));

@@ -194,7 +194,7 @@ void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,

bool __bch_cut_front(const struct bkey *where, struct bkey *k)
{
	unsigned i, len = 0;
	unsigned int i, len = 0;

	if (bkey_cmp(where, &START_KEY(k)) <= 0)
		return false;
@@ -214,7 +214,7 @@ bool __bch_cut_front(const struct bkey *where, struct bkey *k)

bool __bch_cut_back(const struct bkey *where, struct bkey *k)
{
	unsigned len = 0;
	unsigned int len = 0;

	if (bkey_cmp(where, k) >= 0)
		return false;
@@ -240,9 +240,9 @@ bool __bch_cut_back(const struct bkey *where, struct bkey *k)
#define BKEY_MANTISSA_MASK	((1 << BKEY_MANTISSA_BITS) - 1)

struct bkey_float {
	unsigned	exponent:BKEY_EXPONENT_BITS;
	unsigned	m:BKEY_MID_BITS;
	unsigned	mantissa:BKEY_MANTISSA_BITS;
	unsigned int	exponent:BKEY_EXPONENT_BITS;
	unsigned int	m:BKEY_MID_BITS;
	unsigned int	mantissa:BKEY_MANTISSA_BITS;
} __packed;

/*
@@ -311,7 +311,7 @@ void bch_btree_keys_free(struct btree_keys *b)
}
EXPORT_SYMBOL(bch_btree_keys_free);

int bch_btree_keys_alloc(struct btree_keys *b, unsigned page_order, gfp_t gfp)
int bch_btree_keys_alloc(struct btree_keys *b, unsigned int page_order, gfp_t gfp)
{
	struct bset_tree *t = b->set;

@@ -345,7 +345,7 @@ EXPORT_SYMBOL(bch_btree_keys_alloc);
void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
			 bool *expensive_debug_checks)
{
	unsigned i;
	unsigned int i;

	b->ops = ops;
	b->expensive_debug_checks = expensive_debug_checks;
@@ -370,7 +370,7 @@ EXPORT_SYMBOL(bch_btree_keys_init);
 * return array index next to j when does in-order traverse
 * of a binary tree which is stored in a linear array
 */
static unsigned inorder_next(unsigned j, unsigned size)
static unsigned int inorder_next(unsigned int j, unsigned int size)
{
	if (j * 2 + 1 < size) {
		j = j * 2 + 1;
@@ -387,7 +387,7 @@ static unsigned inorder_next(unsigned j, unsigned size)
 * return array index previous to j when does in-order traverse
 * of a binary tree which is stored in a linear array
 */
static unsigned inorder_prev(unsigned j, unsigned size)
static unsigned int inorder_prev(unsigned int j, unsigned int size)
{
	if (j * 2 < size) {
		j = j * 2;
@@ -413,10 +413,12 @@ static unsigned inorder_prev(unsigned j, unsigned size)
 * extra is a function of size:
 *   extra = (size - rounddown_pow_of_two(size - 1)) << 1;
 */
static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
static unsigned int __to_inorder(unsigned int j,
				  unsigned int size,
				  unsigned int extra)
{
	unsigned b = fls(j);
	unsigned shift = fls(size - 1) - b;
	unsigned int b = fls(j);
	unsigned int shift = fls(size - 1) - b;

	j  ^= 1U << (b - 1);
	j <<= 1;
@@ -433,14 +435,16 @@ static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
 * Return the cacheline index in bset_tree->data, where j is index
 * from a linear array which stores the auxiliar binary tree
 */
static unsigned to_inorder(unsigned j, struct bset_tree *t)
static unsigned int to_inorder(unsigned int j, struct bset_tree *t)
{
	return __to_inorder(j, t->size, t->extra);
}

static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
static unsigned int __inorder_to_tree(unsigned int j,
				      unsigned int size,
				      unsigned int extra)
{
	unsigned shift;
	unsigned int shift;

	if (j > extra)
		j += j - extra;
@@ -457,7 +461,7 @@ static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
 * Return an index from a linear array which stores the auxiliar binary
 * tree, j is the cacheline index of t->data.
 */
static unsigned inorder_to_tree(unsigned j, struct bset_tree *t)
static unsigned int inorder_to_tree(unsigned int j, struct bset_tree *t)
{
	return __inorder_to_tree(j, t->size, t->extra);
}
@@ -468,11 +472,11 @@ void inorder_test(void)
	unsigned long done = 0;
	ktime_t start = ktime_get();

	for (unsigned size = 2;
	for (unsigned int size = 2;
	     size < 65536000;
	     size++) {
		unsigned extra = (size - rounddown_pow_of_two(size - 1)) << 1;
		unsigned i = 1, j = rounddown_pow_of_two(size - 1);
		unsigned int extra = (size - rounddown_pow_of_two(size - 1)) << 1;
		unsigned int i = 1, j = rounddown_pow_of_two(size - 1);

		if (!(size % 4096))
			printk(KERN_NOTICE "loop %u, %llu per us\n", size,
@@ -518,30 +522,31 @@ void inorder_test(void)
 * of the previous key so we can walk backwards to it from t->tree[j]'s key.
 */

static struct bkey *cacheline_to_bkey(struct bset_tree *t, unsigned cacheline,
				      unsigned offset)
static struct bkey *cacheline_to_bkey(struct bset_tree *t,
				      unsigned int cacheline,
				      unsigned int offset)
{
	return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
}

static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
static unsigned int bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
{
	return ((void *) k - (void *) t->data) / BSET_CACHELINE;
}

static unsigned bkey_to_cacheline_offset(struct bset_tree *t,
					 unsigned cacheline,
static unsigned int bkey_to_cacheline_offset(struct bset_tree *t,
					 unsigned int cacheline,
					 struct bkey *k)
{
	return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0);
}

static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j)
static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned int j)
{
	return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
}

static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned int j)
{
	return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
}
@@ -550,7 +555,7 @@ static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
 * For the write set - the one we're currently inserting keys into - we don't
 * maintain a full search tree, we just keep a simple lookup table in t->prev.
 */
static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline)
static struct bkey *table_to_bkey(struct bset_tree *t, unsigned int cacheline)
{
	return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
}
@@ -576,14 +581,14 @@ static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
 * See make_bfloat() to check when most significant bit of f->exponent
 * is set or not.
 */
static inline unsigned bfloat_mantissa(const struct bkey *k,
static inline unsigned int bfloat_mantissa(const struct bkey *k,
				       struct bkey_float *f)
{
	const uint64_t *p = &k->low - (f->exponent >> 6);
	return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
}

static void make_bfloat(struct bset_tree *t, unsigned j)
static void make_bfloat(struct bset_tree *t, unsigned int j)
{
	struct bkey_float *f = &t->tree[j];
	struct bkey *m = tree_to_bkey(t, j);
@@ -631,7 +636,7 @@ static void make_bfloat(struct bset_tree *t, unsigned j)
static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
{
	if (t != b->set) {
		unsigned j = roundup(t[-1].size,
		unsigned int j = roundup(t[-1].size,
				     64 / sizeof(struct bkey_float));

		t->tree = t[-1].tree + j;
@@ -686,13 +691,13 @@ void bch_bset_build_written_tree(struct btree_keys *b)
{
	struct bset_tree *t = bset_tree_last(b);
	struct bkey *prev = NULL, *k = t->data->start;
	unsigned j, cacheline = 1;
	unsigned int j, cacheline = 1;

	b->last_set_unwritten = 0;

	bset_alloc_tree(b, t);

	t->size = min_t(unsigned,
	t->size = min_t(unsigned int,
			bkey_to_cacheline(t, bset_bkey_last(t->data)),
			b->set->tree + btree_keys_cachelines(b) - t->tree);

@@ -732,7 +737,7 @@ EXPORT_SYMBOL(bch_bset_build_written_tree);
void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
{
	struct bset_tree *t;
	unsigned inorder, j = 1;
	unsigned int inorder, j = 1;

	for (t = b->set; t <= bset_tree_last(b); t++)
		if (k < bset_bkey_last(t->data))
@@ -779,8 +784,8 @@ static void bch_bset_fix_lookup_table(struct btree_keys *b,
				      struct bset_tree *t,
				      struct bkey *k)
{
	unsigned shift = bkey_u64s(k);
	unsigned j = bkey_to_cacheline(t, k);
	unsigned int shift = bkey_u64s(k);
	unsigned int j = bkey_to_cacheline(t, k);

	/* We're getting called from btree_split() or btree_gc, just bail out */
	if (!t->size)
@@ -867,10 +872,10 @@ void bch_bset_insert(struct btree_keys *b, struct bkey *where,
}
EXPORT_SYMBOL(bch_bset_insert);

unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
			      struct bkey *replace_key)
{
	unsigned status = BTREE_INSERT_STATUS_NO_INSERT;
	unsigned int status = BTREE_INSERT_STATUS_NO_INSERT;
	struct bset *i = bset_tree_last(b)->data;
	struct bkey *m, *prev = NULL;
	struct btree_iter iter;
@@ -922,10 +927,10 @@ struct bset_search_iter {
static struct bset_search_iter bset_search_write_set(struct bset_tree *t,
						     const struct bkey *search)
{
	unsigned li = 0, ri = t->size;
	unsigned int li = 0, ri = t->size;

	while (li + 1 != ri) {
		unsigned m = (li + ri) >> 1;
		unsigned int m = (li + ri) >> 1;

		if (bkey_cmp(table_to_bkey(t, m), search) > 0)
			ri = m;
@@ -944,7 +949,7 @@ static struct bset_search_iter bset_search_tree(struct bset_tree *t,
{
	struct bkey *l, *r;
	struct bkey_float *f;
	unsigned inorder, j, n = 1;
	unsigned int inorder, j, n = 1;

	do {
		/*
@@ -958,7 +963,7 @@ static struct bset_search_iter bset_search_tree(struct bset_tree *t,
		 *		p = 0;
		 * but a branch instruction is avoided.
		 */
		unsigned p = n << 4;
		unsigned int p = n << 4;
		p &= ((int) (p - t->size)) >> 31;

		prefetch(&t->tree[p]);
@@ -978,7 +983,7 @@ static struct bset_search_iter bset_search_tree(struct bset_tree *t,
		 * to work  - that's done in make_bfloat()
		 */
		if (likely(f->exponent != 127))
			n = j * 2 + (((unsigned)
			n = j * 2 + (((unsigned int)
				      (f->mantissa -
				       bfloat_mantissa(search, f))) >> 31);
		else
@@ -1184,7 +1189,8 @@ void bch_bset_sort_state_free(struct bset_sort_state *state)
	mempool_exit(&state->pool);
}

int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)
int bch_bset_sort_state_init(struct bset_sort_state *state,
			     unsigned int page_order)
{
	spin_lock_init(&state->time.lock);

@@ -1237,7 +1243,7 @@ static void btree_mergesort(struct btree_keys *b, struct bset *out,
}

static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
			 unsigned start, unsigned order, bool fixup,
			 unsigned int start, unsigned int order, bool fixup,
			 struct bset_sort_state *state)
{
	uint64_t start_time;
@@ -1288,7 +1294,7 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
		bch_time_stats_update(&state->time, start_time);
}

void bch_btree_sort_partial(struct btree_keys *b, unsigned start,
void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
			    struct bset_sort_state *state)
{
	size_t order = b->page_order, keys = 0;
@@ -1298,7 +1304,7 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned start,
	__bch_btree_iter_init(b, &iter, NULL, &b->set[start]);

	if (start) {
		unsigned i;
		unsigned int i;

		for (i = start; i <= b->nsets; i++)
			keys += b->set[i].data->keys;
@@ -1338,7 +1344,7 @@ void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,

void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)
{
	unsigned crit = SORT_CRIT;
	unsigned int crit = SORT_CRIT;
	int i;

	/* Don't sort if nothing to do */
@@ -1367,7 +1373,7 @@ EXPORT_SYMBOL(bch_btree_sort_lazy);

void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
{
	unsigned i;
	unsigned int i;

	for (i = 0; i <= b->nsets; i++) {
		struct bset_tree *t = &b->set[i];
+17 −17 (file changed; preview size limit exceeded, changes collapsed)

+25 −25
@@ -183,7 +183,7 @@ static void bch_btree_init_next(struct btree *b)

void bkey_put(struct cache_set *c, struct bkey *k)
{
	unsigned i;
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i))
@@ -479,7 +479,7 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)

void bch_btree_node_write(struct btree *b, struct closure *parent)
{
	unsigned nsets = b->keys.nsets;
	unsigned int nsets = b->keys.nsets;

	lockdep_assert_held(&b->lock);

@@ -581,7 +581,7 @@ static void mca_bucket_free(struct btree *b)
	list_move(&b->list, &b->c->btree_cache_freeable);
}

static unsigned btree_order(struct bkey *k)
static unsigned int btree_order(struct bkey *k)
{
	return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
}
@@ -589,7 +589,7 @@ static unsigned btree_order(struct bkey *k)
static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
{
	if (!bch_btree_keys_alloc(&b->keys,
				  max_t(unsigned,
				  max_t(unsigned int,
					ilog2(b->c->btree_pages),
					btree_order(k)),
				  gfp)) {
@@ -620,7 +620,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
	return b;
}

static int mca_reap(struct btree *b, unsigned min_order, bool flush)
static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
{
	struct closure cl;

@@ -786,7 +786,7 @@ void bch_btree_cache_free(struct cache_set *c)

int bch_btree_cache_alloc(struct cache_set *c)
{
	unsigned i;
	unsigned int i;

	for (i = 0; i < mca_reserve(c); i++)
		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
@@ -1136,7 +1136,7 @@ static struct btree *btree_node_alloc_replacement(struct btree *b,

static void make_btree_freeing_key(struct btree *b, struct bkey *k)
{
	unsigned i;
	unsigned int i;

	mutex_lock(&b->c->bucket_lock);

@@ -1157,7 +1157,7 @@ static int btree_check_reserve(struct btree *b, struct btree_op *op)
{
	struct cache_set *c = b->c;
	struct cache *ca;
	unsigned i, reserve = (c->root->level - b->level) * 2 + 1;
	unsigned int i, reserve = (c->root->level - b->level) * 2 + 1;

	mutex_lock(&c->bucket_lock);

@@ -1181,7 +1181,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
				    struct bkey *k)
{
	uint8_t stale = 0;
	unsigned i;
	unsigned int i;
	struct bucket *g;

	/*
@@ -1219,7 +1219,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
			SET_GC_MARK(g, GC_MARK_RECLAIMABLE);

		/* guard against overflow */
		SET_GC_SECTORS_USED(g, min_t(unsigned,
		SET_GC_SECTORS_USED(g, min_t(unsigned int,
					     GC_SECTORS_USED(g) + KEY_SIZE(k),
					     MAX_GC_SECTORS_USED));

@@ -1233,7 +1233,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,

void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
{
	unsigned i;
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i) &&
@@ -1259,7 +1259,7 @@ void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
{
	uint8_t stale = 0;
	unsigned keys = 0, good_keys = 0;
	unsigned int keys = 0, good_keys = 0;
	struct bkey *k;
	struct btree_iter iter;
	struct bset_tree *t;
@@ -1302,7 +1302,7 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)

struct gc_merge_info {
	struct btree	*b;
	unsigned	keys;
	unsigned int	keys;
};

static int bch_btree_insert_node(struct btree *, struct btree_op *,
@@ -1311,7 +1311,7 @@ static int bch_btree_insert_node(struct btree *, struct btree_op *,
static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
			     struct gc_stat *gc, struct gc_merge_info *r)
{
	unsigned i, nodes = 0, keys = 0, blocks;
	unsigned int i, nodes = 0, keys = 0, blocks;
	struct btree *new_nodes[GC_MERGE_NODES];
	struct keylist keylist;
	struct closure cl;
@@ -1511,11 +1511,11 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
	return -EINTR;
}

static unsigned btree_gc_count_keys(struct btree *b)
static unsigned int btree_gc_count_keys(struct btree *b)
{
	struct bkey *k;
	struct btree_iter iter;
	unsigned ret = 0;
	unsigned int ret = 0;

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		ret += bkey_u64s(k);
@@ -1678,7 +1678,7 @@ static void btree_gc_start(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;
	unsigned i;
	unsigned int i;

	if (!c->gc_mark_valid)
		return;
@@ -1704,7 +1704,7 @@ static void bch_btree_gc_finish(struct cache_set *c)
{
	struct bucket *b;
	struct cache *ca;
	unsigned i;
	unsigned int i;

	mutex_lock(&c->bucket_lock);

@@ -1722,7 +1722,7 @@ static void bch_btree_gc_finish(struct cache_set *c)
		struct bcache_device *d = c->devices[i];
		struct cached_dev *dc;
		struct keybuf_key *w, *n;
		unsigned j;
		unsigned int j;

		if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
			continue;
@@ -1814,7 +1814,7 @@ static void bch_btree_gc(struct cache_set *c)
static bool gc_should_run(struct cache_set *c)
{
	struct cache *ca;
	unsigned i;
	unsigned int i;

	for_each_cache(ca, c, i)
		if (ca->invalidate_needs_gc)
@@ -1905,7 +1905,7 @@ void bch_initial_gc_finish(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;
	unsigned i;
	unsigned int i;

	bch_btree_gc_finish(c);

@@ -1945,7 +1945,7 @@ void bch_initial_gc_finish(struct cache_set *c)
static bool btree_insert_key(struct btree *b, struct bkey *k,
			     struct bkey *replace_key)
{
	unsigned status;
	unsigned int status;

	BUG_ON(bkey_cmp(k, &b->key) > 0);

@@ -2044,7 +2044,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
			   block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;

	if (split) {
		unsigned keys = 0;
		unsigned int keys = 0;

		trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);

@@ -2300,7 +2300,7 @@ int bch_btree_insert(struct cache_set *c, struct keylist *keys,

void bch_btree_set_root(struct btree *b)
{
	unsigned i;
	unsigned int i;
	struct closure cl;

	closure_init_stack(&cl);
@@ -2412,7 +2412,7 @@ static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,

struct refill {
	struct btree_op	op;
	unsigned	nr_found;
	unsigned int	nr_found;
	struct keybuf	*buf;
	struct bkey	*end;
	keybuf_pred_fn	*pred;