Commit 6da6eabe authored by Lisa Nguyen, committed by Greg Kroah-Hartman
Browse files

staging: lustre: Remove typedef and update cfs_hash struct



Remove the typedef keyword and rename the cfs_hash_t struct to
cfs_hash in libcfs_hash.h. These changes resolve the "Do
not add new typedefs" warning generated by checkpatch.pl
and conform to the kernel coding style.

Struct variables in other header and source files that
depend on libcfs_hash.h are updated as well.

Signed-off-by: Lisa Nguyen <lisa@xenapiadmin.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 6ea510c1
Loading
Loading
Loading
Loading
+94 −94
Original line number Diff line number Diff line
@@ -210,7 +210,7 @@ enum cfs_hash_tag {
 * locations; additions must take care to only insert into the new bucket.
 */

typedef struct cfs_hash {
struct cfs_hash {
	/** serialize with rehash, or serialize all operations if
	 * the hash-table has CFS_HASH_NO_BKTLOCK */
	union cfs_hash_lock	     hs_lock;
@@ -272,7 +272,7 @@ typedef struct cfs_hash {
#endif
	/** name of htable */
	char			hs_name[0];
} cfs_hash_t;
};

typedef struct cfs_hash_lock_ops {
	/** lock the hash table */
@@ -287,20 +287,20 @@ typedef struct cfs_hash_lock_ops {

typedef struct cfs_hash_hlist_ops {
	/** return hlist_head of hash-head of @bd */
	struct hlist_head *(*hop_hhead)(cfs_hash_t *hs, struct cfs_hash_bd *bd);
	struct hlist_head *(*hop_hhead)(struct cfs_hash *hs, struct cfs_hash_bd *bd);
	/** return hash-head size */
	int (*hop_hhead_size)(cfs_hash_t *hs);
	int (*hop_hhead_size)(struct cfs_hash *hs);
	/** add @hnode to hash-head of @bd */
	int (*hop_hnode_add)(cfs_hash_t *hs,
	int (*hop_hnode_add)(struct cfs_hash *hs,
			     struct cfs_hash_bd *bd, struct hlist_node *hnode);
	/** remove @hnode from hash-head of @bd */
	int (*hop_hnode_del)(cfs_hash_t *hs,
	int (*hop_hnode_del)(struct cfs_hash *hs,
			     struct cfs_hash_bd *bd, struct hlist_node *hnode);
} cfs_hash_hlist_ops_t;

typedef struct cfs_hash_ops {
	/** return hashed value from @key */
	unsigned (*hs_hash)(cfs_hash_t *hs, const void *key, unsigned mask);
	unsigned (*hs_hash)(struct cfs_hash *hs, const void *key, unsigned mask);
	/** return key address of @hnode */
	void *   (*hs_key)(struct hlist_node *hnode);
	/** copy key from @hnode to @key */
@@ -313,13 +313,13 @@ typedef struct cfs_hash_ops {
	/** return object address of @hnode, i.e: container_of(...hnode) */
	void *   (*hs_object)(struct hlist_node *hnode);
	/** get refcount of item, always called with holding bucket-lock */
	void     (*hs_get)(cfs_hash_t *hs, struct hlist_node *hnode);
	void     (*hs_get)(struct cfs_hash *hs, struct hlist_node *hnode);
	/** release refcount of item */
	void     (*hs_put)(cfs_hash_t *hs, struct hlist_node *hnode);
	void     (*hs_put)(struct cfs_hash *hs, struct hlist_node *hnode);
	/** release refcount of item, always called with holding bucket-lock */
	void     (*hs_put_locked)(cfs_hash_t *hs, struct hlist_node *hnode);
	void     (*hs_put_locked)(struct cfs_hash *hs, struct hlist_node *hnode);
	/** it's called before removing of @hnode */
	void     (*hs_exit)(cfs_hash_t *hs, struct hlist_node *hnode);
	void     (*hs_exit)(struct cfs_hash *hs, struct hlist_node *hnode);
} cfs_hash_ops_t;

/** total number of buckets in @hs */
@@ -340,41 +340,41 @@ typedef struct cfs_hash_ops {
#define CFS_HASH_RH_NHLIST(hs)  (1U << (hs)->hs_rehash_bits)

static inline int
cfs_hash_with_no_lock(cfs_hash_t *hs)
cfs_hash_with_no_lock(struct cfs_hash *hs)
{
	/* caller will serialize all operations for this hash-table */
	return (hs->hs_flags & CFS_HASH_NO_LOCK) != 0;
}

static inline int
cfs_hash_with_no_bktlock(cfs_hash_t *hs)
cfs_hash_with_no_bktlock(struct cfs_hash *hs)
{
	/* no bucket lock, one single lock to protect the hash-table */
	return (hs->hs_flags & CFS_HASH_NO_BKTLOCK) != 0;
}

static inline int
cfs_hash_with_rw_bktlock(cfs_hash_t *hs)
cfs_hash_with_rw_bktlock(struct cfs_hash *hs)
{
	/* rwlock to protect hash bucket */
	return (hs->hs_flags & CFS_HASH_RW_BKTLOCK) != 0;
}

static inline int
cfs_hash_with_spin_bktlock(cfs_hash_t *hs)
cfs_hash_with_spin_bktlock(struct cfs_hash *hs)
{
	/* spinlock to protect hash bucket */
	return (hs->hs_flags & CFS_HASH_SPIN_BKTLOCK) != 0;
}

static inline int
cfs_hash_with_add_tail(cfs_hash_t *hs)
cfs_hash_with_add_tail(struct cfs_hash *hs)
{
	return (hs->hs_flags & CFS_HASH_ADD_TAIL) != 0;
}

static inline int
cfs_hash_with_no_itemref(cfs_hash_t *hs)
cfs_hash_with_no_itemref(struct cfs_hash *hs)
{
	/* hash-table doesn't keep refcount on item,
	 * item can't be removed from hash unless it's
@@ -383,73 +383,73 @@ cfs_hash_with_no_itemref(cfs_hash_t *hs)
}

static inline int
cfs_hash_with_bigname(cfs_hash_t *hs)
cfs_hash_with_bigname(struct cfs_hash *hs)
{
	return (hs->hs_flags & CFS_HASH_BIGNAME) != 0;
}

static inline int
cfs_hash_with_counter(cfs_hash_t *hs)
cfs_hash_with_counter(struct cfs_hash *hs)
{
	return (hs->hs_flags & CFS_HASH_COUNTER) != 0;
}

static inline int
cfs_hash_with_rehash(cfs_hash_t *hs)
cfs_hash_with_rehash(struct cfs_hash *hs)
{
	return (hs->hs_flags & CFS_HASH_REHASH) != 0;
}

static inline int
cfs_hash_with_rehash_key(cfs_hash_t *hs)
cfs_hash_with_rehash_key(struct cfs_hash *hs)
{
	return (hs->hs_flags & CFS_HASH_REHASH_KEY) != 0;
}

static inline int
cfs_hash_with_shrink(cfs_hash_t *hs)
cfs_hash_with_shrink(struct cfs_hash *hs)
{
	return (hs->hs_flags & CFS_HASH_SHRINK) != 0;
}

static inline int
cfs_hash_with_assert_empty(cfs_hash_t *hs)
cfs_hash_with_assert_empty(struct cfs_hash *hs)
{
	return (hs->hs_flags & CFS_HASH_ASSERT_EMPTY) != 0;
}

static inline int
cfs_hash_with_depth(cfs_hash_t *hs)
cfs_hash_with_depth(struct cfs_hash *hs)
{
	return (hs->hs_flags & CFS_HASH_DEPTH) != 0;
}

static inline int
cfs_hash_with_nblk_change(cfs_hash_t *hs)
cfs_hash_with_nblk_change(struct cfs_hash *hs)
{
	return (hs->hs_flags & CFS_HASH_NBLK_CHANGE) != 0;
}

static inline int
cfs_hash_is_exiting(cfs_hash_t *hs)
cfs_hash_is_exiting(struct cfs_hash *hs)
{       /* cfs_hash_destroy is called */
	return hs->hs_exiting;
}

static inline int
cfs_hash_is_rehashing(cfs_hash_t *hs)
cfs_hash_is_rehashing(struct cfs_hash *hs)
{       /* rehash is launched */
	return hs->hs_rehash_bits != 0;
}

static inline int
cfs_hash_is_iterating(cfs_hash_t *hs)
cfs_hash_is_iterating(struct cfs_hash *hs)
{       /* someone is calling cfs_hash_for_each_* */
	return hs->hs_iterating || hs->hs_iterators != 0;
}

static inline int
cfs_hash_bkt_size(cfs_hash_t *hs)
cfs_hash_bkt_size(struct cfs_hash *hs)
{
	return offsetof(struct cfs_hash_bucket, hsb_head[0]) +
	       hs->hs_hops->hop_hhead_size(hs) * CFS_HASH_BKT_NHLIST(hs) +
@@ -459,19 +459,19 @@ cfs_hash_bkt_size(cfs_hash_t *hs)
#define CFS_HOP(hs, op)	   (hs)->hs_ops->hs_ ## op

static inline unsigned
cfs_hash_id(cfs_hash_t *hs, const void *key, unsigned mask)
cfs_hash_id(struct cfs_hash *hs, const void *key, unsigned mask)
{
	return CFS_HOP(hs, hash)(hs, key, mask);
}

static inline void *
cfs_hash_key(cfs_hash_t *hs, struct hlist_node *hnode)
cfs_hash_key(struct cfs_hash *hs, struct hlist_node *hnode)
{
	return CFS_HOP(hs, key)(hnode);
}

static inline void
cfs_hash_keycpy(cfs_hash_t *hs, struct hlist_node *hnode, void *key)
cfs_hash_keycpy(struct cfs_hash *hs, struct hlist_node *hnode, void *key)
{
	if (CFS_HOP(hs, keycpy) != NULL)
		CFS_HOP(hs, keycpy)(hnode, key);
@@ -481,25 +481,25 @@ cfs_hash_keycpy(cfs_hash_t *hs, struct hlist_node *hnode, void *key)
 * Returns 1 on a match,
 */
static inline int
cfs_hash_keycmp(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
cfs_hash_keycmp(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
{
	return CFS_HOP(hs, keycmp)(key, hnode);
}

static inline void *
cfs_hash_object(cfs_hash_t *hs, struct hlist_node *hnode)
cfs_hash_object(struct cfs_hash *hs, struct hlist_node *hnode)
{
	return CFS_HOP(hs, object)(hnode);
}

static inline void
cfs_hash_get(cfs_hash_t *hs, struct hlist_node *hnode)
cfs_hash_get(struct cfs_hash *hs, struct hlist_node *hnode)
{
	return CFS_HOP(hs, get)(hs, hnode);
}

static inline void
cfs_hash_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
cfs_hash_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
{
	LASSERT(CFS_HOP(hs, put_locked) != NULL);

@@ -507,7 +507,7 @@ cfs_hash_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
}

static inline void
cfs_hash_put(cfs_hash_t *hs, struct hlist_node *hnode)
cfs_hash_put(struct cfs_hash *hs, struct hlist_node *hnode)
{
	LASSERT(CFS_HOP(hs, put) != NULL);

@@ -515,36 +515,36 @@ cfs_hash_put(cfs_hash_t *hs, struct hlist_node *hnode)
}

static inline void
cfs_hash_exit(cfs_hash_t *hs, struct hlist_node *hnode)
cfs_hash_exit(struct cfs_hash *hs, struct hlist_node *hnode)
{
	if (CFS_HOP(hs, exit))
		CFS_HOP(hs, exit)(hs, hnode);
}

static inline void cfs_hash_lock(cfs_hash_t *hs, int excl)
static inline void cfs_hash_lock(struct cfs_hash *hs, int excl)
{
	hs->hs_lops->hs_lock(&hs->hs_lock, excl);
}

static inline void cfs_hash_unlock(cfs_hash_t *hs, int excl)
static inline void cfs_hash_unlock(struct cfs_hash *hs, int excl)
{
	hs->hs_lops->hs_unlock(&hs->hs_lock, excl);
}

static inline int cfs_hash_dec_and_lock(cfs_hash_t *hs,
static inline int cfs_hash_dec_and_lock(struct cfs_hash *hs,
					atomic_t *condition)
{
	LASSERT(cfs_hash_with_no_bktlock(hs));
	return atomic_dec_and_lock(condition, &hs->hs_lock.spin);
}

static inline void cfs_hash_bd_lock(cfs_hash_t *hs,
static inline void cfs_hash_bd_lock(struct cfs_hash *hs,
				    struct cfs_hash_bd *bd, int excl)
{
	hs->hs_lops->hs_bkt_lock(&bd->bd_bucket->hsb_lock, excl);
}

static inline void cfs_hash_bd_unlock(cfs_hash_t *hs,
static inline void cfs_hash_bd_unlock(struct cfs_hash *hs,
				      struct cfs_hash_bd *bd, int excl)
{
	hs->hs_lops->hs_bkt_unlock(&bd->bd_bucket->hsb_lock, excl);
@@ -554,21 +554,21 @@ static inline void cfs_hash_bd_unlock(cfs_hash_t *hs,
 * operations on cfs_hash bucket (bd: bucket descriptor),
 * they are normally for hash-table without rehash
 */
void cfs_hash_bd_get(cfs_hash_t *hs, const void *key, struct cfs_hash_bd *bd);
void cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd);

static inline void cfs_hash_bd_get_and_lock(cfs_hash_t *hs, const void *key,
static inline void cfs_hash_bd_get_and_lock(struct cfs_hash *hs, const void *key,
					    struct cfs_hash_bd *bd, int excl)
{
	cfs_hash_bd_get(hs, key, bd);
	cfs_hash_bd_lock(hs, bd, excl);
}

static inline unsigned cfs_hash_bd_index_get(cfs_hash_t *hs, struct cfs_hash_bd *bd)
static inline unsigned cfs_hash_bd_index_get(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
	return bd->bd_offset | (bd->bd_bucket->hsb_index << hs->hs_bkt_bits);
}

static inline void cfs_hash_bd_index_set(cfs_hash_t *hs,
static inline void cfs_hash_bd_index_set(struct cfs_hash *hs,
					 unsigned index, struct cfs_hash_bd *bd)
{
	bd->bd_bucket = hs->hs_buckets[index >> hs->hs_bkt_bits];
@@ -576,7 +576,7 @@ static inline void cfs_hash_bd_index_set(cfs_hash_t *hs,
}

static inline void *
cfs_hash_bd_extra_get(cfs_hash_t *hs, struct cfs_hash_bd *bd)
cfs_hash_bd_extra_get(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
	return (void *)bd->bd_bucket +
	       cfs_hash_bkt_size(hs) - hs->hs_extra_bytes;
@@ -614,14 +614,14 @@ cfs_hash_bd_compare(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
	return 0;
}

void cfs_hash_bd_add_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd,
void cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			    struct hlist_node *hnode);
void cfs_hash_bd_del_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd,
void cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			    struct hlist_node *hnode);
void cfs_hash_bd_move_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd_old,
void cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
			     struct cfs_hash_bd *bd_new, struct hlist_node *hnode);

static inline int cfs_hash_bd_dec_and_lock(cfs_hash_t *hs, struct cfs_hash_bd *bd,
static inline int cfs_hash_bd_dec_and_lock(struct cfs_hash *hs, struct cfs_hash_bd *bd,
					   atomic_t *condition)
{
	LASSERT(cfs_hash_with_spin_bktlock(hs));
@@ -629,21 +629,21 @@ static inline int cfs_hash_bd_dec_and_lock(cfs_hash_t *hs, struct cfs_hash_bd *b
				       &bd->bd_bucket->hsb_lock.spin);
}

static inline struct hlist_head *cfs_hash_bd_hhead(cfs_hash_t *hs,
static inline struct hlist_head *cfs_hash_bd_hhead(struct cfs_hash *hs,
						  struct cfs_hash_bd *bd)
{
	return hs->hs_hops->hop_hhead(hs, bd);
}

struct hlist_node *cfs_hash_bd_lookup_locked(cfs_hash_t *hs,
struct hlist_node *cfs_hash_bd_lookup_locked(struct cfs_hash *hs,
					    struct cfs_hash_bd *bd, const void *key);
struct hlist_node *cfs_hash_bd_peek_locked(cfs_hash_t *hs,
struct hlist_node *cfs_hash_bd_peek_locked(struct cfs_hash *hs,
					  struct cfs_hash_bd *bd, const void *key);
struct hlist_node *cfs_hash_bd_findadd_locked(cfs_hash_t *hs,
struct hlist_node *cfs_hash_bd_findadd_locked(struct cfs_hash *hs,
					     struct cfs_hash_bd *bd, const void *key,
					     struct hlist_node *hnode,
					     int insist_add);
struct hlist_node *cfs_hash_bd_finddel_locked(cfs_hash_t *hs,
struct hlist_node *cfs_hash_bd_finddel_locked(struct cfs_hash *hs,
					     struct cfs_hash_bd *bd, const void *key,
					     struct hlist_node *hnode);

@@ -651,87 +651,87 @@ struct hlist_node *cfs_hash_bd_finddel_locked(cfs_hash_t *hs,
 * operations on cfs_hash bucket (bd: bucket descriptor),
 * they are safe for hash-table with rehash
 */
void cfs_hash_dual_bd_get(cfs_hash_t *hs, const void *key, struct cfs_hash_bd *bds);
void cfs_hash_dual_bd_lock(cfs_hash_t *hs, struct cfs_hash_bd *bds, int excl);
void cfs_hash_dual_bd_unlock(cfs_hash_t *hs, struct cfs_hash_bd *bds, int excl);
void cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bds);
void cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl);
void cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl);

static inline void cfs_hash_dual_bd_get_and_lock(cfs_hash_t *hs, const void *key,
static inline void cfs_hash_dual_bd_get_and_lock(struct cfs_hash *hs, const void *key,
						 struct cfs_hash_bd *bds, int excl)
{
	cfs_hash_dual_bd_get(hs, key, bds);
	cfs_hash_dual_bd_lock(hs, bds, excl);
}

struct hlist_node *cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs,
struct hlist_node *cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs,
						 struct cfs_hash_bd *bds,
						 const void *key);
struct hlist_node *cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs,
struct hlist_node *cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs,
						  struct cfs_hash_bd *bds,
						  const void *key,
						  struct hlist_node *hnode,
						  int insist_add);
struct hlist_node *cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs,
struct hlist_node *cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs,
						  struct cfs_hash_bd *bds,
						  const void *key,
						  struct hlist_node *hnode);

/* Hash init/cleanup functions */
cfs_hash_t *cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
struct cfs_hash *cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
			    unsigned bkt_bits, unsigned extra_bytes,
			    unsigned min_theta, unsigned max_theta,
			    cfs_hash_ops_t *ops, unsigned flags);

cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs);
void cfs_hash_putref(cfs_hash_t *hs);
struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs);
void cfs_hash_putref(struct cfs_hash *hs);

/* Hash addition functions */
void cfs_hash_add(cfs_hash_t *hs, const void *key,
void cfs_hash_add(struct cfs_hash *hs, const void *key,
		  struct hlist_node *hnode);
int cfs_hash_add_unique(cfs_hash_t *hs, const void *key,
int cfs_hash_add_unique(struct cfs_hash *hs, const void *key,
			struct hlist_node *hnode);
void *cfs_hash_findadd_unique(cfs_hash_t *hs, const void *key,
void *cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
			      struct hlist_node *hnode);

/* Hash deletion functions */
void *cfs_hash_del(cfs_hash_t *hs, const void *key, struct hlist_node *hnode);
void *cfs_hash_del_key(cfs_hash_t *hs, const void *key);
void *cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode);
void *cfs_hash_del_key(struct cfs_hash *hs, const void *key);

/* Hash lookup/for_each functions */
#define CFS_HASH_LOOP_HOG       1024

typedef int (*cfs_hash_for_each_cb_t)(cfs_hash_t *hs, struct cfs_hash_bd *bd,
typedef int (*cfs_hash_for_each_cb_t)(struct cfs_hash *hs, struct cfs_hash_bd *bd,
				      struct hlist_node *node, void *data);
void *cfs_hash_lookup(cfs_hash_t *hs, const void *key);
void cfs_hash_for_each(cfs_hash_t *hs, cfs_hash_for_each_cb_t, void *data);
void cfs_hash_for_each_safe(cfs_hash_t *hs, cfs_hash_for_each_cb_t, void *data);
int  cfs_hash_for_each_nolock(cfs_hash_t *hs,
void *cfs_hash_lookup(struct cfs_hash *hs, const void *key);
void cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data);
void cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data);
int  cfs_hash_for_each_nolock(struct cfs_hash *hs,
			      cfs_hash_for_each_cb_t, void *data);
int  cfs_hash_for_each_empty(cfs_hash_t *hs,
int  cfs_hash_for_each_empty(struct cfs_hash *hs,
			     cfs_hash_for_each_cb_t, void *data);
void cfs_hash_for_each_key(cfs_hash_t *hs, const void *key,
void cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
			   cfs_hash_for_each_cb_t, void *data);
typedef int (*cfs_hash_cond_opt_cb_t)(void *obj, void *data);
void cfs_hash_cond_del(cfs_hash_t *hs, cfs_hash_cond_opt_cb_t, void *data);
void cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t, void *data);

void cfs_hash_hlist_for_each(cfs_hash_t *hs, unsigned hindex,
void cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
			     cfs_hash_for_each_cb_t, void *data);
int  cfs_hash_is_empty(cfs_hash_t *hs);
__u64 cfs_hash_size_get(cfs_hash_t *hs);
int  cfs_hash_is_empty(struct cfs_hash *hs);
__u64 cfs_hash_size_get(struct cfs_hash *hs);

/*
 * Rehash - Theta is calculated to be the average chained
 * hash depth assuming a perfectly uniform hash function.
 */
void cfs_hash_rehash_cancel_locked(cfs_hash_t *hs);
void cfs_hash_rehash_cancel(cfs_hash_t *hs);
int  cfs_hash_rehash(cfs_hash_t *hs, int do_rehash);
void cfs_hash_rehash_key(cfs_hash_t *hs, const void *old_key,
void cfs_hash_rehash_cancel_locked(struct cfs_hash *hs);
void cfs_hash_rehash_cancel(struct cfs_hash *hs);
int  cfs_hash_rehash(struct cfs_hash *hs, int do_rehash);
void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
			 void *new_key, struct hlist_node *hnode);

#if CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1
/* Validate hnode references the correct key */
static inline void
cfs_hash_key_validate(cfs_hash_t *hs, const void *key,
cfs_hash_key_validate(struct cfs_hash *hs, const void *key,
		      struct hlist_node *hnode)
{
	LASSERT(cfs_hash_keycmp(hs, key, hnode));
@@ -739,7 +739,7 @@ cfs_hash_key_validate(cfs_hash_t *hs, const void *key,

/* Validate hnode is in the correct bucket */
static inline void
cfs_hash_bucket_validate(cfs_hash_t *hs, struct cfs_hash_bd *bd,
cfs_hash_bucket_validate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			 struct hlist_node *hnode)
{
	struct cfs_hash_bd   bds[2];
@@ -752,11 +752,11 @@ cfs_hash_bucket_validate(cfs_hash_t *hs, struct cfs_hash_bd *bd,
#else /* CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1 */

static inline void
cfs_hash_key_validate(cfs_hash_t *hs, const void *key,
cfs_hash_key_validate(struct cfs_hash *hs, const void *key,
		      struct hlist_node *hnode) {}

static inline void
cfs_hash_bucket_validate(cfs_hash_t *hs, struct cfs_hash_bd *bd,
cfs_hash_bucket_validate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			 struct hlist_node *hnode) {}

#endif /* CFS_HASH_DEBUG_LEVEL */
@@ -778,13 +778,13 @@ static inline int __cfs_hash_theta_frac(int theta)
	       (__cfs_hash_theta_int(theta) * 1000);
}

static inline int __cfs_hash_theta(cfs_hash_t *hs)
static inline int __cfs_hash_theta(struct cfs_hash *hs)
{
	return (atomic_read(&hs->hs_count) <<
		CFS_HASH_THETA_BITS) >> hs->hs_cur_bits;
}

static inline void __cfs_hash_set_theta(cfs_hash_t *hs, int min, int max)
static inline void __cfs_hash_set_theta(struct cfs_hash *hs, int min, int max)
{
	LASSERT(min < max);
	hs->hs_min_theta = (__u16)min;
@@ -794,7 +794,7 @@ static inline void __cfs_hash_set_theta(cfs_hash_t *hs, int min, int max)
/* Generic debug formatting routines mainly for proc handler */
struct seq_file;
int cfs_hash_debug_header(struct seq_file *m);
int cfs_hash_debug_str(cfs_hash_t *hs, struct seq_file *m);
int cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m);

/*
 * Generic djb2 hash algorithm for character arrays.
+1 −1
Original line number Diff line number Diff line
@@ -622,7 +622,7 @@ struct lu_site {
	/**
	 * objects hash table
	 */
	cfs_hash_t	       *ls_obj_hash;
	struct cfs_hash	       *ls_obj_hash;
	/**
	 * index of bucket on hash table while purging
	 */
+1 −1
Original line number Diff line number Diff line
@@ -375,7 +375,7 @@ struct ldlm_namespace {
	ldlm_side_t		ns_client;

	/** Resource hash table for namespace. */
	cfs_hash_t		*ns_rs_hash;
	struct cfs_hash		*ns_rs_hash;

	/** serialize */
	spinlock_t		ns_lock;
+2 −2
Original line number Diff line number Diff line
@@ -197,12 +197,12 @@ struct obd_export {
	/** Connection count value from last succesful reconnect rpc */
	__u32		     exp_conn_cnt;
	/** Hash list of all ldlm locks granted on this export */
	cfs_hash_t	       *exp_lock_hash;
	struct cfs_hash	       *exp_lock_hash;
	/**
	 * Hash list for Posix lock deadlock detection, added with
	 * ldlm_lock::l_exp_flock_hash.
	 */
	cfs_hash_t	       *exp_flock_hash;
	struct cfs_hash	       *exp_flock_hash;
	struct list_head		exp_outstanding_replies;
	struct list_head		exp_uncommitted_replies;
	spinlock_t		  exp_uncommitted_replies_lock;
+2 −2
Original line number Diff line number Diff line
@@ -1427,7 +1427,7 @@ struct nrs_fifo_req {
struct nrs_crrn_net {
	struct ptlrpc_nrs_resource	cn_res;
	cfs_binheap_t		       *cn_binheap;
	cfs_hash_t		       *cn_cli_hash;
	struct cfs_hash		       *cn_cli_hash;
	/**
	 * Used when a new scheduling round commences, in order to synchronize
	 * all clients with the new round number.
@@ -1568,7 +1568,7 @@ struct nrs_orr_key {
struct nrs_orr_data {
	struct ptlrpc_nrs_resource	od_res;
	cfs_binheap_t		       *od_binheap;
	cfs_hash_t		       *od_obj_hash;
	struct cfs_hash		       *od_obj_hash;
	struct kmem_cache		       *od_cache;
	/**
	 * Used when a new scheduling round commences, in order to synchronize
Loading