Commit 1e27138b authored by Lisa Nguyen, committed by Greg Kroah-Hartman
Browse files

staging: lustre: Remove typedef and update cfs_hash_bucket struct



Remove typedef keyword and rename the cfs_hash_bucket_t struct to
cfs_hash_bucket in libcfs_hash.h. These changes resolve the
"Do not add new typedefs" warning generated by checkpatch.pl and
make the code conform to the kernel coding style.

The struct variables in hash.c are updated to reflect this change
as well.

Signed-off-by: Lisa Nguyen <lisa@xenapiadmin.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 7c64884b
Loading
Loading
Loading
Loading
+6 −6
Original line number Diff line number Diff line
@@ -97,20 +97,20 @@ union cfs_hash_lock {
 *   which depends on requirement of user
 * - some extra bytes (caller can require it while creating hash)
 */
typedef struct cfs_hash_bucket {
struct cfs_hash_bucket {
	union cfs_hash_lock	hsb_lock;	/**< bucket lock */
	__u32			hsb_count;	/**< current entries */
	__u32			hsb_version;	/**< change version */
	unsigned int		hsb_index;	/**< index of bucket */
	int			hsb_depmax;	/**< max depth on bucket */
	long			hsb_head[0];	/**< hash-head array */
} cfs_hash_bucket_t;
};

/**
 * cfs_hash bucket descriptor, it's normally in stack of caller
 */
typedef struct cfs_hash_bd {
	cfs_hash_bucket_t	  *bd_bucket;      /**< address of bucket */
	struct cfs_hash_bucket	*bd_bucket;      /**< address of bucket */
	unsigned int		bd_offset;      /**< offset in bucket */
} cfs_hash_bd_t;

@@ -221,7 +221,7 @@ typedef struct cfs_hash {
	/** hash list operations */
	struct cfs_hash_hlist_ops  *hs_hops;
	/** hash buckets-table */
	cfs_hash_bucket_t	 **hs_buckets;
	struct cfs_hash_bucket	 **hs_buckets;
	/** total number of items on this hash-table */
	atomic_t		hs_count;
	/** hash flags, see cfs_hash_tag for detail */
@@ -255,7 +255,7 @@ typedef struct cfs_hash {
	/** refcount on this hash table */
	atomic_t		hs_refcount;
	/** rehash buckets-table */
	cfs_hash_bucket_t	 **hs_rehash_buckets;
	struct cfs_hash_bucket	 **hs_rehash_buckets;
#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
	/** serialize debug members */
	spinlock_t			hs_dep_lock;
@@ -451,7 +451,7 @@ cfs_hash_is_iterating(cfs_hash_t *hs)
static inline int
cfs_hash_bkt_size(cfs_hash_t *hs)
{
	return offsetof(cfs_hash_bucket_t, hsb_head[0]) +
	return offsetof(struct cfs_hash_bucket, hsb_head[0]) +
	       hs->hs_hops->hop_hhead_size(hs) * CFS_HASH_BKT_NHLIST(hs) +
	       hs->hs_extra_bytes;
}
+11 −11
Original line number Diff line number Diff line
@@ -469,7 +469,7 @@ cfs_hash_hlist_setup(cfs_hash_t *hs)
}

static void
cfs_hash_bd_from_key(cfs_hash_t *hs, cfs_hash_bucket_t **bkts,
cfs_hash_bd_from_key(cfs_hash_t *hs, struct cfs_hash_bucket **bkts,
		     unsigned int bits, const void *key, cfs_hash_bd_t *bd)
{
	unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);
@@ -563,8 +563,8 @@ void
cfs_hash_bd_move_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd_old,
			cfs_hash_bd_t *bd_new, struct hlist_node *hnode)
{
	cfs_hash_bucket_t *obkt = bd_old->bd_bucket;
	cfs_hash_bucket_t *nbkt = bd_new->bd_bucket;
	struct cfs_hash_bucket *obkt = bd_old->bd_bucket;
	struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
	int		rc;

	if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
@@ -698,7 +698,7 @@ static void
cfs_hash_multi_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
		       unsigned n, int excl)
{
	cfs_hash_bucket_t *prev = NULL;
	struct cfs_hash_bucket *prev = NULL;
	int		i;

	/**
@@ -721,7 +721,7 @@ static void
cfs_hash_multi_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
			 unsigned n, int excl)
{
	cfs_hash_bucket_t *prev = NULL;
	struct cfs_hash_bucket *prev = NULL;
	int		i;

	cfs_hash_for_each_bd(bds, n, i) {
@@ -884,7 +884,7 @@ cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
EXPORT_SYMBOL(cfs_hash_dual_bd_finddel_locked);

static void
cfs_hash_buckets_free(cfs_hash_bucket_t **buckets,
cfs_hash_buckets_free(struct cfs_hash_bucket **buckets,
		      int bkt_size, int prev_size, int size)
{
	int     i;
@@ -902,11 +902,11 @@ cfs_hash_buckets_free(cfs_hash_bucket_t **buckets,
 * needed, the newly allocated buckets if allocation was needed and
 * successful, and NULL on error.
 */
static cfs_hash_bucket_t **
cfs_hash_buckets_realloc(cfs_hash_t *hs, cfs_hash_bucket_t **old_bkts,
static struct cfs_hash_bucket **
cfs_hash_buckets_realloc(cfs_hash_t *hs, struct cfs_hash_bucket **old_bkts,
			 unsigned int old_size, unsigned int new_size)
{
	cfs_hash_bucket_t **new_bkts;
	struct cfs_hash_bucket **new_bkts;
	int		 i;

	LASSERT(old_size == 0 || old_bkts != NULL);
@@ -1874,7 +1874,7 @@ static int
cfs_hash_rehash_worker(cfs_workitem_t *wi)
{
	cfs_hash_t	 *hs = container_of(wi, cfs_hash_t, hs_rehash_wi);
	cfs_hash_bucket_t **bkts;
	struct cfs_hash_bucket **bkts;
	cfs_hash_bd_t       bd;
	unsigned int	old_size;
	unsigned int	new_size;
@@ -2028,7 +2028,7 @@ int cfs_hash_debug_header(struct seq_file *m)
}
EXPORT_SYMBOL(cfs_hash_debug_header);

static cfs_hash_bucket_t **
static struct cfs_hash_bucket **
cfs_hash_full_bkts(cfs_hash_t *hs)
{
	/* NB: caller should hold hs->hs_rwlock if REHASH is set */