Commit 11957be2 authored by Cong Wang, committed by David S. Miller

htb: use anonymous union for simplicity

cl->leaf.q is slightly more readable than cl->un.leaf.q.
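For illustration (not part of the patch), a minimal sketch of the pattern: with an anonymous union (standard since C11 and long supported by GCC), the union's members are accessed as if they belonged to the enclosing struct, dropping the intermediate "un." step. The member types below are placeholders, not the real htb ones:

	struct example {
		int level;
		union {					/* anonymous: the "un" name is gone */
			struct { void *q; } leaf;	/* used when level == 0 */
			struct { void *feed; } inner;	/* used for inner nodes */
		};					/* previously "} un;" */
	};

	/* ex->leaf.q now resolves directly; it used to be ex->un.leaf.q */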

Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8ecc7c8a
net/sched/sch_htb.c: +49 −49
@@ -132,7 +132,7 @@ struct htb_class {
 		struct htb_class_inner {
 			struct htb_prio clprio[TC_HTB_NUMPRIO];
 		} inner;
-	} un;
+	};
 	s64			pq_key;
 
 	int			prio_activity;	/* for which prios are we active */
@@ -411,13 +411,13 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
 			int prio = ffz(~m);
 			m &= ~(1 << prio);
 
-			if (p->un.inner.clprio[prio].feed.rb_node)
+			if (p->inner.clprio[prio].feed.rb_node)
 				/* parent already has its feed in use so that
 				 * reset bit in mask as parent is already ok
 				 */
 				mask &= ~(1 << prio);
 
-			htb_add_to_id_tree(&p->un.inner.clprio[prio].feed, cl, prio);
+			htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio);
 		}
 		p->prio_activity |= mask;
 		cl = p;
@@ -447,19 +447,19 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
 			int prio = ffz(~m);
 			m &= ~(1 << prio);
 
-			if (p->un.inner.clprio[prio].ptr == cl->node + prio) {
+			if (p->inner.clprio[prio].ptr == cl->node + prio) {
 				/* we are removing child which is pointed to from
 				 * parent feed - forget the pointer but remember
 				 * classid
 				 */
-				p->un.inner.clprio[prio].last_ptr_id = cl->common.classid;
-				p->un.inner.clprio[prio].ptr = NULL;
+				p->inner.clprio[prio].last_ptr_id = cl->common.classid;
+				p->inner.clprio[prio].ptr = NULL;
 			}
 
 			htb_safe_rb_erase(cl->node + prio,
-					  &p->un.inner.clprio[prio].feed);
+					  &p->inner.clprio[prio].feed);
 
-			if (!p->un.inner.clprio[prio].feed.rb_node)
+			if (!p->inner.clprio[prio].feed.rb_node)
 				mask |= 1 << prio;
 		}
 
@@ -555,7 +555,7 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
  */
 static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
 {
-	WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);
+	WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);
 
 	if (!cl->prio_activity) {
 		cl->prio_activity = 1 << cl->prio;
@@ -599,7 +599,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		__qdisc_drop(skb, to_free);
 		return ret;
 #endif
-	} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q,
+	} else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
 					to_free)) != NET_XMIT_SUCCESS) {
 		if (net_xmit_drop_count(ret)) {
 			qdisc_qstats_drop(sch);
@@ -807,7 +807,7 @@ static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
 			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
 			if (!cl->level)
 				return cl;
-			clp = &cl->un.inner.clprio[prio];
+			clp = &cl->inner.clprio[prio];
 			(++sp)->root = clp->feed.rb_node;
 			sp->pptr = &clp->ptr;
 			sp->pid = &clp->last_ptr_id;
@@ -841,7 +841,7 @@ next:
 		 * graft operation on the leaf since last dequeue;
 		 * simply deactivate and skip such class
 		 */
-		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
+		if (unlikely(cl->leaf.q->q.qlen == 0)) {
 			struct htb_class *next;
 			htb_deactivate(q, cl);
 
@@ -857,12 +857,12 @@ next:
 			goto next;
 		}
 
-		skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
+		skb = cl->leaf.q->dequeue(cl->leaf.q);
 		if (likely(skb != NULL))
 			break;
 
-		qdisc_warn_nonwc("htb", cl->un.leaf.q);
-		htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr:
+		qdisc_warn_nonwc("htb", cl->leaf.q);
+		htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr:
					 &q->hlevel[0].hprio[prio].ptr);
 		cl = htb_lookup_leaf(hprio, prio);
 
@@ -870,16 +870,16 @@ next:
 
 	if (likely(skb != NULL)) {
 		bstats_update(&cl->bstats, skb);
-		cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
-		if (cl->un.leaf.deficit[level] < 0) {
-			cl->un.leaf.deficit[level] += cl->quantum;
-			htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr :
+		cl->leaf.deficit[level] -= qdisc_pkt_len(skb);
+		if (cl->leaf.deficit[level] < 0) {
+			cl->leaf.deficit[level] += cl->quantum;
+			htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
						 &q->hlevel[0].hprio[prio].ptr);
 		}
 		/* this used to be after charge_class but this constelation
 		 * gives us slightly better performance
 		 */
-		if (!cl->un.leaf.q->q.qlen)
+		if (!cl->leaf.q->q.qlen)
 			htb_deactivate(q, cl);
 		htb_charge_class(q, cl, level, skb);
 	}
@@ -956,10 +956,10 @@ static void htb_reset(struct Qdisc *sch)
 	for (i = 0; i < q->clhash.hashsize; i++) {
 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
 			if (cl->level)
-				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
+				memset(&cl->inner, 0, sizeof(cl->inner));
 			else {
-				if (cl->un.leaf.q)
-					qdisc_reset(cl->un.leaf.q);
+				if (cl->leaf.q)
+					qdisc_reset(cl->leaf.q);
 			}
 			cl->prio_activity = 0;
 			cl->cmode = HTB_CAN_SEND;
@@ -1082,8 +1082,8 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
 	 */
 	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
 	tcm->tcm_handle = cl->common.classid;
-	if (!cl->level && cl->un.leaf.q)
-		tcm->tcm_info = cl->un.leaf.q->handle;
+	if (!cl->level && cl->leaf.q)
+		tcm->tcm_info = cl->leaf.q->handle;
 
 	nest = nla_nest_start(skb, TCA_OPTIONS);
 	if (nest == NULL)
@@ -1126,9 +1126,9 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
 	};
 	__u32 qlen = 0;
 
-	if (!cl->level && cl->un.leaf.q) {
-		qlen = cl->un.leaf.q->q.qlen;
-		qs.backlog = cl->un.leaf.q->qstats.backlog;
+	if (!cl->level && cl->leaf.q) {
+		qlen = cl->leaf.q->q.qlen;
+		qs.backlog = cl->leaf.q->qstats.backlog;
 	}
 	cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
 				    INT_MIN, INT_MAX);
@@ -1156,14 +1156,14 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 				     cl->common.classid, extack)) == NULL)
 		return -ENOBUFS;
 
-	*old = qdisc_replace(sch, new, &cl->un.leaf.q);
+	*old = qdisc_replace(sch, new, &cl->leaf.q);
 	return 0;
 }
 
 static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
 {
 	struct htb_class *cl = (struct htb_class *)arg;
-	return !cl->level ? cl->un.leaf.q : NULL;
+	return !cl->level ? cl->leaf.q : NULL;
 }
 
 static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
@@ -1189,15 +1189,15 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
 {
 	struct htb_class *parent = cl->parent;
 
-	WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);
+	WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity);
 
 	if (parent->cmode != HTB_CAN_SEND)
 		htb_safe_rb_erase(&parent->pq_node,
 				  &q->hlevel[parent->level].wait_pq);
 
 	parent->level = 0;
-	memset(&parent->un.inner, 0, sizeof(parent->un.inner));
-	parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
+	memset(&parent->inner, 0, sizeof(parent->inner));
+	parent->leaf.q = new_q ? new_q : &noop_qdisc;
 	parent->tokens = parent->buffer;
 	parent->ctokens = parent->cbuffer;
 	parent->t_c = ktime_get_ns();
@@ -1207,8 +1207,8 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
 static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
 {
 	if (!cl->level) {
-		WARN_ON(!cl->un.leaf.q);
-		qdisc_destroy(cl->un.leaf.q);
+		WARN_ON(!cl->leaf.q);
+		qdisc_destroy(cl->leaf.q);
 	}
 	gen_kill_estimator(&cl->rate_est);
 	tcf_block_put(cl->block);
@@ -1270,11 +1270,11 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 	sch_tree_lock(sch);
 
 	if (!cl->level) {
-		unsigned int qlen = cl->un.leaf.q->q.qlen;
-		unsigned int backlog = cl->un.leaf.q->qstats.backlog;
+		unsigned int qlen = cl->leaf.q->q.qlen;
+		unsigned int backlog = cl->leaf.q->qstats.backlog;
 
-		qdisc_reset(cl->un.leaf.q);
-		qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);
+		qdisc_reset(cl->leaf.q);
+		qdisc_tree_reduce_backlog(cl->leaf.q, qlen, backlog);
 	}
 
 	/* delete from hash and active; remainder in destroy_class */
@@ -1403,13 +1403,13 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 					  classid, NULL);
 		sch_tree_lock(sch);
 		if (parent && !parent->level) {
-			unsigned int qlen = parent->un.leaf.q->q.qlen;
-			unsigned int backlog = parent->un.leaf.q->qstats.backlog;
+			unsigned int qlen = parent->leaf.q->q.qlen;
+			unsigned int backlog = parent->leaf.q->qstats.backlog;
 
 			/* turn parent into inner node */
-			qdisc_reset(parent->un.leaf.q);
-			qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
-			qdisc_destroy(parent->un.leaf.q);
+			qdisc_reset(parent->leaf.q);
+			qdisc_tree_reduce_backlog(parent->leaf.q, qlen, backlog);
+			qdisc_destroy(parent->leaf.q);
 			if (parent->prio_activity)
 				htb_deactivate(q, parent);
 
@@ -1420,10 +1420,10 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 			}
 			parent->level = (parent->parent ? parent->parent->level
 					 : TC_HTB_MAXDEPTH) - 1;
-			memset(&parent->un.inner, 0, sizeof(parent->un.inner));
+			memset(&parent->inner, 0, sizeof(parent->inner));
 		}
 		/* leaf (we) needs elementary qdisc */
-		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
+		cl->leaf.q = new_q ? new_q : &noop_qdisc;
 
 		cl->common.classid = classid;
 		cl->parent = parent;
@@ -1439,8 +1439,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 		qdisc_class_hash_insert(&q->clhash, &cl->common);
 		if (parent)
 			parent->children++;
-		if (cl->un.leaf.q != &noop_qdisc)
-			qdisc_hash_add(cl->un.leaf.q, true);
+		if (cl->leaf.q != &noop_qdisc)
+			qdisc_hash_add(cl->leaf.q, true);
 	} else {
 		if (tca[TCA_RATE]) {
 			err = gen_replace_estimator(&cl->bstats, NULL,
@@ -1462,7 +1462,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 	psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
 
 	/* it used to be a nasty bug here, we have to check that node
-	 * is really leaf before changing cl->un.leaf !
+	 * is really leaf before changing cl->leaf !
 	 */
 	if (!cl->level) {
 		u64 quantum = cl->rate.rate_bytes_ps;