Commit aebe4426 authored by Petr Machata, committed by David S. Miller
Browse files

net: sched: Pass root lock to Qdisc_ops.enqueue



A following patch introduces qevents, points in the qdisc algorithm where
a packet can be processed by user-defined filters. Should this processing
lead to a situation where a new packet is to be enqueued on the same port,
holding the root lock would lead to deadlocks. To solve the issue, qevent
handler needs to unlock and relock the root lock when necessary.

To that end, add the root lock argument to the qdisc op enqueue, and
propagate throughout.

Signed-off-by: Petr Machata <petrm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5e701e49
Loading
Loading
Loading
Loading
+4 −2
Original line number Diff line number Diff line
@@ -57,6 +57,7 @@ struct qdisc_skb_head {
struct Qdisc {
	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   spinlock_t *root_lock,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
	unsigned int		flags;
@@ -241,6 +242,7 @@ struct Qdisc_ops {

	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   spinlock_t *root_lock,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);
@@ -788,11 +790,11 @@ static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
#endif
}

/* Calculate the packet length for stats/accounting purposes, then hand the
 * skb to the qdisc's enqueue op.
 *
 * @skb:       packet to enqueue
 * @sch:       qdisc to enqueue onto
 * @root_lock: the qdisc root lock held by the caller (NULL in the lockless
 *             TCQ_F_NOLOCK path), passed through so enqueue handlers can
 *             drop/re-acquire it to avoid deadlocks (see qevents)
 * @to_free:   list head collecting skbs to be freed outside the lock
 *
 * Returns the NET_XMIT_* verdict from the qdisc's enqueue op.
 *
 * NOTE(review): the scraped diff showed both the pre- and post-patch lines of
 * this function flattened together; this is the resolved post-patch version,
 * matching the commit's stated intent of propagating root_lock.
 */
static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, root_lock, to_free);
}

static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
+2 −2
Original line number Diff line number Diff line
@@ -3749,7 +3749,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
	qdisc_calculate_pkt_len(skb, q);

	if (q->flags & TCQ_F_NOLOCK) {
		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
		rc = q->enqueue(skb, q, NULL, &to_free) & NET_XMIT_MASK;
		qdisc_run(q);

		if (unlikely(to_free))
@@ -3792,7 +3792,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
		qdisc_run_end(q);
		rc = NET_XMIT_SUCCESS;
	} else {
		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
		rc = q->enqueue(skb, q, root_lock, &to_free) & NET_XMIT_MASK;
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
+2 −2
Original line number Diff line number Diff line
@@ -374,7 +374,7 @@ static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl,

/* --------------------------- Qdisc operations ---------------------------- */

static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
			  struct sk_buff **to_free)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
@@ -432,7 +432,7 @@ done:
#endif
	}

	ret = qdisc_enqueue(skb, flow->q, to_free);
	ret = qdisc_enqueue(skb, flow->q, root_lock, to_free);
	if (ret != NET_XMIT_SUCCESS) {
drop: __maybe_unused
		if (net_xmit_drop_count(ret)) {
+1 −1
Original line number Diff line number Diff line
@@ -13,7 +13,7 @@
#include <linux/skbuff.h>
#include <net/pkt_sched.h>

static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch,
static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
			     struct sk_buff **to_free)
{
	qdisc_drop(skb, sch, to_free);
+1 −1
Original line number Diff line number Diff line
@@ -1687,7 +1687,7 @@ hash:

static void cake_reconfigure(struct Qdisc *sch);

static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
			struct sk_buff **to_free)
{
	struct cake_sched_data *q = qdisc_priv(sch);
Loading