Commit 7f013ede authored by David S. Miller
Browse files


Pablo Neira Ayuso says:

====================
Netfilter updates for net-next

The following patchset contains Netfilter updates for net-next, they are:

1) Incorrect uapi header comment in bitwise, from Jeremy Sowden.

2) Fetch flow statistics if flow is still active.

3) Restrict flow matching on hardware based on input device.

4) Add nf_flow_offload_work_alloc() helper function.

5) Remove the last client of the FLOW_OFFLOAD_DYING flag, use teardown
   instead.

6) Use atomic bitwise operation to operate with flow flags.

7) Add nf_flowtable_hw_offload() helper function to check for the
   NF_FLOWTABLE_HW_OFFLOAD flag.

8) Add NF_FLOW_HW_REFRESH to retry hardware offload from the flowtable
   software datapath.

9) Remove indirect calls in xt_hashlimit, from Florian Westphal.

10) Add nf_flow_offload_tuple() helper to consolidate code.

11) Add nf_flow_table_offload_cmd() helper function.

12) A few whitespace cleanups in nf_tables in bitwise and the bitmap/hash
    set types, from Jeremy Sowden.

13) Cleanup netlink attribute checks in bitwise, from Jeremy Sowden.

14) Replace goto by return in error path of nft_bitwise_dump(), from
    Jeremy Sowden.

15) Add bitwise operation netlink attribute, also from Jeremy.

16) Add nft_bitwise_init_bool(), from Jeremy Sowden.

17) Add nft_bitwise_eval_bool(), also from Jeremy.

18) Add nft_bitwise_dump_bool(), from Jeremy Sowden.

19) Disallow hardware offload for operations other than NFT_BITWISE_BOOL,
    from Jeremy Sowden.

20) Add NFTA_BITWISE_DATA netlink attribute, again from Jeremy.

21) Add support for bitwise shift operation, from Jeremy Sowden.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9aaa2949 567d746b
Loading
Loading
Loading
Loading
+15 −12
Original line number Diff line number Diff line
@@ -47,6 +47,11 @@ struct nf_flowtable {
	possible_net_t			net;
};

/* Return true when this flowtable was created with the
 * NF_FLOWTABLE_HW_OFFLOAD flag, i.e. entries should be pushed to
 * hardware in addition to the software datapath.
 */
static inline bool nf_flowtable_hw_offload(struct nf_flowtable *flowtable)
{
	return flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD;
}

enum flow_offload_tuple_dir {
	FLOW_OFFLOAD_DIR_ORIGINAL = IP_CT_DIR_ORIGINAL,
	FLOW_OFFLOAD_DIR_REPLY = IP_CT_DIR_REPLY,
@@ -83,13 +88,15 @@ struct flow_offload_tuple_rhash {
	struct flow_offload_tuple	tuple;
};

#define FLOW_OFFLOAD_SNAT	0x1
#define FLOW_OFFLOAD_DNAT	0x2
#define FLOW_OFFLOAD_DYING	0x4
#define FLOW_OFFLOAD_TEARDOWN	0x8
#define FLOW_OFFLOAD_HW		0x10
#define FLOW_OFFLOAD_HW_DYING	0x20
#define FLOW_OFFLOAD_HW_DEAD	0x40
enum nf_flow_flags {
	NF_FLOW_SNAT,
	NF_FLOW_DNAT,
	NF_FLOW_TEARDOWN,
	NF_FLOW_HW,
	NF_FLOW_HW_DYING,
	NF_FLOW_HW_DEAD,
	NF_FLOW_HW_REFRESH,
};

enum flow_offload_type {
	NF_FLOW_OFFLOAD_UNSPEC	= 0,
@@ -99,7 +106,7 @@ enum flow_offload_type {
struct flow_offload {
	struct flow_offload_tuple_rhash		tuplehash[FLOW_OFFLOAD_DIR_MAX];
	struct nf_conn				*ct;
	u16					flags;
	unsigned long				flags;
	u16					type;
	u32					timeout;
	struct rcu_head				rcu_head;
@@ -134,10 +141,6 @@ int nf_flow_table_init(struct nf_flowtable *flow_table);
void nf_flow_table_free(struct nf_flowtable *flow_table);

void flow_offload_teardown(struct flow_offload *flow);
static inline void flow_offload_dead(struct flow_offload *flow)
{
	flow->flags |= FLOW_OFFLOAD_DYING;
}

int nf_flow_snat_port(const struct flow_offload *flow,
		      struct sk_buff *skb, unsigned int thoff,
+23 −3
Original line number Diff line number Diff line
@@ -484,6 +484,20 @@ enum nft_immediate_attributes {
};
#define NFTA_IMMEDIATE_MAX	(__NFTA_IMMEDIATE_MAX - 1)

/**
 * enum nft_bitwise_ops - nf_tables bitwise operations
 *
 * @NFT_BITWISE_BOOL: mask-and-xor operation used to implement NOT, AND, OR and
 *                    XOR boolean operations
 * @NFT_BITWISE_LSHIFT: left-shift operation
 * @NFT_BITWISE_RSHIFT: right-shift operation
 */
enum nft_bitwise_ops {
	NFT_BITWISE_BOOL,
	NFT_BITWISE_LSHIFT,
	NFT_BITWISE_RSHIFT,
};

/**
 * enum nft_bitwise_attributes - nf_tables bitwise expression netlink attributes
 *
@@ -492,16 +506,20 @@ enum nft_immediate_attributes {
 * @NFTA_BITWISE_LEN: length of operands (NLA_U32)
 * @NFTA_BITWISE_MASK: mask value (NLA_NESTED: nft_data_attributes)
 * @NFTA_BITWISE_XOR: xor value (NLA_NESTED: nft_data_attributes)
 * @NFTA_BITWISE_OP: type of operation (NLA_U32: nft_bitwise_ops)
 * @NFTA_BITWISE_DATA: argument for non-boolean operations
 *                     (NLA_NESTED: nft_data_attributes)
 *
 * The bitwise expression performs the following operation:
 * The bitwise expression supports boolean and shift operations.  It implements
 * the boolean operations by performing the following operation:
 *
 * dreg = (sreg & mask) ^ xor
 *
 * which allow to express all bitwise operations:
 * with these mask and xor values:
 *
 * 		mask	xor
 * NOT:		1	1
 * OR:		0	x
 * OR:		~x	x
 * XOR:		1	x
 * AND:		x	0
 */
@@ -512,6 +530,8 @@ enum nft_bitwise_attributes {
	NFTA_BITWISE_LEN,
	NFTA_BITWISE_MASK,
	NFTA_BITWISE_XOR,
	NFTA_BITWISE_OP,
	NFTA_BITWISE_DATA,
	__NFTA_BITWISE_MAX
};
#define NFTA_BITWISE_MAX	(__NFTA_BITWISE_MAX - 1)
+15 −16
Original line number Diff line number Diff line
@@ -61,9 +61,9 @@ struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY);

	if (ct->status & IPS_SRC_NAT)
		flow->flags |= FLOW_OFFLOAD_SNAT;
		__set_bit(NF_FLOW_SNAT, &flow->flags);
	if (ct->status & IPS_DST_NAT)
		flow->flags |= FLOW_OFFLOAD_DNAT;
		__set_bit(NF_FLOW_DNAT, &flow->flags);

	return flow;

@@ -182,8 +182,6 @@ void flow_offload_free(struct flow_offload *flow)
	default:
		break;
	}
	if (flow->flags & FLOW_OFFLOAD_DYING)
		nf_ct_delete(flow->ct, 0, 0);
	nf_ct_put(flow->ct);
	kfree_rcu(flow, rcu_head);
}
@@ -245,8 +243,10 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
		return err;
	}

	if (flow_table->flags & NF_FLOWTABLE_HW_OFFLOAD)
	if (nf_flowtable_hw_offload(flow_table)) {
		__set_bit(NF_FLOW_HW, &flow->flags);
		nf_flow_offload_add(flow_table, flow);
	}

	return 0;
}
@@ -271,7 +271,7 @@ static void flow_offload_del(struct nf_flowtable *flow_table,

	if (nf_flow_has_expired(flow))
		flow_offload_fixup_ct(flow->ct);
	else if (flow->flags & FLOW_OFFLOAD_TEARDOWN)
	else if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
		flow_offload_fixup_ct_timeout(flow->ct);

	flow_offload_free(flow);
@@ -279,7 +279,7 @@ static void flow_offload_del(struct nf_flowtable *flow_table,

void flow_offload_teardown(struct flow_offload *flow)
{
	flow->flags |= FLOW_OFFLOAD_TEARDOWN;
	set_bit(NF_FLOW_TEARDOWN, &flow->flags);

	flow_offload_fixup_ct_state(flow->ct);
}
@@ -300,7 +300,7 @@ flow_offload_lookup(struct nf_flowtable *flow_table,

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	if (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN))
	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
		return NULL;

	if (unlikely(nf_ct_is_dying(flow->ct)))
@@ -348,19 +348,18 @@ static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
{
	struct nf_flowtable *flow_table = data;

	if (flow->flags & FLOW_OFFLOAD_HW)
		nf_flow_offload_stats(flow_table, flow);

	if (nf_flow_has_expired(flow) || nf_ct_is_dying(flow->ct) ||
	    (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN))) {
		if (flow->flags & FLOW_OFFLOAD_HW) {
			if (!(flow->flags & FLOW_OFFLOAD_HW_DYING))
	    test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
		if (test_bit(NF_FLOW_HW, &flow->flags)) {
			if (!test_bit(NF_FLOW_HW_DYING, &flow->flags))
				nf_flow_offload_del(flow_table, flow);
			else if (flow->flags & FLOW_OFFLOAD_HW_DEAD)
			else if (test_bit(NF_FLOW_HW_DEAD, &flow->flags))
				flow_offload_del(flow_table, flow);
		} else {
			flow_offload_del(flow_table, flow);
		}
	} else if (test_bit(NF_FLOW_HW, &flow->flags)) {
		nf_flow_offload_stats(flow_table, flow);
	}
}

@@ -524,7 +523,7 @@ static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
	if (net_eq(nf_ct_net(flow->ct), dev_net(dev)) &&
	    (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
	     flow->tuplehash[1].tuple.iifidx == dev->ifindex))
		flow_offload_dead(flow);
		flow_offload_teardown(flow);
}

static void nf_flow_table_iterate_cleanup(struct nf_flowtable *flowtable,
+17 −4
Original line number Diff line number Diff line
@@ -144,11 +144,11 @@ static int nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
{
	struct iphdr *iph = ip_hdr(skb);

	if (flow->flags & FLOW_OFFLOAD_SNAT &&
	if (test_bit(NF_FLOW_SNAT, &flow->flags) &&
	    (nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
	     nf_flow_snat_ip(flow, skb, iph, thoff, dir) < 0))
		return -1;
	if (flow->flags & FLOW_OFFLOAD_DNAT &&
	if (test_bit(NF_FLOW_DNAT, &flow->flags) &&
	    (nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
	     nf_flow_dnat_ip(flow, skb, iph, thoff, dir) < 0))
		return -1;
@@ -232,6 +232,13 @@ static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
	return NF_STOLEN;
}

/* Decide whether a hardware re-offload of @flow should be attempted from
 * the software hook path: the flowtable must support hardware offload and
 * the NF_FLOW_HW_REFRESH bit must be set.  test_and_clear_bit() consumes
 * the bit atomically, so only one packet triggers the retry.
 */
static bool nf_flow_offload_refresh(struct nf_flowtable *flow_table,
				    struct flow_offload *flow)
{
	return nf_flowtable_hw_offload(flow_table) &&
	       test_and_clear_bit(NF_FLOW_HW_REFRESH, &flow->flags);
}

unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
@@ -272,6 +279,9 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
	if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff))
		return NF_ACCEPT;

	if (unlikely(nf_flow_offload_refresh(flow_table, flow)))
		nf_flow_offload_add(flow_table, flow);

	if (nf_flow_offload_dst_check(&rt->dst)) {
		flow_offload_teardown(flow);
		return NF_ACCEPT;
@@ -414,11 +424,11 @@ static int nf_flow_nat_ipv6(const struct flow_offload *flow,
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	unsigned int thoff = sizeof(*ip6h);

	if (flow->flags & FLOW_OFFLOAD_SNAT &&
	if (test_bit(NF_FLOW_SNAT, &flow->flags) &&
	    (nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
	     nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
		return -1;
	if (flow->flags & FLOW_OFFLOAD_DNAT &&
	if (test_bit(NF_FLOW_DNAT, &flow->flags) &&
	    (nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
	     nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
		return -1;
@@ -498,6 +508,9 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
				sizeof(*ip6h)))
		return NF_ACCEPT;

	if (unlikely(nf_flow_offload_refresh(flow_table, flow)))
		nf_flow_offload_add(flow_table, flow);

	if (nf_flow_offload_dst_check(&rt->dst)) {
		flow_offload_teardown(flow);
		return NF_ACCEPT;
+94 −70
Original line number Diff line number Diff line
@@ -24,6 +24,7 @@ struct flow_offload_work {
};

struct nf_flow_key {
	struct flow_dissector_key_meta			meta;
	struct flow_dissector_key_control		control;
	struct flow_dissector_key_basic			basic;
	union {
@@ -55,6 +56,7 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
	struct nf_flow_key *mask = &match->mask;
	struct nf_flow_key *key = &match->key;

	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_META, meta);
	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_CONTROL, control);
	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_BASIC, basic);
	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
@@ -62,6 +64,9 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_TCP, tcp);
	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_PORTS, tp);

	key->meta.ingress_ifindex = tuple->iifidx;
	mask->meta.ingress_ifindex = 0xffffffff;

	switch (tuple->l3proto) {
	case AF_INET:
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
@@ -105,7 +110,8 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
	key->tp.dst = tuple->dst_port;
	mask->tp.dst = 0xffff;

	match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_META) |
				      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
				      BIT(FLOW_DISSECTOR_KEY_BASIC) |
				      BIT(FLOW_DISSECTOR_KEY_PORTS);
	return 0;
@@ -444,16 +450,16 @@ int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
	    flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
		return -1;

	if (flow->flags & FLOW_OFFLOAD_SNAT) {
	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
		flow_offload_ipv4_snat(net, flow, dir, flow_rule);
		flow_offload_port_snat(net, flow, dir, flow_rule);
	}
	if (flow->flags & FLOW_OFFLOAD_DNAT) {
	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
		flow_offload_ipv4_dnat(net, flow, dir, flow_rule);
		flow_offload_port_dnat(net, flow, dir, flow_rule);
	}
	if (flow->flags & FLOW_OFFLOAD_SNAT ||
	    flow->flags & FLOW_OFFLOAD_DNAT)
	if (test_bit(NF_FLOW_SNAT, &flow->flags) ||
	    test_bit(NF_FLOW_DNAT, &flow->flags))
		flow_offload_ipv4_checksum(net, flow, flow_rule);

	flow_offload_redirect(flow, dir, flow_rule);
@@ -470,11 +476,11 @@ int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow,
	    flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
		return -1;

	if (flow->flags & FLOW_OFFLOAD_SNAT) {
	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
		flow_offload_ipv6_snat(net, flow, dir, flow_rule);
		flow_offload_port_snat(net, flow, dir, flow_rule);
	}
	if (flow->flags & FLOW_OFFLOAD_DNAT) {
	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
		flow_offload_ipv6_dnat(net, flow, dir, flow_rule);
		flow_offload_port_dnat(net, flow, dir, flow_rule);
	}
@@ -586,23 +592,25 @@ static void nf_flow_offload_init(struct flow_cls_offload *cls_flow,
	cls_flow->cookie = (unsigned long)tuple;
}

static int flow_offload_tuple_add(struct flow_offload_work *offload,
static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
				 struct flow_offload *flow,
				 struct nf_flow_rule *flow_rule,
				  enum flow_offload_tuple_dir dir)
				 enum flow_offload_tuple_dir dir,
				 int priority, int cmd,
				 struct list_head *block_cb_list)
{
	struct nf_flowtable *flowtable = offload->flowtable;
	struct flow_cls_offload cls_flow = {};
	struct flow_block_cb *block_cb;
	struct netlink_ext_ack extack;
	__be16 proto = ETH_P_ALL;
	int err, i = 0;

	nf_flow_offload_init(&cls_flow, proto, offload->priority,
			     FLOW_CLS_REPLACE,
			     &offload->flow->tuplehash[dir].tuple, &extack);
	nf_flow_offload_init(&cls_flow, proto, priority, cmd,
			     &flow->tuplehash[dir].tuple, &extack);
	if (cmd == FLOW_CLS_REPLACE)
		cls_flow.rule = flow_rule->rule;

	list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list) {
	list_for_each_entry(block_cb, block_cb_list, list) {
		err = block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow,
				   block_cb->cb_priv);
		if (err < 0)
@@ -614,23 +622,22 @@ static int flow_offload_tuple_add(struct flow_offload_work *offload,
	return i;
}

static void flow_offload_tuple_del(struct flow_offload_work *offload,
static int flow_offload_tuple_add(struct flow_offload_work *offload,
				  struct nf_flow_rule *flow_rule,
				  enum flow_offload_tuple_dir dir)
{
	struct nf_flowtable *flowtable = offload->flowtable;
	struct flow_cls_offload cls_flow = {};
	struct flow_block_cb *block_cb;
	struct netlink_ext_ack extack;
	__be16 proto = ETH_P_ALL;

	nf_flow_offload_init(&cls_flow, proto, offload->priority,
			     FLOW_CLS_DESTROY,
			     &offload->flow->tuplehash[dir].tuple, &extack);

	list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list)
		block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow, block_cb->cb_priv);
	return nf_flow_offload_tuple(offload->flowtable, offload->flow,
				     flow_rule, dir, offload->priority,
				     FLOW_CLS_REPLACE,
				     &offload->flowtable->flow_block.cb_list);
}

	offload->flow->flags |= FLOW_OFFLOAD_HW_DEAD;
/* Remove the hardware rule for one direction (@dir) of the offloaded flow
 * by sending FLOW_CLS_DESTROY to every callback on the flowtable's block
 * callback list.  No rule payload is needed for destroy, hence NULL.
 */
static void flow_offload_tuple_del(struct flow_offload_work *offload,
				   enum flow_offload_tuple_dir dir)
{
	nf_flow_offload_tuple(offload->flowtable, offload->flow, NULL, dir,
			      offload->priority, FLOW_CLS_DESTROY,
			      &offload->flowtable->flow_block.cb_list);
}

static int flow_offload_rule_add(struct flow_offload_work *offload,
@@ -648,20 +655,20 @@ static int flow_offload_rule_add(struct flow_offload_work *offload,
	return 0;
}

static int flow_offload_work_add(struct flow_offload_work *offload)
static void flow_offload_work_add(struct flow_offload_work *offload)
{
	struct nf_flow_rule *flow_rule[FLOW_OFFLOAD_DIR_MAX];
	int err;

	err = nf_flow_offload_alloc(offload, flow_rule);
	if (err < 0)
		return -ENOMEM;
		return;

	err = flow_offload_rule_add(offload, flow_rule);
	if (err < 0)
		set_bit(NF_FLOW_HW_REFRESH, &offload->flow->flags);

	nf_flow_offload_destroy(flow_rule);

	return err;
}

static void flow_offload_work_del(struct flow_offload_work *offload)
@@ -706,7 +713,6 @@ static void flow_offload_work_handler(struct work_struct *work)
{
	struct flow_offload_work *offload, *next;
	LIST_HEAD(offload_pending_list);
	int ret;

	spin_lock_bh(&flow_offload_pending_list_lock);
	list_replace_init(&flow_offload_pending_list, &offload_pending_list);
@@ -715,9 +721,7 @@ static void flow_offload_work_handler(struct work_struct *work)
	list_for_each_entry_safe(offload, next, &offload_pending_list, list) {
		switch (offload->cmd) {
		case FLOW_CLS_REPLACE:
			ret = flow_offload_work_add(offload);
			if (ret < 0)
				offload->flow->flags &= ~FLOW_OFFLOAD_HW;
			flow_offload_work_add(offload);
			break;
		case FLOW_CLS_DESTROY:
			flow_offload_work_del(offload);
@@ -742,20 +746,33 @@ static void flow_offload_queue_work(struct flow_offload_work *offload)
	schedule_work(&nf_flow_offload_work);
}

void nf_flow_offload_add(struct nf_flowtable *flowtable,
			 struct flow_offload *flow)
static struct flow_offload_work *
nf_flow_offload_work_alloc(struct nf_flowtable *flowtable,
			   struct flow_offload *flow, unsigned int cmd)
{
	struct flow_offload_work *offload;

	offload = kmalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
	if (!offload)
		return;
		return NULL;

	offload->cmd = FLOW_CLS_REPLACE;
	offload->cmd = cmd;
	offload->flow = flow;
	offload->priority = flowtable->priority;
	offload->flowtable = flowtable;
	flow->flags |= FLOW_OFFLOAD_HW;

	return offload;
}


/* Schedule deferred work to install @flow in hardware (FLOW_CLS_REPLACE).
 * Allocation failure is silently ignored: the flow keeps working through
 * the software datapath and offload can be retried later.
 */
void nf_flow_offload_add(struct nf_flowtable *flowtable,
			 struct flow_offload *flow)
{
	struct flow_offload_work *offload;

	offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_REPLACE);
	if (!offload)
		return;

	flow_offload_queue_work(offload);
}
@@ -765,15 +782,11 @@ void nf_flow_offload_del(struct nf_flowtable *flowtable,
{
	struct flow_offload_work *offload;

	offload = kzalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
	offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_DESTROY);
	if (!offload)
		return;

	offload->cmd = FLOW_CLS_DESTROY;
	offload->flow = flow;
	offload->flow->flags |= FLOW_OFFLOAD_HW_DYING;
	offload->flowtable = flowtable;

	set_bit(NF_FLOW_HW_DYING, &flow->flags);
	flow_offload_queue_work(offload);
}

@@ -784,24 +797,19 @@ void nf_flow_offload_stats(struct nf_flowtable *flowtable,
	__s32 delta;

	delta = nf_flow_timeout_delta(flow->timeout);
	if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10) ||
	    flow->flags & FLOW_OFFLOAD_HW_DYING)
	if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10))
		return;

	offload = kzalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
	offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_STATS);
	if (!offload)
		return;

	offload->cmd = FLOW_CLS_STATS;
	offload->flow = flow;
	offload->flowtable = flowtable;

	flow_offload_queue_work(offload);
}

void nf_flow_table_offload_flush(struct nf_flowtable *flowtable)
{
	if (flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD)
	if (nf_flowtable_hw_offload(flowtable))
		flush_work(&nf_flow_offload_work);
}

@@ -830,28 +838,44 @@ static int nf_flow_table_block_setup(struct nf_flowtable *flowtable,
	return err;
}

int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
static int nf_flow_table_offload_cmd(struct flow_block_offload *bo,
				     struct nf_flowtable *flowtable,
				     struct net_device *dev,
				enum flow_block_command cmd)
				     enum flow_block_command cmd,
				     struct netlink_ext_ack *extack)
{
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo = {};
	int err;

	if (!(flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD))
	if (!nf_flowtable_hw_offload(flowtable))
		return 0;

	if (!dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	bo.net		= dev_net(dev);
	bo.block	= &flowtable->flow_block;
	bo.command	= cmd;
	bo.binder_type	= FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	bo.extack	= &extack;
	INIT_LIST_HEAD(&bo.cb_list);
	memset(bo, 0, sizeof(*bo));
	bo->net		= dev_net(dev);
	bo->block	= &flowtable->flow_block;
	bo->command	= cmd;
	bo->binder_type	= FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	bo->extack	= extack;
	INIT_LIST_HEAD(&bo->cb_list);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_FT, bo);
	if (err < 0)
		return err;

	return 0;
}

int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
				struct net_device *dev,
				enum flow_block_command cmd)
{
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo;
	int err;

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_FT, &bo);
	err = nf_flow_table_offload_cmd(&bo, flowtable, dev, cmd, &extack);
	if (err < 0)
		return err;

Loading