Commit 39ac237c authored by Paul Blakey, committed by Saeed Mahameed

net/mlx5: E-Switch, Refactor chains and priorities



To support the entire chain and prio range (32-bit chains, 16-bit prios),
instead of using a static array of chains/prios of limited size, create
them dynamically, and use an rhashtable to look up existing chain/prio
combinations.

This will be used in the next patch to actually increase the supported
range, using unmanaged tables support and the ignore flow level capability.

Signed-off-by: Paul Blakey <paulb@mellanox.com>
Reviewed-by: Roi Dayan <roid@mellanox.com>
Reviewed-by: Oz Shlomo <ozsh@mellanox.com>
Reviewed-by: Mark Bloch <markb@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 82270e12
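
The rhashtable mentioned in the commit message replaces fixed-size array
indexing with on-demand lookup. Below is a minimal sketch of that pattern
using the kernel rhashtable API; the struct and helper names are
hypothetical stand-ins, not the driver's actual code (the new
eswitch_offloads_chains.c is not rendered on this page).

#include <linux/err.h>
#include <linux/rhashtable.h>
#include <linux/slab.h>

struct mlx5_flow_table;

/* Hypothetical key: the full 32-bit chain and 16-bit prio the commit
 * message talks about, plus the table level.
 */
struct chain_prio_key {
	u32 chain;
	u16 prio;
	u16 level;
};

struct chain_prio_entry {
	struct rhash_head node;
	struct chain_prio_key key;
	struct mlx5_flow_table *ft;	/* lazily created table */
	int ref;			/* rules using this table */
};

static const struct rhashtable_params chain_prio_params = {
	.head_offset = offsetof(struct chain_prio_entry, node),
	.key_offset  = offsetof(struct chain_prio_entry, key),
	.key_len     = sizeof(struct chain_prio_key),
	.automatic_shrinking = true,
};

/* Return the existing entry for a chain/prio/level, or insert a new one. */
static struct chain_prio_entry *
chain_prio_get(struct rhashtable *ht, u32 chain, u16 prio, u16 level)
{
	struct chain_prio_key key = {
		.chain = chain, .prio = prio, .level = level,
	};
	struct chain_prio_entry *e;
	int err;

	e = rhashtable_lookup_fast(ht, &key, chain_prio_params);
	if (e) {
		e->ref++;
		return e;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return ERR_PTR(-ENOMEM);

	e->key = key;
	e->ref = 1;
	err = rhashtable_insert_fast(ht, &e->node, chain_prio_params);
	if (err) {
		kfree(e);
		return ERR_PTR(err);
	}
	return e;
}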
drivers/net/ethernet/mellanox/mlx5/core/Makefile (+1 −1)
@@ -42,7 +42,7 @@ mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
# Core extra
#
mlx5_core-$(CONFIG_MLX5_ESWITCH)   += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
-				      ecpf.o rdma.o
+				      ecpf.o rdma.o eswitch_offloads_chains.o
mlx5_core-$(CONFIG_MLX5_MPFS)      += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN)          += lib/vxlan.o
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c (+6 −5)
@@ -41,6 +41,7 @@
#include <net/ipv6_stubs.h>

#include "eswitch.h"
#include "eswitch_offloads_chains.h"
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
@@ -1262,25 +1263,25 @@ static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
	case TC_SETUP_CLSFLOWER:
		memcpy(&tmp, f, sizeof(*f));

-		if (!mlx5_eswitch_prios_supported(esw) ||
+		if (!mlx5_esw_chains_prios_supported(esw) ||
		    tmp.common.chain_index)
			return -EOPNOTSUPP;

		/* Re-use tc offload path by moving the ft flow to the
		 * reserved ft chain.
		 *
-		 * FT offload can use prio range [0, INT_MAX], so we
-		 * normalize it to range [1, mlx5_eswitch_get_prio_range(esw)]
+		 * FT offload can use prio range [0, INT_MAX], so we normalize
+		 * it to range [1, mlx5_esw_chains_get_prio_range(esw)]
		 * as with tc, where prio 0 isn't supported.
		 *
		 * We only support chain 0 of FT offload.
		 */
-		if (tmp.common.prio >= mlx5_eswitch_get_prio_range(esw))
+		if (tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw))
			return -EOPNOTSUPP;
		if (tmp.common.chain_index != 0)
			return -EOPNOTSUPP;

-		tmp.common.chain_index = mlx5_eswitch_get_ft_chain(esw);
+		tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw);
		tmp.common.prio++;
		err = mlx5e_rep_setup_tc_cls_flower(priv, &tmp, flags);
		memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
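
The comment in this hunk compresses the whole FT-to-tc prio mapping; as a
hedged illustration, the bounds check plus shift amounts to the following
(the helper name is hypothetical, not part of the patch):

/* Hypothetical helper restating the hunk above: an FT offload prio in
 * [0, INT_MAX] is shifted into tc's [1, mlx5_esw_chains_get_prio_range()]
 * range, since prio 0 is not valid for tc; anything past the range is
 * rejected rather than wrapped.
 */
static int normalize_ft_prio(struct mlx5_eswitch *esw, u32 ft_prio,
			     u32 *tc_prio)
{
	if (ft_prio >= mlx5_esw_chains_get_prio_range(esw))
		return -EOPNOTSUPP;

	*tc_prio = ft_prio + 1;
	return 0;
}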
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c (+8 −6)
@@ -51,6 +51,7 @@
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
#include "eswitch_offloads_chains.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
@@ -1083,7 +1084,7 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
	memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->split_count = 0;
-	slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN;
+	slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;

	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
	if (!IS_ERR(rule))
@@ -1100,7 +1101,7 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
	memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->split_count = 0;
-	slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN;
+	slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
	flow_flag_clear(flow, SLOW);
}
@@ -1160,19 +1161,18 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-	u32 max_chain = mlx5_eswitch_get_chain_range(esw);
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
-	u16 max_prio = mlx5_eswitch_get_prio_range(esw);
	struct net_device *out_dev, *encap_dev = NULL;
	struct mlx5_fc *counter = NULL;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *out_priv;
	bool encap_valid = true;
+	u32 max_prio, max_chain;
	int err = 0;
	int out_index;

-	if (!mlx5_eswitch_prios_supported(esw) && attr->prio != 1) {
+	if (!mlx5_esw_chains_prios_supported(esw) && attr->prio != 1) {
		NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW");
		return -EOPNOTSUPP;
	}
@@ -1182,11 +1182,13 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
	 * FDB_FT_CHAIN which is outside tc range.
	 * See mlx5e_rep_setup_ft_cb().
	 */
+	max_chain = mlx5_esw_chains_get_chain_range(esw);
	if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
		NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
		return -EOPNOTSUPP;
	}

+	max_prio = mlx5_esw_chains_get_prio_range(esw);
	if (attr->prio > max_prio) {
		NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
		return -EOPNOTSUPP;
@@ -3469,7 +3471,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
			break;
		case FLOW_ACTION_GOTO: {
			u32 dest_chain = act->chain_index;
-			u32 max_chain = mlx5_eswitch_get_chain_range(esw);
+			u32 max_chain = mlx5_esw_chains_get_chain_range(esw);

			if (ft_flow) {
				NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h (+8 −22)
@@ -157,7 +157,7 @@ enum offloads_fdb_flags {
	ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED = BIT(0),
};

-extern const unsigned int ESW_POOLS[4];
+struct mlx5_esw_chains_priv;

struct mlx5_eswitch_fdb {
	union {
@@ -182,14 +182,7 @@ struct mlx5_eswitch_fdb {
			struct mlx5_flow_handle *miss_rule_multi;
			int vlan_push_pop_refcount;

-			struct {
-				struct mlx5_flow_table *fdb;
-				u32 num_rules;
-			} fdb_prio[FDB_NUM_CHAINS][FDB_TC_MAX_PRIO + 1][FDB_TC_LEVELS_PER_PRIO];
-			/* Protects fdb_prio table */
-			struct mutex fdb_prio_lock;
-
-			int fdb_left[ARRAY_SIZE(ESW_POOLS)];
+			struct mlx5_esw_chains_priv *esw_chains_priv;
		} offloads;
	};
	u32 flags;
@@ -355,18 +348,6 @@ mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_esw_flow_attr *attr);

-bool
-mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw);
-
-u16
-mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw);
-
-u32
-mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw);
-
-unsigned int
-mlx5_eswitch_get_ft_chain(struct mlx5_eswitch *esw);
-
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest);
@@ -391,6 +372,11 @@ enum {
	MLX5_ESW_DEST_ENCAP_VALID   = BIT(1),
};

+enum {
+	MLX5_ESW_ATTR_FLAG_VLAN_HANDLED  = BIT(0),
+	MLX5_ESW_ATTR_FLAG_SLOW_PATH     = BIT(1),
+};
+
struct mlx5_esw_flow_attr {
	struct mlx5_eswitch_rep *in_rep;
	struct mlx5_core_dev	*in_mdev;
@@ -404,7 +390,6 @@ struct mlx5_esw_flow_attr {
	u16	vlan_vid[MLX5_FS_VLAN_DEPTH];
	u8	vlan_prio[MLX5_FS_VLAN_DEPTH];
	u8	total_vlan;
-	bool	vlan_handled;
	struct {
		u32 flags;
		struct mlx5_eswitch_rep *rep;
@@ -419,6 +404,7 @@ struct mlx5_esw_flow_attr {
	u32	chain;
	u16	prio;
	u32	dest_chain;
+	u32	flags;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
};
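
For illustration only (the patch open-codes these bit operations):
replacing the dedicated bool vlan_handled with a flags word makes
set/test/clear uniform and lets new conditions such as the slow path
share one field. The wrappers below are hypothetical, not the driver's.

/* Hypothetical wrappers over the new attr->flags word. */
static inline void esw_attr_set_vlan_handled(struct mlx5_esw_flow_attr *attr)
{
	attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
}

static inline bool esw_attr_vlan_handled(const struct mlx5_esw_flow_attr *attr)
{
	return attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
}

static inline bool esw_attr_slow_path(const struct mlx5_esw_flow_attr *attr)
{
	return attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH;
}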

drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c (+45 −258)
@@ -37,6 +37,7 @@
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "eswitch_offloads_chains.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
@@ -47,10 +48,6 @@
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)

-#define fdb_prio_table(esw, chain, prio, level) \
-	(esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]

#define UPLINK_REP_INDEX 0

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
@@ -62,37 +59,6 @@ static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
	return &esw->offloads.vport_reps[idx];
}

-static struct mlx5_flow_table *
-esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
-static void
-esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
-
-bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
-{
-	return (!!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED));
-}
-
-u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
-{
-	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
-		return FDB_TC_MAX_CHAIN;
-
-	return 0;
-}
-
-u32 mlx5_eswitch_get_ft_chain(struct mlx5_eswitch *esw)
-{
-	return mlx5_eswitch_get_chain_range(esw) + 1;
-}
-
-u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
-{
-	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
-		return FDB_TC_MAX_PRIO;
-
-	return 1;
-}
-
static bool
esw_check_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
				   const struct mlx5_vport *vport)
@@ -180,10 +146,17 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
-		if (attr->dest_chain) {
-			struct mlx5_flow_table *ft;
-
-			ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0);
+		struct mlx5_flow_table *ft;
+
+		if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
+			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+			dest[i].ft = esw->fdb_table.offloads.slow_fdb;
+			i++;
+		} else if (attr->dest_chain) {
+			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+			ft = mlx5_esw_chains_get_table(esw, attr->dest_chain,
+						       1, 0);
			if (IS_ERR(ft)) {
				rule = ERR_CAST(ft);
				goto err_create_goto_table;
@@ -228,7 +201,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

-	fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
+	fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio,
+					!!split);
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
@@ -247,10 +221,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
	return rule;

err_add_rule:
-	esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
+	mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, !!split);
err_esw_get:
-	if (attr->dest_chain)
-		esw_put_prio_table(esw, attr->dest_chain, 1, 0);
+	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
+		mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
	return rule;
}
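
The unwind labels above follow the usual refcount contract: every
successful mlx5_esw_chains_get_table() is balanced by one
mlx5_esw_chains_put_table() with the same chain/prio/level, released in
reverse acquisition order. A minimal sketch of the pattern, with a
hypothetical insert_rule() callee standing in for the real rule add:

static struct mlx5_flow_handle *insert_rule(struct mlx5_flow_table *fdb);

static struct mlx5_flow_handle *
example_add_rule(struct mlx5_eswitch *esw, u32 chain, u32 prio)
{
	struct mlx5_flow_table *fdb;
	struct mlx5_flow_handle *rule;

	/* Takes a reference; the table is created on first use. */
	fdb = mlx5_esw_chains_get_table(esw, chain, prio, 0);
	if (IS_ERR(fdb))
		return ERR_CAST(fdb);

	rule = insert_rule(fdb);
	if (IS_ERR(rule))
		/* Balance the get; the table is freed at refcount zero. */
		mlx5_esw_chains_put_table(esw, chain, prio, 0);

	return rule;
}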
@@ -267,13 +241,13 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
	struct mlx5_flow_handle *rule;
	int i;

-	fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0);
+	fast_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

-	fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1);
+	fwd_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 1);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
@@ -310,9 +284,9 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,

	return rule;
add_err:
-	esw_put_prio_table(esw, attr->chain, attr->prio, 1);
+	mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1);
err_get_fwd:
-	esw_put_prio_table(esw, attr->chain, attr->prio, 0);
+	mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}
@@ -337,12 +311,13 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule)  {
-		esw_put_prio_table(esw, attr->chain, attr->prio, 1);
-		esw_put_prio_table(esw, attr->chain, attr->prio, 0);
+		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1);
+		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
	} else {
-		esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
+		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
+					  !!split);
		if (attr->dest_chain)
-			esw_put_prio_table(esw, attr->dest_chain, 1, 0);
+			mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
	}
}

@@ -456,7 +431,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
	if (err)
		goto unlock;

-	attr->vlan_handled = false;
+	attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;

	vport = esw_vlan_action_get_vport(attr, push, pop);

@@ -464,7 +439,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
-			attr->vlan_handled = true;
+			attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
		}

		goto unlock;
@@ -495,7 +470,7 @@ skip_set_push:
	}
out:
	if (!err)
-		attr->vlan_handled = true;
+		attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
unlock:
	mutex_unlock(&esw->state_lock);
	return err;
@@ -513,7 +488,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

-	if (!attr->vlan_handled)
+	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
@@ -587,8 +562,8 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

-	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
-					&flow_act, &dest, 1);
+	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
@@ -829,8 +804,8 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

-	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
-					&flow_act, &dest, 1);
+	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev,  "FDB: Failed to add unicast miss flow rule err %d\n", err);
@@ -844,8 +819,8 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
-	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
-					&flow_act, &dest, 1);
+	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
@@ -860,175 +835,6 @@ out:
	return err;
}

-#define ESW_OFFLOADS_NUM_GROUPS  4
-
-/* Firmware currently has 4 pool of 4 sizes that it supports (ESW_POOLS),
- * and a virtual memory region of 16M (ESW_SIZE), this region is duplicated
- * for each flow table pool. We can allocate up to 16M of each pool,
- * and we keep track of how much we used via put/get_sz_to_pool.
- * Firmware doesn't report any of this for now.
- * ESW_POOL is expected to be sorted from large to small
- */
-#define ESW_SIZE (16 * 1024 * 1024)
-const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
-				    64 * 1024, 4 * 1024 };
-
-static int
-get_sz_from_pool(struct mlx5_eswitch *esw)
-{
-	int sz = 0, i;
-
-	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
-		if (esw->fdb_table.offloads.fdb_left[i]) {
-			--esw->fdb_table.offloads.fdb_left[i];
-			sz = ESW_POOLS[i];
-			break;
-		}
-	}
-
-	return sz;
-}
-
-static void
-put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
-		if (sz >= ESW_POOLS[i]) {
-			++esw->fdb_table.offloads.fdb_left[i];
-			break;
-		}
-	}
-}
-
-static struct mlx5_flow_table *
-create_next_size_table(struct mlx5_eswitch *esw,
-		       struct mlx5_flow_namespace *ns,
-		       u16 table_prio,
-		       int level,
-		       u32 flags)
-{
-	struct mlx5_flow_table_attr ft_attr = {};
-	struct mlx5_flow_table *fdb;
-	int sz;
-
-	sz = get_sz_from_pool(esw);
-	if (!sz)
-		return ERR_PTR(-ENOSPC);
-
-	ft_attr.max_fte = sz;
-	ft_attr.prio = table_prio;
-	ft_attr.level = level;
-	ft_attr.flags = flags;
-	ft_attr.autogroup.max_num_groups = ESW_OFFLOADS_NUM_GROUPS;
-	fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
-	if (IS_ERR(fdb)) {
-		esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n",
-			 (int)PTR_ERR(fdb), table_prio, level, sz);
-		put_sz_to_pool(esw, sz);
-	}
-
-	return fdb;
-}
-
-static struct mlx5_flow_table *
-esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
-{
-	struct mlx5_core_dev *dev = esw->dev;
-	struct mlx5_flow_table *fdb = NULL;
-	struct mlx5_flow_namespace *ns;
-	int table_prio, l = 0;
-	u32 flags = 0;
-
-	if (chain == FDB_TC_SLOW_PATH_CHAIN)
-		return esw->fdb_table.offloads.slow_fdb;
-
-	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
-
-	fdb = fdb_prio_table(esw, chain, prio, level).fdb;
-	if (fdb) {
-		/* take ref on earlier levels as well */
-		while (level >= 0)
-			fdb_prio_table(esw, chain, prio, level--).num_rules++;
-		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
-		return fdb;
-	}
-
-	ns = mlx5_get_fdb_sub_ns(dev, chain);
-	if (!ns) {
-		esw_warn(dev, "Failed to get FDB sub namespace\n");
-		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
-		return ERR_PTR(-EOPNOTSUPP);
-	}
-
-	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
-		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
-			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
-
-	table_prio = prio - 1;
-
-	/* create earlier levels for correct fs_core lookup when
-	 * connecting tables
-	 */
-	for (l = 0; l <= level; l++) {
-		if (fdb_prio_table(esw, chain, prio, l).fdb) {
-			fdb_prio_table(esw, chain, prio, l).num_rules++;
-			continue;
-		}
-
-		fdb = create_next_size_table(esw, ns, table_prio, l, flags);
-		if (IS_ERR(fdb)) {
-			l--;
-			goto err_create_fdb;
-		}
-
-		fdb_prio_table(esw, chain, prio, l).fdb = fdb;
-		fdb_prio_table(esw, chain, prio, l).num_rules = 1;
-	}
-
-	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
-	return fdb;
-
-err_create_fdb:
-	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
-	if (l >= 0)
-		esw_put_prio_table(esw, chain, prio, l);
-
-	return fdb;
-}
-
-static void
-esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
-{
-	int l;
-
-	if (chain == FDB_TC_SLOW_PATH_CHAIN)
-		return;
-
-	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
-
-	for (l = level; l >= 0; l--) {
-		if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0)
-			continue;
-
-		put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte);
-		mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb);
-		fdb_prio_table(esw, chain, prio, l).fdb = NULL;
-	}
-
-	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
-}
-
-static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw)
-{
-	/* If lazy creation isn't supported, deref the fast path tables */
-	if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) {
-		esw_put_prio_table(esw, 0, 1, 1);
-		esw_put_prio_table(esw, 0, 1, 0);
-	}
-}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

@@ -1061,16 +867,16 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
-	u32 *flow_group_in, max_flow_counter;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
-	int table_size, ix, err = 0, i;
+	u32 flags = 0, *flow_group_in;
+	int table_size, ix, err = 0;
	struct mlx5_flow_group *g;
-	u32 flags = 0, fdb_max;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;
@@ -1089,19 +895,6 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
		goto ns_err;
	}

-	max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
-			    MLX5_CAP_GEN(dev, max_flow_counter_15_0);
-	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
-
-	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(%d))\n",
-		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
-		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
-		  fdb_max);
-
-	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++)
-		esw->fdb_table.offloads.fdb_left[i] =
-			ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
		MLX5_ESW_MISS_FLOWS + esw->total_vports;

@@ -1124,16 +917,10 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

-	/* If lazy creation isn't supported, open the fast path tables now */
-	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
-	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
-		esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
-		esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n");
-		esw_get_prio_table(esw, 0, 1, 0);
-		esw_get_prio_table(esw, 0, 1, 1);
-	} else {
-		esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n");
-		esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
+	err = mlx5_esw_chains_create(esw);
+	if (err) {
+		esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
+		goto fdb_chains_err;
	}

	/* create send-to-vport group */
@@ -1224,7 +1011,8 @@ miss_err:
peer_miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
-	esw_destroy_offloads_fast_fdb_tables(esw);
+	mlx5_esw_chains_destroy(esw);
+fdb_chains_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
	/* Holds true only as long as DMFS is the default */
@@ -1246,8 +1034,8 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

+	mlx5_esw_chains_destroy(esw);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
-	esw_destroy_offloads_fast_fdb_tables(esw);
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
				     MLX5_FLOW_STEERING_MODE_DMFS);
@@ -2117,7 +1905,6 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
		total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);

	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
-	mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);

	err = esw_create_uplink_offloads_acl_tables(esw);
	if (err)
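
The new eswitch_offloads_chains.{c,h} files themselves are not rendered
in this view. Inferred from the call sites in the hunks above (so treat
the exact parameter types as approximate; the authoritative declarations
live in the new header), the exported surface looks like:

bool mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw);
u32 mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw);
u32 mlx5_esw_chains_get_prio_range(struct mlx5_eswitch *esw);
u32 mlx5_esw_chains_get_ft_chain(struct mlx5_eswitch *esw);

struct mlx5_flow_table *
mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
			  u32 level);
void mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
			       u32 level);

int mlx5_esw_chains_create(struct mlx5_eswitch *esw);
void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw);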