Commit 50172733 authored by Jakub Kicinski's avatar Jakub Kicinski
Browse files

Merge tag 'mlx5-updates-2020-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux



Saeed Mahameed says:

====================
mlx5-updates-2020-10-12

Updates to mlx5 driver:
- Cleanup fix of uninitialized pointer read
- xfrm IPSec TX offload
====================

Signed-off-by: default avatarJakub Kicinski <kuba@kernel.org>
parents d5e6f064 5be01904
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -236,6 +236,7 @@ struct mlx5e_accel_fs_tcp;

struct mlx5e_flow_steering {
	struct mlx5_flow_namespace      *ns;
	struct mlx5_flow_namespace      *egress_ns;
#ifdef CONFIG_MLX5_EN_RXNFC
	struct mlx5e_ethtool_steering   ethtool;
#endif
+40 −6
Original line number Diff line number Diff line
@@ -107,6 +107,9 @@ struct mlx5e_accel_tx_state {
#ifdef CONFIG_MLX5_EN_TLS
	struct mlx5e_accel_tx_tls_state tls;
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
	struct mlx5e_accel_tx_ipsec_state ipsec;
#endif
};

static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
@@ -125,22 +128,46 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
	}
#endif

#ifdef CONFIG_MLX5_EN_IPSEC
	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) && xfrm_offload(skb)) {
		if (unlikely(!mlx5e_ipsec_handle_tx_skb(dev, skb, &state->ipsec)))
			return false;
	}
#endif

	return true;
}

/* True when the SKB captured in @state was claimed by IPsec TX offload. */
static inline bool mlx5e_accel_tx_is_ipsec_flow(struct mlx5e_accel_tx_state *state)
{
#ifdef CONFIG_MLX5_EN_IPSEC
	return mlx5e_ipsec_is_tx_flow(&state->ipsec);
#else
	return false;
#endif
}

/* Extra length contributed by offload state ids (currently IPsec only)
 * for this SKB; 0 when the SQ has no IPsec offload active.
 */
static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
						  struct mlx5e_accel_tx_state *state)
{
#ifdef CONFIG_MLX5_EN_IPSEC
	return test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) ?
	       mlx5e_ipsec_tx_ids_len(&state->ipsec) : 0;
#else
	return 0;
#endif
}

/* Part of the eseg touched by TX offloads */
#define MLX5E_ACCEL_ESEG_LEN offsetof(struct mlx5_wqe_eth_seg, mss)

static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
				       struct mlx5e_txqsq *sq,
				       struct sk_buff *skb,
				       struct mlx5_wqe_eth_seg *eseg)
{
#ifdef CONFIG_MLX5_EN_IPSEC
	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state)) {
		if (unlikely(!mlx5e_ipsec_handle_tx_skb(priv, eseg, skb)))
			return false;
	}
	if (xfrm_offload(skb))
		mlx5e_ipsec_tx_build_eseg(priv, skb, eseg);
#endif

#if IS_ENABLED(CONFIG_GENEVE)
@@ -153,11 +180,18 @@ static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,

/* Post-build hook: let offloads patch the just-built WQE.
 * TLS fixes up the ctrl segment; IPsec writes trailer metadata into the
 * inline segment when the flow carries xfrm offload state with a trailer.
 * (Fix: the diff rendering had left the pre-change signature line — the
 * three-argument variant — stacked above the new four-argument one.)
 */
static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
					 struct mlx5e_tx_wqe *wqe,
					 struct mlx5e_accel_tx_state *state,
					 struct mlx5_wqe_inline_seg *inlseg)
{
#ifdef CONFIG_MLX5_EN_TLS
	mlx5e_tls_handle_tx_wqe(sq, &wqe->ctrl, &state->tls);
#endif

#ifdef CONFIG_MLX5_EN_IPSEC
	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) &&
	    state->ipsec.xo && state->ipsec.tailen)
		mlx5e_ipsec_handle_tx_wqe(wqe, &state->ipsec, inlseg);
#endif
}

static inline int mlx5e_accel_init_rx(struct mlx5e_priv *priv)
+3 −0
Original line number Diff line number Diff line
@@ -560,6 +560,9 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
		return;
	}

	if (mlx5_is_ipsec_device(mdev))
		netdev->gso_partial_features |= NETIF_F_GSO_ESP;

	mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
	netdev->features |= NETIF_F_GSO_ESP;
	netdev->hw_features |= NETIF_F_GSO_ESP;
+2 −0
Original line number Diff line number Diff line
@@ -76,6 +76,7 @@ struct mlx5e_ipsec_stats {
};

struct mlx5e_accel_fs_esp;
struct mlx5e_ipsec_tx;

struct mlx5e_ipsec {
	struct mlx5e_priv *en_priv;
@@ -87,6 +88,7 @@ struct mlx5e_ipsec {
	struct mlx5e_ipsec_stats stats;
	struct workqueue_struct *wq;
	struct mlx5e_accel_fs_esp *rx_fs;
	struct mlx5e_ipsec_tx *tx_fs;
};

struct mlx5e_ipsec_esn_state {
+174 −4
Original line number Diff line number Diff line
@@ -34,6 +34,12 @@ struct mlx5e_accel_fs_esp {
	struct mlx5e_accel_fs_esp_prot fs_prot[ACCEL_FS_ESP_NUM_TYPES];
};

/* TX-side IPsec steering state: one egress flow table shared by all TX SAs,
 * reference-counted so the table exists only while at least one rule uses it
 * (see tx_ft_get()/tx_ft_put()).
 */
struct mlx5e_ipsec_tx {
	struct mlx5_flow_table *ft;
	struct mutex mutex; /* Protect IPsec TX steering */
	u32 refcnt;
};

/* IPsec RX flow steering */
static enum mlx5e_traffic_types fs_esp2tt(enum accel_fs_esp_type i)
{
@@ -323,6 +329,77 @@ out:
	mutex_unlock(&fs_prot->prot_mutex);
}

/* IPsec TX flow steering */
/* Create the egress IPsec flow table in the kernel egress namespace.
 * Called with tx_fs->mutex held, on the 0 -> 1 refcount transition.
 */
static int tx_create(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec = priv->ipsec;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *tbl;

	priv->fs.egress_ns = mlx5_get_flow_namespace(priv->mdev,
						     MLX5_FLOW_NAMESPACE_EGRESS_KERNEL);
	if (!priv->fs.egress_ns)
		return -EOPNOTSUPP;

	ft_attr.max_fte = NUM_IPSEC_FTE;
	ft_attr.autogroup.max_num_groups = 1;

	tbl = mlx5_create_auto_grouped_flow_table(priv->fs.egress_ns, &ft_attr);
	if (IS_ERR(tbl)) {
		int err = PTR_ERR(tbl);

		netdev_err(priv->netdev, "fail to create ipsec tx ft err=%d\n", err);
		return err;
	}

	ipsec->tx_fs->ft = tbl;
	return 0;
}

static void tx_destroy(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec = priv->ipsec;

	if (IS_ERR_OR_NULL(ipsec->tx_fs->ft))
		return;

	mlx5_destroy_flow_table(ipsec->tx_fs->ft);
	ipsec->tx_fs->ft = NULL;
}

/* Take a reference on the TX flow table, creating it on first use.
 * On creation failure the reference is rolled back and the error returned.
 */
static int tx_ft_get(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;
	int err = 0;

	mutex_lock(&tx_fs->mutex);
	if (!tx_fs->refcnt++) {
		err = tx_create(priv);
		if (err)
			tx_fs->refcnt--;
	}
	mutex_unlock(&tx_fs->mutex);

	return err;
}

/* Drop a reference on the TX flow table; destroy it when the last one goes. */
static void tx_ft_put(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;

	mutex_lock(&tx_fs->mutex);
	if (!--tx_fs->refcnt)
		tx_destroy(priv);
	mutex_unlock(&tx_fs->mutex);
}

static void setup_fte_common(struct mlx5_accel_esp_xfrm_attrs *attrs,
			     u32 ipsec_obj_id,
			     struct mlx5_flow_spec *spec,
@@ -457,6 +534,54 @@ out:
	return err;
}

/* Install the steering rule for one TX SA: take a table reference, build a
 * spec matching the IPsec marker in metadata_reg_a, and add an allow+encrypt
 * rule. The table reference is released on any failure.
 */
static int tx_add_rule(struct mlx5e_priv *priv,
		       struct mlx5_accel_esp_xfrm_attrs *attrs,
		       u32 ipsec_obj_id,
		       struct mlx5e_ipsec_rule *ipsec_rule)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_spec *spec;
	int err;

	err = tx_ft_get(priv);
	if (err)
		return err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	setup_fte_common(attrs, ipsec_obj_id, spec, &flow_act);

	/* Match the IPsec indicator the TX path plants in metadata_reg_a */
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a,
		 MLX5_ETH_WQE_FT_META_IPSEC);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a,
		 MLX5_ETH_WQE_FT_META_IPSEC);

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
			  MLX5_FLOW_CONTEXT_ACTION_IPSEC_ENCRYPT;

	handle = mlx5_add_flow_rules(priv->ipsec->tx_fs->ft, spec, &flow_act, NULL, 0);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n",
			   attrs->action, err);
	} else {
		ipsec_rule->rule = handle;
	}

out:
	kvfree(spec);
	if (err)
		tx_ft_put(priv);
	return err;
}

static void rx_del_rule(struct mlx5e_priv *priv,
			struct mlx5_accel_esp_xfrm_attrs *attrs,
			struct mlx5e_ipsec_rule *ipsec_rule)
@@ -470,15 +595,27 @@ static void rx_del_rule(struct mlx5e_priv *priv,
	rx_ft_put(priv, attrs->is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4);
}

/* Remove a TX SA rule and drop the table reference taken in tx_add_rule(). */
static void tx_del_rule(struct mlx5e_priv *priv,
			struct mlx5e_ipsec_rule *ipsec_rule)
{
	mlx5_del_flow_rules(ipsec_rule->rule);
	ipsec_rule->rule = NULL;
	tx_ft_put(priv);
}

/* Dispatch SA rule installation to the RX or TX steering path based on the
 * xfrm action; -EOPNOTSUPP when steering was never initialized.
 * (Fix: the diff rendering had left the removed pre-change condition line
 * stacked above the new one, producing an unintended nested `if`.)
 */
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
				  struct mlx5_accel_esp_xfrm_attrs *attrs,
				  u32 ipsec_obj_id,
				  struct mlx5e_ipsec_rule *ipsec_rule)
{
	if (!priv->ipsec->rx_fs)
		return -EOPNOTSUPP;

	if (attrs->action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
		return rx_add_rule(priv, attrs, ipsec_obj_id, ipsec_rule);

	return tx_add_rule(priv, attrs, ipsec_obj_id, ipsec_rule);
}

void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
@@ -488,7 +625,18 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
	if (!priv->ipsec->rx_fs)
		return;

	if (attrs->action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
		rx_del_rule(priv, attrs, ipsec_rule);
	else
		tx_del_rule(priv, ipsec_rule);
}

static void fs_cleanup_tx(struct mlx5e_priv *priv)
{
	mutex_destroy(&priv->ipsec->tx_fs->mutex);
	WARN_ON(priv->ipsec->tx_fs->refcnt);
	kfree(priv->ipsec->tx_fs);
	priv->ipsec->tx_fs = NULL;
}

static void fs_cleanup_rx(struct mlx5e_priv *priv)
@@ -507,6 +655,17 @@ static void fs_cleanup_rx(struct mlx5e_priv *priv)
	priv->ipsec->rx_fs = NULL;
}

/* Allocate and initialize the TX-side IPsec steering context.
 * Uses the kernel-idiomatic sizeof(*ptr) form so the allocation stays
 * correct if the pointer's type ever changes.
 */
static int fs_init_tx(struct mlx5e_priv *priv)
{
	priv->ipsec->tx_fs = kzalloc(sizeof(*priv->ipsec->tx_fs), GFP_KERNEL);
	if (!priv->ipsec->tx_fs)
		return -ENOMEM;

	mutex_init(&priv->ipsec->tx_fs->mutex);
	return 0;
}

static int fs_init_rx(struct mlx5e_priv *priv)
{
	struct mlx5e_accel_fs_esp_prot *fs_prot;
@@ -532,13 +691,24 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv)
	if (!priv->ipsec->rx_fs)
		return;

	fs_cleanup_tx(priv);
	fs_cleanup_rx(priv);
}

/* Set up IPsec flow steering: TX context first, then RX; TX setup is undone
 * when RX initialization fails.
 * (Fix: the diff rendering had left the removed `return fs_init_rx(priv);`
 * line in place, which made the TX-init code below it unreachable.)
 */
int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv)
{
	int err;

	if (!mlx5_is_ipsec_device(priv->mdev) || !priv->ipsec)
		return -EOPNOTSUPP;

	err = fs_init_tx(priv);
	if (err)
		return err;

	err = fs_init_rx(priv);
	if (err)
		fs_cleanup_tx(priv);

	return err;
}
Loading