Commit d64fee0a authored by David S. Miller

Merge tag 'mlx5-updates-2019-03-20' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux



Saeed Mahameed says:

====================
mlx5-updates-2019-03-20

This series includes updates to the mlx5 driver:

1) Saeed Mahameed cleans up compiler warnings
2) Parav Pandit simplifies SR-IOV enable/disable
3) Gustavo A. R. Silva removes a redundant assignment
4) Moshe Shemesh adds Geneve tunnel stateless offload support
5) Eli Britstein adds support for the VLAN modify action and
   replaces the TC VLAN pop and push actions with VLAN modify

Note: this series also includes two simple non-mlx5 patches
(both sketched below):

1) Declare the IANA_VXLAN_UDP_PORT definition in include/net/vxlan.h,
   and use it in some drivers.
2) Declare the GENEVE_UDP_PORT definition in include/net/geneve.h,
   and use it in the mlx5 and nfp drivers.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 071d08af 76b496b1
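
For reference, the two shared definitions mentioned in the note above boil
down to the IANA-assigned UDP ports of the two tunnel protocols. A minimal
sketch of what the new header declarations amount to (the header hunks
themselves are not part of this view):

/* include/net/vxlan.h: IANA-assigned VXLAN UDP port (sketch) */
#define IANA_VXLAN_UDP_PORT	4789

/* include/net/geneve.h: IANA-assigned Geneve UDP port (sketch) */
#define GENEVE_UDP_PORT		6081

Both are plain host-order integers, which is why the call sites below compare
them against UDP destination ports via htons()/cpu_to_be16().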
+2 −2
@@ -826,12 +826,12 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
  */
 static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
 {
-#define IANA_VXLAN_PORT	4789
 	union l4_hdr_info l4;
 
 	l4.hdr = skb_transport_header(skb);
 
-	if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT)))
+	if (!(!skb->encapsulation &&
+	      l4.udp->dest == htons(IANA_VXLAN_UDP_PORT)))
 		return false;
 
 	skb_checksum_help(skb);
+47 −0
@@ -884,6 +884,53 @@ static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
 		MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
 }
 
+static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
+{
+	return MLX5_CAP_ETH(mdev, swp) &&
+		MLX5_CAP_ETH(mdev, swp_csum) && MLX5_CAP_ETH(mdev, swp_lso);
+}
+
+struct mlx5e_swp_spec {
+	__be16 l3_proto;
+	u8 l4_proto;
+	u8 is_tun;
+	__be16 tun_l3_proto;
+	u8 tun_l4_proto;
+};
+
+static inline void
+mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
+		   struct mlx5e_swp_spec *swp_spec)
+{
+	/* SWP offsets are in 2-byte words */
+	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
+	if (swp_spec->l3_proto == htons(ETH_P_IPV6))
+		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
+	if (swp_spec->l4_proto) {
+		eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
+		if (swp_spec->l4_proto == IPPROTO_UDP)
+			eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
+	}
+
+	if (swp_spec->is_tun) {
+		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+		if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
+			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+	} else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
+		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
+		if (swp_spec->l3_proto == htons(ETH_P_IPV6))
+			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+	}
+	switch (swp_spec->tun_l4_proto) {
+	case IPPROTO_UDP:
+		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+		/* fall through */
+	case IPPROTO_TCP:
+		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+		break;
+	}
+}
+
 static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq,
 				      struct mlx5e_tx_wqe **wqe,
 				      u16 *pi)
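
The "2-byte words" convention in mlx5e_set_eseg_swp() above can be
sanity-checked by hand. A hypothetical worked example (packet layout assumed
for illustration, not taken from the patch): an untagged Ethernet frame
(14-byte MAC header) with a 20-byte outer IPv4 header and a UDP tunnel header
gives skb_network_offset() == 14 and skb_transport_offset() == 34, so:

	/* Hypothetical Geneve-over-IPv4 skb: 14B MAC + 20B IPv4 + 8B UDP */
	struct mlx5e_swp_spec spec = {
		.l3_proto     = htons(ETH_P_IP),
		.l4_proto     = IPPROTO_UDP,	/* tunnel header is UDP */
		.is_tun       = true,
		.tun_l3_proto = htons(ETH_P_IP),
		.tun_l4_proto = IPPROTO_TCP,
	};
	/* mlx5e_set_eseg_swp(skb, eseg, &spec) would then program
	 *   eseg->swp_outer_l3_offset = 14 / 2 = 7
	 *   eseg->swp_outer_l4_offset = 34 / 2 = 17
	 * and the inner offsets analogously from the inner headers.
	 */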
+15 −15
@@ -165,23 +165,23 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
 }
 
 /**
- * update_buffer_lossy()
- *   mtu: device's MTU
- *   pfc_en: <input> current pfc configuration
- *   buffer: <input> current prio to buffer mapping
- *   xoff:   <input> xoff value
- *   port_buffer: <output> port receive buffer configuration
- *   change: <output>
+ *	update_buffer_lossy	- Update buffer configuration based on pfc
+ *	@mtu: device's MTU
+ *	@pfc_en: <input> current pfc configuration
+ *	@buffer: <input> current prio to buffer mapping
+ *	@xoff:   <input> xoff value
+ *	@port_buffer: <output> port receive buffer configuration
+ *	@change: <output>
  *
- *   Update buffer configuration based on pfc configuration and priority
- *   to buffer mapping.
- *   Buffer's lossy bit is changed to:
- *     lossless if there is at least one PFC enabled priority mapped to this buffer
- *     lossy if all priorities mapped to this buffer are PFC disabled
+ *	Update buffer configuration based on pfc configuration and
+ *	priority to buffer mapping.
+ *	Buffer's lossy bit is changed to:
+ *		lossless if there is at least one PFC enabled priority
+ *		mapped to this buffer, lossy if all priorities mapped to
+ *		this buffer are PFC disabled
  *
- *   Return:
- *     Return 0 if no error.
- *     Set change to true if buffer configuration is modified.
+ *	@return: 0 if no error,
+ *	sets change to true if buffer configuration was modified.
  */
 static int update_buffer_lossy(unsigned int mtu,
 			       u8 pfc_en, u8 *buffer, u32 xoff,
+51 −0
@@ -40,6 +40,57 @@
 #include "en_accel/tls_rxtx.h"
 #include "en.h"
 
+#if IS_ENABLED(CONFIG_GENEVE)
+static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
+{
+	return mlx5_tx_swp_supported(mdev);
+}
+
+static inline void
+mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
+{
+	struct mlx5e_swp_spec swp_spec = {};
+	unsigned int offset = 0;
+	__be16 l3_proto;
+	u8 l4_proto;
+
+	l3_proto = vlan_get_protocol(skb);
+	switch (l3_proto) {
+	case htons(ETH_P_IP):
+		l4_proto = ip_hdr(skb)->protocol;
+		break;
+	case htons(ETH_P_IPV6):
+		l4_proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
+		break;
+	default:
+		return;
+	}
+
+	if (l4_proto != IPPROTO_UDP ||
+	    udp_hdr(skb)->dest != cpu_to_be16(GENEVE_UDP_PORT))
+		return;
+	swp_spec.l3_proto = l3_proto;
+	swp_spec.l4_proto = l4_proto;
+	swp_spec.is_tun = true;
+	if (inner_ip_hdr(skb)->version == 6) {
+		swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
+		swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
+	} else {
+		swp_spec.tun_l3_proto = htons(ETH_P_IP);
+		swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
+	}
+
+	mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
+}
+
+#else
+static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
+{
+	return false;
+}
+
+#endif /* CONFIG_GENEVE */
+
 static inline void
 mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
 {
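
Note that mlx5e_tx_tunnel_accel() intentionally has no stub in the #else
branch, so any caller must be guarded by the same config option. A
hypothetical call site, assumed for illustration (the actual TX-path wiring
is part of another patch in the series and not shown in this view):

#if IS_ENABLED(CONFIG_GENEVE)
	if (skb->encapsulation)
		mlx5e_tx_tunnel_accel(skb, eseg);
#endif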
+12 −24
@@ -136,7 +136,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
 				struct mlx5_wqe_eth_seg *eseg, u8 mode,
 				struct xfrm_offload *xo)
 {
-	u8 proto;
+	struct mlx5e_swp_spec swp_spec = {};
 
 	/* Tunnel Mode:
 	 * SWP:      OutL3       InL3  InL4
@@ -146,35 +146,23 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
 	 * SWP:      OutL3       InL4
 	 *           InL3
 	 * Pkt: MAC  IP     ESP  L4
-	 *
-	 * Offsets are in 2-byte words, counting from start of frame
 	 */
-	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
-	if (skb->protocol == htons(ETH_P_IPV6))
-		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
-
-	if (mode == XFRM_MODE_TUNNEL) {
-		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+	swp_spec.l3_proto = skb->protocol;
+	swp_spec.is_tun = mode == XFRM_MODE_TUNNEL;
+	if (swp_spec.is_tun) {
 		if (xo->proto == IPPROTO_IPV6) {
-			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
-			proto = inner_ipv6_hdr(skb)->nexthdr;
+			swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
+			swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
 		} else {
-			proto = inner_ip_hdr(skb)->protocol;
+			swp_spec.tun_l3_proto = htons(ETH_P_IP);
+			swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
 		}
 	} else {
-		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
-		if (skb->protocol == htons(ETH_P_IPV6))
-			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
-		proto = xo->proto;
-	}
-	switch (proto) {
-	case IPPROTO_UDP:
-		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
-		/* Fall through */
-	case IPPROTO_TCP:
-		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
-		break;
+		swp_spec.tun_l3_proto = skb->protocol;
+		swp_spec.tun_l4_proto = xo->proto;
 	}
+
+	mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
 }
 
 void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
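
To see that the refactor preserves behavior, consider a worked example
(values assumed for illustration): an ESP packet in tunnel mode with an IPv4
outer header and an IPv6/TCP inner packet ends up with

	struct mlx5e_swp_spec swp_spec = {
		.l3_proto     = htons(ETH_P_IP),	/* skb->protocol */
		.is_tun       = true,			/* mode == XFRM_MODE_TUNNEL */
		.tun_l3_proto = htons(ETH_P_IPV6),	/* xo->proto == IPPROTO_IPV6 */
		.tun_l4_proto = IPPROTO_TCP,		/* inner_ipv6_hdr(skb)->nexthdr */
	};

and mlx5e_set_eseg_swp() performs exactly the flag and offset writes that the
removed open-coded block used to do, now shared with the Geneve TX path.
Since .l4_proto stays zero, no outer L4 offset is programmed, matching the
old code.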