Commit 1c715a65 authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull networking fixes from David Miller:

 1) Various mptcp fixups from Florian Westphal and Geert Uytterhoeven.

 2) Don't clear the node/port GUIDs after we've assigned the correct
    values to them. From Leon Romanovsky.

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net:
  net/core: Do not clear VF index for node/port GUIDs query
  mptcp: Fix undefined mptcp_handle_ipv6_mapped for modular IPV6
  net: drop_monitor: Use kstrdup
  udp: document udp_rcv_segment special case for looped packets
  mptcp: MPTCP_HMAC_TEST should depend on MPTCP
  mptcp: Fix incorrect IPV6 dependency check
  Revert "MAINTAINERS: mptcp@ mailing list is moderated"
  mptcp: handle tcp fallback when using syn cookies
  mptcp: avoid a lockdep splat when mcast group was joined
  mptcp: fix panic on user pointer access
  mptcp: defer freeing of cached ext until last moment
  net: mvneta: fix XDP support if sw bm is used as fallback
  sch_choke: Use kvcalloc
  mptcp: Fix build with PROC_FS disabled.
  MAINTAINERS: mptcp@ mailing list is moderated
parents 5e237e8c 9fbf082f
Loading
Loading
Loading
Loading
+7 −3
Original line number Diff line number Diff line
@@ -324,8 +324,7 @@
	      ETH_HLEN + ETH_FCS_LEN,			     \
	      cache_line_size())

#define MVNETA_SKB_HEADROOM	(max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \
				 NET_IP_ALIGN)
#define MVNETA_SKB_HEADROOM	max(XDP_PACKET_HEADROOM, NET_SKB_PAD)
#define MVNETA_SKB_PAD	(SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
			 MVNETA_SKB_HEADROOM))
#define MVNETA_SKB_SIZE(len)	(SKB_DATA_ALIGN(len) + MVNETA_SKB_PAD)
@@ -1167,6 +1166,7 @@ bm_mtu_err:
	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);

	pp->bm_priv = NULL;
	pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
	mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
	netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
}
@@ -4948,7 +4948,6 @@ static int mvneta_probe(struct platform_device *pdev)
	SET_NETDEV_DEV(dev, &pdev->dev);

	pp->id = global_port_id++;
	pp->rx_offset_correction = MVNETA_SKB_HEADROOM;

	/* Obtain access to BM resources if enabled and already initialized */
	bm_node = of_parse_phandle(dn, "buffer-manager", 0);
@@ -4973,6 +4972,10 @@ static int mvneta_probe(struct platform_device *pdev)
	}
	of_node_put(bm_node);

	/* sw buffer management */
	if (!pp->bm_priv)
		pp->rx_offset_correction = MVNETA_SKB_HEADROOM;

	err = mvneta_init(&pdev->dev, pp);
	if (err < 0)
		goto err_netdev;
@@ -5130,6 +5133,7 @@ static int mvneta_resume(struct device *device)
		err = mvneta_bm_port_init(pdev, pp);
		if (err < 0) {
			dev_info(&pdev->dev, "use SW buffer management\n");
			pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
			pp->bm_priv = NULL;
		}
	}
+0 −2
Original line number Diff line number Diff line
@@ -148,9 +148,7 @@ struct tcp_request_sock {
	const struct tcp_request_sock_ops *af_specific;
	u64				snt_synack; /* first SYNACK sent time */
	bool				tfo_listener;
#if IS_ENABLED(CONFIG_MPTCP)
	bool				is_mptcp;
#endif
	u32				txhash;
	u32				rcv_isn;
	u32				snt_isn;
+3 −6
Original line number Diff line number Diff line
@@ -174,15 +174,12 @@ static inline bool mptcp_skb_can_collapse(const struct sk_buff *to,

#endif /* CONFIG_MPTCP */

void mptcp_handle_ipv6_mapped(struct sock *sk, bool mapped);

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
int mptcpv6_init(void);
void mptcpv6_handle_mapped(struct sock *sk, bool mapped);
#elif IS_ENABLED(CONFIG_IPV6)
static inline int mptcpv6_init(void)
{
	return 0;
}
static inline int mptcpv6_init(void) { return 0; }
static inline void mptcpv6_handle_mapped(struct sock *sk, bool mapped) { }
#endif

#endif /* __NET_MPTCP_H */
+7 −0
Original line number Diff line number Diff line
@@ -476,6 +476,13 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
	if (!inet_get_convert_csum(sk))
		features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* UDP segmentation expects packets of type CHECKSUM_PARTIAL or
	 * CHECKSUM_NONE in __udp_gso_segment. UDP GRO indeed builds partial
	 * packets in udp_gro_complete_segment. As does UDP GSO, verified by
	 * udp_send_skb. But when those packets are looped in dev_loopback_xmit
	 * their ip_summed is set to CHECKSUM_UNNECESSARY. Reset in this
	 * specific case, where PARTIAL is both correct and required.
	 */
	if (skb->pkt_type == PACKET_LOOPBACK)
		skb->ip_summed = CHECKSUM_PARTIAL;

+2 −6
Original line number Diff line number Diff line
@@ -802,16 +802,12 @@ net_dm_hw_metadata_clone(const struct net_dm_hw_metadata *hw_metadata)
	if (!n_hw_metadata)
		return NULL;

	trap_group_name = kmemdup(hw_metadata->trap_group_name,
				  strlen(hw_metadata->trap_group_name) + 1,
				  GFP_ATOMIC | __GFP_ZERO);
	trap_group_name = kstrdup(hw_metadata->trap_group_name, GFP_ATOMIC);
	if (!trap_group_name)
		goto free_hw_metadata;
	n_hw_metadata->trap_group_name = trap_group_name;

	trap_name = kmemdup(hw_metadata->trap_name,
			    strlen(hw_metadata->trap_name) + 1,
			    GFP_ATOMIC | __GFP_ZERO);
	trap_name = kstrdup(hw_metadata->trap_name, GFP_ATOMIC);
	if (!trap_name)
		goto free_trap_group;
	n_hw_metadata->trap_name = trap_name;
Loading