Commit 4b280531 authored by David S. Miller

Merge branch 'XDP-generic-fixes'



Stephen Hemminger says:

====================
XDP generic fixes

This set of patches came about while investigating XDP
generic on Azure. The split-brain nature of the accelerated
networking exposed issues with the stacked device model.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents f2696099 458bf2f2
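
The "stacked device" problem the cover letter refers to is the Azure setup where an accelerated-networking VF is slaved under the netvsc synthetic device: packets arrive on the VF, the netvsc rx_handler re-steers them to the synthetic device, and with the old hook placement a generic XDP program attached to the synthetic device never saw that traffic. Below is a minimal, self-contained C model of the control flow after this series (not kernel code; struct device, struct pkt, run_generic_xdp(), vf_rx_handler(), receive_core() and the *_M enum names are invented for illustration). It assumes the usual rx_handler convention of returning RX_HANDLER_ANOTHER after rewriting skb->dev, which makes __netif_receive_skb_core take another pass via its another_round label.

/*
 * Minimal stand-alone model (NOT kernel code) of the stacked-device
 * receive path.  All names here -- struct device, struct pkt,
 * run_generic_xdp(), vf_rx_handler(), receive_core() -- are invented
 * for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

enum rx_handler_result { RX_HANDLER_PASS_M, RX_HANDLER_ANOTHER_M };
enum xdp_verdict { XDP_PASS_M, XDP_DROP_M };

struct device {
	const char *name;
	bool has_xdp_prog;	/* generic XDP program attached? */
	struct device *parent;	/* "upper" device it is slaved under */
};

struct pkt {
	struct device *dev;	/* models skb->dev */
};

/* Models do_xdp_generic(): run whatever program pkt->dev carries. */
static enum xdp_verdict run_generic_xdp(struct pkt *p)
{
	if (p->dev->has_xdp_prog)
		printf("XDP program on %s sees the packet\n", p->dev->name);
	return XDP_PASS_M;	/* assume the program passes the packet */
}

/* Models the netvsc VF rx_handler: re-steer the packet to the parent. */
static enum rx_handler_result vf_rx_handler(struct pkt *p)
{
	if (p->dev->parent) {
		p->dev = p->dev->parent;
		return RX_HANDLER_ANOTHER_M;	/* take another pass */
	}
	return RX_HANDLER_PASS_M;
}

/*
 * Models __netif_receive_skb_core() after this series: the generic XDP
 * check sits inside the another_round loop, so it runs again after the
 * rx_handler rewrites pkt->dev to the synthetic (parent) device.
 */
static void receive_core(struct pkt *p)
{
another_round:
	if (run_generic_xdp(p) != XDP_PASS_M)
		return;		/* packet dropped by XDP */
	if (vf_rx_handler(p) == RX_HANDLER_ANOTHER_M)
		goto another_round;
	printf("delivered to the stack on %s\n", p->dev->name);
}

int main(void)
{
	struct device synthetic = { "netvsc0", true, NULL };	/* has XDP */
	struct device vf = { "vf0", false, &synthetic };	/* no XDP */
	struct pkt p = { &vf };

	receive_core(&p);	/* the program on netvsc0 now sees VF traffic */
	return 0;
}

The design point the model illustrates is that running the generic XDP check inside the loop makes each pass consult the program of whatever device currently owns the skb, which is what the do_xdp_generic() call added to __netif_receive_skb_core in the net/core/dev.c hunk below does.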
drivers/net/hyperv/netvsc_drv.c  +6 −0
@@ -2000,6 +2000,12 @@ static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
	struct netvsc_vf_pcpu_stats *pcpu_stats
		 = this_cpu_ptr(ndev_ctx->vf_stats);

+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (unlikely(!skb))
+		return RX_HANDLER_CONSUMED;
+
+	*pskb = skb;
+
	skb->dev = ndev;

	u64_stats_update_begin(&pcpu_stats->syncp);
net/core/dev.c  +12 −46
@@ -4502,23 +4502,6 @@ static int netif_rx_internal(struct sk_buff *skb)

	trace_netif_rx(skb);

-	if (static_branch_unlikely(&generic_xdp_needed_key)) {
-		int ret;
-
-		preempt_disable();
-		rcu_read_lock();
-		ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
-		rcu_read_unlock();
-		preempt_enable();
-
-		/* Consider XDP consuming the packet a success from
-		 * the netdev point of view we do not want to count
-		 * this as an error.
-		 */
-		if (ret != XDP_PASS)
-			return NET_RX_SUCCESS;
-	}
-
#ifdef CONFIG_RPS
	if (static_branch_unlikely(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
@@ -4858,6 +4841,18 @@ another_round:

	__this_cpu_inc(softnet_data.processed);

+	if (static_branch_unlikely(&generic_xdp_needed_key)) {
+		int ret2;
+
+		preempt_disable();
+		ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
+		preempt_enable();
+
+		if (ret2 != XDP_PASS)
+			return NET_RX_DROP;
+		skb_reset_mac_len(skb);
+	}
+
	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
		skb = skb_vlan_untag(skb);
@@ -5178,19 +5173,6 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

-	if (static_branch_unlikely(&generic_xdp_needed_key)) {
-		int ret;
-
-		preempt_disable();
-		rcu_read_lock();
-		ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
-		rcu_read_unlock();
-		preempt_enable();
-
-		if (ret != XDP_PASS)
-			return NET_RX_DROP;
-	}
-
	rcu_read_lock();
#ifdef CONFIG_RPS
	if (static_branch_unlikely(&rps_needed)) {
@@ -5211,7 +5193,6 @@ static int netif_receive_skb_internal(struct sk_buff *skb)

static void netif_receive_skb_list_internal(struct list_head *head)
{
-	struct bpf_prog *xdp_prog = NULL;
	struct sk_buff *skb, *next;
	struct list_head sublist;

@@ -5224,21 +5205,6 @@ static void netif_receive_skb_list_internal(struct list_head *head)
	}
	list_splice_init(&sublist, head);

-	if (static_branch_unlikely(&generic_xdp_needed_key)) {
-		preempt_disable();
-		rcu_read_lock();
-		list_for_each_entry_safe(skb, next, head, list) {
-			xdp_prog = rcu_dereference(skb->dev->xdp_prog);
-			skb_list_del_init(skb);
-			if (do_xdp_generic(xdp_prog, skb) == XDP_PASS)
-				list_add_tail(&skb->list, &sublist);
-		}
-		rcu_read_unlock();
-		preempt_enable();
-		/* Put passed packets back on main list */
-		list_splice_init(&sublist, head);
-	}
-
	rcu_read_lock();
#ifdef CONFIG_RPS
	if (static_branch_unlikely(&rps_needed)) {