Commit 55131dec authored by Lorenzo Bianconi's avatar Lorenzo Bianconi Committed by David S. Miller
Browse files

net: socionext: netsec: always grab descriptor lock



Always acquire the tx descriptor spinlock even if an XDP program is not
loaded on the netsec device, since ndo_xdp_xmit can run concurrently with
netsec_netdev_start_xmit and netsec_clean_tx_dring. This can happen when
an XDP program is loaded on a different device (e.g. virtio-net) and
xdp_do_redirect_map/xdp_do_redirect_slow redirect to netsec even though
we do not have an XDP program on it.

Fixes: ba2b2321 ("net: netsec: add XDP support")
Tested-by: default avatarIlias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: default avatarLorenzo Bianconi <lorenzo@kernel.org>
Reviewed-by: default avatarIlias Apalodimas <ilias.apalodimas@linaro.org>
Acked-by: default avatarToke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 68ce6688
Loading
Loading
Loading
Loading
+7 −23
Original line number Diff line number Diff line
@@ -282,7 +282,6 @@ struct netsec_desc_ring {
	void *vaddr;
	u16 head, tail;
	u16 xdp_xmit; /* netsec_xdp_xmit packets */
	bool is_xdp;
	struct page_pool *page_pool;
	struct xdp_rxq_info xdp_rxq;
	spinlock_t lock; /* XDP tx queue locking */
@@ -634,7 +633,6 @@ static bool netsec_clean_tx_dring(struct netsec_priv *priv)
	unsigned int bytes;
	int cnt = 0;

	if (dring->is_xdp)
	spin_lock(&dring->lock);

	bytes = 0;
@@ -682,7 +680,7 @@ next:
		entry = dring->vaddr + DESC_SZ * tail;
		cnt++;
	}
	if (dring->is_xdp)

	spin_unlock(&dring->lock);

	if (!cnt)
@@ -799,9 +797,6 @@ static void netsec_set_tx_de(struct netsec_priv *priv,
	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
	de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
	de->attr = attr;
	/* under spin_lock if using XDP */
	if (!dring->is_xdp)
		dma_wmb();

	dring->desc[idx] = *desc;
	if (desc->buf_type == TYPE_NETSEC_SKB)
@@ -1123,11 +1118,9 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
	u16 tso_seg_len = 0;
	int filled;

	if (dring->is_xdp)
	spin_lock_bh(&dring->lock);
	filled = netsec_desc_used(dring);
	if (netsec_check_stop_tx(priv, filled)) {
		if (dring->is_xdp)
		spin_unlock_bh(&dring->lock);
		net_warn_ratelimited("%s %s Tx queue full\n",
				     dev_name(priv->dev), ndev->name);
@@ -1161,7 +1154,6 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
	tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
					  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
		if (dring->is_xdp)
		spin_unlock_bh(&dring->lock);
		netif_err(priv, drv, priv->ndev,
			  "%s: DMA mapping failed\n", __func__);
@@ -1177,7 +1169,6 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
	netdev_sent_queue(priv->ndev, skb->len);

	netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
	if (dring->is_xdp)
	spin_unlock_bh(&dring->lock);
	netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */

@@ -1262,7 +1253,6 @@ err:
static void netsec_setup_tx_dring(struct netsec_priv *priv)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
	struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
	int i;

	for (i = 0; i < DESC_NUM; i++) {
@@ -1275,12 +1265,6 @@ static void netsec_setup_tx_dring(struct netsec_priv *priv)
		 */
		de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
	}

	if (xdp_prog)
		dring->is_xdp = true;
	else
		dring->is_xdp = false;

}

static int netsec_setup_rx_dring(struct netsec_priv *priv)