Commit c4655761 authored by Magnus Karlsson, committed by Daniel Borkmann

xsk: i40e: ice: ixgbe: mlx5: Rename xsk zero-copy driver interfaces



Rename the AF_XDP zero-copy driver interface functions to better
reflect what they do after the replacement of umems with buffer
pools in the previous commit. Mostly this means replacing "umem" in
the function names with "xsk_buff" and having them take a buffer
pool pointer instead of a umem. The various ring functions have also
been renamed in the process so that they follow the same naming
convention as the internal functions in xsk_queue.h, which makes it
clearer what they do and keeps the naming consistent.

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Björn Töpel <bjorn.topel@intel.com>
Link: https://lore.kernel.org/bpf/1598603189-32145-3-git-send-email-magnus.karlsson@intel.com
parent 1742b3d5
drivers/net/ethernet/intel/i40e/i40e_main.c +3 −3
@@ -3138,7 +3138,7 @@ static struct xsk_buff_pool *i40e_xsk_pool(struct i40e_ring *ring)
	if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
		return NULL;

-	return xdp_get_xsk_pool_from_qid(ring->vsi->netdev, qid);
+	return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
}

/**
@@ -3286,7 +3286,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
		if (ret)
			return ret;
		ring->rx_buf_len =
-		  xsk_umem_get_rx_frame_size(ring->xsk_pool->umem);
+		  xsk_pool_get_rx_frame_size(ring->xsk_pool);
		/* For AF_XDP ZC, we disallow packets to span on
		 * multiple buffers, thus letting us skip that
		 * handling in the fast-path.
@@ -3370,7 +3370,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
	writel(0, ring->tail);

	if (ring->xsk_pool) {
-		xsk_buff_set_rxq_info(ring->xsk_pool->umem, &ring->xdp_rxq);
+		xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
		ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring));
	} else {
		ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
drivers/net/ethernet/intel/i40e/i40e_xsk.c +16 −18
@@ -55,8 +55,7 @@ static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

-	err = xsk_buff_dma_map(pool->umem, &vsi->back->pdev->dev,
-			       I40E_RX_DMA_ATTR);
+	err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
	if (err)
		return err;

@@ -97,7 +96,7 @@ static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
	bool if_running;
	int err;

-	pool = xdp_get_xsk_pool_from_qid(netdev, qid);
+	pool = xsk_get_pool_from_qid(netdev, qid);
	if (!pool)
		return -EINVAL;

@@ -110,7 +109,7 @@ static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
	}

	clear_bit(qid, vsi->af_xdp_zc_qps);
-	xsk_buff_dma_unmap(pool->umem, I40E_RX_DMA_ATTR);
+	xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);

	if (if_running) {
		err = i40e_queue_pair_enable(vsi, qid);
@@ -196,7 +195,7 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = i40e_rx_bi(rx_ring, ntu);
	do {
-		xdp = xsk_buff_alloc(rx_ring->xsk_pool->umem);
+		xdp = xsk_buff_alloc(rx_ring->xsk_pool);
		if (!xdp) {
			ok = false;
			goto no_buffers;
@@ -363,11 +362,11 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);

-	if (xsk_umem_uses_need_wakeup(rx_ring->xsk_pool->umem)) {
+	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
-			xsk_set_rx_need_wakeup(rx_ring->xsk_pool->umem);
+			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
-			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool->umem);
+			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}
@@ -390,12 +389,11 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
	dma_addr_t dma;

	while (budget-- > 0) {
-		if (!xsk_umem_consume_tx(xdp_ring->xsk_pool->umem, &desc))
+		if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
			break;

-		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool->umem,
-					   desc.addr);
-		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool->umem, dma,
+		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
+		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
						 desc.len);

		tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];
@@ -422,7 +420,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
						 I40E_TXD_QW1_CMD_SHIFT);
		i40e_xdp_ring_update_tail(xdp_ring);

-		xsk_umem_consume_tx_done(xdp_ring->xsk_pool->umem);
+		xsk_tx_release(xdp_ring->xsk_pool);
		i40e_update_tx_stats(xdp_ring, sent_frames, total_bytes);
	}

@@ -494,13 +492,13 @@ skip:
		tx_ring->next_to_clean -= tx_ring->count;

	if (xsk_frames)
-		xsk_umem_complete_tx(bp->umem, xsk_frames);
+		xsk_tx_completed(bp, xsk_frames);

	i40e_arm_wb(tx_ring, vsi, completed_frames);

out_xmit:
-	if (xsk_umem_uses_need_wakeup(tx_ring->xsk_pool->umem))
-		xsk_set_tx_need_wakeup(tx_ring->xsk_pool->umem);
+	if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
+		xsk_set_tx_need_wakeup(tx_ring->xsk_pool);

	return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
}
@@ -591,7 +589,7 @@ void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
	}

	if (xsk_frames)
-		xsk_umem_complete_tx(bp->umem, xsk_frames);
+		xsk_tx_completed(bp, xsk_frames);
}

/**
@@ -607,7 +605,7 @@ bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
-		if (xdp_get_xsk_pool_from_qid(netdev, i))
+		if (xsk_get_pool_from_qid(netdev, i))
			return true;
	}

drivers/net/ethernet/intel/ice/ice_base.c +3 −3
@@ -313,7 +313,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
			xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);

			ring->rx_buf_len =
-				xsk_umem_get_rx_frame_size(ring->xsk_pool->umem);
+				xsk_pool_get_rx_frame_size(ring->xsk_pool);
			/* For AF_XDP ZC, we disallow packets to span on
			 * multiple buffers, thus letting us skip that
			 * handling in the fast-path.
@@ -324,7 +324,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
							 NULL);
			if (err)
				return err;
-			xsk_buff_set_rxq_info(ring->xsk_pool->umem, &ring->xdp_rxq);
+			xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);

			dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
				 ring->q_index);
@@ -418,7 +418,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
	writel(0, ring->tail);

	if (ring->xsk_pool) {
-		if (!xsk_buff_can_alloc(ring->xsk_pool->umem, num_bufs)) {
+		if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
			dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
				 num_bufs, ring->q_index);
			dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");
drivers/net/ethernet/intel/ice/ice_xsk.c +14 −14
@@ -311,7 +311,7 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
	    !vsi->xsk_pools[qid])
		return -EINVAL;

-	xsk_buff_dma_unmap(vsi->xsk_pools[qid]->umem, ICE_RX_DMA_ATTR);
+	xsk_pool_dma_unmap(vsi->xsk_pools[qid], ICE_RX_DMA_ATTR);
	ice_xsk_remove_pool(vsi, qid);

	return 0;
@@ -348,7 +348,7 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
	vsi->xsk_pools[qid] = pool;
	vsi->num_xsk_pools_used++;

-	err = xsk_buff_dma_map(vsi->xsk_pools[qid]->umem, ice_pf_to_dev(vsi->back),
+	err = xsk_pool_dma_map(vsi->xsk_pools[qid], ice_pf_to_dev(vsi->back),
			       ICE_RX_DMA_ATTR);
	if (err)
		return err;
@@ -425,7 +425,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
	rx_buf = &rx_ring->rx_buf[ntu];

	do {
-		rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool->umem);
+		rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
		if (!rx_buf->xdp) {
			ret = true;
			break;
@@ -645,11 +645,11 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
	ice_finalize_xdp_rx(rx_ring, xdp_xmit);
	ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);

-	if (xsk_umem_uses_need_wakeup(rx_ring->xsk_pool->umem)) {
+	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
-			xsk_set_rx_need_wakeup(rx_ring->xsk_pool->umem);
+			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
-			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool->umem);
+			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}
@@ -682,11 +682,11 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)

		tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];

-		if (!xsk_umem_consume_tx(xdp_ring->xsk_pool->umem, &desc))
+		if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
			break;

-		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool->umem, desc.addr);
-		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool->umem, dma,
+		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
+		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
						 desc.len);

		tx_buf->bytecount = desc.len;
@@ -703,7 +703,7 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)

	if (tx_desc) {
		ice_xdp_ring_update_tail(xdp_ring);
-		xsk_umem_consume_tx_done(xdp_ring->xsk_pool->umem);
+		xsk_tx_release(xdp_ring->xsk_pool);
	}

	return budget > 0 && work_done;
@@ -777,10 +777,10 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
	xdp_ring->next_to_clean = ntc;

	if (xsk_frames)
-		xsk_umem_complete_tx(xdp_ring->xsk_pool->umem, xsk_frames);
+		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);

-	if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_pool->umem))
-		xsk_set_tx_need_wakeup(xdp_ring->xsk_pool->umem);
+	if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
+		xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);

	ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
	xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);
@@ -896,5 +896,5 @@ void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
	}

	if (xsk_frames)
-		xsk_umem_complete_tx(xdp_ring->xsk_pool->umem, xsk_frames);
+		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
}
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +3 −3
@@ -3714,7 +3714,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,

	/* configure the packet buffer length */
	if (rx_ring->xsk_pool) {
-		u32 xsk_buf_len = xsk_umem_get_rx_frame_size(rx_ring->xsk_pool->umem);
+		u32 xsk_buf_len = xsk_pool_get_rx_frame_size(rx_ring->xsk_pool);

		/* If the MAC support setting RXDCTL.RLPML, the
		 * SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and
@@ -4064,7 +4064,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
						   MEM_TYPE_XSK_BUFF_POOL,
						   NULL));
-		xsk_buff_set_rxq_info(ring->xsk_pool->umem, &ring->xdp_rxq);
+		xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
	} else {
		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
						   MEM_TYPE_PAGE_SHARED, NULL));
@@ -4120,7 +4120,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
	}

	if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) {
-		u32 xsk_buf_len = xsk_umem_get_rx_frame_size(ring->xsk_pool->umem);
+		u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);

		rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
			    IXGBE_RXDCTL_RLPML_EN);