Commit e8a492cd authored by David S. Miller

Merge branch 'Avoid-local_irq_save-and-use-napi_alloc_frag-where-possible'



Sebastian Andrzej Siewior says:

====================
Avoid local_irq_save() and use napi_alloc_frag() where possible

The first two patches remove the local_irq_save() around
`netdev_alloc_cache', which does not work on -RT. Besides helping -RT,
this would benefit all users of the function since they can avoid
disabling interrupts and save a few cycles (the resulting pattern is
sketched below, after this cover letter).
The remaining patches are from a time when I tried to remove
`netdev_alloc_cache' entirely but then noticed that we still have
non-NAPI drivers using netdev_alloc_skb(), so I dropped that idea.
Using napi_alloc_frag() instead of netdev_alloc_frag() skips the
local_bh_disable() around the allocation, which is not required in
NAPI context.

v1…v2:
  - 1/7 + 2/7 now use "(in_irq() || irqs_disabled())" instead of just
    "irqs_disabled()" to align with __dev_kfree_skb_any(). Pointed out
    by Eric Dumazet.

  - 6/7 has one typo fewer. Pointed out by Sergei Shtylyov.

  - 3/7 + 4/7 added acks from Ioana Radulescu.
====================
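A minimal sketch of the allocation pattern the first two patches
establish, reconstructed from the cover letter rather than quoted from
the patches themselves (the identifiers netdev_alloc_cache,
__napi_alloc_frag and page_frag_alloc are assumed from the
net/core/skbuff.c of that era):

void *netdev_alloc_frag(unsigned int fragsz)
{
	void *data;

	if (in_irq() || irqs_disabled()) {
		/* Hard IRQ context, or IRQs already off: use the
		 * dedicated per-CPU cache directly. Nothing else can
		 * touch it on this CPU here, so no local_irq_save()
		 * is needed.
		 */
		data = page_frag_alloc(this_cpu_ptr(&netdev_alloc_cache),
				       fragsz, GFP_ATOMIC);
	} else {
		/* Process context: share the NAPI cache. Disabling
		 * BH is enough to serialize against softirq users.
		 */
		local_bh_disable();
		data = __napi_alloc_frag(fragsz, GFP_ATOMIC);
		local_bh_enable();
	}
	return data;
}

NAPI drivers already run with BH disabled, so they can call
napi_alloc_frag() directly and skip the local_bh_disable()/enable()
pair entirely; that is what the driver patches below do.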

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9e49fe4d 6dcdd884
+1 −1
@@ -684,7 +684,7 @@ static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
 		if (unlikely(gfpflags_allow_blocking(gfp_mask)))
 			return (void *)__get_free_page(gfp_mask);
 
-		return netdev_alloc_frag(fp->rx_frag_size);
+		return napi_alloc_frag(fp->rx_frag_size);
 	}
 
 	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
+1 −1
@@ -6710,7 +6710,7 @@ static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	if (skb_size <= PAGE_SIZE) {
-		data = netdev_alloc_frag(skb_size);
+		data = napi_alloc_frag(skb_size);
 		*frag_size = skb_size;
 	} else {
 		data = kmalloc(skb_size, GFP_ATOMIC);
+1 −10
@@ -555,7 +555,7 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
 	/* Prepare the HW SGT structure */
 	sgt_buf_size = priv->tx_data_offset +
 		       sizeof(struct dpaa2_sg_entry) *  num_dma_bufs;
-	sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
+	sgt_buf = napi_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
 	if (unlikely(!sgt_buf)) {
 		err = -ENOMEM;
 		goto sgt_buf_alloc_failed;
@@ -997,13 +997,6 @@ static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
 	int i, j;
 	int new_count;
 
-	/* This is the lazy seeding of Rx buffer pools.
-	 * dpaa2_add_bufs() is also used on the Rx hotpath and calls
-	 * napi_alloc_frag(). The trouble with that is that it in turn ends up
-	 * calling this_cpu_ptr(), which mandates execution in atomic context.
-	 * Rather than splitting up the code, do a one-off preempt disable.
-	 */
-	preempt_disable();
 	for (j = 0; j < priv->num_channels; j++) {
 		for (i = 0; i < DPAA2_ETH_NUM_BUFS;
 		     i += DPAA2_ETH_BUFS_PER_CMD) {
@@ -1011,12 +1004,10 @@ static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
 			priv->channel[j]->buf_count += new_count;
 
 			if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
-				preempt_enable();
 				return -ENOMEM;
 			}
 		}
 	}
-	preempt_enable();
 
 	return 0;
 }
+1 −1
@@ -1119,7 +1119,7 @@ static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
 			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));
 
 	/* Fill entire long pool */
-	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
+	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
 	if (num != hwbm_pool->size) {
 		WARN(1, "pool %d: %d of %d allocated\n",
 		     bm_pool->id, num, hwbm_pool->size);
+2 −2
@@ -190,7 +190,7 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
 			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 		hwbm_pool->construct = mvneta_bm_construct;
 		hwbm_pool->priv = new_pool;
-		spin_lock_init(&hwbm_pool->lock);
+		mutex_init(&hwbm_pool->buf_lock);
 
 		/* Create new pool */
 		err = mvneta_bm_pool_create(priv, new_pool);
@@ -201,7 +201,7 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
 		}
 
 		/* Allocate buffers for this pool */
-		num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
+		num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
 		if (num != hwbm_pool->size) {
 			WARN(1, "pool %d: %d of %d allocated\n",
 			     new_pool->id, num, hwbm_pool->size);
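The hwbm_pool_add() change in the two hunks above pairs the dropped
gfp_t argument with the spinlock-to-mutex conversion: once the pool is
protected by a mutex the add path may sleep, so callers no longer need
to pass GFP_ATOMIC. A sketch of the assumed post-patch shape of the
helper (a reconstruction, not the verbatim patch; hwbm_pool_refill()
is the existing per-buffer helper in net/core/hwbm.c):

int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num)
{
	int err, i = 0;

	mutex_lock(&bm_pool->buf_lock);	/* was a spinlock before */
	while (i < buf_num) {
		/* A sleeping allocation is fine under a mutex. */
		err = hwbm_pool_refill(bm_pool, GFP_KERNEL);
		if (err < 0)
			break;
		i++;
	}
	bm_pool->buf_num += i;
	mutex_unlock(&bm_pool->buf_lock);

	return i;	/* number of buffers actually added */
}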