Commit d5c3a62d authored by David S. Miller
Browse files

Merge branch 'Convert-skb_frag_t-to-bio_vec'



Matthew Wilcox says:

====================
Convert skb_frag_t to bio_vec

The skb_frag_t and bio_vec are fundamentally the same (page, offset,
length) tuple.  This patch series unifies the two, leaving the
skb_frag_t typedef in place.  This has the immediate advantage that
we already have iov_iter support for bvecs and don't need to add
support for iterating skbuffs.  It enables a long-term plan to use
bvecs more broadly within the kernel and should make network-storage
drivers able to do less work converting between skbuffs and biovecs.

It will consume more memory on 32-bit kernels.  If that proves
problematic, we can look at ways of addressing it.

v3: Rebase on latest Linus with net-next merged.
  - Reorder the uncontroversial 'Use skb accessors' patches first so you
    can apply just those two if you want to hold off on the full
    conversion.
  - Convert all the users of 'struct skb_frag_struct' to skb_frag_t.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 7e24b4ed 8842d285
Loading
Loading
Loading
Loading
+4 −2
Original line number Diff line number Diff line
@@ -1134,7 +1134,9 @@ copy:
			}
			/* Update the skb. */
			if (merge) {
				skb_shinfo(skb)->frags[i - 1].size += copy;
				skb_frag_size_add(
						&skb_shinfo(skb)->frags[i - 1],
						copy);
			} else {
				skb_fill_page_desc(skb, i, page, off, copy);
				if (off + copy < pg_size) {
@@ -1247,7 +1249,7 @@ new_buf:

		i = skb_shinfo(skb)->nr_frags;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i - 1].size += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
+2 −1
Original line number Diff line number Diff line
@@ -181,7 +181,8 @@ static void ssip_skb_to_msg(struct sk_buff *skb, struct hsi_msg *msg)
		sg = sg_next(sg);
		BUG_ON(!sg);
		frag = &skb_shinfo(skb)->frags[i];
		sg_set_page(sg, frag->page.p, frag->size, frag->page_offset);
		sg_set_page(sg, skb_frag_page(frag), skb_frag_size(frag),
				frag->page_offset);
	}
}

+1 −1
Original line number Diff line number Diff line
@@ -102,7 +102,7 @@ static noinline int build_vnic_ulp_payload(struct sdma_engine *sde,
		goto bail_txadd;

	for (i = 0; i < skb_shinfo(tx->skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(tx->skb)->frags[i];
		skb_frag_t *frag = &skb_shinfo(tx->skb)->frags[i];

		/* combine physically continuous fragments later? */
		ret = sdma_txadd_page(sde->dd,
+1 −1
Original line number Diff line number Diff line
@@ -2175,7 +2175,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)

			dma_addr = skb_frag_dma_map(vp->gendev, frag,
						    0,
						    frag->size,
						    skb_frag_size(frag),
						    DMA_TO_DEVICE);
			if (dma_mapping_error(vp->gendev, dma_addr)) {
				for(i = i-1; i >= 0; i--)
+3 −3
Original line number Diff line number Diff line
@@ -2426,7 +2426,7 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
	u32 thiscopy, remainder;
	struct sk_buff *skb = tcb->skb;
	u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
	struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
	skb_frag_t *frags = &skb_shinfo(skb)->frags[0];
	struct phy_device *phydev = adapter->netdev->phydev;
	dma_addr_t dma_addr;
	struct tx_ring *tx_ring = &adapter->tx_ring;
@@ -2488,11 +2488,11 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
				frag++;
			}
		} else {
			desc[frag].len_vlan = frags[i - 1].size;
			desc[frag].len_vlan = skb_frag_size(&frags[i - 1]);
			dma_addr = skb_frag_dma_map(&adapter->pdev->dev,
						    &frags[i - 1],
						    0,
						    frags[i - 1].size,
						    desc[frag].len_vlan,
						    DMA_TO_DEVICE);
			desc[frag].addr_lo = lower_32_bits(dma_addr);
			desc[frag].addr_hi = upper_32_bits(dma_addr);
Loading