Commit 4a956bd2 authored by David S. Miller

Merge branch 'DPAA-Ethernet-changes'



Madalin Bucur says:

====================
DPAA Ethernet changes

v2: remove excess braces

Here are some more changes for the DPAA 1.x area.
In summary, these changes use pages for the receive buffers and
for the scatter-gather table fed to the HW on the Tx path, perform
a bit of cleanup in some convoluted parts of the code, add some
minor fixes to the DMA (un)mapping sequencing for a less common
scenario, and add a device link that removes the interfaces when
the QMan portal they use is removed.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 2bd7c3e1 e06eea55
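
A condensed, kernel-style sketch of the Rx buffer recipe the series
below adopts (an illustration under stated assumptions, not the driver
code verbatim): one order-0 page per buffer, DMA-mapped whole, with the
raw page size reused on the unmap side. rx_buf_seed() is a hypothetical
helper name; rx_dma_dev matches the driver's field name.

static dma_addr_t rx_buf_seed(struct device *rx_dma_dev)
{
	/* one full page per Rx buffer instead of a netdev frag */
	struct page *p = dev_alloc_pages(0);
	dma_addr_t addr;

	if (!p)
		return 0;

	/* map the whole raw page; the unmap side uses the same size */
	addr = dma_map_page(rx_dma_dev, p, 0, 4096, DMA_FROM_DEVICE);
	if (dma_mapping_error(rx_dma_dev, addr)) {
		__free_pages(p, 0);
		return 0;
	}

	return addr; /* released to the BMan pool by the caller */
}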
+125 −149
@@ -178,31 +178,9 @@ struct fm_port_fqs {
/* All the dpa bps in use at any moment */
static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];

/* The raw buffer size must be cacheline aligned */
#define DPAA_BP_RAW_SIZE 4096

-/* When using more than one buffer pool, the raw sizes are as follows:
- * 1 bp: 4KB
- * 2 bp: 2KB, 4KB
- * 3 bp: 1KB, 2KB, 4KB
- * 4 bp: 1KB, 2KB, 4KB, 8KB
- */
-static inline size_t bpool_buffer_raw_size(u8 index, u8 cnt)
-{
-	size_t res = DPAA_BP_RAW_SIZE / 4;
-	u8 i;
-
-	for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++)
-		res *= 2;
-	return res;
-}
-
-/* FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
- * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
- * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
- * half-page-aligned buffers, so we reserve some more space for start-of-buffer
- * alignment.
- */
-#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD((raw_size) - SMP_CACHE_BYTES)
+#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD(raw_size)

static int dpaa_max_frm;
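
For reference, the sizing progression the removed helper encoded can be
checked in isolation; a minimal userspace rendition is below (u8 replaced
by unsigned char, everything else as in the deleted function). It prints
exactly the table from the removed comment, and shows that with a single
pool the size degenerates to DPAA_BP_RAW_SIZE, which is why the helper
can go.

#include <stdio.h>
#include <stddef.h>

#define DPAA_BP_RAW_SIZE 4096

static size_t bpool_buffer_raw_size(unsigned char index, unsigned char cnt)
{
	size_t res = DPAA_BP_RAW_SIZE / 4;
	unsigned char i;

	for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++)
		res *= 2;
	return res;
}

int main(void)
{
	int cnt, i;

	for (cnt = 1; cnt <= 4; cnt++) {
		printf("%d bp:", cnt);
		for (i = 0; i < cnt; i++)
			printf(" %zuKB", bpool_buffer_raw_size(i, cnt) / 1024);
		printf("\n");
	}
	return 0;
}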

@@ -288,7 +266,7 @@ static int dpaa_stop(struct net_device *net_dev)
	/* Allow the Fman (Tx) port to process in-flight frames before we
	 * try switching it off.
	 */
-	usleep_range(5000, 10000);
+	msleep(200);

	err = mac_dev->stop(mac_dev);
	if (err < 0)
@@ -305,6 +283,8 @@ static int dpaa_stop(struct net_device *net_dev)
		phy_disconnect(net_dev->phydev);
	net_dev->phydev = NULL;

+	msleep(200);
+
	return err;
}

@@ -596,10 +576,7 @@ static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)

static void dpaa_bps_free(struct dpaa_priv *priv)
{
-	int i;
-
-	for (i = 0; i < DPAA_BPS_NUM; i++)
-		dpaa_bp_free(priv->dpaa_bps[i]);
+	dpaa_bp_free(priv->dpaa_bp);
}

/* Use multiple WQs for FQ assignment:
@@ -773,7 +750,7 @@ static void dpaa_release_channel(void)
	qman_release_pool(rx_pool_channel);
}

-static void dpaa_eth_add_channel(u16 channel)
+static void dpaa_eth_add_channel(u16 channel, struct device *dev)
{
	u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
	const cpumask_t *cpus = qman_affine_cpus();
@@ -783,6 +760,7 @@ static void dpaa_eth_add_channel(u16 channel)
	for_each_cpu_and(cpu, cpus, cpu_online_mask) {
		portal = qman_get_affine_portal(cpu);
		qman_p_static_dequeue_add(portal, pool);
+		qman_start_using_portal(portal, dev);
	}
}

@@ -1197,15 +1175,15 @@ static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
	return err;
}

-static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
-				 size_t count, struct dpaa_fq *errq,
+static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp *bp,
+				 struct dpaa_fq *errq,
				 struct dpaa_fq *defq, struct dpaa_fq *pcdq,
				 struct dpaa_buffer_layout *buf_layout)
{
	struct fman_buffer_prefix_content buf_prefix_content;
	struct fman_port_rx_params *rx_p;
	struct fman_port_params params;
-	int i, err;
+	int err;

	memset(&params, 0, sizeof(params));
	memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
@@ -1224,12 +1202,9 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
		rx_p->pcd_fqs_count = DPAA_ETH_PCD_RXQ_NUM;
	}

-	count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
-	rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
-	for (i = 0; i < count; i++) {
-		rx_p->ext_buf_pools.ext_buf_pool[i].id =  bps[i]->bpid;
-		rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bps[i]->size;
-	}
+	rx_p->ext_buf_pools.num_of_pools_used = 1;
+	rx_p->ext_buf_pools.ext_buf_pool[0].id =  bp->bpid;
+	rx_p->ext_buf_pools.ext_buf_pool[0].size = (u16)bp->size;

	err = fman_port_config(port, &params);
	if (err) {
@@ -1252,7 +1227,7 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
}

static int dpaa_eth_init_ports(struct mac_device *mac_dev,
-			       struct dpaa_bp **bps, size_t count,
+			       struct dpaa_bp *bp,
			       struct fm_port_fqs *port_fqs,
			       struct dpaa_buffer_layout *buf_layout,
			       struct device *dev)
@@ -1266,7 +1241,7 @@ static int dpaa_eth_init_ports(struct mac_device *mac_dev,
	if (err)
		return err;

-	err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
+	err = dpaa_eth_init_rx_port(rxport, bp, port_fqs->rx_errq,
				    port_fqs->rx_defq, port_fqs->rx_pcdq,
				    &buf_layout[RX]);

@@ -1335,13 +1310,14 @@ static void dpaa_fd_release(const struct net_device *net_dev,
		vaddr = phys_to_virt(qm_fd_addr(fd));
		sgt = vaddr + qm_fd_get_offset(fd);

-		dma_unmap_single(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd),
-				 dpaa_bp->size, DMA_FROM_DEVICE);
+		dma_unmap_page(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd),
+			       DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);

		dpaa_release_sgt_members(sgt);

-		addr = dma_map_single(dpaa_bp->priv->rx_dma_dev, vaddr,
-				      dpaa_bp->size, DMA_FROM_DEVICE);
+		addr = dma_map_page(dpaa_bp->priv->rx_dma_dev,
+				    virt_to_page(vaddr), 0, DPAA_BP_RAW_SIZE,
+				    DMA_FROM_DEVICE);
		if (dma_mapping_error(dpaa_bp->priv->rx_dma_dev, addr)) {
			netdev_err(net_dev, "DMA mapping failed\n");
			return;
@@ -1396,7 +1372,7 @@ static void count_ern(struct dpaa_percpu_priv *percpu_priv,
static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
			       struct sk_buff *skb,
			       struct qm_fd *fd,
-			       char *parse_results)
+			       void *parse_results)
{
	struct fman_prs_result *parse_result;
	u16 ethertype = ntohs(skb->protocol);
@@ -1491,21 +1467,18 @@ static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
	struct net_device *net_dev = dpaa_bp->priv->net_dev;
	struct bm_buffer bmb[8];
	dma_addr_t addr;
-	void *new_buf;
+	struct page *p;
	u8 i;

	for (i = 0; i < 8; i++) {
-		new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
-		if (unlikely(!new_buf)) {
-			netdev_err(net_dev,
-				   "netdev_alloc_frag() failed, size %zu\n",
-				   dpaa_bp->raw_size);
+		p = dev_alloc_pages(0);
+		if (unlikely(!p)) {
+			netdev_err(net_dev, "dev_alloc_pages() failed\n");
			goto release_previous_buffs;
		}
-		new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);

-		addr = dma_map_single(dpaa_bp->priv->rx_dma_dev, new_buf,
-				      dpaa_bp->size, DMA_FROM_DEVICE);
+		addr = dma_map_page(dpaa_bp->priv->rx_dma_dev, p, 0,
+				    DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dpaa_bp->priv->rx_dma_dev,
					       addr))) {
			netdev_err(net_dev, "DMA map failed\n");
@@ -1583,17 +1556,16 @@ static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
{
	struct dpaa_bp *dpaa_bp;
	int *countptr;
-	int res, i;
+	int res;

-	for (i = 0; i < DPAA_BPS_NUM; i++) {
-		dpaa_bp = priv->dpaa_bps[i];
-		countptr = this_cpu_ptr(dpaa_bp->percpu_count);
-		res  = dpaa_eth_refill_bpool(dpaa_bp, countptr);
-		if (res)
-			return res;
-	}
+	dpaa_bp = priv->dpaa_bp;
+	if (!dpaa_bp)
+		return -EINVAL;
+	countptr = this_cpu_ptr(dpaa_bp->percpu_count);
+	res  = dpaa_eth_refill_bpool(dpaa_bp, countptr);
+	if (res)
+		return res;

	return 0;
}

@@ -1602,68 +1574,74 @@ static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
 * Skb freeing is not handled here.
 *
 * This function may be called on error paths in the Tx function, so guard
- * against cases when not all fd relevant fields were filled in.
+ * against cases when not all fd relevant fields were filled in. To avoid
+ * reading the invalid transmission timestamp for the error paths set ts to
+ * false.
 *
 * Return the skb backpointer, since for S/G frames the buffer containing it
 * gets freed here.
 */
static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
-					  const struct qm_fd *fd)
+					  const struct qm_fd *fd, bool ts)
{
	const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
	struct device *dev = priv->net_dev->dev.parent;
	struct skb_shared_hwtstamps shhwtstamps;
	dma_addr_t addr = qm_fd_addr(fd);
+	void *vaddr = phys_to_virt(addr);
	const struct qm_sg_entry *sgt;
-	struct sk_buff **skbh, *skb;
-	int nr_frags, i;
+	struct sk_buff *skb;
	u64 ns;

-	skbh = (struct sk_buff **)phys_to_virt(addr);
-	skb = *skbh;
-
-	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
-		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
-
-		if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh,
-					  &ns)) {
-			shhwtstamps.hwtstamp = ns_to_ktime(ns);
-			skb_tstamp_tx(skb, &shhwtstamps);
-		} else {
-			dev_warn(dev, "fman_port_get_tstamp failed!\n");
-		}
-	}
+	int i;

	if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
-		nr_frags = skb_shinfo(skb)->nr_frags;
-		dma_unmap_single(priv->tx_dma_dev, addr,
+		dma_unmap_page(priv->tx_dma_dev, addr,
			       qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
			       dma_dir);

		/* The sgt buffer has been allocated with netdev_alloc_frag(),
		 * it's from lowmem.
		 */
-		sgt = phys_to_virt(addr + qm_fd_get_offset(fd));
+		sgt = vaddr + qm_fd_get_offset(fd);

		/* sgt[0] is from lowmem, was dma_map_single()-ed */
		dma_unmap_single(priv->tx_dma_dev, qm_sg_addr(&sgt[0]),
				 qm_sg_entry_get_len(&sgt[0]), dma_dir);

		/* remaining pages were mapped with skb_frag_dma_map() */
-		for (i = 1; i <= nr_frags; i++) {
+		for (i = 1; (i < DPAA_SGT_MAX_ENTRIES) &&
+		     !qm_sg_entry_is_final(&sgt[i - 1]); i++) {
			WARN_ON(qm_sg_entry_is_ext(&sgt[i]));

			dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[i]),
				       qm_sg_entry_get_len(&sgt[i]), dma_dir);
		}

-		/* Free the page frag that we allocated on Tx */
-		skb_free_frag(phys_to_virt(addr));
	} else {
		dma_unmap_single(priv->tx_dma_dev, addr,
-				 skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
+				 priv->tx_headroom + qm_fd_get_length(fd),
+				 dma_dir);
	}

+	skb = *(struct sk_buff **)vaddr;
+
+	/* DMA unmapping is required before accessing the HW provided info */
+	if (ts && priv->tx_tstamp &&
+	    skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+
+		if (!fman_port_get_tstamp(priv->mac_dev->port[TX], vaddr,
+					  &ns)) {
+			shhwtstamps.hwtstamp = ns_to_ktime(ns);
+			skb_tstamp_tx(skb, &shhwtstamps);
+		} else {
+			dev_warn(dev, "fman_port_get_tstamp failed!\n");
+		}
+	}
+
+	if (qm_fd_get_format(fd) == qm_fd_sg)
+		/* Free the page that we allocated on Tx for the SGT */
+		free_pages((unsigned long)vaddr, 0);
+
	return skb;
}
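
The reordering above follows the general DMA API rule: the CPU must not
read a buffer the device may still own. A minimal kernel-style sketch of
the same sequencing, with hypothetical names (fd_to_skb, tx_dev, fd_addr,
fd_len):

static struct sk_buff *fd_to_skb(struct device *tx_dev, dma_addr_t fd_addr,
				 size_t fd_len)
{
	void *vaddr = phys_to_virt(fd_addr);

	/* wrong: reading *(struct sk_buff **)vaddr here, while still mapped */
	dma_unmap_single(tx_dev, fd_addr, fd_len, DMA_TO_DEVICE);

	/* right: the buffer is CPU-owned again, so the skb backpointer and
	 * the HW-written timestamp area are safe to dereference */
	return *(struct sk_buff **)vaddr;
}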

@@ -1717,7 +1695,7 @@ static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
	return skb;

free_buffer:
-	skb_free_frag(vaddr);
+	free_pages((unsigned long)vaddr, 0);
	return NULL;
}

@@ -1764,8 +1742,8 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
			goto free_buffers;

		count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
-		dma_unmap_single(dpaa_bp->priv->rx_dma_dev, sg_addr,
-				 dpaa_bp->size, DMA_FROM_DEVICE);
+		dma_unmap_page(priv->rx_dma_dev, sg_addr,
+			       DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
		if (!skb) {
			sz = dpaa_bp->size +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -1817,7 +1795,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
	WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n");

	/* free the SG table buffer */
-	skb_free_frag(vaddr);
+	free_pages((unsigned long)vaddr, 0);

	return skb;

@@ -1834,7 +1812,7 @@ free_buffers:
	for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
		sg_addr = qm_sg_addr(&sgt[i]);
		sg_vaddr = phys_to_virt(sg_addr);
-		skb_free_frag(sg_vaddr);
+		free_pages((unsigned long)sg_vaddr, 0);
		dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
		if (dpaa_bp) {
			count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
@@ -1845,7 +1823,7 @@ free_buffers:
			break;
	}
	/* free the SGT fragment */
-	skb_free_frag(vaddr);
+	free_pages((unsigned long)vaddr, 0);

	return NULL;
}
@@ -1856,7 +1834,7 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
{
	struct net_device *net_dev = priv->net_dev;
	enum dma_data_direction dma_dir;
-	unsigned char *buffer_start;
+	unsigned char *buff_start;
	struct sk_buff **skbh;
	dma_addr_t addr;
	int err;
@@ -1865,10 +1843,10 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
	 * available, so just use that for offset.
	 */
	fd->bpid = FSL_DPAA_BPID_INV;
-	buffer_start = skb->data - priv->tx_headroom;
+	buff_start = skb->data - priv->tx_headroom;
	dma_dir = DMA_TO_DEVICE;

-	skbh = (struct sk_buff **)buffer_start;
+	skbh = (struct sk_buff **)buff_start;
	*skbh = skb;

	/* Enable L3/L4 hardware checksum computation.
@@ -1877,7 +1855,7 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
	 * need to write into the skb.
	 */
	err = dpaa_enable_tx_csum(priv, skb, fd,
-				  ((char *)skbh) + DPAA_TX_PRIV_DATA_SIZE);
+				  buff_start + DPAA_TX_PRIV_DATA_SIZE);
	if (unlikely(err < 0)) {
		if (net_ratelimit())
			netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
@@ -1890,8 +1868,8 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
	fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);

	/* Map the entire buffer size that may be seen by FMan, but no more */
-	addr = dma_map_single(priv->tx_dma_dev, skbh,
-			      skb_tail_pointer(skb) - buffer_start, dma_dir);
+	addr = dma_map_single(priv->tx_dma_dev, buff_start,
+			      priv->tx_headroom + skb->len, dma_dir);
	if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
		if (net_ratelimit())
			netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
@@ -1910,21 +1888,20 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
	struct net_device *net_dev = priv->net_dev;
	struct qm_sg_entry *sgt;
	struct sk_buff **skbh;
-	int i, j, err, sz;
-	void *buffer_start;
+	void *buff_start;
	skb_frag_t *frag;
	dma_addr_t addr;
	size_t frag_len;
-	void *sgt_buf;
-
-	/* get a page frag to store the SGTable */
-	sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
-	sgt_buf = netdev_alloc_frag(sz);
-	if (unlikely(!sgt_buf)) {
-		netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
-			   sz);
+	struct page *p;
+	int i, j, err;
+
+	/* get a page to store the SGTable */
+	p = dev_alloc_pages(0);
+	if (unlikely(!p)) {
+		netdev_err(net_dev, "dev_alloc_pages() failed\n");
		return -ENOMEM;
	}
+	buff_start = page_address(p);

	/* Enable L3/L4 hardware checksum computation.
	 *
@@ -1932,7 +1909,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
	 * need to write into the skb.
	 */
	err = dpaa_enable_tx_csum(priv, skb, fd,
-				  sgt_buf + DPAA_TX_PRIV_DATA_SIZE);
+				  buff_start + DPAA_TX_PRIV_DATA_SIZE);
	if (unlikely(err < 0)) {
		if (net_ratelimit())
			netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
@@ -1941,7 +1918,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
	}

	/* SGT[0] is used by the linear part */
-	sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
+	sgt = (struct qm_sg_entry *)(buff_start + priv->tx_headroom);
	frag_len = skb_headlen(skb);
	qm_sg_entry_set_len(&sgt[0], frag_len);
	sgt[0].bpid = FSL_DPAA_BPID_INV;
@@ -1979,14 +1956,14 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
	/* Set the final bit in the last used entry of the SGT */
	qm_sg_entry_set_f(&sgt[nr_frags], frag_len);

	/* set fd offset to priv->tx_headroom */
	qm_fd_set_sg(fd, priv->tx_headroom, skb->len);

	/* DMA map the SGT page */
-	buffer_start = (void *)sgt - priv->tx_headroom;
-	skbh = (struct sk_buff **)buffer_start;
+	skbh = (struct sk_buff **)buff_start;
	*skbh = skb;

-	addr = dma_map_single(priv->tx_dma_dev, buffer_start,
+	addr = dma_map_page(priv->tx_dma_dev, p, 0,
			    priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
	if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
		netdev_err(priv->net_dev, "DMA mapping failed\n");
@@ -2007,7 +1984,7 @@ sg_map_failed:
			       qm_sg_entry_get_len(&sgt[j]), dma_dir);
sg0_map_failed:
csum_failed:
-	skb_free_frag(sgt_buf);
+	free_pages((unsigned long)buff_start, 0);

	return err;
}
@@ -2114,7 +2091,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
	if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
		return NETDEV_TX_OK;

-	dpaa_cleanup_tx_fd(priv, &fd);
+	dpaa_cleanup_tx_fd(priv, &fd, false);
skb_to_fd_failed:
enomem:
	percpu_stats->tx_errors++;
@@ -2160,7 +2137,7 @@ static void dpaa_tx_error(struct net_device *net_dev,

	percpu_priv->stats.tx_errors++;

-	skb = dpaa_cleanup_tx_fd(priv, fd);
+	skb = dpaa_cleanup_tx_fd(priv, fd, false);
	dev_kfree_skb(skb);
}

@@ -2200,7 +2177,7 @@ static void dpaa_tx_conf(struct net_device *net_dev,

	percpu_priv->tx_confirm++;

-	skb = dpaa_cleanup_tx_fd(priv, fd);
+	skb = dpaa_cleanup_tx_fd(priv, fd, true);

	consume_skb(skb);
}
@@ -2304,7 +2281,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
		return qman_cb_dqrr_consume;
	}

-	dma_unmap_single(dpaa_bp->priv->rx_dma_dev, addr, dpaa_bp->size,
+	dma_unmap_page(dpaa_bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
		       DMA_FROM_DEVICE);

	/* prefetch the first 64 bytes of the frame or the SGT start */
@@ -2427,7 +2404,7 @@ static void egress_ern(struct qman_portal *portal,
	percpu_priv->stats.tx_fifo_errors++;
	count_ern(percpu_priv, msg);

-	skb = dpaa_cleanup_tx_fd(priv, fd);
+	skb = dpaa_cleanup_tx_fd(priv, fd, false);
	dev_kfree_skb_any(skb);
}

@@ -2660,7 +2637,8 @@ static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
{
	dma_addr_t addr = bm_buf_addr(bmb);

-	dma_unmap_single(bp->priv->rx_dma_dev, addr, bp->size, DMA_FROM_DEVICE);
+	dma_unmap_page(bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
+		       DMA_FROM_DEVICE);

	skb_free_frag(phys_to_virt(addr));
}
@@ -2761,13 +2739,13 @@ static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)

static int dpaa_eth_probe(struct platform_device *pdev)
{
-	struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL};
	struct net_device *net_dev = NULL;
+	struct dpaa_bp *dpaa_bp = NULL;
	struct dpaa_fq *dpaa_fq, *tmp;
	struct dpaa_priv *priv = NULL;
	struct fm_port_fqs port_fqs;
	struct mac_device *mac_dev;
-	int err = 0, i, channel;
+	int err = 0, channel;
	struct device *dev;

	dev = &pdev->dev;
@@ -2856,23 +2834,21 @@ static int dpaa_eth_probe(struct platform_device *pdev)
	priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */

	/* bp init */
-	for (i = 0; i < DPAA_BPS_NUM; i++) {
-		dpaa_bps[i] = dpaa_bp_alloc(dev);
-		if (IS_ERR(dpaa_bps[i])) {
-			err = PTR_ERR(dpaa_bps[i]);
-			goto free_dpaa_bps;
-		}
-		/* the raw size of the buffers used for reception */
-		dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM);
-		/* avoid runtime computations by keeping the usable size here */
-		dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size);
-		dpaa_bps[i]->priv = priv;
-
-		err = dpaa_bp_alloc_pool(dpaa_bps[i]);
-		if (err < 0)
-			goto free_dpaa_bps;
-		priv->dpaa_bps[i] = dpaa_bps[i];
-	}
+	dpaa_bp = dpaa_bp_alloc(dev);
+	if (IS_ERR(dpaa_bp)) {
+		err = PTR_ERR(dpaa_bp);
+		goto free_dpaa_bps;
+	}
+	/* the raw size of the buffers used for reception */
+	dpaa_bp->raw_size = DPAA_BP_RAW_SIZE;
+	/* avoid runtime computations by keeping the usable size here */
+	dpaa_bp->size = dpaa_bp_size(dpaa_bp->raw_size);
+	dpaa_bp->priv = priv;
+
+	err = dpaa_bp_alloc_pool(dpaa_bp);
+	if (err < 0)
+		goto free_dpaa_bps;
+	priv->dpaa_bp = dpaa_bp;

	INIT_LIST_HEAD(&priv->dpaa_fq_list);

@@ -2898,7 +2874,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
	/* Walk the CPUs with affine portals
	 * and add this pool channel to each's dequeue mask.
	 */
-	dpaa_eth_add_channel(priv->channel);
+	dpaa_eth_add_channel(priv->channel, &pdev->dev);

	dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);

@@ -2930,7 +2906,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
	priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);

	/* All real interfaces need their ports initialized */
-	err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
+	err = dpaa_eth_init_ports(mac_dev, dpaa_bp, &port_fqs,
				  &priv->buf_layout[0], dev);
	if (err)
		goto free_dpaa_fqs;
+1 −3
@@ -47,8 +47,6 @@
/* Total number of Tx queues */
#define DPAA_ETH_TXQ_NUM	(DPAA_TC_NUM * DPAA_TC_TXQ_NUM)

-#define DPAA_BPS_NUM 3 /* number of bpools per interface */
-
/* More detailed FQ types - used for fine-grained WQ assignments */
enum dpaa_fq_type {
	FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
@@ -148,7 +146,7 @@ struct dpaa_buffer_layout {

struct dpaa_priv {
	struct dpaa_percpu_priv __percpu *percpu_priv;
-	struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM];
+	struct dpaa_bp *dpaa_bp;
	/* Store here the needed Tx headroom for convenience and speed
	 * (even though it can be computed based on the fields of buf_layout)
	 */
+2 −4
@@ -131,11 +131,9 @@ static ssize_t dpaa_eth_show_bpids(struct device *dev,
{
	struct dpaa_priv *priv = netdev_priv(to_net_dev(dev));
	ssize_t bytes = 0;
-	int i = 0;

-	for (i = 0; i < DPAA_BPS_NUM; i++)
-		bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u\n",
-				  priv->dpaa_bps[i]->bpid);
+	bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u\n",
+			  priv->dpaa_bp->bpid);

	return bytes;
}
+31 −37
@@ -47,6 +47,8 @@ static const char dpaa_stats_percpu[][ETH_GSTRING_LEN] = {
	"tx S/G",
	"tx error",
	"rx error",
+	"rx dropped",
+	"tx dropped",
};

static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
@@ -78,10 +80,8 @@ static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
static int dpaa_get_link_ksettings(struct net_device *net_dev,
				   struct ethtool_link_ksettings *cmd)
{
-	if (!net_dev->phydev) {
-		netdev_dbg(net_dev, "phy device not initialized\n");
+	if (!net_dev->phydev)
		return 0;
-	}

	phy_ethtool_ksettings_get(net_dev->phydev, cmd);

@@ -93,10 +93,8 @@ static int dpaa_set_link_ksettings(struct net_device *net_dev,
{
	int err;

-	if (!net_dev->phydev) {
-		netdev_err(net_dev, "phy device not initialized\n");
+	if (!net_dev->phydev)
		return -ENODEV;
-	}

	err = phy_ethtool_ksettings_set(net_dev->phydev, cmd);
	if (err < 0)
@@ -140,10 +138,8 @@ static int dpaa_nway_reset(struct net_device *net_dev)
{
	int err;

-	if (!net_dev->phydev) {
-		netdev_err(net_dev, "phy device not initialized\n");
+	if (!net_dev->phydev)
		return -ENODEV;
-	}

	err = 0;
	if (net_dev->phydev->autoneg) {
@@ -165,10 +161,8 @@ static void dpaa_get_pauseparam(struct net_device *net_dev,
	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;

-	if (!net_dev->phydev) {
-		netdev_err(net_dev, "phy device not initialized\n");
+	if (!net_dev->phydev)
		return;
-	}

	epause->autoneg = mac_dev->autoneg_pause;
	epause->rx_pause = mac_dev->rx_pause_active;
@@ -223,7 +217,7 @@ static int dpaa_get_sset_count(struct net_device *net_dev, int type)
	unsigned int total_stats, num_stats;

	num_stats   = num_online_cpus() + 1;
-	total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM) +
+	total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + 1) +
			DPAA_STATS_GLOBAL_LEN;

	switch (type) {
@@ -235,10 +229,10 @@ static int dpaa_get_sset_count(struct net_device *net_dev, int type)
}

static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
-		       int crr_cpu, u64 *bp_count, u64 *data)
+		       int crr_cpu, u64 bp_count, u64 *data)
{
	int num_values = num_cpus + 1;
-	int crr = 0, j;
+	int crr = 0;

	/* update current CPU's stats and also add them to the total values */
	data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
@@ -262,23 +256,27 @@ static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;

-	for (j = 0; j < DPAA_BPS_NUM; j++) {
-		data[crr * num_values + crr_cpu] = bp_count[j];
-		data[crr++ * num_values + num_cpus] += bp_count[j];
-	}
+	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_dropped;
+	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_dropped;
+
+	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_dropped;
+	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_dropped;
+
+	data[crr * num_values + crr_cpu] = bp_count;
+	data[crr++ * num_values + num_cpus] += bp_count;
}
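
The data[] indexing above lays the results out row-major as
[statistic][per-CPU columns + one TOTAL column], with num_values =
num_cpus + 1. A standalone userspace check of that layout (the sample
values are made up):

#include <stdio.h>

int main(void)
{
	const int num_cpus = 2, num_values = num_cpus + 1;
	unsigned long long data[2 * 3] = { 0 }; /* 2 stats x (2 CPUs + total) */
	unsigned long long samples[2][2] = { { 5, 7 }, { 1, 2 } };
	int crr, cpu;

	for (cpu = 0; cpu < num_cpus; cpu++) {
		crr = 0;
		/* same pattern as copy_stats(): fill this CPU's cell, then
		 * accumulate into the trailing TOTAL column */
		data[crr * num_values + cpu] = samples[0][cpu];
		data[crr++ * num_values + num_cpus] += samples[0][cpu];
		data[crr * num_values + cpu] = samples[1][cpu];
		data[crr++ * num_values + num_cpus] += samples[1][cpu];
	}

	printf("stat0: cpu0=%llu cpu1=%llu total=%llu\n",
	       data[0], data[1], data[2]);
	printf("stat1: cpu0=%llu cpu1=%llu total=%llu\n",
	       data[3], data[4], data[5]);
	return 0;
}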

static void dpaa_get_ethtool_stats(struct net_device *net_dev,
				   struct ethtool_stats *stats, u64 *data)
{
-	u64 bp_count[DPAA_BPS_NUM], cg_time, cg_num;
	struct dpaa_percpu_priv *percpu_priv;
	struct dpaa_rx_errors rx_errors;
	unsigned int num_cpus, offset;
+	u64 bp_count, cg_time, cg_num;
	struct dpaa_ern_cnt ern_cnt;
	struct dpaa_bp *dpaa_bp;
	struct dpaa_priv *priv;
-	int total_stats, i, j;
+	int total_stats, i;
	bool cg_status;

	total_stats = dpaa_get_sset_count(net_dev, ETH_SS_STATS);
@@ -292,12 +290,10 @@ static void dpaa_get_ethtool_stats(struct net_device *net_dev,

	for_each_online_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
-		for (j = 0; j < DPAA_BPS_NUM; j++) {
-			dpaa_bp = priv->dpaa_bps[j];
-			if (!dpaa_bp->percpu_count)
-				continue;
-			bp_count[j] = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
-		}
+		dpaa_bp = priv->dpaa_bp;
+		if (!dpaa_bp->percpu_count)
+			continue;
+		bp_count = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
		rx_errors.dme += percpu_priv->rx_errors.dme;
		rx_errors.fpe += percpu_priv->rx_errors.fpe;
		rx_errors.fse += percpu_priv->rx_errors.fse;
@@ -315,7 +311,7 @@ static void dpaa_get_ethtool_stats(struct net_device *net_dev,
		copy_stats(percpu_priv, num_cpus, i, bp_count, data);
	}

-	offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM);
+	offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + 1);
	memcpy(data + offset, &rx_errors, sizeof(struct dpaa_rx_errors));

	offset += sizeof(struct dpaa_rx_errors) / sizeof(u64);
@@ -363,18 +359,16 @@ static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
		memcpy(strings, string_cpu, ETH_GSTRING_LEN);
		strings += ETH_GSTRING_LEN;
	}
-	for (i = 0; i < DPAA_BPS_NUM; i++) {
-		for (j = 0; j < num_cpus; j++) {
-			snprintf(string_cpu, ETH_GSTRING_LEN,
-				 "bpool %c [CPU %d]", 'a' + i, j);
-			memcpy(strings, string_cpu, ETH_GSTRING_LEN);
-			strings += ETH_GSTRING_LEN;
-		}
-		snprintf(string_cpu, ETH_GSTRING_LEN, "bpool %c [TOTAL]",
-			 'a' + i);
-		memcpy(strings, string_cpu, ETH_GSTRING_LEN);
-		strings += ETH_GSTRING_LEN;
-	}
+	for (j = 0; j < num_cpus; j++) {
+		snprintf(string_cpu, ETH_GSTRING_LEN,
+			 "bpool [CPU %d]", j);
+		memcpy(strings, string_cpu, ETH_GSTRING_LEN);
+		strings += ETH_GSTRING_LEN;
+	}
+	snprintf(string_cpu, ETH_GSTRING_LEN, "bpool [TOTAL]");
+	memcpy(strings, string_cpu, ETH_GSTRING_LEN);
+	strings += ETH_GSTRING_LEN;

	memcpy(strings, dpaa_stats_global, size);
}

+7 −0
@@ -1749,6 +1749,13 @@ struct qman_portal *qman_get_affine_portal(int cpu)
}
EXPORT_SYMBOL(qman_get_affine_portal);

+int qman_start_using_portal(struct qman_portal *p, struct device *dev)
+{
+	return (!device_link_add(dev, p->config->dev,
+				 DL_FLAG_AUTOREMOVE_CONSUMER)) ? -EINVAL : 0;
+}
+EXPORT_SYMBOL(qman_start_using_portal);
+
int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
{
	return __poll_portal_fast(p, limit);
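
The new helper relies on the driver core's device links: with the
dpaa-eth device as consumer and the portal device as supplier,
DL_FLAG_AUTOREMOVE_CONSUMER makes the core unbind the consumer before
the supplier goes away, which is what removes the interfaces together
with their portal. A kernel-style sketch of the same call seen from the
consumer side (eth_dev and portal_dev are hypothetical names; the
device_link_add() signature and flag are the real API):

static int link_eth_to_portal(struct device *eth_dev,
			      struct device *portal_dev)
{
	struct device_link *link;

	/* the link is dropped automatically when the consumer unbinds */
	link = device_link_add(eth_dev, portal_dev,
			       DL_FLAG_AUTOREMOVE_CONSUMER);

	return link ? 0 : -EINVAL;
}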