Commit c7b57274 authored by Thomas Bogendoerfer's avatar Thomas Bogendoerfer Committed by David S. Miller
Browse files

net: sgi: ioc3-eth: allocate space for desc rings only once



Memory for the descriptor rings is allocated/freed when the interface is
brought up/down. Since the size of the rings is not changeable by
hardware, we now allocate the rings during probe and free them when the
device is removed.

Signed-off-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 141a7dbb
Loading
Loading
Loading
Loading
+52 −53
Original line number Diff line number Diff line
@@ -800,45 +800,26 @@ static inline void ioc3_clean_tx_ring(struct ioc3_private *ip)

static void ioc3_free_rings(struct ioc3_private *ip)
{
	struct sk_buff *skb;
	int rx_entry, n_entry;

	if (ip->txr) {
	ioc3_clean_tx_ring(ip);
		free_pages((unsigned long)ip->txr, 2);
		ip->txr = NULL;
	}

	if (ip->rxr) {
	n_entry = ip->rx_ci;
	rx_entry = ip->rx_pi;

	while (n_entry != rx_entry) {
			skb = ip->rx_skbs[n_entry];
			if (skb)
				dev_kfree_skb_any(skb);
		dev_kfree_skb_any(ip->rx_skbs[n_entry]);

		n_entry = (n_entry + 1) & RX_RING_MASK;
	}
		free_page((unsigned long)ip->rxr);
		ip->rxr = NULL;
	}
}

static void ioc3_alloc_rings(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3_erxbuf *rxb;
	unsigned long *rxr;
	int i;

	if (!ip->rxr) {
		/* Allocate and initialize rx ring.  4kb = 512 entries  */
		ip->rxr = (unsigned long *)get_zeroed_page(GFP_ATOMIC);
		rxr = ip->rxr;
		if (!rxr)
			pr_err("%s: get_zeroed_page() failed!\n", __func__);

	/* Now the rx buffers.  The RX ring may be larger but
	 * we only allocate 16 buffers for now.  Need to tune
	 * this for performance and memory later.
@@ -857,22 +838,15 @@ static void ioc3_alloc_rings(struct net_device *dev)
		/* Because we reserve afterwards. */
		skb_put(skb, (1664 + RX_OFFSET));
		rxb = (struct ioc3_erxbuf *)skb->data;
			rxr[i] = cpu_to_be64(ioc3_map(rxb, 1));
		ip->rxr[i] = cpu_to_be64(ioc3_map(rxb, 1));
		skb_reserve(skb, RX_OFFSET);
	}
	ip->rx_ci = 0;
	ip->rx_pi = RX_BUFFS;
	}

	if (!ip->txr) {
		/* Allocate and initialize tx rings.  16kb = 128 bufs.  */
		ip->txr = (struct ioc3_etxd *)__get_free_pages(GFP_KERNEL, 2);
		if (!ip->txr)
			pr_err("%s: __get_free_pages() failed!\n", __func__);
	ip->tx_pi = 0;
	ip->tx_ci = 0;
}
}

static void ioc3_init_rings(struct net_device *dev)
{
@@ -1239,6 +1213,23 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	timer_setup(&ip->ioc3_timer, ioc3_timer, 0);

	ioc3_stop(ip);

	/* Allocate rx ring.  4kb = 512 entries, must be 4kb aligned */
	ip->rxr = (unsigned long *)get_zeroed_page(GFP_KERNEL);
	if (!ip->rxr) {
		pr_err("ioc3-eth: rx ring allocation failed\n");
		err = -ENOMEM;
		goto out_stop;
	}

	/* Allocate tx rings.  16kb = 128 bufs, must be 16kb aligned  */
	ip->txr = (struct ioc3_etxd *)__get_free_pages(GFP_KERNEL, 2);
	if (!ip->txr) {
		pr_err("ioc3-eth: tx ring allocation failed\n");
		err = -ENOMEM;
		goto out_stop;
	}

	ioc3_init(dev);

	ip->pdev = pdev;
@@ -1293,6 +1284,11 @@ out_stop:
	ioc3_stop(ip);
	del_timer_sync(&ip->ioc3_timer);
	ioc3_free_rings(ip);
	if (ip->rxr)
		free_page((unsigned long)ip->rxr);
	if (ip->txr)
		free_pages((unsigned long)ip->txr, 2);
	kfree(ip->txr);
out_res:
	pci_release_regions(pdev);
out_free:
@@ -1310,6 +1306,9 @@ static void ioc3_remove_one(struct pci_dev *pdev)
	struct net_device *dev = pci_get_drvdata(pdev);
	struct ioc3_private *ip = netdev_priv(dev);

	free_page((unsigned long)ip->rxr);
	free_pages((unsigned long)ip->txr, 2);

	unregister_netdev(dev);
	del_timer_sync(&ip->ioc3_timer);