Commit 5e9c51b3 authored by David S. Miller
Browse files

Merge branch 'net-DMA-API'



Christoph Hellwig says:

====================
net: don't pass a NULL struct device to DMA API functions v2

We still have a few drivers which pass a NULL struct device pointer
to DMA API functions, which generally is a bad idea as the API
implementations rely on the device not only for ops selection, but
also the dma mask and various other attributes.

This series contains all easy conversions to pass a struct device,
besides that there also is some arch code that needs separate handling,
a driver that should not use the DMA API at all, and one that is
a complete basket case to be dealt with separately.

Changes since v1:
 - fix an inverted ifdef in CAIF
 - update the smc911x changelog
 - split the series, this only contains the networking patches
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 99e13114 0eb1645a
Loading
Loading
Loading
Loading
+16 −14
Original line number Diff line number Diff line
@@ -73,35 +73,37 @@ MODULE_PARM_DESC(spi_down_tail_align, "SPI downlink tail alignment.");
#define LOW_WATER_MARK   100
#define HIGH_WATER_MARK  (LOW_WATER_MARK*5)

#ifdef CONFIG_UML
#ifndef CONFIG_HAS_DMA

/*
 * We sometimes use UML for debugging, but it cannot handle
 * dma_alloc_coherent so we have to wrap it.
 */
static inline void *dma_alloc(dma_addr_t *daddr)
static inline void *dma_alloc(struct cfspi *cfspi, dma_addr_t *daddr)
{
	return kmalloc(SPI_DMA_BUF_LEN, GFP_KERNEL);
}

static inline void dma_free(void *cpu_addr, dma_addr_t handle)
static inline void dma_free(struct cfspi *cfspi, void *cpu_addr,
		dma_addr_t handle)
{
	kfree(cpu_addr);
}

#else

static inline void *dma_alloc(dma_addr_t *daddr)
static inline void *dma_alloc(struct cfspi *cfspi, dma_addr_t *daddr)
{
	return dma_alloc_coherent(NULL, SPI_DMA_BUF_LEN, daddr,
	return dma_alloc_coherent(&cfspi->pdev->dev, SPI_DMA_BUF_LEN, daddr,
				GFP_KERNEL);
}

static inline void dma_free(void *cpu_addr, dma_addr_t handle)
static inline void dma_free(struct cfspi *cfspi, void *cpu_addr,
		dma_addr_t handle)
{
	dma_free_coherent(NULL, SPI_DMA_BUF_LEN, cpu_addr, handle);
	dma_free_coherent(&cfspi->pdev->dev, SPI_DMA_BUF_LEN, cpu_addr, handle);
}
#endif	/* CONFIG_UML */
#endif	/* CONFIG_HAS_DMA */

#ifdef CONFIG_DEBUG_FS

@@ -610,13 +612,13 @@ static int cfspi_init(struct net_device *dev)
	}

	/* Allocate DMA buffers. */
	cfspi->xfer.va_tx[0] = dma_alloc(&cfspi->xfer.pa_tx[0]);
	cfspi->xfer.va_tx[0] = dma_alloc(cfspi, &cfspi->xfer.pa_tx[0]);
	if (!cfspi->xfer.va_tx[0]) {
		res = -ENODEV;
		goto err_dma_alloc_tx_0;
	}

	cfspi->xfer.va_rx = dma_alloc(&cfspi->xfer.pa_rx);
	cfspi->xfer.va_rx = dma_alloc(cfspi, &cfspi->xfer.pa_rx);

	if (!cfspi->xfer.va_rx) {
		res = -ENODEV;
@@ -665,9 +667,9 @@ static int cfspi_init(struct net_device *dev)
	return 0;

 err_create_wq:
	dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
	dma_free(cfspi, cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
 err_dma_alloc_rx:
	dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
	dma_free(cfspi, cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
 err_dma_alloc_tx_0:
	return res;
}
@@ -683,8 +685,8 @@ static void cfspi_uninit(struct net_device *dev)

	cfspi->ndev = NULL;
	/* Free DMA buffers. */
	dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
	dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
	dma_free(cfspi, cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
	dma_free(cfspi, cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
	set_bit(SPI_TERMINATE, &cfspi->state);
	wake_up_interruptible(&cfspi->wait);
	destroy_workqueue(cfspi->wq);
+3 −3
Original line number Diff line number Diff line
@@ -1167,7 +1167,7 @@ static int au1000_probe(struct platform_device *pdev)
	/* Allocate the data buffers
	 * Snooping works fine with eth on all au1xxx
	 */
	aup->vaddr = (u32)dma_alloc_attrs(NULL, MAX_BUF_SIZE *
	aup->vaddr = (u32)dma_alloc_attrs(&pdev->dev, MAX_BUF_SIZE *
					  (NUM_TX_BUFFS + NUM_RX_BUFFS),
					  &aup->dma_addr, 0,
					  DMA_ATTR_NON_CONSISTENT);
@@ -1349,7 +1349,7 @@ err_remap3:
err_remap2:
	iounmap(aup->mac);
err_remap1:
	dma_free_attrs(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
	dma_free_attrs(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
			(void *)aup->vaddr, aup->dma_addr,
			DMA_ATTR_NON_CONSISTENT);
err_vaddr:
@@ -1383,7 +1383,7 @@ static int au1000_remove(struct platform_device *pdev)
		if (aup->tx_db_inuse[i])
			au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);

	dma_free_attrs(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
	dma_free_attrs(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
			(void *)aup->vaddr, aup->dma_addr,
			DMA_ATTR_NON_CONSISTENT);

+4 −4
Original line number Diff line number Diff line
@@ -3673,9 +3673,9 @@ static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb,
		/* Store packet information (to free when Tx completed) */
		lp->skb = skb;
		lp->skb_length = skb->len;
		lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
							DMA_TO_DEVICE);
		if (dma_mapping_error(NULL, lp->skb_physaddr)) {
		lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data,
						  skb->len, DMA_TO_DEVICE);
		if (dma_mapping_error(&lp->pdev->dev, lp->skb_physaddr)) {
			dev_kfree_skb_any(skb);
			dev->stats.tx_dropped++;
			netdev_err(dev, "%s: DMA mapping error\n", __func__);
@@ -3765,7 +3765,7 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
		if (lp->skb) {
			dev_kfree_skb_irq(lp->skb);
			lp->skb = NULL;
			dma_unmap_single(NULL, lp->skb_physaddr,
			dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr,
					 lp->skb_length, DMA_TO_DEVICE);
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += lp->skb_length;
+4 −2
Original line number Diff line number Diff line
@@ -112,10 +112,12 @@ struct ltq_etop_priv {
static int
ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
{
	struct ltq_etop_priv *priv = netdev_priv(ch->netdev);

	ch->skb[ch->dma.desc] = netdev_alloc_skb(ch->netdev, MAX_DMA_DATA_LEN);
	if (!ch->skb[ch->dma.desc])
		return -ENOMEM;
	ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL,
	ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(&priv->pdev->dev,
		ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN,
		DMA_FROM_DEVICE);
	ch->dma.desc_base[ch->dma.desc].addr =
@@ -487,7 +489,7 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
	netif_trans_update(dev);

	spin_lock_irqsave(&priv->lock, flags);
	desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len,
	desc->addr = ((unsigned int) dma_map_single(&priv->pdev->dev, skb->data, len,
						DMA_TO_DEVICE)) - byte_offset;
	wmb();
	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
+7 −4
Original line number Diff line number Diff line
@@ -201,6 +201,7 @@ struct tx_desc {
};

struct pxa168_eth_private {
	struct platform_device *pdev;
	int port_num;		/* User Ethernet port number    */
	int phy_addr;
	int phy_speed;
@@ -331,7 +332,7 @@ static void rxq_refill(struct net_device *dev)
		used_rx_desc = pep->rx_used_desc_q;
		p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
		size = skb_end_pointer(skb) - skb->data;
		p_used_rx_desc->buf_ptr = dma_map_single(NULL,
		p_used_rx_desc->buf_ptr = dma_map_single(&pep->pdev->dev,
							 skb->data,
							 size,
							 DMA_FROM_DEVICE);
@@ -743,7 +744,7 @@ static int txq_reclaim(struct net_device *dev, int force)
				netdev_err(dev, "Error in TX\n");
			dev->stats.tx_errors++;
		}
		dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
		dma_unmap_single(&pep->pdev->dev, addr, count, DMA_TO_DEVICE);
		if (skb)
			dev_kfree_skb_irq(skb);
		released++;
@@ -805,7 +806,7 @@ static int rxq_process(struct net_device *dev, int budget)
		if (rx_next_curr_desc == rx_used_desc)
			pep->rx_resource_err = 1;
		pep->rx_desc_count--;
		dma_unmap_single(NULL, rx_desc->buf_ptr,
		dma_unmap_single(&pep->pdev->dev, rx_desc->buf_ptr,
				 rx_desc->buf_size,
				 DMA_FROM_DEVICE);
		received_packets++;
@@ -1274,7 +1275,8 @@ pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
	length = skb->len;
	pep->tx_skb[tx_index] = skb;
	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
	desc->buf_ptr = dma_map_single(&pep->pdev->dev, skb->data, length,
					DMA_TO_DEVICE);

	skb_tx_timestamp(skb);

@@ -1528,6 +1530,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
	if (err)
		goto err_free_mdio;

	pep->pdev = pdev;
	SET_NETDEV_DEV(dev, &pdev->dev);
	pxa168_init_hw(pep);
	err = register_netdev(dev);
Loading