Commit 679cfbf7 authored by Dave Jiang, committed by Vinod Koul
Browse files

dmaengine: IOATDMA: Convert pci_pool_* to dma_pool_*



Converting old pci_pool_* calls to "new" dma_pool_* to make everything
uniform.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parent 92e963f5
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -298,14 +298,14 @@ ioat_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
	dma_addr_t phys;

	ioat_dma = to_ioatdma_device(chan->device);
	hw = pci_pool_alloc(ioat_dma->dma_pool, flags, &phys);
	hw = dma_pool_alloc(ioat_dma->dma_pool, flags, &phys);
	if (!hw)
		return NULL;
	memset(hw, 0, sizeof(*hw));

	desc = kmem_cache_zalloc(ioat_cache, flags);
	if (!desc) {
		pci_pool_free(ioat_dma->dma_pool, hw, phys);
		dma_pool_free(ioat_dma->dma_pool, hw, phys);
		return NULL;
	}

@@ -321,7 +321,7 @@ void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
	struct ioatdma_device *ioat_dma;

	ioat_dma = to_ioatdma_device(chan->device);
	pci_pool_free(ioat_dma->dma_pool, desc->hw, desc->txd.phys);
	dma_pool_free(ioat_dma->dma_pool, desc->hw, desc->txd.phys);
	kmem_cache_free(ioat_cache, desc);
}

+2 −2
Original line number Diff line number Diff line
@@ -76,8 +76,8 @@ enum ioat_irq_mode {
struct ioatdma_device {
	struct pci_dev *pdev;
	void __iomem *reg_base;
	struct pci_pool *dma_pool;
	struct pci_pool *completion_pool;
	struct dma_pool *dma_pool;
	struct dma_pool *completion_pool;
#define MAX_SED_POOLS	5
	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
	struct dma_device dma_dev;
+10 −10
Original line number Diff line number Diff line
@@ -505,7 +505,7 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
	struct device *dev = &pdev->dev;

	/* DMA coherent memory pool for DMA descriptor allocations */
	ioat_dma->dma_pool = pci_pool_create("dma_desc_pool", pdev,
	ioat_dma->dma_pool = dma_pool_create("dma_desc_pool", dev,
					     sizeof(struct ioat_dma_descriptor),
					     64, 0);
	if (!ioat_dma->dma_pool) {
@@ -513,7 +513,7 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
		goto err_dma_pool;
	}

	ioat_dma->completion_pool = pci_pool_create("completion_pool", pdev,
	ioat_dma->completion_pool = dma_pool_create("completion_pool", dev,
						    sizeof(u64),
						    SMP_CACHE_BYTES,
						    SMP_CACHE_BYTES);
@@ -546,9 +546,9 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
err_self_test:
	ioat_disable_interrupts(ioat_dma);
err_setup_interrupts:
	pci_pool_destroy(ioat_dma->completion_pool);
	dma_pool_destroy(ioat_dma->completion_pool);
err_completion_pool:
	pci_pool_destroy(ioat_dma->dma_pool);
	dma_pool_destroy(ioat_dma->dma_pool);
err_dma_pool:
	return err;
}
@@ -559,8 +559,8 @@ static int ioat_register(struct ioatdma_device *ioat_dma)

	if (err) {
		ioat_disable_interrupts(ioat_dma);
		pci_pool_destroy(ioat_dma->completion_pool);
		pci_pool_destroy(ioat_dma->dma_pool);
		dma_pool_destroy(ioat_dma->completion_pool);
		dma_pool_destroy(ioat_dma->dma_pool);
	}

	return err;
@@ -576,8 +576,8 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma)

	dma_async_device_unregister(dma);

	pci_pool_destroy(ioat_dma->dma_pool);
	pci_pool_destroy(ioat_dma->completion_pool);
	dma_pool_destroy(ioat_dma->dma_pool);
	dma_pool_destroy(ioat_dma->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}
@@ -669,7 +669,7 @@ static void ioat_free_chan_resources(struct dma_chan *c)
	kfree(ioat_chan->ring);
	ioat_chan->ring = NULL;
	ioat_chan->alloc_order = 0;
	pci_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
	dma_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
		      ioat_chan->completion_dma);
	spin_unlock_bh(&ioat_chan->prep_lock);
	spin_unlock_bh(&ioat_chan->cleanup_lock);
@@ -701,7 +701,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion =
		pci_pool_alloc(ioat_chan->ioat_dma->completion_pool,
		dma_pool_alloc(ioat_chan->ioat_dma->completion_pool,
			       GFP_KERNEL, &ioat_chan->completion_dma);
	if (!ioat_chan->completion)
		return -ENOMEM;