Commit 055128ee authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tag 'dmaengine-5.2-rc1' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine updates from Vinod Koul:

 - Updates to stm32 dma residue calculations

 - Interleave dma capability to axi-dmac and support for ZynqMP arch

 - Rework of channel assignment for rcar dma

 - Debugfs for pl330 driver

 - Support for Tegra186/Tegra194, refactoring for new chips and support
   for pause/resume

 - Updates to axi-dmac, bcm2835, fsl-edma, idma64, imx-sdma, rcar-dmac,
   stm32-dma etc

 - dev_get_drvdata() updates on a few drivers

* tag 'dmaengine-5.2-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (34 commits)
  dmaengine: tegra210-adma: restore channel status
  dmaengine: tegra210-dma: free dma controller in remove()
  dmaengine: tegra210-adma: add pause/resume support
  dmaengine: tegra210-adma: add support for Tegra186/Tegra194
  Documentation: DT: Add compatibility binding for Tegra186
  dmaengine: tegra210-adma: prepare for supporting newer Tegra chips
  dmaengine: at_xdmac: remove a stray bottom half unlock
  dmaengine: fsl-edma: Adjust indentation
  dmaengine: fsl-edma: Fix typo in Vybrid name
  dmaengine: stm32-dma: fix residue calculation in stm32-dma
  dmaengine: nbpfaxi: Use dev_get_drvdata()
  dmaengine: bcm-sba-raid: Use dev_get_drvdata()
  dmaengine: stm32-dma: Fix unsigned variable compared with zero
  dmaengine: stm32-dma: use platform_get_irq()
  dmaengine: rcar-dmac: Update copyright information
  dmaengine: imx-sdma: Only check ratio on parts that support 1:1
  dmaengine: xgene-dma: fix spelling mistake "descripto" -> "descriptor"
  dmaengine: idma64: Move driver name to the header
  dmaengine: bcm2835: Drop duplicate capability setting.
  dmaengine: pl330: _stop: clear interrupt status
  ...
parents ddab5337 f33e7bb3
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -18,7 +18,6 @@ Required properties for adi,channels sub-node:

Required channel sub-node properties:
 - reg: Which channel this node refers to.
 - adi,length-width: Width of the DMA transfer length register.
 - adi,source-bus-width,
   adi,destination-bus-width: Width of the source or destination bus in bits.
 - adi,source-bus-type,
@@ -28,7 +27,8 @@ Required channel sub-node properties:
	1 (AXI_DMAC_TYPE_AXI_STREAM): Streaming AXI interface
	2 (AXI_DMAC_TYPE_AXI_FIFO): FIFO interface

Optional channel properties:
Deprecated optional channel properties:
 - adi,length-width: Width of the DMA transfer length register.
 - adi,cyclic: Must be set if the channel supports hardware cyclic DMA
   transfers.
 - adi,2d: Must be set if the channel supports hardware 2D DMA transfers.
+3 −1
Original line number Diff line number Diff line
@@ -4,7 +4,9 @@ The Tegra Audio DMA controller that is used for transferring data
between system memory and the Audio Processing Engine (APE).

Required properties:
- compatible: Must be "nvidia,tegra210-adma".
- compatible: Should contain one of the following:
  - "nvidia,tegra210-adma": for Tegra210
  - "nvidia,tegra186-adma": for Tegra186 and Tegra194
- reg: Should contain DMA registers location and length. This should be
  a single entry that includes all of the per-channel registers in one
  contiguous bank.
+1 −1
Original line number Diff line number Diff line
@@ -99,7 +99,7 @@ config AT_XDMAC

config AXI_DMAC
	tristate "Analog Devices AXI-DMAC DMA support"
	depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_SOCFPGA || COMPILE_TEST
	depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_SOCFPGA || COMPILE_TEST
	select DMA_ENGINE
	select DMA_VIRTUAL_CHANNELS
	help
+16 −6
Original line number Diff line number Diff line
@@ -254,6 +254,7 @@ enum pl08x_dma_chan_state {
 * @slave: whether this channel is a device (slave) or for memcpy
 * @signal: the physical DMA request signal which this channel is using
 * @mux_use: count of descriptors using this DMA request signal setting
 * @waiting_at: time in jiffies when this channel moved to waiting state
 */
struct pl08x_dma_chan {
	struct virt_dma_chan vc;
@@ -267,6 +268,7 @@ struct pl08x_dma_chan {
	bool slave;
	int signal;
	unsigned mux_use;
	unsigned long waiting_at;
};

/**
@@ -875,6 +877,7 @@ static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
	if (!ch) {
		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
		plchan->state = PL08X_CHAN_WAITING;
		plchan->waiting_at = jiffies;
		return;
	}

@@ -913,22 +916,29 @@ static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_dma_chan *p, *next;

	unsigned long waiting_at;
 retry:
	next = NULL;
	waiting_at = jiffies;

	/* Find a waiting virtual channel for the next transfer. */
	/*
	 * Find a waiting virtual channel for the next transfer.
	 * To be fair, time when each channel reached waiting state is compared
	 * to select channel that is waiting for the longest time.
	 */
	list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
		if (p->state == PL08X_CHAN_WAITING) {
		if (p->state == PL08X_CHAN_WAITING &&
		    p->waiting_at <= waiting_at) {
			next = p;
			break;
			waiting_at = p->waiting_at;
		}

	if (!next && pl08x->has_slave) {
		list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
			if (p->state == PL08X_CHAN_WAITING) {
			if (p->state == PL08X_CHAN_WAITING &&
			    p->waiting_at <= waiting_at) {
				next = p;
				break;
				waiting_at = p->waiting_at;
			}
	}

+59 −8
Original line number Diff line number Diff line
@@ -308,6 +308,11 @@ static inline int at_xdmac_csize(u32 maxburst)
	return csize;
};

/*
 * Tell whether @cfg describes a peripheral-synchronized transfer, i.e.
 * whether the AT_XDMAC_CC_TYPE_PER_TRAN bit is set in the channel
 * configuration word.
 */
static inline bool at_xdmac_chan_is_peripheral_xfer(u32 cfg)
{
	return (cfg & AT_XDMAC_CC_TYPE_PER_TRAN) != 0;
}

static inline u8 at_xdmac_get_dwidth(u32 cfg)
{
	return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
@@ -389,7 +394,13 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
		 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));

	at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
	reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE | AT_XDMAC_CIE_ROIE;
	reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE;
	/*
	 * Request Overflow Error is only for peripheral synchronized transfers
	 */
	if (at_xdmac_chan_is_peripheral_xfer(first->lld.mbr_cfg))
		reg |= AT_XDMAC_CIE_ROIE;

	/*
	 * There is no end of list when doing cyclic dma, we need to get
	 * an interrupt after each periods.
@@ -1575,6 +1586,46 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
		dmaengine_desc_get_callback_invoke(txd, NULL);
}

/*
 * at_xdmac_handle_error - report and recover from a channel error interrupt.
 * @atchan: channel whose irq_status carries one or more error bits
 *
 * Logs which error condition(s) fired (read bus error, write bus error,
 * request overflow), force-disables the channel, and dumps the offending
 * descriptor at debug level. Called from the tasklet when irq_status
 * matches the error mask; descriptor completion continues in the caller.
 */
static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
{
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
	struct at_xdmac_desc	*bad_desc;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to continue with other
	 * descriptors queued (if any).
	 */
	if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
		dev_err(chan2dev(&atchan->chan), "read bus error!!!");
	if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
		dev_err(chan2dev(&atchan->chan), "write bus error!!!");
	if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
		dev_err(chan2dev(&atchan->chan), "request overflow error!!!");

	spin_lock_bh(&atchan->lock);

	/* Channel must be disabled first as it's not done automatically */
	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
	/* Busy-wait until the global status register confirms the disable. */
	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
		cpu_relax();

	/* The faulting descriptor is the head of the active transfer list. */
	bad_desc = list_first_entry(&atchan->xfers_list,
				    struct at_xdmac_desc,
				    xfer_node);

	spin_unlock_bh(&atchan->lock);

	/* Print bad descriptor's details if needed */
	dev_dbg(chan2dev(&atchan->chan),
		"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
		__func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da,
		bad_desc->lld.mbr_ubc);

	/* Then continue with usual descriptor management */
}

static void at_xdmac_tasklet(unsigned long data)
{
	struct at_xdmac_chan	*atchan = (struct at_xdmac_chan *)data;
@@ -1594,19 +1645,19 @@ static void at_xdmac_tasklet(unsigned long data)
		   || (atchan->irq_status & error_mask)) {
		struct dma_async_tx_descriptor  *txd;

		if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
			dev_err(chan2dev(&atchan->chan), "read bus error!!!");
		if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
			dev_err(chan2dev(&atchan->chan), "write bus error!!!");
		if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
			dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
		if (atchan->irq_status & error_mask)
			at_xdmac_handle_error(atchan);

		spin_lock(&atchan->lock);
		desc = list_first_entry(&atchan->xfers_list,
					struct at_xdmac_desc,
					xfer_node);
		dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
		BUG_ON(!desc->active_xfer);
		if (!desc->active_xfer) {
			dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
			spin_unlock(&atchan->lock);
			return;
		}

		txd = &desc->tx_dma_desc;

Loading