Commit 191bd1ca authored by Tudor Ambarus's avatar Tudor Ambarus Committed by Vinod Koul
Browse files

dmaengine: at_xdmac: Fix locking in tasklet



Tasklets run with all the interrupts enabled. This means that we should
replace all the (already present) spin_lock_irqsave() uses in the tasklet
with spin_lock_irq() to protect against being interrupted by an IRQ which
tries to take the same lock (via calls to device_prep_dma_* for example).

spin_lock and spin_lock_bh in tasklets are not enough to protect from IRQs,
update these to spin_lock_irq().

at_xdmac_advance_work() can be called with all the interrupts enabled (when
called from tasklet), or with interrupts disabled (when called from
at_xdmac_issue_pending). Move the locking in the callers to be able to use
spin_lock_irq() and spin_lock_irqsave() for these cases.

Signed-off-by: Tudor Ambarus <tudor.ambarus@microchip.com>
Acked-by: Ludovic Desroches <ludovic.desroches@microchip.com>
Link: https://lore.kernel.org/r/20200123140237.125799-10-tudor.ambarus@microchip.com


Signed-off-by: Vinod Koul <vkoul@kernel.org>
parent 8592f2c8
Loading
Loading
Loading
Loading
+12 −11
Original line number Diff line number Diff line
@@ -1543,9 +1543,6 @@ static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
{
	struct at_xdmac_desc	*desc;
	unsigned long		flags;

	spin_lock_irqsave(&atchan->lock, flags);

	/*
	 * If channel is enabled, do nothing, advance_work will be triggered
@@ -1559,8 +1556,6 @@ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
		if (!desc->active_xfer)
			at_xdmac_start_xfer(atchan, desc);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);
}

static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
@@ -1596,7 +1591,7 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
	if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
		dev_err(chan2dev(&atchan->chan), "request overflow error!!!");

	spin_lock_bh(&atchan->lock);
	spin_lock_irq(&atchan->lock);

	/* Channel must be disabled first as it's not done automatically */
	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
@@ -1607,7 +1602,7 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
				    struct at_xdmac_desc,
				    xfer_node);

	spin_unlock_bh(&atchan->lock);
	spin_unlock_irq(&atchan->lock);

	/* Print bad descriptor's details if needed */
	dev_dbg(chan2dev(&atchan->chan),
@@ -1640,21 +1635,21 @@ static void at_xdmac_tasklet(unsigned long data)
		if (atchan->irq_status & error_mask)
			at_xdmac_handle_error(atchan);

		spin_lock(&atchan->lock);
		spin_lock_irq(&atchan->lock);
		desc = list_first_entry(&atchan->xfers_list,
					struct at_xdmac_desc,
					xfer_node);
		dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
		if (!desc->active_xfer) {
			dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
			spin_unlock(&atchan->lock);
			spin_unlock_irq(&atchan->lock);
			return;
		}

		txd = &desc->tx_dma_desc;

		at_xdmac_remove_xfer(atchan, desc);
		spin_unlock(&atchan->lock);
		spin_unlock_irq(&atchan->lock);

		dma_cookie_complete(txd);
		if (txd->flags & DMA_PREP_INTERRUPT)
@@ -1662,7 +1657,9 @@ static void at_xdmac_tasklet(unsigned long data)

		dma_run_dependencies(txd);

		spin_lock_irq(&atchan->lock);
		at_xdmac_advance_work(atchan);
		spin_unlock_irq(&atchan->lock);
	}
}

@@ -1723,11 +1720,15 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
/*
 * Kick off any queued (non-active) transfers on this channel.
 *
 * Callers may invoke this with interrupts enabled, so take the channel
 * lock with spin_lock_irqsave(); at_xdmac_advance_work() expects to be
 * called with atchan->lock already held.  Cyclic channels are started
 * from their own path, so nothing to do for them here.
 */
static void at_xdmac_issue_pending(struct dma_chan *chan)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	unsigned long flags;

	dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);

	if (!at_xdmac_chan_is_cyclic(atchan)) {
		spin_lock_irqsave(&atchan->lock, flags);
		at_xdmac_advance_work(atchan);
		spin_unlock_irqrestore(&atchan->lock, flags);
	}

	return;
}