Commit a092ab25 authored by Michael Walle, committed by Greg Kroah-Hartman

tty: serial: fsl_lpuart: fix DMA mapping



Use the correct device to request the DMA mapping. Otherwise the IOMMU
doesn't get the mapping and it will generate a page fault.
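
For context, a minimal sketch of the TX mapping pattern the patch applies
("mapped" is an illustrative local name, the rest follows the driver): the
buffer is mapped against the DMA controller's device behind the channel,
because that is the device whose IOMMU domain performs the bus accesses,
not the UART's own struct device.

	struct dma_chan *chan = sport->dma_tx_chan;
	int mapped;

	/* Map for the DMA engine's device, not sport->port.dev; only its
	 * IOMMU domain needs to know about this buffer.
	 */
	mapped = dma_map_sg(chan->device->dev, sgl, sport->dma_tx_nents,
			    DMA_TO_DEVICE);
	if (!mapped)
		return;

	/* ... submit the transfer, then unmap with the same device ... */
	dma_unmap_sg(chan->device->dev, sgl, sport->dma_tx_nents,
		     DMA_TO_DEVICE);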

The error messages look like:
[   19.012140] arm-smmu 5000000.iommu: Unhandled context fault: fsr=0x402, iova=0xbbfff800, fsynr=0x3e0021, cbfrsynra=0x828, cb=9
[   19.023593] arm-smmu 5000000.iommu: Unhandled context fault: fsr=0x402, iova=0xbbfff800, fsynr=0x3e0021, cbfrsynra=0x828, cb=9

This was tested on a custom board with an LS1028A SoC.

Signed-off-by: Michael Walle <michael@walle.cc>
Link: https://lore.kernel.org/r/20200306214433.23215-3-michael@walle.cc


Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 159381df
drivers/tty/serial/fsl_lpuart.c: +28 −20
@@ -409,6 +409,7 @@ static void lpuart_dma_tx(struct lpuart_port *sport)
 	struct circ_buf *xmit = &sport->port.state->xmit;
 	struct scatterlist *sgl = sport->tx_sgl;
 	struct device *dev = sport->port.dev;
+	struct dma_chan *chan = sport->dma_tx_chan;
 	int ret;
 
 	if (sport->dma_tx_in_progress)
@@ -427,17 +428,19 @@ static void lpuart_dma_tx(struct lpuart_port *sport)
 		sg_set_buf(sgl + 1, xmit->buf, xmit->head);
 	}
 
-	ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
+	ret = dma_map_sg(chan->device->dev, sgl, sport->dma_tx_nents,
+			 DMA_TO_DEVICE);
 	if (!ret) {
 		dev_err(dev, "DMA mapping error for TX.\n");
 		return;
 	}
 
-	sport->dma_tx_desc = dmaengine_prep_slave_sg(sport->dma_tx_chan, sgl,
+	sport->dma_tx_desc = dmaengine_prep_slave_sg(chan, sgl,
					ret, DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT);
 	if (!sport->dma_tx_desc) {
-		dma_unmap_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
+		dma_unmap_sg(chan->device->dev, sgl, sport->dma_tx_nents,
+			      DMA_TO_DEVICE);
 		dev_err(dev, "Cannot prepare TX slave DMA!\n");
 		return;
 	}
@@ -446,7 +449,7 @@ static void lpuart_dma_tx(struct lpuart_port *sport)
 	sport->dma_tx_desc->callback_param = sport;
 	sport->dma_tx_in_progress = true;
 	sport->dma_tx_cookie = dmaengine_submit(sport->dma_tx_desc);
-	dma_async_issue_pending(sport->dma_tx_chan);
+	dma_async_issue_pending(chan);
 }
 
 static bool lpuart_stopped_or_empty(struct uart_port *port)
@@ -459,11 +462,13 @@ static void lpuart_dma_tx_complete(void *arg)
 	struct lpuart_port *sport = arg;
 	struct scatterlist *sgl = &sport->tx_sgl[0];
 	struct circ_buf *xmit = &sport->port.state->xmit;
+	struct dma_chan *chan = sport->dma_tx_chan;
 	unsigned long flags;
 
 	spin_lock_irqsave(&sport->port.lock, flags);
 
-	dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
+	dma_unmap_sg(chan->device->dev, sgl, sport->dma_tx_nents,
+		     DMA_TO_DEVICE);
 
 	xmit->tail = (xmit->tail + sport->dma_tx_bytes) & (UART_XMIT_SIZE - 1);
 
@@ -529,15 +534,16 @@ static bool lpuart_is_32(struct lpuart_port *sport)
 static void lpuart_flush_buffer(struct uart_port *port)
 {
 	struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
+	struct dma_chan *chan = sport->dma_tx_chan;
 	u32 val;
 
 	if (sport->lpuart_dma_tx_use) {
 		if (sport->dma_tx_in_progress) {
-			dma_unmap_sg(sport->port.dev, &sport->tx_sgl[0],
+			dma_unmap_sg(chan->device->dev, &sport->tx_sgl[0],
				sport->dma_tx_nents, DMA_TO_DEVICE);
 			sport->dma_tx_in_progress = false;
 		}
-		dmaengine_terminate_all(sport->dma_tx_chan);
+		dmaengine_terminate_all(chan);
 	}
 
 	if (lpuart_is_32(sport)) {
@@ -993,6 +999,7 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
 	struct tty_port *port = &sport->port.state->port;
 	struct dma_tx_state state;
 	enum dma_status dmastat;
+	struct dma_chan *chan = sport->dma_rx_chan;
 	struct circ_buf *ring = &sport->rx_ring;
 	unsigned long flags;
 	int count = 0;
@@ -1053,10 +1060,7 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
 
 	spin_lock_irqsave(&sport->port.lock, flags);
 
-	dmastat = dmaengine_tx_status(sport->dma_rx_chan,
-				sport->dma_rx_cookie,
-				&state);
-
+	dmastat = dmaengine_tx_status(chan, sport->dma_rx_cookie, &state);
 	if (dmastat == DMA_ERROR) {
 		dev_err(sport->port.dev, "Rx DMA transfer failed!\n");
 		spin_unlock_irqrestore(&sport->port.lock, flags);
@@ -1064,7 +1068,8 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
 	}
 
 	/* CPU claims ownership of RX DMA buffer */
-	dma_sync_sg_for_cpu(sport->port.dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
+	dma_sync_sg_for_cpu(chan->device->dev, &sport->rx_sgl, 1,
+			    DMA_FROM_DEVICE);
 
 	/*
 	 * ring->head points to the end of data already written by the DMA.
@@ -1106,7 +1111,7 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
 		sport->port.icount.rx += count;
 	}
 
-	dma_sync_sg_for_device(sport->port.dev, &sport->rx_sgl, 1,
+	dma_sync_sg_for_device(chan->device->dev, &sport->rx_sgl, 1,
			       DMA_FROM_DEVICE);
 
 	spin_unlock_irqrestore(&sport->port.lock, flags);
@@ -1138,6 +1143,7 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
 	struct tty_port *port = &sport->port.state->port;
 	struct tty_struct *tty = port->tty;
 	struct ktermios *termios = &tty->termios;
+	struct dma_chan *chan = sport->dma_rx_chan;
 
 	baud = tty_get_baud_rate(tty);
 
@@ -1159,7 +1165,8 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
 		return -ENOMEM;
 
 	sg_init_one(&sport->rx_sgl, ring->buf, sport->rx_dma_rng_buf_len);
-	nent = dma_map_sg(sport->port.dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
+	nent = dma_map_sg(chan->device->dev, &sport->rx_sgl, 1,
+			  DMA_FROM_DEVICE);
 
 	if (!nent) {
 		dev_err(sport->port.dev, "DMA Rx mapping error\n");
@@ -1170,7 +1177,7 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
 	dma_rx_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
 	dma_rx_sconfig.src_maxburst = 1;
 	dma_rx_sconfig.direction = DMA_DEV_TO_MEM;
-	ret = dmaengine_slave_config(sport->dma_rx_chan, &dma_rx_sconfig);
+	ret = dmaengine_slave_config(chan, &dma_rx_sconfig);
 
 	if (ret < 0) {
 		dev_err(sport->port.dev,
@@ -1178,7 +1185,7 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
 		return ret;
 	}
 
-	sport->dma_rx_desc = dmaengine_prep_dma_cyclic(sport->dma_rx_chan,
+	sport->dma_rx_desc = dmaengine_prep_dma_cyclic(chan,
				 sg_dma_address(&sport->rx_sgl),
				 sport->rx_sgl.length,
				 sport->rx_sgl.length / 2,
@@ -1192,7 +1199,7 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
 	sport->dma_rx_desc->callback = lpuart_dma_rx_complete;
 	sport->dma_rx_desc->callback_param = sport;
 	sport->dma_rx_cookie = dmaengine_submit(sport->dma_rx_desc);
-	dma_async_issue_pending(sport->dma_rx_chan);
+	dma_async_issue_pending(chan);
 
 	if (lpuart_is_32(sport)) {
 		unsigned long temp = lpuart32_read(&sport->port, UARTBAUD);
@@ -1210,11 +1217,12 @@ static void lpuart_dma_rx_free(struct uart_port *port)
 {
 	struct lpuart_port *sport = container_of(port,
					struct lpuart_port, port);
+	struct dma_chan *chan = sport->dma_rx_chan;
 
-	if (sport->dma_rx_chan)
-		dmaengine_terminate_all(sport->dma_rx_chan);
+	if (chan)
+		dmaengine_terminate_all(chan);
 
-	dma_unmap_sg(sport->port.dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
+	dma_unmap_sg(chan->device->dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
 	kfree(sport->rx_ring.buf);
 	sport->rx_ring.tail = 0;
 	sport->rx_ring.head = 0;