Commit 5884e17e authored by Varadarajan Narayanan, committed by Mark Brown
Browse files

spi: qup: allow multiple DMA transactions per spi xfer



Much like the block mode changes, we are breaking up DMA transactions
into 64K chunks so we can reset the QUP engine.

Signed-off-by: Matthew McClintock <mmcclint@codeaurora.org>
Signed-off-by: Varadarajan Narayanan <varada@codeaurora.org>
Signed-off-by: Mark Brown <broonie@kernel.org>
parent a841b24e
Loading
Loading
Loading
Loading
+66 −26
Original line number Diff line number Diff line
@@ -418,12 +418,35 @@ static void spi_qup_dma_terminate(struct spi_master *master,
		dmaengine_terminate_all(master->dma_rx);
}

/*
 * Walk @sgl and determine how many leading entries fit within @max bytes.
 *
 * Returns the combined DMA length (in bytes) of the accepted entries and
 * stores their count in *@nents.  The walk stops early when adding the
 * next entry would either exceed @max or wrap the u32 accumulator.
 */
static u32 spi_qup_sgl_get_nents_len(struct scatterlist *sgl, u32 max,
				     u32 *nents)
{
	struct scatterlist *s = sgl;
	u32 sum = 0;

	*nents = 0;

	while (s) {
		unsigned int len = sg_dma_len(s);

		/* stop on u32 wrap-around as well as on exceeding @max */
		if (sum + len < sum || sum + len > max)
			break;

		sum += len;
		(*nents)++;
		s = sg_next(s);
	}

	return sum;
}

static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
			  unsigned long timeout)
{
	dma_async_tx_callback rx_done = NULL, tx_done = NULL;
	struct spi_master *master = spi->master;
	struct spi_qup *qup = spi_master_get_devdata(master);
	struct scatterlist *tx_sgl, *rx_sgl;
	int ret;

	if (xfer->rx_buf)
@@ -431,6 +454,21 @@ static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
	else if (xfer->tx_buf)
		tx_done = spi_qup_dma_done;

	rx_sgl = xfer->rx_sg.sgl;
	tx_sgl = xfer->tx_sg.sgl;

	do {
		u32 rx_nents, tx_nents;

		if (rx_sgl)
			qup->n_words = spi_qup_sgl_get_nents_len(rx_sgl,
					SPI_MAX_XFER, &rx_nents) / qup->w_size;
		if (tx_sgl)
			qup->n_words = spi_qup_sgl_get_nents_len(tx_sgl,
					SPI_MAX_XFER, &tx_nents) / qup->w_size;
		if (!qup->n_words)
			return -EIO;

		ret = spi_qup_io_config(spi, xfer);
		if (ret)
			return ret;
@@ -438,25 +476,20 @@ static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
		/* before issuing the descriptors, set the QUP to run */
		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
		if (ret) {
		dev_warn(qup->dev, "%s(%d): cannot set RUN state\n",
				__func__, __LINE__);
			dev_warn(qup->dev, "cannot set RUN state\n");
			return ret;
		}

	if (xfer->rx_buf) {
		ret = spi_qup_prep_sg(master, xfer->rx_sg.sgl,
				      xfer->rx_sg.nents, DMA_DEV_TO_MEM,
				      rx_done);
		if (rx_sgl) {
			ret = spi_qup_prep_sg(master, rx_sgl, rx_nents,
					      DMA_DEV_TO_MEM, rx_done);
			if (ret)
				return ret;

			dma_async_issue_pending(master->dma_rx);
		}

	if (xfer->tx_buf) {
		ret = spi_qup_prep_sg(master, xfer->tx_sg.sgl,
				      xfer->tx_sg.nents, DMA_MEM_TO_DEV,
				      tx_done);
		if (tx_sgl) {
			ret = spi_qup_prep_sg(master, tx_sgl, tx_nents,
					      DMA_MEM_TO_DEV, tx_done);
			if (ret)
				return ret;

@@ -466,6 +499,13 @@ static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
		if (!wait_for_completion_timeout(&qup->done, timeout))
			return -ETIMEDOUT;

		for (; rx_sgl && rx_nents--; rx_sgl = sg_next(rx_sgl))
			;
		for (; tx_sgl && tx_nents--; tx_sgl = sg_next(tx_sgl))
			;

	} while (rx_sgl || tx_sgl);

	return 0;
}