Commit 981d1aa0 authored by Boris Brezillon, committed by Miquel Raynal

mtd: spinand: Use the spi-mem dirmap API

Make use of the spi-mem direct mapping API to let advanced controllers
optimize read/write operations when they support direct mapping.

Signed-off-by: Boris Brezillon <bbrezillon@kernel.org>
Cc: Stefan Roese <sr@denx.de>
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
Tested-by: Stefan Roese <sr@denx.de>
parent 9e98c678
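For context: the spi-mem dirmap API used by this patch pairs a one-time descriptor creation (from an op template plus an offset/length window) with reads and writes issued through that descriptor; controllers without direct-mapping support transparently fall back to regular spi_mem_exec_op() calls inside the spi-mem core. Below is a minimal sketch of that create-then-loop pattern, not part of this commit; the helpers my_create_read_dirmap() and my_dirmap_read() are hypothetical names for illustration only.

/*
 * Minimal sketch (not from this commit): typical consumer usage of the
 * spi-mem dirmap API. my_create_read_dirmap() and my_dirmap_read() are
 * hypothetical helpers.
 */
#include <linux/errno.h>
#include <linux/spi/spi-mem.h>

static struct spi_mem_dirmap_desc *
my_create_read_dirmap(struct spi_mem *mem, const struct spi_mem_op *tmpl,
		      u64 offset, u64 length)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = *tmpl,	/* op replayed when no HW mapping */
		.offset = offset,	/* start of the mapped region */
		.length = length,	/* size of the mapped region */
	};

	/*
	 * Returns a valid descriptor or an ERR_PTR(); when the controller
	 * lacks dirmap support, accesses through the descriptor fall back
	 * to spi_mem_exec_op() internally.
	 */
	return devm_spi_mem_dirmap_create(&mem->spi->dev, mem, &info);
}

static int my_dirmap_read(struct spi_mem_dirmap_desc *desc, u64 offs,
			  size_t len, void *buf)
{
	ssize_t ret;

	/*
	 * spi_mem_dirmap_read() may transfer fewer bytes than requested,
	 * hence the loop -- the same convention the patched
	 * spinand_read_from_cache_op() follows.
	 */
	while (len) {
		ret = spi_mem_dirmap_read(desc, offs, len, buf);
		if (ret < 0)
			return ret;
		if (!ret || ret > len)
			return -EIO;

		offs += ret;
		buf += ret;
		len -= ret;
	}

	return 0;
}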
drivers/mtd/nand/spi/core.c  +84 −84
@@ -19,21 +19,6 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/spi-mem.h>
 
-static void spinand_cache_op_adjust_colum(struct spinand_device *spinand,
-					  const struct nand_page_io_req *req,
-					  u16 *column)
-{
-	struct nand_device *nand = spinand_to_nand(spinand);
-	unsigned int shift;
-
-	if (nand->memorg.planes_per_lun < 2)
-		return;
-
-	/* The plane number is passed in MSB just above the column address */
-	shift = fls(nand->memorg.pagesize);
-	*column |= req->pos.plane << shift;
-}
-
 static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
 {
 	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
@@ -227,27 +212,21 @@ static int spinand_load_page_op(struct spinand_device *spinand,
 static int spinand_read_from_cache_op(struct spinand_device *spinand,
 				      const struct nand_page_io_req *req)
 {
-	struct spi_mem_op op = *spinand->op_templates.read_cache;
 	struct nand_device *nand = spinand_to_nand(spinand);
 	struct mtd_info *mtd = nanddev_to_mtd(nand);
-	struct nand_page_io_req adjreq = *req;
+	struct spi_mem_dirmap_desc *rdesc;
 	unsigned int nbytes = 0;
 	void *buf = NULL;
 	u16 column = 0;
-	int ret;
+	ssize_t ret;
 
 	if (req->datalen) {
-		adjreq.datalen = nanddev_page_size(nand);
-		adjreq.dataoffs = 0;
-		adjreq.databuf.in = spinand->databuf;
 		buf = spinand->databuf;
-		nbytes = adjreq.datalen;
+		nbytes = nanddev_page_size(nand);
+		column = 0;
 	}
 
 	if (req->ooblen) {
-		adjreq.ooblen = nanddev_per_page_oobsize(nand);
-		adjreq.ooboffs = 0;
-		adjreq.oobbuf.in = spinand->oobbuf;
 		nbytes += nanddev_per_page_oobsize(nand);
 		if (!buf) {
 			buf = spinand->oobbuf;
@@ -255,28 +234,19 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
 		}
 	}
 
-	spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
-	op.addr.val = column;
+	rdesc = spinand->dirmaps[req->pos.plane].rdesc;
 
-	/*
-	 * Some controllers are limited in term of max RX data size. In this
-	 * case, just repeat the READ_CACHE operation after updating the
-	 * column.
-	 */
 	while (nbytes) {
-		op.data.buf.in = buf;
-		op.data.nbytes = nbytes;
-		ret = spi_mem_adjust_op_size(spinand->spimem, &op);
-		if (ret)
+		ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
+		if (ret < 0)
 			return ret;
 
-		ret = spi_mem_exec_op(spinand->spimem, &op);
-		if (ret)
-			return ret;
+		if (!ret || ret > nbytes)
+			return -EIO;
 
-		buf += op.data.nbytes;
-		nbytes -= op.data.nbytes;
-		op.addr.val += op.data.nbytes;
+		nbytes -= ret;
+		column += ret;
+		buf += ret;
 	}
 
 	if (req->datalen)
@@ -300,14 +270,12 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
 static int spinand_write_to_cache_op(struct spinand_device *spinand,
 				     const struct nand_page_io_req *req)
 {
-	struct spi_mem_op op = *spinand->op_templates.write_cache;
 	struct nand_device *nand = spinand_to_nand(spinand);
 	struct mtd_info *mtd = nanddev_to_mtd(nand);
-	struct nand_page_io_req adjreq = *req;
+	struct spi_mem_dirmap_desc *wdesc;
+	unsigned int nbytes, column = 0;
 	void *buf = spinand->databuf;
-	unsigned int nbytes;
-	u16 column = 0;
-	int ret;
+	ssize_t ret;
 
 	/*
 	 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
@@ -318,12 +286,6 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
 	 */
 	nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
 	memset(spinand->databuf, 0xff, nbytes);
-	adjreq.dataoffs = 0;
-	adjreq.datalen = nanddev_page_size(nand);
-	adjreq.databuf.out = spinand->databuf;
-	adjreq.ooblen = nanddev_per_page_oobsize(nand);
-	adjreq.ooboffs = 0;
-	adjreq.oobbuf.out = spinand->oobbuf;
 
 	if (req->datalen)
 		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
@@ -340,42 +302,19 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
 			       req->ooblen);
 	}
 
-	spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
-
-	op = *spinand->op_templates.write_cache;
-	op.addr.val = column;
+	wdesc = spinand->dirmaps[req->pos.plane].wdesc;
 
-	/*
-	 * Some controllers are limited in term of max TX data size. In this
-	 * case, split the operation into one LOAD CACHE and one or more
-	 * LOAD RANDOM CACHE.
-	 */
 	while (nbytes) {
-		op.data.buf.out = buf;
-		op.data.nbytes = nbytes;
-
-		ret = spi_mem_adjust_op_size(spinand->spimem, &op);
-		if (ret)
-			return ret;
-
-		ret = spi_mem_exec_op(spinand->spimem, &op);
-		if (ret)
+		ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
+		if (ret < 0)
 			return ret;
 
-		buf += op.data.nbytes;
-		nbytes -= op.data.nbytes;
-		op.addr.val += op.data.nbytes;
+		if (!ret || ret > nbytes)
+			return -EIO;
 
-		/*
-		 * We need to use the RANDOM LOAD CACHE operation if there's
-		 * more than one iteration, because the LOAD operation might
-		 * reset the cache to 0xff.
-		 */
-		if (nbytes) {
-			column = op.addr.val;
-			op = *spinand->op_templates.update_cache;
-			op.addr.val = column;
-		}
+		nbytes -= ret;
+		column += ret;
+		buf += ret;
 	}
 
 	return 0;
@@ -755,6 +694,59 @@ static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
 	return ret;
 }
 
+static int spinand_create_dirmap(struct spinand_device *spinand,
+				 unsigned int plane)
+{
+	struct nand_device *nand = spinand_to_nand(spinand);
+	struct spi_mem_dirmap_info info = {
+		.length = nanddev_page_size(nand) +
+			  nanddev_per_page_oobsize(nand),
+	};
+	struct spi_mem_dirmap_desc *desc;
+
+	/* The plane number is passed in MSB just above the column address */
+	info.offset = plane << fls(nand->memorg.pagesize);
+
+	info.op_tmpl = *spinand->op_templates.update_cache;
+	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
+					  spinand->spimem, &info);
+	if (IS_ERR(desc))
+		return PTR_ERR(desc);
+
+	spinand->dirmaps[plane].wdesc = desc;
+
+	info.op_tmpl = *spinand->op_templates.read_cache;
+	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
+					  spinand->spimem, &info);
+	if (IS_ERR(desc))
+		return PTR_ERR(desc);
+
+	spinand->dirmaps[plane].rdesc = desc;
+
+	return 0;
+}
+
+static int spinand_create_dirmaps(struct spinand_device *spinand)
+{
+	struct nand_device *nand = spinand_to_nand(spinand);
+	int i, ret;
+
+	spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
+					sizeof(*spinand->dirmaps) *
+					nand->memorg.planes_per_lun,
+					GFP_KERNEL);
+	if (!spinand->dirmaps)
+		return -ENOMEM;
+
+	for (i = 0; i < nand->memorg.planes_per_lun; i++) {
+		ret = spinand_create_dirmap(spinand, i);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 static const struct nand_ops spinand_ops = {
 	.erase = spinand_erase,
 	.markbad = spinand_markbad,
@@ -1012,6 +1004,14 @@ static int spinand_init(struct spinand_device *spinand)
 		goto err_free_bufs;
 	}
 
+	ret = spinand_create_dirmaps(spinand);
+	if (ret) {
+		dev_err(dev,
+			"Failed to create direct mappings for read/write operations (err = %d)\n",
+			ret);
+		goto err_manuf_cleanup;
+	}
+
 	/* After power up, all blocks are locked, so unlock them here. */
 	for (i = 0; i < nand->memorg.ntargets; i++) {
 		ret = spinand_select_target(spinand, i);
include/linux/mtd/spinand.h  +7 −0
@@ -302,6 +302,11 @@ struct spinand_info {
 		__VA_ARGS__						\
 	}
 
+struct spinand_dirmap {
+	struct spi_mem_dirmap_desc *wdesc;
+	struct spi_mem_dirmap_desc *rdesc;
+};
+
 /**
  * struct spinand_device - SPI NAND device instance
  * @base: NAND device instance
@@ -341,6 +346,8 @@ struct spinand_device {
 		const struct spi_mem_op *update_cache;
 	} op_templates;
 
+	struct spinand_dirmap *dirmaps;
+
 	int (*select_target)(struct spinand_device *spinand,
 			     unsigned int target);
 	unsigned int cur_target;