Commit 13c15e07 authored by Boris Brezillon's avatar Boris Brezillon
Browse files

mtd: spinand: Handle the case where PROGRAM LOAD does not reset the cache



Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
the cache content to 0xFF (depends on vendor implementation), so we
must fill the page cache entirely even if we only want to program the
data portion of the page, otherwise we might corrupt the BBM or user
data previously programmed in OOB area.

Fixes: 7529df46 ("mtd: nand: Add core infrastructure to support SPI NANDs")
Reported-by: Stefan Roese <sr@denx.de>
Cc: <stable@vger.kernel.org>
Signed-off-by: Boris Brezillon <bbrezillon@kernel.org>
Tested-by: Stefan Roese <sr@denx.de>
Reviewed-by: Stefan Roese <sr@denx.de>
Acked-by: Miquel Raynal <miquel.raynal@bootlin.com>
parent 49a57857
Loading
Loading
Loading
Loading
+20 −22
Original line number Original line Diff line number Diff line
@@ -304,24 +304,30 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
	struct nand_device *nand = spinand_to_nand(spinand);
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_page_io_req adjreq = *req;
	struct nand_page_io_req adjreq = *req;
	unsigned int nbytes = 0;
	void *buf = spinand->databuf;
	void *buf = NULL;
	unsigned int nbytes;
	u16 column = 0;
	u16 column = 0;
	int ret;
	int ret;


	memset(spinand->databuf, 0xff,
	/*
	       nanddev_page_size(nand) +
	 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
	       nanddev_per_page_oobsize(nand));
	 * the cache content to 0xFF (depends on vendor implementation), so we

	 * must fill the page cache entirely even if we only want to program
	if (req->datalen) {
	 * the data portion of the page, otherwise we might corrupt the BBM or
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
	 * user data previously programmed in OOB area.
		       req->datalen);
	 */
	nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
	memset(spinand->databuf, 0xff, nbytes);
	adjreq.dataoffs = 0;
	adjreq.dataoffs = 0;
	adjreq.datalen = nanddev_page_size(nand);
	adjreq.datalen = nanddev_page_size(nand);
	adjreq.databuf.out = spinand->databuf;
	adjreq.databuf.out = spinand->databuf;
		nbytes = adjreq.datalen;
	adjreq.ooblen = nanddev_per_page_oobsize(nand);
		buf = spinand->databuf;
	adjreq.ooboffs = 0;
	}
	adjreq.oobbuf.out = spinand->oobbuf;

	if (req->datalen)
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
		       req->datalen);


	if (req->ooblen) {
	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
		if (req->mode == MTD_OPS_AUTO_OOB)
@@ -332,14 +338,6 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
		else
		else
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			       req->ooblen);
			       req->ooblen);

		adjreq.ooblen = nanddev_per_page_oobsize(nand);
		adjreq.ooboffs = 0;
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}
	}


	spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
	spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
@@ -370,8 +368,8 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,


		/*
		/*
		 * We need to use the RANDOM LOAD CACHE operation if there's
		 * We need to use the RANDOM LOAD CACHE operation if there's
		 * more than one iteration, because the LOAD operation resets
		 * more than one iteration, because the LOAD operation might
		 * the cache to 0xff.
		 * reset the cache to 0xff.
		 */
		 */
		if (nbytes) {
		if (nbytes) {
			column = op.addr.val;
			column = op.addr.val;