author    JaimeLiao <jaimeliao.tw@gmail.com>  2023-01-12 10:36:37 +0100
committer Miquel Raynal <miquel.raynal@bootlin.com>  2023-01-13 17:35:55 +0100
commit    003fe4b9545b83cca4f7a7633695d1f69a0b0011 (patch)
tree      2b9108592a58342719143da0ed69f3afa17ee922 /drivers/mtd/nand
parent    b1f9ffbfda07acee9bdbc77416facf606647b523 (diff)
mtd: rawnand: Support for sequential cache reads
Add support for sequential cache reads for controllers using the
generic core helpers for their fast read/write helpers.

Sequential reads may reduce the overhead when accessing physically
contiguous data by loading in cache the next page while the previous
page gets sent out on the NAND bus.

The ONFI specification provides the following additional commands to
handle sequential cached reads:

* 0x31 - READ CACHE SEQUENTIAL:
  Requires the NAND chip to load the next page into cache while keeping
  the current cache available for host reads.
* 0x3F - READ CACHE END:
  Tells the NAND chip this is the end of the sequential cache read; the
  current cache shall remain accessible for the host, but no more
  internal cache loading operation is required.

On the bus, a multi-page read operation is currently handled like this:

00 -- ADDR1 -- 30 -- WAIT_RDY (tR+tRR) -- DATA1_IN
00 -- ADDR2 -- 30 -- WAIT_RDY (tR+tRR) -- DATA2_IN
00 -- ADDR3 -- 30 -- WAIT_RDY (tR+tRR) -- DATA3_IN

Sequential cached reads may instead be achieved with:

00 -- ADDR1 -- 30 -- WAIT_RDY (tR) -- \
        31 -- WAIT_RDY (tRCBSY+tRR) -- DATA1_IN \
        31 -- WAIT_RDY (tRCBSY+tRR) -- DATA2_IN \
        3F -- WAIT_RDY (tRCBSY+tRR) -- DATA3_IN

Below are the read speed test results with regular reads and sequential
cached reads, on an NXP i.MX6 VAR-SOM-SOLO in mapping mode, with a NAND
chip characterized by the following timings:
* tR: 20 µs
* tRCBSY: 5 µs
* tRR: 20 ns
and the following geometry:
* device size: 2 MiB
* eraseblock size: 128 KiB
* page size: 2 KiB

============= Normal read @ 33MHz =================
mtd_speedtest: eraseblock read speed is 15633 KiB/s
mtd_speedtest: page read speed is 15515 KiB/s
mtd_speedtest: 2 page read speed is 15398 KiB/s
===================================================

========= Sequential cache read @ 33MHz ===========
mtd_speedtest: eraseblock read speed is 18285 KiB/s
mtd_speedtest: page read speed is 15875 KiB/s
mtd_speedtest: 2 page read speed is 16253 KiB/s
===================================================

We observe an overall speed improvement of about 5% when reading
2 pages, up to 15% when reading an entire block. This is due to the
~14 µs gained on each additional page read (tR - (tRCBSY + tRR)).

Co-developed-by: Miquel Raynal <miquel.raynal@bootlin.com>
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
Signed-off-by: JaimeLiao <jaimeliao.tw@gmail.com>
Tested-by: Liao Jaime <jaimeliao.tw@gmail.com>
Link: https://lore.kernel.org/linux-mtd/20230112093637.987838-4-miquel.raynal@bootlin.com
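A rough consistency check of those figures, assuming the data-out phase
is identical in both modes: each additional page in a run replaces a
full tR wait with tRCBSY + tRR, saving

    tR - (tRCBSY + tRR) = 20 µs - (5 µs + 20 ns) ≈ 14.98 µs

per page. A 128 KiB eraseblock holds 64 pages of 2 KiB, so a
whole-block read saves about 63 × 14.98 µs ≈ 0.94 ms on the ~8.19 ms
baseline (128 KiB / 15633 KiB/s), predicting roughly 17650 KiB/s, of
the same order as the measured 18285 KiB/s.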
Diffstat (limited to 'drivers/mtd/nand')
-rw-r--r--  drivers/mtd/nand/raw/nand_base.c | 119
1 file changed, 115 insertions(+), 4 deletions(-)
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index e1f74e2fa415..a6af521832aa 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -1208,6 +1208,73 @@ static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
return nand_exec_op(chip, &op);
}
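+/*
+ * Sequential cache read variant of the large-page read operation: the
+ * first page of the run is loaded with READ0/READSTART, each following
+ * page is requested with READ CACHE SEQUENTIAL (0x31) while the
+ * previous one is clocked out, and READ CACHE END (0x3F) closes the
+ * run on the last page.
+ */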
+static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len, bool check_only)
+{
+ const struct nand_interface_config *conf =
+ nand_get_interface_config(chip);
+ u8 addrs[5];
+ struct nand_op_instr start_instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ NAND_OP_ADDR(4, addrs, 0),
+ NAND_OP_CMD(NAND_CMD_READSTART, NAND_COMMON_TIMING_NS(conf, tWB_max)),
+ NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max), 0),
+ NAND_OP_CMD(NAND_CMD_READCACHESEQ, NAND_COMMON_TIMING_NS(conf, tWB_max)),
+ NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
+ NAND_COMMON_TIMING_NS(conf, tRR_min)),
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_op_instr cont_instrs[] = {
+ NAND_OP_CMD(page == chip->cont_read.last_page ?
+ NAND_CMD_READCACHEEND : NAND_CMD_READCACHESEQ,
+ NAND_COMMON_TIMING_NS(conf, tWB_max)),
+ NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
+ NAND_COMMON_TIMING_NS(conf, tRR_min)),
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation start_op = NAND_OPERATION(chip->cur_cs, start_instrs);
+ struct nand_operation cont_op = NAND_OPERATION(chip->cur_cs, cont_instrs);
+ int ret;
+
+ if (!len) {
+ start_op.ninstrs--;
+ cont_op.ninstrs--;
+ }
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ addrs[2] = page;
+ addrs[3] = page >> 8;
+
+ if (chip->options & NAND_ROW_ADDR_3) {
+ addrs[4] = page >> 16;
+ start_instrs[1].ctx.addr.naddrs++;
+ }
+
+ /* Check if cache reads are supported */
+ if (check_only) {
+ if (nand_check_op(chip, &start_op) || nand_check_op(chip, &cont_op))
+ return -EOPNOTSUPP;
+
+ return 0;
+ }
+
+ if (page == chip->cont_read.first_page)
+ return nand_exec_op(chip, &start_op);
+ else
+ return nand_exec_op(chip, &cont_op);
+}
+
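+/* True when a sequential cache read is ongoing and covers @page. */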
+static bool rawnand_cont_read_ongoing(struct nand_chip *chip, unsigned int page)
+{
+ return chip->cont_read.ongoing &&
+ page >= chip->cont_read.first_page &&
+ page <= chip->cont_read.last_page;
+}
+
/**
* nand_read_page_op - Do a READ PAGE operation
* @chip: The NAND chip
@@ -1233,10 +1300,16 @@ int nand_read_page_op(struct nand_chip *chip, unsigned int page,
return -EINVAL;
if (nand_has_exec_op(chip)) {
- if (mtd->writesize > 512)
- return nand_lp_exec_read_page_op(chip, page,
- offset_in_page, buf,
- len);
+ if (mtd->writesize > 512) {
+ if (rawnand_cont_read_ongoing(chip, page))
+ return nand_lp_exec_cont_read_page_op(chip, page,
+ offset_in_page,
+ buf, len, false);
+ else
+ return nand_lp_exec_read_page_op(chip, page,
+ offset_in_page, buf,
+ len);
+ }
return nand_sp_exec_read_page_op(chip, page, offset_in_page,
buf, len);
@@ -3353,6 +3426,27 @@ static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
return NULL;
}
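+/*
+ * A sequential cache read is only worth it when at least two full
+ * pages are requested (three pages worth of data when the read starts
+ * at a non-zero column, since the partial first page cannot be part of
+ * the run). Otherwise fall back to regular page reads.
+ */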
+static void rawnand_enable_cont_reads(struct nand_chip *chip, unsigned int page,
+ u32 readlen, int col)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (!chip->controller->supported_op.cont_read)
+ return;
+
+ if ((col && col + readlen < (3 * mtd->writesize)) ||
+ (!col && readlen < (2 * mtd->writesize))) {
+ chip->cont_read.ongoing = false;
+ return;
+ }
+
+ chip->cont_read.ongoing = true;
+ chip->cont_read.first_page = page;
+ if (col)
+ chip->cont_read.first_page++;
+ chip->cont_read.last_page = page + ((readlen >> chip->page_shift) & chip->pagemask);
+}
+
/**
* nand_setup_read_retry - [INTERN] Set the READ RETRY mode
* @chip: NAND chip object
@@ -3426,6 +3520,8 @@ static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
oob = ops->oobbuf;
oob_required = oob ? 1 : 0;
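+	/* Decide upfront whether this request qualifies for a sequential cache read */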
+ rawnand_enable_cont_reads(chip, page, readlen, col);
+
while (1) {
struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;
@@ -5009,12 +5105,27 @@ static void rawnand_early_check_supported_ops(struct nand_chip *chip)
rawnand_check_data_only_read_support(chip);
}
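+/*
+ * Probe sequential cache read support by asking the controller to
+ * check (not execute) both the start and continue operation sequences.
+ * Chips making use of read retries are excluded from sequential cache
+ * reads.
+ */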
+static void rawnand_check_cont_read_support(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (chip->read_retries)
+ return;
+
+ if (!nand_lp_exec_cont_read_page_op(chip, 0, 0, NULL,
+ mtd->writesize, true))
+ chip->controller->supported_op.cont_read = 1;
+}
+
static void rawnand_late_check_supported_ops(struct nand_chip *chip)
{
/* The supported_op fields should not be set by individual drivers */
+ WARN_ON_ONCE(chip->controller->supported_op.cont_read);
if (!nand_has_exec_op(chip))
return;
+
+ rawnand_check_cont_read_support(chip);
}
/*
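For context, a minimal sketch of the access pattern the new path
accelerates, going through the regular in-kernel MTD API; the function
name and the 8-page length are illustrative, not part of the patch:

#include <linux/mtd/mtd.h>
#include <linux/slab.h>

/*
 * Any read of at least two full pages issued through the MTD core,
 * e.g. via mtd_read(), goes through nand_do_read_ops() and thus
 * benefits from sequential cache reads when the controller supports
 * the operation sequence.
 */
static int demo_multi_page_read(struct mtd_info *mtd)
{
	size_t retlen;
	int ret;
	u8 *buf = kmalloc(8 * mtd->writesize, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* 8 consecutive pages from offset 0: rawnand_enable_cont_reads()
	 * sees readlen >= 2 * writesize and marks the run as ongoing. */
	ret = mtd_read(mtd, 0, 8 * mtd->writesize, &retlen, buf);

	kfree(buf);
	return ret;
}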