Diffstat (limited to 'drivers/mtd')
 drivers/mtd/chips/cfi_probe.c       |   45
 drivers/mtd/devices/m25p80.c        |    1
 drivers/mtd/nand/raw/Kconfig        |   14
 drivers/mtd/nand/raw/Makefile       |    2
 drivers/mtd/nand/raw/arasan_nand.c  | 1465
 drivers/mtd/nand/raw/nand_base.c    |   15
 drivers/mtd/nand/raw/nand_onfi.c    |    8
 drivers/mtd/nand/raw/nand_timings.c |    6
 drivers/mtd/nand/raw/pl353_nand.c   | 1398
 drivers/mtd/spi-nor/spi-nor.c       |  774
 10 files changed, 3655 insertions(+), 73 deletions(-)
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
index cf426956454c..626321f8ba94 100644
--- a/drivers/mtd/chips/cfi_probe.c
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -198,6 +198,9 @@ static int __xipram cfi_chip_setup(struct map_info *map,
__u32 base = 0;
int num_erase_regions = cfi_read_query(map, base + (0x10 + 28)*ofs_factor);
int i;
+ int extendedId1 = 0;
+ int extendedId2 = 0;
+ int extendedId3 = 0;
int addr_unlock1 = 0x555, addr_unlock2 = 0x2AA;
xip_enable(base, map, cfi);
@@ -222,6 +225,38 @@ static int __xipram cfi_chip_setup(struct map_info *map,
for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++)
((unsigned char *)cfi->cfiq)[i] = cfi_read_query(map,base + (0x10 + i)*ofs_factor);
+ /* Note we put the device back into Read Mode BEFORE going into Auto
+ * Select Mode, as some devices support nesting of modes, others
+ * don't. This way should always work.
+ * On cmdset 0001 the writes of 0xaa and 0x55 are not needed, and
+ * so should be treated as nops or illegal (and so put the device
+ * back into Read Mode, which is a nop in this case).
+ */
+ cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xaa, 0x555, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL);
+ cfi->mfr = cfi_read_query16(map, base);
+ cfi->id = cfi_read_query16(map, base + ofs_factor);
+
+ /* Get device ID cycle 1,2,3 for Numonyx/ST devices */
+ if ((cfi->mfr == CFI_MFR_INTEL || cfi->mfr == CFI_MFR_ST)
+ && ((cfi->id & 0xff) == 0x7e)
+ && (le16_to_cpu(cfi->cfiq->P_ID) == 0x0002)) {
+ extendedId1 = cfi_read_query16(map, base + 0x1 * ofs_factor);
+ extendedId2 = cfi_read_query16(map, base + 0xe * ofs_factor);
+ extendedId3 = cfi_read_query16(map, base + 0xf * ofs_factor);
+ }
+
+ /* Get AMD/Spansion extended JEDEC ID */
+ if (cfi->mfr == CFI_MFR_AMD && (cfi->id & 0xff) == 0x7e)
+ cfi->id = cfi_read_query(map, base + 0xe * ofs_factor) << 8 |
+ cfi_read_query(map, base + 0xf * ofs_factor);
+
+ /* Put it back into Read Mode */
+ cfi_qry_mode_off(base, map, cfi);
+ xip_allowed(base, map);
+
/* Do any necessary byteswapping */
cfi->cfiq->P_ID = le16_to_cpu(cfi->cfiq->P_ID);
@@ -231,6 +266,16 @@ static int __xipram cfi_chip_setup(struct map_info *map,
cfi->cfiq->InterfaceDesc = le16_to_cpu(cfi->cfiq->InterfaceDesc);
cfi->cfiq->MaxBufWriteSize = le16_to_cpu(cfi->cfiq->MaxBufWriteSize);
+ /* If the device is a M29EW used in 8-bit mode, adjust buffer size */
+ if ((cfi->cfiq->MaxBufWriteSize > 0x8) && (cfi->mfr == CFI_MFR_INTEL ||
+ cfi->mfr == CFI_MFR_ST) && (extendedId1 == 0x7E) &&
+ (extendedId2 == 0x22 || extendedId2 == 0x23 || extendedId2 == 0x28) &&
+ (extendedId3 == 0x01)) {
+ cfi->cfiq->MaxBufWriteSize = 0x8;
+ pr_warning("Adjusted buffer size on Numonyx flash M29EW family");
+ pr_warning("in 8 bit mode\n");
+ }
+
#ifdef DEBUG_CFI
/* Dump the information therein */
print_cfi_ident(cfi->cfiq);
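
For reference: the auto-select sequence above reads the manufacturer and device ID, and parts that report the 0x7E extended-ID escape need the two further ID bytes (offsets 0xe and 0xf) before they can be identified. A minimal standalone sketch of the M29EW match behind the MaxBufWriteSize fixup; the helper name and sample values are illustrative and simply mirror the check added to cfi_chip_setup():

    #include <stdio.h>

    /* Mirror of the extended-ID test in cfi_chip_setup() (illustrative) */
    static int is_m29ew(int id1, int id2, int id3)
    {
        return id1 == 0x7E &&
               (id2 == 0x22 || id2 == 0x23 || id2 == 0x28) &&
               id3 == 0x01;
    }

    int main(void)
    {
        /* Sample ID bytes an M29EW might report (assumed values) */
        printf("M29EW match: %d\n", is_m29ew(0x7E, 0x22, 0x01)); /* 1 */
        return 0;
    }
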
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index c50888670250..dceeec32a4ca 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -190,6 +190,7 @@ static int m25p_probe(struct spi_mem *spimem)
spi_mem_set_drvdata(spimem, flash);
flash->spimem = spimem;
+ nor->spi = spi;
if (spi->mode & SPI_RX_OCTAL) {
hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 5a711d8beaca..29b69a2f2b0a 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -546,4 +546,18 @@ config MTD_NAND_DISKONCHIP_BBTWRITE
load time (assuming you build diskonchip as a module) with the module
parameter "inftl_bbt_write=1".
+config MTD_NAND_ARASAN
+ tristate "Support for Arasan NAND flash controller"
+ depends on HAS_IOMEM && HAS_DMA
+ help
+ Enables the driver for the Arasan NAND flash controller found on
+ the Zynq UltraScale+ MPSoC.
+
+config MTD_NAND_PL353
+ tristate "ARM PL353 NAND flash driver"
+ depends on ARM
+ depends on PL353_SMC
+ help
+ Enables NAND flash support through the ARM PrimeCell PL353 Static
+ Memory Controller.
+
endif # MTD_RAW_NAND
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index efaf5cd25edc..cc292a592457 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -57,6 +57,8 @@ obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o
obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o
obj-$(CONFIG_MTD_NAND_STM32_FMC2) += stm32_fmc2_nand.o
obj-$(CONFIG_MTD_NAND_MESON) += meson_nand.o
+obj-$(CONFIG_MTD_NAND_ARASAN) += arasan_nand.o
+obj-$(CONFIG_MTD_NAND_PL353) += pl353_nand.o
nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_onfi.o
diff --git a/drivers/mtd/nand/raw/arasan_nand.c b/drivers/mtd/nand/raw/arasan_nand.c
new file mode 100644
index 000000000000..58ad872b3476
--- /dev/null
+++ b/drivers/mtd/nand/raw/arasan_nand.c
@@ -0,0 +1,1465 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Arasan NAND Flash Controller Driver
+ *
+ * Copyright (C) 2014 - 2017 Xilinx, Inc.
+ * Author: Punnaiah Choudary Kalluri <punnaia@xilinx.com>
+ * Author: Naga Sureshkumar Relli <nagasure@xilinx.com>
+ *
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+
+#define EVENT_TIMEOUT_MSEC 1000
+#define ANFC_PM_TIMEOUT 1000 /* ms */
+
+#define PKT_OFST 0x00
+#define PKT_CNT_SHIFT 12
+
+#define MEM_ADDR1_OFST 0x04
+#define MEM_ADDR2_OFST 0x08
+#define PG_ADDR_SHIFT 16
+#define BCH_MODE_SHIFT 25
+#define MEM_ADDR_MASK GENMASK(7, 0)
+#define BCH_MODE_MASK GENMASK(27, 25)
+#define CS_MASK GENMASK(31, 30)
+#define CS_SHIFT 30
+
+#define CMD_OFST 0x0C
+#define ECC_ENABLE BIT(31)
+#define DMA_EN_MASK GENMASK(27, 26)
+#define DMA_ENABLE 0x2
+#define DMA_EN_SHIFT 26
+#define REG_PAGE_SIZE_SHIFT 23
+
+#define PROG_OFST 0x10
+#define PROG_PGRD BIT(0)
+#define PROG_ERASE BIT(2)
+#define PROG_STATUS BIT(3)
+#define PROG_PGPROG BIT(4)
+#define PROG_RDID BIT(6)
+#define PROG_RDPARAM BIT(7)
+#define PROG_RST BIT(8)
+#define PROG_GET_FEATURE BIT(9)
+#define PROG_SET_FEATURE BIT(10)
+
+#define INTR_STS_EN_OFST 0x14
+#define INTR_SIG_EN_OFST 0x18
+#define XFER_COMPLETE BIT(2)
+#define READ_READY BIT(1)
+#define WRITE_READY BIT(0)
+#define MBIT_ERROR BIT(3)
+#define EVENT_MASK (XFER_COMPLETE | READ_READY | WRITE_READY | MBIT_ERROR)
+
+#define INTR_STS_OFST 0x1C
+#define READY_STS_OFST 0x20
+#define DMA_ADDR1_OFST 0x24
+#define FLASH_STS_OFST 0x28
+#define DATA_PORT_OFST 0x30
+#define ECC_OFST 0x34
+#define BCH_EN_SHIFT 27
+#define ECC_SIZE_SHIFT 16
+
+#define ECC_ERR_CNT_OFST 0x38
+#define PAGE_ERR_CNT_MASK GENMASK(16, 8)
+#define PKT_ERR_CNT_MASK GENMASK(7, 0)
+
+#define ECC_SPR_CMD_OFST 0x3C
+#define CMD2_SHIFT 8
+#define ADDR_CYCLES_SHIFT 28
+
+#define ECC_ERR_CNT_1BIT_OFST 0x40
+#define ECC_ERR_CNT_2BIT_OFST 0x44
+#define DMA_ADDR0_OFST 0x50
+#define DATA_INTERFACE_OFST 0x6C
+#define ANFC_MAX_CHUNK_SIZE 0x4000
+#define ANFC_MAX_ADDR_CYCLES 7
+
+#define REG_PAGE_SIZE_512 0
+#define REG_PAGE_SIZE_1K 5
+#define REG_PAGE_SIZE_2K 1
+#define REG_PAGE_SIZE_4K 2
+#define REG_PAGE_SIZE_8K 3
+#define REG_PAGE_SIZE_16K 4
+
+#define TEMP_BUF_SIZE 1024
+#define NVDDR_MODE_PACKET_SIZE 8
+#define SDR_MODE_PACKET_SIZE 4
+
+#define ONFI_DATA_INTERFACE_NVDDR BIT(4)
+#define NVDDR_MODE BIT(9)
+#define NVDDR_TIMING_MODE_SHIFT 3
+
+#define SDR_MODE_DEFLT_FREQ 80000000
+#define COL_ROW_ADDR(pos, val) (((val) & 0xFF) << (8 * (pos)))
+
+struct anfc_op {
+ u32 cmds[4];
+ u32 len;
+ u32 col;
+ u32 row;
+ unsigned int data_instr_idx;
+ const struct nand_op_instr *data_instr;
+};
+
+/**
+ * struct anfc_nand_chip - Defines the nand chip related information
+ * @node: Used to store NAND chips into a list.
+ * @chip: NAND chip information structure.
+ * @strength: BCH (true) or Hamming (false) ECC mode.
+ * @ecc_strength: ECC strength (4/8/12/16/24).
+ * @eccval: ECC config value.
+ * @raddr_cycles: Row address cycle information.
+ * @caddr_cycles: Column address cycle information.
+ * @pktsize: Packet size for read / write operation.
+ * @csnum: chipselect number to be used.
+ * @spktsize: Packet size in ddr mode for status operation.
+ * @inftimeval: Data interface and timing mode information
+ */
+struct anfc_nand_chip {
+ struct list_head node;
+ struct nand_chip chip;
+ bool strength;
+ u32 ecc_strength;
+ u32 eccval;
+ u16 raddr_cycles;
+ u16 caddr_cycles;
+ u32 pktsize;
+ int csnum;
+ u32 spktsize;
+ u32 inftimeval;
+};
+
+/**
+ * struct anfc_nand_controller - Defines the Arasan NAND flash controller
+ * driver instance
+ * @controller: base controller structure.
+ * @chips: List of all NAND chips attached to the controller.
+ * @dev: Pointer to the device structure.
+ * @base: Virtual address of the NAND flash device.
+ * @curr_cmd: Current command issued.
+ * @clk_sys: Pointer to the system clock.
+ * @clk_flash: Pointer to the flash clock.
+ * @buf: Buffer used for read/write byte operations.
+ * @irq: irq number.
+ * @csnum: Chip select number currently in use.
+ * @event: Completion event for nand status events.
+ * @status: Status of the flash device.
+ * @prog: Used to initiate controller operations.
+ * @chip_active: Used to check the chip select state, active or not.
+ */
+struct anfc_nand_controller {
+ struct nand_controller controller;
+ struct list_head chips;
+ struct device *dev;
+ void __iomem *base;
+ int curr_cmd;
+ struct clk *clk_sys;
+ struct clk *clk_flash;
+ int irq;
+ int csnum;
+ struct completion event;
+ int status;
+ u32 prog;
+ u8 buf[TEMP_BUF_SIZE];
+ bool chip_active;
+};
+
+static int anfc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+
+ if (section >= nand->ecc.steps)
+ return -ERANGE;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = nand->ecc.total;
+ oobregion->offset = mtd->oobsize - oobregion->length;
+
+ return 0;
+}
+
+static int anfc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+
+ if (section >= nand->ecc.steps)
+ return -ERANGE;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 2;
+ oobregion->length = mtd->oobsize - nand->ecc.total - 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops anfc_ooblayout_ops = {
+ .ecc = anfc_ooblayout_ecc,
+ .free = anfc_ooblayout_free,
+};
+
+static inline struct anfc_nand_chip *to_anfc_nand(struct nand_chip *nand)
+{
+ return container_of(nand, struct anfc_nand_chip, chip);
+}
+
+static inline struct anfc_nand_controller *to_anfc(struct nand_controller *ctrl)
+{
+ return container_of(ctrl, struct anfc_nand_controller, controller);
+}
+
+static u8 anfc_page(u32 pagesize)
+{
+ switch (pagesize) {
+ case 512:
+ return REG_PAGE_SIZE_512;
+ case 1024:
+ return REG_PAGE_SIZE_1K;
+ case 2048:
+ return REG_PAGE_SIZE_2K;
+ case 4096:
+ return REG_PAGE_SIZE_4K;
+ case 8192:
+ return REG_PAGE_SIZE_8K;
+ case 16384:
+ return REG_PAGE_SIZE_16K;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static inline void anfc_enable_intrs(struct anfc_nand_controller *nfc, u32 val)
+{
+ writel(val, nfc->base + INTR_STS_EN_OFST);
+ writel(val, nfc->base + INTR_SIG_EN_OFST);
+}
+
+static inline void anfc_config_ecc(struct anfc_nand_controller *nfc, bool on)
+{
+ u32 val;
+
+ val = readl(nfc->base + CMD_OFST);
+ if (on)
+ val |= ECC_ENABLE;
+ else
+ val &= ~ECC_ENABLE;
+ writel(val, nfc->base + CMD_OFST);
+}
+
+static inline void anfc_config_dma(struct anfc_nand_controller *nfc, int on)
+{
+ u32 val;
+
+ val = readl(nfc->base + CMD_OFST);
+ val &= ~DMA_EN_MASK;
+ if (on)
+ val |= DMA_ENABLE << DMA_EN_SHIFT;
+ writel(val, nfc->base + CMD_OFST);
+}
+
+static inline int anfc_wait_for_event(struct anfc_nand_controller *nfc)
+{
+ return wait_for_completion_timeout(&nfc->event,
+ msecs_to_jiffies(EVENT_TIMEOUT_MSEC));
+}
+
+static inline void anfc_setpktszcnt(struct anfc_nand_controller *nfc,
+ u32 pktsize, u32 pktcount)
+{
+ writel(pktsize | (pktcount << PKT_CNT_SHIFT), nfc->base + PKT_OFST);
+}
+
+static inline void anfc_set_eccsparecmd(struct anfc_nand_controller *nfc,
+ struct anfc_nand_chip *achip, u8 cmd1,
+ u8 cmd2)
+{
+ writel(cmd1 | (cmd2 << CMD2_SHIFT) |
+ (achip->caddr_cycles << ADDR_CYCLES_SHIFT),
+ nfc->base + ECC_SPR_CMD_OFST);
+}
+
+static void anfc_setpagecoladdr(struct anfc_nand_controller *nfc, u32 page,
+ u16 col)
+{
+ u32 val;
+
+ writel(col | (page << PG_ADDR_SHIFT), nfc->base + MEM_ADDR1_OFST);
+
+ val = readl(nfc->base + MEM_ADDR2_OFST);
+ val = (val & ~MEM_ADDR_MASK) |
+ ((page >> PG_ADDR_SHIFT) & MEM_ADDR_MASK);
+ writel(val, nfc->base + MEM_ADDR2_OFST);
+}
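
A quick standalone illustration of the MEM_ADDR1/MEM_ADDR2 packing performed by anfc_setpagecoladdr(): the column and the low 16 page bits share MEM_ADDR1, and the page bits above that land in the low byte of MEM_ADDR2 (constants copied from the driver; the example address is chosen for illustration):

    #include <stdio.h>
    #include <stdint.h>

    #define PG_ADDR_SHIFT 16
    #define MEM_ADDR_MASK 0xFFu

    int main(void)
    {
        uint32_t page = 0x12345, col = 0;
        /* shift wraps exactly like the 32-bit hardware register */
        uint32_t addr1 = col | (page << PG_ADDR_SHIFT);
        uint32_t addr2_lo = (page >> PG_ADDR_SHIFT) & MEM_ADDR_MASK;

        printf("MEM_ADDR1=0x%08x MEM_ADDR2[7:0]=0x%02x\n", addr1, addr2_lo);
        /* MEM_ADDR1=0x23450000 MEM_ADDR2[7:0]=0x01 */
        return 0;
    }
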
+
+static void anfc_prepare_cmd(struct anfc_nand_controller *nfc, u8 cmd1,
+ u8 cmd2, u8 dmamode,
+ u32 pagesize, u8 addrcycles)
+{
+ u32 regval;
+
+ regval = cmd1 | (cmd2 << CMD2_SHIFT);
+ if (dmamode)
+ regval |= DMA_ENABLE << DMA_EN_SHIFT;
+ regval |= addrcycles << ADDR_CYCLES_SHIFT;
+ regval |= anfc_page(pagesize) << REG_PAGE_SIZE_SHIFT;
+ writel(regval, nfc->base + CMD_OFST);
+}
+
+static void anfc_rw_dma_op(struct mtd_info *mtd, u8 *buf, int len,
+ bool do_read, u32 prog, int pktcount, int pktsize)
+{
+ dma_addr_t paddr;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ u32 eccintr = 0, dir;
+
+ if (pktsize == 0)
+ pktsize = len;
+
+ anfc_setpktszcnt(nfc, pktsize, pktcount);
+
+ if (!achip->strength)
+ eccintr = MBIT_ERROR;
+
+ if (do_read)
+ dir = DMA_FROM_DEVICE;
+ else
+ dir = DMA_TO_DEVICE;
+
+ paddr = dma_map_single(nfc->dev, buf, len, dir);
+ if (dma_mapping_error(nfc->dev, paddr)) {
+ dev_err(nfc->dev, "Read buffer mapping error");
+ return;
+ }
+ writel(paddr, nfc->base + DMA_ADDR0_OFST);
+ writel((paddr >> 32), nfc->base + DMA_ADDR1_OFST);
+ anfc_enable_intrs(nfc, (XFER_COMPLETE | eccintr));
+ writel(prog, nfc->base + PROG_OFST);
+ anfc_wait_for_event(nfc);
+ dma_unmap_single(nfc->dev, paddr, len, dir);
+}
+
+static void anfc_rw_pio_op(struct mtd_info *mtd, u8 *buf, int len,
+ bool do_read, int prog, int pktcount, int pktsize)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ u32 *bufptr = (u32 *)buf;
+ u32 cnt = 0, intr = 0;
+
+ anfc_config_dma(nfc, 0);
+
+ if (pktsize == 0)
+ pktsize = len;
+
+ anfc_setpktszcnt(nfc, pktsize, pktcount);
+
+ if (!achip->strength)
+ intr = MBIT_ERROR;
+
+ if (do_read)
+ intr |= READ_READY;
+ else
+ intr |= WRITE_READY;
+
+ anfc_enable_intrs(nfc, intr);
+ writel(prog, nfc->base + PROG_OFST);
+ while (cnt < pktcount) {
+ anfc_wait_for_event(nfc);
+ cnt++;
+ if (cnt == pktcount)
+ anfc_enable_intrs(nfc, XFER_COMPLETE);
+ if (do_read)
+ ioread32_rep(nfc->base + DATA_PORT_OFST, bufptr,
+ pktsize / 4);
+ else
+ iowrite32_rep(nfc->base + DATA_PORT_OFST, bufptr,
+ pktsize / 4);
+ bufptr += (pktsize / 4);
+ if (cnt < pktcount)
+ anfc_enable_intrs(nfc, intr);
+ }
+ anfc_wait_for_event(nfc);
+}
+
+static void anfc_read_data_op(struct nand_chip *chip, u8 *buf, int len,
+ int pktcount, int pktsize)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (virt_addr_valid(buf))
+ anfc_rw_dma_op(mtd, buf, len, 1, PROG_PGRD, pktcount, pktsize);
+ else
+ anfc_rw_pio_op(mtd, buf, len, 1, PROG_PGRD, pktcount, pktsize);
+}
+
+static void anfc_write_data_op(struct nand_chip *chip, const u8 *buf,
+ int len, int pktcount, int pktsize)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (virt_addr_valid(buf))
+ anfc_rw_dma_op(mtd, (char *)buf, len, 0, PROG_PGPROG, pktcount,
+ pktsize);
+ else
+ anfc_rw_pio_op(mtd, (char *)buf, len, 0, PROG_PGPROG, pktcount,
+ pktsize);
+}
+
+static int anfc_read_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ u8 *ecc_code = chip->ecc.code_buf;
+ u8 *p;
+ int eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int stat = 0, i;
+ u32 ret;
+ unsigned int max_bitflips = 0;
+ u32 eccsteps;
+ u32 one_bit_err = 0, multi_bit_err = 0;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ anfc_set_eccsparecmd(nfc, achip, NAND_CMD_RNDOUT, NAND_CMD_RNDOUTSTART);
+ anfc_config_ecc(nfc, true);
+ anfc_read_data_op(chip, buf, mtd->writesize,
+ DIV_ROUND_UP(mtd->writesize, achip->pktsize),
+ achip->pktsize);
+
+ if (achip->strength) {
+ /*
+ * In BCH mode the Arasan NAND controller can correct up to
+ * 24 bit errors. Beyond that, it can't even detect errors.
+ */
+ multi_bit_err = readl(nfc->base + ECC_ERR_CNT_OFST);
+ multi_bit_err = ((multi_bit_err & PAGE_ERR_CNT_MASK) >> 8);
+ } else {
+ /*
+ * In Hamming mode the Arasan NAND controller can correct up
+ * to 1 bit errors and detect up to 4 bit errors.
+ */
+ one_bit_err = readl(nfc->base + ECC_ERR_CNT_1BIT_OFST);
+ multi_bit_err = readl(nfc->base + ECC_ERR_CNT_2BIT_OFST);
+
+ /* Clear ecc error count register 1Bit, 2Bit */
+ writel(0x0, nfc->base + ECC_ERR_CNT_1BIT_OFST);
+ writel(0x0, nfc->base + ECC_ERR_CNT_2BIT_OFST);
+ }
+
+ anfc_config_ecc(nfc, false);
+
+ if (oob_required)
+ chip->ecc.read_oob(mtd, chip, page);
+
+ if (multi_bit_err || one_bit_err) {
+ if (!oob_required)
+ chip->ecc.read_oob(mtd, chip, page);
+
+ mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+ chip->ecc.total);
+ eccsteps = chip->ecc.steps;
+ p = buf;
+ for (i = 0 ; eccsteps; eccsteps--, i += eccbytes,
+ p += eccsize) {
+ stat = nand_check_erased_ecc_chunk(p,
+ chip->ecc.size,
+ &ecc_code[i],
+ eccbytes,
+ NULL, 0,
+ chip->ecc.strength);
+ if (stat < 0) {
+ stat = 0;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips,
+ stat);
+ }
+ }
+ }
+
+ return max_bitflips;
+}
+
+static int anfc_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ int ret;
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ anfc_set_eccsparecmd(nfc, achip, NAND_CMD_RNDIN, 0);
+ anfc_config_ecc(nfc, true);
+ anfc_write_data_op(chip, buf, mtd->writesize,
+ DIV_ROUND_UP(mtd->writesize, achip->pktsize),
+ achip->pktsize);
+
+ if (oob_required)
+ chip->ecc.write_oob(mtd, chip, page);
+
+ anfc_config_ecc(nfc, false);
+
+ return 0;
+}
+
+static int anfc_ecc_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc, int ecc_mode)
+{
+ u32 ecc_addr;
+ unsigned int ecc_strength, steps;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+
+ ecc->mode = NAND_ECC_HW;
+ ecc->read_page = anfc_read_page_hwecc;
+ ecc->write_page = anfc_write_page_hwecc;
+
+ mtd_set_ooblayout(mtd, &anfc_ooblayout_ops);
+
+ steps = mtd->writesize / chip->ecc_step_ds;
+
+ switch (chip->ecc_strength_ds) {
+ case 12:
+ ecc_strength = 0x1;
+ break;
+ case 8:
+ ecc_strength = 0x2;
+ break;
+ case 4:
+ ecc_strength = 0x3;
+ break;
+ case 24:
+ ecc_strength = 0x4;
+ break;
+ default:
+ ecc_strength = 0x0;
+ }
+ if (!ecc_strength)
+ ecc->total = 3 * steps;
+ else
+ ecc->total =
+ DIV_ROUND_UP(fls(8 * chip->ecc_step_ds) *
+ chip->ecc_strength_ds * steps, 8);
+
+ ecc->strength = chip->ecc_strength_ds;
+ ecc->size = chip->ecc_step_ds;
+ ecc->bytes = ecc->total / steps;
+ ecc->steps = steps;
+ achip->ecc_strength = ecc_strength;
+ achip->strength = achip->ecc_strength;
+ ecc_addr = mtd->writesize + (mtd->oobsize - ecc->total);
+ achip->eccval = ecc_addr | (ecc->total << ECC_SIZE_SHIFT) |
+ (achip->strength << BCH_EN_SHIFT);
+
+ if (chip->ecc_step_ds >= 1024)
+ achip->pktsize = 1024;
+ else
+ achip->pktsize = 512;
+
+ return 0;
+}
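
To make the ECC geometry above concrete, here is a standalone sketch of the BCH case for a 2048-byte page with 512-byte steps and 8-bit strength; fls32() and DIV_ROUND_UP() stand in for the kernel's fls() and DIV_ROUND_UP():

    #include <stdio.h>

    static int fls32(unsigned int x) /* stand-in for the kernel's fls() */
    {
        int r = 0;
        while (x) { r++; x >>= 1; }
        return r;
    }

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int writesize = 2048, step = 512, strength = 8;
        unsigned int steps = writesize / step; /* 4 */
        unsigned int total = DIV_ROUND_UP(fls32(8 * step) * strength * steps, 8);

        /* fls(4096) = 13, so total = (13 * 8 * 4 + 7) / 8 = 52 bytes,
         * i.e. ecc->bytes = 13 per 512-byte step. */
        printf("steps=%u total=%u bytes/step=%u\n", steps, total, total / steps);
        return 0;
    }
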
+
+/* NAND framework ->exec_op() hooks and related helpers */
+static void anfc_parse_instructions(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ struct anfc_op *nfc_op)
+{
+ const struct nand_op_instr *instr = NULL;
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ unsigned int op_id;
+ int i = 0;
+
+ memset(nfc_op, 0, sizeof(struct anfc_op));
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ unsigned int naddrs;
+
+ instr = &subop->instrs[op_id];
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (op_id)
+ nfc_op->cmds[1] = instr->ctx.cmd.opcode;
+ else
+ nfc_op->cmds[0] = instr->ctx.cmd.opcode;
+ nfc->curr_cmd = nfc_op->cmds[0];
+
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ i = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop,
+ op_id);
+
+ for (; i < naddrs; i++) {
+ u8 val = instr->ctx.addr.addrs[i];
+
+ if (nfc_op->cmds[0] == NAND_CMD_ERASE1) {
+ nfc_op->row |= COL_ROW_ADDR(i, val);
+ } else {
+ if (i < 2)
+ nfc_op->col |= COL_ROW_ADDR(i,
+ val);
+ else
+ nfc_op->row |= COL_ROW_ADDR(i -
+ 2, val);
+ }
+ }
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ nfc_op->data_instr = instr;
+ nfc_op->data_instr_idx = op_id;
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ nfc_op->data_instr = instr;
+ nfc_op->data_instr_idx = op_id;
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ break;
+ }
+ }
+}
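
A standalone sketch of the address packing done above for a non-erase operation: the first two address cycles form the column and the remaining cycles the row, one byte per cycle (macro copied from the driver; the 5-cycle example is illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define COL_ROW_ADDR(pos, val) (((val) & 0xFF) << (8 * (pos)))

    int main(void)
    {
        /* 5 address cycles for a large-page read: column 0, page 0x12345 */
        uint8_t cycles[5] = { 0x00, 0x00, 0x45, 0x23, 0x01 };
        uint32_t col = 0, row = 0;
        int i;

        for (i = 0; i < 5; i++) {
            if (i < 2)
                col |= COL_ROW_ADDR(i, cycles[i]);
            else
                row |= COL_ROW_ADDR(i - 2, cycles[i]);
        }
        printf("col=0x%04x row=0x%06x\n", col, row); /* col=0x0000 row=0x012345 */
        return 0;
    }
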
+
+static int anfc_reset_cmd_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct anfc_op nfc_op = {};
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+
+ /*
+ * Do not execute commands other than NAND_CMD_RESET here; other
+ * commands have their own patterns. If there is no pattern match,
+ * the controller does not support that pattern.
+ */
+ if (nfc_op.cmds[0] != NAND_CMD_RESET)
+ return 0;
+
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], 0, 0, 0, 0);
+ nfc->prog = PROG_RST;
+ anfc_enable_intrs(nfc, XFER_COMPLETE);
+ writel(nfc->prog, nfc->base + PROG_OFST);
+ anfc_wait_for_event(nfc);
+
+ return 0;
+}
+
+static int anfc_read_id_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_op nfc_op = {};
+ unsigned int op_id, len;
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+ len = nand_subop_get_data_len(subop, op_id);
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], 0, 0, 0, 1);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+ nfc->prog = PROG_RDID;
+ anfc_rw_pio_op(mtd, nfc->buf, roundup(len, 4), 1, PROG_RDID, 1, 0);
+ memcpy(instr->ctx.data.buf.in, nfc->buf, len);
+
+ return 0;
+}
+
+static int anfc_read_status_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_op nfc_op = {};
+ unsigned int op_id, len;
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], 0, 0, 0, 0);
+ anfc_setpktszcnt(nfc, achip->spktsize / 4, 1);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+ nfc->prog = PROG_STATUS;
+
+ anfc_enable_intrs(nfc, XFER_COMPLETE);
+ writel(nfc->prog, nfc->base + PROG_OFST);
+ anfc_wait_for_event(nfc);
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ len = nand_subop_get_data_len(subop, op_id);
+
+ /*
+ * The Arasan NAND controller will update the status value
+ * returned by the flash device in FLASH_STS register.
+ */
+ nfc->status = readl(nfc->base + FLASH_STS_OFST);
+ memcpy(instr->ctx.data.buf.in, &nfc->status, len);
+
+ return 0;
+}
+
+static int anfc_erase_and_zero_len_page_read_type_exec(struct nand_chip *chip,
+ const struct nand_subop
+ *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 dma_mode = 0, write_size = 0, addrcycles = 0, len, op_id;
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+
+ if (nfc_op.cmds[0] == NAND_CMD_ERASE1) {
+ nfc->prog = PROG_ERASE;
+ addrcycles = achip->raddr_cycles;
+ write_size = 0;
+ dma_mode = 0;
+ nfc_op.col = nfc_op.row & 0xffff;
+ nfc_op.row = (nfc_op.row >> PG_ADDR_SHIFT) & 0xffff;
+ }
+ if (nfc_op.cmds[0] == NAND_CMD_READ0) {
+ nfc->prog = PROG_PGRD;
+ addrcycles = achip->raddr_cycles + achip->caddr_cycles;
+ write_size = mtd->writesize;
+ dma_mode = 1;
+ }
+
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], nfc_op.cmds[1], dma_mode,
+ write_size, addrcycles);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ if (nfc_op.cmds[0] == NAND_CMD_ERASE1) {
+ anfc_enable_intrs(nfc, XFER_COMPLETE);
+ writel(nfc->prog, nfc->base + PROG_OFST);
+ anfc_wait_for_event(nfc);
+ }
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ len = nand_subop_get_data_len(subop, op_id);
+ anfc_read_data_op(chip, instr->ctx.data.buf.in, len, 1, 0);
+
+ return 0;
+}
+
+static int anfc_read_param_get_feature_sp_read_type_exec(struct nand_chip *chip,
+ const struct nand_subop
+ *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ unsigned int op_id, len;
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ u32 dma_mode = 0, addrcycles = 0, write_size = 0;
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+
+ if (nfc_op.cmds[0] == NAND_CMD_PARAM) {
+ nfc->prog = PROG_RDPARAM;
+ dma_mode = 0;
+ addrcycles = 1;
+ write_size = 0;
+ }
+ if (nfc_op.cmds[0] == NAND_CMD_GET_FEATURES) {
+ nfc->prog = PROG_GET_FEATURE;
+ dma_mode = 0;
+ addrcycles = 1;
+ write_size = 0;
+ }
+ if (nfc_op.cmds[0] == NAND_CMD_READ0) {
+ nfc->prog = PROG_PGRD;
+ addrcycles = achip->raddr_cycles + achip->caddr_cycles;
+ write_size = mtd->writesize;
+ dma_mode = 1;
+ }
+
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], 0, dma_mode, write_size,
+ addrcycles);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ len = nand_subop_get_data_len(subop, op_id);
+ anfc_rw_pio_op(mtd, nfc->buf, roundup(len, 4), 1, nfc->prog, 1, 0);
+ memcpy(instr->ctx.data.buf.in, nfc->buf, len);
+
+ return 0;
+}
+
+static int anfc_random_datain_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ unsigned int op_id, len;
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+
+ len = nand_subop_get_data_len(subop, op_id);
+ anfc_rw_pio_op(mtd, nfc->buf, roundup(len, 4), 1, PROG_PGRD, 1, 0);
+ memcpy(instr->ctx.data.buf.in, nfc->buf, len);
+
+ return 0;
+}
+
+static int anfc_setfeature_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ unsigned int op_id, len;
+ struct anfc_op nfc_op = {};
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ nfc->prog = PROG_SET_FEATURE;
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], 0, 0, 0, 1);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ len = nand_subop_get_data_len(subop, op_id);
+ anfc_write_data_op(chip, (char *)instr->ctx.data.buf.out, len, 1, 0);
+
+ return 0;
+}
+
+static int anfc_change_read_column_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ unsigned int op_id, len;
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ nfc->prog = PROG_PGRD;
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], nfc_op.cmds[1], 1,
+ mtd->writesize, 2);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ len = nand_subop_get_data_len(subop, op_id);
+ anfc_rw_pio_op(mtd, nfc->buf, roundup(len, 4), 1, nfc->prog, 1, 0);
+ memcpy(instr->ctx.data.buf.in, nfc->buf, len);
+
+ return 0;
+}
+
+static int anfc_page_read_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ unsigned int op_id, len;
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 addrcycles;
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ nfc->prog = PROG_PGRD;
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+
+ addrcycles = achip->raddr_cycles + achip->caddr_cycles;
+
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], nfc_op.cmds[1], 1,
+ mtd->writesize, addrcycles);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ len = nand_subop_get_data_len(subop, op_id);
+
+ anfc_rw_pio_op(mtd, nfc->buf, roundup(len, 4), 1, nfc->prog, 1, 0);
+ memcpy(instr->ctx.data.buf.in, nfc->buf, len);
+
+ return 0;
+}
+
+static int anfc_zero_len_page_write_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 addrcycles;
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ nfc->prog = PROG_PGRD;
+ addrcycles = achip->raddr_cycles + achip->caddr_cycles;
+
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], NAND_CMD_PAGEPROG, 1,
+ mtd->writesize, addrcycles);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ return 0;
+}
+
+static int anfc_page_write_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ unsigned int op_id, len;
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 addrcycles;
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+ nfc->prog = PROG_PGPROG;
+
+ addrcycles = achip->raddr_cycles + achip->caddr_cycles;
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], nfc_op.cmds[1], 1,
+ mtd->writesize, addrcycles);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ len = nand_subop_get_data_len(subop, op_id);
+ anfc_write_data_op(chip, (char *)instr->ctx.data.buf.out, len, 1, 0);
+
+ return 0;
+}
+
+static int anfc_page_write_nowait_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 addrcycles;
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ nfc->prog = PROG_PGPROG;
+
+ addrcycles = achip->raddr_cycles + achip->caddr_cycles;
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], NAND_CMD_PAGEPROG, 1,
+ mtd->writesize, addrcycles);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ anfc_write_data_op(chip, (char *)instr->ctx.data.buf.out,
+ mtd->writesize, DIV_ROUND_UP(mtd->writesize,
+ achip->pktsize), achip->pktsize);
+
+ return 0;
+}
+
+static const struct nand_op_parser anfc_op_parser = NAND_OP_PARSER(
+ /* Use a separate function for each pattern */
+ NAND_OP_PARSER_PATTERN(
+ anfc_random_datain_type_exec,
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_change_read_column_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_page_read_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_page_write_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_CHUNK_SIZE),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_read_id_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_erase_and_zero_len_page_read_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_read_status_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_reset_cmd_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_setfeature_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_CHUNK_SIZE),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_page_write_nowait_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_read_param_get_feature_sp_read_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_zero_len_page_write_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES)),
+ );
+
+static int anfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ return nand_op_parser_exec_op(chip, &anfc_op_parser,
+ op, check_only);
+}
+
+static void anfc_select_chip(struct mtd_info *mtd, int num)
+{
+ u32 val;
+ int ret;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+
+ if (num < 0) {
+ nfc->chip_active = false;
+ pm_runtime_mark_last_busy(nfc->dev);
+ pm_runtime_put_autosuspend(nfc->dev);
+ return;
+ }
+
+ nfc->chip_active = true;
+ ret = pm_runtime_get_sync(nfc->dev);
+ if (ret < 0) {
+ dev_err(nfc->dev, "runtime_get_sync failed\n");
+ return;
+ }
+
+ val = readl(nfc->base + MEM_ADDR2_OFST);
+ val &= ~(CS_MASK | BCH_MODE_MASK);
+ val |= (achip->csnum << CS_SHIFT) |
+ (achip->ecc_strength << BCH_MODE_SHIFT);
+ writel(val, nfc->base + MEM_ADDR2_OFST);
+ nfc->csnum = achip->csnum;
+ writel(achip->eccval, nfc->base + ECC_OFST);
+ writel(achip->inftimeval, nfc->base + DATA_INTERFACE_OFST);
+}
+
+static irqreturn_t anfc_irq_handler(int irq, void *ptr)
+{
+ struct anfc_nand_controller *nfc = ptr;
+ u32 status;
+
+ status = readl(nfc->base + INTR_STS_OFST);
+ if (status & EVENT_MASK) {
+ complete(&nfc->event);
+ writel(status & EVENT_MASK, nfc->base + INTR_STS_OFST);
+ writel(0, nfc->base + INTR_STS_EN_OFST);
+ writel(0, nfc->base + INTR_SIG_EN_OFST);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int anfc_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ int ret;
+
+ if (mtd->writesize <= SZ_512)
+ achip->caddr_cycles = 1;
+ else
+ achip->caddr_cycles = 2;
+
+ if (chip->options & NAND_ROW_ADDR_3)
+ achip->raddr_cycles = 3;
+ else
+ achip->raddr_cycles = 2;
+
+ chip->ecc.calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
+ chip->ecc.code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
+ if (!chip->ecc.calc_buf || !chip->ecc.code_buf) {
+ kfree(chip->ecc.calc_buf);
+ kfree(chip->ecc.code_buf);
+ return -ENOMEM;
+ }
+ ret = anfc_ecc_init(mtd, &chip->ecc, chip->ecc.mode);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct nand_controller_ops anfc_nand_controller_ops = {
+ .attach_chip = anfc_nand_attach_chip,
+};
+
+static int anfc_init_timing_mode(struct anfc_nand_controller *nfc,
+ struct anfc_nand_chip *achip)
+{
+ struct nand_chip *chip = &achip->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int mode, err;
+ unsigned int feature[2];
+ u32 inftimeval;
+ bool change_sdr_clk = false;
+
+ memset(feature, 0, NVDDR_MODE_PACKET_SIZE);
+ /* Get nvddr timing modes */
+ mode = onfi_get_sync_timing_mode(chip) & 0xff;
+ if (!mode) {
+ mode = fls(onfi_get_async_timing_mode(chip)) - 1;
+ inftimeval = mode;
+ if (mode >= 2 && mode <= 5)
+ change_sdr_clk = true;
+ } else {
+ mode = fls(mode) - 1;
+ inftimeval = NVDDR_MODE | (mode << NVDDR_TIMING_MODE_SHIFT);
+ mode |= ONFI_DATA_INTERFACE_NVDDR;
+ }
+
+ feature[0] = mode;
+ chip->select_chip(mtd, achip->csnum);
+ err = chip->set_features(mtd, chip, ONFI_FEATURE_ADDR_TIMING_MODE,
+ (uint8_t *)feature);
+ chip->select_chip(mtd, -1);
+ if (err)
+ return err;
+
+ /*
+ * SDR timing modes 2-5 will not work on the Arasan NAND controller
+ * when the clock runs above 90 MHz, so reduce the frequency to
+ * below 90 MHz for those modes.
+ */
+ if (change_sdr_clk) {
+ clk_disable_unprepare(nfc->clk_sys);
+ err = clk_set_rate(nfc->clk_sys, SDR_MODE_DEFLT_FREQ);
+ if (err) {
+ dev_err(nfc->dev, "Can't set the clock rate\n");
+ return err;
+ }
+ err = clk_prepare_enable(nfc->clk_sys);
+ if (err) {
+ dev_err(nfc->dev, "Unable to enable sys clock.\n");
+ clk_disable_unprepare(nfc->clk_sys);
+ return err;
+ }
+ }
+ achip->inftimeval = inftimeval;
+
+ if (mode & ONFI_DATA_INTERFACE_NVDDR)
+ achip->spktsize = NVDDR_MODE_PACKET_SIZE;
+
+ return 0;
+}
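
For reference, a standalone sketch of the two DATA_INTERFACE words built above (constants mirror the driver's defines): SDR uses the raw mode number, while NV-DDR sets NVDDR_MODE and shifts the mode into bits [5:3]:

    #include <stdio.h>
    #include <stdint.h>

    #define NVDDR_MODE (1u << 9)
    #define NVDDR_TIMING_MODE_SHIFT 3

    int main(void)
    {
        uint32_t sdr_mode = 5;   /* SDR timing mode 5 */
        uint32_t nvddr_mode = 2; /* NV-DDR timing mode 2 */

        printf("SDR: 0x%03x NV-DDR: 0x%03x\n",
               sdr_mode,                                              /* 0x005 */
               NVDDR_MODE | (nvddr_mode << NVDDR_TIMING_MODE_SHIFT)); /* 0x210 */
        return 0;
    }
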
+
+static int anfc_nand_chip_init(struct anfc_nand_controller *nfc,
+ struct anfc_nand_chip *anand_chip,
+ struct device_node *np)
+{
+ struct nand_chip *chip = &anand_chip->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = of_property_read_u32(np, "reg", &anand_chip->csnum);
+ if (ret) {
+ dev_err(nfc->dev, "can't get chip-select\n");
+ return -ENXIO;
+ }
+ mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL, "arasan_nand.%d",
+ anand_chip->csnum);
+ mtd->dev.parent = nfc->dev;
+
+ chip->chip_delay = 30;
+ chip->controller = &nfc->controller;
+ chip->options = NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE;
+ chip->bbt_options = NAND_BBT_USE_FLASH;
+ chip->select_chip = anfc_select_chip;
+ chip->exec_op = anfc_exec_op;
+ nand_set_flash_node(chip, np);
+
+ anand_chip->spktsize = SDR_MODE_PACKET_SIZE;
+
+ ret = nand_scan(mtd, 1);
+ if (ret) {
+ dev_err(nfc->dev, "nand_scan_tail for NAND failed\n");
+ return ret;
+ }
+
+ ret = anfc_init_timing_mode(nfc, anand_chip);
+ if (ret) {
+ dev_err(nfc->dev, "timing mode init failed\n");
+ return ret;
+ }
+
+ return mtd_device_register(mtd, NULL, 0);
+}
+
+static int anfc_probe(struct platform_device *pdev)
+{
+ struct anfc_nand_controller *nfc;
+ struct anfc_nand_chip *anand_chip;
+ struct device_node *np = pdev->dev.of_node, *child;
+ struct resource *res;
+ int err;
+
+ nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nand_controller_init(&nfc->controller);
+ INIT_LIST_HEAD(&nfc->chips);
+ init_completion(&nfc->event);
+ nfc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, nfc);
+ nfc->csnum = -1;
+ nfc->controller.ops = &anfc_nand_controller_ops;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nfc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(nfc->base))
+ return PTR_ERR(nfc->base);
+ nfc->irq = platform_get_irq(pdev, 0);
+ if (nfc->irq < 0) {
+ dev_err(&pdev->dev, "platform_get_irq failed\n");
+ return -ENXIO;
+ }
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ err = devm_request_irq(&pdev->dev, nfc->irq, anfc_irq_handler,
+ 0, "arasannfc", nfc);
+ if (err)
+ return err;
+ nfc->clk_sys = devm_clk_get(&pdev->dev, "clk_sys");
+ if (IS_ERR(nfc->clk_sys)) {
+ dev_err(&pdev->dev, "sys clock not found.\n");
+ return PTR_ERR(nfc->clk_sys);
+ }
+
+ nfc->clk_flash = devm_clk_get(&pdev->dev, "clk_flash");
+ if (IS_ERR(nfc->clk_flash)) {
+ dev_err(&pdev->dev, "flash clock not found.\n");
+ return PTR_ERR(nfc->clk_flash);
+ }
+
+ err = clk_prepare_enable(nfc->clk_sys);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to enable sys clock.\n");
+ return err;
+ }
+
+ err = clk_prepare_enable(nfc->clk_flash);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to enable flash clock.\n");
+ goto clk_dis_sys;
+ }
+
+ pm_runtime_set_autosuspend_delay(nfc->dev, ANFC_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(nfc->dev);
+ pm_runtime_set_active(nfc->dev);
+ pm_runtime_get_noresume(nfc->dev);
+ pm_runtime_enable(nfc->dev);
+ for_each_available_child_of_node(np, child) {
+ anand_chip = devm_kzalloc(&pdev->dev, sizeof(*anand_chip),
+ GFP_KERNEL);
+ if (!anand_chip) {
+ of_node_put(child);
+ err = -ENOMEM;
+ goto nandchip_clean_up;
+ }
+ err = anfc_nand_chip_init(nfc, anand_chip, child);
+ if (err) {
+ devm_kfree(&pdev->dev, anand_chip);
+ continue;
+ }
+
+ list_add_tail(&anand_chip->node, &nfc->chips);
+ }
+ pm_runtime_mark_last_busy(nfc->dev);
+ pm_runtime_put_autosuspend(nfc->dev);
+ return 0;
+
+nandchip_clean_up:
+ list_for_each_entry(anand_chip, &nfc->chips, node)
+ nand_release(nand_to_mtd(&anand_chip->chip));
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ clk_disable_unprepare(nfc->clk_flash);
+clk_dis_sys:
+ clk_disable_unprepare(nfc->clk_sys);
+
+ return err;
+}
+
+static int anfc_remove(struct platform_device *pdev)
+{
+ struct anfc_nand_controller *nfc = platform_get_drvdata(pdev);
+ struct anfc_nand_chip *anand_chip;
+
+ list_for_each_entry(anand_chip, &nfc->chips, node)
+ nand_release(nand_to_mtd(&anand_chip->chip));
+
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+
+ clk_disable_unprepare(nfc->clk_sys);
+ clk_disable_unprepare(nfc->clk_flash);
+
+ return 0;
+}
+
+static const struct of_device_id anfc_ids[] = {
+ { .compatible = "arasan,nfc-v3p10" },
+ { .compatible = "xlnx,zynqmp-nand" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, anfc_ids);
+
+static int anfc_suspend(struct device *dev)
+{
+ return pm_runtime_put_sync(dev);
+}
+
+static int anfc_resume(struct device *dev)
+{
+ return pm_runtime_get_sync(dev);
+}
+
+static int __maybe_unused anfc_runtime_suspend(struct device *dev)
+{
+ struct anfc_nand_controller *nfc = dev_get_drvdata(dev);
+
+ clk_disable(nfc->clk_sys);
+ clk_disable(nfc->clk_flash);
+
+ return 0;
+}
+
+static int __maybe_unused anfc_runtime_idle(struct device *dev)
+{
+ struct anfc_nand_controller *nfc = dev_get_drvdata(dev);
+
+ if (nfc->chip_active)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int __maybe_unused anfc_runtime_resume(struct device *dev)
+{
+ struct anfc_nand_controller *nfc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_enable(nfc->clk_sys);
+ if (ret) {
+ dev_err(dev, "Cannot enable sys clock.\n");
+ return ret;
+ }
+
+ ret = clk_enable(nfc->clk_flash);
+ if (ret) {
+ dev_err(dev, "Cannot enable flash clock.\n");
+ clk_disable(nfc->clk_sys);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops anfc_pm_ops = {
+ .resume = anfc_resume,
+ .suspend = anfc_suspend,
+ .runtime_resume = anfc_runtime_resume,
+ .runtime_suspend = anfc_runtime_suspend,
+ .runtime_idle = anfc_runtime_idle,
+};
+
+static struct platform_driver anfc_driver = {
+ .driver = {
+ .name = "arasan-nand-controller",
+ .of_match_table = anfc_ids,
+ .pm = &anfc_pm_ops,
+ },
+ .probe = anfc_probe,
+ .remove = anfc_remove,
+};
+module_platform_driver(anfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Xilinx, Inc");
+MODULE_DESCRIPTION("Arasan NAND Flash Controller Driver");
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 360b61411f07..0354ffe3bd83 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -467,9 +467,18 @@ static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
* if we don't do this. I have no clue why, but I seem to have 'fixed'
* it in the doc2000 driver in August 1999. dwmw2.
*/
- ret = nand_reset(chip, chipnr);
- if (ret)
- return ret;
+ /*
+ * ONFI-compliant devices may support different data interface modes
+ * (SDR, NVDDR, NVDDR2). A reset returns the device to its power-up
+ * state and places the target in the SDR data interface mode, which
+ * is a problem for devices configured for NVDDR modes. So limit the
+ * reset operation to Toshiba devices.
+ */
+ if (chip->parameters.onfi &&
+ chip->parameters.onfi->jedec_id == NAND_MFR_TOSHIBA) {
+ ret = nand_reset(chip, chipnr);
+ if (ret)
+ return ret;
+ }
nand_select_target(chip, chipnr);
diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c
index 8fe8d7bdd203..6b1f727a2de6 100644
--- a/drivers/mtd/nand/raw/nand_onfi.c
+++ b/drivers/mtd/nand/raw/nand_onfi.c
@@ -149,6 +149,12 @@ int nand_onfi_detect(struct nand_chip *chip)
memorg = nanddev_get_memorg(&chip->base);
+ /*
+ * ONFI must be probed in 8-bit mode; 16-bit mode should only be
+ * selected with NAND_BUSWIDTH_AUTO.
+ */
+ if (chip->options & NAND_BUSWIDTH_16) {
+ pr_err("Trying ONFI probe in 16-bit mode, aborting!\n");
+ return 0;
+ }
+
/* Try ONFI for unknown chip or LP */
ret = nand_readid_op(chip, 0x20, id, sizeof(id));
if (ret || strncmp(id, "ONFI", 4))
@@ -294,6 +300,8 @@ int nand_onfi_detect(struct nand_chip *chip)
onfi->tR = le16_to_cpu(p->t_r);
onfi->tCCS = le16_to_cpu(p->t_ccs);
onfi->async_timing_mode = le16_to_cpu(p->async_timing_mode);
+ onfi->src_sync_timing_mode = le16_to_cpu(p->src_sync_timing_mode);
+ onfi->jedec_id = le16_to_cpu(p->jedec_id);
onfi->vendor_revision = le16_to_cpu(p->vendor_revision);
memcpy(onfi->vendor, p->vendor, sizeof(p->vendor));
chip->parameters.onfi = onfi;
diff --git a/drivers/mtd/nand/raw/nand_timings.c b/drivers/mtd/nand/raw/nand_timings.c
index f12b7a7844c9..e4c161afd3e6 100644
--- a/drivers/mtd/nand/raw/nand_timings.c
+++ b/drivers/mtd/nand/raw/nand_timings.c
@@ -53,6 +53,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
.tWHR_min = 120000,
.tWP_min = 50000,
.tWW_min = 100000,
+ .mode = 0,
},
},
/* Mode 1 */
@@ -95,6 +96,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
.tWHR_min = 80000,
.tWP_min = 25000,
.tWW_min = 100000,
+ .mode = 1,
},
},
/* Mode 2 */
@@ -137,6 +139,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
.tWHR_min = 80000,
.tWP_min = 17000,
.tWW_min = 100000,
+ .mode = 2,
},
},
/* Mode 3 */
@@ -179,6 +182,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
.tWHR_min = 80000,
.tWP_min = 15000,
.tWW_min = 100000,
+ .mode = 3,
},
},
/* Mode 4 */
@@ -221,6 +225,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
.tWHR_min = 80000,
.tWP_min = 12000,
.tWW_min = 100000,
+ .mode = 4,
},
},
/* Mode 5 */
@@ -263,6 +268,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
.tWHR_min = 80000,
.tWP_min = 10000,
.tWW_min = 100000,
+ .mode = 5,
},
},
};
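
The new .mode fields let a driver map a chosen onfi_sdr_timings[] entry back to its ONFI mode number. As a standalone sketch of the usual selection pattern, pick the highest mode advertised in the parameter page's async timing bitmask (fls32() stands in for the kernel's fls()):

    #include <stdio.h>

    static int fls32(unsigned int x) /* stand-in for the kernel's fls() */
    {
        int r = 0;
        while (x) { r++; x >>= 1; }
        return r;
    }

    int main(void)
    {
        unsigned int async_timing_mode = 0x3F; /* modes 0-5 advertised */
        int mode = fls32(async_timing_mode) - 1;

        printf("highest supported SDR mode: %d\n", mode); /* 5 */
        return 0;
    }
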
diff --git a/drivers/mtd/nand/raw/pl353_nand.c b/drivers/mtd/nand/raw/pl353_nand.c
new file mode 100644
index 000000000000..c004dfa505ac
--- /dev/null
+++ b/drivers/mtd/nand/raw/pl353_nand.c
@@ -0,0 +1,1398 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM PL353 NAND flash controller driver
+ *
+ * Copyright (C) 2017 Xilinx, Inc
+ * Author: Punnaiah chowdary kalluri <punnaiah@xilinx.com>
+ * Author: Naga Sureshkumar Relli <nagasure@xilinx.com>
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/pl353-smc.h>
+#include <linux/clk.h>
+
+#define PL353_NAND_DRIVER_NAME "pl353-nand"
+
+/* NAND flash driver defines */
+#define PL353_NAND_CMD_PHASE 1 /* End command valid in command phase */
+#define PL353_NAND_DATA_PHASE 2 /* End command valid in data phase */
+#define PL353_NAND_ECC_SIZE 512 /* Size of data for ECC operation */
+
+/* Flash memory controller operating parameters */
+
+#define PL353_NAND_ECC_CONFIG (BIT(4) | /* ECC read at end of page */ \
+ (0 << 5)) /* No Jumping */
+
+/* AXI Address definitions */
+#define START_CMD_SHIFT 3
+#define END_CMD_SHIFT 11
+#define END_CMD_VALID_SHIFT 20
+#define ADDR_CYCLES_SHIFT 21
+#define CLEAR_CS_SHIFT 21
+#define ECC_LAST_SHIFT 10
+#define COMMAND_PHASE (0 << 19)
+#define DATA_PHASE BIT(19)
+
+#define PL353_NAND_ECC_LAST BIT(ECC_LAST_SHIFT) /* Set ECC_Last */
+#define PL353_NAND_CLEAR_CS BIT(CLEAR_CS_SHIFT) /* Clear chip select */
+
+#define ONDIE_ECC_FEATURE_ADDR 0x90
+#define PL353_NAND_ECC_BUSY_TIMEOUT (1 * HZ)
+#define PL353_NAND_DEV_BUSY_TIMEOUT (1 * HZ)
+#define PL353_NAND_LAST_TRANSFER_LENGTH 4
+#define PL353_NAND_ECC_VALID_SHIFT 24
+#define PL353_NAND_ECC_VALID_MASK 0x40
+#define PL353_ECC_BITS_BYTEOFF_MASK 0x1FF
+#define PL353_ECC_BITS_BITOFF_MASK 0x7
+#define PL353_ECC_BIT_MASK 0xFFF
+#define PL353_TREA_MAX_VALUE 1
+#define PL353_MAX_ECC_CHUNKS 4
+#define PL353_MAX_ECC_BYTES 3
+
+struct pl353_nfc_op {
+ u32 cmnds[4];
+ u32 end_cmd;
+ u32 addrs;
+ u32 len;
+ u32 naddrs;
+ u32 addr5;
+ u32 addr6;
+ unsigned int data_instr_idx;
+ unsigned int rdy_timeout_ms;
+ unsigned int rdy_delay_ns;
+ unsigned int cle_ale_delay_ns;
+ const struct nand_op_instr *data_instr;
+};
+
+/**
+ * struct pl353_nand_controller - Defines the NAND flash controller driver
+ * instance
+ * @chip: NAND chip information structure
+ * @dev: Parent device (used to print error messages)
+ * @regs: Virtual address of the NAND flash device
+ * @buf_addr: Virtual address of the NAND flash device for
+ * data read/writes
+ * @addr_cycles: Address cycles
+ * @mclk: Memory controller clock
+ * @buswidth: Bus width 8 or 16
+ */
+struct pl353_nand_controller {
+ struct nand_chip chip;
+ struct device *dev;
+ void __iomem *regs;
+ void __iomem *buf_addr;
+ u8 addr_cycles;
+ struct clk *mclk;
+ u32 buswidth;
+};
+
+static int pl353_ecc_ooblayout16_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * chip->ecc.bytes);
+ oobregion->length = chip->ecc.bytes;
+
+ return 0;
+}
+
+static int pl353_ecc_ooblayout16_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * chip->ecc.bytes) + 8;
+ oobregion->length = 8;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops pl353_ecc_ooblayout16_ops = {
+ .ecc = pl353_ecc_ooblayout16_ecc,
+ .free = pl353_ecc_ooblayout16_free,
+};
+
+static int pl353_ecc_ooblayout64_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * chip->ecc.bytes) + 52;
+ oobregion->length = chip->ecc.bytes;
+
+ return 0;
+}
+
+static int pl353_ecc_ooblayout64_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = (section * chip->ecc.bytes) + 2;
+ oobregion->length = 50;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops pl353_ecc_ooblayout64_ops = {
+ .ecc = pl353_ecc_ooblayout64_ecc,
+ .free = pl353_ecc_ooblayout64_free,
+};
+
+/* Generic flash bbt descriptors */
+static u8 bbt_pattern[] = { 'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = { '1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 4,
+ .len = 4,
+ .veroffs = 20,
+ .maxblocks = 4,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 4,
+ .len = 4,
+ .veroffs = 20,
+ .maxblocks = 4,
+ .pattern = mirror_pattern
+};
+
+static void pl353_nfc_force_byte_access(struct nand_chip *chip,
+ bool force_8bit)
+{
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+
+ if (xnfc->buswidth == 8)
+ return;
+
+ if (force_8bit)
+ pl353_smc_set_buswidth(PL353_SMC_MEM_WIDTH_8);
+ else
+ pl353_smc_set_buswidth(PL353_SMC_MEM_WIDTH_16);
+}
+
+/**
+ * pl353_nand_read_data_op - read chip data into buffer
+ * @chip: Pointer to the NAND chip info structure
+ * @in: Pointer to the buffer to store read data
+ * @len: Number of bytes to read
+ * @force_8bit: Force 8-bit bus access
+ * Return: Always return zero
+ */
+static int pl353_nand_read_data_op(struct nand_chip *chip,
+ u8 *in,
+ unsigned int len, bool force_8bit)
+{
+ int i;
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+
+ if (force_8bit)
+ pl353_nfc_force_byte_access(chip, true);
+
+ if ((IS_ALIGNED((uintptr_t)in, sizeof(uint32_t)) &&
+ IS_ALIGNED(len, sizeof(uint32_t))) || (!force_8bit)) {
+ u32 *ptr = (u32 *)in;
+
+ len /= 4;
+ for (i = 0; i < len; i++)
+ ptr[i] = readl(xnfc->buf_addr);
+ } else {
+ for (i = 0; i < len; i++)
+ in[i] = readb(xnfc->buf_addr);
+ }
+ if (force_8bit)
+ pl353_nfc_force_byte_access(chip, false);
+
+ return 0;
+}
+
+/**
+ * pl353_nand_write_data_op - write buffer to chip
+ * @mtd: Pointer to the mtd info structure
+ * @buf: Pointer to the buffer holding the write data
+ * @len: Number of bytes to write
+ * @force_8bit: Force 8-bit bus access
+ */
+static void pl353_nand_write_data_op(struct mtd_info *mtd, const u8 *buf,
+ int len, bool force_8bit)
+{
+ int i;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+
+ if (force_8bit)
+ pl353_nfc_force_byte_access(chip, true);
+
+ if ((IS_ALIGNED((uintptr_t)buf, sizeof(uint32_t)) &&
+ IS_ALIGNED(len, sizeof(uint32_t))) || (!force_8bit)) {
+ u32 *ptr = (u32 *)buf;
+
+ len /= 4;
+ for (i = 0; i < len; i++)
+ writel(ptr[i], xnfc->buf_addr);
+ } else {
+ for (i = 0; i < len; i++)
+ writeb(buf[i], xnfc->buf_addr);
+ }
+ if (force_8bit)
+ pl353_nfc_force_byte_access(chip, false);
+}
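
A standalone sketch of the fast-path test shared by the two helpers above: the 32-bit word path is taken only when the buffer address and the length are both word aligned (IS_ALIGNED reimplemented here for illustration):

    #include <stdio.h>
    #include <stdint.h>

    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    int main(void)
    {
        uint32_t words[2]; /* 4-byte aligned by type */
        uintptr_t p = (uintptr_t)words;

        printf("addr: %d, len 6: %d, len 8: %d\n",
               (int)IS_ALIGNED(p, 4),  /* 1 */
               (int)IS_ALIGNED(6, 4),  /* 0: falls back to byte access */
               (int)IS_ALIGNED(8, 4)); /* 1 */
        return 0;
    }
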
+
+static int pl353_wait_for_ecc_done(void)
+{
+ unsigned long timeout = jiffies + PL353_NAND_ECC_BUSY_TIMEOUT;
+
+ do {
+ if (pl353_smc_ecc_is_busy())
+ cpu_relax();
+ else
+ break;
+ } while (!time_after_eq(jiffies, timeout));
+
+ if (time_after_eq(jiffies, timeout)) {
+ pr_err("%s timed out\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/**
+ * pl353_nand_calculate_hwecc - Calculate Hardware ECC
+ * @mtd: Pointer to the mtd_info structure
+ * @data: Pointer to the page data
+ * @ecc: Pointer to the ECC buffer where ECC data needs to be stored
+ *
+ * This function retrieves the Hardware ECC data from the controller and returns
+ * ECC data back to the MTD subsystem.
+ * It operates on a number of 512 byte blocks of NAND memory and can be
+ * programmed to store the ECC codes after the data in memory. For writes,
+ * the ECC is written to the spare area of the page. For reads, the result
+ * of a block ECC check is made available to the device driver.
+ *
+ * ------------------------------------------------------------------------
+ * | n * 512 blocks | extra | ecc | |
+ * | | block | codes | |
+ * ------------------------------------------------------------------------
+ *
+ * The ECC calculation uses a simple Hamming code with 1-bit correction and
+ * 2-bit detection. It starts when a valid read or write command with a
+ * 512 byte aligned address is detected on the memory interface.
+ *
+ * Return: 0 on success or error value on failure
+ */
+static int pl353_nand_calculate_hwecc(struct mtd_info *mtd,
+ const u8 *data, u8 *ecc)
+{
+ u32 ecc_value;
+ u8 chunk, ecc_byte, ecc_status;
+
+ for (chunk = 0; chunk < PL353_MAX_ECC_CHUNKS; chunk++) {
+ /* Read ECC value for each block */
+ ecc_value = pl353_smc_get_ecc_val(chunk);
+ ecc_status = (ecc_value >> PL353_NAND_ECC_VALID_SHIFT);
+
+ /* ECC value valid */
+ if (ecc_status & PL353_NAND_ECC_VALID_MASK) {
+ for (ecc_byte = 0; ecc_byte < PL353_MAX_ECC_BYTES;
+ ecc_byte++) {
+ /* Copy ECC bytes to MTD buffer */
+ *ecc = ~ecc_value & 0xFF;
+ ecc_value = ecc_value >> 8;
+ ecc++;
+ }
+ } else {
+ pr_warn("%s: ECC status not valid\n", __func__);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * pl353_nand_correct_data - ECC correction function
+ * @mtd: Pointer to the mtd_info structure
+ * @buf: Pointer to the page data
+ * @read_ecc: Pointer to the ECC value read from spare data area
+ * @calc_ecc: Pointer to the calculated ECC value
+ *
+ * This function corrects single bit ECC errors and detects 2-bit errors.
+ *
+ * Return: 0 if no ECC errors found
+ * 1 if single bit error found and corrected.
+ * -1 if multiple uncorrectable ECC errors found.
+ */
+static int pl353_nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ unsigned char bit_addr;
+ unsigned int byte_addr;
+ unsigned short ecc_odd, ecc_even, read_ecc_lower, read_ecc_upper;
+ unsigned short calc_ecc_lower, calc_ecc_upper;
+
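+ /*
+ * The 3-byte ECC code holds two 12-bit halves: the lower half carries
+ * the odd parity bits and the upper half the even parity bits.
+ */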
+ read_ecc_lower = (read_ecc[0] | (read_ecc[1] << 8)) &
+ PL353_ECC_BIT_MASK;
+ read_ecc_upper = ((read_ecc[1] >> 4) | (read_ecc[2] << 4)) &
+ PL353_ECC_BIT_MASK;
+
+ calc_ecc_lower = (calc_ecc[0] | (calc_ecc[1] << 8)) &
+ PL353_ECC_BIT_MASK;
+ calc_ecc_upper = ((calc_ecc[1] >> 4) | (calc_ecc[2] << 4)) &
+ PL353_ECC_BIT_MASK;
+
+ ecc_odd = read_ecc_lower ^ calc_ecc_lower;
+ ecc_even = read_ecc_upper ^ calc_ecc_upper;
+
+ /* no error */
+ if (!ecc_odd && !ecc_even)
+ return 0;
+
+ if (ecc_odd == (~ecc_even & PL353_ECC_BIT_MASK)) {
+ /* bits [11:3] of the error code give the byte offset */
+ byte_addr = (ecc_odd >> 3) & PL353_ECC_BITS_BYTEOFF_MASK;
+ /* bits [2:0] of the error code give the bit offset */
+ bit_addr = ecc_odd & PL353_ECC_BITS_BITOFF_MASK;
+ /* Toggle the erroneous bit */
+ buf[byte_addr] ^= (BIT(bit_addr));
+ return 1;
+ }
+
+ /* one error in parity */
+ if (hweight32(ecc_odd | ecc_even) == 1)
+ return 1;
+
+ /* Uncorrectable error */
+ return -1;
+}
+
+static void pl353_prepare_cmd(struct mtd_info *mtd, struct nand_chip *chip,
+ int page, int column, int start_cmd, int end_cmd,
+ bool read)
+{
+ unsigned long data_phase_addr;
+ u32 end_cmd_valid = 0;
+ unsigned long cmd_phase_addr = 0, cmd_phase_data = 0;
+
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+
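+ /*
+ * The SMC encodes NAND control information in the AXI address itself:
+ * address cycle count, start/end commands and the phase select are
+ * carried on the address bus rather than in separate registers.
+ */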
+ end_cmd_valid = read ? 1 : 0;
+
+ cmd_phase_addr = (unsigned long __force)xnfc->regs +
+ ((xnfc->addr_cycles
+ << ADDR_CYCLES_SHIFT) |
+ (end_cmd_valid << END_CMD_VALID_SHIFT) |
+ (COMMAND_PHASE) |
+ (end_cmd << END_CMD_SHIFT) |
+ (start_cmd << START_CMD_SHIFT));
+
+ /* Get the data phase address */
+ data_phase_addr = (unsigned long __force)xnfc->regs +
+ ((0x0 << CLEAR_CS_SHIFT) |
+ (0 << END_CMD_VALID_SHIFT) |
+ (DATA_PHASE) |
+ (end_cmd << END_CMD_SHIFT) |
+ (0x0 << ECC_LAST_SHIFT));
+
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+
+ if (chip->options & NAND_BUSWIDTH_16)
+ column /= 2;
+ cmd_phase_data = column;
+ if (mtd->writesize > PL353_NAND_ECC_SIZE) {
+ cmd_phase_data |= page << 16;
+ /* Another address cycle for devices > 128MiB */
+ if (chip->options & NAND_ROW_ADDR_3) {
+ writel_relaxed(cmd_phase_data,
+ (void __iomem * __force)cmd_phase_addr);
+ cmd_phase_data = (page >> 16);
+ }
+ } else {
+ cmd_phase_data |= page << 8;
+ }
+
+ writel_relaxed(cmd_phase_data, (void __iomem * __force)cmd_phase_addr);
+}
+
+/**
+ * pl353_nand_read_oob - OOB data read function
+ * @mtd: Pointer to the mtd_info structure
+ * @chip: Pointer to the nand_chip structure
+ * @page: Page number to read
+ *
+ * Return: Always returns zero
+ */
+static int pl353_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ unsigned long data_phase_addr;
+ u8 *p;
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ unsigned long nand_offset = (unsigned long __force)xnfc->regs;
+
+ chip->pagebuf = -1;
+ if (mtd->writesize < PL353_NAND_ECC_SIZE)
+ return 0;
+
+ pl353_prepare_cmd(mtd, chip, page, mtd->writesize, NAND_CMD_READ0,
+ NAND_CMD_READSTART, 1);
+
+ nand_wait_ready(mtd);
+
+ p = chip->oob_poi;
+ pl353_nand_read_data_op(chip, p,
+ (mtd->oobsize -
+ PL353_NAND_LAST_TRANSFER_LENGTH), false);
+ p += (mtd->oobsize - PL353_NAND_LAST_TRANSFER_LENGTH);
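+ /*
+ * Rebuild the data phase address with CLEAR_CS set so that the chip
+ * select is de-asserted on the final transfer.
+ */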
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr |= PL353_NAND_CLEAR_CS;
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+ pl353_nand_read_data_op(chip, p, PL353_NAND_LAST_TRANSFER_LENGTH,
+ false);
+
+ return 0;
+}
+
+/**
+ * pl353_nand_write_oob - OOB data write function
+ * @mtd: Pointer to the mtd info structure
+ * @chip: Pointer to the NAND chip info structure
+ * @page: Page number to write
+ *
+ * Return: Always returns zero
+ */
+static int pl353_nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ const u8 *buf = chip->oob_poi;
+ unsigned long data_phase_addr;
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ unsigned long nand_offset = (unsigned long __force)xnfc->regs;
+
+ chip->pagebuf = -1;
+ pl353_prepare_cmd(mtd, chip, page, mtd->writesize, NAND_CMD_SEQIN,
+ NAND_CMD_PAGEPROG, 0);
+
+ pl353_nand_write_data_op(mtd, buf,
+ (mtd->oobsize -
+ PL353_NAND_LAST_TRANSFER_LENGTH), false);
+ buf += (mtd->oobsize - PL353_NAND_LAST_TRANSFER_LENGTH);
+
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr |= PL353_NAND_CLEAR_CS;
+ data_phase_addr |= (1 << END_CMD_VALID_SHIFT);
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+ pl353_nand_write_data_op(mtd, buf, PL353_NAND_LAST_TRANSFER_LENGTH,
+ false);
+ nand_wait_ready(mtd);
+
+ return 0;
+}
+
+/**
+ * pl353_nand_read_page_raw - read raw page data without ECC
+ * @mtd: Pointer to the mtd info structure
+ * @chip: Pointer to the NAND chip info structure
+ * @buf: Pointer to the data buffer
+ * @oob_required: Caller requires OOB data read to chip->oob_poi
+ * @page: Page number to read
+ *
+ * Return: Always returns zero
+ */
+static int pl353_nand_read_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ u8 *buf, int oob_required, int page)
+{
+ unsigned long data_phase_addr;
+ u8 *p;
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ unsigned long nand_offset = (unsigned long __force)xnfc->regs;
+
+ pl353_prepare_cmd(mtd, chip, page, 0, NAND_CMD_READ0,
+ NAND_CMD_READSTART, 1);
+ nand_wait_ready(mtd);
+ pl353_nand_read_data_op(chip, buf, mtd->writesize, false);
+ p = chip->oob_poi;
+ pl353_nand_read_data_op(chip, p,
+ (mtd->oobsize -
+ PL353_NAND_LAST_TRANSFER_LENGTH), false);
+ p += (mtd->oobsize - PL353_NAND_LAST_TRANSFER_LENGTH);
+
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr |= PL353_NAND_CLEAR_CS;
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+
+ pl353_nand_read_data_op(chip, p, PL353_NAND_LAST_TRANSFER_LENGTH,
+ false);
+
+ return 0;
+}
+
+/**
+ * pl353_nand_write_page_raw - raw page write function
+ * @mtd: Pointer to the mtd info structure
+ * @chip: Pointer to the NAND chip info structure
+ * @buf: Pointer to the data buffer
+ * @oob_required: Caller requires OOB data to be written from chip->oob_poi
+ * @page: Page number to write
+ *
+ * Return: Always returns zero
+ */
+static int pl353_nand_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ unsigned long data_phase_addr;
+ u8 *p;
+
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ unsigned long nand_offset = (unsigned long __force)xnfc->regs;
+
+ pl353_prepare_cmd(mtd, chip, page, 0, NAND_CMD_SEQIN,
+ NAND_CMD_PAGEPROG, 0);
+ pl353_nand_write_data_op(mtd, buf, mtd->writesize, false);
+ p = chip->oob_poi;
+ pl353_nand_write_data_op(mtd, p,
+ (mtd->oobsize -
+ PL353_NAND_LAST_TRANSFER_LENGTH), false);
+ p += (mtd->oobsize - PL353_NAND_LAST_TRANSFER_LENGTH);
+
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr |= PL353_NAND_CLEAR_CS;
+ data_phase_addr |= (1 << END_CMD_VALID_SHIFT);
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+ pl353_nand_write_data_op(mtd, p, PL353_NAND_LAST_TRANSFER_LENGTH,
+ false);
+
+ return 0;
+}
+
+/**
+ * pl353_nand_write_page_hwecc - Hardware ECC based page write function
+ * @mtd: Pointer to the mtd info structure
+ * @chip: Pointer to the NAND chip info structure
+ * @buf: Pointer to the data buffer
+ * @oob_required: Caller requires OOB data to be written from chip->oob_poi
+ * @page: Page number to write
+ *
+ * This function writes the data and the hardware generated ECC values to
+ * the page.
+ *
+ * Return: Always returns zero
+ */
+static int pl353_nand_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ int eccsize = chip->ecc.size;
+ int eccsteps = chip->ecc.steps;
+ u8 *ecc_calc = chip->ecc.calc_buf;
+ u8 *oob_ptr;
+ const u8 *p = buf;
+ int ret;
+ unsigned long data_phase_addr;
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ unsigned long nand_offset = (unsigned long __force)xnfc->regs;
+
+ pl353_prepare_cmd(mtd, chip, page, 0, NAND_CMD_SEQIN,
+ NAND_CMD_PAGEPROG, 0);
+
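+ /*
+ * Write all ECC steps but the last 4 bytes of the page; those go out
+ * as a separate transfer with ECC_LAST set.
+ */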
+ for (; eccsteps > 1; eccsteps--) {
+ pl353_nand_write_data_op(mtd, p, eccsize, false);
+ p += eccsize;
+ }
+ pl353_nand_write_data_op(mtd, p,
+ (eccsize - PL353_NAND_LAST_TRANSFER_LENGTH),
+ false);
+ p += (eccsize - PL353_NAND_LAST_TRANSFER_LENGTH);
+
+ /* Set ECC Last bit to 1 */
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr |= PL353_NAND_ECC_LAST;
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+ pl353_nand_write_data_op(mtd, p, PL353_NAND_LAST_TRANSFER_LENGTH,
+ false);
+
+ /* Wait till the ECC operation is complete or timeout */
+ ret = pl353_wait_for_ecc_done();
+ if (ret)
+ dev_err(xnfc->dev, "ECC Timeout\n");
+ p = buf;
+ ret = chip->ecc.calculate(mtd, p, &ecc_calc[0]);
+ if (ret)
+ return ret;
+
+ /* Wait for ECC to be calculated and read the error values */
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi,
+ 0, chip->ecc.total);
+ if (ret)
+ return ret;
+ /* Clear ECC last bit */
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr &= ~PL353_NAND_ECC_LAST;
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+
+ /* Write the spare area with ECC bytes */
+ oob_ptr = chip->oob_poi;
+ pl353_nand_write_data_op(mtd, oob_ptr,
+ (mtd->oobsize -
+ PL353_NAND_LAST_TRANSFER_LENGTH), false);
+
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr |= PL353_NAND_CLEAR_CS;
+ data_phase_addr |= (1 << END_CMD_VALID_SHIFT);
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+ oob_ptr += (mtd->oobsize - PL353_NAND_LAST_TRANSFER_LENGTH);
+ pl353_nand_write_data_op(mtd, oob_ptr, PL353_NAND_LAST_TRANSFER_LENGTH,
+ false);
+ nand_wait_ready(mtd);
+
+ return 0;
+}
+
+/**
+ * pl353_nand_read_page_hwecc - Hardware ECC based page read function
+ * @mtd: Pointer to the mtd info structure
+ * @chip: Pointer to the NAND chip info structure
+ * @buf: Pointer to the buffer to store read data
+ * @oob_required: Caller requires OOB data read to chip->oob_poi
+ * @page: Page number to read
+ *
+ * This function reads data and checks data integrity by comparing the
+ * hardware generated ECC values with the ECC values read from the spare area.
+ * The SMC controller has the limitation that ECC_LAST must be set on the
+ * last data phase access to tell the ECC block not to expect further data.
+ * For example, with 4 ECC steps the first 3 are transferred with hardware
+ * ECC enabled; the last step is split so that the final 4 bytes go out as
+ * a separate transfer with the ECC_LAST bit set in the NAND data phase
+ * register. The last transfer must end on a 512 byte block boundary.
+ * Because of this limitation, the core page access routines are not used.
+ *
+ * Return: Maximum number of bitflips; ECC statistics are updated in the
+ * MTD structure
+ */
+static int pl353_nand_read_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ u8 *buf, int oob_required, int page)
+{
+ int i, stat, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ u8 *p = buf;
+ u8 *ecc_calc = chip->ecc.calc_buf;
+ u8 *ecc = chip->ecc.code_buf;
+ unsigned int max_bitflips = 0;
+ u8 *oob_ptr;
+ int ret;
+ unsigned long data_phase_addr;
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ unsigned long nand_offset = (unsigned long __force)xnfc->regs;
+
+ pl353_prepare_cmd(mtd, chip, page, 0, NAND_CMD_READ0,
+ NAND_CMD_READSTART, 1);
+ nand_wait_ready(mtd);
+
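+ /*
+ * Read all ECC steps but the last 4 bytes of the page; those are
+ * fetched as a separate transfer with ECC_LAST set.
+ */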
+ for (; eccsteps > 1; eccsteps--) {
+ pl353_nand_read_data_op(chip, p, eccsize, false);
+ p += eccsize;
+ }
+ pl353_nand_read_data_op(chip, p,
+ (eccsize - PL353_NAND_LAST_TRANSFER_LENGTH),
+ false);
+ p += (eccsize - PL353_NAND_LAST_TRANSFER_LENGTH);
+
+ /* Set ECC Last bit to 1 */
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr |= PL353_NAND_ECC_LAST;
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+ pl353_nand_read_data_op(chip, p, PL353_NAND_LAST_TRANSFER_LENGTH,
+ false);
+
+ /* Wait till the ECC operation is complete or timeout */
+ ret = pl353_wait_for_ecc_done();
+ if (ret)
+ dev_err(xnfc->dev, "ECC Timeout\n");
+
+ /* Read the calculated ECC value */
+ p = buf;
+ ret = chip->ecc.calculate(mtd, p, &ecc_calc[0]);
+ if (ret)
+ return ret;
+
+ /* Clear ECC last bit */
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr &= ~PL353_NAND_ECC_LAST;
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+
+ /* Read the stored ECC value */
+ oob_ptr = chip->oob_poi;
+ pl353_nand_read_data_op(chip, oob_ptr,
+ (mtd->oobsize -
+ PL353_NAND_LAST_TRANSFER_LENGTH), false);
+
+ /* de-assert chip select */
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr |= PL353_NAND_CLEAR_CS;
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+
+ oob_ptr += (mtd->oobsize - PL353_NAND_LAST_TRANSFER_LENGTH);
+ pl353_nand_read_data_op(chip, oob_ptr, PL353_NAND_LAST_TRANSFER_LENGTH,
+ false);
+
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ eccsteps = chip->ecc.steps;
+ p = buf;
+
+ /* Check ECC error for all blocks and correct if it is correctable */
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ stat = chip->ecc.correct(mtd, p, &ecc[i], &ecc_calc[i]);
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+
+ return max_bitflips;
+}
+
+/**
+ * pl353_nand_select_chip - Select the flash device
+ * @mtd: Pointer to the mtd info structure
+ * @chip: Pointer to the NAND chip info structure
+ *
+ * This function is empty because the controller handles the chip select
+ * line internally, based on the chip address passed in the command and
+ * data phases.
+ */
+static void pl353_nand_select_chip(struct mtd_info *mtd, int chip)
+{
+}
+
+/* NAND framework ->exec_op() hooks and related helpers */
+static void pl353_nfc_parse_instructions(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ struct pl353_nfc_op *nfc_op)
+{
+ const struct nand_op_instr *instr = NULL;
+ unsigned int op_id, offset, naddrs;
+ int i;
+ const u8 *addrs;
+
+ memset(nfc_op, 0, sizeof(struct pl353_nfc_op));
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ instr = &subop->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (op_id)
+ nfc_op->cmnds[1] = instr->ctx.cmd.opcode;
+ else
+ nfc_op->cmnds[0] = instr->ctx.cmd.opcode;
+ nfc_op->cle_ale_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+ nfc_op->addrs = instr->ctx.addr.addrs[offset];
+ for (i = 0; i < min_t(unsigned int, 4, naddrs); i++) {
+ nfc_op->addrs |= instr->ctx.addr.addrs[i] <<
+ (8 * i);
+ }
+
+ if (naddrs >= 5)
+ nfc_op->addr5 = addrs[4];
+ if (naddrs >= 6)
+ nfc_op->addr6 = addrs[5];
+ nfc_op->naddrs = nand_subop_get_num_addr_cyc(subop,
+ op_id);
+ nfc_op->cle_ale_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_DATA_OUT_INSTR:
+ nfc_op->len = nand_subop_get_data_len(subop, op_id);
+ nfc_op->data_instr = instr;
+ nfc_op->data_instr_idx = op_id;
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
+ nfc_op->rdy_delay_ns = instr->delay_ns;
+ break;
+ }
+ }
+}
+
+static void cond_delay(unsigned int ns)
+{
+ if (!ns)
+ return;
+
+ if (ns < 10000)
+ ndelay(ns);
+ else
+ udelay(DIV_ROUND_UP(ns, 1000));
+}
+
+/**
+ * pl353_nand_exec_op_cmd - Send command to NAND device
+ * @chip: Pointer to the NAND chip info structure
+ * @subop: Pointer to the sub-operation to execute
+ * Return: Always returns zero
+ */
+static int pl353_nand_exec_op_cmd(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_op_instr *instr;
+ struct pl353_nfc_op nfc_op = {};
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ unsigned long cmd_phase_data = 0, end_cmd_valid = 0;
+ unsigned long cmd_phase_addr, data_phase_addr, end_cmd;
+ unsigned int len;
+ bool reading;
+
+ pl353_nfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ len = nfc_op.len;
+
+ pl353_smc_clr_nand_int();
+ /* Determine the end command and whether it is valid */
+ if (nfc_op.cmnds[1] != 0) {
+ if (nfc_op.cmnds[0] == NAND_CMD_SEQIN)
+ end_cmd_valid = 0;
+ else
+ end_cmd_valid = 1;
+ end_cmd = nfc_op.cmnds[1];
+ } else {
+ end_cmd = 0x0;
+ }
+
+ /*
+ * The SMC defines two phases of commands when transferring data to or
+ * from NAND flash.
+ * Command phase: Commands and optional address information are written
+ * to the NAND flash. The command and address can be associated with
+ * either a data phase operation to write to or read from the array,
+ * or a status/ID register transfer.
+ * Data phase: Data is either written to or read from the NAND flash.
+ * This data can be either data transferred to or from the array,
+ * or status/ID register information.
+ */
+ cmd_phase_addr = (unsigned long __force)xnfc->regs +
+ ((nfc_op.naddrs << ADDR_CYCLES_SHIFT) |
+ (end_cmd_valid << END_CMD_VALID_SHIFT) |
+ (COMMAND_PHASE) |
+ (end_cmd << END_CMD_SHIFT) |
+ (nfc_op.cmnds[0] << START_CMD_SHIFT));
+
+ /* Get the data phase address */
+ end_cmd_valid = 0;
+
+ data_phase_addr = (unsigned long __force)xnfc->regs +
+ ((0x0 << CLEAR_CS_SHIFT) |
+ (end_cmd_valid << END_CMD_VALID_SHIFT) |
+ (DATA_PHASE) |
+ (end_cmd << END_CMD_SHIFT) |
+ (0x0 << ECC_LAST_SHIFT));
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+
+ /* Command phase AXI Read & Write */
+ if (nfc_op.naddrs >= 5) {
+ if (mtd->writesize > PL353_NAND_ECC_SIZE) {
+ cmd_phase_data = nfc_op.addrs;
+ /* Another address cycle for devices > 128MiB */
+ if (chip->options & NAND_ROW_ADDR_3) {
+ writel_relaxed(cmd_phase_data,
+ (void __iomem * __force)
+ cmd_phase_addr);
+ cmd_phase_data = nfc_op.addr5;
+ if (nfc_op.naddrs >= 6)
+ cmd_phase_data |= (nfc_op.addr6 << 8);
+ }
+ }
+ } else {
+ if (nfc_op.addrs != -1) {
+ int column = nfc_op.addrs;
+ /*
+ * Change the read/write column, read ID, etc.
+ * Adjust the column for 16-bit bus width.
+ */
+ if ((chip->options & NAND_BUSWIDTH_16) &&
+ (nfc_op.cmnds[0] == NAND_CMD_READ0 ||
+ nfc_op.cmnds[0] == NAND_CMD_SEQIN ||
+ nfc_op.cmnds[0] == NAND_CMD_RNDOUT ||
+ nfc_op.cmnds[0] == NAND_CMD_RNDIN)) {
+ column >>= 1;
+ }
+ cmd_phase_data = column;
+ }
+ }
+ writel_relaxed(cmd_phase_data, (void __iomem * __force)cmd_phase_addr);
+
+ if (!nfc_op.data_instr) {
+ if (nfc_op.rdy_timeout_ms)
+ nand_wait_ready(mtd);
+ return 0;
+ }
+
+ reading = (nfc_op.data_instr->type == NAND_OP_DATA_IN_INSTR);
+ if (!reading) {
+ pl353_nand_write_data_op(mtd, instr->ctx.data.buf.out,
+ len, instr->ctx.data.force_8bit);
+ if (nfc_op.rdy_timeout_ms)
+ nand_wait_ready(mtd);
+ cond_delay(nfc_op.rdy_delay_ns);
+ }
+ if (reading) {
+ cond_delay(nfc_op.rdy_delay_ns);
+ if (nfc_op.rdy_timeout_ms)
+ nand_wait_ready(mtd);
+ pl353_nand_read_data_op(chip, instr->ctx.data.buf.in, len,
+ instr->ctx.data.force_8bit);
+ }
+
+ return 0;
+}
+
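+/*
+ * Instruction patterns accepted by ->exec_op(): plain reads
+ * (CMD/ADDR/WAITRDY/DATA_IN), two-command reads, address-only operations,
+ * page programs (CMD/ADDR/DATA_OUT/CMD/WAITRDY) and bare commands such as
+ * reset.
+ */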
+static const struct nand_op_parser pl353_nfc_op_parser = NAND_OP_PARSER
+ (NAND_OP_PARSER_PATTERN
+ (pl353_nand_exec_op_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 7),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 2048)),
+ NAND_OP_PARSER_PATTERN
+ (pl353_nand_exec_op_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, 7),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 2048)),
+ NAND_OP_PARSER_PATTERN
+ (pl353_nand_exec_op_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 7),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN
+ (pl353_nand_exec_op_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, 8),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 2048),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ NAND_OP_PARSER_PATTERN
+ (pl353_nand_exec_op_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false)),
+ );
+
+static int pl353_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ return nand_op_parser_exec_op(chip, &pl353_nfc_op_parser,
+ op, check_only);
+}
+
+/**
+ * pl353_nand_device_ready - Check device ready/busy line
+ * @mtd: Pointer to the mtd_info structure
+ *
+ * Return: 0 on busy or 1 on ready state
+ */
+static int pl353_nand_device_ready(struct mtd_info *mtd)
+{
+ if (pl353_smc_get_nand_int_status_raw()) {
+ pl353_smc_clr_nand_int();
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * pl353_nand_ecc_init - Initialize the ECC information as per the ECC mode
+ * @mtd: Pointer to the mtd_info structure
+ * @ecc: Pointer to ECC control structure
+ * @ecc_mode: On-die or hardware ECC mode
+ *
+ * This function initializes the ECC block and the function pointers as per
+ * the ECC mode.
+ * Return: 0 on success or negative errno.
+ */
+static int pl353_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc,
+ int ecc_mode)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ int err = 0;
+
+ ecc->read_oob = pl353_nand_read_oob;
+ ecc->write_oob = pl353_nand_write_oob;
+
+ if (ecc_mode == NAND_ECC_ON_DIE) {
+ ecc->write_page_raw = pl353_nand_write_page_raw;
+ ecc->read_page_raw = pl353_nand_read_page_raw;
+ pl353_smc_set_ecc_mode(PL353_SMC_ECCMODE_BYPASS);
+ /*
+ * With on-die ECC, spare bytes from offset 8 hold the ECC codes,
+ * so use the driver's own BBT pattern descriptors.
+ */
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+ } else {
+ ecc->mode = NAND_ECC_HW;
+ /* Hardware ECC generates 3 bytes ECC code for each 512 bytes */
+ ecc->bytes = 3;
+ ecc->strength = 1;
+ ecc->calculate = pl353_nand_calculate_hwecc;
+ ecc->correct = pl353_nand_correct_data;
+ ecc->read_page = pl353_nand_read_page_hwecc;
+ ecc->write_page = pl353_nand_write_page_hwecc;
+ ecc->size = PL353_NAND_ECC_SIZE;
+ pl353_smc_set_ecc_pg_size(mtd->writesize);
+ switch (mtd->writesize) {
+ case SZ_512:
+ case SZ_1K:
+ case SZ_2K:
+ pl353_smc_set_ecc_mode(PL353_SMC_ECCMODE_APB);
+ break;
+ default:
+ ecc->calculate = nand_calculate_ecc;
+ ecc->correct = nand_correct_data;
+ ecc->size = 256;
+ break;
+ }
+
+ if (mtd->oobsize == 16) {
+ mtd_set_ooblayout(mtd, &pl353_ecc_ooblayout16_ops);
+ } else if (mtd->oobsize == 64) {
+ mtd_set_ooblayout(mtd, &pl353_ecc_ooblayout64_ops);
+ } else {
+ err = -ENXIO;
+ dev_err(xnfc->dev, "Unsupported oob Layout\n");
+ }
+ }
+
+ return err;
+}
+
+static int pl353_setup_data_interface(struct mtd_info *mtd, int csline,
+ const struct nand_data_interface *conf)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ const struct nand_sdr_timings *sdr;
+ u32 timings[7], mckperiodps;
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ /*
+ * SDR timings are given in pico-seconds while NFC timings must be
+ * expressed in NAND controller clock cycles.
+ */
+ mckperiodps = NSEC_PER_SEC / clk_get_rate(xnfc->mclk);
+ mckperiodps *= 1000;
+ if (sdr->tRC_min <= 20000)
+ /*
+ * The PL353 SMC needs one extra read cycle in SDR Mode 5.
+ * This is not documented in the datasheet but was observed
+ * during testing.
+ */
+ timings[0] = DIV_ROUND_UP(sdr->tRC_min, mckperiodps) + 1;
+ else
+ timings[0] = DIV_ROUND_UP(sdr->tRC_min, mckperiodps);
+
+ timings[1] = DIV_ROUND_UP(sdr->tWC_min, mckperiodps);
+ /*
+ * For all SDR modes the PL353 SMC needs a tREA max value of 1,
+ * as observed during testing.
+ */
+ timings[2] = PL353_TREA_MAX_VALUE;
+ timings[3] = DIV_ROUND_UP(sdr->tWP_min, mckperiodps);
+ timings[4] = DIV_ROUND_UP(sdr->tCLR_min, mckperiodps);
+ timings[5] = DIV_ROUND_UP(sdr->tAR_min, mckperiodps);
+ timings[6] = DIV_ROUND_UP(sdr->tRR_min, mckperiodps);
+ pl353_smc_set_cycles(timings);
+
+ return 0;
+}
+
+static int pl353_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ int ret;
+
+ if (chip->options & NAND_BUSWIDTH_16)
+ pl353_smc_set_buswidth(PL353_SMC_MEM_WIDTH_16);
+
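+ /* One column address cycle for small-page devices, two for large-page */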
+ if (mtd->writesize <= SZ_512)
+ xnfc->addr_cycles = 1;
+ else
+ xnfc->addr_cycles = 2;
+
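+ /* Plus two row address cycles, or three for devices above 128 MiB */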
+ if (chip->options & NAND_ROW_ADDR_3)
+ xnfc->addr_cycles += 3;
+ else
+ xnfc->addr_cycles += 2;
+
+ ret = pl353_nand_ecc_init(mtd, &chip->ecc, chip->ecc.mode);
+ if (ret) {
+ dev_err(xnfc->dev, "ECC init failed\n");
+ return ret;
+ }
+
+ if (!mtd->name) {
+ /*
+ * If the new bindings are used and the bootloader has not been
+ * updated to pass a new mtdparts parameter on the cmdline, you
+ * should define the following property in your NAND node, e.g.:
+ *
+ * label = "pl353-nand";
+ *
+ * This way, mtd->name will be set by the core when
+ * nand_set_flash_node() is called.
+ */
+ mtd->name = devm_kasprintf(xnfc->dev, GFP_KERNEL,
+ "%s", PL353_NAND_DRIVER_NAME);
+ if (!mtd->name) {
+ dev_err(xnfc->dev, "Failed to allocate mtd->name\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops pl353_nand_controller_ops = {
+ .attach_chip = pl353_nand_attach_chip,
+};
+
+/**
+ * pl353_nand_probe - Probe method for the NAND driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function initializes the driver data structures and the hardware.
+ * The NAND driver has dependency with the pl353_smc memory controller
+ * driver for initializing the NAND timing parameters, bus width, ECC modes,
+ * control and status information.
+ *
+ * Return: 0 on success or error value on failure
+ */
+static int pl353_nand_probe(struct platform_device *pdev)
+{
+ struct pl353_nand_controller *xnfc;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ struct resource *res;
+ struct device_node *np, *dn;
+ int ret;
+ u32 val;
+
+ xnfc = devm_kzalloc(&pdev->dev, sizeof(*xnfc), GFP_KERNEL);
+ if (!xnfc)
+ return -ENOMEM;
+ xnfc->dev = &pdev->dev;
+
+ /* Map physical address of NAND flash */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xnfc->regs = devm_ioremap_resource(xnfc->dev, res);
+ if (IS_ERR(xnfc->regs))
+ return PTR_ERR(xnfc->regs);
+
+ chip = &xnfc->chip;
+ mtd = nand_to_mtd(chip);
+ chip->exec_op = pl353_nfc_exec_op;
+ nand_set_controller_data(chip, xnfc);
+ mtd->priv = chip;
+ mtd->owner = THIS_MODULE;
+
+ nand_set_flash_node(chip, xnfc->dev->of_node);
+
+ /* Set the driver entry points for MTD */
+ chip->dev_ready = pl353_nand_device_ready;
+ chip->select_chip = pl353_nand_select_chip;
+ np = of_get_next_parent(xnfc->dev->of_node);
+ xnfc->mclk = of_clk_get(np, 0);
+ if (IS_ERR(xnfc->mclk)) {
+ dev_err(xnfc->dev, "Failed to retrieve MCK clk\n");
+ return PTR_ERR(xnfc->mclk);
+ }
+
+ dn = nand_get_flash_node(chip);
+ ret = of_property_read_u32(dn, "nand-bus-width", &val);
+ if (ret)
+ val = 8;
+
+ xnfc->buswidth = val;
+ /* If this delay is not set, the framework defaults it to 20 us */
+ chip->chip_delay = 30;
+ /* Set the device option and flash width */
+ chip->options = NAND_BUSWIDTH_AUTO;
+ chip->bbt_options = NAND_BBT_USE_FLASH;
+ platform_set_drvdata(pdev, xnfc);
+ chip->setup_data_interface = pl353_setup_data_interface;
+ chip->dummy_controller.ops = &pl353_nand_controller_ops;
+ ret = nand_scan(mtd, 1);
+ if (ret) {
+ dev_err(xnfc->dev, "could not scan the nand chip\n");
+ return ret;
+ }
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(xnfc->dev, "Failed to register mtd device: %d\n", ret);
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * pl353_nand_remove - Remove method for the NAND driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function is called if the driver module is being unloaded. It frees all
+ * resources allocated to the device.
+ *
+ * Return: 0 on success or error value on failure
+ */
+static int pl353_nand_remove(struct platform_device *pdev)
+{
+ struct pl353_nand_controller *xnfc = platform_get_drvdata(pdev);
+ struct mtd_info *mtd = nand_to_mtd(&xnfc->chip);
+
+ /* Release resources, unregister device */
+ nand_release(mtd);
+
+ return 0;
+}
+
+/* Match table for device tree binding */
+static const struct of_device_id pl353_nand_of_match[] = {
+ { .compatible = "arm,pl353-nand-r2p1" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pl353_nand_of_match);
+
+/*
+ * pl353_nand_driver - This structure defines the NAND subsystem platform driver
+ */
+static struct platform_driver pl353_nand_driver = {
+ .probe = pl353_nand_probe,
+ .remove = pl353_nand_remove,
+ .driver = {
+ .name = PL353_NAND_DRIVER_NAME,
+ .of_match_table = pl353_nand_of_match,
+ },
+};
+
+module_platform_driver(pl353_nand_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_ALIAS("platform:" PL353_NAND_DRIVER_NAME);
+MODULE_DESCRIPTION("ARM PL353 NAND Flash Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index e93584650dfc..1cf29ca05f4b 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -21,6 +21,7 @@
#include <linux/of_platform.h>
#include <linux/spi/flash.h>
#include <linux/mtd/spi-nor.h>
+#include <linux/spi/spi.h>
/* Define max times to check status register before we give up. */
@@ -142,6 +143,23 @@ struct sfdp_header {
/* Basic Flash Parameter Table */
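+/*
+ * Erase and register-write opcodes are not striped across the two flashes
+ * in parallel mode and return false; all other opcodes may be striped.
+ */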
+bool update_stripe(const u8 opcode)
+{
+ if (opcode == SPINOR_OP_BE_4K ||
+ opcode == SPINOR_OP_BE_32K ||
+ opcode == SPINOR_OP_CHIP_ERASE ||
+ opcode == SPINOR_OP_SE ||
+ opcode == SPINOR_OP_BE_32K_4B ||
+ opcode == SPINOR_OP_SE_4B ||
+ opcode == SPINOR_OP_BE_4K_4B ||
+ opcode == SPINOR_OP_WRSR ||
+ opcode == SPINOR_OP_WREAR ||
+ opcode == SPINOR_OP_BRWR ||
+ opcode == SPINOR_OP_WRSR2)
+ return false;
+
+ return true;
+}
/*
* JESD216 rev B defines a Basic Flash Parameter Table of 16 DWORDs.
* They are indexed from 1 but C arrays are indexed from 0.
@@ -250,7 +268,7 @@ struct flash_info {
u16 page_size;
u16 addr_width;
- u16 flags;
+ u32 flags;
#define SECT_4K BIT(0) /* SPINOR_OP_BE_4K works uniformly */
#define SPI_NOR_NO_ERASE BIT(1) /* No erase command needed */
#define SST_WRITE BIT(2) /* use SST byte programming */
@@ -279,6 +297,9 @@ struct flash_info {
#define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */
#define USE_CLSR BIT(14) /* use CLSR command */
#define SPI_NOR_OCTAL_READ BIT(15) /* Flash supports Octal Read */
+/* Unlock the global protection for SST flashes */
+#define SST_GLOBAL_PROT_UNLK BIT(16)
/* Part specific fixup hooks. */
const struct spi_nor_fixups *fixups;
@@ -288,6 +309,8 @@ struct flash_info {
#define JEDEC_MFR(info) ((info)->id[0])
+static int write_sr_cr(struct spi_nor *nor, u8 *sr_cr);
+
/*
* Read the status register, returning its value in the location
* Return the status register value.
@@ -296,15 +319,24 @@ struct flash_info {
static int read_sr(struct spi_nor *nor)
{
int ret;
- u8 val;
+ u8 val[2];
- ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val, 1);
- if (ret < 0) {
- pr_err("error %d reading SR\n", (int) ret);
- return ret;
+ if (nor->isparallel) {
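+ /*
+ * Both devices answer in parallel mode; OR the two status
+ * bytes so a busy or protect bit set on either flash is seen.
+ */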
+ ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val[0], 2);
+ if (ret < 0) {
+ pr_err("error %d reading SR\n", (int) ret);
+ return ret;
+ }
+ val[0] |= val[1];
+ } else {
+ ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val[0], 1);
+ if (ret < 0) {
+ pr_err("error %d reading SR\n", (int) ret);
+ return ret;
+ }
}
- return val;
+ return val[0];
}
/*
@@ -315,15 +347,24 @@ static int read_sr(struct spi_nor *nor)
static int read_fsr(struct spi_nor *nor)
{
int ret;
- u8 val;
+ u8 val[2];
- ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val, 1);
- if (ret < 0) {
- pr_err("error %d reading FSR\n", ret);
- return ret;
+ if (nor->isparallel) {
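+ /*
+ * The flag status register reads 1 when ready, so AND the two
+ * bytes: the pair is ready only when both flashes are ready.
+ */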
+ ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val[0], 2);
+ if (ret < 0) {
+ pr_err("error %d reading FSR\n", ret);
+ return ret;
+ }
+ val[0] &= val[1];
+ } else {
+ ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val[0], 1);
+ if (ret < 0) {
+ pr_err("error %d reading FSR\n", ret);
+ return ret;
+ }
}
- return val;
+ return val[0];
}
/*
@@ -513,6 +554,38 @@ static int set_4byte(struct spi_nor *nor, bool enable)
}
}
+/**
+ * read_ear - Get the extended/bank address register value
+ * @nor: Pointer to the flash control structure
+ * @info: Pointer to the flash parameters structure
+ *
+ * This routine reads the Extended/bank address register value.
+ *
+ * Return: Negative if an error occurred, otherwise the register value.
+ */
+static int read_ear(struct spi_nor *nor, struct flash_info *info)
+{
+ int ret;
+ u8 val;
+ u8 code;
+
+ /* This is actually Spansion */
+ if (JEDEC_MFR(info) == CFI_MFR_AMD)
+ code = SPINOR_OP_BRRD;
+ /* This is actually Micron */
+ else if (JEDEC_MFR(info) == CFI_MFR_ST ||
+ JEDEC_MFR(info) == CFI_MFR_MACRONIX ||
+ JEDEC_MFR(info) == SNOR_MFR_ISSI)
+ code = SPINOR_OP_RDEAR;
+ else
+ return -EINVAL;
+
+ ret = nor->read_reg(nor, code, &val, 1);
+ if (ret < 0)
+ return ret;
+
+ return val;
+}
+
static int s3an_sr_ready(struct spi_nor *nor)
{
int ret;
@@ -622,15 +695,81 @@ static int spi_nor_wait_till_ready(struct spi_nor *nor)
}
/*
+ * Update Extended Address/bank selection Register.
+ * Call with flash->lock locked.
+ */
+static int write_ear(struct spi_nor *nor, u32 addr)
+{
+ u8 code;
+ u8 ear;
+ int ret;
+ struct mtd_info *mtd = &nor->mtd;
+
+ /* Wait until the previous write command has finished. */
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ if (mtd->size <= (0x1000000 << nor->shift))
+ return 0;
+
+ addr = addr % (u32)mtd->size;
+ ear = addr >> 24;
+
+ if ((!nor->isstacked) && (ear == nor->curbank))
+ return 0;
+
+ if (nor->isstacked && (mtd->size <= 0x2000000))
+ return 0;
+
+ if (nor->jedec_id == CFI_MFR_AMD) {
+ code = SPINOR_OP_BRWR;
+ } else if (nor->jedec_id == CFI_MFR_ST ||
+ nor->jedec_id == CFI_MFR_MACRONIX ||
+ nor->jedec_id == SNOR_MFR_ISSI) {
+ write_enable(nor);
+ code = SPINOR_OP_WREAR;
+ } else {
+ /* Unknown manufacturer: leave the EAR untouched */
+ return -EINVAL;
+ }
+ nor->cmd_buf[0] = ear;
+
+ ret = nor->write_reg(nor, code, nor->cmd_buf, 1);
+ if (ret < 0)
+ return ret;
+
+ nor->curbank = ear;
+
+ return 0;
+}
+
+/*
* Erase the whole flash memory
*
* Returns 0 if successful, non-zero otherwise.
*/
static int erase_chip(struct spi_nor *nor)
{
+ int ret;
+
dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
- return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0);
+ if (nor->isstacked)
+ nor->spi->master->flags &= ~SPI_MASTER_U_PAGE;
+
+ ret = nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0);
+ if (ret)
+ return ret;
+
+ if (nor->isstacked) {
+ /* Wait until previous write command finished */
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ nor->spi->master->flags |= SPI_MASTER_U_PAGE;
+
+ ret = nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0);
+ }
+ return ret;
}
static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
@@ -982,7 +1121,7 @@ destroy_erase_cmd_list:
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
- u32 addr, len;
+ u32 addr, len, offset;
uint32_t rem;
int ret;
@@ -1034,9 +1173,35 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
/* "sector"-at-a-time erase */
} else if (spi_nor_has_uniform_erase(nor)) {
while (len) {
write_enable(nor);
+ offset = addr;
+ if (nor->isparallel == 1)
+ offset /= 2;
+
+ if (nor->isstacked == 1) {
+ if (offset >= (mtd->size / 2)) {
+ offset = offset - (mtd->size / 2);
+ nor->spi->master->flags |=
+ SPI_MASTER_U_PAGE;
+ } else {
+ nor->spi->master->flags &=
+ ~SPI_MASTER_U_PAGE;
+ }
+ }
+ if (nor->addr_width == 3) {
+ /* Update Extended Address Register */
+ ret = write_ear(nor, offset);
+ if (ret)
+ goto erase_err;
+ }
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto erase_err;
- ret = spi_nor_erase_sector(nor, addr);
+ write_enable(nor);
+
+ ret = spi_nor_erase_sector(nor, offset);
if (ret)
goto erase_err;
@@ -1063,6 +1228,118 @@ erase_err:
return ret;
}
+static inline uint16_t min_lockable_sectors(struct spi_nor *nor,
+ uint16_t n_sectors)
+{
+ uint16_t lock_granularity;
+
+ /*
+ * Revisit: SST (not used by us) has the same JEDEC ID as Micron,
+ * but its protected area table is similar to that of Spansion.
+ */
+ lock_granularity = max(1, n_sectors / M25P_MAX_LOCKABLE_SECTORS);
+ if (nor->jedec_id == CFI_MFR_ST) /* Micron */
+ lock_granularity = 1;
+
+ return lock_granularity;
+}
+
+static inline uint32_t get_protected_area_start(struct spi_nor *nor,
+ uint8_t lock_bits)
+{
+ u16 n_sectors;
+ u32 sector_size;
+ uint64_t mtd_size;
+ struct mtd_info *mtd = &nor->mtd;
+
+ n_sectors = nor->n_sectors;
+ sector_size = nor->sector_size;
+ mtd_size = mtd->size;
+
+ if (nor->isparallel) {
+ sector_size = (nor->sector_size >> 1);
+ mtd_size = (mtd->size >> 1);
+ }
+ if (nor->isstacked) {
+ n_sectors = (nor->n_sectors >> 1);
+ mtd_size = (mtd->size >> 1);
+ }
+
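+ /*
+ * The protected area grows down from the top of the flash; each
+ * additional BP level doubles its size (1 << (lock_bits - 1)).
+ */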
+ return mtd_size - (1 << (lock_bits - 1)) *
+ min_lockable_sectors(nor, n_sectors) * sector_size;
+}
+
+static uint8_t min_protected_area_including_offset(struct spi_nor *nor,
+ uint32_t offset)
+{
+ uint8_t lock_bits, lockbits_limit;
+
+ /*
+ * Revisit: SST (not used by us) has the same JEDEC ID as Micron,
+ * but its protected area table is similar to that of Spansion.
+ * Micron has 4 block protect bits.
+ */
+ lockbits_limit = 7;
+ if (nor->jedec_id == CFI_MFR_ST) /* Micron */
+ lockbits_limit = 15;
+
+ for (lock_bits = 1; lock_bits < lockbits_limit; lock_bits++) {
+ if (offset >= get_protected_area_start(nor, lock_bits))
+ break;
+ }
+ return lock_bits;
+}
+
+static int write_sr_modify_protection(struct spi_nor *nor, uint8_t status,
+ uint8_t lock_bits)
+{
+ uint8_t status_new, bp_mask;
+ u8 val[2];
+
+ status_new = status & ~SR_BP_BIT_MASK;
+ bp_mask = (lock_bits << SR_BP_BIT_OFFSET) & SR_BP_BIT_MASK;
+
+ /* Micron */
+ if (nor->jedec_id == CFI_MFR_ST) {
+ /* To support chips with more than 896 sectors (56MB) */
+ status_new &= ~SR_BP3;
+
+ /* Protected area starts from top */
+ status_new &= ~SR_BP_TB;
+
+ if (lock_bits > 7)
+ bp_mask |= SR_BP3;
+ }
+
+ if (nor->is_lock)
+ status_new |= bp_mask;
+
+ write_enable(nor);
+
+ /* For Spansion flashes */
+ if (nor->jedec_id == CFI_MFR_AMD) {
+ val[0] = status_new;
+ val[1] = read_cr(nor);
+ if (write_sr_cr(nor, val) < 0)
+ return 1;
+ } else {
+ if (write_sr(nor, status_new) < 0)
+ return 1;
+ }
+ return 0;
+}
+
+static uint8_t bp_bits_from_sr(struct spi_nor *nor, uint8_t status)
+{
+ uint8_t ret;
+
+ ret = (status & SR_BP_BIT_MASK) >> SR_BP_BIT_OFFSET;
+ if (nor->jedec_id == CFI_MFR_ST) /* Micron */
+ ret |= ((status & SR_BP3) >> (SR_BP_BIT_OFFSET + 1));
+
+ return ret;
+}
+
/* Write status register and ensure bits in mask match written values */
static int write_sr_and_check(struct spi_nor *nor, u8 status_new, u8 mask)
{
@@ -1353,13 +1630,42 @@ static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
+ uint8_t status;
+ uint8_t lock_bits;
ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
if (ret)
return ret;
+ if (nor->isparallel == 1)
+ ofs = ofs >> nor->shift;
+
+ if (nor->isstacked == 1) {
+ if (ofs >= (mtd->size / 2)) {
+ ofs = ofs - (mtd->size / 2);
+ nor->spi->master->flags |= SPI_MASTER_U_PAGE;
+ } else
+ nor->spi->master->flags &= ~SPI_MASTER_U_PAGE;
+ }
+
ret = nor->flash_lock(nor, ofs, len);
+ if (ret)
+ goto err;
+
+ /* Wait until the previous command has finished */
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto err;
+
+ status = read_sr(nor);
+ lock_bits = min_protected_area_including_offset(nor, ofs);
+
+ /* Only modify protection if it will not unlock other areas */
+ if (lock_bits > bp_bits_from_sr(nor, status)) {
+ nor->is_lock = 1;
+ ret = write_sr_modify_protection(nor, status, lock_bits);
+ } else {
+ dev_err(nor->dev, "area is already locked\n");
+ }
+err:
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
return ret;
}
@@ -1368,13 +1674,42 @@ static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
+ uint8_t status;
+ uint8_t lock_bits;
ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
if (ret)
return ret;
+ if (nor->isparallel == 1)
+ ofs = ofs >> nor->shift;
+
+ if (nor->isstacked == 1) {
+ if (ofs >= (mtd->size / 2)) {
+ ofs = ofs - (mtd->size / 2);
+ nor->spi->master->flags |= SPI_MASTER_U_PAGE;
+ } else
+ nor->spi->master->flags &= ~SPI_MASTER_U_PAGE;
+ }
+
ret = nor->flash_unlock(nor, ofs, len);
+ if (ret)
+ goto err;
+
+ /* Wait until the previous command has finished */
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto err;
+
+ status = read_sr(nor);
+ lock_bits = min_protected_area_including_offset(nor, ofs + len) - 1;
+
+ /* Only modify protection if it will not lock other areas */
+ if (lock_bits < bp_bits_from_sr(nor, status)) {
+ nor->is_lock = 0;
+ ret = write_sr_modify_protection(nor, status, lock_bits);
+ } else {
+ dev_err(nor->dev, "area is already unlocked\n");
+ }
+err:
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
return ret;
}
@@ -1901,6 +2236,28 @@ static const struct flash_info spi_nor_ids[] = {
{ "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
/* ISSI */
+ { "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25wp080d", INFO(0x9d7014, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25lp016d", INFO(0x9d6015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25wp016d", INFO(0x9d7015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25lp032d", INFO(0x9d6016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25wp032d", INFO(0x9d7016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25lp064a", INFO(0x9d6017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25wp064a", INFO(0x9d7017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25lp128f", INFO(0x9d6018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25wp128f", INFO(0x9d7018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25lp256d", INFO(0x9d6019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25wp256d", INFO(0x9d7019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK |
+ SPI_NOR_4B_OPCODES) },
+ { "is25lp512m", INFO(0x9d601a, 0, 64 * 1024, 1024,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25wp512m", INFO(0x9d701a, 0, 64 * 1024, 1024,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK |
+ SPI_NOR_4B_OPCODES) },
{ "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
{ "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
@@ -1953,6 +2310,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "mx66u1g45g", INFO(0xc2253b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
/* Micron <--> ST Micro */
@@ -1961,15 +2319,18 @@ static const struct flash_info spi_nor_ids[] = {
{ "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
{ "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ | USE_FSR | SPI_NOR_HAS_LOCK) },
+ { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ | USE_FSR | SPI_NOR_HAS_LOCK) },
+ { "n25q256a", INFO(0x20bb19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ | USE_FSR| SPI_NOR_HAS_LOCK) },
+ { "n25q256a13", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_FSR | SPI_NOR_HAS_LOCK) },
{ "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
- { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
- { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
- { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
- { "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
+ { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "n25q512a13", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_FSR | SPI_NOR_HAS_LOCK) },
+ { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | NO_CHIP_ERASE) },
+ { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | NO_CHIP_ERASE) },
+ { "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | NO_CHIP_ERASE) },
+ { "mt25ul02g", INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | NO_CHIP_ERASE) },
/* Micron */
{
@@ -1992,17 +2353,18 @@ static const struct flash_info spi_nor_ids[] = {
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
{ "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) },
+ { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | USE_CLSR) },
{ "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
{ "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | USE_CLSR) },
{ "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
- { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
- { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
- { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+ { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, SPI_NOR_HAS_LOCK) },
+ { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, SPI_NOR_HAS_LOCK) },
+ { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_QUAD_READ | USE_CLSR) },
+ { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | USE_CLSR) },
+ { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | USE_CLSR) },
{ "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
{ "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
{ "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
@@ -2020,6 +2382,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "sst26wf016B", INFO(0xbf2651, 0, 64 * 1024, 32, SECT_4K | SST_GLOBAL_PROT_UNLK) },
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
@@ -2118,7 +2481,7 @@ static const struct flash_info spi_nor_ids[] = {
},
{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
- { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
+ { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },
@@ -2171,21 +2534,71 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
- ssize_t ret;
-
+ int ret;
+ u32 offset = from;
+ u32 stack_shift = 0;
+ u32 read_len = 0;
+ u32 rem_bank_len = 0;
+ u8 bank;
+ u8 is_ofst_odd = 0;
+ loff_t addr = 0;
+
+#define OFFSET_16_MB 0x1000000
dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
+ if ((nor->isparallel) && (offset & 1)) {
+ /* We can hit this case when we use file system like ubifs */
+ from = (loff_t)(from - 1);
+ len = (size_t)(len + 1);
+ is_ofst_odd = 1;
+ }
+
ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ);
if (ret)
return ret;
while (len) {
- loff_t addr = from;
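+ /*
+ * With 3-byte addressing, clamp the transfer to the end of
+ * the current 16 MiB bank selected via the extended address
+ * register.
+ */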
+ if (nor->addr_width == 3) {
+ bank = (u32)from / (OFFSET_16_MB << nor->shift);
+ rem_bank_len = ((OFFSET_16_MB << nor->shift) *
+ (bank + 1)) - from;
+ }
+ offset = from;
+
+ if (nor->isparallel == 1)
+ offset /= 2;
+
+ if (nor->isstacked == 1) {
+ stack_shift = 1;
+ if (offset >= (mtd->size / 2)) {
+ offset = offset - (mtd->size / 2);
+ nor->spi->master->flags |= SPI_MASTER_U_PAGE;
+ } else {
+ nor->spi->master->flags &= ~SPI_MASTER_U_PAGE;
+ }
+ }
+
+ /* Die cross over issue is not handled */
+ if (nor->addr_width == 4) {
+ rem_bank_len = (mtd->size >> stack_shift) -
+ (offset << nor->shift);
+ }
+ if (nor->addr_width == 3)
+ write_ear(nor, offset);
+ if (len < rem_bank_len)
+ read_len = len;
+ else
+ read_len = rem_bank_len;
+
+ /* Wait till previous write/erase is done. */
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto read_err;
if (nor->flags & SNOR_F_S3AN_ADDR_DEFAULT)
- addr = spi_nor_s3an_addr_convert(nor, addr);
+ addr = spi_nor_s3an_addr_convert(nor, offset);
- ret = nor->read(nor, addr, len, buf);
+ ret = nor->read(nor, offset, read_len, buf);
if (ret == 0) {
/* We shouldn't see 0-length reads */
ret = -EIO;
@@ -2195,7 +2608,12 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
goto read_err;
WARN_ON(ret > len);
- *retlen += ret;
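+ /* Drop the dummy leading byte read for the odd start offset */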
+ if (is_ofst_odd == 1) {
+ memcpy(buf, (buf + 1), (len - 1));
+ *retlen += (ret - 1);
+ } else {
+ *retlen += ret;
+ }
buf += ret;
from += ret;
len -= ret;
@@ -2297,41 +2715,93 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
struct spi_nor *nor = mtd_to_spi_nor(mtd);
size_t page_offset, page_remain, i;
ssize_t ret;
+ u32 offset, stack_shift = 0;
+ u8 bank = 0;
+ u32 rem_bank_len = 0;
+
+#define OFFSET_16_MB 0x1000000
dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
+ /*
+ * Cannot write to odd offset in parallel mode,
+ * so write 2 bytes first
+ */
+ if ((nor->isparallel) && (to & 1)) {
+
+ u8 two[2] = {0xff, buf[0]};
+ size_t local_retlen;
+
+ ret = spi_nor_write(mtd, to & ~1, 2, &local_retlen, two);
+ if (ret < 0)
+ return ret;
+
+ *retlen += 1; /* We've written only one actual byte */
+ ++buf;
+ --len;
+ ++to;
+ }
+
ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
if (ret)
return ret;
-
for (i = 0; i < len; ) {
ssize_t written;
loff_t addr = to + i;
+		if (nor->addr_width == 3) {
+			bank = (u32)to / (OFFSET_16_MB << nor->shift);
+			rem_bank_len = ((OFFSET_16_MB << nor->shift) *
+					(bank + 1)) - to;
+		}
+
/*
* If page_size is a power of two, the offset can be quickly
* calculated with an AND operation. On the other cases we
* need to do a modulus operation (more expensive).
* Power of two numbers have only one bit set and we can use
* the instruction hweight32 to detect if we need to do a
* modulus (do_div()) or not.
*/
if (hweight32(nor->page_size) == 1) {
page_offset = addr & (nor->page_size - 1);
} else {
uint64_t aux = addr;
page_offset = do_div(aux, nor->page_size);
+		}
+
+		offset = to + i;
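+		/* Each byte-striped device sees half of the logical address */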
+ if (nor->isparallel == 1)
+ offset /= 2;
+
+ if (nor->isstacked == 1) {
+ stack_shift = 1;
+ if (offset >= (mtd->size / 2)) {
+ offset = offset - (mtd->size / 2);
+ nor->spi->master->flags |= SPI_MASTER_U_PAGE;
+ } else {
+ nor->spi->master->flags &= ~SPI_MASTER_U_PAGE;
+ }
}
- /* the size of data remaining on the first page */
- page_remain = min_t(size_t,
- nor->page_size - page_offset, len - i);
+
+		/* Transfers that cross a die boundary are not handled */
+ if (nor->addr_width == 4)
+ rem_bank_len = (mtd->size >> stack_shift) - offset;
+ if (nor->addr_width == 3)
+ write_ear(nor, offset);
+ if (nor->isstacked == 1) {
+ if (len <= rem_bank_len) {
+ page_remain = min_t(size_t,
+ nor->page_size - page_offset, len - i);
+ } else {
+				/*
+				 * Limit the chunk to what remains in the
+				 * current stacked device / bank.
+				 */
+ page_remain = rem_bank_len;
+ }
+ } else {
+ page_remain = min_t(size_t,
+ nor->page_size - page_offset, len - i);
+ }
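+		/* Wait till previous write/erase is done. */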
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto write_err;
+		addr = offset;
if (nor->flags & SNOR_F_S3AN_ADDR_DEFAULT)
addr = spi_nor_s3an_addr_convert(nor, addr);
write_enable(nor);
ret = nor->write(nor, addr, page_remain, buf + i);
if (ret < 0)
goto write_err;
written = ret;
@@ -2920,6 +3390,9 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
}
params->size >>= 3; /* Convert to bytes. */
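+	/*
+	 * Devices over 16 MiB that still use 3-byte addressing are driven
+	 * through EAR banking rather than SFDP-described parameters.
+	 */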
+ if (params->size > 0x1000000 && nor->addr_width == 3)
+ return -EINVAL;
+
/* Fast Read settings. */
for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
@@ -3708,9 +4181,15 @@ static int spi_nor_init_params(struct spi_nor *nor,
}
/* Page Program settings. */
- params->hwcaps.mask |= SNOR_HWCAPS_PP;
- spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
- SPINOR_OP_PP, SNOR_PROTO_1_1_1);
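+	/* Prefer 1-1-4 page program when the controller supports quad TX */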
+ if (nor->spi->mode & SPI_TX_QUAD) {
+ params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_1_4],
+ SPINOR_OP_PP_1_1_4, SNOR_PROTO_1_1_4);
+ } else {
+ params->hwcaps.mask |= SNOR_HWCAPS_PP;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
+ SPINOR_OP_PP, SNOR_PROTO_1_1_1);
+ }
/*
* Sector Erase settings. Sort Erase Types in ascending order, with the
@@ -3739,6 +4218,7 @@ static int spi_nor_init_params(struct spi_nor *nor,
SNOR_HWCAPS_PP_QUAD)) {
switch (JEDEC_MFR(info)) {
case SNOR_MFR_MACRONIX:
+ case SNOR_MFR_ISSI:
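+			/* ISSI parts use the Macronix-style SR quad-enable bit */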
params->quad_enable = macronix_quad_enable;
break;
@@ -4075,12 +4555,14 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
const struct spi_nor_hwcaps *hwcaps)
{
struct spi_nor_flash_parameter params;
- const struct flash_info *info = NULL;
+ struct flash_info *info = NULL;
struct device *dev = nor->dev;
struct mtd_info *mtd = &nor->mtd;
struct device_node *np = spi_nor_get_flash_node(nor);
+	struct device_node *np_spi = NULL;
int ret;
int i;
+ u32 is_dual;
ret = spi_nor_check(nor);
if (ret)
@@ -4092,10 +4574,10 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
nor->write_proto = SNOR_PROTO_1_1_1;
if (name)
- info = spi_nor_match_id(name);
+ info = (struct flash_info *)spi_nor_match_id(name);
/* Try to auto-detect if chip name wasn't specified or not found */
if (!info)
- info = spi_nor_read_id(nor);
+ info = (struct flash_info *)spi_nor_read_id(nor);
if (IS_ERR_OR_NULL(info))
return -ENOENT;
@@ -4119,7 +4601,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
*/
dev_warn(dev, "found %s, expected %s\n",
jinfo->name, info->name);
- info = jinfo;
+ info = (struct flash_info *)jinfo;
}
}
@@ -4150,6 +4632,25 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
if (ret)
return ret;
+	/*
+	 * Atmel, SST, Intel/Numonyx, and other serial NOR chips tend to
+	 * power up with the software protection bits set.
+	 */
+ if (JEDEC_MFR(info) == SNOR_MFR_ATMEL ||
+ JEDEC_MFR(info) == SNOR_MFR_INTEL ||
+ JEDEC_MFR(info) == SNOR_MFR_SST ||
+ info->flags & SPI_NOR_HAS_LOCK) {
+ write_enable(nor);
+ write_sr(nor, 0);
+ if (info->flags & SST_GLOBAL_PROT_UNLK) {
+ write_enable(nor);
+ /* Unlock global write protection bits */
+ nor->write_reg(nor, GLOBAL_BLKPROT_UNLK, NULL, 0);
+ }
+ spi_nor_wait_till_ready(nor);
+ }
+
if (!mtd->name)
mtd->name = dev_name(dev);
mtd->priv = nor;
@@ -4159,6 +4660,73 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
mtd->size = params.size;
mtd->_erase = spi_nor_erase;
mtd->_read = spi_nor_read;
+#ifdef CONFIG_OF
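+	/*
+	 * Xilinx Zynq/ZynqMP QSPI controllers can drive two flash devices,
+	 * either byte-striped in parallel or stacked at consecutive
+	 * addresses, as described by the "is-dual" and "is-stacked"
+	 * DT properties.
+	 */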
+ np_spi = of_get_next_parent(np);
+ if ((of_property_match_string(np_spi, "compatible",
+ "xlnx,zynq-qspi-1.0") >= 0) ||
+ (of_property_match_string(np_spi, "compatible",
+ "xlnx,zynqmp-qspi-1.0") >= 0)) {
+ if (of_property_read_u32(np_spi, "is-dual",
+ &is_dual) < 0) {
+ /* Default to single if prop not defined */
+ nor->shift = 0;
+ nor->isstacked = 0;
+ nor->isparallel = 0;
+ } else {
+ if (is_dual == 1) {
+ /* dual parallel */
+ nor->shift = 1;
+ info->sector_size <<= nor->shift;
+ info->page_size <<= nor->shift;
+ mtd->size <<= nor->shift;
+ nor->isparallel = 1;
+ nor->isstacked = 0;
+ nor->spi->master->flags |=
+ (SPI_MASTER_DATA_STRIPE
+ | SPI_MASTER_BOTH_CS);
+ } else {
+#ifdef CONFIG_SPI_ZYNQ_QSPI_DUAL_STACKED
+ /* dual stacked */
+ nor->shift = 0;
+ mtd->size <<= 1;
+ info->n_sectors <<= 1;
+ nor->isstacked = 1;
+ nor->isparallel = 0;
+#else
+ u32 is_stacked;
+ if (of_property_read_u32(np_spi,
+ "is-stacked",
+ &is_stacked) < 0) {
+ is_stacked = 0;
+ }
+ if (is_stacked) {
+ /* dual stacked */
+ nor->shift = 0;
+ mtd->size <<= 1;
+ info->n_sectors <<= 1;
+ nor->isstacked = 1;
+ nor->isparallel = 0;
+ } else {
+ /* single */
+ nor->shift = 0;
+ nor->isstacked = 0;
+ nor->isparallel = 0;
+ }
+#endif
+ }
+ }
+ }
+	dev_dbg(dev, "parallel %d stacked %d shift %d mtd size %llu\n",
+		nor->isparallel, nor->isstacked, nor->shift,
+		(unsigned long long)mtd->size);
+#else
+ /* Default to single */
+ nor->shift = 0;
+ nor->isstacked = 0;
+ nor->isparallel = 0;
+#endif
+
mtd->_resume = spi_nor_resume;
/* NOR protection support for STmicro/Micron chips and similar */
@@ -4191,9 +4759,22 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
if (info->flags & USE_CLSR)
nor->flags |= SNOR_F_USE_CLSR;
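+	/* In dual-parallel mode the sector size was doubled earlier */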
+ if (nor->shift)
+ mtd->erasesize = info->sector_size;
+
+#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
+ /* prefer "small sector" erase if possible */
+ if (nor->shift &&
+ (info->flags & SECT_4K ||
+ info->flags & SECT_4K_PMC)) {
+ mtd->erasesize = 4096 << nor->shift;
+ }
+#endif
+
if (info->flags & SPI_NOR_NO_ERASE)
mtd->flags |= MTD_NO_ERASE;
+ nor->jedec_id = info->id[0];
mtd->dev.parent = dev;
nor->page_size = params.page_size;
mtd->writebufsize = nor->page_size;
@@ -4232,19 +4813,64 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
} else if (info->addr_width) {
nor->addr_width = info->addr_width;
} else if (mtd->size > 0x1000000) {
- /* enable 4-byte addressing if the device exceeds 16MiB */
- nor->addr_width = 4;
+#ifdef CONFIG_OF
+ np_spi = of_get_next_parent(np);
+ if (of_property_match_string(np_spi, "compatible",
+ "xlnx,zynq-qspi-1.0") >= 0) {
+ int status;
+
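+			/*
+			 * Zynq QSPI stays in 3-byte mode and banks through
+			 * the Extended Address Register instead of using
+			 * 4-byte opcodes.
+			 */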
+ nor->addr_width = 3;
+ set_4byte(nor, false);
+ status = read_ear(nor, info);
+ if (status < 0)
+ dev_warn(dev, "failed to read ear reg\n");
+ else
+ nor->curbank = status & EAR_SEGMENT_MASK;
+ } else {
+#endif
+ /*
+ * enable 4-byte addressing
+ * if the device exceeds 16MiB
+ */
+ nor->addr_width = 4;
+			if (JEDEC_MFR(info) == SNOR_MFR_SPANSION ||
+			    info->flags & SPI_NOR_4B_OPCODES) {
+				spi_nor_set_4byte_opcodes(nor);
+			} else {
+ np_spi = of_get_next_parent(np);
+ if (of_property_match_string(np_spi,
+ "compatible",
+ "xlnx,xps-spi-2.00.a") >= 0) {
+ nor->addr_width = 3;
+ set_4byte(nor, false);
+ } else {
+ set_4byte(nor, true);
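+				/*
+				 * For stacked devices the second flash must
+				 * enter 4-byte mode too; select it briefly
+				 * via U_PAGE.
+				 */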
+ if (nor->isstacked) {
+ nor->spi->master->flags |=
+ SPI_MASTER_U_PAGE;
+ set_4byte(nor, true);
+ nor->spi->master->flags &=
+ ~SPI_MASTER_U_PAGE;
+ }
+ }
+ }
+#ifdef CONFIG_OF
+ }
+#endif
} else {
nor->addr_width = 3;
}
- if (info->flags & SPI_NOR_4B_OPCODES ||
- (JEDEC_MFR(info) == SNOR_MFR_SPANSION && mtd->size > SZ_16M))
- nor->flags |= SNOR_F_4B_OPCODES;
+ if (of_property_match_string(np_spi, "compatible",
+ "xlnx,zynq-qspi-1.0") < 0) {
+ if (info->flags & SPI_NOR_4B_OPCODES ||
+ (JEDEC_MFR(info) == SNOR_MFR_SPANSION && mtd->size > SZ_16M))
+ nor->flags |= SNOR_F_4B_OPCODES;
- if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
- !(nor->flags & SNOR_F_HAS_4BAIT))
- spi_nor_set_4byte_opcodes(nor);
+ if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
+ !(nor->flags & SNOR_F_HAS_4BAIT))
+ spi_nor_set_4byte_opcodes(nor);
+ }
if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
dev_err(dev, "address width is too large: %u\n",
@@ -4286,6 +4912,14 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
}
EXPORT_SYMBOL_GPL(spi_nor_scan);
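+/*
+ * Reset the Extended Address Register so that 3-byte-only boot code
+ * reads from bank 0 after a restart.
+ */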
+void spi_nor_shutdown(struct spi_nor *nor)
+{
+ if (nor->addr_width == 3 &&
+ (nor->mtd.size >> nor->shift) > 0x1000000)
+ write_ear(nor, 0);
+}
+EXPORT_SYMBOL_GPL(spi_nor_shutdown);
+
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
MODULE_AUTHOR("Mike Lavender");