[Feature] add GA346 baseline version

Change-Id: Ic62933698569507dcf98240cdf5d9931ae34348f
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/Kconfig b/src/kernel/linux/v4.19/drivers/mtd/nandx/Kconfig
new file mode 100644
index 0000000..4bb8dac
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/Kconfig
@@ -0,0 +1,16 @@
+menuconfig MTD_NANDX_V2
+	tristate "MTK NANDX device support"
+	depends on MTD
+	help
+	  MTK NANDX device
+
+if MTD_NANDX_V2
+
+config MTD_NANDX_V2_SPI
+	bool "NANDX SPI"
+	default n
+	help
+	  MTK SPI NAND device drivers
+
+endif # MTD_NANDX_V2
+
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/NOTICE b/src/kernel/linux/v4.19/drivers/mtd/nandx/NOTICE
new file mode 100644
index 0000000..1a06ca3
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/NOTICE
@@ -0,0 +1,52 @@
+
+/*
+ * Nandx - Mediatek Common Nand Driver
+ * Copyright (C) 2017 MediaTek Inc.
+ *
+ * Nandx is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ *  a) This program is free software; you can redistribute it and/or modify
+ *     it under the terms of the GNU General Public License version 2 as
+ *     published by the Free Software Foundation.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ *     This program is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *     See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ *
+ * Alternatively,
+ *
+ *  b) Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *     1. Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *     2. Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ *     CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ *     INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *     MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+####################################################################################################
\ No newline at end of file
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/Nandx.config b/src/kernel/linux/v4.19/drivers/mtd/nandx/Nandx.config
new file mode 100644
index 0000000..482b33e
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/Nandx.config
@@ -0,0 +1,19 @@
+NANDX_IC_VERSION := mt6880
+
+NANDX_SIMULATOR_SUPPORT := n
+NANDX_CTP_SUPPORT := n
+NANDX_DA_SUPPORT := n
+NANDX_PRELOADER_SUPPORT := n
+NANDX_LK_SUPPORT := n
+NANDX_KERNEL_SUPPORT := y
+NANDX_BROM_SUPPORT := n
+NANDX_BBT_SUPPORT := y
+NANDX_VERIFY_SUPPORT := n
+
+NANDX_NAND_SPI := y
+NANDX_NAND_SLC := y
+NANDX_NAND_MLC := n
+NANDX_NAND_TLC := n
+NANDX_NFI_BASE := y
+NANDX_NFI_ECC := y
+NANDX_NFI_SPI := y
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/Nandx.mk b/src/kernel/linux/v4.19/drivers/mtd/nandx/Nandx.mk
new file mode 100644
index 0000000..74a61d2
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/Nandx.mk
@@ -0,0 +1,102 @@
+#
+# Copyright (C) 2017 MediaTek Inc.
+# Licensed under either
+#     BSD Licence, (see NOTICE for more details)
+#     GNU General Public License, version 2.0, (see NOTICE for more details)
+#
+
+nandx_dir := $(shell dirname $(lastword $(MAKEFILE_LIST)))
+include $(nandx_dir)/Nandx.config
+
+ifeq ($(NANDX_SIMULATOR_SUPPORT), y)
+sim-obj :=
+sim-inc :=
+nandx-obj := sim-obj
+nandx-prefix := .
+nandx-postfix := %.o
+sim-inc += -I$(nandx-prefix)/include/internal
+sim-inc += -I$(nandx-prefix)/include/simulator
+endif
+
+ifeq ($(NANDX_CTP_SUPPORT), y)
+nandx-obj := C_SRC_FILES
+nandx-prefix := $(nandx_dir)
+nandx-postfix := %.c
+INC_DIRS += $(nandx_dir)/include/internal
+INC_DIRS += $(nandx_dir)/include/ctp
+INC_DIRS += $(nandx_dir)/include/platform/$(NANDX_IC_VERSION)
+endif
+
+ifeq ($(NANDX_DA_SUPPORT), y)
+nandx-obj := obj-y
+nandx-prefix := $(nandx_dir)
+nandx-postfix := %.o
+INCLUDE_PATH += $(TOPDIR)/platform/$(CODE_BASE)/dev/nand/nandx/include/internal
+INCLUDE_PATH += $(TOPDIR)/platform/$(CODE_BASE)/dev/nand/nandx/include/da
+endif
+
+ifeq ($(NANDX_PRELOADER_SUPPORT), y)
+nandx-obj := MOD_SRC
+nandx-prefix := $(nandx_dir)
+nandx-postfix := %.c
+C_OPTION += -I$(MTK_PATH_PLATFORM)/src/drivers/nandx/include/internal
+C_OPTION += -I$(MTK_PATH_PLATFORM)/src/drivers/nandx/include/preloader
+endif
+
+ifeq ($(NANDX_LK_SUPPORT), y)
+nandx-obj := MODULE_SRCS
+nandx-prefix := $(nandx_dir)
+nandx-postfix := %.c
+GLOBAL_INCLUDES += $(nandx_dir)/include/internal
+GLOBAL_INCLUDES += $(nandx_dir)/include/lk
+GLOBAL_INCLUDES += $(nandx_dir)/include/platform/$(NANDX_IC_VERSION)
+endif
+
+ifeq ($(NANDX_AOS_SUPPORT), y)
+nandx-obj := $(NAME)_SOURCES
+nandx-prefix := drivers/nandx
+nandx-postfix := %.c
+$(NAME)_INCLUDES += drivers/nandx/include/internal
+$(NAME)_INCLUDES += drivers/nandx/include/aos
+endif
+
+ifeq ($(NANDX_KERNEL_SUPPORT), y)
+nandx-obj := obj-y
+nandx-prefix := nandx
+nandx-postfix := %.o
+ccflags-y += -I$(nandx_dir)/include/internal
+ccflags-y += -I$(nandx_dir)/include/kernel
+ccflags-y += -I$(nandx_dir)/include/platform/$(NANDX_IC_VERSION)
+endif
+
+ifeq ($(NANDX_UBOOT_SUPPORT), y)
+	nandx-obj := obj-y
+	nandx-prefix := nandx
+	nandx-postfix := %.o
+	ccflags-y += -I$(nandx_dir)/include/internal
+	ccflags-y += -I$(nandx_dir)/include/uboot
+endif
+
+nandx-y :=
+include $(nandx_dir)/core/Nandx.mk
+nandx-target := $(nandx-prefix)/core/$(nandx-postfix)
+$(nandx-obj) += $(patsubst %.c, $(nandx-target), $(nandx-y))
+
+
+nandx-y :=
+include $(nandx_dir)/driver/Nandx.mk
+nandx-target := $(nandx-prefix)/driver/$(nandx-postfix)
+$(nandx-obj) += $(patsubst %.c, $(nandx-target), $(nandx-y))
+
+ifeq ($(NANDX_SIMULATOR_SUPPORT), y)
+cc := gcc
+CFLAGS += $(sim-inc)
+
+.PHONY:nandx
+nandx: $(sim-obj)
+	$(cc)  $(sim-obj) -o nandx
+
+.PHONY:clean
+clean:
+	rm -rf $(sim-obj) nandx
+endif
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/README b/src/kernel/linux/v4.19/drivers/mtd/nandx/README
new file mode 100644
index 0000000..0feaeae
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/README
@@ -0,0 +1,31 @@
+
+                          NAND2.0
+                ===============================
+
+    NAND2.0 is a common nand driver which is designed for accessing
+different types of NAND (SLC, SPI-NAND, MLC, TLC) on various OSes. This
+driver can work on most SoCs of Mediatek.
+
+    Although there is already a common nand driver, it doesn't cover
+SPI-NAND, and doesn't match our IC-Verification's requirement. We need
+a driver that can be extended or cut easily.
+
+    This driver is based on NANDX & SLC. We try to refactor structures,
+and make them inheritable. We also refactor some operations' flow
+principally for adding SPI-NAND support.
+
+    This driver's architecture is like:
+
+          Driver @LK/Uboot/DA...           |IC verify/other purposes
+    ----------------------------------------------------------------
+      partition       |        BBM         |
+    -------------------------------------- |       extend_core
+             nandx_core/core_io            |
+    ----------------------------------------------------------------
+             nand_chip/nand_base           |
+    -------------------------------------- |        extend_nfi
+      nand_device     |    nfi/nfi_base    |
+
+    Any block of the above graph can be extended at your will. If you
+want to add a new feature into this code, please make sure that your code
+follows the framework, and we would appreciate it.
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/core/Nandx.mk b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/Nandx.mk
new file mode 100644
index 0000000..8f998fe
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/Nandx.mk
@@ -0,0 +1,38 @@
+#
+# Copyright (C) 2017 MediaTek Inc.
+# Licensed under either
+#     BSD Licence, (see NOTICE for more details)
+#     GNU General Public License, version 2.0, (see NOTICE for more details)
+#
+
+nandx-y += nand_device.c
+nandx-y += nand_base.c
+nandx-y += nand_chip.c
+nandx-y += core_io.c
+
+nandx-header-y += nand_device.h
+nandx-header-y += nand_base.h
+nandx-header-y += nand_chip.h
+nandx-header-y += core_io.h
+nandx-header-y += nfi.h
+
+nandx-$(NANDX_NAND_SPI) += nand/device_spi.c
+nandx-$(NANDX_NAND_SPI) += nand/nand_spi.c
+nandx-$(NANDX_NAND_SLC) += nand/device_slc.c
+nandx-$(NANDX_NAND_SLC) += nand/nand_slc.c
+
+nandx-header-$(NANDX_NAND_SPI) += nand/device_spi.h
+nandx-header-$(NANDX_NAND_SPI) += nand/nand_spi.h
+nandx-header-$(NANDX_NAND_SLC) += nand/device_slc.h
+nandx-header-$(NANDX_NAND_SLC) += nand/nand_slc.h
+
+nandx-$(NANDX_NFI_BASE) += nfi/nfi_base.c
+nandx-$(NANDX_NFI_ECC) += nfi/nfiecc.c
+nandx-$(NANDX_NFI_SPI) += nfi/nfi_spi.c
+
+nandx-header-$(NANDX_NFI_BASE) += nfi/nfi_base.h
+nandx-header-$(NANDX_NFI_BASE) += nfi/nfi_regs.h
+nandx-header-$(NANDX_NFI_ECC) += nfi/nfiecc.h
+nandx-header-$(NANDX_NFI_ECC) += nfi/nfiecc_regs.h
+nandx-header-$(NANDX_NFI_SPI) += nfi/nfi_spi.h
+nandx-header-$(NANDX_NFI_SPI) += nfi/nfi_spi_regs.h
\ No newline at end of file
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/core/core_io.c b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/core_io.c
new file mode 100644
index 0000000..252aee8
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/core_io.c
@@ -0,0 +1,833 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+/*NOTE: switch cache/multi*/
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "nand_chip.h"
+#include "core_io.h"
+
+static struct nandx_desc *g_nandx;
+
+static inline bool is_sector_align(u64 val)
+{
+	return reminder(val, g_nandx->chip->sector_size) ? false : true;
+}
+
+static inline bool is_page_align(u64 val)
+{
+	return reminder(val, g_nandx->chip->page_size) ? false : true;
+}
+
+static inline bool is_block_align(u64 val)
+{
+	return reminder(val, g_nandx->chip->block_size) ? false : true;
+}
+
+static inline u32 page_sectors(void)
+{
+	return div_down(g_nandx->chip->page_size, g_nandx->chip->sector_size);
+}
+
+static inline u32 sector_oob(void)
+{
+	return div_down(g_nandx->chip->oob_size, page_sectors());
+}
+
+static inline u32 sector_padded_size(void)
+{
+	return g_nandx->chip->sector_size + g_nandx->chip->sector_spare_size;
+}
+
+static inline u32 page_padded_size(void)
+{
+	return page_sectors() * sector_padded_size();
+}
+
+static inline u32 offset_to_padded_col(u64 offset)
+{
+	struct nandx_desc *nandx = g_nandx;
+	u32 col, sectors;
+
+	col = reminder(offset, nandx->chip->page_size);
+	sectors = div_down(col, nandx->chip->sector_size);
+
+	return col + sectors * nandx->chip->sector_spare_size;
+}
+
+static inline u32 offset_to_row(u64 offset)
+{
+	return div_down(offset, g_nandx->chip->page_size);
+}
+
+static inline u32 offset_to_col(u64 offset)
+{
+	return reminder(offset, g_nandx->chip->page_size);
+}
+
+static inline u32 oob_upper_size(void)
+{
+	return g_nandx->ecc_en ? (u32)g_nandx->chip->oob_size :
+	       g_nandx->chip->sector_spare_size * page_sectors();
+}
+
+static inline bool is_upper_oob_align(u64 val)
+{
+	return reminder(val, oob_upper_size()) ? false : true;
+}
+
+#define prepare_op(_op, _row, _col, _len, _data, _oob) \
+	do { \
+		(_op).row = (_row); \
+		(_op).col = (_col); \
+		(_op).len = (_len); \
+		(_op).data = (_data); \
+		(_op).oob = (_oob); \
+	} while (0)
+
+static int operation_multi(enum nandx_op_mode mode, u8 *data, u8 *oob,
+			   u64 offset, size_t len)
+{
+	struct nandx_desc *nandx = g_nandx;
+	u32 row = offset_to_row(offset);
+	u32 col = offset_to_padded_col(offset);
+
+	if (nandx->mode == NANDX_IDLE) {
+		nandx->mode = mode;
+		nandx->ops_current = 0;
+	} else if (nandx->mode != mode) {
+		pr_err("forbid mixed operations.\n");
+		return -EOPNOTSUPP;
+	}
+
+	prepare_op(nandx->ops[nandx->ops_current], row, col, len, data, oob);
+	nandx->ops_current++;
+
+	if (nandx->ops_current == nandx->ops_multi_len)
+		return nandx_sync();
+
+	return nandx->ops_multi_len - nandx->ops_current;
+}
+
+static int operation_sequent(enum nandx_op_mode mode, u8 *data, u8 *oob,
+			     u64 offset, size_t len)
+{
+	struct nandx_desc *nandx = g_nandx;
+	struct nand_chip *chip = nandx->chip;
+	u32 row = offset_to_row(offset);
+	func_chip_ops chip_ops;
+	u8 *ref_data = data, *ref_oob = oob;
+	int align, ops, row_step;
+	int i, rem;
+
+	align = data ? chip->page_size : oob_upper_size();
+
+	ops = data ? div_down(len, align) : div_down(len, oob_upper_size());
+	row_step = 1;
+
+	switch (mode) {
+	case NANDX_ERASE:
+		chip_ops = chip->erase_block;
+		align = chip->block_size;
+		ops = div_down(len, align);
+		row_step = chip->block_pages;
+		break;
+
+	case NANDX_READ:
+		chip_ops = chip->read_page;
+		break;
+
+	case NANDX_WRITE:
+		chip_ops = chip->write_page;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if (!data) {
+		ref_data = nandx->head_buf;
+		memset(ref_data, 0xff, chip->page_size);
+	}
+
+	if (!oob) {
+		ref_oob = nandx->head_buf + chip->page_size;
+		memset(ref_oob, 0xff, oob_upper_size());
+	}
+
+	for (i = 0; i < ops; i++) {
+		prepare_op(nandx->ops[nandx->ops_current],
+			   row + i * row_step, 0, align, ref_data, ref_oob);
+		nandx->ops_current++;
+		/* if data or oob is null, nandx->head_buf or
+		 * nandx->head_buf + chip->page_size should not been used
+		 * so, here it is safe to use the buf.
+		 */
+		ref_data = data ? ref_data + chip->page_size : nandx->head_buf;
+		ref_oob = oob ? ref_oob + oob_upper_size() :
+			  nandx->head_buf + chip->page_size;
+	}
+
+	if (nandx->mode == NANDX_WRITE) {
+		rem = reminder(nandx->ops_current, nandx->min_write_pages);
+		if (rem)
+			return nandx->min_write_pages - rem;
+	}
+
+	nandx->ops_current = 0;
+	return chip_ops(chip, nandx->ops, ops);
+}
+
+static int read_pages(u8 *data, u8 *oob, u64 offset, size_t len)
+{
+	struct nandx_desc *nandx = g_nandx;
+	struct nand_chip *chip = nandx->chip;
+	struct nandx_split64 split = {0};
+	u8 *ref_data = data, *ref_oob;
+	u32 row, col;
+	int ret = 0, i, ops;
+	u32 head_offset = 0;
+	u64 val;
+
+	if (!data)
+		return operation_sequent(NANDX_READ, NULL, oob, offset, len);
+
+	ref_oob = oob ? oob : nandx->head_buf + chip->page_size;
+
+	nandx_split(&split, offset, len, val, (u64)chip->page_size);
+
+	if (split.head_len) {
+		row = offset_to_row(split.head);
+		col = offset_to_col(split.head);
+		prepare_op(nandx->ops[nandx->ops_current], row, 0,
+			   chip->page_size,
+			   nandx->head_buf, ref_oob);
+		nandx->ops_current++;
+
+		head_offset = col;
+
+		ref_data += split.head_len;
+		ref_oob = oob ? ref_oob + oob_upper_size() :
+			  nandx->head_buf + chip->page_size;
+	}
+
+	if (split.body_len) {
+		ops = div_down(split.body_len, chip->page_size);
+		row = offset_to_row(split.body);
+		for (i = 0; i < ops; i++) {
+			prepare_op(nandx->ops[nandx->ops_current],
+				   row + i, 0, chip->page_size,
+				   ref_data, ref_oob);
+			nandx->ops_current++;
+			ref_data += chip->page_size;
+			ref_oob = oob ? ref_oob + oob_upper_size() :
+				  nandx->head_buf + chip->page_size;
+		}
+	}
+
+	if (split.tail_len) {
+		row = offset_to_row(split.tail);
+		prepare_op(nandx->ops[nandx->ops_current], row, 0,
+			   chip->page_size, nandx->tail_buf, ref_oob);
+		nandx->ops_current++;
+	}
+
+	ret = chip->read_page(chip, nandx->ops, nandx->ops_current);
+
+	if (split.head_len)
+		memcpy(data, nandx->head_buf + head_offset, split.head_len);
+	if (split.tail_len)
+		memcpy(ref_data, nandx->tail_buf, split.tail_len);
+
+	nandx->ops_current = 0;
+	return ret;
+}
+
+int nandx_read(u8 *data, u8 *oob, u64 offset, size_t len)
+{
+	struct nandx_desc *nandx = g_nandx;
+	int ret;
+#if NANDX_PERFORMANCE_TRACE
+	int speed;
+	u64 time_cons = get_current_time_us();
+#endif
+
+	if (!len || len > nandx->info.total_size)
+		return -EINVAL;
+	if (div_up(len, nandx->chip->page_size) > (u64)nandx->ops_len)
+		return -EINVAL;
+	if (!data && !oob)
+		return -EINVAL;
+	/**
+	 * as design, oob not support partial read
+	 * and, the length of oob buf should be oob size aligned
+	 */
+	if (!data && !is_upper_oob_align(len))
+		return -EINVAL;
+
+	if (g_nandx->multi_en) {
+		/* as design, there only 2 buf for partial read,
+		 * if partial read allowed for multi read,
+		 * there are not enough buf
+		 */
+		if (!is_sector_align(offset))
+			return -EINVAL;
+		if (data && !is_sector_align(len))
+			return -EINVAL;
+		return operation_multi(NANDX_READ, data, oob, offset, len);
+	}
+
+	nandx->ops_current = 0;
+	nandx->mode = NANDX_IDLE;
+	ret = read_pages(data, oob, offset, len);
+
+#if NANDX_PERFORMANCE_TRACE
+	time_cons = get_current_time_us() - time_cons;
+	speed = div_down(len * 1024, time_cons);
+	if (nandx->performance.read_speed) {
+		speed += nandx->performance.read_speed;
+		speed = div_down(speed, 2);
+	}
+	nandx->performance.read_speed = speed;
+#endif
+
+	return ret;
+}
+
+static int write_pages(u8 *data, u8 *oob, u64 offset, size_t len)
+{
+	struct nandx_desc *nandx = g_nandx;
+	struct nand_chip *chip = nandx->chip;
+	struct nandx_split64 split = {0};
+	int ret, rem, i, ops;
+	u32 row, col;
+	u8 *ref_oob = oob;
+	u64 val;
+
+	nandx->mode = NANDX_WRITE;
+
+	if (!data)
+		return operation_sequent(NANDX_WRITE, NULL, oob, offset, len);
+
+	if (!oob) {
+		ref_oob = nandx->head_buf + chip->page_size;
+		memset(ref_oob, 0xff, oob_upper_size());
+	}
+
+	nandx_split(&split, offset, len, val, (u64)chip->page_size);
+
+	/*NOTE: slc can support sector write, here copy too many data.*/
+	if (split.head_len) {
+		row = offset_to_row(split.head);
+		col = offset_to_col(split.head);
+		memset(nandx->head_buf, 0xff, page_padded_size());
+		memcpy(nandx->head_buf + col, data, split.head_len);
+		prepare_op(nandx->ops[nandx->ops_current], row, 0,
+			   chip->page_size, nandx->head_buf, ref_oob);
+		nandx->ops_current++;
+
+		data += split.head_len;
+		ref_oob = oob ? ref_oob + oob_upper_size() :
+			  nandx->head_buf + chip->page_size;
+	}
+
+	if (split.body_len) {
+		row = offset_to_row(split.body);
+		ops = div_down(split.body_len, chip->page_size);
+		for (i = 0; i < ops; i++) {
+			prepare_op(nandx->ops[nandx->ops_current],
+				   row + i, 0, chip->page_size, data, ref_oob);
+			nandx->ops_current++;
+			data += chip->page_size;
+			ref_oob = oob ? ref_oob + oob_upper_size() :
+				  nandx->head_buf + chip->page_size;
+		}
+	}
+
+	if (split.tail_len) {
+		row = offset_to_row(split.tail);
+		memset(nandx->tail_buf, 0xff, page_padded_size());
+		memcpy(nandx->tail_buf, data, split.tail_len);
+		prepare_op(nandx->ops[nandx->ops_current], row, 0,
+			   chip->page_size, nandx->tail_buf, ref_oob);
+		nandx->ops_current++;
+	}
+
+	rem = reminder(nandx->ops_current, nandx->min_write_pages);
+	if (rem)
+		return nandx->min_write_pages - rem;
+
+	ret = chip->write_page(chip, nandx->ops, nandx->ops_current);
+
+	nandx->ops_current = 0;
+	nandx->mode = NANDX_IDLE;
+	return ret;
+}
+
+int nandx_write(u8 *data, u8 *oob, u64 offset, size_t len)
+{
+	struct nandx_desc *nandx = g_nandx;
+	int ret;
+#if NANDX_PERFORMANCE_TRACE
+	int speed;
+	u64 time_cons = get_current_time_us();
+#endif
+
+	if (!len || len > nandx->info.total_size)
+		return -EINVAL;
+	if (div_up(len, nandx->chip->page_size) > (u64)nandx->ops_len)
+		return -EINVAL;
+	if (!data && !oob)
+		return -EINVAL;
+	if (!data && !is_upper_oob_align(len))
+		return -EINVAL;
+
+	if (nandx->multi_en) {
+		if (!is_page_align(offset))
+			return -EINVAL;
+		if (data && !is_page_align(len))
+			return -EINVAL;
+
+		return operation_multi(NANDX_WRITE, data, oob, offset, len);
+	}
+
+	ret = write_pages(data, oob, offset, len);
+
+#if NANDX_PERFORMANCE_TRACE
+	time_cons = get_current_time_us() - time_cons;
+	speed = div_down(len * 1024, time_cons);
+	if (nandx->performance.write_speed) {
+		speed += nandx->performance.write_speed;
+		speed = div_down(speed, 2);
+	}
+	nandx->performance.write_speed = speed;
+#endif
+
+	return ret;
+}
+
+/* before invoke this interface, need to disable ecc. */
+int nandx_write_raw_pages(u8 *data, u64 offset, size_t len)
+{
+	struct nandx_desc *nandx = g_nandx;
+	struct nand_chip *chip = nandx->chip;
+	u32 row = offset_to_row(offset);
+	int pages, i;
+
+	/* TODO: check arguments validity, now only for test. */
+
+	pages = div_down(len, page_padded_size());
+	for (i = 0; i < pages; i++) {
+		prepare_op(nandx->ops[nandx->ops_current],
+			   row + i, 0, page_padded_size(),
+			   data + i * page_padded_size(), NULL);
+		nandx->ops_current++;
+	}
+
+	nandx->ops_current = 0;
+	return chip->write_page(chip, nandx->ops, pages);
+}
+
+int nandx_erase(u64 offset, size_t len)
+{
+	struct nandx_desc *nandx = g_nandx;
+	int ret;
+#if NANDX_PERFORMANCE_TRACE
+	int speed;
+	u64 time_cons = get_current_time_us();
+#endif
+
+	if (!len || len > nandx->info.total_size)
+		return -EINVAL;
+	if (div_down(len, nandx->chip->block_size) > (u64)nandx->ops_len)
+		return -EINVAL;
+	if (!is_block_align(offset) || !is_block_align(len))
+		return -EINVAL;
+
+	if (g_nandx->multi_en)
+		return operation_multi(NANDX_ERASE, NULL, NULL, offset, len);
+
+	nandx->ops_current = 0;
+	nandx->mode = NANDX_IDLE;
+	ret = operation_sequent(NANDX_ERASE, NULL, NULL, offset, len);
+
+#if NANDX_PERFORMANCE_TRACE
+	time_cons = get_current_time_us() - time_cons;
+	speed = div_down(len * 1024, time_cons);
+	if (nandx->performance.erase_speed) {
+		speed += nandx->performance.erase_speed;
+		speed = div_down(speed, 2);
+	}
+	nandx->performance.erase_speed = speed;
+#endif
+
+	return ret;
+}
+
+int nandx_sync(void)
+{
+	struct nandx_desc *nandx = g_nandx;
+	struct nand_chip *chip = nandx->chip;
+	func_chip_ops chip_ops;
+	int ret, i, rem;
+
+	if (!nandx->ops_current)
+		return 0;
+
+	rem = reminder(nandx->ops_current, nandx->ops_multi_len);
+	if (nandx->multi_en && rem) {
+		ret = -EIO;
+		goto error;
+	}
+
+	switch (nandx->mode) {
+	case NANDX_IDLE:
+		return 0;
+	case NANDX_ERASE:
+		chip_ops = chip->erase_block;
+		break;
+	case NANDX_READ:
+		chip_ops = chip->read_page;
+		break;
+	case NANDX_WRITE:
+		chip_ops = chip->write_page;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	rem = reminder(nandx->ops_current, nandx->min_write_pages);
+	if (!nandx->multi_en && nandx->mode == NANDX_WRITE && rem) {
+		/* in one process of program, only allow 2 pages to do partial
+		 * write, here we supposed 1st buf would be used, and 2nd
+		 * buf should be not used.
+		 */
+		memset(nandx->tail_buf, 0xff,
+		       chip->page_size + oob_upper_size());
+		for (i = 0; i < rem; i++) {
+			prepare_op(nandx->ops[nandx->ops_current],
+				   nandx->ops[nandx->ops_current - 1].row + 1,
+				   0, chip->page_size, nandx->tail_buf,
+				   nandx->tail_buf + chip->page_size);
+			nandx->ops_current++;
+		}
+	}
+
+	ret = chip_ops(nandx->chip, nandx->ops, nandx->ops_current);
+
+error:
+	nandx->mode = NANDX_IDLE;
+	nandx->ops_current = 0;
+
+	return ret;
+}
+
+static void trace_ioctl_info(int cmd, void *arg)
+{
+	char *cmds[CHIP_CTRL_PERF_INFO_CLEAR + 1] = {
+		"CORE_CTRL_NAND_INFO",
+		"CORE_CTRL_PERF_INFO",
+		"CORE_CTRL_PERF_INFO_CLEAR",
+		"NFI_CTRL_BASE_INFO",
+		"SNFI_CTRL_BASE_INFO",
+		"NFI_CTRL_NFI_IRQ",
+		"NFI_CTRL_ECC_IRQ",
+		"NFI_CTRL_ECC_PAGE_IRQ",
+		"NFI_CTRL_DMA",
+		"NFI_BURST_EN",
+		"NFI_ADDR_ALIGNMENT_EN",
+		"NFI_BYTE_RW_EN",
+		"NFI_CRC_EN",
+		"NFI_CTRL_RANDOMIZE",
+		"NFI_CTRL_RANDOMIZE_SEL",
+		"NFI_CTRL_IO_FORMAT",
+		"NFI_CTRL_ECC",
+		"NFI_CTRL_ECC_MODE",
+		"NFI_CTRL_ECC_DECODE_MODE",
+		"NFI_CTRL_ECC_ERRNUM0",
+		"NFI_CTRL_ECC_GET_STATUS",
+		"NFI_CTRL_BAD_MARK_SWAP",
+		"SNFI_CTRL_OP_MODE",
+		"SNFI_CTRL_RX_MODE",
+		"SNFI_CTRL_TX_MODE",
+		"SNFI_CTRL_DELAY_MODE",
+		"SNFI_CTRL_4FIFO_EN",
+		"SNFI_CTRL_GF_CONFIG",
+		"SNFI_CTRL_SAMPLE_DELAY",
+		"SNFI_CTRL_LATCH_LATENCY",
+		"SNFI_CTRL_MAC_QPI_MODE",
+		"CHIP_CTRL_OPS_CACHE",
+		"CHIP_CTRL_OPS_MULTI",
+		"CHIP_CTRL_PSLC_MODE",
+		"CHIP_CTRL_DRIVE_STRENGTH",
+		"CHIP_CTRL_DDR_MODE",
+		"CHIP_CTRL_DEVICE_RESET",
+		"CHIP_CTRL_ONDIE_ECC",
+		"CHIP_CTRL_TIMING_MODE",
+		"CHIP_CTRL_PERF_INFO",
+		"CHIP_CTRL_PERF_INFO_CLEAR"
+	};
+
+	pr_debug("[CORE_IO] Set %s(%d) to %d\n", cmds[cmd], cmd, *(int *)arg);
+}
+
+int nandx_ioctl(int cmd, void *arg)
+{
+	struct nandx_desc *nandx = g_nandx;
+	struct nand_chip *chip = nandx->chip;
+	int ret = 0;
+
+	trace_ioctl_info(cmd, arg);
+	
+	switch (cmd) {
+	case CORE_CTRL_NAND_INFO:
+		*(struct nandx_info *)arg = nandx->info;
+		break;
+
+	case CORE_CTRL_PERF_INFO:
+		chip->chip_ctrl(chip, CHIP_CTRL_PERF_INFO,
+				&nandx->performance.page_perf);
+		*(struct nand_performance *)arg = nandx->performance;
+		break;
+
+	case CORE_CTRL_PERF_INFO_CLEAR:
+		chip->chip_ctrl(chip, CHIP_CTRL_PERF_INFO_CLEAR, arg);
+		memset(&nandx->performance, 0, sizeof(struct nand_performance));
+		break;
+
+	case CHIP_CTRL_OPS_MULTI:
+		ret = chip->chip_ctrl(chip, cmd, arg);
+		if (!ret)
+			nandx->multi_en = *(bool *)arg;
+		break;
+
+	case NFI_CTRL_ECC:
+		ret = chip->chip_ctrl(chip, cmd, arg);
+		if (!ret)
+			nandx->ecc_en = *(bool *)arg;
+		break;
+
+	default:
+		ret = chip->chip_ctrl(chip, cmd, arg);
+		break;
+	}
+
+	return ret;
+}
+
+bool nandx_is_bad_block(u64 offset)
+{
+	struct nandx_desc *nandx = g_nandx;
+
+	prepare_op(nandx->ops[0], offset_to_row(offset), 0,
+		   nandx->chip->page_size, nandx->head_buf,
+		   nandx->head_buf + nandx->chip->page_size);
+
+	return nandx->chip->is_bad_block(nandx->chip, nandx->ops, 1);
+}
+
+int nandx_suspend(void)
+{
+	return g_nandx->chip->suspend(g_nandx->chip);
+}
+
+int nandx_resume(void)
+{
+	return g_nandx->chip->resume(g_nandx->chip);
+}
+
+int nandx_init(struct nfi_resource *res)
+{
+	struct nand_chip *chip;
+	struct nandx_desc *nandx;
+	int ret = 0;
+
+	if (!res)
+		return -EINVAL;
+
+	chip = nand_chip_init(res);
+	if (!chip) {
+		pr_err("nand chip init fail.\n");
+		return -EFAULT;
+	}
+
+	nandx = (struct nandx_desc *)mem_alloc(1, sizeof(struct nandx_desc));
+	if (!nandx) {
+		ret = -ENOMEM;
+		goto nandx_error;
+	}
+
+	g_nandx = nandx;
+
+	nandx->chip = chip;
+	nandx->min_write_pages = chip->min_program_pages;
+	nandx->ops_multi_len = nandx->min_write_pages * chip->plane_num;
+	nandx->ops_len = chip->block_pages * chip->plane_num;
+	nandx->ops = mem_alloc(1, sizeof(struct nand_ops) * nandx->ops_len);
+	if (!nandx->ops) {
+		ret = -ENOMEM;
+		goto ops_error;
+	}
+
+#if NANDX_BULK_IO_USE_DRAM
+	nandx->head_buf = (u8 *)NANDX_CORE_BUF_ADDR;
+#else
+	nandx->head_buf = (u8 *)mem_alloc(2, page_padded_size());
+#endif
+	if (!nandx->head_buf) {
+		ret = -ENOMEM;
+		goto buf_error;
+	}
+	nandx->tail_buf = nandx->head_buf + page_padded_size();
+	memset(nandx->head_buf, 0xff, 2 * page_padded_size());
+	nandx->multi_en = false;
+	nandx->ecc_en = false;
+	nandx->ops_current = 0;
+	nandx->mode = NANDX_IDLE;
+
+	nandx->info.max_io_count = nandx->ops_len;
+	nandx->info.min_write_pages = nandx->min_write_pages;
+	nandx->info.plane_num = chip->plane_num;
+	nandx->info.oob_size = chip->oob_size;
+	nandx->info.page_parity_size = chip->sector_spare_size * page_sectors();
+	nandx->info.page_size = chip->page_size;
+	nandx->info.block_size = chip->block_size;
+	nandx->info.total_size = (u64)chip->block_size * (u64)chip->block_num;
+	nandx->info.fdm_ecc_size = chip->fdm_ecc_size;
+	nandx->info.fdm_reg_size = chip->fdm_reg_size;
+	nandx->info.ecc_strength = chip->ecc_strength;
+	nandx->info.sector_size = chip->sector_size;
+
+	memset(&nandx->performance, 0, sizeof(struct nand_performance));
+
+	return 0;
+
+buf_error:
+	mem_free(nandx->ops);
+ops_error:
+	mem_free(nandx);
+nandx_error:
+	nand_chip_exit(chip);
+
+	return ret;
+}
+
+void nandx_exit(void)
+{
+	nand_chip_exit(g_nandx->chip);
+#if !NANDX_BULK_IO_USE_DRAM
+	mem_free(g_nandx->head_buf);
+#endif
+	mem_free(g_nandx->ops);
+	mem_free(g_nandx);
+}
+
+#ifdef NANDX_TEST_UT
+static void dump_buf(u8 *buf, u32 len)
+{
+	u32 i;
+
+	pr_info("dump buf@0x%X start", buf);
+	for (i = 0; i < len; i++) {
+		if (!reminder(i, 16))
+			pr_info("\n0x");
+		pr_info("%x ", buf[i]);
+	}
+	pr_info("\ndump buf done.\n");
+}
+
+int nandx_unit_test(u64 offset, size_t len)
+{
+	u8 *src_buf, *dst_buf;
+	size_t i;
+	int ret;
+
+	if (!len || len > g_nandx->chip->block_size)
+		return -EINVAL;
+
+#if NANDX_BULK_IO_USE_DRAM
+	src_buf = NANDX_UT_SRC_ADDR;
+	dst_buf = NANDX_UT_DST_ADDR;
+#else
+
+	src_buf = mem_alloc(1, len);
+	if (!src_buf)
+		return -ENOMEM;
+
+	dst_buf = mem_alloc(1, len);
+	if (!dst_buf) {
+		mem_free(src_buf);
+		return -ENOMEM;
+	}
+#endif
+
+	pr_debug("%s: src_buf address 0x%x, dst_buf address 0x%x\n",
+		 __func__, (int)((unsigned long)src_buf),
+		 (int)((unsigned long)dst_buf));
+
+	/*fill random data in source buffer, em... it's not real random data.*/
+	for (i = 0; i < len; i++)
+		src_buf[i] = (u8)reminder(get_current_time_us(), 255);
+
+	ret = nandx_erase(offset, g_nandx->chip->block_size);
+	if (ret < 0) {
+		pr_err("erase fail with ret %d\n", ret);
+		goto error;
+	}
+
+	ret = nandx_write(src_buf, NULL, offset, len);
+	if (ret < 0) {
+		pr_err("write fail with ret %d\n", ret);
+		goto error;
+	}
+
+	ret = nandx_read(dst_buf, NULL, offset, len);
+	if (ret < 0) {
+		pr_err("read fail with ret %d\n", ret);
+		goto error;
+	}
+
+	for (i = 0; i < len; i++) {
+		if (dst_buf[i] != src_buf[i]) {
+			pr_err("read after write, check fail\n");
+			pr_err("dst_buf should be same as src_buf\n");
+			ret = -EIO;
+			dump_buf(src_buf, len);
+			dump_buf(dst_buf, len);
+			goto error;
+		}
+	}
+
+	ret = nandx_erase(offset, g_nandx->chip->block_size);
+	if (ret < 0) {
+		pr_err("erase fail with ret %d\n", ret);
+		goto error;
+	}
+
+	ret = nandx_read(dst_buf, NULL, offset, len);
+	if (ret < 0) {
+		pr_err("read fail with ret %d\n", ret);
+		goto error;
+	}
+
+	for (i = 0; i < len; i++) {
+		if (dst_buf[i] != 0xff) {
+			pr_err("read after erase, check fail\n");
+			pr_err("all data should be 0xff\n");
+			ret = -ENANDERASE;
+			dump_buf(dst_buf, len);
+			goto error;
+		}
+	}
+
+error:
+#if !NANDX_BULK_IO_USE_DRAM
+	mem_free(src_buf);
+	mem_free(dst_buf);
+#endif
+
+	return ret;
+}
+#endif
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/core/core_io.h b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/core_io.h
new file mode 100644
index 0000000..7e0f3963
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/core_io.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#ifndef __CORE_IO_H__
+#define __CORE_IO_H__
+
+typedef int (*func_chip_ops)(struct nand_chip *, struct nand_ops *,
+			     int);
+
+enum nandx_op_mode {
+	NANDX_IDLE,
+	NANDX_WRITE,
+	NANDX_READ,
+	NANDX_ERASE
+};
+
+struct nandx_desc {
+	struct nand_chip *chip;
+	struct nandx_info info;
+	enum nandx_op_mode mode;
+
+	bool multi_en;
+	bool ecc_en;
+
+	struct nand_ops *ops;
+	int ops_len;
+	int ops_multi_len;
+	int ops_current;
+	int min_write_pages;
+
+	u8 *head_buf;
+	u8 *tail_buf;
+
+	struct nand_performance performance;
+};
+
+#endif /* __CORE_IO_H__ */
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand/device_slc.c b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand/device_slc.c
new file mode 100644
index 0000000..175e6cd
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand/device_slc.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "../nand_device.h"
+#include "device_slc.h"
+
+/* onfi nand timing mode */
+static struct slc_timing_mode timing_mode[] = {
+	{
+		.addr = 0x01,
+		.mode = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05}
+	},
+};
+
+/* tREA, tREH, tCR, tRP, tWP, tWH, tWHR, tCLS, tALS, tCLH,tALH, tWC, tRC */
+static struct nand_sdr_timing sdr_timing[SLC_TIMING_NUM] = {
+	{40, 30, 0, 50, 50, 30, 120, 50, 50, 20, 20, 100, 100},
+	{30, 15, 0, 25, 25, 15,  80, 25, 25, 10, 10,  45,  50},
+	{25, 15, 0, 20, 17, 15,  80, 15, 15, 10, 10,  35,  35},
+	{20, 10, 0, 15, 15, 10,  60, 10, 10,  5,  5,  30,  30},
+	{20, 10, 0, 12, 12, 10,  60, 12, 12,  5,  5,  25,  25},
+	{16,  7, 0, 10, 10,  7,  60, 10, 10,  5,  5,  20,  20},
+};
+
+/* Not all SLC devices entirely match ONFI-1.0 spec. So customize them here.
+ * MT29F8G08ABBCAH4 & MT29F4G08ABBFAH4 use the following settings
+ * and support timing mode 0~3
+ */
+static struct nand_sdr_timing sdr_timing_micron_slc[SLC_TIMING_NUM] = {
+	{40, 30, 0, 50, 50, 30, 120, 50, 50, 20, 20, 100, 100},
+	{30, 15, 0, 25, 25, 15,  80, 25, 25, 10, 10,  45,  50},
+	{25, 15, 0, 20, 17, 15,  80, 15, 15, 10, 10,  35,  35},
+	{25, 10, 0, 15, 15, 10,  80, 10, 10,  5,  5,  30,  30},
+};
+
+static struct nand_sdr_timing sdr_timing_winbond_slc[SLC_TIMING_NUM] = {
+	{40, 30, 0, 50, 50, 30, 120, 50, 50, 20, 20, 100, 100},
+	{30, 15, 0, 25, 25, 15,  80, 25, 25, 10, 10,  45,  50},
+	{25, 15, 0, 20, 17, 15,  80, 15, 15, 10, 10,  35,  35},
+};
+
+/* tREA, tREH, tCR, tRP, tWP, tWH, tWHR, tCLS, tALS, tCLH,tALH, tWC, tRC */
+static struct nand_sdr_timing sdr_timing_hynix_slc[] = {
+	{30, 15, 10, 25, 25, 15,  60, 25, 25, 10, 10,  45,  45},
+};
+
+/* tREA, tREH, tCR, tRP, tWP, tWH, tWHR, tCLS, tALS, tCLH,tALH, tWC, tRC */
+static struct nand_sdr_timing sdr_timing_NM4888KMPAXAI[] = {
+	{22, 10, 0, 12, 12, 10,  80, 10, 10, 5, 5, 25, 25},
+};
+
+/* onfi nand basic commands */
+static struct nand_cmds onfi_cmds = {
+	.reset = 0xff,
+	.read_id = 0x90,
+	.read_status = 0x70,
+	.read_param_page = 0xec,
+	.set_feature = 0xef,
+	.get_feature = 0xee,
+	.read_1st = 0x00,
+	.read_2nd = 0x30,
+	.random_out_1st = 0x05,
+	.random_out_2nd = 0xe0,
+	.program_1st = 0x80,
+	.program_2nd = 0x10,
+	.erase_1st = 0x60,
+	.erase_2nd = 0xd0,
+	.read_cache = 0x31,
+	.read_cache_last = 0x3f,
+	.program_cache = 0x15,
+};
+
+/* onfi nand extend commands */
+static struct slc_extend_cmds onfi_extend_cmds = {
+	.read_multi_1st = 0x00,
+	.read_multi_2nd = 0x32,
+	.program_multi_1st = 0x80,
+	.program_multi_2nd = 0x11,
+	.erase_multi_1st = 0x60,
+	.erase_multi_2nd = 0xd1,
+	.read_status_enhanced = 0x78
+};
+
+/* toggle nand basic commands */
+static struct nand_cmds toggle_cmds = {
+	.reset = 0xff,
+	.read_id = 0x90,
+	.read_status = 0x70,
+	.read_param_page = 0x03,
+	.set_feature = 0xef,
+	.get_feature = 0xee,
+	.read_1st = 0x00,
+	.read_2nd = 0x30,
+	.random_out_1st = 0x05,
+	.random_out_2nd = 0xe0,
+	.program_1st = 0x80,
+	.program_2nd = 0x10,
+	.erase_1st = 0x60,
+	.erase_2nd = 0xd0,
+	.read_cache = 0x31,
+	.read_cache_last = 0x3f,
+	.program_cache = 0x15,
+};
+
+/* toggle nand extend commands */
+static struct slc_extend_cmds toggle_extend_cmds = {
+	.read_multi_1st = 0x60,
+	.read_multi_2nd = 0x30,
+	.program_multi_1st = 0x80,
+	.program_multi_2nd = 0x11,
+	.erase_multi_1st = 0x60,
+	.erase_multi_2nd = -1,
+	.read_status_enhanced = 0xf1
+};
+
+/* means the start bit of addressing type */
+static struct nand_addressing slc_addressing = {
+	.row_bit_start = 0,
+	.block_bit_start = 6,
+	.plane_bit_start = 6,
+	.lun_bit_start = 18,
+};
+
+/* means the status of read status value by bit location */
+static struct nand_status slc_status = {
+	.array_busy = BIT(5),
+	.write_protect = BIT(7),
+	.erase_fail = BIT(0),
+	.program_fail = BIT(0)
+};
+
+/* measure cycle by the times */
+static struct nand_endurance slc_endurance = {
+	.pe_cycle = 60000,
+	.ecc_req = 1,
+	.max_bitflips = 8
+};
+
+/*
+ * measure time by the us.
+ * tRST:
+ * According to the datasheet of NAND MT29F8G08ABBCAH4:
+ * the first time the RESET(FFh) command is issued while the device is idle,
+ * the device will be busy for a maximum of 1ms.
+ *
+ * tR:
+ * JSFDDQ5QHAxGD max is 30us
+ *
+ * tPROG:
+ * JSFDDQ5QHAxGD max is 700us
+ *
+ * tPCBSY:
+ * JSFDDQ5QHAxGD max is 700us
+ *
+ * tFEAT:
+ * According to the datasheet of NAND MT29F8G08ABBCAH4:
+ * After sending the get feature command, R/B# needs tWB + tFEAT + tRR to be ready
+ * After sending the set feature command, R/B# needs tWB + tFEAT to be ready
+ * tWB max is 100ns, tFEAT max is 1us, tRR min is 20ns
+ * Considering system timer deviation, relax tFEAT to 5us
+ */
+static struct nand_array_timing slc_array_timing = {
+	.tRST = 1000,
+	.tWHR = 1,
+	.tR = 100,
+	.tRCBSY = 25,
+	.tFEAT = 5,
+	.tPROG = 700,
+	.tPCBSY = 700,
+	.tBERS = 10000,
+	.tDBSY = 1
+};
+
+/*
+ * NAND -nand basic information
+ * NAND_DEVICE
+ *   _name,
+ *   _id,
+ *   _id_len, _io_width, _row_cycle, _col_cycle,
+ *   _target_num, _lun_num, _plane_num, _block_num,
+ *   _block_size, _page_size, _spare_size, _min_program_pages,
+ *   _cmds, _addressing, _status,
+ *   _endurance, _array_timing
+ */
+static struct device_slc slc_nand[] = {
+	/* MCP */
+	/* JSC */
+	{
+		NAND_DEVICE("JSFDDQ5QHAxGD-405x",
+				NAND_PACK_ID(0xad, 0xa3, 0x81, 0x16, 0x20, 0, 0, 0),
+				5, NAND_IO8, 3, 2,
+				1, 1, 1, 4096,
+				KB(256), KB(4), 256, 1,
+				&onfi_cmds, &slc_addressing, &slc_status,
+				&slc_endurance, &slc_array_timing),
+		SLC_DRIVE_STRENGTH(0x80, 0x00, 0x01, 0x02, 0x03),
+		&onfi_extend_cmds,
+		CHIP_TIMING_MODE0,
+		NULL,
+		sdr_timing_hynix_slc
+	},
+	/* NANYA */
+	{
+		NAND_DEVICE("NM4484NSPAXAE-3E",
+				NAND_PACK_ID(0x90, 0xac, 0x90, 0x26, 0x76, 0, 0, 0),
+				5, NAND_IO8, 3, 2,
+				1, 1, 1, 2048,
+				KB(256), KB(4), 256, 1,
+				&onfi_cmds, &slc_addressing, &slc_status,
+				&slc_endurance, &slc_array_timing),
+		SLC_DRIVE_STRENGTH(0x80, 0x00, 0x01, 0x02, 0x03),
+		&onfi_extend_cmds,
+		CHIP_TIMING_MODE0,
+		&timing_mode[0],
+		sdr_timing
+	},
+	{
+		NAND_DEVICE("NM4888KMPAXAI",
+				NAND_PACK_ID(0xc2, 0xa3, 0xd1, 0x15, 0x5a, 0, 0, 0),
+				5, NAND_IO8, 3, 2,
+				1, 1, 1, 8192,
+				KB(128), KB(2), 64, 1,
+				&onfi_cmds, &slc_addressing, &slc_status,
+				&slc_endurance, &slc_array_timing),
+		SLC_DRIVE_STRENGTH(0x80, 0x00, 0x01, 0x02, 0x03),
+		&onfi_extend_cmds,
+		CHIP_TIMING_MODE0,
+		&timing_mode[0],
+		sdr_timing_NM4888KMPAXAI
+	},
+	/* Parallel-NAND*/
+	/* Micron */
+	{
+		NAND_DEVICE("MT29F4G08ABA",
+				NAND_PACK_ID(0x2c, 0xdc, 0x80, 0xa6, 0x62, 0, 0, 0),
+				5, NAND_IO8, 3, 2,
+				1, 1, 1, 4096,
+				KB(256), KB(4), 256, 1,
+				&onfi_cmds, &slc_addressing, &slc_status,
+				&slc_endurance, &slc_array_timing),
+		SLC_DRIVE_STRENGTH(0x80, 0x00, 0x01, 0x02, 0x03),
+		&onfi_extend_cmds,
+		CHIP_TIMING_MODE0,
+		&timing_mode[0],
+		sdr_timing
+	},
+	{
+		/* This device is used on:
+		 * mt6880's socket board
+		 * m.2 datacard
+		 */
+		NAND_DEVICE("MT29F4G08ABBFAH4-IT:F",
+				NAND_PACK_ID(0x2c, 0xac, 0x80, 0x26, 0x62, 0, 0, 0),
+				5, NAND_IO8, 3, 2,
+				1, 1, 1, 2048,
+				KB(256), KB(4), 256, 1,
+				&onfi_cmds, &slc_addressing, &slc_status,
+				&slc_endurance, &slc_array_timing),
+		SLC_DRIVE_STRENGTH(0x80, 0x00, 0x01, 0x02, 0x03),
+		&onfi_extend_cmds,
+		CHIP_TIMING_MODE3,
+		&timing_mode[0],
+		sdr_timing_micron_slc
+	},
+	{
+		/* This device is used on:
+		 * mt6890's MP board
+		 */
+		NAND_DEVICE("MT29F8G08ABBCAH4-IT:C",
+				NAND_PACK_ID(0x2c, 0xa3, 0x90, 0x26, 0x64, 0, 0, 0),
+				5, NAND_IO8, 3, 2,
+				1, 1, 2, 2048,
+				KB(256), KB(4), 224, 1,
+				&onfi_cmds, &slc_addressing, &slc_status,
+				&slc_endurance, &slc_array_timing),
+		SLC_DRIVE_STRENGTH(0x80, 0x00, 0x01, 0x02, 0x03),
+		&onfi_extend_cmds,
+		CHIP_TIMING_MODE3,
+		&timing_mode[0],
+		sdr_timing_micron_slc
+	},
+	{
+		/*
+		 * MT29F8G08ADBFA or
+		 * MT29GZ6A6BPIET-53AIT.112 or MT29GZ6A6BPIET-53AAT.112
+		 */
+		NAND_DEVICE("MT29F8G08ADBFA",
+				NAND_PACK_ID(0x2c, 0xa3, 0xd0, 0x26, 0x66, 0, 0, 0),
+				5, NAND_IO8, 3, 2,
+				1, 1, 1, 4096,
+				KB(256), KB(4), 256, 1,
+				&onfi_cmds, &slc_addressing, &slc_status,
+				&slc_endurance, &slc_array_timing),
+		SLC_DRIVE_STRENGTH(0x80, 0x00, 0x01, 0x02, 0x03),
+		&onfi_extend_cmds,
+		CHIP_TIMING_MODE3,
+		&timing_mode[0],
+		sdr_timing_micron_slc /* also for MT29GZ6A6BPIET-046AAT.112 */
+	},
+	/* Winbond */
+	{
+		NAND_DEVICE("W29N04GZBIBA",
+				NAND_PACK_ID(0xef, 0xac, 0x90, 0x15, 0x54, 0, 0, 0),
+				5, NAND_IO8, 3, 2,
+				1, 1, 2, 2048,
+				KB(128), KB(2), 64, 1,
+				&onfi_cmds, &slc_addressing, &slc_status,
+				&slc_endurance, &slc_array_timing),
+		SLC_DRIVE_STRENGTH(0x80, 0x00, 0x01, 0x02, 0x03),
+		&onfi_extend_cmds,
+		CHIP_TIMING_MODE2,
+		&timing_mode[0],
+		sdr_timing_winbond_slc
+	},
+	{
+		NAND_DEVICE("W29N08GZBIBA",
+				NAND_PACK_ID(0xef, 0xa3, 0x91, 0x15, 0x58, 0, 0, 0),
+				5, NAND_IO8, 3, 2,
+				1, 2, 2, 2048,
+				KB(128), KB(2), 64, 1,
+				&onfi_cmds, &slc_addressing, &slc_status,
+				&slc_endurance, &slc_array_timing),
+		SLC_DRIVE_STRENGTH(0x80, 0x00, 0x01, 0x02, 0x03),
+		&onfi_extend_cmds,
+		CHIP_TIMING_MODE2,
+		&timing_mode[0],
+		sdr_timing_winbond_slc
+	},
+	/* Toshiba */
+	{
+		NAND_DEVICE("TC58NVG2S0HTA00",
+			    NAND_PACK_ID(0x98, 0xdc, 0x90, 0x26, 0x76,
+					 0x16, 0, 0),
+			    6, NAND_IO8, 3, 2,
+			    1, 1, 1, 2048,
+			    KB(256), KB(4), 256, 1,
+			    &toggle_cmds, &slc_addressing, &slc_status,
+			    &slc_endurance, &slc_array_timing),
+		SLC_DRIVE_STRENGTH(0x10, 0x04, 0x06, 0x04, 0x02),
+		&toggle_extend_cmds,
+		CHIP_TIMING_MODE0,
+		NULL,
+		sdr_timing
+	},
+	{
+		NAND_DEVICE("MX30UF4G18AC",
+			    NAND_PACK_ID(0xc2, 0xac, 0x90, 0x15, 0x56,
+					 0, 0, 0),
+			    5, NAND_IO8, 3, 2,
+			    1, 1, 1, 4096,
+			    KB(128), KB(2), 64, 1,
+			    &onfi_cmds, &slc_addressing, &slc_status,
+			    &slc_endurance, &slc_array_timing),
+		SLC_DRIVE_STRENGTH(0x10, 0x04, 0x06, 0x04, 0x02),
+		&onfi_extend_cmds,
+		CHIP_TIMING_MODE0,
+		&timing_mode[0],
+		sdr_timing
+	},
+	{
+		NAND_DEVICE("NO-DEVICE",
+			    NAND_PACK_ID(0, 0, 0, 0, 0, 0, 0, 0),
+			    0, 0, 0, 0,
+			    0, 0, 0, 0,
+			    0, 0, 0, 1,
+			    &onfi_cmds, &slc_addressing, &slc_status,
+			    &slc_endurance, &slc_array_timing),
+		SLC_DRIVE_STRENGTH(0x80, 0x00, 0x01, 0x02, 0x03),
+		NULL,
+		CHIP_TIMING_MODE0,
+		NULL,
+		sdr_timing
+	}
+};
+
+struct nand_device *nand_get_device(int index)
+{
+	return &slc_nand[index].dev;
+}
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand/device_slc.h b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand/device_slc.h
new file mode 100644
index 0000000..b1bf566
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand/device_slc.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#ifndef __DEVICE_SLC_H__
+#define __DEVICE_SLC_H__
+
+#define SLC_TIMING_NUM  (CHIP_TIMING_MODE5 + 1)
+
+/*
+ * timing mode
+ *   onfi nand has timing mode setting by set feature
+ *   but the device would adjust its timing mode when the host sends timing on slc nand
+ * @addr: feature address for timing mode
+ * @mode: feature value for mode selection
+ */
+struct slc_timing_mode {
+	u8 addr;
+	u8 mode[SLC_TIMING_NUM];
+};
+
+/*
+ * drive strength
+ *    control signal strength, the greater the value, the higher the
+ *    signal strength
+ *    use SLC_DRIVE_STRENGTH macro to assign the value on device table
+ * @addr: feature address for nand device drive strength
+ * @normal: default value by manufacturer
+ * @high: higher drive strength than default
+ * @middle: lower drive strength than default
+ * @low: the lowest drive strength
+ */
+struct slc_drive_strength {
+	u8 addr;
+	u8 normal;
+	u8 high;
+	u8 middle;
+	u8 low;
+};
+
+/*
+ * extend cmds
+ * @read_multi_1st: first command of multi-plane read operation
+ * @read_multi_2nd: second command of multi-plane read operation
+ * @program_multi_1st: first command of multi-plane program operation
+ * @program_multi_2nd: second command of multi-plane program operation
+ * @erase_multi_1st: first command of multi-plane erase operation
+ * @erase_multi_2nd: second command of multi-plane erase operation
+ * @read_status_enhanced: enhanced command of read status
+ */
+struct slc_extend_cmds {
+	short read_multi_1st;
+	short read_multi_2nd;
+	short program_multi_1st;
+	short program_multi_2nd;
+	short erase_multi_1st;
+	short erase_multi_2nd;
+	short read_status_enhanced;
+};
+
+/*
+ * device_slc
+ *    configurations of slc nand device table
+ * @dev: base information of nand device
+ * @drive_strength: feature information of nand drive strength
+ * @timing_mode: feature information of nand timing mode
+ * @extend_cmds: extended the nand base commands
+ * @timing: nand operated sdr timing setting for NFI
+ */
+struct device_slc {
+	struct nand_device dev;
+	struct slc_drive_strength drive_strength;
+	struct slc_extend_cmds *extend_cmds;
+	enum chip_ctrl_timing_mode default_mode;
+	struct slc_timing_mode *timing_mode;
+	struct nand_sdr_timing *timing;
+};
+
+#define SLC_DRIVE_STRENGTH(_addr, _normal, _high, _middle, _low) \
+	{ _addr, _normal, _high, _middle, _low }
+
+static inline struct device_slc *device_to_slc(struct nand_device *dev)
+{
+	return container_of(dev, struct device_slc, dev);
+}
+
+#endif /* __DEVICE_SLC_H__ */
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand/device_spi.c b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand/device_spi.c
new file mode 100644
index 0000000..9e6b9c0
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand/device_spi.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#include "nandx_util.h"
+#include "../nand_device.h"
+#include "device_spi.h"
+
+/* spi nand basic commands */
+static struct nand_cmds spi_cmds = {
+	.reset = 0xff,
+	.read_id = 0x9f,
+	.read_status = 0x0f,
+	.read_param_page = 0x03,
+	.set_feature = 0x1f,
+	.get_feature = 0x0f,
+	.read_1st = 0x13,
+	.read_2nd = -1,
+	.random_out_1st = 0x03,
+	.random_out_2nd = -1,
+	.program_1st = 0x02,
+	.program_2nd = 0x10,
+	.erase_1st = 0xd8,
+	.erase_2nd = -1,
+	.read_cache = 0x30,
+	.read_cache_last = 0x3f,
+	.program_cache = 0x02
+};
+
+/* spi nand extend commands */
+static struct spi_extend_cmds spi_extend_cmds = {
+	.die_select = 0xc2,
+	.write_enable = 0x06
+};
+
+/* means the start bit of addressing type */
+static struct nand_addressing spi_addressing = {
+	.row_bit_start = 0,
+	.block_bit_start = 0,
+	.plane_bit_start = 12,
+	.lun_bit_start = 0,
+};
+
+/* spi nand endurance */
+static struct nand_endurance spi_endurance = {
+	.pe_cycle = 100000,
+	.ecc_req = 1,
+	.max_bitflips = 1
+};
+
+/* array_busy, write_protect, erase_fail, program_fail */
+static struct nand_status spi_status[] = {
+	{
+		.array_busy = BIT(0),
+		.write_protect = BIT(1),
+		.erase_fail = BIT(2),
+		.program_fail = BIT(3)
+	}
+};
+
+/* measure time by the us */
+static struct nand_array_timing spi_array_timing = {
+	.tRST = 500,
+	.tWHR = 1,
+	.tR = 25,
+	.tRCBSY = 25,
+	.tFEAT = 1,
+	.tPROG = 600,
+	.tPCBSY = 600,
+	.tBERS = 10000,
+	.tDBSY = 1
+};
+
+/* spi nand device table */
+static struct device_spi spi_nand[] = {
+	/* Winbond */
+	{
+		NAND_DEVICE("W25N01GV",
+			    NAND_PACK_ID(0xef, 0xaa, 0x21, 0, 0, 0, 0, 0),
+			    3, 0, 3, 3,
+			    1, 1, 1, 1024, KB(128), KB(2), 64, 1,
+			    &spi_cmds, &spi_addressing, &spi_status[0],
+			    &spi_endurance, &spi_array_timing),
+		{
+			NAND_SPI_PROTECT(0xa0, 1, 2, 6),
+			NAND_SPI_CONFIG(0xb0, 4, 6, 0),
+			NAND_SPI_STATUS(0xc0, 4, 5),
+			NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
+		},
+		&spi_extend_cmds, 0xff, 0xff
+	},
+	{
+		NAND_DEVICE("W25M02GV",
+			    NAND_PACK_ID(0xef, 0xab, 0x21, 0, 0, 0, 0, 0),
+			    3, 0, 3, 3,
+			    1, 2, 1, 1024, KB(128), KB(2), 64, 1,
+			    &spi_cmds, &spi_addressing, &spi_status[0],
+			    &spi_endurance, &spi_array_timing),
+		{
+			NAND_SPI_PROTECT(0xa0, 1, 2, 6),
+			NAND_SPI_CONFIG(0xb0, 4, 6, 0),
+			NAND_SPI_STATUS(0xc0, 4, 5),
+			NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
+		},
+		&spi_extend_cmds, 0xff, 0xff
+	},
+	{
+		NAND_DEVICE("MX35LF1G",
+			    NAND_PACK_ID(0xc2, 0x12, 0x21, 0, 0, 0, 0, 0),
+			    2, 0, 3, 3,
+			    1, 1, 1, 1024, KB(128), KB(2), 64, 1,
+			    &spi_cmds, &spi_addressing, &spi_status[0],
+			    &spi_endurance, &spi_array_timing),
+		{
+			NAND_SPI_PROTECT(0xa0, 1, 2, 6),
+			NAND_SPI_CONFIG(0xb0, 4, 6, 1),
+			NAND_SPI_STATUS(0xc0, 4, 5),
+			NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
+		},
+		&spi_extend_cmds, 0xff, 0xff
+	},
+	/* Micron */
+	{
+		NAND_DEVICE("MT29F4G01ABAFDWB",
+			    NAND_PACK_ID(0x2c, 0x34, 0, 0, 0, 0, 0, 0),
+			    2, 0, 3, 3,
+			    1, 1, 1, 2048, KB(256), KB(4), 256, 1,
+			    &spi_cmds, &spi_addressing, &spi_status[0],
+			    &spi_endurance, &spi_array_timing),
+		{
+			NAND_SPI_PROTECT(0xa0, 1, 2, 6),
+			NAND_SPI_CONFIG(0xb0, 4, 6, 1),
+			NAND_SPI_STATUS(0xc0, 4, 5),
+			NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
+		},
+		&spi_extend_cmds, 0xff, 0xff
+	},
+	{
+		NAND_DEVICE("MT29F4G01ABBFDWB-IT:F",
+				NAND_PACK_ID(0x2c, 0x35, 0, 0, 0, 0, 0, 0),
+				2, 0, 3, 2,
+				1, 1, 1, 2048, KB(256), KB(4), 256, 1,
+				&spi_cmds, &spi_addressing, &spi_status[0],
+				&spi_endurance, &spi_array_timing),
+		{
+			NAND_SPI_PROTECT(0xa0, 1, 2, 6),
+			NAND_SPI_CONFIG(0xb0, 4, 6, 1),
+			NAND_SPI_STATUS(0xc0, 4, 5),
+			NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
+		},
+		&spi_extend_cmds, 0xff, 0xff
+	},
+	/* Toshiba */
+	{
+		NAND_DEVICE("TC58CYG2S0HRAIG",
+				NAND_PACK_ID(0x98, 0xbd, 0, 0, 0, 0, 0, 0),
+				2, NAND_IO8, 2, 1,
+				1, 1, 1, 2048, KB(256), KB(4), 256, 1,
+				&spi_cmds, &spi_addressing, &spi_status[0],
+				&spi_endurance, &spi_array_timing),
+		{
+			NAND_SPI_PROTECT(0xa0, 1, 2, 6),
+			NAND_SPI_CONFIG(0xb0, 4, 6, 1),
+			NAND_SPI_STATUS(0xc0, 4, 5),
+			NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
+		},
+		&spi_extend_cmds, 0xff, 0xff
+	},
+	{
+		NAND_DEVICE("GD5F4GQ4UB",
+			    NAND_PACK_ID(0xc8, 0xd4, 0, 0, 0, 0, 0, 0),
+			    2, 0, 3, 3,
+			    1, 1, 1, 2048, KB(256), KB(4), 256, 1,
+			    &spi_cmds, &spi_addressing, &spi_status[0],
+			    &spi_endurance, &spi_array_timing),
+		{
+			NAND_SPI_PROTECT(0xa0, 1, 2, 6),
+			NAND_SPI_CONFIG(0xb0, 4, 6, 1),
+			NAND_SPI_STATUS(0xc0, 4, 5),
+			NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
+		},
+		&spi_extend_cmds, 0xff, 0xff
+	},
+	{
+		NAND_DEVICE("NO-DEVICE",
+			    NAND_PACK_ID(0, 0, 0, 0, 0, 0, 0, 0), 0, 0, 0, 0,
+			    0, 0, 0, 0, 0, 0, 0, 1,
+			    &spi_cmds, &spi_addressing, &spi_status[0],
+			    &spi_endurance, &spi_array_timing),
+		{
+			NAND_SPI_PROTECT(0xa0, 1, 2, 6),
+			NAND_SPI_CONFIG(0xb0, 4, 6, 0),
+			NAND_SPI_STATUS(0xc0, 4, 5),
+			NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
+		},
+		&spi_extend_cmds, 0xff, 0xff
+	}
+};
+
+u8 spi_replace_rx_cmds(u8 mode)
+{
+	u8 rx_replace_cmds[] = {0x03, 0x3b, 0x6b, 0xbb, 0xeb};
+
+	return rx_replace_cmds[mode];
+}
+
+u8 spi_replace_tx_cmds(u8 mode)
+{
+	u8 tx_replace_cmds[] = {0x84, 0x32};
+
+	return tx_replace_cmds[mode];
+}
+
+u8 spi_replace_rx_col_cycle(u8 mode)
+{
+	u8 rx_replace_col_cycle[] = {3, 3, 3, 3, 4};
+
+	return rx_replace_col_cycle[mode];
+}
+
+u8 spi_replace_tx_col_cycle(u8 mode)
+{
+	u8 tx_replace_col_cycle[] = {2, 2};
+
+	return tx_replace_col_cycle[mode];
+}
+
+struct nand_device *nand_spi_get_device(int index)
+{
+	return &spi_nand[index].dev;
+}
+
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand/device_spi.h b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand/device_spi.h
new file mode 100644
index 0000000..e0ca416
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand/device_spi.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#ifndef __DEVICE_SPI_H__
+#define __DEVICE_SPI_H__
+
+/*
+ * extend commands
+ * @die_select: select nand device die command
+ * @write_enable: write enable command sent before writing data to spi nand;
+ *    the spi nand device automatically disables writes again after the write is done
+ */
+struct spi_extend_cmds {
+	short die_select;
+	short write_enable;
+};
+
+/*
+ * protection feature register
+ * @addr: register address
+ * @wp_en_bit: write protection enable bit
+ * @bp_start_bit: block protection mask start bit
+ * @bp_end_bit: block protection mask end bit
+ */
+struct feature_protect {
+	u8 addr;
+	u8 wp_en_bit;
+	u8 bp_start_bit;
+	u8 bp_end_bit;
+};
+
+/*
+ * configuration feature register
+ * @addr: register address
+ * @ecc_en_bit: in-die ecc enable bit
+ * @otp_en_bit: enter otp access mode bit
+ * @need_qe: quad io enable bit
+ */
+struct feature_config {
+	u8 addr;
+	u8 ecc_en_bit;
+	u8 otp_en_bit;
+	u8 need_qe;
+};
+
+/*
+ * status feature register
+ * @addr: register address
+ * @ecc_start_bit: ecc status mask start bit for error bits number
+ * @ecc_end_bit: ecc status mask end bit for error bits number
+ * note that:
+ *   operation status (e.g. array busy status) can be seen in struct nand_status
+ */
+struct feature_status {
+	u8 addr;
+	u8 ecc_start_bit;
+	u8 ecc_end_bit;
+};
+
+/*
+ * character feature register
+ * @addr: register address
+ * @die_sel_bit: die select bit
+ * @drive_start_bit: drive strength mask start bit
+ * @drive_end_bit: drive strength mask end bit
+ */
+struct feature_character {
+	u8 addr;
+	u8 die_sel_bit;
+	u8 drive_start_bit;
+	u8 drive_end_bit;
+};
+
+/*
+ * spi features
+ * @protect: protection feature register
+ * @config: configuration feature register
+ * @status: status feature register
+ * @character: character feature register
+ */
+struct spi_features {
+	struct feature_protect protect;
+	struct feature_config config;
+	struct feature_status status;
+	struct feature_character character;
+};
+
+/*
+ * device_spi
+ *    configurations of spi nand device table
+ * @dev: base information of nand device
+ * @feature: feature information for spi nand
+ * @extend_cmds: extended the nand base commands
+ * @tx_mode_mask: tx mode mask for chip write
+ * @rx_mode_mask: rx mode mask for chip read
+ */
+struct device_spi {
+	struct nand_device dev;
+	struct spi_features feature;
+	struct spi_extend_cmds *extend_cmds;
+
+	u8 tx_mode_mask;
+	u8 rx_mode_mask;
+};
+
+#define NAND_SPI_PROTECT(addr, wp_en_bit, bp_start_bit, bp_end_bit) \
+	{addr, wp_en_bit, bp_start_bit, bp_end_bit}
+
+#define NAND_SPI_CONFIG(addr, ecc_en_bit, otp_en_bit, need_qe) \
+	{addr, ecc_en_bit, otp_en_bit, need_qe}
+
+#define NAND_SPI_STATUS(addr, ecc_start_bit, ecc_end_bit) \
+	{addr, ecc_start_bit, ecc_end_bit}
+
+#define NAND_SPI_CHARACTER(addr, die_sel_bit, drive_start_bit, drive_end_bit) \
+	{addr, die_sel_bit, drive_start_bit, drive_end_bit}
+
+static inline struct device_spi *device_to_spi(struct nand_device *dev)
+{
+	return container_of(dev, struct device_spi, dev);
+}
+
+u8 spi_replace_rx_cmds(u8 mode);
+u8 spi_replace_tx_cmds(u8 mode);
+u8 spi_replace_rx_col_cycle(u8 mode);
+u8 spi_replace_tx_col_cycle(u8 mode);
+
+#endif /* __DEVICE_SPI_H__ */
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand/nand_slc.c b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand/nand_slc.c
new file mode 100644
index 0000000..4e827fe
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand/nand_slc.c
@@ -0,0 +1,586 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "../nand_chip.h"
+#include "../nand_device.h"
+#include "../nfi.h"
+#include "../nand_base.h"
+#include "device_slc.h"
+#include "nand_slc.h"
+
+static int nand_slc_read_status_enhanced(struct nand_base *nand,
+					 int row)
+{
+	struct device_slc *dev_slc = device_to_slc(nand->dev);
+	struct nand_device *dev = nand->dev;
+	struct nfi *nfi = nand->nfi;
+	u8 status = 0;
+
+	nfi->reset(nfi);
+	nfi->send_cmd(nfi, dev_slc->extend_cmds->read_status_enhanced);
+	nfi->send_addr(nfi, 0, row, dev->col_cycle, dev->row_cycle);
+	nfi->wait_ready(nfi, NAND_WAIT_TIME, dev->array_timing->tWHR);
+	nfi->read_bytes(nfi, &status, 1);
+
+	return status;
+}
+
+static int nand_slc_cache_read_page(struct nand_chip *chip,
+				    struct nand_ops *ops, int count)
+{
+	struct nand_base *nand = chip->nand;
+	struct nand_device *dev = nand->dev;
+	int i, ret = 0;
+	int row = 0, col, sectors;
+	u8 *data, *oob;
+
+	for (i = 0; i <= count; i++) {
+		if (i < count) {
+			row = ops[i].row;
+			col = ops[i].col;
+
+			nand->addressing(nand, &row, &col);
+		}
+
+		if (i == 0) {
+			ops[i].status = nand->read_page(nand, row);
+			continue;
+		}
+
+		if (i == count - 1) {
+			ops[i].status = nand->read_last(nand);
+			continue;
+		}
+
+		ops[i].status = nand->read_cache(nand, row);
+		if (ops[i - 1].status < 0) {
+			ret = ops[i - 1].status;
+			continue;
+		}
+
+		row = ops[i - 1].row;
+		col = ops[i - 1].col;
+		data = ops[i - 1].data;
+		oob = ops[i - 1].oob;
+		sectors = ops[i - 1].len / chip->sector_size;
+		ops[i - 1].status = nand->read_data(nand, row, col, sectors,
+						    data, oob);
+		if (ops[i - 1].status > 0) {
+			ops[i - 1].status =
+				ops[i - 1].status >=
+				dev->endurance->max_bitflips ? -ENANDFLIPS : 0;
+		}
+
+		ret = min(ret, ops[i - 1].status);
+	}
+
+	return ret;
+}
+
+static int nand_slc_read_multi(struct nand_base *nand, int row)
+{
+	struct device_slc *dev_slc = device_to_slc(nand->dev);
+	struct nand_device *dev = nand->dev;
+	struct nfi *nfi = nand->nfi;
+
+	nfi->reset(nfi);
+	nfi->send_cmd(nfi, dev_slc->extend_cmds->read_multi_1st);
+	nfi->send_addr(nfi, 0, row, dev->col_cycle, dev->row_cycle);
+	nfi->send_cmd(nfi, dev_slc->extend_cmds->read_multi_2nd);
+	nfi->trigger(nfi);
+
+	return nfi->wait_ready(nfi, NAND_WAIT_BUSY,
+			       dev->array_timing->tDBSY);
+}
+
+static int nand_slc_multi_read_page(struct nand_chip *chip,
+				    struct nand_ops *ops, int count)
+{
+	struct nand_base *nand = chip->nand;
+	struct nand_slc *slc = base_to_slc(nand);
+	struct nand_device *dev = nand->dev;
+	int row[2], col[2], sectors;
+	int i, j, ret = 0;
+	u8 *data, *oob;
+
+	for (i = 0; i < count; i += 2) {
+		for (j = 0; j < 2; j++) {
+			row[j] = ops[i + j].row;
+			col[j] = ops[i + j].col;
+
+			nand->addressing(nand, &row[j], &col[j]);
+		}
+
+		ops[i].status = slc->read_multi(nand, row[0]);
+		if (ops[i].status < 0) {
+			ret = ops[i].status;
+			continue;
+		}
+
+		ops[i + 1].status = nand->read_page(nand, row[1]);
+		if (ops[i + 1].status < 0) {
+			ret = ops[i + 1].status;
+			continue;
+		}
+
+		for (j = 0; j < 2; j++) {
+			data = ops[i + j].data;
+			oob = ops[i + j].oob;
+			sectors = ops[i + j].len / chip->sector_size;
+			ops[i + j].status = nand->read_data(nand, row[j],
+							    col[j], sectors,
+							    data, oob);
+			if (ops[i + j].status > 0) {
+				ops[i + j].status =
+					ops[i + j].status >=
+					dev->endurance->max_bitflips ?
+					-ENANDFLIPS : 0;
+			}
+
+			ret = min(ret, ops[i + j].status);
+		}
+	}
+
+	return ret;
+}
+
+static int nand_chip_slc_read_page(struct nand_chip *chip,
+				   struct nand_ops *ops,
+				   int count)
+{
+	struct nand_base *nand = chip->nand;
+	struct nand_slc *slc = base_to_slc(nand);
+
+	if (slc->cache)
+		return slc->cache_read_page(chip, ops, count);
+
+	if (slc->multi)
+		return slc->multi_read_page(chip, ops, count);
+
+	return slc->read_page(chip, ops, count);
+}
+
+static int nand_slc_cache_write_page(struct nand_chip *chip,
+				     struct nand_ops *ops, int count)
+{
+	struct nand_base *nand = chip->nand;
+	struct nand_device *dev = nand->dev;
+	int i, ret = 0;
+	int row, col;
+	u8 *data, *oob;
+
+	for (i = 0; i < count; i++) {
+		row = ops[i].row;
+		col = ops[i].col;
+
+		nand->addressing(nand, &row, &col);
+
+		ops[i].status = nand->write_enable(nand);
+		if (ops[i].status) {
+			pr_debug("Write Protect at %x!\n", row);
+			ops[i].status = -ENANDWP;
+			return -ENANDWP;
+		}
+
+		data = ops[i].data;
+		oob = ops[i].oob;
+		ops[i].status = nand->program_data(nand, row, col, data, oob);
+		if (ops[i].status < 0) {
+			ret = ops[i].status;
+			continue;
+		}
+
+		if (i == count - 1)
+			ops[i].status = nand->program_page(nand, -1);
+		else
+			ops[i].status = nand->program_cache(nand);
+		if (ops[i].status < 0) {
+			ret = ops[i].status;
+			continue;
+		}
+
+		ops[i].status = nand->read_status(nand);
+		if (ops[i].status & dev->status->program_fail)
+			ops[i].status = -ENANDWRITE;
+
+		ret = min(ret, ops[i].status);
+	}
+
+	return ret;
+}
+
+static int nand_slc_program_multi(struct nand_base *nand)
+{
+	struct device_slc *dev_slc = device_to_slc(nand->dev);
+	struct nand_device *dev = nand->dev;
+	struct nfi *nfi = nand->nfi;
+
+	nfi->reset(nfi);
+	nfi->send_cmd(nfi, dev_slc->extend_cmds->program_multi_2nd);
+	nfi->trigger(nfi);
+
+	return nfi->wait_ready(nfi, NAND_WAIT_BUSY,
+			       dev->array_timing->tDBSY);
+}
+
+static int nand_slc_multi_write_page(struct nand_chip *chip,
+				     struct nand_ops *ops, int count)
+{
+	struct nand_base *nand = chip->nand;
+	struct nand_slc *slc = base_to_slc(nand);
+	struct nand_device *dev = nand->dev;
+	int i, j, idx, ret = 0;
+	int row[2], col[2];
+	u8 *data, *oob;
+
+	for (i = 0; i < count; i++) {
+		idx = i & 1;
+		row[idx] = ops[i].row;
+		col[idx] = ops[i].col;
+
+		nand->addressing(nand, &row[idx], &col[idx]);
+
+		ops[i].status = nand->write_enable(nand);
+		if (ops[i].status) {
+			pr_debug("Write Protect at %x!\n", row[idx]);
+			ops[i].status = -ENANDWP;
+			return -ENANDWP;
+		}
+
+		data = ops[i].data;
+		oob = ops[i].oob;
+		ops[i].status = nand->program_data(nand, row[idx], col[idx],
+						   data, oob);
+		if (ops[i].status < 0) {
+			ret = ops[i].status;
+			continue;
+		}
+
+		if (idx)
+			ops[i].status = slc->program_multi(nand);
+		else
+			ops[i].status = nand->program_page(nand, -1);
+
+		if (ops[i].status < 0) {
+			ret = ops[i].status;
+			continue;
+		}
+
+		for (j = 0; j < 2 && idx == 1; j++) {
+			ops[i + j - 1].status =
+				slc->read_status_enhanced(nand, row[j]);
+			if (ops[i + j - 1].status & dev->status->program_fail)
+				ops[i + j - 1].status = -ENANDWRITE;
+
+			ret = min(ret, ops[i + j - 1].status);
+		}
+	}
+
+	return ret;
+}
+
+static int nand_chip_slc_write_page(struct nand_chip *chip,
+				    struct nand_ops *ops, int count)
+{
+	struct nand_base *nand = chip->nand;
+	struct nand_slc *slc = base_to_slc(nand);
+
+	if (slc->cache)
+		return slc->cache_write_page(chip, ops, count);
+
+	if (slc->multi)
+		return slc->multi_write_page(chip, ops, count);
+
+	return slc->write_page(chip, ops, count);
+}
+
+static int nand_slc_erase_multi(struct nand_base *nand, int row)
+{
+	struct device_slc *dev_slc = device_to_slc(nand->dev);
+	struct nand_device *dev = nand->dev;
+	struct nfi *nfi = nand->nfi;
+
+	nfi->reset(nfi);
+	nfi->send_cmd(nfi, dev_slc->extend_cmds->erase_multi_1st);
+	nfi->send_addr(nfi, 0, row, dev->col_cycle, dev->row_cycle);
+	nfi->send_cmd(nfi, dev_slc->extend_cmds->erase_multi_2nd);
+	nfi->trigger(nfi);
+
+	return nfi->wait_ready(nfi, NAND_WAIT_BUSY,
+			       dev->array_timing->tDBSY);
+}
+
+/*
+ * Erase @count blocks as interleaved two-plane pairs.
+ *
+ * Even-indexed ops run the multi-plane erase prologue; the following
+ * odd-indexed op issues the normal erase that commits both planes,
+ * after which the enhanced status read reports pass/fail per plane.
+ * Returns 0 on success or the most negative per-op status seen; a
+ * write-protect failure aborts the whole batch with -ENANDWP.
+ * NOTE(review): assumes @count is even so every prologue is paired -
+ * confirm at the call sites.
+ */
+static int nand_slc_multi_erase_block(struct nand_chip *chip,
+				      struct nand_ops *ops, int count)
+{
+	struct nand_base *nand = chip->nand;
+	struct nand_slc *slc = base_to_slc(nand);
+	struct nand_device *dev = nand->dev;
+	int i, j, idx, ret = 0;
+	int row[2], col[2];
+
+	for (i = 0; i < count; i++) {
+		/* idx 0: first plane of a pair, idx 1: second plane */
+		idx = i & 1;
+		row[idx] = ops[i].row;
+		col[idx] = ops[i].col;
+
+		nand->addressing(nand, &row[idx], &col[idx]);
+
+		ops[i].status = nand->write_enable(nand);
+		if (ops[i].status) {
+			pr_debug("Write Protect at %x!\n", row[idx]);
+			ops[i].status = -ENANDWP;
+			return -ENANDWP;
+		}
+
+		/* Second plane commits the erase; first plane only arms it. */
+		if (idx == 1)
+			ops[i].status = nand->erase_block(nand, row[idx]);
+		else
+			ops[i].status = slc->erase_multi(nand, row[idx]);
+
+		if (ops[i].status < 0) {
+			ret = min(ret, ops[i].status);
+			continue;
+		}
+
+		/* After the commit, collect per-plane status for both ops. */
+		for (j = 0; j < 2 && idx == 1; j++) {
+			ops[i + j - 1].status =
+				slc->read_status_enhanced(nand, row[j]);
+			if (ops[i + j - 1].status & dev->status->program_fail)
+				ops[i + j - 1].status = -ENANDWRITE;
+
+			ret = min(ret, ops[i + j - 1].status);
+		}
+	}
+
+	return ret;
+}
+
+/* Route a block erase to the multi-plane or single-plane handler. */
+static int nand_chip_slc_erase_block(struct nand_chip *chip,
+				     struct nand_ops *ops, int count)
+{
+	struct nand_slc *slc = base_to_slc(chip->nand);
+
+	return slc->multi ? slc->multi_erase_block(chip, ops, count) :
+	       slc->erase_block(chip, ops, count);
+}
+
+/* Push the device's page/spare geometry and ECC requirement to the NFI. */
+static int nand_slc_set_format(struct nand_base *nand)
+{
+	struct nand_device *dev = nand->dev;
+	struct nfi_format format = {
+		.page_size = dev->page_size,
+		.spare_size = dev->spare_size,
+		.ecc_req = dev->endurance->ecc_req,
+	};
+
+	return nand->nfi->set_format(nand->nfi, &format);
+}
+
+/*
+ * Switch the device (optionally) and the NFI to SDR timing mode
+ * @timing_mode.
+ *
+ * When the part exposes a timing-mode feature register, the mode is
+ * written there and read back for verification before the controller
+ * timing is reprogrammed. Returns 0, -EINVAL for an out-of-range
+ * mode, -EFAULT on readback mismatch, or a set/get_feature error.
+ */
+static int nand_slc_set_timing(struct nand_base *nand, int timing_mode)
+{
+	struct device_slc *dev_slc = device_to_slc(nand->dev);
+	struct nand_slc *slc = base_to_slc(nand);
+	struct nand_sdr_timing *timing;
+	struct slc_timing_mode *mode;
+	struct nfi *nfi = nand->nfi;
+	u32 val, back;
+	int ret;
+
+	if (timing_mode < 0 || timing_mode >= SLC_TIMING_NUM)
+		return -EINVAL;
+
+	slc->timing_mode = timing_mode;
+	timing = &dev_slc->timing[timing_mode];
+
+	/* Device-side switch only if the part has a timing feature reg. */
+	if (dev_slc->timing_mode) {
+		mode = dev_slc->timing_mode;
+		val = (u32)mode->mode[timing_mode];
+		ret = nand->set_feature(nand, mode->addr, (u8 *)&val, 4);
+		if (ret) {
+			pr_err("set_feature fail.\n");
+			return ret;
+		}
+
+		/* Read back to confirm the device accepted the mode. */
+		ret = nand->get_feature(nand, mode->addr, (u8 *)&back, 4);
+		if (ret)
+			return ret;
+
+		if (mode->mode[timing_mode] != back)
+			return -EFAULT;
+	}
+
+	return nfi->set_timing(nfi, timing, NAND_TIMING_SDR);
+}
+
+/*
+ * Program the output drive strength feature register and verify it
+ * by reading the value back. Returns 0, -EOPNOTSUPP for an unknown
+ * level, or -EFAULT when the readback disagrees.
+ */
+static int nand_slc_set_drive(struct nand_base *nand, int drive_level)
+{
+	struct device_slc *dev_slc = device_to_slc(nand->dev);
+	struct slc_drive_strength *drive = &dev_slc->drive_strength;
+	u8 readback[4] = { 0xff, 0xff, 0xff, 0xff };
+	u8 request[4] = { 0 };
+
+	switch (drive_level) {
+	case CHIP_DRIVE_NORMAL:
+		request[0] = drive->normal;
+		break;
+	case CHIP_DRIVE_HIGH:
+		request[0] = drive->high;
+		break;
+	case CHIP_DRIVE_MIDDLE:
+		request[0] = drive->middle;
+		break;
+	case CHIP_DRIVE_LOW:
+		request[0] = drive->low;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	nand->set_feature(nand, drive->addr, request, 4);
+	nand->get_feature(nand, drive->addr, readback, 4);
+
+	return (request[0] == readback[0]) ? 0 : -EFAULT;
+}
+
+/*
+ * Chip-level control entry for the SLC wrapper.
+ *
+ * Handles drive strength, timing mode and the cache/multi-plane
+ * operation toggles locally; unrecognized commands are forwarded to
+ * the NFI. Returns 0 or a negative errno.
+ * NOTE(review): @args is dereferenced as int unconditionally - every
+ * caller must pass a valid pointer even for forwarded commands.
+ */
+static int nand_chip_slc_ctrl(struct nand_chip *chip, int cmd,
+			      void *args)
+{
+	struct nand_base *nand = chip->nand;
+	struct nand_slc *slc = base_to_slc(nand);
+	struct nfi *nfi = nand->nfi;
+	int ret = 0, value = *(int *)args;
+
+	switch (cmd) {
+	case CHIP_CTRL_DRIVE_STRENGTH:
+		ret = nand_slc_set_drive(nand, value);
+		break;
+
+	case CHIP_CTRL_TIMING_MODE:
+		ret = nand_slc_set_timing(nand, value);
+		break;
+
+	case CHIP_CTRL_OPS_CACHE:
+		slc->cache = value ? true : false;
+		break;
+
+	case CHIP_CTRL_OPS_MULTI:
+		/* Multi-plane only makes sense on multi-plane parts. */
+		if (chip->plane_num > 1)
+			slc->multi = value ? true : false;
+		else
+			ret = -EOPNOTSUPP;
+		break;
+
+	case CHIP_CTRL_PSLC_MODE:
+	case CHIP_CTRL_DDR_MODE:
+	case CHIP_CTRL_ONDIE_ECC:
+		/* Not applicable to plain SLC parts. */
+		ret = -EOPNOTSUPP;
+		break;
+
+	default:
+		ret = nfi->nfi_ctrl(nfi, cmd, args);
+		break;
+	}
+
+	return ret;
+}
+
+/* Resume: reset the device, then restore NFI format and timing mode. */
+static int nand_chip_slc_resume(struct nand_chip *chip)
+{
+	struct nand_slc *slc = base_to_slc(chip->nand);
+
+	chip->nand->reset(chip->nand);
+	nand_slc_set_format(chip->nand);
+
+	return nand_slc_set_timing(chip->nand, slc->timing_mode);
+}
+
+/*
+ * nand_init - stack the SLC wrapper on top of the existing chip ops
+ * @chip: chip whose read/write/erase/ctrl/resume ops get redirected
+ *
+ * Allocates a nand_slc, snapshots the parent base ops and the original
+ * chip ops (so the wrappers can fall back to them), installs the SLC
+ * handlers, then detects the device and programs format and timing.
+ * Returns the wrapped nand_base, or NULL on failure.
+ *
+ * NOTE(review): the chip ops are redirected before the detect/format/
+ * timing steps; on the error path the slc wrapper is freed while the
+ * chip ops still point at the wrapper handlers - confirm callers treat
+ * a NULL return as fatal and never use @chip afterwards.
+ */
+struct nand_base *nand_init(struct nand_chip *chip)
+{
+	struct nand_base *nand;
+	struct nand_slc *slc;
+	struct device_slc *dev_slc;
+	int ret;
+
+	slc = mem_alloc(1, sizeof(struct nand_slc));
+	if (!slc) {
+		pr_err("alloc nand_slc fail\n");
+		return NULL;
+	}
+
+	/* Inherit the parent's base ops wholesale. */
+	slc->parent = chip->nand;
+	nand = &slc->base;
+	memcpy(nand, slc->parent, sizeof(struct nand_base));
+
+	slc->cache = slc->multi = false;
+
+	/* Keep the original chip ops so the wrappers can fall back. */
+	slc->read_page = chip->read_page;
+	slc->write_page = chip->write_page;
+	slc->erase_block = chip->erase_block;
+
+	slc->read_multi = nand_slc_read_multi;
+	slc->program_multi = nand_slc_program_multi;
+	slc->erase_multi = nand_slc_erase_multi;
+	slc->read_status_enhanced = nand_slc_read_status_enhanced;
+
+	slc->cache_read_page = nand_slc_cache_read_page;
+	slc->multi_read_page = nand_slc_multi_read_page;
+	slc->cache_write_page = nand_slc_cache_write_page;
+	slc->multi_write_page = nand_slc_multi_write_page;
+	slc->multi_erase_block = nand_slc_multi_erase_block;
+
+	/* Redirect the chip entry points to the SLC dispatchers. */
+	chip->read_page = nand_chip_slc_read_page;
+	chip->write_page = nand_chip_slc_write_page;
+	chip->erase_block = nand_chip_slc_erase_block;
+	chip->chip_ctrl = nand_chip_slc_ctrl;
+	chip->resume = nand_chip_slc_resume;
+	chip->nand_type = NAND_SLC;
+
+	ret = nand_detect_device(nand);
+	if (ret) {
+		pr_err("nand_detect_device fail.\n");
+		goto error;
+	}
+
+	ret = nand_slc_set_format(nand);
+	if (ret) {
+		pr_err("nand_slc_set_format fail.\n");
+		goto error;
+	}
+
+	dev_slc = device_to_slc(nand->dev);
+	ret = nand_slc_set_timing(nand, dev_slc->default_mode);
+	if (ret) {
+		pr_err("nand_slc_set_timing fail.\n");
+		goto error;
+	}
+
+	return nand;
+
+error:
+	mem_free(slc);
+	return NULL;
+}
+
+/*
+ * nand_exit - tear down the SLC wrapper created by nand_init()
+ * @nand: the wrapped base returned by nand_init()
+ */
+void nand_exit(struct nand_base *nand)
+{
+	struct nand_slc *slc = base_to_slc(nand);
+
+	nand_base_exit(slc->parent);
+	/*
+	 * Free the container, not the embedded base pointer: this
+	 * matches nand_spi_exit() and stays correct even if "base"
+	 * ever stops being the first member of struct nand_slc.
+	 */
+	mem_free(slc);
+}
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand/nand_slc.h b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand/nand_slc.h
new file mode 100644
index 0000000..fc76738
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand/nand_slc.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#ifndef __NAND_SLC_H__
+#define __NAND_SLC_H__
+
+/*
+ * slc nand handler
+ * @base: slc nand base functions
+ * @parent: common parent nand base functions
+ * @cache: nand cache operation flag
+ * @multi: nand multi-plane operation flag
+ * @timing_mode: current timing mode on nand device
+ * @read_multi: slc nand advanced base function for multi-plane read
+ * @program_multi: slc nand advanced base function for multi-plane write
+ * @erase_multi: slc nand advanced base function for multi-plane erase
+ * @read_status_enhanced: slc nand advanced base function for enhanced
+ *    status read
+ */
+struct nand_slc {
+	struct nand_base base;
+	struct nand_base *parent;
+
+	bool cache;
+	bool multi;
+	int timing_mode;
+
+	/* slc advanced base functions */
+	int (*read_multi)(struct nand_base *nand, int row);
+	int (*program_multi)(struct nand_base *nand);
+	int (*erase_multi)(struct nand_base *nand, int row);
+	int (*read_status_enhanced)(struct nand_base *nand, int row);
+
+	/* original chip ops, saved so the dispatchers can fall back */
+	int (*read_page)(struct nand_chip *chip, struct nand_ops *ops,
+			 int count);
+	int (*write_page)(struct nand_chip *chip, struct nand_ops *ops,
+			  int count);
+	int (*erase_block)(struct nand_chip *chip, struct nand_ops *ops,
+			   int count);
+
+	/* slc advanced page/block operations (cache & multi-plane) */
+	int (*cache_read_page)(struct nand_chip *chip, struct nand_ops *ops,
+			       int count);
+	int (*multi_read_page)(struct nand_chip *chip, struct nand_ops *ops,
+			       int count);
+	int (*cache_write_page)(struct nand_chip *chip, struct nand_ops *ops,
+				int count);
+	int (*multi_write_page)(struct nand_chip *chip, struct nand_ops *ops,
+				int count);
+	int (*multi_erase_block)(struct nand_chip *chip, struct nand_ops *ops,
+				 int count);
+};
+
+/* Map an embedded nand_base back to its containing nand_slc. */
+static inline struct nand_slc *base_to_slc(struct nand_base *base)
+{
+	return container_of(base, struct nand_slc, base);
+}
+
+#endif /* __NAND_SLC_H__ */
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand/nand_spi.c b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand/nand_spi.c
new file mode 100644
index 0000000..4c0fd67
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand/nand_spi.c
@@ -0,0 +1,739 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "../nand_chip.h"
+#include "../nand_device.h"
+#include "../nfi.h"
+#include "../nand_base.h"
+#include "device_spi.h"
+#include "nand_spi.h"
+
+#define READY_TIMEOUT   500000 /* us */
+#define SPI_GPRAM_MAX_LEN       160
+
+/*
+ * Lookup table mapping a 4-bit data nibble to the 16-bit line pattern
+ * that carries the same bits over four QPI bus cycles (every value is
+ * built only from hex digits 0xc/0xd; digit placement presumably
+ * follows the controller's byte/cycle order - confirm against the
+ * SNFI spec). Indexed by nibble value in spi2qpi_pattern().
+ * NOTE(review): appears to have no users outside this file - could
+ * likely be static const; confirm before changing linkage.
+ */
+unsigned short spi_to_qpi[] = {0xcccc, 0xcdcc, 0xdccc, 0xddcc, 0xcccd,
+			       0xcdcd, 0xdccd, 0xddcd, 0xccdc, 0xcddc, 0xdcdc,
+			       0xdddc, 0xccdd, 0xcddd, 0xdcdd, 0xdddd
+			      };
+
+/*
+ * Expand @byte_spi bytes of 1-bit SPI data taken from @data into QPI
+ * line patterns in @qpi_data: every source byte becomes one 32-bit
+ * word built from two spi_to_qpi[] nibble lookups, so @qpi_data must
+ * hold 4 * @byte_spi bytes.
+ * NOTE(review): the source comes from a single u32, so byte_spi <= 4
+ * is implied - confirm callers never pass more.
+ */
+static void spi2qpi_pattern(u32 byte_spi, u32 data, u8 *qpi_data)
+{
+	u32 val_spi_32, val_qpi_32, cycle;
+	u8  val_spi_8;
+	u32 *data_32 = (u32 *)qpi_data;
+	int idx_8;
+
+	val_spi_32 = data;
+	cycle = byte_spi;
+
+	/* Walk the source bytes from last to first. */
+	while (byte_spi) {
+		idx_8 = byte_spi % 4;
+		if (idx_8 == 0)
+			idx_8 = 3;
+		else
+			idx_8 = idx_8 - 1;
+
+		for (; idx_8 >= 0; idx_8--) {
+			byte_spi--;
+			val_spi_8 = *(((u8 *)&val_spi_32) + idx_8);
+			/* low nibble -> high half, high nibble -> low half */
+			val_qpi_32 = (spi_to_qpi[val_spi_8 >> 4])
+				     | (spi_to_qpi[val_spi_8 & 0xF] << 16);
+
+			data_32[--cycle] = val_qpi_32;
+		}
+	}
+
+	return;
+}
+
+/*
+ * Build the QPI pattern for a command + column address phase.
+ *
+ * @read selects opcode 0x6b vs 0x34 (presumably "read from cache x4"
+ * and "program load random data x4" per common SPI-NAND command sets -
+ * confirm against the supported parts' datasheets); @cycle is the
+ * number of address cycles, and the read path packs the address with
+ * a different alignment. Result is written to @qpi_val via
+ * spi2qpi_pattern().
+ */
+static void spi2qpi(u32 addr, u8 cycle, bool read, u8 *qpi_val)
+{
+	u32 tmp_addr, spi_val;
+	u8 cmd;
+
+	if (read) {
+		cmd = 0x6b;
+		tmp_addr = nandx_cpu_to_be32(addr) >>
+			   ((4 - cycle + 1) << 3);
+	} else {
+		cmd = 0x34;
+		tmp_addr = nandx_cpu_to_be32(addr) >> ((4 - cycle) << 3);
+	}
+
+	spi_val = cmd | (tmp_addr << 8);
+
+	spi2qpi_pattern(cycle + 1, spi_val, qpi_val);
+
+	return;
+}
+
+/*
+ * Read the device status register via the get-feature path.
+ *
+ * Returns the raw 8-bit status value (never negative).
+ */
+static int nand_spi_read_status(struct nand_base *nand)
+{
+	struct device_spi *dev = device_to_spi(nand->dev);
+	u8 status = 0;	/* don't return stack garbage if get_feature fails */
+
+	nand->get_feature(nand, dev->feature.status.addr, &status, 1);
+
+	return status;
+}
+
+/*
+ * Poll the array-busy status bit until the device is ready or
+ * @timeout microseconds have elapsed. Returns 0 when ready, -EBUSY
+ * on timeout.
+ */
+static int nand_spi_wait_ready(struct nand_base *nand, u32 timeout)
+{
+	u64 end = get_current_time_us() + timeout;
+	int status;
+
+	do {
+		status = nand_spi_read_status(nand) &
+			 nand->dev->status->array_busy;
+		if (!status)
+			return 0;
+	} while (get_current_time_us() <= end);
+
+	/*
+	 * One final sample: the deadline may have expired while the
+	 * device was actually becoming ready (e.g. after preemption
+	 * between the status read and the time check).
+	 */
+	status = nand_spi_read_status(nand) & nand->dev->status->array_busy;
+
+	return status ? -EBUSY : 0;
+}
+
+/* Program the SNFI controller operation mode (MAC/AUTO/custom). */
+static int nand_spi_set_op_mode(struct nand_base *nand, u8 mode)
+{
+	return nand->nfi->nfi_ctrl(nand->nfi, SNFI_CTRL_OP_MODE,
+				   (void *)&mode);
+}
+
+/*
+ * Set or clear @mask in feature register @addr (read-modify-write)
+ * and verify the result by reading it back.
+ * Returns 0 on success, -EFAULT when the readback disagrees.
+ */
+static int nand_spi_set_config(struct nand_base *nand, u8 addr, u8 mask,
+			       bool en)
+{
+	u8 want = 0, readback = 0;
+
+	nand->get_feature(nand, addr, &want, 1);
+
+	if (en)
+		want |= mask;
+	else
+		want &= ~mask;
+
+	nand->set_feature(nand, addr, &want, 1);
+	nand->get_feature(nand, addr, &readback, 1);
+
+	return (readback == want) ? 0 : -EFAULT;
+}
+
+/*
+ * Select the die (LUN) that holds absolute page @*row and rebase
+ * @*row to be relative to that die.
+ *
+ * Single-LUN devices return immediately. Selection goes either
+ * through the "character" feature register (when no dedicated
+ * die-select command exists, i.e. die_select == -1) or via the
+ * vendor die-select command. Returns 0, or -EFAULT when the feature
+ * readback does not show the die bit.
+ */
+static int nand_spi_die_select(struct nand_base *nand, int *row)
+{
+	struct device_spi *dev = device_to_spi(nand->dev);
+	struct nfi *nfi = nand->nfi;
+	int lun_blocks, block_pages, lun, blocks;
+	int page = *row, ret = 0;
+	u8 param = 0, die_sel;
+
+	if (nand->dev->lun_num < 2)
+		return 0;
+
+	block_pages = nand_block_pages(nand->dev);
+	lun_blocks = nand_lun_blocks(nand->dev);
+	blocks = div_down(page, block_pages);
+	lun = div_down(blocks, lun_blocks);
+
+	if (dev->extend_cmds->die_select == -1) {
+		/* No dedicated command: toggle the die bit via features. */
+		die_sel = (u8)(lun << dev->feature.character.die_sel_bit);
+		nand->get_feature(nand, dev->feature.character.addr, &param, 1);
+		param |= die_sel;
+		nand->set_feature(nand, dev->feature.character.addr, &param, 1);
+		param = 0;
+		nand->get_feature(nand, dev->feature.character.addr, &param, 1);
+		ret = (param & die_sel) ? 0 : -EFAULT;
+	} else {
+		nfi->reset(nfi);
+		nfi->send_cmd(nfi, dev->extend_cmds->die_select);
+		nfi->send_addr(nfi, lun, 0, 1, 0);
+		nfi->trigger(nfi);
+	}
+
+	/* Make the row address relative to the selected die. */
+	*row =  page - (lun_blocks * block_pages) * lun;
+
+	return ret;
+}
+
+/* Switch to MAC (PIO) mode, then delegate chip-select to the parent. */
+static int nand_spi_select_device(struct nand_base *nand, int cs)
+{
+	struct nand_base *parent = base_to_spi(nand)->parent;
+
+	nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
+
+	return parent->select_device(nand, cs);
+}
+
+/* Reset the device over the MAC path and wait for it to become ready. */
+static int nand_spi_reset(struct nand_base *nand)
+{
+	struct nand_base *parent = base_to_spi(nand)->parent;
+
+	nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
+	parent->reset(nand);
+
+	return nand_spi_wait_ready(nand, READY_TIMEOUT);
+}
+
+/* Read @count ID bytes via the parent after forcing MAC mode. */
+static int nand_spi_read_id(struct nand_base *nand, u8 *id, int count)
+{
+	struct nand_base *parent = base_to_spi(nand)->parent;
+
+	nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
+
+	return parent->read_id(nand, id, count);
+}
+
+/*
+ * Read @count bytes of the parameter page (stored in the OTP area)
+ * into @data.
+ *
+ * Temporarily sets the OTP-enable bit, reads page 0x01 with host ECC
+ * switched off, then clears the OTP bit again. Always returns 0,
+ * even when enabling OTP access failed and nothing was read.
+ * NOTE(review): host ECC (NFI_CTRL_ECC) is switched off here and not
+ * restored - confirm callers reconfigure ECC afterwards.
+ */
+static int nand_spi_read_param_page(struct nand_base *nand, u8 *data,
+				    int count)
+{
+	struct device_spi *dev = device_to_spi(nand->dev);
+	struct nand_spi *spi = base_to_spi(nand);
+	struct nfi *nfi = nand->nfi;
+	int sectors, value;
+	u8 param = 0;
+
+	sectors = div_round_up(count, nfi->sector_size);
+
+	/* Enter OTP access mode. */
+	nand->get_feature(nand, dev->feature.config.addr, &param, 1);
+	param |= BIT(dev->feature.config.otp_en_bit);
+	nand->set_feature(nand, dev->feature.config.addr, &param, 1);
+
+	param = 0;
+	nand->get_feature(nand, dev->feature.config.addr, &param, 1);
+	if (param & BIT(dev->feature.config.otp_en_bit)) {
+		/* Host ECC off: the parameter page carries no host ECC. */
+		value = 0;
+		nfi->nfi_ctrl(nfi, NFI_CTRL_ECC, &value);
+		nand->dev->col_cycle  = spi_replace_rx_col_cycle(spi->rx_mode);
+		nand->read_page(nand, 0x01);
+		nand->read_data(nand, 0x01, 0, sectors, data, NULL);
+	}
+
+	/* Leave OTP access mode. */
+	param &= ~BIT(dev->feature.config.otp_en_bit);
+	nand->set_feature(nand, dev->feature.config.addr, &param, 1);
+
+	return 0;
+}
+
+/*
+ * Write @count feature bytes to register @addr.
+ * Feature writes need a preceding write-enable on SPI NAND.
+ */
+static int nand_spi_set_feature(struct nand_base *nand, u8 addr,
+				u8 *param,
+				int count)
+{
+	struct nand_base *parent = base_to_spi(nand)->parent;
+
+	nand->write_enable(nand);
+	nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
+
+	return parent->set_feature(nand, addr, param, count);
+}
+
+/* Read @count feature bytes from register @addr via the parent. */
+static int nand_spi_get_feature(struct nand_base *nand, u8 addr,
+				u8 *param,
+				int count)
+{
+	struct nand_base *parent = base_to_spi(nand)->parent;
+
+	nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
+
+	return parent->get_feature(nand, addr, param, count);
+}
+
+/*
+ * Resolve the die for @*row (rebasing it), then fold the plane index
+ * of the target block into @*col. Returns 0 or a die-select error.
+ */
+static int nand_spi_addressing(struct nand_base *nand, int *row,
+			       int *col)
+{
+	struct nand_device *dev = nand->dev;
+	int ret, block, plane;
+
+	ret = nand_spi_die_select(nand, row);
+	if (ret)
+		return ret;
+
+	block = div_down(*row, nand_block_pages(dev));
+	plane = block % dev->plane_num;
+	*col |= plane << dev->addressing->plane_bit_start;
+
+	return 0;
+}
+
+/* NFI PIO transfer op: read_bytes or write_bytes. */
+typedef int (*func_nandx_operation)(struct nfi *, u8 *, int);
+
+/*
+ * Transfer one whole page (+ spare) between @data and the device in
+ * MAC (PIO/GPRAM) mode; @read selects the direction.
+ *
+ * The page is moved in GPRAM-sized pieces (SPI_GPRAM_MAX_LEN minus
+ * the command/address phase); for 1-1-4 transfers the command and
+ * column address are first converted to QPI line patterns and pushed
+ * out through send_cmd(). Only SNFI_RX_111/114 are accepted.
+ * Returns 0 or a negative errno.
+ *
+ * NOTE(review): @row is currently unused - the random-out/program
+ * commands issued here carry only column addresses; confirm the page
+ * is loaded/committed by read_page()/program_page().
+ * NOTE(review): tmp_val is handed to nandx_split() before being
+ * assigned - safe only if nandx_split() is a macro writing it; verify.
+ */
+static int nand_spi_rw_mac(struct nand_base *nand, int row,
+			   u8 *data, bool read)
+{
+	struct nand_spi *spi = base_to_spi(nand);
+	struct nand_device *dev = nand->dev;
+	struct device_spi *dev_spi = device_to_spi(dev);
+	struct nandx_split64 split = {0};
+	func_nandx_operation operation;
+	struct nfi *nfi = nand->nfi;
+	u8 cmd, *lbuf = data, qpi_val[16] = {0}, cycle, qpi_cycle;
+	int len, i, ret = 0, count, args;
+	bool mac_qpi;
+	u64 tmp_val;
+
+	operation = read ? nfi->read_bytes : nfi->write_bytes;
+	cmd = read ? nand->dev->cmds->random_out_1st :
+	      nand->dev->cmds->program_1st;
+
+	if ((spi->rx_mode != SNFI_RX_114 && spi->rx_mode != SNFI_RX_111)) {
+		pr_warn("mac mode not support rx %d mode\n", spi->rx_mode);
+		return -EOPNOTSUPP;
+	}
+
+	if ((read && spi->rx_mode == SNFI_RX_114) ||
+	    (!read && spi->tx_mode == SNFI_TX_114)) {
+		/* x4 data phase: drive the cmd/addr phase as QPI patterns. */
+		args = 1;
+		mac_qpi = true;
+		cycle = (nand->dev->col_cycle + 1) << 2;
+		ret = nfi->nfi_ctrl(nfi, SNFI_CTRL_MAC_QPI_MODE, &args);
+		if (ret)
+			return ret;
+		nand_spi_set_config(nand, dev_spi->feature.config.addr,
+				    BIT(0), true);
+	} else {
+		mac_qpi = false;
+		cycle = nand->dev->col_cycle + 1;
+		nand_spi_set_config(nand, dev_spi->feature.config.addr,
+				    BIT(0), false);
+	}
+
+	/* Just support whole page operation for now */
+	count = nand->dev->page_size + nand->dev->spare_size;
+	len = SPI_GPRAM_MAX_LEN - cycle;
+	nandx_split(&split, 0, count, tmp_val, len);
+
+	if (split.head_len) {
+		nfi->reset(nfi);
+		if (mac_qpi) {
+			/* transfer cmd & col addr to qpi,
+			 * and use send_cmd function tx to device
+			 */
+			spi2qpi(0, nand->dev->col_cycle, read, qpi_val);
+			for (qpi_cycle = 0; qpi_cycle < cycle; qpi_cycle++)
+				nfi->send_cmd(nfi, qpi_val[qpi_cycle]);
+		} else {
+			nfi->send_cmd(nfi, cmd);
+			nfi->send_addr(nfi, 0, 0, nand->dev->col_cycle, 0);
+		}
+		ret = operation(nfi, lbuf, split.head_len);
+		lbuf += split.head_len;
+	}
+
+	if (split.body_len) {
+		/* Middle chunks: one full GPRAM transfer per iteration. */
+		tmp_val = div_down(split.body_len, len);
+		for (i = 0; i < tmp_val; i++) {
+			nfi->reset(nfi);
+			if (mac_qpi) {
+				spi2qpi(split.body + i * len,
+					nand->dev->col_cycle, read, qpi_val);
+				for (qpi_cycle = 0; qpi_cycle < cycle;
+				     qpi_cycle++)
+					nfi->send_cmd(nfi, qpi_val[qpi_cycle]);
+
+			} else {
+				nfi->send_cmd(nfi, cmd);
+				nfi->send_addr(nfi,
+					       split.body + i * len, 0,
+					       nand->dev->col_cycle, 0);
+			}
+			ret = operation(nfi, lbuf, len);
+			lbuf += len;
+		}
+	}
+
+	if (split.tail_len) {
+		nfi->reset(nfi);
+		if (mac_qpi) {
+			spi2qpi(split.tail, nand->dev->col_cycle,
+				read, qpi_val);
+			for (qpi_cycle = 0; qpi_cycle < cycle; qpi_cycle++)
+				nfi->send_cmd(nfi, qpi_val[qpi_cycle]);
+
+		} else {
+			nfi->send_cmd(nfi, cmd);
+			nfi->send_addr(nfi, split.tail, 0,
+				       nand->dev->col_cycle, 0);
+		}
+		ret = operation(nfi, lbuf, split.tail_len);
+	}
+
+	/* Leave QPI mode again so later MAC phases run in 1-bit mode. */
+	if ((read && spi->rx_mode == SNFI_RX_114) ||
+	    (!read && spi->tx_mode == SNFI_TX_114)) {
+		args = 0;
+		ret = nfi->nfi_ctrl(nfi, SNFI_CTRL_MAC_QPI_MODE, &args);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+/* Load a page into the device cache and wait for the array. */
+static int nand_spi_read_page(struct nand_base *nand, int row)
+{
+	struct nand_spi *spi = base_to_spi(nand);
+	u8 mode = (spi->op_mode == SNFI_AUTO_MODE) ?
+		  SNFI_AUTO_MODE : SNFI_MAC_MODE;
+
+	nand_spi_set_op_mode(nand, mode);
+
+	spi->parent->read_page(nand, row);
+
+	return nand_spi_wait_ready(nand, READY_TIMEOUT);
+}
+
+/*
+ * Read @sectors of data (and optionally oob) from the device cache.
+ *
+ * Configures the QE bit as required by the rx mode, then transfers
+ * either via the MAC PIO path (whole page into @data, spare copied to
+ * @oob) or via the parent's read_data. With on-die ECC enabled the
+ * reported bitflip count is translated into -ENANDREAD/-ENANDFLIPS.
+ */
+static int nand_spi_read_data(struct nand_base *nand, int row, int col,
+			      int sectors, u8 *data, u8 *oob)
+{
+	struct device_spi *dev = device_to_spi(nand->dev);
+	struct nand_spi *spi = base_to_spi(nand);
+	struct nand_base *parent = spi->parent;
+	int ret;
+
+	/* Quad transfers may need the QE bit set first. */
+	if ((spi->rx_mode == SNFI_RX_114 || spi->rx_mode == SNFI_RX_144) &&
+	    dev->feature.config.need_qe)
+		nand_spi_set_config(nand, dev->feature.config.addr,
+				    BIT(0), true);
+	else
+		nand_spi_set_config(nand, dev->feature.config.addr,
+				    BIT(0), false);
+
+	nand->dev->col_cycle = spi_replace_rx_col_cycle(spi->rx_mode);
+
+	nand_spi_set_op_mode(nand, spi->op_mode);
+
+	if (spi->op_mode == SNFI_MAC_MODE) {
+		ret = nand_spi_rw_mac(nand, row, data, true);
+		/*
+		 * MAC mode reads the whole page incl. spare into @data;
+		 * callers (e.g. read_param_page) may pass oob == NULL,
+		 * so only copy the spare area out when requested.
+		 */
+		if (oob)
+			memcpy(oob, data + dev->dev.page_size,
+			       dev->dev.spare_size);
+	} else {
+		ret = parent->read_data(nand, row, col, sectors, data, oob);
+	}
+
+	if (ret < 0)
+		return -ENANDREAD;
+
+	/* Translate the on-die ECC bitflip report, if enabled. */
+	if (spi->ondie_ecc) {
+		ret = nand_spi_read_status(nand);
+		ret &= GENMASK(dev->feature.status.ecc_end_bit,
+			       dev->feature.status.ecc_start_bit);
+		ret >>= dev->feature.status.ecc_start_bit;
+		if (ret > nand->dev->endurance->ecc_req)
+			return -ENANDREAD;
+		else if (ret > nand->dev->endurance->max_bitflips)
+			return -ENANDFLIPS;
+	}
+
+	return 0;
+}
+
+/*
+ * Issue write-enable and confirm it via the status register.
+ * Returns 0 when the write-enable latch is set, nonzero otherwise.
+ */
+static int nand_spi_write_enable(struct nand_base *nand)
+{
+	struct device_spi *dev = device_to_spi(nand->dev);
+	struct nfi *nfi = nand->nfi;
+
+	nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
+
+	nfi->reset(nfi);
+	nfi->send_cmd(nfi, dev->extend_cmds->write_enable);
+	nfi->trigger(nfi);
+
+	return (nand_spi_read_status(nand) &
+		nand->dev->status->write_protect) ? 0 : 1;
+}
+
+/*
+ * Load page data (and optionally oob) into the device cache.
+ *
+ * Configures the QE bit as required by the tx mode, then streams the
+ * buffer via the MAC PIO path (page + spare from @data) or the
+ * parent's program_data. The program is committed by program_page().
+ */
+static int nand_spi_program_data(struct nand_base *nand, int row,
+				 int col,
+				 u8 *data, u8 *oob)
+{
+	struct device_spi *dev = device_to_spi(nand->dev);
+	struct nand_spi *spi = base_to_spi(nand);
+	int ret;
+
+	/* x4 program load may need the QE bit set first. */
+	if (spi->tx_mode == SNFI_TX_114 && dev->feature.config.need_qe)
+		nand_spi_set_config(nand, dev->feature.config.addr,
+				    BIT(0), true);
+	else
+		nand_spi_set_config(nand, dev->feature.config.addr,
+				    BIT(0), false);
+
+	nand_spi_set_op_mode(nand, spi->op_mode);
+
+	nand->dev->col_cycle = spi_replace_tx_col_cycle(spi->tx_mode);
+
+	if (spi->op_mode == SNFI_MAC_MODE) {
+		/*
+		 * MAC mode streams page + spare out of @data; guard
+		 * against callers that pass oob == NULL instead of
+		 * dereferencing it unconditionally.
+		 */
+		if (oob)
+			memcpy(data + dev->dev.page_size, oob,
+			       dev->dev.spare_size);
+		ret = nand_spi_rw_mac(nand, row, data, false);
+	} else {
+		ret = spi->parent->program_data(nand, row, col, data, oob);
+	}
+
+	return ret;
+}
+
+/*
+ * Commit a previously loaded page to the array at @row.
+ * In AUTO mode the controller issues program-execute itself.
+ */
+static int nand_spi_program_page(struct nand_base *nand, int row)
+{
+	struct nand_spi *spi = base_to_spi(nand);
+	struct nand_device *dev = nand->dev;
+	struct nfi *nfi = nand->nfi;
+
+	if (spi->op_mode == SNFI_AUTO_MODE)
+		return 0;
+
+	nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
+
+	nfi->reset(nfi);
+	nfi->send_cmd(nfi, dev->cmds->program_2nd);
+	nfi->send_addr(nfi, 0, row, dev->col_cycle, dev->row_cycle);
+	nfi->trigger(nfi);
+
+	return nand_spi_wait_ready(nand, READY_TIMEOUT);
+}
+
+/* Erase the block at @row and wait for the array to finish. */
+static int nand_spi_erase_block(struct nand_base *nand, int row)
+{
+	struct nand_spi *spi = base_to_spi(nand);
+	u8 mode = (spi->op_mode == SNFI_AUTO_MODE) ?
+		  SNFI_AUTO_MODE : SNFI_MAC_MODE;
+
+	nand_spi_set_op_mode(nand, mode);
+
+	spi->parent->erase_block(nand, row);
+
+	return nand_spi_wait_ready(nand, READY_TIMEOUT);
+}
+
+/*
+ * Chip-level control entry for the SPI wrapper.
+ *
+ * Handles IO format, op mode, on-die ECC, tx/rx bus width and the
+ * performance counters locally; unrecognized commands are forwarded
+ * to the NFI. Returns 0 or a negative errno.
+ * NOTE(review): @args is dereferenced as int unconditionally - every
+ * caller must pass a valid pointer even for forwarded commands.
+ */
+static int nand_chip_spi_ctrl(struct nand_chip *chip, int cmd,
+			      void *args)
+{
+	struct nand_base *nand = chip->nand;
+	struct device_spi *dev = device_to_spi(nand->dev);
+	struct nand_spi *spi = base_to_spi(nand);
+	struct nfi *nfi = nand->nfi;
+	int ret = 0, value = *(int *)args;
+
+	switch (cmd) {
+	case NFI_CTRL_IO_FORMAT:
+		/* Mirror the accepted NFI geometry into the chip view. */
+		ret = nfi->nfi_ctrl(nfi, cmd, args);
+		if (!ret) {
+			chip->sector_size = nfi->sector_size;
+			chip->sector_spare_size = nfi->sector_spare_size;
+			chip->fdm_reg_size = nfi->fdm_size;
+			chip->fdm_ecc_size = nfi->fdm_ecc_size;
+			chip->ecc_strength = nfi->ecc_strength;
+			chip->ecc_parity_size = nfi->ecc_parity_size;
+		}
+		break;
+
+	case SNFI_CTRL_OP_MODE:
+		spi->op_mode = *(u8 *)args;
+		break;
+
+	case CHIP_CTRL_ONDIE_ECC:
+		spi->ondie_ecc = (bool)value;
+		ret = nand_spi_set_config(nand, dev->feature.config.addr,
+					  BIT(dev->feature.config.ecc_en_bit),
+					  spi->ondie_ecc);
+		break;
+
+	case SNFI_CTRL_TX_MODE:
+		if (value < 0 || value > SNFI_TX_114)
+			return -EOPNOTSUPP;
+
+		/* Only switch if the device advertises this bus width. */
+		if (dev->tx_mode_mask & BIT(value)) {
+			spi->tx_mode = value;
+			nand->dev->cmds->program_1st = spi_replace_tx_cmds(
+							       spi->tx_mode);
+			ret = nfi->nfi_ctrl(nfi, cmd, args);
+		}
+
+		break;
+
+	case SNFI_CTRL_RX_MODE:
+		if (value < 0 || value > SNFI_RX_144)
+			return -EOPNOTSUPP;
+
+		if (dev->rx_mode_mask & BIT(value)) {
+			spi->rx_mode = value;
+			nand->dev->cmds->random_out_1st = spi_replace_rx_cmds(
+								  spi->rx_mode);
+			ret = nfi->nfi_ctrl(nfi, cmd, args);
+		}
+
+		break;
+
+	case CHIP_CTRL_PERF_INFO:
+		*(struct page_performance *)args = *nand->performance;
+		break;
+
+	case CHIP_CTRL_PERF_INFO_CLEAR:
+		memset(nand->performance, 0, sizeof(struct page_performance));
+		break;
+
+	case CHIP_CTRL_DEVICE_RESET:
+		ret = nand_spi_reset(nand);
+		break;
+
+	case CHIP_CTRL_OPS_CACHE:
+	case CHIP_CTRL_OPS_MULTI:
+	case CHIP_CTRL_PSLC_MODE:
+	case CHIP_CTRL_DDR_MODE:
+	case CHIP_CTRL_DRIVE_STRENGTH:
+	case CHIP_CTRL_TIMING_MODE:
+		/* Not applicable to SPI NAND. */
+		ret = -EOPNOTSUPP;
+		break;
+
+	default:
+		ret = nfi->nfi_ctrl(nfi, cmd, args);
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * Resume: reset the device, re-clear block protection, restore the
+ * on-die ECC enable to its pre-suspend state and reprogram the NFI
+ * format.
+ */
+int nand_chip_spi_resume(struct nand_chip *chip)
+{
+	struct nand_base *nand = chip->nand;
+	struct nand_spi *spi = base_to_spi(nand);
+	struct device_spi *dev = device_to_spi(nand->dev);
+	struct nfi *nfi = nand->nfi;
+	struct nfi_format format;
+	u8 mask;
+
+	nand->reset(nand);
+
+	/*
+	 * The BP field lives in the protection register, so the mask
+	 * must be written to protect.addr (same as in nand_spi_init),
+	 * not to the configuration register.
+	 */
+	mask = GENMASK(dev->feature.protect.bp_end_bit,
+		       dev->feature.protect.bp_start_bit);
+	nand_spi_set_config(nand, dev->feature.protect.addr, mask, false);
+
+	mask = BIT(dev->feature.config.ecc_en_bit);
+	nand_spi_set_config(nand, dev->feature.config.addr, mask,
+			    spi->ondie_ecc);
+
+	format.page_size = nand->dev->page_size;
+	format.spare_size = nand->dev->spare_size;
+	format.ecc_req = nand->dev->endurance->ecc_req;
+
+	return nfi->set_format(nfi, &format);
+}
+
+/* Push the device's page/spare geometry and ECC requirement to the NFI. */
+static int nand_spi_set_format(struct nand_base *nand)
+{
+	struct nand_device *dev = nand->dev;
+	struct nfi_format format = {
+		.page_size = dev->page_size,
+		.spare_size = dev->spare_size,
+		.ecc_req = dev->endurance->ecc_req,
+	};
+
+	return nand->nfi->set_format(nand->nfi, &format);
+}
+
+/*
+ * nand_spi_init - stack the SPI wrapper on top of the base nand ops
+ * @chip: chip whose ctrl/resume ops get redirected
+ *
+ * Allocates a nand_spi, installs the SPI-specific base ops, detects
+ * the device, programs the NFI format, selects the bus-width command
+ * variants, clears block protection and configures on-die ECC.
+ * Returns the wrapped nand_base, or NULL on failure.
+ *
+ * NOTE(review): on the error path the spi wrapper is freed while
+ * chip->chip_ctrl/resume still point at the wrapper handlers -
+ * confirm callers treat a NULL return as fatal.
+ */
+struct nand_base *nand_spi_init(struct nand_chip *chip)
+{
+	struct nand_base *nand;
+	struct nand_spi *spi;
+	struct device_spi *dev;
+	int ret;
+	u8 mask;
+
+	spi = mem_alloc(1, sizeof(struct nand_spi));
+	if (!spi) {
+		pr_err("alloc nand_spi fail\n");
+		return NULL;
+	}
+
+	/* Defaults: custom op mode, x4 transfers, on-die ECC off. */
+	spi->ondie_ecc = false;
+	spi->op_mode = SNFI_CUSTOM_MODE;
+	spi->rx_mode = SNFI_RX_114;
+	spi->tx_mode = SNFI_TX_114;
+
+	spi->parent = chip->nand;
+	spi->base.performance = spi->parent->performance;
+	nand = &spi->base;
+	nand->dev = spi->parent->dev;
+	nand->nfi = spi->parent->nfi;
+
+	nand->select_device = nand_spi_select_device;
+	nand->reset = nand_spi_reset;
+	nand->read_id = nand_spi_read_id;
+	nand->read_param_page = nand_spi_read_param_page;
+	nand->set_feature = nand_spi_set_feature;
+	nand->get_feature = nand_spi_get_feature;
+	nand->read_status = nand_spi_read_status;
+	nand->addressing = nand_spi_addressing;
+	nand->read_page = nand_spi_read_page;
+	nand->read_data = nand_spi_read_data;
+	nand->write_enable = nand_spi_write_enable;
+	nand->program_data = nand_spi_program_data;
+	nand->program_page = nand_spi_program_page;
+	nand->erase_block = nand_spi_erase_block;
+	nand->nand_get_device = nand_spi_get_device;
+
+	chip->chip_ctrl = nand_chip_spi_ctrl;
+	chip->nand_type = NAND_SPI;
+	chip->resume = nand_chip_spi_resume;
+
+	ret = nand_detect_device(nand);
+	if (ret)
+		goto err;
+
+	nand->select_device(nand, 0);
+
+	ret = nand_spi_set_format(nand);
+	if (ret)
+		goto err;
+
+	dev = (struct device_spi *)nand->dev;
+
+	/* Select the opcodes matching the configured bus widths. */
+	nand->dev->cmds->random_out_1st =
+		spi_replace_rx_cmds(spi->rx_mode);
+	nand->dev->cmds->program_1st =
+		spi_replace_tx_cmds(spi->tx_mode);
+
+	/* Unlock: clear all block-protection bits. */
+	mask = GENMASK(dev->feature.protect.bp_end_bit,
+		       dev->feature.protect.bp_start_bit);
+	ret = nand_spi_set_config(nand, dev->feature.protect.addr, mask, false);
+	if (ret)
+		goto err;
+
+	mask =  BIT(dev->feature.config.ecc_en_bit);
+	ret = nand_spi_set_config(nand, dev->feature.config.addr, mask,
+				  spi->ondie_ecc);
+	if (ret)
+		goto err;
+
+	return nand;
+
+err:
+	mem_free(spi);
+	return NULL;
+}
+
+/*
+ * nand_spi_exit - tear down the SPI wrapper created by nand_spi_init()
+ * @nand: the wrapped base returned by nand_spi_init()
+ */
+void nand_spi_exit(struct nand_base *nand)
+{
+	struct nand_spi *spi = base_to_spi(nand);
+
+	nand_base_exit(spi->parent);
+	mem_free(spi);
+}
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand/nand_spi.h b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand/nand_spi.h
new file mode 100644
index 0000000..f53b476
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand/nand_spi.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#ifndef __NAND_SPI_H__
+#define __NAND_SPI_H__
+
+/*
+ * spi nand handler
+ * @base: spi nand base functions
+ * @parent: common parent nand base functions
+ * @tx_mode: spi bus width of transfer to device
+ * @rx_mode: spi bus width of transfer from device
+ * @op_mode: spi nand controller (NFI) operation mode
+ * @ondie_ecc: spi nand on-die ecc flag
+ */
+
+struct nand_spi {
+	struct nand_base base;
+	struct nand_base *parent;
+	u8 tx_mode;
+	u8 rx_mode;
+	u8 op_mode;
+	bool ondie_ecc;
+};
+
+/* Map an embedded nand_base back to its containing nand_spi. */
+static inline struct nand_spi *base_to_spi(struct nand_base *base)
+{
+	return container_of(base, struct nand_spi, base);
+}
+
+#endif /* __NAND_SPI_H__ */
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand_base.c b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand_base.c
new file mode 100644
index 0000000..1ca85cf
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand_base.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "nand_chip.h"
+#include "nand_device.h"
+#include "nfi.h"
+#include "nand_base.h"
+
+static int nand_base_select_device(struct nand_base *nand, int cs)
+{
+	struct nfi *nfi = nand->nfi;
+
+	nfi->reset(nfi);
+
+	return nfi->select_chip(nfi, cs);
+}
+
+static int nand_base_reset(struct nand_base *nand)
+{
+	struct nfi *nfi = nand->nfi;
+	struct nand_device *dev = nand->dev;
+
+	nfi->reset(nfi);
+	nfi->send_cmd(nfi, dev->cmds->reset);
+	nfi->trigger(nfi);
+
+	return nfi->wait_ready(nfi, NAND_WAIT_BUSY, dev->array_timing->tRST);
+}
+
+static int nand_base_read_id(struct nand_base *nand, u8 *id, int count)
+{
+	struct nfi *nfi = nand->nfi;
+	struct nand_device *dev = nand->dev;
+
+	nfi->reset(nfi);
+	nfi->send_cmd(nfi, dev->cmds->read_id);
+	nfi->wait_ready(nfi, NAND_WAIT_TIME, dev->array_timing->tWHR);
+	nfi->send_addr(nfi, 0, 0, 1, 0);
+
+	return nfi->read_bytes(nfi, id, count);
+}
+
+static int nand_base_read_param_page(struct nand_base *nand, u8 *data,
+				     int count)
+{
+	struct nfi *nfi = nand->nfi;
+	struct nand_device *dev = nand->dev;
+
+	nfi->reset(nfi);
+	nfi->send_cmd(nfi, dev->cmds->read_param_page);
+	nfi->send_addr(nfi, 0, 0, 1, 0);
+
+	nfi->wait_ready(nfi, NAND_WAIT_BUSY, dev->array_timing->tR);
+
+	return nfi->read_bytes(nfi, data, count);
+}
+
+static int nand_base_set_feature(struct nand_base *nand, u8 addr,
+				 u8 *param,
+				 int count)
+{
+	struct nfi *nfi = nand->nfi;
+	struct nand_device *dev = nand->dev;
+
+	nfi->reset(nfi);
+	nfi->send_cmd(nfi, dev->cmds->set_feature);
+	nfi->send_addr(nfi, addr, 0, 1, 0);
+
+	nfi->write_bytes(nfi, param, count);
+
+	return nfi->wait_ready(nfi, NAND_WAIT_BUSY,
+			       dev->array_timing->tFEAT);
+}
+
+static int nand_base_get_feature(struct nand_base *nand, u8 addr,
+				 u8 *param,
+				 int count)
+{
+	struct nfi *nfi = nand->nfi;
+	struct nand_device *dev = nand->dev;
+
+	nfi->reset(nfi);
+	nfi->send_cmd(nfi, dev->cmds->get_feature);
+	nfi->send_addr(nfi, addr, 0, 1, 0);
+	nfi->wait_ready(nfi, NAND_WAIT_BUSY, dev->array_timing->tFEAT);
+
+	return nfi->read_bytes(nfi, param, count);
+}
+
+static int nand_base_read_status(struct nand_base *nand)
+{
+	struct nfi *nfi = nand->nfi;
+	struct nand_device *dev = nand->dev;
+	u8 status = 0;
+
+	nfi->reset(nfi);
+	nfi->send_cmd(nfi, dev->cmds->read_status);
+	nfi->wait_ready(nfi, NAND_WAIT_TIME, dev->array_timing->tWHR);
+	nfi->read_bytes(nfi, &status, 1);
+
+	return status;
+}
+
+static int nand_base_addressing(struct nand_base *nand, int *row,
+				int *col)
+{
+	struct nand_device *dev = nand->dev;
+	int lun, plane, block, page, cs = 0;
+	int block_pages, target_blocks, wl = 0;
+	int icol = *col;
+
+	if (dev->target_num > 1) {
+		block_pages = nand_block_pages(dev);
+		target_blocks = nand_target_blocks(dev);
+		cs = div_down(*row, block_pages * target_blocks);
+		*row -= cs * block_pages * target_blocks;
+	}
+
+	nand->select_device(nand, cs);
+
+	block_pages = nand_block_pages(dev);
+	block = div_down(*row, block_pages);
+	page = *row - block * block_pages;
+	plane = reminder(block, dev->plane_num);
+	lun = div_down(block, nand_lun_blocks(dev));
+
+	wl |= (page << dev->addressing->row_bit_start);
+	wl |= (block << dev->addressing->block_bit_start);
+	wl |= (plane << dev->addressing->plane_bit_start);
+	wl |= (lun << dev->addressing->lun_bit_start);
+
+	*row = wl;
+	*col = icol;
+
+	return 0;
+}
+
+static int nand_base_read_page(struct nand_base *nand, int row)
+{
+	struct nfi *nfi = nand->nfi;
+	struct nand_device *dev = nand->dev;
+
+	nfi->reset(nfi);
+	nfi->send_cmd(nfi, dev->cmds->read_1st);
+	nfi->send_addr(nfi, 0, row, dev->col_cycle, dev->row_cycle);
+	nfi->send_cmd(nfi, dev->cmds->read_2nd);
+	nfi->trigger(nfi);
+
+	return nfi->wait_ready(nfi, NAND_WAIT_BUSY, dev->array_timing->tR);
+}
+
+static int nand_base_read_data(struct nand_base *nand, int row, int col,
+			       int sectors, u8 *data, u8 *oob)
+{
+	struct nfi *nfi = nand->nfi;
+	struct nand_device *dev = nand->dev;
+
+	nfi->reset(nfi);
+	nfi->send_cmd(nfi, dev->cmds->random_out_1st);
+	nfi->send_addr(nfi, col, row, dev->col_cycle, dev->row_cycle);
+	nfi->send_cmd(nfi, dev->cmds->random_out_2nd);
+	nfi->wait_ready(nfi, NAND_WAIT_TIME, dev->array_timing->tWHR);
+
+	return nfi->read_sectors(nfi, data, oob, sectors);
+}
+
+static int nand_base_write_enable(struct nand_base *nand)
+{
+	struct nand_device *dev = nand->dev;
+	int status;
+
+	status = nand_base_read_status(nand);
+	if (status & dev->status->write_protect)
+		return 0;
+
+	return -ENANDWP;
+}
+
+static int nand_base_program_data(struct nand_base *nand, int row,
+				  int col,
+				  u8 *data, u8 *oob)
+{
+	struct nfi *nfi = nand->nfi;
+	struct nand_device *dev = nand->dev;
+
+	nfi->reset(nfi);
+	nfi->send_cmd(nfi, dev->cmds->program_1st);
+	nfi->send_addr(nfi, col, row, dev->col_cycle, dev->row_cycle);
+
+	return nfi->write_page(nfi, data, oob);
+}
+
+static int nand_base_program_page(struct nand_base *nand, int row)
+{
+	struct nfi *nfi = nand->nfi;
+	struct nand_device *dev = nand->dev;
+
+	nfi->reset(nfi);
+	nfi->send_cmd(nfi, dev->cmds->program_2nd);
+	nfi->trigger(nfi);
+
+	return nfi->wait_ready(nfi, NAND_WAIT_BUSY,
+			       dev->array_timing->tPROG);
+}
+
+static int nand_base_erase_block(struct nand_base *nand, int row)
+{
+	struct nfi *nfi = nand->nfi;
+	struct nand_device *dev = nand->dev;
+
+	nfi->reset(nfi);
+	nfi->send_cmd(nfi, dev->cmds->erase_1st);
+	nfi->send_addr(nfi, 0, row, 0, dev->row_cycle);
+	nfi->send_cmd(nfi, dev->cmds->erase_2nd);
+	nfi->trigger(nfi);
+
+	return nfi->wait_ready(nfi, NAND_WAIT_BUSY,
+			       dev->array_timing->tBERS);
+}
+
+static int nand_base_read_cache(struct nand_base *nand, int row)
+{
+	struct nfi *nfi = nand->nfi;
+	struct nand_device *dev = nand->dev;
+
+	nfi->reset(nfi);
+	nfi->send_cmd(nfi, dev->cmds->read_1st);
+	nfi->send_addr(nfi, 0, row, dev->col_cycle, dev->row_cycle);
+	nfi->send_cmd(nfi, dev->cmds->read_cache);
+	nfi->trigger(nfi);
+
+	return nfi->wait_ready(nfi, NAND_WAIT_BUSY,
+			       dev->array_timing->tRCBSY);
+}
+
+static int nand_base_read_last(struct nand_base *nand)
+{
+	struct nfi *nfi = nand->nfi;
+	struct nand_device *dev = nand->dev;
+
+	nfi->reset(nfi);
+	nfi->send_cmd(nfi, dev->cmds->read_cache_last);
+	nfi->trigger(nfi);
+
+	return nfi->wait_ready(nfi, NAND_WAIT_BUSY,
+			       dev->array_timing->tRCBSY);
+}
+
+static int nand_base_program_cache(struct nand_base *nand)
+{
+	struct nfi *nfi = nand->nfi;
+	struct nand_device *dev = nand->dev;
+
+	nfi->reset(nfi);
+	nfi->send_cmd(nfi, dev->cmds->program_cache);
+	nfi->trigger(nfi);
+
+	return nfi->wait_ready(nfi, NAND_WAIT_BUSY,
+			       dev->array_timing->tPCBSY);
+}
+
+struct nand_base *nand_base_init(struct nand_device *dev,
+				 struct nfi *nfi)
+{
+	struct nand_base *nand;
+
+	nand = mem_alloc(1, sizeof(struct nand_base));
+	if (!nand)
+		return NULL;
+
+	nand->performance = mem_alloc(1, sizeof(struct page_performance));
+	if (!nand->performance) {
+		mem_free(nand);
+		return NULL;
+	}
+	memset(nand->performance, 0, sizeof(struct page_performance));
+
+	nand->dev = dev;
+	nand->nfi = nfi;
+	nand->select_device = nand_base_select_device;
+	nand->reset = nand_base_reset;
+	nand->read_id = nand_base_read_id;
+	nand->read_param_page = nand_base_read_param_page;
+	nand->set_feature = nand_base_set_feature;
+	nand->get_feature = nand_base_get_feature;
+	nand->read_status = nand_base_read_status;
+	nand->addressing = nand_base_addressing;
+	nand->read_page = nand_base_read_page;
+	nand->read_data = nand_base_read_data;
+	nand->read_cache = nand_base_read_cache;
+	nand->read_last = nand_base_read_last;
+	nand->write_enable = nand_base_write_enable;
+	nand->program_data = nand_base_program_data;
+	nand->program_page = nand_base_program_page;
+	nand->program_cache = nand_base_program_cache;
+	nand->erase_block = nand_base_erase_block;
+	nand->nand_get_device = nand_get_device;
+
+	return nand;
+}
+
+void nand_base_exit(struct nand_base *base)
+{
+	nfi_exit(base->nfi);
+	mem_free(base->performance);
+	mem_free(base);
+}
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand_base.h b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand_base.h
new file mode 100644
index 0000000..5907851
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand_base.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#ifndef __NAND_BASE_H__
+#define __NAND_BASE_H__
+
+/*
+ * nand base functions
+ * @dev: nand device information
+ * @nfi: nand host controller
+ * @select_device: select one nand device of multi nand on chip
+ * @reset: reset current nand device
+ * @read_id: read current nand id
+ * @read_param_page: read current nand parameters page
+ * @set_feature: configure the nand device feature
+ * @get_feature: get the nand device feature
+ * @read_status: read nand device status
+ * @addressing: translate the logical address to the nand device physical address
+ * @read_page: read page data to device cache register
+ * @read_data: read data from device cache register by bus protocol
+ * @read_cache: nand cache read operation for data output
+ * @read_last: nand cache read operation for last page output
+ * @write_enable: enable program/erase for nand, especially spi nand
+ * @program_data: program data to nand device cache register
+ * @program_page: program page data from nand device cache register to array
+ * @program_cache: nand cache program operation for data input
+ * @erase_block: erase nand block operation
+ */
+struct nand_base {
+	struct nand_device *dev;
+	struct nfi *nfi;
+	struct page_performance *performance;
+
+	int (*select_device)(struct nand_base *nand, int cs);
+	int (*reset)(struct nand_base *nand);
+	int (*read_id)(struct nand_base *nand, u8 *id, int count);
+	int (*read_param_page)(struct nand_base *nand, u8 *data, int count);
+	int (*set_feature)(struct nand_base *nand, u8 addr, u8 *param,
+			   int count);
+	int (*get_feature)(struct nand_base *nand, u8 addr, u8 *param,
+			   int count);
+	int (*read_status)(struct nand_base *nand);
+	int (*addressing)(struct nand_base *nand, int *row, int *col);
+
+	int (*read_page)(struct nand_base *nand, int row);
+	int (*read_data)(struct nand_base *nand, int row, int col, int sectors,
+			 u8 *data, u8 *oob);
+	int (*read_cache)(struct nand_base *nand, int row);
+	int (*read_last)(struct nand_base *nand);
+
+	int (*write_enable)(struct nand_base *nand);
+	int (*program_data)(struct nand_base *nand, int row, int col, u8 *data,
+			    u8 *oob);
+	int (*program_page)(struct nand_base *nand, int row);
+	int (*program_cache)(struct nand_base *nand);
+
+	int (*erase_block)(struct nand_base *nand, int row);
+
+	struct nand_device *(*nand_get_device)(int index);
+};
+
+struct nand_base *nand_base_init(struct nand_device *device,
+				 struct nfi *nfi);
+void nand_base_exit(struct nand_base *base);
+
+struct nand_base *nand_init(struct nand_chip *nand);
+void nand_exit(struct nand_base *nand);
+struct nand_base *nand_spi_init(struct nand_chip *nand);
+void nand_spi_exit(struct nand_base *nand);
+
+int nand_detect_device(struct nand_base *nand);
+
+#endif /* __NAND_BASE_H__ */
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand_chip.c b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand_chip.c
new file mode 100644
index 0000000..acb1445
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand_chip.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "nand_chip.h"
+#include "nand_device.h"
+#include "nfi.h"
+#include "nand_base.h"
+
+static int nand_chip_read_page(struct nand_chip *chip,
+			       struct nand_ops *ops,
+			       int count)
+{
+	struct nand_base *nand = chip->nand;
+	int i, ret_min = 0, ret_max = 0;
+	int row, col, sectors;
+	u8 *data, *oob;
+#if NANDX_PAGE_PERFORMANCE_TRACE
+	u64 time_cons, page_cons;
+#endif
+
+	for (i = 0; i < count; i++) {
+#if NANDX_PAGE_PERFORMANCE_TRACE
+		page_cons = get_current_time_us();
+#endif
+		row = ops[i].row;
+		col = ops[i].col;
+
+		nand->addressing(nand, &row, &col);
+#if NANDX_PAGE_PERFORMANCE_TRACE
+		time_cons = get_current_time_us();
+#endif
+		ops[i].status = nand->read_page(nand, row);
+#if NANDX_PAGE_PERFORMANCE_TRACE
+		time_cons = get_current_time_us() - time_cons;
+		if (nand->performance->read_page_time) {
+			time_cons += nand->performance->read_page_time;
+			time_cons = div_down(time_cons, 2);
+		}
+		nand->performance->read_page_time = (int)time_cons;
+#endif
+		if (ops[i].status < 0) {
+			ret_min = min_t(int, ret_min, ops[i].status);
+			continue;
+		}
+
+		data = ops[i].data;
+		oob = ops[i].oob;
+		sectors = ops[i].len / chip->sector_size;
+#if NANDX_PAGE_PERFORMANCE_TRACE
+		time_cons = get_current_time_us();
+#endif
+		ops[i].status = nand->read_data(nand, row, col,
+						sectors, data, oob);
+#if NANDX_PAGE_PERFORMANCE_TRACE
+		time_cons = get_current_time_us() - time_cons;
+		if (nand->performance->read_data_time) {
+			time_cons += nand->performance->read_data_time;
+			time_cons = div_down(time_cons, 2);
+		}
+		nand->performance->read_data_time = (int)time_cons;
+#endif
+
+		ret_max = max_t(int, ret_max, ops[i].status);
+		ret_min = min_t(int, ret_min, ops[i].status);
+#if NANDX_PAGE_PERFORMANCE_TRACE
+		page_cons = get_current_time_us() - page_cons;
+		if (nand->performance->rx_page_total_time) {
+			page_cons += nand->performance->rx_page_total_time;
+			page_cons = div_down(page_cons, 2);
+		}
+		nand->performance->rx_page_total_time = (int)page_cons;
+#endif
+	}
+
+	return ret_min < 0 ? ret_min : ret_max;
+}
+
+static int nand_chip_write_page(struct nand_chip *chip,
+				struct nand_ops *ops,
+				int count)
+{
+	struct nand_base *nand = chip->nand;
+	struct nand_device *dev = nand->dev;
+	int i, ret = 0;
+	int row, col;
+	u8 *data, *oob;
+#if NANDX_PAGE_PERFORMANCE_TRACE
+	u64 time_cons, page_cons;
+#endif
+
+	for (i = 0; i < count; i++) {
+#if NANDX_PAGE_PERFORMANCE_TRACE
+		page_cons = get_current_time_us();
+#endif
+		row = ops[i].row;
+		col = ops[i].col;
+
+		nand->addressing(nand, &row, &col);
+
+		ops[i].status = nand->write_enable(nand);
+		if (ops[i].status) {
+			pr_debug("Write Protect at %x!\n", row);
+			ops[i].status = -ENANDWP;
+			return -ENANDWP;
+		}
+
+		data = ops[i].data;
+		oob = ops[i].oob;
+#if NANDX_PAGE_PERFORMANCE_TRACE
+		time_cons = get_current_time_us();
+#endif
+		ops[i].status = nand->program_data(nand, row, col, data, oob);
+#if NANDX_PAGE_PERFORMANCE_TRACE
+		time_cons = get_current_time_us() - time_cons;
+		if (nand->performance->write_data_time) {
+			time_cons += nand->performance->write_data_time;
+			time_cons = div_down(time_cons, 2);
+		}
+		nand->performance->write_data_time = (int)time_cons;
+#endif
+		if (ops[i].status < 0) {
+			ret = ops[i].status;
+			continue;
+		}
+#if NANDX_PAGE_PERFORMANCE_TRACE
+		time_cons = get_current_time_us();
+#endif
+		ops[i].status = nand->program_page(nand, row);
+#if NANDX_PAGE_PERFORMANCE_TRACE
+		time_cons = get_current_time_us() - time_cons;
+		if (nand->performance->write_page_time) {
+			time_cons += nand->performance->write_page_time;
+			time_cons = div_down(time_cons, 2);
+		}
+		nand->performance->write_page_time = (int)time_cons;
+#endif
+		if (ops[i].status < 0) {
+			ret = ops[i].status;
+			continue;
+		}
+
+		ops[i].status = nand->read_status(nand);
+		if (ops[i].status & dev->status->program_fail)
+			ops[i].status = -ENANDWRITE;
+
+		ret = min(ret, ops[i].status);
+#if NANDX_PAGE_PERFORMANCE_TRACE
+		page_cons = get_current_time_us() - page_cons;
+		if (nand->performance->tx_page_total_time) {
+			page_cons += nand->performance->tx_page_total_time;
+			page_cons = div_down(page_cons, 2);
+		}
+		nand->performance->tx_page_total_time = (int)page_cons;
+#endif
+	}
+
+	return ret;
+}
+
+static int nand_chip_erase_block(struct nand_chip *chip,
+				 struct nand_ops *ops,
+				 int count)
+{
+	struct nand_base *nand = chip->nand;
+	struct nand_device *dev = nand->dev;
+	int i, ret = 0;
+	int row, col;
+
+	for (i = 0; i < count; i++) {
+		row = ops[i].row;
+		col = ops[i].col;
+
+		nand->addressing(nand, &row, &col);
+
+		ops[i].status = nand->write_enable(nand);
+		if (ops[i].status) {
+			pr_debug("Write Protect at %x!\n", row);
+			ops[i].status = -ENANDWP;
+			return -ENANDWP;
+		}
+
+		ops[i].status = nand->erase_block(nand, row);
+		if (ops[i].status < 0) {
+			ret = ops[i].status;
+			continue;
+		}
+
+		ops[i].status = nand->read_status(nand);
+		if (ops[i].status & dev->status->erase_fail)
+			ops[i].status = -ENANDERASE;
+
+		ret = min(ret, ops[i].status);
+	}
+
+	return ret;
+}
+
+/* read first bad mark on spare */
+static int nand_chip_is_bad_block(struct nand_chip *chip,
+				  struct nand_ops *ops,
+				  int count)
+{
+	int i, ret, value;
+	int status = 0;
+	u8 *data;
+
+	/* Disable ECC */
+	value = 0;
+	ret = chip->chip_ctrl(chip, NFI_CTRL_ECC, &value);
+	if (ret)
+		return ret;
+
+	ret = chip->read_page(chip, ops, count);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < count; i++) {
+		data = ops[i].data;
+
+		if (data[chip->page_size] != 0xff) {
+			ops[i].status = -ENANDBAD;
+			status = -ENANDBAD;
+		} else
+			ops[i].status = 0;
+	}
+
+	/* Enable ECC */
+	value = 1;
+	ret = chip->chip_ctrl(chip, NFI_CTRL_ECC, &value);
+	if (ret)
+		return ret;
+
+	return status;
+}
+
+static int nand_chip_ctrl(struct nand_chip *chip, int cmd, void *args)
+{
+	return -EOPNOTSUPP;
+}
+
+static int nand_chip_suspend(struct nand_chip *chip)
+{
+	return 0;
+}
+
+static int nand_chip_resume(struct nand_chip *chip)
+{
+	return 0;
+}
+
+struct nand_chip *nand_chip_init(struct nfi_resource *res)
+{
+	struct nand_chip *chip;
+	struct nand_base *nand;
+	struct nfi *nfi;
+
+	chip = mem_alloc(1, sizeof(struct nand_chip));
+	if (!chip) {
+		pr_err("nand chip alloc fail!\n");
+		return NULL;
+	}
+
+	nfi = nfi_init(res);
+	if (!nfi) {
+		pr_err("nfi init fail!\n");
+		goto nfi_err;
+	}
+
+	nand = nand_base_init(NULL, nfi);
+	if (!nand) {
+		pr_err("nand base init fail!\n");
+		goto base_err;
+	}
+
+	chip->nand = (void *)nand;
+	chip->read_page = nand_chip_read_page;
+	chip->write_page = nand_chip_write_page;
+	chip->erase_block = nand_chip_erase_block;
+	chip->is_bad_block = nand_chip_is_bad_block;
+	chip->chip_ctrl = nand_chip_ctrl;
+	chip->suspend = nand_chip_suspend;
+	chip->resume = nand_chip_resume;
+	if (res->nand_type == NAND_SPI)
+		nand = nand_spi_init(chip);
+	else
+		nand = nand_init(chip);
+	if (!nand)
+		goto nand_err;
+
+	chip->nand = (void *)nand;
+	chip->plane_num = nand->dev->plane_num;
+	chip->block_num = nand_total_blocks(nand->dev);
+	chip->block_size = nand->dev->block_size;
+	chip->block_pages = nand_block_pages(nand->dev);
+	chip->page_size = nand->dev->page_size;
+	chip->oob_size = nfi->fdm_size * div_down(chip->page_size,
+						  nfi->sector_size);
+	chip->sector_size = nfi->sector_size;
+	chip->sector_spare_size = nfi->sector_spare_size;
+	chip->min_program_pages = nand->dev->min_program_pages;
+	chip->ecc_strength = nfi->ecc_strength;
+	chip->ecc_parity_size = nfi->ecc_parity_size;
+	chip->fdm_ecc_size = nfi->fdm_ecc_size;
+	chip->fdm_reg_size = nfi->fdm_size;
+
+	return chip;
+
+nand_err:
+	mem_free(nand);
+base_err:
+	nfi_exit(nfi);
+nfi_err:
+	mem_free(chip);
+	return NULL;
+}
+
+void nand_chip_exit(struct nand_chip *chip)
+{
+	if (chip->nand_type == NAND_SPI)
+		nand_spi_exit(chip->nand);
+	else
+		nand_exit(chip->nand);
+	mem_free(chip);
+}
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand_chip.h b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand_chip.h
new file mode 100644
index 0000000..5295138
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand_chip.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#ifndef __NAND_CHIP_H__
+#define __NAND_CHIP_H__
+
+/*
+ * nand chip operation unit
+ *    one nand_ops indicates one row operation
+ * @row: nand chip row address, like as nand row
+ * @col: nand chip column address, like as nand column
+ * @len: operate data length, min is sector_size,
+ *    max is page_size and sector_size aligned
+ * @status: one operation result status
+ * @data: data buffer for operation
+ * @oob: oob buffer for operation, like as nand spare area
+ */
+struct nand_ops {
+	int row;
+	int col;
+	int len;
+	int status;
+	void *data;
+	void *oob;
+};
+
+/*
+ * nand chip descriptions
+ *    nand chip includes nand controller and the several same nand devices
+ * @nand_type: the nand type on this chip,
+ *    the chip maybe have several nand device and the type must be same
+ * @plane_num: the whole plane number on the chip
+ * @block_num: the whole block number on the chip
+ * @block_size: nand device block size
+ * @block_pages: nand device block has page number
+ * @page_size: nand device page size
+ * @oob_size: chip out of band size, like as nand spare size,
+ *    but restricts this:
+ *    the size is provided by nand controller(NFI),
+ *    because NFI would use some nand spare size
+ * @min_program_pages: chip needs min pages per program operations
+ *    one page as one nand_ops
+ * @sector_size: chip min read size
+ * @sector_spare_size: spare size for sector, is spare_size/page_sectors
+ * @ecc_strength: ecc strength per sector_size, used to calculate ecc
+ * @ecc_parity_size: ecc parity size for one sector_size data
+ * @nand: pointer to inherited struct nand_base
+ * @read_page: read %count pages on chip
+ * @write_page: write %count pages on chip
+ * @erase_block: erase %count blocks on chip, one block is one nand_ops
+ *    it is better to set nand_ops.row to block start row
+ * @is_bad_block: judge the %count blocks on chip if they are bad
+ *    by vendor specification
+ * @chip_ctrl: control the chip features by nandx_ctrl_cmd
+ * @suspend: suspend nand chip
+ * @resume: resume nand chip
+ */
+struct nand_chip {
+	int nand_type;
+	int plane_num;
+	int block_num;
+	int block_size;
+	int block_pages;
+	int page_size;
+	int oob_size;
+
+	int min_program_pages;
+	int sector_size;
+	int sector_spare_size;
+	int ecc_strength;
+	int ecc_parity_size;
+	u32 fdm_ecc_size;
+	u32 fdm_reg_size;
+
+	void *nand;
+
+	int (*read_page)(struct nand_chip *chip, struct nand_ops *ops,
+			 int count);
+	int (*write_page)(struct nand_chip *chip, struct nand_ops *ops,
+			  int count);
+	int (*erase_block)(struct nand_chip *chip, struct nand_ops *ops,
+			   int count);
+	int (*is_bad_block)(struct nand_chip *chip, struct nand_ops *ops,
+			    int count);
+	int (*chip_ctrl)(struct nand_chip *chip, int cmd, void *args);
+	int (*suspend)(struct nand_chip *chip);
+	int (*resume)(struct nand_chip *chip);
+};
+
+struct nand_chip *nand_chip_init(struct nfi_resource *res);
+void nand_chip_exit(struct nand_chip *chip);
+#endif /* __NAND_CHIP_H__ */
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand_device.c b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand_device.c
new file mode 100644
index 0000000..2985a7a
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand_device.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "nand_chip.h"
+#include "nand_device.h"
+#include "nand_base.h"
+
+/* Platform related, should move to platform config later */
+#define MAX_CHIP_DEVICE 4
+#define PARAM_PAGE_LEN  2048
+#define ONFI_CRC_BASE   0x4f4e
+
+static u16 nand_onfi_crc16(u16 crc, u8 const *p, size_t len)
+{
+	int i;
+
+	while (len--) {
+		crc ^= *p++ << 8;
+
+		for (i = 0; i < 8; i++)
+			crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
+	}
+
+	return crc;
+}
+
+static inline void decode_addr_cycle(u8 addr_cycle, u8 *row_cycle,
+				     u8 *col_cycle)
+{
+	*row_cycle = addr_cycle & 0xf;
+	*col_cycle = (addr_cycle >> 4) & 0xf;
+}
+
+static int detect_onfi(struct nand_device *dev,
+		       struct nand_onfi_params *onfi)
+{
+	struct nand_endurance *endurance = dev->endurance;
+	u16 size, i, crc16;
+	u8 *id;
+
+	size = sizeof(struct nand_onfi_params) - sizeof(u16);
+
+	for (i = 0; i < 3; i++) {
+		crc16 = nand_onfi_crc16(ONFI_CRC_BASE, (u8 *)&onfi[i], size);
+
+		if (onfi[i].signature[0] == 'O' &&
+		    onfi[i].signature[1] == 'N' &&
+		    onfi[i].signature[2] == 'F' &&
+		    onfi[i].signature[3] == 'I' &&
+		    onfi[i].crc16 == crc16)
+			break;
+
+		/* in some spi nand, onfi signature maybe "NAND" */
+		if (onfi[i].signature[0] == 'N' &&
+		    onfi[i].signature[1] == 'A' &&
+		    onfi[i].signature[2] == 'N' &&
+		    onfi[i].signature[3] == 'D' &&
+		    onfi[i].crc16 == crc16)
+			break;
+	}
+
+	if (i == 3)
+		return -ENODEV;
+
+	memcpy(dev->name, onfi[i].model, 20);
+	id = onfi[i].manufacturer;
+	dev->id = NAND_PACK_ID(id[0], id[1], id[2], id[3], id[4], id[5], id[6],
+			       id[7]);
+	dev->id_len = MAX_ID_NUM;
+	dev->io_width = (onfi[i].features & 1) ? NAND_IO16 : NAND_IO8;
+	decode_addr_cycle(onfi[i].addr_cycle, &dev->row_cycle,
+			  &dev->col_cycle);
+	dev->target_num = 1;
+	dev->lun_num = onfi[i].lun_num;
+	dev->plane_num = BIT(onfi[i].plane_address_bits);
+	dev->block_num = onfi[i].lun_blocks / dev->plane_num;
+	dev->block_size = onfi[i].block_pages * onfi[i].page_size;
+	dev->page_size = onfi[i].page_size;
+	dev->spare_size = onfi[i].spare_size;
+
+	endurance->ecc_req = onfi[i].ecc_req;
+	endurance->pe_cycle = onfi[i].valid_block_endurance;
+	endurance->max_bitflips = endurance->ecc_req >> 1;
+
+	return 0;
+}
+
+static int detect_jedec(struct nand_device *dev,
+			struct nand_jedec_params *jedec)
+{
+	struct nand_endurance *endurance = dev->endurance;
+	u16 size, i, crc16;
+	u8 *id;
+
+	size = sizeof(struct nand_jedec_params) - sizeof(u16);
+
+	for (i = 0; i < 3; i++) {
+		crc16 = nand_onfi_crc16(ONFI_CRC_BASE, (u8 *)&jedec[i], size);
+
+		if (jedec[i].signature[0] == 'J' &&
+		    jedec[i].signature[1] == 'E' &&
+		    jedec[i].signature[2] == 'S' &&
+		    jedec[i].signature[3] == 'D' &&
+		    jedec[i].crc16 == crc16)
+			break;
+	}
+
+	if (i == 3)
+		return -ENODEV;
+
+	memcpy(dev->name, jedec[i].model, 20);
+	id = jedec[i].manufacturer;
+	dev->id = NAND_PACK_ID(id[0], id[1], id[2], id[3], id[4], id[5], id[6],
+			       id[7]);
+	dev->id_len = MAX_ID_NUM;
+	dev->io_width = (jedec[i].features & 1) ? NAND_IO16 : NAND_IO8;
+	decode_addr_cycle(jedec[i].addr_cycle, &dev->row_cycle,
+			  &dev->col_cycle);
+	dev->target_num = 1;
+	dev->lun_num = jedec[i].lun_num;
+	dev->plane_num = BIT(jedec[i].plane_address_bits);
+	dev->block_num = jedec[i].lun_blocks / dev->plane_num;
+	dev->block_size = jedec[i].block_pages * jedec[i].page_size;
+	dev->page_size = jedec[i].page_size;
+	dev->spare_size = jedec[i].spare_size;
+
+	endurance->ecc_req = jedec[i].endurance_block0[0];
+	endurance->pe_cycle = jedec[i].valid_block_endurance;
+	endurance->max_bitflips = endurance->ecc_req >> 1;
+
+	return 0;
+}
+
+static struct nand_device *detect_parameters_page(struct nand_base
+						  *nand)
+{
+	struct nand_device *dev = nand->dev;
+	void *params;
+	int ret;
+
+	params = mem_alloc(1, PARAM_PAGE_LEN);
+	if (!params)
+		return NULL;
+
+	memset(params, 0, PARAM_PAGE_LEN);
+	ret = nand->read_param_page(nand, params, PARAM_PAGE_LEN);
+	if (ret < 0) {
+		pr_err("read parameters page fail!\n");
+		goto error;
+	}
+
+	ret = detect_onfi(dev, params);
+	if (ret) {
+		pr_err("detect onfi device fail! try to detect jedec\n");
+		ret = detect_jedec(dev, params);
+		if (ret) {
+			pr_err("detect jedec device fail!\n");
+			goto error;
+		}
+	}
+
+	mem_free(params);
+	return dev;
+
+error:
+	mem_free(params);
+	return NULL;
+}
+
+static int read_device_id(struct nand_base *nand, int cs, u8 *id)
+{
+	nand->select_device(nand, cs);
+	nand->reset(nand);
+	nand->read_id(nand, id, MAX_ID_NUM);
+
+	pr_info("%d ID: %x %x %x %x %x %x\n",
+		cs, id[0], id[1], id[2], id[3], id[4], id[5]);
+
+	return 0;
+}
+
+static int detect_more_device(struct nand_base *nand, u8 *id)
+{
+	u8 id_ext[MAX_ID_NUM];
+	int i, j, target_num = 0;
+
+	for (i = 1; i < (int)MAX_CHIP_DEVICE; i++) {
+		memset(id_ext, 0xff, MAX_ID_NUM);
+		read_device_id(nand, i, id_ext);
+
+		for (j = 0; j < (int)MAX_ID_NUM; j++) {
+			if (id_ext[j] != id[j])
+				goto out;
+		}
+
+		target_num += 1;
+	}
+
+out:
+	return target_num;
+}
+
+static struct nand_device *scan_device_table(struct nand_base *nand, const u8 *id, int id_len)
+{
+	struct nand_device *dev;
+	int i = 0, j;
+	u8 ids[MAX_ID_NUM] = {0};
+
+	while (1) {
+		dev = nand->nand_get_device(i);
+
+		if (!strcmp(dev->name, "NO-DEVICE"))
+			break;
+
+		if (id_len < dev->id_len) {
+			i += 1;
+			continue;
+		}
+
+		NAND_UNPACK_ID(dev->id, ids, MAX_ID_NUM);
+		for (j = 0; j < dev->id_len; j++) {
+			if (ids[j] != id[j])
+				break;
+		}
+
+		if (j == dev->id_len)
+			break;
+
+		i += 1;
+	}
+
+	return dev;
+}
+
+int nand_detect_device(struct nand_base *nand)
+{
+	struct nand_device *dev;
+	u8 id[MAX_ID_NUM] = { 0 };
+	int target_num = 0;
+
+	/* Get nand device default setting for reset/read_id */
+	nand->dev = scan_device_table(nand, NULL, -1);
+
+	read_device_id(nand, 0, id);
+	dev = scan_device_table(nand, id, MAX_ID_NUM);
+
+	if (!strcmp(dev->name, "NO-DEVICE")) {
+		pr_info("device scan fail, detect parameters page\n");
+		dev = detect_parameters_page(nand);
+		if (!dev) {
+			pr_err("detect parameters fail\n");
+			return -ENODEV;
+		}
+	}
+
+	if (dev->target_num > 1)
+		target_num = detect_more_device(nand, id);
+
+	target_num += 1;
+	pr_debug("chip has target device num: %d\n", target_num);
+
+	if (dev->target_num != target_num)
+		dev->target_num = target_num;
+
+	nand->dev = dev;
+
+	return 0;
+}
+
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand_device.h b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand_device.h
new file mode 100644
index 0000000..f5c6718
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nand_device.h
@@ -0,0 +1,606 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#ifndef __NAND_DEVICE_H__
+#define __NAND_DEVICE_H__
+
+/* onfi 3.2 */
+struct nand_onfi_params {
+	/* Revision information and features block. 0 */
+	/*
+	 * Byte 0: 4Fh,
+	 * Byte 1: 4Eh,
+	 * Byte 2: 46h,
+	 * Byte 3: 49h,
+	 */
+	u8 signature[4];
+	/*
+	 * 9-15 Reserved (0)
+	 * 8 1 = supports ONFI version 3.2
+	 * 7 1 = supports ONFI version 3.1
+	 * 6 1 = supports ONFI version 3.0
+	 * 5 1 = supports ONFI version 2.3
+	 * 4 1 = supports ONFI version 2.2
+	 * 3 1 = supports ONFI version 2.1
+	 * 2 1 = supports ONFI version 2.0
+	 * 1 1 = supports ONFI version 1.0
+	 * 0 Reserved (0)
+	 */
+	u16 revision;
+	/*
+	 * 13-15 Reserved (0)
+	 * 12 1 = supports external Vpp
+	 * 11 1 = supports Volume addressing
+	 * 10 1 = supports NV-DDR2
+	 * 9 1 = supports EZ NAND
+	 * 8 1 = supports program page register clear enhancement
+	 * 7 1 = supports extended parameter page
+	 * 6 1 = supports multi-plane read operations
+	 * 5 1 = supports NV-DDR
+	 * 4 1 = supports odd to even page Copyback
+	 * 3 1 = supports multi-plane program and erase operations
+	 * 2 1 = supports non-sequential page programming
+	 * 1 1 = supports multiple LUN operations
+	 * 0 1 = supports 16-bit data bus width
+	 */
+	u16 features;
+	/*
+	 * 13-15 Reserved (0)
+	 * 12 1 = supports LUN Get and LUN Set Features
+	 * 11 1 = supports ODT Configure
+	 * 10 1 = supports Volume Select
+	 * 9 1 = supports Reset LUN
+	 * 8 1 = supports Small Data Move
+	 * 7 1 = supports Change Row Address
+	 * 6 1 = supports Change Read Column Enhanced
+	 * 5 1 = supports Read Unique ID
+	 * 4 1 = supports Copyback
+	 * 3 1 = supports Read Status Enhanced
+	 * 2 1 = supports Get Features and Set Features
+	 * 1 1 = supports Read Cache commands
+	 * 0 1 = supports Page Cache Program command
+	 */
+	u16 opt_cmds;
+	/*
+	 * 4-7 Reserved (0)
+	 * 3 1 = supports Multi-plane Block Erase
+	 * 2 1 = supports Multi-plane Copyback Program
+	 * 1 1 = supports Multi-plane Page Program
+	 * 0 1 = supports Random Data Out
+	 */
+	u8 advance_cmds;
+	u8 reserved0[1];
+	u16 extend_param_len;
+	u8 param_page_num;
+	u8 reserved1[17];
+
+	/* Manufacturer information block. 32 */
+	u8 manufacturer[12];
+	u8 model[20];
+	u8 jedec_id;
+	u16 data_code;
+	u8 reserved2[13];
+
+	/* Memory organization block. 80 */
+	u32 page_size;
+	u16 spare_size;
+	u32 partial_page_size; /* obsolete */
+	u16 partial_spare_size; /* obsolete */
+	u32 block_pages;
+	u32 lun_blocks;
+	u8 lun_num;
+	/*
+	 * 4-7 Column address cycles
+	 * 0-3 Row address cycles
+	 */
+	u8 addr_cycle;
+	u8 cell_bits;
+	u16 lun_max_bad_blocks;
+	u16 block_endurance;
+	u8 target_begin_valid_blocks;
+	u16 valid_block_endurance;
+	u8 page_program_num;
+	u8 partial_program_attr; /* obsolete */
+	u8 ecc_req;
+	/*
+	 * 4-7 Reserved (0)
+	 * 0-3 Number of plane address bits
+	 */
+	u8 plane_address_bits;
+	/*
+	 * 6-7 Reserved (0)
+	 * 5 1 = lower bit XNOR block address restriction
+	 * 4 1 = read cache supported
+	 * 3 Address restrictions for cache operations
+	 * 2 1 = program cache supported
+	 * 1 1 = no block address restrictions
+	 * 0 Overlapped / concurrent multi-plane support
+	 */
+	u8 multi_plane_attr;
+	u8 ez_nand_support;
+	u8 reserved3[12];
+
+	/* Electrical parameters block. 128 */
+	u8 io_pin_max_capacitance;
+	/*
+	 * 6-15 Reserved (0)
+	 * 5 1 = supports timing mode 5
+	 * 4 1 = supports timing mode 4
+	 * 3 1 = supports timing mode 3
+	 * 2 1 = supports timing mode 2
+	 * 1 1 = supports timing mode 1
+	 * 0 1 = supports timing mode 0, shall be 1
+	 */
+	u16 sdr_timing_mode;
+	u16 sdr_program_cache_timing_mode; /* obsolete */
+	u16 tPROG;
+	u16 tBERS;
+	u16 tR;
+	u16 tCCS;
+	/*
+	 * 7 Reserved (0)
+	 * 6 1 = supports NV-DDR2 timing mode 8
+	 * 5 1 = supports NV-DDR timing mode 5
+	 * 4 1 = supports NV-DDR timing mode 4
+	 * 3 1 = supports NV-DDR timing mode 3
+	 * 2 1 = supports NV-DDR timing mode 2
+	 * 1 1 = supports NV-DDR timing mode 1
+	 * 0 1 = supports NV-DDR timing mode 0
+	 */
+	u8 nvddr_timing_mode;
+	/*
+	 * 7 1 = supports timing mode 7
+	 * 6 1 = supports timing mode 6
+	 * 5 1 = supports timing mode 5
+	 * 4 1 = supports timing mode 4
+	 * 3 1 = supports timing mode 3
+	 * 2 1 = supports timing mode 2
+	 * 1 1 = supports timing mode 1
+	 * 0 1 = supports timing mode 0
+	 */
+	u8 nvddr2_timing_mode;
+	/*
+	 * 4-7 Reserved (0)
+	 * 3 1 = device requires Vpp enablement sequence
+	 * 2 1 = device supports CLK stopped for data input
+	 * 1 1 = typical capacitance
+	 * 0 tCAD value to use
+	 */
+	u8 nvddr_fetures;
+	u16 clk_pin_capacitance;
+	u16 io_pin_capacitance;
+	u16 input_pin_capacitance;
+	u8 input_pin_max_capacitance;
+	/*
+	 * 3-7 Reserved (0)
+	 * 2 1 = supports 18 Ohm drive strength
+	 * 1 1 = supports 25 Ohm drive strength
+	 * 0 1 = supports driver strength settings
+	 */
+	u8 drive_strength;
+	u16 tR_multi_plane;
+	u16 tADL;
+	u16 tR_ez_nand;
+	/*
+	 * 6-7 Reserved (0)
+	 * 5 1 = external VREFQ required for >= 200 MT/s
+	 * 4 1 = supports differential signaling for DQS
+	 * 3 1 = supports differential signaling for RE_n
+	 * 2 1 = supports ODT value of 30 Ohms
+	 * 1 1 = supports matrix termination ODT
+	 * 0 1 = supports self-termination ODT
+	 */
+	u8 nvddr2_features;
+	u8 nvddr2_warmup_cycles;
+	u8 reserved4[4];
+
+	/* vendor block. 164 */
+	u16 vendor_revision;
+	u8      vendor_spec[88];
+
+	/* CRC for Parameter Page. 254 */
+	u16 crc16;
+} __packed;
+
+/* JESD230-B */
+struct nand_jedec_params {
+	/* Revision information and features block. 0 */
+	/*
+	 * Byte 0:4Ah
+	 * Byte 1:45h
+	 * Byte 2:53h
+	 * Byte 3:44h
+	 */
+	u8 signature[4];
+	/*
+	 * 3-15: Reserved (0)
+	 * 2: 1 = supports parameter page revision 1.0 and standard revision 1.0
+	 * 1: 1 = supports vendor specific parameter page
+	 * 0: Reserved (0)
+	 */
+	u16 revision;
+	/*
+	 * 9-15 Reserved (0)
+	 * 8: 1 = supports program page register clear enhancement
+	 * 7: 1 = supports external Vpp
+	 * 6: 1 = supports Toggle Mode DDR
+	 * 5: 1 = supports Synchronous DDR
+	 * 4: 1 = supports multi-plane read operations
+	 * 3: 1 = supports multi-plane program and erase operations
+	 * 2: 1 = supports non-sequential page programming
+	 * 1: 1 = supports multiple LUN operations
+	 * 0: 1 = supports 16-bit data bus width
+	 */
+	u16 features;
+	/*
+	 * 11-23: Reserved (0)
+	 * 10: 1 = supports Synchronous Reset
+	 * 9: 1 = supports Reset LUN (Primary)
+	 * 8: 1 = supports Small Data Move
+	 * 7: 1 = supports Multi-plane Copyback Program (Primary)
+	 * 6: 1 = supports Random Data Out (Primary)
+	 * 5: 1 = supports Read Unique ID
+	 * 4: 1 = supports Copyback
+	 * 3: 1 = supports Read Status Enhanced (Primary)
+	 * 2: 1 = supports Get Features and Set Features
+	 * 1: 1 = supports Read Cache commands
+	 * 0: 1 = supports Page Cache Program command
+	 */
+	u8 opt_cmds[3];
+	/*
+	 * 8-15: Reserved (0)
+	 * 7: 1 = supports secondary Read Status Enhanced
+	 * 6: 1 = supports secondary Multi-plane Block Erase
+	 * 5: 1 = supports secondary Multi-plane Copyback Program
+	 * 4: 1 = supports secondary Multi-plane Program
+	 * 3: 1 = supports secondary Random Data Out
+	 * 2: 1 = supports secondary Multi-plane Copyback Read
+	 * 1: 1 = supports secondary Multi-plane Read Cache Random
+	 * 0: 1 = supports secondary Multi-plane Read
+	 */
+	u16 secondary_cmds;
+	u8 param_page_num;
+	u8 reserved0[18];
+
+	/* Manufacturer information block. 32*/
+	u8 manufacturer[12];
+	u8 model[20];
+	u8 jedec_id[6];
+	u8 reserved1[10];
+
+	/* Memory organization block. 80 */
+	u32 page_size;
+	u16 spare_size;
+	u8 reserved2[6];
+	u32 block_pages;
+	u32 lun_blocks;
+	u8 lun_num;
+	/*
+	 * 4-7 Column address cycles
+	 * 0-3 Row address cycles
+	 */
+	u8 addr_cycle;
+	u8 cell_bits;
+	u8 page_program_num;
+	/*
+	 * 4-7 Reserved (0)
+	 * 0-3 Number of plane address bits
+	 */
+	u8 plane_address_bits;
+	/*
+	 * 3-7: Reserved (0)
+	 * 2: 1= read cache supported
+	 * 1: 1 = program cache supported
+	 * 0: 1= No multi-plane block address restrictions
+	 */
+	u8 multi_plane_attr;
+	u8 reserved3[38];
+
+	/* Electrical parameters block. 144 */
+	/*
+	 * 6-15: Reserved (0)
+	 * 5: 1 = supports 20 ns speed grade (50 MHz)
+	 * 4: 1 = supports 25 ns speed grade (40 MHz)
+	 * 3: 1 = supports 30 ns speed grade (~33 MHz)
+	 * 2: 1 = supports 35 ns speed grade (~28 MHz)
+	 * 1: 1 = supports 50 ns speed grade (20 MHz)
+	 * 0: 1 = supports 100 ns speed grade (10 MHz)
+	 */
+	u16 sdr_speed;
+	/*
+	 * 8-15: Reserved (0)
+	 * 7: 1 = supports 5 ns speed grade (200 MHz)
+	 * 6: 1 = supports 6 ns speed grade (~166 MHz)
+	 * 5: 1 = supports 7.5 ns speed grade (~133 MHz)
+	 * 4: 1 = supports 10 ns speed grade (100 MHz)
+	 * 3: 1 = supports 12 ns speed grade (~83 MHz)
+	 * 2: 1 = supports 15 ns speed grade (~66 MHz)
+	 * 1: 1 = supports 25 ns speed grade (40 MHz)
+	 * 0: 1 = supports 30 ns speed grade (~33 MHz)
+	 */
+	u16 toggle_ddr_speed;
+	/*
+	 * 6-15: Reserved (0)
+	 * 5: 1 = supports 10 ns speed grade (100 MHz)
+	 * 4: 1 = supports 12 ns speed grade (~83 MHz)
+	 * 3: 1 = supports 15 ns speed grade (~66 MHz)
+	 * 2: 1 = supports 20 ns speed grade (50 MHz)
+	 * 1: 1 = supports 30 ns speed grade (~33 MHz)
+	 * 0: 1 = supports 50 ns speed grade (20 MHz)
+	 */
+	u16 sync_ddr_speed;
+	u8 sdr_features;
+	u8 toggle_ddr_features;
+	/*
+	 * 2-7: Reserved (0)
+	 * 1: Device supports CK stopped for data input
+	 * 0: tCAD value to use
+	 */
+	u8 sync_ddr_features;
+	u16 tPROG;
+	u16 tBERS;
+	u16 tR;
+	u16 tR_multi_plane;
+	u16 tCCS;
+	u16 io_pin_capacitance;
+	u16 input_pin_capacitance;
+	u16 ck_pin_capacitance;
+	/*
+	 * 3-7: Reserved (0)
+	 * 2: 1 = supports 18 ohm drive strength
+	 * 1: 1 = supports 25 ohm drive strength
+	 * 0: 1 = supports 35ohm/50ohm drive strength
+	 */
+	u8 drive_strength;
+	u16 tADL;
+	u8 reserved4[36];
+
+	/* ECC and endurance block. 208 */
+	u8 target_begin_valid_blocks;
+	u16 valid_block_endurance;
+	/*
+	 * Byte 0: Number of bits ECC correctability
+	 * Byte 1: Codeword size
+	 * Byte 2-3: Bad blocks maximum per LUN
+	 * Byte 4-5: Block endurance
+	 * Byte 6-7: Reserved (0)
+	 */
+	u8 endurance_block0[8];
+	u8 endurance_block1[8];
+	u8 endurance_block2[8];
+	u8 endurance_block3[8];
+	u8 reserved5[29];
+
+	/* Reserved. 272 */
+	u8 reserved6[148];
+
+	/* Vendor specific block. 420  */
+	u16 vendor_revision;
+	u8      vendor_spec[88];
+
+	/* CRC for Parameter Page. 510 */
+	u16 crc16;
+} __packed;
+
+/* parallel nand io width */
+enum nand_io_width {
+	NAND_IO8,
+	NAND_IO16
+};
+
+/* all supported nand timing types */
+enum nand_timing_type {
+	NAND_TIMING_SDR,
+	NAND_TIMING_SYNC_DDR,
+	NAND_TIMING_TOGGLE_DDR,
+	NAND_TIMING_NVDDR2
+};
+
+/* nand basic commands */
+struct nand_cmds {
+	short reset;
+	short read_id;
+	short read_status;
+	short read_param_page;
+	short set_feature;
+	short get_feature;
+	short read_1st;
+	short read_2nd;
+	short random_out_1st;
+	short random_out_2nd;
+	short program_1st;
+	short program_2nd;
+	short erase_1st;
+	short erase_2nd;
+	short read_cache;
+	short read_cache_last;
+	short program_cache;
+};
+
+/*
+ * addressing for nand physical address
+ * @row_bit_start: row address start bit
+ * @block_bit_start: block address start bit
+ * @plane_bit_start: plane address start bit
+ * @lun_bit_start: lun address start bit
+ */
+struct nand_addressing {
+	u8 row_bit_start;
+	u8 block_bit_start;
+	u8 plane_bit_start;
+	u8 lun_bit_start;
+};
+
+/*
+ * nand operations status
+ * @array_busy: indicates device array operation busy
+ * @write_protect: indicates the device cannot be wrote or erased
+ * @erase_fail: indicates erase operation fail
+ * @program_fail: indicates program operation fail
+ */
+struct nand_status {
+	u8 array_busy;
+	u8 write_protect;
+	u8 erase_fail;
+	u8 program_fail;
+};
+
+/*
+ * nand endurance information
+ * @pe_cycle: max program/erase cycle for nand stored data stability
+ * @ecc_req: ecc strength required for the nand, measured per 1KB
+ * @max_bitflips: bitflips is ecc corrected bits,
+ *    max_bitflips is the threshold for nand stored data stability
+ *    if corrected bits is over max_bitflips, stored data must be moved
+ *    to another good block
+ */
+struct nand_endurance {
+	int pe_cycle;
+	int ecc_req;
+	int max_bitflips;
+};
+
+/* wait for nand busy type */
+enum nand_wait_type {
+	NAND_WAIT_BUSY,
+	NAND_WAIT_TIME,
+};
+
+/* each nand array operations time */
+struct nand_array_timing {
+	u16 tRST;
+	u16 tWHR;
+	u16 tR;
+	u16 tRCBSY;
+	u16 tFEAT;
+	u16 tPROG;
+	u16 tPCBSY;
+	u16 tBERS;
+	u16 tDBSY;
+};
+
+/* nand sdr interface timing required */
+struct nand_sdr_timing {
+	u16 tREA;
+	u16 tREH;
+	u16 tCR;
+	u16 tRP;
+	u16 tWP;
+	u16 tWH;
+	u16 tWHR;
+	u16 tCLS;
+	u16 tALS;
+	u16 tCLH;
+	u16 tALH;
+	u16 tWC;
+	u16 tRC;
+};
+
+/* nand onfi ddr (nvddr) interface timing required */
+struct nand_onfi_timing {
+	u16 tCAD;
+	u16 tWPRE;
+	u16 tWPST;
+	u16 tWRCK;
+	u16 tDQSCK;
+	u16 tWHR;
+};
+
+/* nand toggle ddr (toggle 1.0) interface timing required */
+struct nand_toggle_timing {
+	u16 tCS;
+	u16 tCH;
+	u16 tCAS;
+	u16 tCAH;
+	u16 tCALS;
+	u16 tCALH;
+	u16 tWP;
+	u16 tWPRE;
+	u16 tWPST;
+	u16 tWPSTH;
+	u16 tCR;
+	u16 tRPRE;
+	u16 tRPST;
+	u16 tRPSTH;
+	u16 tCDQSS;
+	u16 tWHR;
+};
+
+/*
+ * nand basic device information
+ * @name: human readable name; "NO-DEVICE" marks the table sentinel
+ * @id: device ID bytes packed into a u64 (see NAND_PACK_ID)
+ * @id_len: number of valid bytes in @id
+ * @io_width: bus width, see enum nand_io_width
+ * @row_cycle: row address cycles
+ * @col_cycle: column address cycles
+ * @target_num: number of targets (chip selects) on the chip
+ * @lun_num: LUNs per target
+ * @plane_num: planes per LUN
+ * @block_num: blocks per plane
+ * @block_size: block size in bytes
+ * @page_size: page size in bytes
+ * @spare_size: spare (oob) bytes per page
+ * @min_program_pages: minimum number of pages one program must cover
+ * @cmds: opcode set for this device
+ * @addressing: physical address bit layout
+ * @status: status register bit definitions
+ * @endurance: P/E cycle and ecc requirements
+ * @array_timing: array operation durations
+ */
+struct nand_device {
+	u8 *name;
+	u64 id;
+	u8 id_len;
+	u8 io_width;
+	u8 row_cycle;
+	u8 col_cycle;
+	u8 target_num;
+	u8 lun_num;
+	u8 plane_num;
+	int block_num;
+	int block_size;
+	int page_size;
+	int spare_size;
+	int min_program_pages;
+	struct nand_cmds *cmds;
+	struct nand_addressing *addressing;
+	struct nand_status *status;
+	struct nand_endurance *endurance;
+	struct nand_array_timing *array_timing;
+};
+
+/* initializer helper matching struct nand_device field order */
+#define NAND_DEVICE(_name, _id, _id_len, _io_width, _row_cycle, \
+		    _col_cycle, _target_num, _lun_num, _plane_num, \
+		    _block_num, _block_size, _page_size, _spare_size, \
+		    _min_program_pages, _cmds, _addressing, _status, \
+		    _endurance, _array_timing) \
+{ \
+	_name, _id, _id_len, _io_width, _row_cycle, \
+	_col_cycle, _target_num, _lun_num, _plane_num, \
+	_block_num, _block_size, _page_size, _spare_size, \
+	_min_program_pages, _cmds, _addressing, _status, \
+	_endurance, _array_timing \
+}
+
+/* number of ID bytes carried in the packed 64-bit device ID */
+#define MAX_ID_NUM      sizeof(u64)
+
+/*
+ * Pack up to eight ID bytes into one u64, id0 in the least significant
+ * byte.  Arguments are parenthesized so expression arguments (e.g.
+ * "a + b") expand safely inside the shifts.
+ */
+#define NAND_PACK_ID(id0, id1, id2, id3, id4, id5, id6, id7) \
+	( \
+	  (u64)(id0) | ((u64)(id1) << 8) | ((u64)(id2) << 16) | \
+	  ((u64)(id3) << 24) | ((u64)(id4) << 32) | ((u64)(id5) << 40) | \
+	  ((u64)(id6) << 48) | ((u64)(id7) << 56) \
+	)
+
+/*
+ * Unpack the packed 64-bit ID into a byte array; inverse of
+ * NAND_PACK_ID.  Arguments are parenthesized for expression safety.
+ */
+#define NAND_UNPACK_ID(id, ids, len) \
+	do { \
+		int _i; \
+		for (_i = 0; _i < (len); _i++) \
+			(ids)[_i] = ((u64)(id) >> (_i << 3)) & 0xff; \
+	} while (0)
+
+/* pages per block, derived from the byte sizes */
+static inline int nand_block_pages(struct nand_device *device)
+{
+	return div_down(device->block_size, device->page_size);
+}
+
+/* blocks per LUN (across all planes) */
+static inline int nand_lun_blocks(struct nand_device *device)
+{
+	return device->plane_num * device->block_num;
+}
+
+/* blocks per target/chip-select (across all LUNs) */
+static inline int nand_target_blocks(struct nand_device *device)
+{
+	return device->lun_num * device->plane_num * device->block_num;
+}
+
+/* blocks in the whole chip (across all targets) */
+static inline int nand_total_blocks(struct nand_device *device)
+{
+	return device->target_num * device->lun_num * device->plane_num *
+	       device->block_num;
+}
+
+/* device table accessors, one per interface type (parallel / spi) */
+struct nand_device *nand_get_device(int index);
+struct nand_device *nand_spi_get_device(int index);
+#endif /* __NAND_DEVICE_H__ */
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi.h b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi.h
new file mode 100644
index 0000000..94af02d
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#ifndef __NFI_H__
+#define __NFI_H__
+
+/*
+ * page layout requested by the upper layer
+ * @page_size: page size in bytes
+ * @spare_size: spare (oob) bytes per page
+ * @ecc_req: required ecc strength, in correctable bits per 1KB
+ */
+struct nfi_format {
+	int page_size;
+	int spare_size;
+	int ecc_req;
+};
+
+/*
+ * nand flash interface (NFI) controller abstraction.
+ * A "sector" is the per-ecc-step data unit (512 or 1024 bytes).
+ */
+struct nfi {
+	int sector_size;
+	int sector_spare_size;
+	int fdm_size; /*for sector*/
+	int fdm_ecc_size;
+	int ecc_strength;
+	int ecc_parity_size; /*for sector*/
+
+	/* configuration ops */
+	int (*select_chip)(struct nfi *nfi, int cs);
+	int (*set_format)(struct nfi *nfi, struct nfi_format *format);
+	int (*set_timing)(struct nfi *nfi, void *timing, int type);
+	int (*nfi_ctrl)(struct nfi *nfi, int cmd, void *args);
+
+	/* command/address sequencing ops */
+	int (*reset)(struct nfi *nfi);
+	int (*send_cmd)(struct nfi *nfi, short cmd);
+	int (*send_addr)(struct nfi *nfi, int col, int row,
+			 int col_cycle, int row_cycle);
+	int (*trigger)(struct nfi *nfi);
+
+	/* data transfer ops */
+	int (*write_page)(struct nfi *nfi, u8 *data, u8 *fdm);
+	int (*write_bytes)(struct nfi *nfi, u8 *data, int count);
+	int (*read_sectors)(struct nfi *nfi, u8 *data, u8 *fdm,
+			    int sectors);
+	int (*read_bytes)(struct nfi *nfi, u8 *data, int count);
+
+	/* wait for device ready, by busy polling or fixed time */
+	int (*wait_ready)(struct nfi *nfi, int type, u32 timeout);
+
+	/* hardware randomizer control (no-ops when unsupported) */
+	int (*enable_randomizer)(struct nfi *nfi, u32 row, bool encode);
+	int (*disable_randomizer)(struct nfi *nfi);
+};
+
+/* constructor/destructor, implemented per controller generation */
+struct nfi *nfi_init(struct nfi_resource *res);
+void nfi_exit(struct nfi *nfi);
+
+#endif /* __NFI_H__ */
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi/nfi_base.c b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi/nfi_base.c
new file mode 100644
index 0000000..679a426
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi/nfi_base.c
@@ -0,0 +1,1585 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+/**
+ * nfi_base.c - the base logic for nfi to access nand flash
+ *
+ * slc/mlc/tlc could use the same code to access nand;
+ * of course, there is still some work to do.
+ * Even for spi nand, there should be a chance to integrate the code together.
+ */
+
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "../nfi.h"
+#include "../nand_device.h"
+#include "../nand_chip.h"
+#include "nfi_regs.h"
+#include "nfiecc.h"
+#include "nfi_base.h"
+
+/*
+ * Per-sector spare sizes selectable by the v1.0 NFI; the array index is
+ * the PAGEFMT spare-size register encoding.
+ * NOTE(review): the table is not strictly ascending (61 follows 62)
+ * while adjust_spare() scans it as if sorted -- confirm the order
+ * against the IP register encoding.
+ */
+static const int spare_size_v10[] = {
+	16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51,
+	52, 62, 61, 63, 64, 67, 74
+};
+
+/* IRQ schedule time, the unit is us */
+#define IRQ_TIMEOUT	500000
+
+#ifdef NFI_RANDOM_SUPPORT
+/* pick the seed/enable register fields for encode vs decode direction */
+#define RAND_SEED_SHIFT(op) \
+	((op) == RAND_ENCODE ? ENCODE_SEED_SHIFT : DECODE_SEED_SHIFT)
+#define RAND_EN(op) \
+	((op) == RAND_ENCODE ? RAN_ENCODE_EN : RAN_DECODE_EN)
+
+/* per-page seed table; seed is selected by row (page) address below */
+#define SS_SEED_NUM     128
+static u16 ss_randomizer_seed[SS_SEED_NUM] = {
+	0x576A, 0x05E8, 0x629D, 0x45A3, 0x649C, 0x4BF0, 0x2342, 0x272E,
+	0x7358, 0x4FF3, 0x73EC, 0x5F70, 0x7A60, 0x1AD8, 0x3472, 0x3612,
+	0x224F, 0x0454, 0x030E, 0x70A5, 0x7809, 0x2521, 0x484F, 0x5A2D,
+	0x492A, 0x043D, 0x7F61, 0x3969, 0x517A, 0x3B42, 0x769D, 0x0647,
+	0x7E2A, 0x1383, 0x49D9, 0x07B8, 0x2578, 0x4EEC, 0x4423, 0x352F,
+	0x5B22, 0x72B9, 0x367B, 0x24B6, 0x7E8E, 0x2318, 0x6BD0, 0x5519,
+	0x1783, 0x18A7, 0x7B6E, 0x7602, 0x4B7F, 0x3648, 0x2C53, 0x6B99,
+	0x0C23, 0x67CF, 0x7E0E, 0x4D8C, 0x5079, 0x209D, 0x244A, 0x747B,
+	0x350B, 0x0E4D, 0x7004, 0x6AC3, 0x7F3E, 0x21F5, 0x7A15, 0x2379,
+	0x1517, 0x1ABA, 0x4E77, 0x15A1, 0x04FA, 0x2D61, 0x253A, 0x1302,
+	0x1F63, 0x5AB3, 0x049A, 0x5AE8, 0x1CD7, 0x4A00, 0x30C8, 0x3247,
+	0x729C, 0x5034, 0x2B0E, 0x57F2, 0x00E4, 0x575B, 0x6192, 0x38F8,
+	0x2F6A, 0x0C14, 0x45FC, 0x41DF, 0x38DA, 0x7AE1, 0x7322, 0x62DF,
+	0x5E39, 0x0E64, 0x6D85, 0x5951, 0x5937, 0x6281, 0x33A1, 0x6A32,
+	0x3A5A, 0x2BAC, 0x743A, 0x5E74, 0x3B2E, 0x7EC7, 0x4FD2, 0x5D28,
+	0x751F, 0x3EF8, 0x39B1, 0x4E49, 0x746B, 0x6EF6, 0x44BE, 0x6DB7
+};
+
+/*
+ * Enable the hardware randomizer for the page at @row.  @encode selects
+ * write (encode) vs read (decode) direction; the seed is derived from
+ * the row address so each page is scrambled reproducibly.
+ */
+static int nfi_enable_randomizer(struct nfi *nfi, u32 row, bool encode)
+{
+	struct nfi_base *nb = nfi_to_base(nfi);
+	enum randomizer_op op = RAND_ENCODE;
+	void *regs = nb->res.nfi_regs;
+	u32 val;
+
+	if (!encode)
+		op = RAND_DECODE;
+
+	/* randomizer type and reseed type setup */
+	val = readl(regs + NFI_CNFG);
+	val |= CNFG_RAND_SEL | CNFG_RESEED_SEC_EN;
+	writel(val, regs + NFI_CNFG);
+
+	/* randomizer seed and type setup */
+	val = ss_randomizer_seed[row % SS_SEED_NUM] & RAN_SEED_MASK;
+	val <<= RAND_SEED_SHIFT(op);
+	val |= RAND_EN(op);
+	writel(val, regs + NFI_RANDOM_CNFG);
+
+	return 0;
+}
+
+/* turn the randomizer off by clearing its whole config register */
+static int nfi_disable_randomizer(struct nfi *nfi)
+{
+	struct nfi_base *nb = nfi_to_base(nfi);
+
+	writel(0, nb->res.nfi_regs + NFI_RANDOM_CNFG);
+
+	return 0;
+}
+#else
+/* randomizer not built in: succeed silently so callers need no ifdefs */
+static int nfi_enable_randomizer(struct nfi *nfi, u32 row, bool encode)
+{
+	/* Not support */
+	return 0;
+}
+
+static int nfi_disable_randomizer(struct nfi *nfi)
+{
+	/* Not support */
+	return 0;
+}
+#endif
+
+/*
+ * nfi_irq_handler - NFI interrupt handler
+ * @irq: interrupt number (unused, identification comes from @data)
+ * @data: the struct nfi embedded in a struct nfi_base
+ *
+ * Masks the NFI interrupt, validates that a pending, enabled source
+ * fired, completes the waiter's event and unmasks again.
+ * NOTE(review): on the spurious-interrupt path (status & en == 0) the
+ * handler returns via "done" without re-enabling the irq, leaving it
+ * masked -- confirm this is intentional.
+ */
+static irqreturn_t nfi_irq_handler(int irq, void *data)
+{
+	struct nfi_base *nb = nfi_to_base(data);
+	void *regs = nb->res.nfi_regs;
+	u16 status, en;
+	int ret = 0;
+
+	nandx_irq_disable(nb->res.nfi_irq_id);
+
+	status = readl(regs + NFI_INTR_STA);
+	en = readl(regs + NFI_INTR_EN);
+
+	/* no enabled source pending: not ours */
+	if (!(status & en)) {
+		pr_err("nfi irq, error, status:0x%x, en:0x%x\n", status, en);
+		ret = NAND_IRQ_NONE;
+		goto done;
+	}
+
+	/* wake whoever is blocked in wait_ready_irq()/dma wait */
+	nandx_event_complete(nb->done);
+
+	nandx_irq_enable(nb->res.nfi_irq_id);
+
+	pr_debug("%s irq occur status:0x%x en:0x%x\n", __func__, status, en);
+
+	ret = NAND_IRQ_HANDLED;
+done:
+	return ret;
+}
+
+/* Route all following NFI operations to chip select @cs. */
+static int nfi_select_chip(struct nfi *nfi, int cs)
+{
+	writel(cs, nfi_to_base(nfi)->res.nfi_regs + NFI_CSEL);
+
+	return 0;
+}
+
+/* Replace the OP_MODE field of NFI_CNFG, preserving all other bits. */
+static inline void set_op_mode(void *regs, u32 mode)
+{
+	u32 cnfg = readl(regs + NFI_CNFG);
+
+	cnfg = (cnfg & ~CNFG_OP_MODE_MASK) | mode;
+	writel(cnfg, regs + NFI_CNFG);
+}
+
+/*
+ * nfi_reset - reset the NFI controller
+ * @nfi: nfi instance
+ *
+ * Flushes the FIFO, resets the NFI registers and waits for the bus
+ * masters to go idle.  Returns 0 on success or the poll-timeout error.
+ */
+static int nfi_reset(struct nfi *nfi)
+{
+	struct nfi_base *nb = nfi_to_base(nfi);
+	void *regs = nb->res.nfi_regs;
+	int ret, val;
+
+	/* The NFI reset resets all registers and forces the NFI
+	 * master to be terminated early
+	 */
+	writel(CON_FIFO_FLUSH | CON_NFI_RST, regs + NFI_CON);
+
+	/* check state of NFI internal FSM and NAND interface FSM */
+	ret = readl_poll_timeout_atomic(regs + NFI_MASTER_STA, val,
+					!(val & MASTER_BUS_BUSY),
+					10, NFI_TIMEOUT);
+	if (ret)
+		pr_warn("nfi reset timeout...\n");
+
+	/* flush/reset once more, then clear the strobe data register */
+	writel(CON_FIFO_FLUSH | CON_NFI_RST, regs + NFI_CON);
+	writew(STAR_DE, regs + NFI_STRDATA);
+
+	return ret;
+}
+
+/*
+ * bad_mark_swap - swap the factory bad-block mark with its FDM byte
+ * @nfi: nfi instance
+ * @buf: page data buffer for the current transfer (may be NULL)
+ * @fdm: FDM (free oob) buffer for the current transfer (may be NULL)
+ *
+ * The controller's sector layout moves the byte that sits at the
+ * factory bad-block-mark position; swap it back so the mark stays at
+ * the physical location on flash.  Only meaningful for ecc accesses
+ * whose sector window covers the bad-mark sector.
+ * NOTE(review): the window check uses "> start_sector + rw_sectors"
+ * (inclusive upper bound); a mark sector exactly at the end looks like
+ * a possible off-by-one -- confirm.
+ */
+static void bad_mark_swap(struct nfi *nfi, u8 *buf, u8 *fdm)
+{
+	struct nfi_base *nb = nfi_to_base(nfi);
+	u32 start_sector = div_down(nb->col, nfi->sector_size);
+	u32 data_mark_pos;
+	u8 temp;
+
+	/* raw access, no need to do swap. */
+	if (!nb->ecc_en)
+		return;
+
+	if (!buf || !fdm)
+		return;
+
+	/* bad-mark sector not covered by this transfer */
+	if (nb->bad_mark_ctrl.sector < start_sector ||
+	    nb->bad_mark_ctrl.sector > start_sector + nb->rw_sectors)
+		return;
+
+	/* offset of the mark byte inside the data buffer */
+	data_mark_pos = nb->bad_mark_ctrl.position +
+			(nb->bad_mark_ctrl.sector - start_sector) *
+			nfi->sector_size;
+
+	temp = *fdm;
+	*fdm = *(buf + data_mark_pos);
+	*(buf + data_mark_pos) = temp;
+}
+
+/*
+ * fdm_shift - map a sector index to its FDM data inside the oob buffer
+ * @nfi: nfi instance
+ * @fdm: base of the free-oob buffer, or NULL
+ * @sector: sector index within the page
+ *
+ * Returns a pointer to @sector's FDM bytes, or NULL if @fdm is NULL.
+ */
+static u8 *fdm_shift(struct nfi *nfi, u8 *fdm, int sector)
+{
+	struct nfi_base *nb = nfi_to_base(nfi);
+	u8 *pos;
+
+	if (!fdm)
+		return NULL;
+
+	/* map the sector's FDM data to free oob:
+	 * the beginning of the oob area stores the FDM data of bad mark sectors
+	 */
+	if (sector < nb->bad_mark_ctrl.sector)
+		pos = fdm + (sector + 1) * nfi->fdm_size;
+	else if (sector == nb->bad_mark_ctrl.sector)
+		pos = fdm;
+	else
+		pos = fdm + sector * nfi->fdm_size;
+
+	return pos;
+}
+
+/*
+ * set_bad_mark_ctrl - locate the factory bad-block mark in NFI layout
+ * @nb: nfi base instance
+ *
+ * The mark physically sits at byte offset page_size on flash; in the
+ * controller's interleaved (sector + spare) layout that lands inside
+ * sector page_size / (sector + spare) at offset page_size % (...).
+ * Also installs the swap/shift helpers used by the read/write paths.
+ * (reminder() is presumably the project's modulo helper -- verify.)
+ */
+static void set_bad_mark_ctrl(struct nfi_base *nb)
+{
+	int temp, page_size = nb->format.page_size;
+
+	nb->bad_mark_ctrl.bad_mark_swap = bad_mark_swap;
+	nb->bad_mark_ctrl.fdm_shift = fdm_shift;
+
+	temp = nb->nfi.sector_size + nb->nfi.sector_spare_size;
+	nb->bad_mark_ctrl.sector = div_down(page_size, temp);
+	nb->bad_mark_ctrl.position = reminder(page_size, temp);
+}
+
+
+/*
+ * setup_spare_format - program spare/FDM sizes into NFI_PAGEFMT
+ * @nb: nfi base instance
+ * @spare_idx: index into the supported spare-size table (the register
+ *             encoding returned by adjust_spare())
+ */
+static void setup_spare_format(struct nfi_base *nb, int spare_idx)
+{
+	struct nfi *nfi = &nb->nfi;
+	u32 val = readl(nb->res.nfi_regs + NFI_PAGEFMT);
+
+	/* rewrite the spare, FDM and FDM-ecc fields, keep the rest */
+	val &= ~PAGEFMT_SPARE_MASK;
+	val &= ~PAGEFMT_FDM_MASK;
+	val &= ~PAGEFMT_FDM_ECC_MASK;
+	val |= spare_idx << PAGEFMT_SPARE_SHIFT;
+	val |= nfi->fdm_size << PAGEFMT_FDM_SHIFT;
+	val |= nfi->fdm_ecc_size << PAGEFMT_FDM_ECC_SHIFT;
+	writel(val, nb->res.nfi_regs + NFI_PAGEFMT);
+
+	if (nb->cus_sec_size_en) {
+		/*TODO: check if right about custom sector setting*/
+		val = nfi->sector_spare_size + nfi->sector_size;
+		val |= SECCUS_SIZE_EN;
+		writel(val, nb->res.nfi_regs + NFI_SECCUS_SIZE);
+	}
+}
+
+/*
+ * setup_page_format - program the page-size field of NFI_PAGEFMT
+ * @nb: nfi base instance
+ *
+ * The register encoding apparently counts in 512-byte sector units, so
+ * for the same physical page size the code shifts one step when the
+ * ecc sector is 512 instead of 1024 (hence the PAGEFMT_SEC_SEL_512
+ * pairing below).  Returns 0 or -EINVAL for an unsupported page size.
+ */
+static int setup_page_format(struct nfi_base *nb)
+{
+	struct nfi *nfi = &nb->nfi;
+	u32 page_size = nb->format.page_size;
+	u32 val, tmp;
+
+	switch (page_size) {
+	case 512:
+		val = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512;
+		break;
+
+	case KB(2):
+		if (nfi->sector_size == 512)
+			val = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512;
+		else
+			val = PAGEFMT_512_2K;
+
+		break;
+
+	case KB(4):
+		if (nfi->sector_size == 512)
+			val = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512;
+		else
+			val = PAGEFMT_2K_4K;
+
+		break;
+
+	case KB(8):
+		if (nfi->sector_size == 512)
+			val = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512;
+		else
+			val = PAGEFMT_4K_8K;
+
+		break;
+
+	case KB(16):
+		val = PAGEFMT_8K_16K;
+		break;
+
+	default:
+		pr_err("invalid page len: %d\n", page_size);
+		return -EINVAL;
+	}
+
+	/* merge the page-size code into the register, keep other fields */
+	tmp = readl(nb->res.nfi_regs + NFI_PAGEFMT);
+	tmp &= ~PAGEFMT_PAGE_MASK;
+	tmp |= val;
+	writel(tmp, nb->res.nfi_regs + NFI_PAGEFMT);
+
+	return 0;
+}
+
+/*
+ * adjust_spare - clamp *spare to a supported per-sector spare size
+ * @nb: nfi base instance
+ * @spare: in: requested spare bytes per sector; out: the largest
+ *         supported size not exceeding the request
+ *
+ * Returns the index into caps->spare_size (the PAGEFMT encoding) or
+ * -EINVAL if the request is below the smallest supported size.  Table
+ * values count 512-byte sectors, so they are doubled for 1024B sectors.
+ * NOTE(review): the scan assumes the table is ascending; the v1.0
+ * table is not strictly sorted (62 before 61) -- confirm.
+ */
+static int adjust_spare(struct nfi_base *nb, int *spare)
+{
+	int multi = nb->nfi.sector_size == 512 ? 1 : 2;
+	int i, count = nb->caps->spare_size_num;
+
+	/* at or above the maximum: clamp to the largest entry */
+	if (*spare >= nb->caps->spare_size[count - 1] * multi) {
+		*spare = nb->caps->spare_size[count - 1] * multi;
+		return count - 1;
+	}
+
+	/* below the minimum: cannot be satisfied */
+	if (*spare < nb->caps->spare_size[0] * multi)
+		return -EINVAL;
+
+	/* otherwise pick the largest entry not exceeding the request */
+	for (i = 1; i < count; i++) {
+		if (*spare < nb->caps->spare_size[i] * multi) {
+			*spare = nb->caps->spare_size[i - 1] * multi;
+			return i - 1;
+		}
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * nfi_set_format - negotiate sector/spare/FDM/ecc layout for a device
+ * @nfi: nfi instance
+ * @format: page size, spare size and required ecc strength (per 1KB)
+ *
+ * Chooses the ecc sector size, splits the spare area between FDM bytes
+ * and ecc parity so that the required strength fits (clamping to the
+ * controller's supported range), allocates the bounce buffer on first
+ * use, and programs the result into the NFI format registers.
+ * Returns 0 on success or a negative error code.
+ */
+static int nfi_set_format(struct nfi *nfi, struct nfi_format *format)
+{
+	struct nfi_base *nb = nfi_to_base(nfi);
+	struct nfiecc *ecc = nb->ecc;
+	int ecc_strength = format->ecc_req;
+	int min_fdm, min_ecc, max_ecc;
+	u32 temp, page_sectors;
+	int spare_idx = 0;
+
+	/* lazily allocate the page-sized bounce buffer */
+	if (!nb->buf) {
+#if NANDX_BULK_IO_USE_DRAM
+		nb->buf = (u8 *)NANDX_NFI_BUF_ADDR;
+#else
+		nb->buf = (u8 *)pmem_alloc(1, format->page_size + format->spare_size);
+#endif
+		if (!nb->buf)
+			return -ENOMEM;
+	}
+
+#ifdef NANDX_TEST_BUF_ALIGN
+	if (!nb->buf_align) {
+#if NANDX_BULK_IO_USE_DRAM
+		nb->buf_align = NANDX_NFI_BUF_ALIGN_ADDR;
+#else
+		nb->buf_align = mem_alloc(1,
+					  format->page_size + format->spare_size + 0x100);
+#endif
+		if (!nb->buf_align)
+			return -ENOMEM;
+	}
+#endif
+
+	/* platform override of the spare size reported by the device */
+	if (nb->res.force_spare_size) {
+		format->spare_size = nb->res.force_spare_size;
+		pr_debug("%s force spare_size to %d\n", __func__, format->spare_size);
+	}
+
+	nb->format = *format;
+
+	/* setup sector_size according to the min oob required */
+	if (nb->res.force_sector_size) {
+		nfi->sector_size = nb->res.force_sector_size;
+		if (nfi->sector_size == 512)
+			ecc_strength >>= 1;
+		pr_debug("%s force sector size to %d\n", __func__, nfi->sector_size);
+	} else {
+		if (nb->res.min_oob_req / nb->caps->max_fdm_size >=
+		    format->page_size / 1024) {
+			if (format->page_size / 512 <
+			    nb->res.min_oob_req / nb->caps->max_fdm_size)
+				return -EINVAL;
+
+			nfi->sector_size = 512;
+			/* format->ecc_req is the requirement per 1KB */
+			ecc_strength >>= 1;
+		} else
+			nfi->sector_size = 1024;
+	}
+
+	page_sectors = div_down(format->page_size, nfi->sector_size);
+	nfi->sector_spare_size = div_down(format->spare_size, page_sectors);
+
+	if (!nb->cus_sec_size_en) {
+		spare_idx = adjust_spare(nb, &nfi->sector_spare_size);
+		if (spare_idx < 0)
+			return -EINVAL;
+	}
+
+	/* min ecc: what fits beside a maximum-size FDM area */
+	temp = (nfi->sector_spare_size - nb->caps->max_fdm_size) * 8;
+	min_ecc = div_down(temp, nb->caps->ecc_parity_bits);
+	min_ecc = ecc->adjust_strength(ecc, min_ecc);
+	if (min_ecc < 0)
+		return -EINVAL;
+
+	/* max ecc: what fits while still honoring min_oob_req FDM bytes */
+	temp = div_up(nb->res.min_oob_req, page_sectors);
+	temp = (nfi->sector_spare_size - temp) * 8;
+	max_ecc = div_down(temp, nb->caps->ecc_parity_bits);
+	max_ecc = ecc->adjust_strength(ecc, max_ecc);
+	if (max_ecc < 0)
+		return -EINVAL;
+
+	/*
+	 * NOTE(review): "temp" here still holds the available *bit* count
+	 * from the max_ecc computation; if the intent is the parity bytes
+	 * consumed by max_ecc, this looks like it should be
+	 * div_up(max_ecc * ecc_parity_bits, 8) -- confirm.
+	 */
+	temp = div_up(temp * nb->caps->ecc_parity_bits, 8);
+	temp = nfi->sector_spare_size - temp;
+	min_fdm = min(temp, (u32)nb->caps->max_fdm_size);
+
+	/* clamp the requested strength into [min_ecc, max_ecc] */
+	if (ecc_strength > max_ecc) {
+		pr_warn("required ecc strength %d, max supported %d\n",
+			ecc_strength, max_ecc);
+		nfi->ecc_strength = max_ecc;
+		nfi->fdm_size = min_fdm;
+	} else if (ecc_strength < min_ecc) {
+		nfi->ecc_strength = min_ecc;
+		nfi->fdm_size = nb->caps->max_fdm_size;
+	} else {
+		ecc_strength = ecc->adjust_strength(ecc, ecc_strength);
+		if (ecc_strength < 0)
+			return -EINVAL;
+
+		nfi->ecc_strength = ecc_strength;
+		temp = div_up((u32)ecc_strength * nb->caps->ecc_parity_bits, 8);
+		nfi->fdm_size = nfi->sector_spare_size - temp;
+	}
+
+	nb->page_sectors = div_down(format->page_size, nfi->sector_size);
+
+	/* some IC has fixed fdm_ecc_size, if not assigned, set to fdm_size */
+	nfi->fdm_ecc_size = nb->caps->fdm_ecc_size ? : nfi->fdm_size;
+
+	nfi->ecc_parity_size = div_up((u32)nfi->ecc_strength *
+				      nb->caps->ecc_parity_bits,
+				      8);
+	set_bad_mark_ctrl(nb);
+
+	pr_debug("sector_size: %d\n", nfi->sector_size);
+	pr_debug("sector_spare_size: %d\n", nfi->sector_spare_size);
+	pr_debug("fdm_size: %d\n", nfi->fdm_size);
+	pr_debug("fdm_ecc_size: %d\n", nfi->fdm_ecc_size);
+	pr_debug("ecc_strength: %d\n", nfi->ecc_strength);
+	pr_debug("ecc_parity_size: %d\n", nfi->ecc_parity_size);
+
+	setup_spare_format(nb, spare_idx);
+	return setup_page_format(nb);
+}
+
+/*
+ * update_io_format - apply a caller-supplied sector/spare/ecc layout
+ * @nb: nfi base instance
+ * @ioformat: requested layout (custom sector size, spare, fdm, ecc)
+ *
+ * Backs the NFI_CTRL_IO_FORMAT command; overrides the layout negotiated
+ * by nfi_set_format().  Returns 0 on success or -EINVAL for an
+ * unsupported sector/spare/ecc selection.
+ */
+static int update_io_format(struct nfi_base *nb,
+			    struct nfi_io_format *ioformat)
+{
+	struct nfi *nfi = &nb->nfi;
+	void *regs = nb->res.nfi_regs;
+	u32 val;
+	int spare, spare_idx;
+	int ret = 0, ecc_level;
+
+	/* custom sector format setting */
+	val = readl(regs + NFI_SECCUS_SIZE);
+	if (ioformat->cus_sec_en) {
+		val |= SECCUS_SIZE_EN;
+		val &= ~SECCUS_SIZE_MASK;
+		val |= ioformat->cus_sec_size & SECCUS_SIZE_MASK;
+		nfi->sector_size = ioformat->cus_sec_size;
+	} else
+		val &= ~SECCUS_SIZE_EN;
+	writel(val, regs + NFI_SECCUS_SIZE);
+
+	/* if custom sector disable, just set normal sector format */
+	if (!ioformat->cus_sec_en) {
+		if (ioformat->sec_size == 1024)
+			nfi->sector_size = 1024;
+		else if (ioformat->sec_size == 512)
+			nfi->sector_size = 512;
+		else
+			return -EINVAL;
+		ret = setup_page_format(nb);
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * Spare format setting.  Signed locals are required here:
+	 * adjust_spare() takes an int * and returns a negative value on
+	 * error, which the previous u32 locals could never detect (the
+	 * "< 0" check was always false).
+	 */
+	spare = ioformat->sec_spare_size;
+	spare_idx = adjust_spare(nb, &spare);
+	if (spare_idx < 0)
+		return -EINVAL;
+	val = readl(regs + NFI_PAGEFMT);
+	val &= ~PAGEFMT_SPARE_MASK;
+	val &= ~PAGEFMT_FDM_MASK;
+	val &= ~PAGEFMT_FDM_ECC_MASK;
+	val |= spare_idx << PAGEFMT_SPARE_SHIFT;
+	val |= ioformat->fdm_size << PAGEFMT_FDM_SHIFT;
+	val |= ioformat->fdm_ecc_size << PAGEFMT_FDM_ECC_SHIFT;
+	writel(val, regs + NFI_PAGEFMT);
+	nfi->fdm_size = ioformat->fdm_size;
+	nfi->fdm_ecc_size = ioformat->fdm_ecc_size;
+
+	/* clamp the requested ecc level to a supported strength */
+	ecc_level = nb->ecc->adjust_strength(nb->ecc, ioformat->ecc_level_sel);
+	if (ecc_level < 0)
+		return -EINVAL;
+	nfi->ecc_strength = ecc_level;
+	nfi->ecc_parity_size = div_up((u32)nfi->ecc_strength *
+				      nb->caps->ecc_parity_bits,
+				      8);
+	return ret;
+}
+
+/*
+ * nfi_ctrl - miscellaneous runtime controls for the NFI
+ * @nfi: nfi instance
+ * @cmd: one of the NFI_CTRL_*/NFI_* control codes
+ * @args: command-specific argument, usually a bool/u32/enum by pointer
+ *
+ * Unrecognized commands are forwarded to the ecc engine's control hook.
+ * Returns 0 or a negative error code.
+ */
+static int nfi_ctrl(struct nfi *nfi, int cmd, void *args)
+{
+	struct nfi_base *nb = nfi_to_base(nfi);
+	void *regs = nb->res.nfi_regs;
+	int ret = 0;
+	u32 val;
+
+	switch (cmd) {
+	case NFI_CTRL_DMA:
+		nb->dma_en = *(bool *)args;
+		break;
+
+	case NFI_BURST_EN:
+		nb->dma_burst_en = *(bool *)args;
+		break;
+
+	case NFI_CTRL_NFI_IRQ:
+		nb->nfi_irq_en = *(bool *)args;
+		break;
+
+	case NFI_CTRL_IO_FORMAT:
+		ret = update_io_format(nb, args);
+		break;
+
+	case NFI_CTRL_IOCON:
+		/* rewrite the burst-length field of NFI_IOCON */
+		val = readl(regs + NFI_IOCON);
+		val &= ~BRSTN_MASK;
+		val |= *(u32 *)args << BRSTN_SHIFT;
+		writel(val, regs + NFI_IOCON);
+		break;
+
+	case NFI_CTRL_ECC:
+		nb->ecc_en = *(bool *)args;
+		break;
+
+	case NFI_CTRL_ECC_MODE:
+		nb->ecc_mode = *(enum nfiecc_mode *)args;
+		break;
+
+	case NFI_CTRL_ECC_DECODE_MODE:
+		nb->ecc_deccon = *(enum nfiecc_deccon *)args;
+		break;
+
+	case NFI_CTRL_BAD_MARK_SWAP:
+		nb->bad_mark_swap_en = *(bool *)args;
+		break;
+
+#ifdef NANDX_TEST_BUF_ALIGN
+	case NFI_ADDR_ALIGNMENT_EN:
+		/* test hook: offset the bounce buffer to an unaligned address */
+		if (*(u8 *)args)
+			nb->buf = nb->buf_align + *(u8 *)args;
+		else
+			nb->buf = nb->buf_align;
+		break;
+#endif
+
+	case NFI_BYTE_RW_EN:
+		nb->byte_rw_en = *(bool *)args;
+		break;
+
+	case NFI_CRC_EN:
+		/* TODO */
+		break;
+
+	default:
+		/* not an NFI command: let the ecc engine try it */
+		ret = nb->ecc->nfiecc_ctrl(nb->ecc, cmd, args);
+		if (ret < 0) {
+			pr_err("%s cmd(%d) args(%d) not support.\n",
+			       __func__, cmd, *(u32 *)args);
+		}
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * nfi_send_cmd - issue a single command byte to the NAND device
+ * @cmd: command opcode; negative values are rejected
+ *
+ * Writes @cmd to NFI_CMD in the configured op mode and polls NFI_STA
+ * until the controller has finished shifting the command out.
+ * Returns 0 on success, -EINVAL for a negative @cmd, or -ETIMEDOUT
+ * style error from the poll helper.
+ */
+static int nfi_send_cmd(struct nfi *nfi, short cmd)
+{
+	struct nfi_base *nb = nfi_to_base(nfi);
+	void *regs = nb->res.nfi_regs;
+	int ret;
+	u32 val;
+
+	pr_debug("%s: cmd 0x%x\n", __func__, cmd);
+
+	if (cmd < 0)
+		return -EINVAL;
+
+	set_op_mode(regs, nb->op_mode);
+
+	writel(cmd, regs + NFI_CMD);
+
+	/* STA_CMD stays set while the command is still being issued */
+	ret = readl_poll_timeout_atomic(regs + NFI_STA,
+					val, !(val & STA_CMD),
+					5, NFI_TIMEOUT);
+	if (ret)
+		pr_err("send cmd 0x%x timeout\n", cmd);
+
+	return ret;
+}
+
+/*
+ * nfi_send_addr - shift out column/row address cycles
+ * @col, @row: column/row address values
+ * @col_cycle, @row_cycle: number of address cycles for each
+ *
+ * Caches @col/@row in the base object (referenced later, e.g. in the
+ * read/write debug prints), programs the address registers and polls
+ * NFI_STA until the address phase completes.  Returns 0 or an error
+ * from the poll helper.
+ */
+static int nfi_send_addr(struct nfi *nfi, int col, int row,
+			 int col_cycle, int row_cycle)
+{
+	struct nfi_base *nb = nfi_to_base(nfi);
+	void *regs = nb->res.nfi_regs;
+	int ret;
+	u32 val;
+
+	pr_debug("%s: col 0x%x, row 0x%x, col_cycle 0x%x, row_cycle 0x%x\n",
+		 __func__, col, row, col_cycle, row_cycle);
+
+	nb->col = col;
+	nb->row = row;
+
+	writel(col, regs + NFI_COLADDR);
+	writel(row, regs + NFI_ROWADDR);
+	/* cycle counts share one register: row count in the high nibble */
+	writel(col_cycle | (row_cycle << ROW_SHIFT), regs + NFI_ADDRNOB);
+
+	ret = readl_poll_timeout_atomic(regs + NFI_STA,
+					val, !(val & STA_ADDR),
+					5, NFI_TIMEOUT);
+	if (ret)
+		pr_err("send address timeout\n");
+
+	return ret;
+}
+
+/* No explicit trigger step is required by this controller; always succeeds. */
+static int nfi_trigger(struct nfi *nfi)
+{
+	return 0;
+}
+
+/*
+ * Poll NFI_PIO_DIRDY until the PIO data port has data/space ready
+ * (at most NFI_TIMEOUT us).  Returns 0 or the poll helper's error.
+ */
+static inline int wait_io_ready(void *regs)
+{
+	u32 val;
+	int ret;
+
+	ret = readl_poll_timeout_atomic(regs + NFI_PIO_DIRDY,
+					val, val & PIO_DI_RDY,
+					2, NFI_TIMEOUT);
+	if (ret)
+		pr_err("wait io ready timeout\n");
+
+	return ret;
+}
+
+/*
+ * wait_ready_irq - wait for the NAND busy-to-ready transition via IRQ
+ * @timeout: unused here; the wait is bounded by IRQ_TIMEOUT instead
+ *
+ * Arms NFI_CNRNB (0xf1 is a magic arming value; the register's field
+ * layout is not visible in this file - kept as-is) plus the busy-return
+ * interrupt, then blocks on the nb->done event.
+ * Returns 0 when ready, -ETIMEDOUT otherwise.
+ */
+static int wait_ready_irq(struct nfi_base *nb, u32 timeout)
+{
+	void *regs = nb->res.nfi_regs;
+	u32 val;
+	int ret;
+
+	nandx_event_init(nb->done);
+	writel(0xf1, regs + NFI_CNRNB);
+	val = readl(regs + NFI_INTR_EN);
+	val |= NFI_IRQ_INTR | INTR_BUSY_RETURN_EN;
+	writel(val, regs + NFI_INTR_EN);
+
+	/*
+	 * check if nand already been ready,
+	 * avoid issue that caused by missing irq-event.
+	 */
+	val = readl(regs + NFI_STA);
+	if (val & STA_BUSY2READY) {
+		readl(regs + NFI_INTR_STA);	/* ack any pending status */
+		writel(0, (void *)(regs + NFI_INTR_EN));
+		writew(0, regs + NFI_CNRNB);
+
+		return 0;
+	}
+
+	ret = nandx_event_wait_complete(nb->done, IRQ_TIMEOUT);
+	writel(0, (void *)(regs + NFI_INTR_EN));
+	writew(0, regs + NFI_CNRNB);
+
+	/* nandx_event_wait_complete() returns non-zero on completion */
+	return ret ? 0 : -ETIMEDOUT;
+}
+
+/*
+ * wait_ready_poll - poll NFI_STA for the busy-to-ready transition
+ * @timeout: poll timeout in microseconds
+ *
+ * NFI_CNRNB is armed with 0x21 for the duration of the poll (magic
+ * value; field layout not visible here - kept as-is) and cleared
+ * afterwards.  Returns 0 when ready or the poll helper's error.
+ */
+static int wait_ready_poll(struct nfi_base *nb, u32 timeout)
+{
+	void *regs = nb->res.nfi_regs;
+	int ret;
+	u32 val;
+
+	writel(0x21, regs + NFI_CNRNB);
+	ret = readl_poll_timeout_atomic(regs + NFI_STA, val,
+					val & STA_BUSY2READY,
+					2, timeout);
+	writew(0, regs + NFI_CNRNB);
+
+	return ret;
+}
+
+/* Fixed-duration wait: simply busy-delay for @timeout microseconds. */
+static void wait_ready_time(struct nfi_base *nb, u32 timeout)
+{
+	udelay(timeout);
+}
+
+/*
+ * nfi_wait_ready - wait for device ready using the requested strategy
+ * @type: NAND_WAIT_BUSY (status based, irq or poll) or NAND_WAIT_TIME
+ *        (fixed delay of @timeout microseconds)
+ *
+ * Returns 0 on success, -EINVAL for an unknown @type, or the error of
+ * the underlying wait helper.
+ */
+static int nfi_wait_ready(struct nfi *nfi, int type, u32 timeout)
+{
+	struct nfi_base *nb = nfi_to_base(nfi);
+	int ret;
+
+	if (type == NAND_WAIT_BUSY) {
+		ret = nb->nfi_irq_en ? wait_ready_irq(nb, timeout) :
+		      wait_ready_poll(nb, timeout);
+	} else if (type == NAND_WAIT_TIME) {
+		wait_ready_time(nb, timeout);
+		ret = 0;
+	} else {
+		ret = -EINVAL;
+	}
+
+	if (ret)
+		pr_err("%s: type 0x%x, timeout 0x%x\n",
+		       __func__, type, timeout);
+
+	return ret;
+}
+
+/*
+ * Program the ECC engine for decoding @sectors sectors (read path) and
+ * enable it; per-sector length covers the data plus the FDM bytes that
+ * are ECC protected.
+ */
+static int enable_ecc_decode(struct nfi_base *nb, int sectors)
+{
+	struct nfiecc *engine = nb->ecc;
+
+	engine->config.op = ECC_DECODE;
+	engine->config.mode = nb->ecc_mode;
+	engine->config.deccon = nb->ecc_deccon;
+	engine->config.sectors = sectors;
+	engine->config.len = nb->nfi.sector_size + nb->nfi.fdm_ecc_size;
+	engine->config.strength = nb->nfi.ecc_strength;
+
+	return engine->enable(engine);
+}
+
+/*
+ * Program the ECC engine for encoding (write path) and enable it;
+ * per-sector length covers the data plus ECC-protected FDM bytes.
+ */
+static int enable_ecc_encode(struct nfi_base *nb)
+{
+	struct nfiecc *engine = nb->ecc;
+
+	engine->config.op = ECC_ENCODE;
+	engine->config.mode = nb->ecc_mode;
+	engine->config.len = nb->nfi.sector_size + nb->nfi.fdm_ecc_size;
+	engine->config.strength = nb->nfi.ecc_strength;
+
+	return engine->enable(engine);
+}
+
+/*
+ * read_fdm - copy FDM (in-band spare) bytes from the per-sector
+ * NFI_FDML/NFI_FDMM register pairs into @fdm.
+ *
+ * Each sector provides up to 8 bytes: FDML holds bytes 0-3, FDMM
+ * bytes 4-7, least significant byte first.  When bad-mark swap is
+ * enabled the per-sector destination comes from the fdm_shift() hook.
+ */
+static void read_fdm(struct nfi_base *nb, u8 *fdm, int start_sector,
+		     int sectors)
+{
+	void *regs = nb->res.nfi_regs;
+	int j, i;
+	u32 vall, valm;
+	u8 *buf = fdm;
+
+	for (i = start_sector; i < start_sector + sectors; i++) {
+		if (nb->bad_mark_swap_en)
+			buf = nb->bad_mark_ctrl.fdm_shift(&nb->nfi, fdm, i);
+
+		vall = readl(regs + NFI_FDML(i));
+		valm = readl(regs + NFI_FDMM(i));
+
+		/* byte j comes from FDML (j < 4) or FDMM (j >= 4) */
+		for (j = 0; j < nb->nfi.fdm_size; j++)
+			*buf++ = (j >= 4 ? valm : vall) >> ((j & 3) << 3);
+	}
+}
+
+/*
+ * write_fdm - load @fdm into the per-sector NFI_FDML/NFI_FDMM registers
+ * for all page sectors before a program operation.
+ *
+ * Bytes beyond fdm_size are padded with 0xff (erased value).  When
+ * bad-mark swap is enabled the per-sector source comes from the
+ * fdm_shift() hook.
+ */
+static void write_fdm(struct nfi_base *nb, u8 *fdm)
+{
+	struct nfi *nfi = &nb->nfi;
+	void *regs = nb->res.nfi_regs;
+	u32 vall, valm;
+	int i, j;
+	u8 *buf = fdm;
+
+	for (i = 0; i < nb->page_sectors; i++) {
+		if (nb->bad_mark_swap_en)
+			buf = nb->bad_mark_ctrl.fdm_shift(nfi, fdm, i);
+
+		/* bytes 0-3 go to FDML, lowest byte first */
+		vall = 0;
+		for (j = 0; j < 4; j++)
+			vall |= (j < nfi->fdm_size ? *buf++ : 0xff) << (j * 8);
+		writel(vall, regs + NFI_FDML(i));
+
+		/* bytes 4-7 go to FDMM */
+		valm = 0;
+		for (j = 0; j < 4; j++)
+			valm |=	((j + 4) < nfi->fdm_size
+				? *buf++ : 0xff) << (j * 8);
+		writel(valm, regs + NFI_FDMM(i));
+	}
+}
+
+/*
+ * Report whether the controller flagged the last read page as empty
+ * (all-0xff).  @data/@fdm/@sectors are unused here but kept for the
+ * is_page_empty() hook signature.
+ */
+static bool is_page_empty(struct nfi_base *nb, u8 *data, u8 *fdm,
+			  int sectors)
+{
+	void *regs = nb->res.nfi_regs;
+
+	if (readl(regs + NFI_STA) & STA_EMP_PAGE) {
+		pr_debug("empty page!\n");
+		return true;
+	}
+
+	return false;
+}
+
+/* NOTE: pio does not use auto format */
+/*
+ * pio_rx_data - read @sectors raw sectors (data + spare) through the
+ * PIO data port into nb->buf, then optionally run ECC handling per
+ * sector and split the result into @data and @fdm.
+ *
+ * Returns the max bitflip count across sectors (>= 0) on an ECC read,
+ * 0 for a raw read or an empty page, or a negative error code.
+ */
+static int pio_rx_data(struct nfi_base *nb, u8 *data, u8 *fdm,
+		       int sectors)
+{
+	struct nfiecc_status ecc_status;
+	struct nfi *nfi = &nb->nfi;
+	void *regs = nb->res.nfi_regs;
+	u32 val, bitflips = 0;
+	int len, ret, i;
+	u32 *buf32;
+	u8 *buf;
+
+	/* PIO always transfers full sector + spare */
+	len = nfi->sector_size + nfi->sector_spare_size;
+	len *= sectors;
+
+	val = readl(regs + NFI_CNFG);
+	if (nb->byte_rw_en) {
+		/* byte-at-a-time port access */
+		val |= CNFG_BYTE_RW;
+		writel(val, regs + NFI_CNFG);
+
+		for (i = 0; i < len; i++) {
+			ret = wait_io_ready(regs);
+			if (ret)
+				return ret;
+
+			nb->buf[i] = readb(regs + NFI_DATAR);
+		}
+	} else {
+		/* 32-bit port access */
+		val &= ~CNFG_BYTE_RW;
+		writel(val, regs + NFI_CNFG);
+
+		buf32 = (u32 *)nb->buf;
+		for (i = 0; i < (len >> 2); i++) {
+			ret = wait_io_ready(regs);
+			if (ret)
+				return ret;
+
+			buf32[i] = readl(regs + NFI_DATAR);
+		}
+	}
+
+	/* TODO: do error handle for autoformat setting of pio */
+	if (nb->ecc_en) {
+		for (i = 0; i < sectors; i++) {
+			buf = nb->buf + i * (nfi->sector_size +
+					     nfi->sector_spare_size);
+
+			if (nb->ecc_deccon == ECC_DEC_LOCATE) {
+				/* Need to do manual correct data for PIO mode */
+				nb->ecc->correct_data(nb->ecc,
+						      &ecc_status,
+						      buf, i);
+				ret = nb->ecc->decode_status(nb->ecc, i, 1);
+				if (ret < 0) {
+					/* an empty page is not an error */
+					if (nb->is_page_empty(nb,
+							      nb->buf,
+							      fdm,
+							      sectors))
+						return 0;
+					return ret;
+				}
+				bitflips = max((int)bitflips, ret);
+			} else if (nb->ecc_deccon == ECC_DEC_FER) {
+				/* fail/error report only, no correction */
+				ret = nb->ecc->decode_status(nb->ecc, i, 1);
+				if (ret)
+					return ret;
+			}
+
+			/* split sector into caller's data and fdm buffers */
+			if (data)
+				memcpy(data + i * nfi->sector_size,
+				       buf, nfi->sector_size);
+			if (fdm)
+				memcpy(fdm + i * nfi->fdm_size,
+				       buf + nfi->sector_size, nfi->fdm_size);
+		}
+
+		return bitflips;
+	}
+
+	/* raw read, only data not null, and its length should be $len */
+	if (data)
+		memcpy(data, nb->buf, len);
+
+	return 0;
+}
+
+/*
+ * pio_tx_data - write sector data through the PIO data port
+ *
+ * With ECC enabled only the main data is sent (spare is produced by the
+ * engine); for raw writes the data must include the spare bytes.
+ * @fdm is unused here (FDM registers are loaded by rw_prepare()).
+ * Returns 0 or a negative error from the port-ready wait.
+ */
+static int pio_tx_data(struct nfi_base *nb, u8 *data, u8 *fdm,
+		       int sectors)
+{
+	struct nfi *nfi = &nb->nfi;
+	void *regs = nb->res.nfi_regs;
+	u32 i, val, *buf32;
+	int len, ret;
+
+	len = nb->ecc_en ? nfi->sector_size :
+	      nfi->sector_size + nfi->sector_spare_size;
+	len *= sectors;
+
+	/* raw write: only data is used, and its length should be $len */
+	if (data)
+		memcpy(nb->buf, data, len);
+
+	val = readl(regs + NFI_CNFG);
+	if (nb->byte_rw_en) {
+		/* byte-at-a-time port access */
+		val |= CNFG_BYTE_RW;
+		writel(val, regs + NFI_CNFG);
+
+		for (i = 0; i < len; i++) {
+			ret = wait_io_ready(regs);
+			if (ret)
+				return ret;
+			writeb(nb->buf[i], regs + NFI_DATAW);
+		}
+	} else {
+		/* 32-bit port access */
+		val &= ~CNFG_BYTE_RW;
+		writel(val, regs + NFI_CNFG);
+		buf32 = (u32 *)nb->buf;
+		for (i = 0; i < (len >> 2); i++) {
+			ret = wait_io_ready(regs);
+			if (ret)
+				return ret;
+			writel(buf32[i], regs + NFI_DATAW);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * rw_prepare - configure the controller for a page read/program
+ * @sectors: number of sectors to transfer
+ * @data: caller's main-data buffer (source for writes)
+ * @fdm: caller's spare/OOB buffer, loaded into the FDM registers for
+ *       ECC writes
+ * @read: true for read, false for program
+ *
+ * Sets up NFI_CNFG (direction, ECC/auto-format, DMA flags), enables the
+ * ECC engine when ecc_en is set, maps nb->buf for DMA, applies the
+ * bad-mark swap for writes and programs the sector count.
+ * Returns 0 on success or a negative error code.
+ */
+static int rw_prepare(struct nfi_base *nb, int sectors, u8 *data,
+		      u8 *fdm, bool read)
+{
+	void *regs = nb->res.nfi_regs;
+	u32 len = nb->nfi.sector_size * sectors;
+	bool irq_en = nb->dma_en && nb->nfi_irq_en;
+	void *dma_addr;
+	u32 val;
+	int ret;
+
+	nb->rw_sectors = sectors;
+
+	/* arm the AHB-done interrupt when running DMA + IRQ mode */
+	if (irq_en) {
+		nandx_event_init(nb->done);
+		val  = readl(regs + NFI_INTR_EN);
+		val |= NFI_IRQ_INTR | INTR_AHB_DONE_EN;
+		writel(val, regs + NFI_INTR_EN);
+	}
+
+	val = readw(regs + NFI_CNFG);
+	if (read)
+		val |= CNFG_READ_EN;
+	else
+		val &= ~CNFG_READ_EN;
+
+	/* as design, now, auto format enabled when ecc enabled */
+	if (nb->ecc_en) {
+		val |= CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
+
+		if (read)
+			ret = enable_ecc_decode(nb, sectors);
+		else
+			ret = enable_ecc_encode(nb);
+
+		if (ret) {
+			pr_warn("%s: ecc enable %s fail!\n", __func__,
+				read ? "decode" : "encode");
+			return ret;
+		}
+	}
+
+	/* for writes the swap must happen before data is copied/mapped */
+	if (!read && nb->bad_mark_swap_en)
+		nb->bad_mark_ctrl.bad_mark_swap(&nb->nfi, data, fdm);
+
+	/* Need check with auto_fmt_en flag */
+	if (!nb->ecc_en)
+		len += sectors * nb->nfi.sector_spare_size;
+
+	if (nb->dma_en) {
+		val |= CNFG_AHB;
+		if (nb->dma_burst_en)
+			val |= CNFG_DMA_BURST_EN;
+		else
+			val &= ~CNFG_DMA_BURST_EN;
+
+		if (read) {
+			dma_addr = (void *)(unsigned long)nandx_dma_map(
+					   nb->res.dev, nb->buf,
+					   (u64)len, NDMA_FROM_DEV);
+		} else {
+			memcpy(nb->buf, data, len);
+			dma_addr = (void *)(unsigned long)nandx_dma_map(
+					   nb->res.dev, nb->buf,
+					   (u64)len, NDMA_TO_DEV);
+		}
+
+		writel((unsigned long)dma_addr, (void *)regs + NFI_STRADDR);
+
+		/* remembered so rw_complete() can unmap/copy back */
+		nb->access_len = len;
+		nb->dma_addr = dma_addr;
+	} else {
+		val &= ~CNFG_AHB;
+		val &= ~CNFG_DMA_BURST_EN;
+	}
+
+	/* ECC writes: preload the spare bytes into the FDM registers */
+	if (nb->ecc_en && !read && fdm)
+		write_fdm(nb, fdm);
+
+	writew(val, regs + NFI_CNFG);
+	/* setup R/W sector number */
+	writel(sectors << CON_SEC_SHIFT, regs + NFI_CON);
+
+	return 0;
+}
+
+/* Kick the prepared transfer: set burst read/write in NFI_CON, then start. */
+static void rw_trigger(struct nfi_base *nb, bool read)
+{
+	void *regs = nb->res.nfi_regs;
+
+	writel(readl(regs + NFI_CON) | (read ? CON_BRD : CON_BWR),
+	       regs + NFI_CON);
+
+	writel(STAR_EN, regs + NFI_STRDATA);
+}
+
+/*
+ * rw_wait_done - wait until the page transfer for @sectors completed
+ *
+ * In DMA+IRQ mode first waits for the AHB-done event, then (for both
+ * modes) polls the sector counter.  For ECC reads it additionally waits
+ * for the decoder and returns its status (bitflips or error).
+ * Returns >= 0 on success (bitflip count for ECC reads) or a negative
+ * error code.
+ */
+static int rw_wait_done(struct nfi_base *nb, int sectors, bool read)
+{
+	void *regs = nb->res.nfi_regs;
+	bool irq_en = nb->dma_en && nb->nfi_irq_en;
+	int ret;
+	u32 val;
+
+	if (irq_en) {
+		ret = nandx_event_wait_complete(nb->done, NFI_TIMEOUT);
+		/* NOTE(review): INTR_EN is cleared only when the wait timed
+		 * out (ret == 0); presumably the IRQ handler disables it on
+		 * the completion path - confirm.
+		 */
+		if (!ret)
+			writew(0, regs + NFI_INTR_EN);
+	}
+
+	if (read) {
+		ret = readl_poll_timeout_atomic(regs + NFI_BYTELEN, val,
+						ADDRCNTR_SEC(val) >=
+						(u32)sectors,
+						2, NFI_TIMEOUT);
+		/* HW issue: if not wait ahb done, need polling bus busy */
+		if (!ret && !irq_en)
+			ret = readl_poll_timeout_atomic(regs + NFI_MASTER_STA,
+							val,
+							!(val &
+							  MASTER_BUS_BUSY),
+							2, NFI_TIMEOUT);
+	} else {
+		ret = readl_poll_timeout_atomic(regs + NFI_ADDRCNTR, val,
+						ADDRCNTR_SEC(val) >=
+						(u32)sectors,
+						2, NFI_TIMEOUT);
+	}
+
+	if (ret) {
+		pr_warn("do page %s timeout\n", read ? "read" : "write");
+		return ret;
+	}
+
+	if (read && nb->ecc_en) {
+		ret = nb->ecc->wait_done(nb->ecc);
+		if (ret)
+			return ret;
+
+		return nb->ecc->decode_status(nb->ecc, 0, sectors);
+	}
+
+	return 0;
+}
+
+/*
+ * Move the payload for the current transfer: DMA reads only need the
+ * FDM registers drained; PIO mode performs the whole transfer here.
+ */
+static int rw_data(struct nfi_base *nb, u8 *data, u8 *fdm, int sectors,
+		   bool read)
+{
+	if (read && nb->dma_en && nb->ecc_en && fdm)
+		read_fdm(nb, fdm, 0, sectors);
+
+	if (nb->dma_en)
+		return 0;
+
+	return read ? pio_rx_data(nb, data, fdm, sectors) :
+	       pio_tx_data(nb, data, fdm, sectors);
+}
+
+/*
+ * rw_complete - undo rw_prepare() side effects after a transfer
+ *
+ * Unmaps the DMA buffer (copying read data back to @data), converts an
+ * uncorrectable-read status to success when the page is actually empty,
+ * restores the bad-mark swap, disables the ECC engine and clears the
+ * configuration registers.
+ */
+static void rw_complete(struct nfi_base *nb, u8 *data, u8 *fdm,
+			bool read)
+{
+	bool is_empty;
+
+	if (nb->dma_en) {
+		if (read) {
+			nandx_dma_unmap(nb->res.dev, nb->buf, nb->dma_addr,
+					(u64)nb->access_len, NDMA_FROM_DEV);
+
+			if (data)
+				memcpy(data, nb->buf, nb->access_len);
+		} else {
+			nandx_dma_unmap(nb->res.dev, nb->buf, nb->dma_addr,
+					(u64)nb->access_len, NDMA_TO_DEV);
+		}
+	}
+
+	if (read && nb->read_status == -ENANDREAD) {
+		is_empty = nb->is_page_empty(nb, data, fdm,
+					     nb->rw_sectors);
+		if (is_empty)
+			nb->read_status = 0;
+	}
+
+	/* whether it's reading or writing, we always check if we need swap;
+	 * for write, we need to restore the caller's data
+	 */
+	if (nb->bad_mark_swap_en)
+		nb->bad_mark_ctrl.bad_mark_swap(&nb->nfi, data, fdm);
+
+	if (nb->ecc_en)
+		nb->ecc->disable(nb->ecc);
+
+	writel(0, nb->res.nfi_regs + NFI_CNFG);
+	writel(0, nb->res.nfi_regs + NFI_CON);
+}
+
+/*
+ * nfi_read_sectors - read @sectors sectors of the current page
+ * @data: destination for main data (may be NULL)
+ * @fdm: destination for spare/FDM bytes (may be NULL)
+ *
+ * Returns the max corrected bitflip count (>= 0), -ENANDREAD when the
+ * page is uncorrectable (rw_complete() downgrades that to success for
+ * empty pages), or another negative error code.
+ */
+static int nfi_read_sectors(struct nfi *nfi, u8 *data, u8 *fdm,
+			    int sectors)
+{
+	struct nfi_base *nb = nfi_to_base(nfi);
+	int bitflips = 0, ret;
+
+	pr_debug("%s: read page#%d\n", __func__, nb->row);
+	pr_debug("%s: data address 0x%x, fdm address 0x%x,sectors 0x%x, buf: 0x%x\n",
+		 __func__, (u32)((unsigned long)data),
+		 (u32)((unsigned long)fdm), sectors,
+		 (u32)((unsigned long)nb->buf));
+	nb->read_status = 0;
+
+	ret = nb->rw_prepare(nb, sectors, data, fdm, true);
+	if (ret)
+		return ret;
+
+	nb->rw_trigger(nb, true);
+
+	if (nb->dma_en) {
+		ret = nb->rw_wait_done(nb, sectors, true);
+		if (ret > 0)
+			bitflips = ret;
+		else if (ret == -ENANDREAD)
+			nb->read_status = -ENANDREAD;
+		else if (ret < 0)
+			goto complete;
+	}
+
+	ret = nb->rw_data(nb, data, fdm, sectors, true);
+	if (ret >= 0)
+		ret = max(ret, bitflips);
+
+complete:
+	nb->rw_complete(nb, data, fdm, true);
+
+	if (nb->read_status == -ENANDREAD)
+		return -ENANDREAD;
+
+	return ret;
+}
+
+/*
+ * nfi_write_page - program one full page (main data + FDM/OOB bytes)
+ * @data: page main data
+ * @fdm: spare bytes loaded into the FDM registers by rw_prepare()
+ *
+ * Fix vs. original: on a rw_data() failure the function returned
+ * without running rw_complete(), leaving the DMA buffer mapped, the
+ * ECC engine enabled and - with bad-mark swap on - the caller's buffer
+ * still swapped.  The completion path now always runs once rw_prepare()
+ * has succeeded (mirroring nfi_read_sectors()).
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int nfi_write_page(struct nfi *nfi, u8 *data, u8 *fdm)
+{
+	struct nfi_base *nb = nfi_to_base(nfi);
+	u32 sectors = div_down(nb->format.page_size, nfi->sector_size);
+	int ret;
+
+	pr_debug("%s: write page#%d\n", __func__, nb->row);
+	pr_debug("%s: data address 0x%x, fdm address 0x%x, buf 0x%x\n",
+		 __func__, (int)((unsigned long)data),
+		 (int)((unsigned long)fdm), (u32)((unsigned long)nb->buf));
+
+	ret = nb->rw_prepare(nb, sectors, data, fdm, false);
+	if (ret)
+		return ret;
+
+	nb->rw_trigger(nb, false);
+
+	ret = nb->rw_data(nb, data, fdm, sectors, false);
+	if (!ret)
+		ret = nb->rw_wait_done(nb, sectors, false);
+
+	/* always undo prepare-side state (DMA map, ECC, bad-mark swap) */
+	nb->rw_complete(nb, data, fdm, false);
+
+	return ret;
+}
+
+/* Enable the custom sector size feature with a sector size of @count bytes. */
+static void nfi_enable_seccus(struct nfi *nfi, int count)
+{
+	struct nfi_base *nb = nfi_to_base(nfi);
+	void *regs = nb->res.nfi_regs;
+	u32 reg;
+
+	reg = readl(regs + NFI_SECCUS_SIZE) & ~SECCUS_SIZE_MASK;
+	reg |= (count & SECCUS_SIZE_MASK) << SECCUS_SIZE_SHIFT;
+	reg |= SECCUS_SIZE_EN;
+	writel(reg, regs + NFI_SECCUS_SIZE);
+}
+
+/*
+ * Disable the custom sector size feature and clear the size field;
+ * @count is unused but kept for symmetry with nfi_enable_seccus().
+ */
+static void nfi_disable_seccus(struct nfi *nfi, int count)
+{
+	struct nfi_base *nb = nfi_to_base(nfi);
+	void *regs = nb->res.nfi_regs;
+	u32 reg;
+
+	reg = readl(regs + NFI_SECCUS_SIZE);
+	reg &= ~(SECCUS_SIZE_MASK | SECCUS_SIZE_EN);
+	writel(reg, regs + NFI_SECCUS_SIZE);
+}
+
+/*
+ * nfi_rw_bytes - PIO transfer of @count raw bytes via the NFI data port
+ * @read: true to read into @data, false to write from @data
+ *
+ * Enters byte R/W custom-data mode on demand (the FSM check lets later
+ * iterations continue an already-running transfer).  For transfers
+ * shorter than one sector the custom sector size feature is used.
+ *
+ * Fix vs. original: @ret was returned uninitialized when @count == 0;
+ * it is now initialized so an empty transfer returns 0.
+ * Returns 0 on success or a negative error code.
+ */
+static int nfi_rw_bytes(struct nfi *nfi, u8 *data, int count, bool read)
+{
+	struct nfi_base *nb = nfi_to_base(nfi);
+	void *regs = nb->res.nfi_regs;
+	int i, ret = 0;
+	u32 val;
+
+	for (i = 0; i < count; i++) {
+		val = readl(regs + NFI_STA) & NFI_FSM_MASK;
+		if (val != NFI_FSM_CUSTDATA) {
+			val = readw(regs + NFI_CNFG) | CNFG_BYTE_RW;
+			if (read)
+				val |= CNFG_READ_EN;
+			writew(val, regs + NFI_CNFG);
+
+			if (count < nfi->sector_size) {
+				nfi_enable_seccus(nfi, count);
+				val = 1;
+			} else {
+				val = div_up(count, nfi->sector_size);
+			}
+			val = (val << CON_SEC_SHIFT) | CON_BRD | CON_BWR;
+			writel(val, regs + NFI_CON);
+
+			writew(STAR_EN, regs + NFI_STRDATA);
+		}
+
+		ret = wait_io_ready(regs);
+		if (ret)
+			break;
+
+		if (read)
+			data[i] = readb(regs + NFI_DATAR);
+		else
+			writeb(data[i], regs + NFI_DATAW);
+	}
+
+	if (count < nfi->sector_size)
+		nfi_disable_seccus(nfi, count);
+
+	writel(0, nb->res.nfi_regs + NFI_CNFG);
+
+	return ret;
+}
+
+/* Read @count raw bytes from the device through the PIO data port. */
+static int nfi_read_bytes(struct nfi *nfi, u8 *data, int count)
+{
+	return nfi_rw_bytes(nfi, data, count, true);
+}
+
+/* Write @count raw bytes to the device through the PIO data port. */
+static int nfi_write_bytes(struct nfi *nfi, u8 *data, int count)
+{
+	return nfi_rw_bytes(nfi, data, count, false);
+}
+
+/*
+ * Per the register map, sw reset or a NAND interface change may only be
+ * issued while the flash macro is idle - poll NFI_STA for that state.
+ */
+static inline int wait_flash_macro_idle(void *regs)
+{
+	u32 sta;
+
+	return readl_poll_timeout_atomic(regs + NFI_STA, sta,
+					 sta & FLASH_MACRO_IDLE, 2,
+					 NFI_TIMEOUT);
+}
+
+/* Pack the seven SDR timing fields into the NFI_ACCCON layout (see below). */
+#define ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt) \
+	((tpoecs) << 28 | (tprecs) << 22 | (tc2r) << 16 | \
+	 (tw2r) << 12 | (twh) << 8 | (twst) << 4 | (trlt))
+
+/*
+ * nfi_set_sdr_timing - translate SDR timing parameters into NFI_ACCCON
+ * cycle counts based on the 1x clock.  @type is unused here.
+ *
+ * NOTE(review): the rate-KHz * time / 1e6 conversion assumes the
+ * nand_sdr_timing fields are in nanoseconds - confirm against the
+ * struct's definition.
+ */
+static int nfi_set_sdr_timing(struct nfi *nfi, void *timing, u8 type)
+{
+	struct nand_sdr_timing *sdr = (struct nand_sdr_timing *) timing;
+	struct nfi_base *nb = nfi_to_base(nfi);
+	void *regs = nb->res.nfi_regs;
+	u32 tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt, tstrobe;
+	u32 rate, val;
+	int ret;
+
+	ret = wait_flash_macro_idle(regs);
+	if (ret) {
+		pr_err("wait_flash_macro_idle fail.\n");
+		return ret;
+	}
+
+	/* turn clock rate into KHZ */
+	rate = nb->res.clock_1x / 1000;
+
+	tpoecs = max(sdr->tALH, sdr->tCLH);
+	tpoecs = div_up(tpoecs * rate, 1000000);
+	tpoecs &= 0xf;
+
+	tprecs = max(sdr->tCLS, sdr->tALS);
+	tprecs = div_up(tprecs * rate, 1000000);
+	tprecs &= 0x3f;
+
+	/* tc2r is in unit of 2T */
+	tc2r = div_up(sdr->tCR * rate, 1000000);
+	tc2r = div_down(tc2r, 2);
+	tc2r &= 0x3f;
+
+	/* tw2r is also in units of 2T */
+	tw2r = div_up(sdr->tWHR * rate, 1000000);
+	tw2r = div_down(tw2r, 2);
+	tw2r &= 0xf;
+
+	twh = max(sdr->tREH, sdr->tWH);
+	twh = div_up(twh * rate, 1000000) - 1;
+	twh &= 0xf;
+
+	twst = div_up(sdr->tWP * rate, 1000000) - 1;
+	twst &= 0xf;
+
+	trlt = div_up(sdr->tRP * rate, 1000000) - 1;
+	trlt &= 0xf;
+
+	/* If tREA is bigger than tRP, setup strobe sel here */
+	if ((trlt + 1) * 1000000 / rate < sdr->tREA) {
+		tstrobe = sdr->tREA - (trlt + 1) * 1000000 / rate;
+		tstrobe = div_up(tstrobe * rate, 1000000);
+		val = readl(regs + NFI_DEBUG_CON1);
+		val &= ~STROBE_MASK;
+		val |= tstrobe << STROBE_SHIFT;
+		writel(val, regs + NFI_DEBUG_CON1);
+	}
+
+	/*
+	 * ACCON: access timing control register
+	 * -------------------------------------
+	 * 31:28: tpoecs, minimum required time for CS post pulling down after
+	 *        accessing the device
+	 * 27:22: tprecs, minimum required time for CS pre pulling down before
+	 *        accessing the device
+	 * 21:16: tc2r, minimum required time from NCEB low to NREB low
+	 * 15:12: tw2r, minimum required time from NWEB high to NREB low.
+	 * 11:08: twh, write enable hold time
+	 * 07:04: twst, write wait states
+	 * 03:00: trlt, read wait states
+	 */
+	val = ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt);
+	pr_debug("acctiming: 0x%x\n", val);
+	writel(val, regs + NFI_ACCCON);
+
+	/* set NAND type */
+	writel(NAND_TYPE_ASYNC, regs + NFI_NAND_TYPE_CNFG);
+
+	return ret;
+}
+
+/*
+ * nfi_set_timing - apply interface timing of the given NAND_TIMING_xxx
+ * @type.  Only SDR timing is implemented; the DDR variants return
+ * -EINVAL.
+ *
+ * Fix vs. original: dropped the unreachable "return 0" after the switch
+ * (every case, including default, already returns).
+ */
+static int nfi_set_timing(struct nfi *nfi, void *timing, int type)
+{
+	switch (type) {
+	case NAND_TIMING_SDR:
+		return nfi_set_sdr_timing(nfi, timing, type);
+
+	/* NOTE: for mlc/tlc */
+	case NAND_TIMING_SYNC_DDR:
+	case NAND_TIMING_TOGGLE_DDR:
+	case NAND_TIMING_NVDDR2:
+	default:
+		return -EINVAL;
+	}
+}
+
+/* Populate the public struct nfi operation table with this file's hooks. */
+static void set_nfi_funcs(struct nfi *nfi)
+{
+	nfi->select_chip = nfi_select_chip;
+	nfi->set_format = nfi_set_format;
+	nfi->nfi_ctrl = nfi_ctrl;
+	nfi->set_timing = nfi_set_timing;
+
+	nfi->reset = nfi_reset;
+	nfi->send_cmd = nfi_send_cmd;
+	nfi->send_addr = nfi_send_addr;
+	nfi->trigger = nfi_trigger;
+
+	nfi->write_page = nfi_write_page;
+	nfi->write_bytes = nfi_write_bytes;
+	nfi->read_sectors = nfi_read_sectors;
+	nfi->read_bytes = nfi_read_bytes;
+
+	nfi->wait_ready = nfi_wait_ready;
+
+	nfi->enable_randomizer = nfi_enable_randomizer;
+	nfi->disable_randomizer = nfi_disable_randomizer;
+}
+
+/* capability table for the NFI v1.0 IP (selected for MT6880/MT8518) */
+static struct nfi_caps nfi_caps_v10 = {
+	.max_fdm_size = 8,
+	.fdm_ecc_size = 1,
+	.ecc_parity_bits = 14,
+	.spare_size = spare_size_v10,
+	.spare_size_num = 19,	/* entries in spare_size_v10 */
+};
+
+/* Return the capability table matching IC @ic; v1.0 is the fallback. */
+static struct nfi_caps *nfi_get_match_data(enum mtk_ic_version ic)
+{
+	/* NOTE: add match data for other ICs here */
+	switch (ic) {
+	case NANDX_MT6880:
+	case NANDX_MT8518:
+	default:
+		return &nfi_caps_v10;
+	}
+}
+
+/*
+ * set_nfi_base_params - install default configuration, per-IC caps and
+ * the internal read/write hook table on a freshly allocated base object.
+ * Also creates the completion event used by the IRQ paths.
+ */
+static void set_nfi_base_params(struct nfi_base *nb)
+{
+	/* defaults: DMA with burst and hardware ECC on, everything else off */
+	nb->ecc_en = true;
+	nb->dma_en = true;
+	nb->dma_burst_en = true;
+
+	nb->nfi_irq_en = false;
+	nb->randomize_en = false;
+	nb->crc_en = false;
+
+	nb->cus_sec_size_en = false;
+	nb->last_seccus_size_en = false;
+	nb->bad_mark_swap_en = false;
+
+	nb->op_mode = CNFG_CUSTOM_MODE;
+	nb->ecc_deccon = ECC_DEC_CORRECT;
+	nb->ecc_mode = ECC_NFI_MODE;
+
+	nb->done = nandx_event_create();
+	nb->caps = nfi_get_match_data(nb->res.ic_ver);
+
+	nb->set_op_mode = set_op_mode;
+	nb->is_page_empty = is_page_empty;
+
+	/* default page access pipeline; extensions may override these */
+	nb->rw_prepare = rw_prepare;
+	nb->rw_trigger = rw_trigger;
+	nb->rw_wait_done = rw_wait_done;
+	nb->rw_data = rw_data;
+	nb->rw_complete = rw_complete;
+}
+
+/* Raw-NAND extension init: the base object is used as-is. */
+struct nfi *nfi_extend_init(struct nfi_base *nb)
+{
+	return &nb->nfi;
+}
+
+/* Raw-NAND extension teardown: just release the base object. */
+void nfi_extend_exit(struct nfi_base *nb)
+{
+	mem_free(nb);
+}
+
+/*
+ * nfi_init - allocate and initialize the NFI layer for @res
+ *
+ * Brings up the ECC sub-driver, installs defaults/hooks, applies a safe
+ * default access timing and registers the NFI interrupt; the SPI-NAND
+ * extension is used when the resource says so.
+ *
+ * Fix vs. original: the error paths leaked the ECC object and the
+ * completion event once they had been created (nfiecc_exit()/
+ * nandx_event_destroy() were never called, although nfi_exit() releases
+ * both).  Returns the nfi handle or NULL on failure.
+ */
+struct nfi *nfi_init(struct nfi_resource *res)
+{
+	struct nfiecc_resource ecc_res;
+	struct nfi_base *nb;
+	struct nfiecc *ecc;
+	struct nfi *nfi;
+	int ret;
+
+	nb = mem_alloc(1, sizeof(struct nfi_base));
+	if (!nb) {
+		pr_err("nfi alloc memory fail @%s.\n", __func__);
+		return NULL;
+	}
+	nb->buf = NULL;
+
+	nb->res = *res;
+
+	/* fill ecc paras and init ecc */
+	ecc_res.ic_ver = nb->res.ic_ver;
+	ecc_res.dev = nb->res.dev;
+	ecc_res.irq_id = nb->res.ecc_irq_id;
+	ecc_res.regs = nb->res.ecc_regs;
+	ecc = nfiecc_init(&ecc_res);
+	if (!ecc) {
+		pr_err("nfiecc init fail.\n");
+		goto error;
+	}
+
+	nb->ecc = ecc;
+
+	/* creates nb->done; released on the error_ecc path below */
+	set_nfi_base_params(nb);
+	set_nfi_funcs(&nb->nfi);
+
+	/* Assign a temp sector size for reading ID & para page.
+	 * We may assign new value later.
+	 */
+	nb->nfi.sector_size = 512;
+
+	/* give a default timing, and as discuss
+	 * this is the only thing what we need do for nfi init
+	 * if need do more, then we can add a function
+	 */
+	writel(0x30C77FFF, nb->res.nfi_regs + NFI_ACCCON);
+
+	if (nb->res.nand_type == NAND_SPI)
+		nfi = nfi_extend_spi_init(nb);
+	else
+		nfi = nfi_extend_init(nb);
+	if (nfi) {
+		ret = nandx_irq_register(res->dev, res->nfi_irq_id,
+					 nfi_irq_handler,
+					 "mtk_nfi", nfi);
+		if (ret) {
+			pr_err("nfi irq register failed!\n");
+			goto error_ecc;
+		}
+
+		return nfi;
+	}
+
+error_ecc:
+	/* undo what set_nfi_base_params()/nfiecc_init() created */
+	nandx_event_destroy(nb->done);
+	nfiecc_exit(nb->ecc);
+error:
+	mem_free(nb);
+	return NULL;
+}
+
+/*
+ * nfi_exit - tear down everything created by nfi_init()
+ *
+ * NOTE(review): the completion event is destroyed before the NFI IRQ is
+ * unregistered; if the handler can still fire here and signal nb->done,
+ * unregistering first would be safer - confirm against the IRQ handler.
+ */
+void nfi_exit(struct nfi *nfi)
+{
+	struct nfi_base *nb = nfi_to_base(nfi);
+
+	nandx_event_destroy(nb->done);
+	nandx_irq_unregister(nb->res.nfi_irq_id);
+	nfiecc_exit(nb->ecc);
+	pmem_free(nb->buf);
+#ifdef NANDX_TEST_BUF_ALIGN
+	pmem_free(nb->buf_align);
+#endif
+	if (nb->res.nand_type == NAND_SPI)
+		nfi_extend_spi_exit(nb);
+	else
+		nfi_extend_exit(nb);
+}
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi/nfi_base.h b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi/nfi_base.h
new file mode 100644
index 0000000..2261f00
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi/nfi_base.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#ifndef __NFI_BASE_H__
+#define __NFI_BASE_H__
+
+/* generic poll timeout used with readl_poll_timeout_atomic() (microseconds) */
+#define NFI_TIMEOUT             1000000
+
+/* direction for the on-the-fly randomizer */
+enum randomizer_op {
+	RAND_ENCODE,
+	RAND_DECODE
+};
+
+/*
+ * Hooks and location info used to swap the factory bad-block marker
+ * byte with payload data so it survives the ECC/FDM layout.
+ */
+struct bad_mark_ctrl {
+	void (*bad_mark_swap)(struct nfi *nfi, u8 *buf, u8 *fdm);
+	u8 *(*fdm_shift)(struct nfi *nfi, u8 *fdm, int sector);
+	u32 sector;	/* sector the marker byte lives in */
+	u32 position;	/* byte position inside that sector */
+};
+
+/* per-IC capability/limits table (selected by nfi_get_match_data()) */
+struct nfi_caps {
+	u8 max_fdm_size;
+	u8 fdm_ecc_size;
+	u8 ecc_parity_bits;
+	const int *spare_size;	/* supported per-sector spare sizes */
+	u32 spare_size_num;	/* number of entries in spare_size */
+};
+
+/* status/error codes reported by the NFI layer */
+enum nfi_error_code {
+	/******** NFI successful code *********/
+	NFI_SUCCESS							  = 0x00
+	, NFI_ECC_CORRECTED                   = 0x01
+
+	/******** NFI error code *********/
+	, NFI_ERROR                           = 0x30
+	, NFI_ECC_UNCORRECT                   = 0x31
+	, NFI_ECC_TIMEOUT                     = 0x32
+	, NFI_CMD_TIMEOUT                     = 0x33
+	, NFI_ADDR_TIMEOUT                    = 0x34
+	, NFI_DATA_TIMEOUT                    = 0x35
+	, NFI_PROG_TIMEOUT                    = 0x36
+	, NFI_ERASE_TIMEOUT                   = 0x37
+	, NFI_READ_TIMEOUT                    = 0x38
+	, NFI_RESET_TIMEOUT                   = 0x39
+	, NFI_DEVICE_TIMEOUT                  = 0x3A
+	, NFI_PROG_FAILED                     = 0x3B
+	, NFI_ERASE_FAILED                    = 0x3C
+	, NFI_INVALID_PARAM                   = 0x3E
+	, NFI_BAD_BLOCK                       = 0x3F
+};
+
+/*
+ * struct nfi_base - common NFI controller state.  Embeds the public
+ * struct nfi (recovered via nfi_to_base()) and holds configuration,
+ * transfer bookkeeping and the overridable read/write pipeline hooks.
+ */
+struct nfi_base {
+	struct nfi nfi;
+	struct nfi_resource res;
+	struct nfiecc *ecc;
+	struct nfi_format format;
+	struct nfi_caps *caps;
+	struct bad_mark_ctrl bad_mark_ctrl;
+
+	/* page_size + spare_size */
+	u8 *buf;
+	u8 *buf_align;	/* NANDX_TEST_BUF_ALIGN: base for offsetting buf */
+
+	/* used for spi nand */
+	u8 cmd_mode;
+	u32 op_mode;	/* CNFG_xxx_MODE written into NFI_CNFG */
+
+	int page_sectors;
+
+	/* completion event signalled by the IRQ handler */
+	void *done;
+
+	/* for read/write */
+	int col;
+	int row;
+	int access_len;		/* bytes mapped for the current DMA transfer */
+	int rw_sectors;		/* sector count of the current transfer */
+	void *dma_addr;
+	int read_status;	/* 0 or -ENANDREAD for the current read */
+
+	bool dma_en;
+	bool dma_burst_en;
+	bool byte_rw_en;	/* PIO: byte port instead of 32-bit port */
+
+	/* CPU IRQ for NFI/NFIECC */
+	bool nfi_irq_en;
+
+	u8 fdm_size_sel;
+	u8 fdm_ecc_size_sel;
+	u8 sector_spare_size_sel;
+	u16 sector_size_sel;
+
+	bool cus_sec_size_en;
+	u8 cus_sec_size_sel;
+
+	/* No need for SLC/SPI */
+	bool last_seccus_size_en;
+	u8 last_seccus_size_sel;
+
+	bool ecc_en;
+
+	/* No need for SLC/SPI */
+	bool randomize_en;
+	u8 randomize_sel;
+	bool crc_en;
+
+	bool bad_mark_swap_en;
+
+	enum nfiecc_deccon ecc_deccon;
+	enum nfiecc_mode ecc_mode;
+
+	void (*set_op_mode)(void *regs, u32 mode);
+	bool (*is_page_empty)(struct nfi_base *nb, u8 *data, u8 *fdm,
+			      int sectors);
+
+	/* page access pipeline; extensions may override individual steps */
+	int (*rw_prepare)(struct nfi_base *nb, int sectors, u8 *data, u8 *fdm,
+			  bool read);
+	void (*rw_trigger)(struct nfi_base *nb, bool read);
+	int (*rw_wait_done)(struct nfi_base *nb, int sectors, bool read);
+	int (*rw_data)(struct nfi_base *nb, u8 *data, u8 *fdm, int sectors,
+		       bool read);
+	void (*rw_complete)(struct nfi_base *nb, u8 *data, u8 *fdm, bool read);
+};
+
+/* Recover the implementation object from its embedded public interface. */
+static inline struct nfi_base *nfi_to_base(struct nfi *nfi)
+{
+	return container_of(nfi, struct nfi_base, nfi);
+}
+
+/* raw-NAND (default) extension hooks */
+struct nfi *nfi_extend_init(struct nfi_base *nb);
+void nfi_extend_exit(struct nfi_base *nb);
+/* SPI-NAND extension hooks (implemented in nfi_spi.c) */
+struct nfi *nfi_extend_spi_init(struct nfi_base *nb);
+void nfi_extend_spi_exit(struct nfi_base *nb);
+
+#endif /* __NFI_BASE_H__ */
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi/nfi_regs.h b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi/nfi_regs.h
new file mode 100644
index 0000000..ebb6874
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi/nfi_regs.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#ifndef __NFI_REGS_H__
+#define __NFI_REGS_H__
+
+/* NFI_CNFG: transfer configuration (direction, ECC, DMA, op mode) */
+#define NFI_CNFG                0x000
+#define         CNFG_AHB                BIT(0)
+#define         CNFG_READ_EN            BIT(1)
+#define         CNFG_DMA_BURST_EN       BIT(2)
+#define         CNFG_RESEED_SEC_EN      BIT(4)
+#define         CNFG_RAND_SEL           BIT(5)
+#define         CNFG_BYTE_RW            BIT(6)
+#define         CNFG_HW_ECC_EN          BIT(8)
+#define         CNFG_AUTO_FMT_EN        BIT(9)
+#define         CNFG_RAND_MASK          GENMASK(5, 4)
+#define         CNFG_OP_MODE_MASK       GENMASK(14, 12)
+#define         CNFG_IDLE_MOD           0
+#define         CNFG_READ_MODE          (1 << 12)
+#define         CNFG_SINGLE_READ_MODE   (2 << 12)
+#define         CNFG_PROGRAM_MODE       (3 << 12)
+#define         CNFG_ERASE_MODE         (4 << 12)
+#define         CNFG_RESET_MODE         (5 << 12)
+#define         CNFG_CUSTOM_MODE        (6 << 12)
+/* NFI_PAGEFMT: page/sector/spare/FDM size layout */
+#define NFI_PAGEFMT             0x004
+#define         PAGEFMT_SPARE_SHIFT     16
+#define         PAGEFMT_SPARE_MASK      GENMASK(21, 16)
+#define         PAGEFMT_FDM_ECC_SHIFT   12
+#define         PAGEFMT_FDM_SHIFT       8
+#define         PAGEFMT_FDM_ECC_MASK    GENMASK(15, 12)
+#define         PAGEFMT_SEC_SEL_512     BIT(2)
+#define         PAGEFMT_FDM_MASK        GENMASK(11, 8)
+#define         PAGEFMT_512_2K          0
+#define         PAGEFMT_2K_4K           1
+#define         PAGEFMT_4K_8K           2
+#define         PAGEFMT_8K_16K          3
+/* fix: dropped a stray trailing ';' that broke expression use of this mask */
+#define         PAGEFMT_PAGE_MASK       GENMASK(2, 0)
+
+/* NFI_CON: transfer control (reset, burst read/write, sector count) */
+#define NFI_CON                 0x008
+#define         CON_FIFO_FLUSH          BIT(0)
+#define         CON_NFI_RST             BIT(1)
+#define         CON_NFI_SRD             BIT(4)
+#define         CON_NOB_BYTE            BIT(5)
+#define         CON_BRD                 BIT(8)
+#define         CON_BWR                 BIT(9)
+#define         CON_SEC_SHIFT           12
+#define         CON_SEC_MASK            GENMASK(16, 12)
+#define NFI_ACCCON              0x00c
+#define NFI_INTR_EN             0x010
+#define         INTR_BUSY_RETURN_EN     BIT(4)
+#define         INTR_AHB_DONE_EN        BIT(6)
+#define NFI_INTR_STA            0x014
+#define         NFI_IRQ_INTR            BIT(31)
+#define         NFI_IRQ_SPI             GENMASK(11, 6)
+#define         NFI_IRQ_SLC             (GENMASK(13, 12) | GENMASK(6, 0))
+
+/* command / address issue registers */
+#define NFI_CMD                 0x020
+#define NFI_ADDRNOB             0x030
+#define         ROW_SHIFT               4
+#define NFI_COLADDR             0x034
+#define NFI_ROWADDR             0x038
+#define NFI_STRDATA             0x040
+#define         STAR_EN                 1
+#define         STAR_DE                 0
+#define NFI_CNRNB               0x044
+/* PIO data port and its ready flag */
+#define NFI_DATAW               0x050
+#define NFI_DATAR               0x054
+#define NFI_PIO_DIRDY           0x058
+#define         PIO_DI_RDY              1
+#define NFI_STA                 0x060
+#define         STA_CMD                 BIT(0)
+#define         STA_ADDR                BIT(1)
+#define         STA_DATAR               BIT(2)
+#define         FLASH_MACRO_IDLE        BIT(5)
+#define         STA_BUSY                BIT(8)
+#define         STA_BUSY2READY          BIT(9)
+#define         STA_EMP_PAGE            BIT(12)
+#define         NFI_FSM_CUSTDATA        (0xe << 16)
+#define         NFI_FSM_MASK            GENMASK(19, 16)
+#define         NAND_FSM_MASK           GENMASK(29, 23)
+#define NFI_ADDRCNTR            0x070
+#define         CNTR_VALID_MASK         GENMASK(16, 0)
+#define         CNTR_MASK               GENMASK(16, 12)
+#define         ADDRCNTR_SEC_SHIFT      12
+#define         ADDRCNTR_SEC(val) \
+	(((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
+#define NFI_STRADDR             0x080
+#define NFI_BYTELEN             0x084
+#define NFI_CSEL                0x090
+#define NFI_IOCON               0x094
+#define         BRSTN_MASK              GENMASK(7, 4)
+#define         BRSTN_SHIFT             4
+/* per-sector FDM (spare) data registers: low and high 4 bytes */
+#define NFI_FDML(x)             (0x0a0 + (x) * 8)
+#define NFI_FDMM(x)             (0x0a4 + (x) * 8)
+#define NFI_DEBUG_CON1          0x220
+#define         STROBE_MASK             GENMASK(5, 3)
+#define         STROBE_SHIFT            3
+#define         ECC_CLK_EN              BIT(11)
+#define         AUTOC_SRAM_MODE         BIT(12)
+#define         BYPASS_MASTER_EN        BIT(15)
+#define NFI_MASTER_STA          0x224
+#define         MASTER_BUS_BUSY         0x3
+/* custom (non-power-of-two) sector size support */
+#define NFI_SECCUS_SIZE         0x22c
+#define         LAST_SECCUS_SIZE_SHIFT  20
+#define         LAST_SECCUS_SIZE_MASK   GENMASK(31, 20)
+#define         LAST_SECCUS_SIZE_EN     BIT(18)
+#define         SECCUS_SIZE_EN          BIT(17)
+#define         SECCUS_SIZE_MASK        GENMASK(12, 0)
+#define         SECCUS_SIZE_SHIFT       0
+
+#define NFI_RANDOM_CNFG         0x238
+#define         RAN_ENCODE_EN           BIT(0)
+#define         ENCODE_SEED_SHIFT       1
+#define         RAN_DECODE_EN           BIT(16)
+#define         DECODE_SEED_SHIFT       17
+#define         RAN_SEED_MASK           0x7fff
+#define NFI_EMPTY_THRESH        0x23c
+#define NFI_NAND_TYPE_CNFG      0x240
+#define         NAND_TYPE_ASYNC         0
+#define         NAND_TYPE_TOGGLE        1
+#define         NAND_TYPE_SYNC          2
+#define NFI_ACCCON1             0x244
+#define NFI_DELAY_CTRL          0x248
+#define NFI_TLC_RD_WHR2         0x300
+#define         TLC_RD_WHR2_EN          BIT(12)
+#define         TLC_RD_WHR2_MASK        GENMASK(11, 0)
+/* SPI-NAND glue */
+#define SNF_SNF_CNFG            0x55c
+#define         SPI_MODE_EN             1
+#define         SPI_MODE_DIS            0
+
+#endif /* __NFI_REGS_H__ */
+
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi/nfi_spi.c b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi/nfi_spi.c
new file mode 100644
index 0000000..458730f
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi/nfi_spi.c
@@ -0,0 +1,859 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "../nfi.h"
+#include "nfiecc.h"
+#include "nfi_regs.h"
+#include "nfi_base.h"
+#include "nfi_spi_regs.h"
+#include "nfi_spi.h"
+
+#define NFI_CMD_DUMMY_RD 0x00
+#define NFI_CMD_DUMMY_WR 0x80
+
+static struct nfi_spi_delay spi_delay[SPI_NAND_MAX_DELAY] = {
+	/*
+	 * tCLK_SAM_DLY, tCLK_OUT_DLY, tCS_DLY, tWR_EN_DLY,
+	 * tIO_IN_DLY[4], tIO_OUT_DLY[4], tREAD_LATCH_LATENCY
+	 */
+	{0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 0},
+	{23, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 0},
+	{47, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 0},
+	{0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 1},
+	{23, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 1},
+	{47, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 1}
+};
+
+static inline struct nfi_spi *base_to_snfi(struct nfi_base *nb)
+{
+	return container_of(nb, struct nfi_spi, base);
+}
+
+static int spi_wait_done(struct nfi_base *nb, u32 timeout)
+{
+	struct nfi_spi *nfi_spi = base_to_snfi(nb);
+	void *regs = nb->res.nfi_regs;
+	u64 now, end;
+	u32 val;
+
+	end = get_current_time_us() + timeout;
+
+	do {
+		val = readl(regs + SNF_STA_CTL1);
+		val &= nfi_spi->snfi_status_mask;
+		now = get_current_time_us();
+
+		if (now > end)
+			break;
+	} while (!val);
+
+	return !val ? -ETIMEDOUT : 0;
+}
+
+static void snfi_mac_enable(struct nfi_base *nb)
+{
+	struct nfi_spi *nfi_spi = base_to_snfi(nb);
+	void *regs = nb->res.nfi_regs;
+	u32 val;
+
+	val = readl(regs + SNF_MAC_CTL);
+	if (nfi_spi->mac_qpi_mode)
+		val |= MAC_XIO_SEL;
+	else
+		val &= ~MAC_XIO_SEL;
+	val |= SF_MAC_EN;
+
+	writel(val, regs + SNF_MAC_CTL);
+}
+
+static void snfi_mac_disable(struct nfi_base *nb)
+{
+	void *regs = nb->res.nfi_regs;
+	u32 val;
+
+	val = readl(regs + SNF_MAC_CTL);
+	val &= ~(SF_TRIG | SF_MAC_EN);
+	writel(val, regs + SNF_MAC_CTL);
+}
+
+static int snfi_mac_trigger(struct nfi_base *nb)
+{
+	void *regs = nb->res.nfi_regs;
+	int ret;
+	u32 val;
+
+	val = readl(regs + SNF_MAC_CTL);
+	val |= SF_TRIG;
+	writel(val, regs + SNF_MAC_CTL);
+
+	ret = readl_poll_timeout_atomic(regs + SNF_MAC_CTL, val,
+					val & WIP_READY, 10,
+					NFI_TIMEOUT);
+	if (ret) {
+		pr_err("polling wip ready for read timeout\n");
+		return ret;
+	}
+
+	return readl_poll_timeout_atomic(regs + SNF_MAC_CTL, val,
+					 !(val & WIP), 10,
+					 NFI_TIMEOUT);
+}
+
+static int snfi_mac_op(struct nfi_base *nb)
+{
+	int ret;
+
+	snfi_mac_enable(nb);
+	ret = snfi_mac_trigger(nb);
+	snfi_mac_disable(nb);
+
+	return ret;
+}
+
+static void snfi_write_mac(struct nfi_spi *nfi_spi, u8 *data, int count)
+{
+	struct nandx_split32 split = {0};
+	u32 reg_offset = round_down(nfi_spi->tx_count, 4);
+	void *regs = nfi_spi->base.res.nfi_regs;
+	u32 data_offset = 0, i, val;
+	u8 *p_val = (u8 *)(&val);
+
+	nandx_split(&split, nfi_spi->tx_count, count, val, 4);
+
+	if (split.head_len) {
+		val = readl(regs + SPI_GPRAM_ADDR + reg_offset);
+
+		for (i = 0; i < split.head_len; i++)
+			p_val[reminder(split.head, 4) + i] = data[i];
+
+		writel(val, regs + SPI_GPRAM_ADDR + reg_offset);
+	}
+
+	if (split.body_len) {
+		reg_offset = split.body;
+		data_offset = split.head_len;
+
+		for (i = 0; i < split.body_len; i++) {
+			p_val[i & 3] = data[data_offset + i];
+
+			if ((i & 3) == 3) {
+				writel(val, regs + SPI_GPRAM_ADDR + reg_offset);
+				reg_offset += 4;
+			}
+		}
+	}
+
+	if (split.tail_len) {
+		reg_offset = split.tail;
+		data_offset += split.body_len;
+
+		for (i = 0; i < split.tail_len; i++) {
+			p_val[i] = data[data_offset + i];
+
+			if (i == split.tail_len - 1)
+				writel(val, regs + SPI_GPRAM_ADDR + reg_offset);
+		}
+	}
+}
+
+static void snfi_read_mac(struct nfi_spi *nfi_spi, u8 *data, int count)
+{
+	void *regs = nfi_spi->base.res.nfi_regs;
+	u32 reg_offset = round_down(nfi_spi->tx_count, 4);
+	struct nandx_split32 split = {0};
+	u32 data_offset = 0, i, val;
+	u8 *p_val = (u8 *)&val;
+
+	nandx_split(&split, nfi_spi->tx_count, count, val, 4);
+
+	if (split.head_len) {
+		val = readl(regs + SPI_GPRAM_ADDR + reg_offset);
+		/* byte index into val must wrap at 4, as in snfi_write_mac() */
+		for (i = 0; i < split.head_len; i++)
+			data[data_offset + i] = p_val[reminder(split.head, 4) + i];
+	}
+
+	if (split.body_len) {
+		reg_offset = split.body;
+		data_offset = split.head_len;
+
+		for (i = 0; i < split.body_len; i++) {
+			if ((i & 3) == 0) {
+				val = readl(regs + SPI_GPRAM_ADDR + reg_offset);
+				reg_offset += 4;
+			}
+
+			data[data_offset + i] = p_val[i % 4];
+		}
+	}
+
+	if (split.tail_len) {
+		reg_offset = split.tail;
+		data_offset += split.body_len;
+		val = readl(regs + SPI_GPRAM_ADDR + reg_offset);
+
+		for (i = 0; i < split.tail_len; i++)
+			data[data_offset + i] = p_val[i];
+	}
+}
+
+static int snfi_auto_erase(struct nfi *nfi)
+{
+	struct nfi_base *nb = nfi_to_base(nfi);
+	struct nfi_spi *nfi_spi = base_to_snfi(nb);
+	void *regs = nb->res.nfi_regs;
+	u32 val;
+	int ret;
+
+	nfi_spi->snfi_status_mask = AUTO_BLKER_DONE;
+
+	val = readl(regs + SNF_ER_CTL);
+	val &= ~ER_CMD_MASK;
+	val |= nfi_spi->cmd[0] << ER_CMD_SHIFT;
+	writel(val, regs + SNF_ER_CTL);
+
+	val = nfi_spi->row_addr[0];
+	writel(val, regs + SNF_ER_CTL2);
+
+	val = readl(regs + SNF_ER_CTL);
+	val |= AUTO_ER_TRIGGER;
+	writel(val, regs + SNF_ER_CTL);
+
+	ret = spi_wait_done(nb, NFI_TIMEOUT);
+	if (ret)
+		pr_warn("snfi wait done time out, status(0x%x) mask(0x%x)\n",
+			readl(regs + SNF_STA_CTL1), nfi_spi->snfi_status_mask);
+
+	nfi_spi->snfi_status_mask = 0;
+	val = readl(regs + SNF_ER_CTL);
+	val &= ~AUTO_ER_TRIGGER;
+	writel(val, regs + SNF_ER_CTL);
+
+	nfi_spi->tx_count = 0;
+	nfi_spi->cur_cmd_idx = 0;
+	nfi_spi->cur_addr_idx = 0;
+
+	return ret;
+}
+
+static int snfi_send_command(struct nfi *nfi, short cmd)
+{
+	struct nfi_base *nb = nfi_to_base(nfi);
+	struct nfi_spi *nfi_spi = base_to_snfi(nb);
+
+	if (cmd == -1)
+		return 0;
+
+	if (nfi_spi->snfi_mode == SNFI_MAC_MODE) {
+		snfi_write_mac(nfi_spi, (u8 *)&cmd, 1);
+		nfi_spi->tx_count++;
+		return 0;
+	}
+
+	nfi_spi->cmd[nfi_spi->cur_cmd_idx++] = cmd;
+
+	/* for erase op */
+	if ((cmd == 0xd8) && (nfi_spi->snfi_mode == SNFI_AUTO_MODE))
+		nfi_spi->auto_erase = true;
+
+	return 0;
+}
+
+static int snfi_send_address(struct nfi *nfi, int col, int row,
+			     int col_cycle,
+			     int row_cycle)
+{
+	struct nfi_base *nb = nfi_to_base(nfi);
+	struct nfi_spi *nfi_spi = base_to_snfi(nb);
+	u32 addr, cycle, temp;
+
+	nb->col = col;
+	nb->row = row;
+
+	if (nfi_spi->snfi_mode == SNFI_MAC_MODE) {
+		addr = row;
+		cycle = row_cycle;
+
+		if (!row_cycle) {
+			addr = col;
+			cycle = col_cycle;
+		}
+
+		/* for read, for col addr [15:0] + 8bits dummy */
+		temp = nandx_cpu_to_be32(addr) >> ((4 - cycle) << 3);
+		if (!row_cycle && (col_cycle == 3))
+			temp = nandx_cpu_to_be32(addr) >>
+			       ((4 - cycle + 1) << 3);
+		snfi_write_mac(nfi_spi, (u8 *)&temp, cycle);
+		nfi_spi->tx_count += cycle;
+	}  else {
+		nfi_spi->row_addr[nfi_spi->cur_addr_idx++] = row;
+		nfi_spi->col_addr[nfi_spi->cur_addr_idx++] = col;
+	}
+
+	return 0;
+}
+
+static int snfi_trigger(struct nfi *nfi)
+{
+	struct nfi_base *nb = nfi_to_base(nfi);
+	struct nfi_spi *nfi_spi = base_to_snfi(nb);
+	void *regs = nb->res.nfi_regs;
+	int ret;
+
+	if (nfi_spi->auto_erase) {
+		nfi_spi->auto_erase = false;
+		ret = snfi_auto_erase(nfi);
+	} else {
+		writel(nfi_spi->tx_count, regs + SNF_MAC_OUTL);
+		writel(0, regs + SNF_MAC_INL);
+
+		ret =  snfi_mac_op(nb);
+	}
+
+	if (!nfi_spi->cur_cmd_idx) {
+		nfi_spi->tx_count = 0;
+		nfi_spi->cur_cmd_idx = 0;
+		nfi_spi->cur_addr_idx = 0;
+	}
+
+	return ret;
+}
+
+static int snfi_select_chip(struct nfi *nfi, int cs)
+{
+	struct nfi_base *nb = nfi_to_base(nfi);
+	void *regs = nb->res.nfi_regs;
+	u32 val;
+
+	val = readl(regs + SNF_MISC_CTL);
+
+	if (cs == 0) {
+		val &= ~SF2CS_SEL;
+		val &= ~SF2CS_EN;
+	} else if (cs == 1) {
+		val |= SF2CS_SEL;
+		val |= SF2CS_EN;
+	} else
+		return -EIO;
+
+	writel(val, regs + SNF_MISC_CTL);
+
+	return 0;
+}
+
+static int snfi_set_delay(struct nfi_base *nb, u8 delay_mode)
+{
+	void *regs = nb->res.nfi_regs;
+	struct nfi_spi_delay *delay;
+	u32 val;
+
+	if (delay_mode >= SPI_NAND_MAX_DELAY) /* u8 can never be < 0 */
+		return -EINVAL;
+
+	delay = &spi_delay[delay_mode];
+
+	val = delay->tIO_OUT_DLY[0] | delay->tIO_OUT_DLY[1] << 8 |
+	      delay->tIO_OUT_DLY[2] << 16 |
+	      delay->tIO_OUT_DLY[3] << 24;
+	writel(val, regs + SNF_DLY_CTL1);
+
+	val = delay->tIO_IN_DLY[0] | (delay->tIO_IN_DLY[1] << 8) |
+	      delay->tIO_IN_DLY[2] << 16 |
+	      delay->tIO_IN_DLY[3] << 24;
+	writel(val, regs + SNF_DLY_CTL2);
+
+	val = delay->tCLK_SAM_DLY | delay->tCLK_OUT_DLY << 8 |
+	      delay->tCS_DLY << 16 |
+	      delay->tWR_EN_DLY << 24;
+	writel(val, regs + SNF_DLY_CTL3);
+
+	writel(delay->tCS_DLY, regs + SNF_DLY_CTL4);
+
+	val = readl(regs + SNF_MISC_CTL);
+	val |= (delay->tREAD_LATCH_LATENCY) <<
+	       LATCH_LAT_SHIFT;
+	writel(val, regs + SNF_MISC_CTL);
+
+	return 0;
+}
+
+static int snfi_set_timing(struct nfi *nfi, void *timing, int type)
+{
+	/* Nothing need to do. */
+	return 0;
+}
+
+static int snfi_wait_ready(struct nfi *nfi, int type, u32 timeout)
+{
+	/* Nothing need to do. */
+	return 0;
+}
+
+static int snfi_ctrl(struct nfi *nfi, int cmd, void *args)
+{
+	struct nfi_base *nb = nfi_to_base(nfi);
+	void *regs = nb->res.nfi_regs;
+	struct nfi_spi *nfi_spi = base_to_snfi(nb);
+	int ret = 0;
+	u32 val;
+
+	switch (cmd) {
+#if 0
+	case NFI_CTRL_AUTO_READ_IRQ:
+		nfi_spi->auto_read_irq = *(u8 *)args;
+		break;
+
+	case NFI_CTRL_AUTO_PROGRAM_IRQ:
+		nfi_spi->auto_write_irq = *(u8 *)args;
+		break;
+
+	case NFI_CTRL_AUTO_ERASE_IRQ:
+		nfi_spi->auto_erase_irq = *(u8 *)args;
+		break;
+
+	case NFI_CTRL_CUSTOM_READ_IRQ:
+		nfi_spi->custom_read_irq = *(u8 *)args;
+		break;
+
+	case NFI_CTRL_CUSTOM_PROGRAM_IRQ:
+		nfi_spi->custom_write_irq = *(u8 *)args;
+		break;
+#endif
+	case SNFI_CTRL_BASE_INFO:
+		pr_debug("snfi hw config as below:\n");
+		pr_debug("snfi mode:%d\n", nfi_spi->snfi_mode);
+		pr_debug("snfi read mode:%d\n", nfi_spi->read_cache_mode);
+		pr_debug("snfi write mode:%d\n", nfi_spi->write_cache_mode);
+		break;
+
+	case SNFI_CTRL_OP_MODE:
+		nfi_spi->snfi_mode = *(u8 *)args;
+		break;
+
+	case SNFI_CTRL_RX_MODE:
+		nfi_spi->read_cache_mode = *(u8 *)args;
+		break;
+
+	case SNFI_CTRL_TX_MODE:
+		nfi_spi->write_cache_mode = *(u8 *)args;
+		break;
+
+	case SNFI_CTRL_DELAY_MODE:
+		ret = snfi_set_delay(nb, *(u8 *)args);
+		break;
+
+	case SNFI_CTRL_4FIFO_EN:
+		val = readl(regs + SNF_MISC_CTL);
+		if (*(u8 *)args)
+			val |= FIFO4_EN;
+		else
+			val &= ~FIFO4_EN;
+
+		writel(val, regs + SNF_MISC_CTL);
+		break;
+
+	case SNFI_CTRL_GF_CONFIG:
+		val = *(u32 *)args;
+		if (val)
+			writel(val, regs + SNF_GF_CTL3);
+		break;
+
+	case SNFI_CTRL_SAMPLE_DELAY:
+		writel(*(u8 *)args, regs + SNF_DLY_CTL3);
+		break;
+
+	case SNFI_CTRL_LATCH_LATENCY:
+		val = readl(regs + SNF_MISC_CTL);
+		val &= ~LATCH_LAT_MASK;
+		val |= *(u8 *)args << LATCH_LAT_SHIFT;
+		writel(val, regs + SNF_MISC_CTL);
+		break;
+
+	case SNFI_CTRL_MAC_QPI_MODE:
+		nfi_spi->mac_qpi_mode = *(u8 *)args;
+		break;
+
+	default:
+		ret = nfi_spi->parent->nfi.nfi_ctrl(&nfi_spi->base.nfi,
+						    cmd, args);
+		if (ret < 0) {
+			pr_err("%s cmd(%d) args(%d) not support.\n",
+			       __func__, cmd, *(u32 *)args);
+			ret = -EOPNOTSUPP;
+		}
+		break;
+
+	}
+
+	return ret;
+}
+
+static int snfi_read_bytes(struct nfi *nfi, u8 *data, int count)
+{
+	struct nfi_base *nb = nfi_to_base(nfi);
+	struct nfi_spi *nfi_spi = base_to_snfi(nb);
+	void *regs = nb->res.nfi_regs;
+	int ret;
+
+	writel(nfi_spi->tx_count, regs + SNF_MAC_OUTL);
+	writel(count, regs + SNF_MAC_INL);
+
+	ret = snfi_mac_op(nb);
+	if (ret)
+		return ret;
+
+	snfi_read_mac(nfi_spi, data, count);
+
+	nfi_spi->tx_count = 0;
+
+	return 0;
+}
+
+static int snfi_write_bytes(struct nfi *nfi, u8 *data, int count)
+{
+	struct nfi_base *nb = nfi_to_base(nfi);
+	struct nfi_spi *nfi_spi = base_to_snfi(nb);
+	void *regs = nb->res.nfi_regs;
+
+	snfi_write_mac(nfi_spi, data, count);
+	nfi_spi->tx_count += count;
+
+	writel(0, regs + SNF_MAC_INL);
+	writel(nfi_spi->tx_count, regs + SNF_MAC_OUTL);
+
+	nfi_spi->tx_count = 0;
+
+	return snfi_mac_op(nb);
+}
+
+static int snfi_reset(struct nfi *nfi)
+{
+	struct nfi_base *nb = nfi_to_base(nfi);
+	struct nfi_spi *nfi_spi = base_to_snfi(nb);
+	void *regs = nb->res.nfi_regs;
+	u32 val;
+	int ret;
+
+	ret = nfi_spi->parent->nfi.reset(nfi);
+	if (ret)
+		return ret;
+
+	val = readl(regs + SNF_MISC_CTL);
+	val |= SW_RST;
+	writel(val, regs + SNF_MISC_CTL);
+
+	ret = readx_poll_timeout_atomic(readw, regs + SNF_STA_CTL1, val,
+					!(val & SPI_STATE), 50,
+					NFI_TIMEOUT);
+	if (ret) {
+		pr_warn("spi state active in reset [0x%x] = 0x%x\n",
+			SNF_STA_CTL1, val);
+		return ret;
+	}
+
+	val = readl(regs + SNF_MISC_CTL);
+	val &= ~SW_RST;
+	writel(val, regs + SNF_MISC_CTL);
+
+	return 0;
+}
+
+static int snfi_config_for_write(struct nfi_base *nb, int count)
+{
+	struct nfi_spi *nfi_spi = base_to_snfi(nb);
+	void *regs = nb->res.nfi_regs;
+	u32 val;
+
+	nb->set_op_mode(regs, CNFG_CUSTOM_MODE);
+#if 0
+	val  = readl(regs + NFI_INTR_EN);
+	if (nfi_spi->snfi_mode == SNFI_CUSTOM_MODE)
+		val |= INTR_CUST_PROG_DONE;
+	else if (nfi_spi->snfi_mode == SNFI_AUTO_MODE)
+		val |= INTR_AUTO_PROG_DONE;
+	writel(val, regs + NFI_INTR_EN);
+#endif
+	val = readl(regs + SNF_MISC_CTL);
+
+	if (nfi_spi->write_cache_mode == SNFI_TX_114)
+		val |= PG_LOAD_X4_EN;
+	else
+		val &= ~PG_LOAD_X4_EN;
+
+	if (nfi_spi->snfi_mode == SNFI_CUSTOM_MODE) {
+		val |= PG_LOAD_CUSTOM_EN;
+		nfi_spi->snfi_status_mask = CUST_PROG_DONE;
+	} else {
+		val &= ~PG_LOAD_CUSTOM_EN;
+		nfi_spi->snfi_status_mask = AUTO_PROG_DONE;
+	}
+
+	writel(val, regs + SNF_MISC_CTL);
+
+	val = count * (nb->nfi.sector_size + nb->nfi.sector_spare_size);
+	writel(val << PG_LOAD_SHIFT, regs + SNF_MISC_CTL2);
+
+	val = readl(regs + SNF_PG_CTL1);
+	val &= ~PG_LOAD_CMD_MASK;
+
+	if (nfi_spi->snfi_mode == SNFI_CUSTOM_MODE)
+		val |= nfi_spi->cmd[0] << PG_LOAD_CMD_SHIFT;
+	else {
+		val |= nfi_spi->cmd[0] << PG_LOAD_CMD_SHIFT;
+		writel(nfi_spi->row_addr[0], regs + SNF_PG_CTL3);
+	}
+
+	writel(val, regs + SNF_PG_CTL1);
+	writel(nfi_spi->col_addr[0], regs + SNF_PG_CTL2);
+
+	writel(NFI_CMD_DUMMY_WR, regs + NFI_CMD);
+
+	return 0;
+}
+
+static int snfi_config_for_read(struct nfi_base *nb, int count)
+{
+	struct nfi_spi *nfi_spi = base_to_snfi(nb);
+	void *regs = nb->res.nfi_regs;
+	u32 val;
+	int ret = 0;
+
+	nb->set_op_mode(regs, CNFG_CUSTOM_MODE);
+
+#if 0
+	val  = readl(regs + NFI_INTR_EN);
+	if (nfi_spi->snfi_mode == SNFI_CUSTOM_MODE)
+		val |= INTR_CUST_READ_DONE;
+	else if (nfi_spi->snfi_mode == SNFI_AUTO_MODE)
+		val |= INTR_AUTO_READ_DONE;
+	writel(val, regs + NFI_INTR_EN);
+#endif
+	val = readl(regs + SNF_MISC_CTL);
+	val &= ~(DARA_READ_MODE_MASK | PG_LOAD_X4_EN);
+
+	switch (nfi_spi->read_cache_mode) {
+
+	case SNFI_RX_111:
+		break;
+
+	case SNFI_RX_112:
+		val |= X2_DATA_MODE << READ_MODE_SHIFT;
+		break;
+
+	case SNFI_RX_114:
+		val |= X4_DATA_MODE << READ_MODE_SHIFT;
+		break;
+
+	case SNFI_RX_122:
+		val |= DUAL_IO_MODE << READ_MODE_SHIFT;
+		break;
+
+	case SNFI_RX_144:
+		val |= QUAD_IO_MODE << READ_MODE_SHIFT;
+		break;
+
+	default:
+		pr_err("Not support this read operation: %d!\n",
+		       nfi_spi->read_cache_mode);
+		ret = -EINVAL;
+		break;
+	}
+
+	if (nfi_spi->snfi_mode == SNFI_CUSTOM_MODE) {
+		val |= DATARD_CUSTOM_EN;
+		nfi_spi->snfi_status_mask = CUST_READ_DONE;
+	} else {
+		val &= ~DATARD_CUSTOM_EN;
+		nfi_spi->snfi_status_mask = AUTO_READ_DONE;
+	}
+
+	writel(val, regs + SNF_MISC_CTL);
+
+	val = count * (nb->nfi.sector_size + nb->nfi.sector_spare_size);
+	writel(val, regs + SNF_MISC_CTL2);
+
+	val = readl(regs + SNF_RD_CTL2);
+	val &= ~DATA_READ_CMD_MASK;
+
+	if (nfi_spi->snfi_mode == SNFI_CUSTOM_MODE) {
+		val |= nfi_spi->cmd[0];
+		writel(nfi_spi->col_addr[1], regs + SNF_RD_CTL3);
+	} else {
+		val |= nfi_spi->cmd[1];
+		writel(nfi_spi->cmd[0] << PAGE_READ_CMD_SHIFT |
+		       nfi_spi->row_addr[0], regs + SNF_RD_CTL1);
+#if 0
+		writel(nfi_spi->cmd[1] << GF_CMD_SHIFT |
+		       nfi_spi->col_addr[1] << GF_ADDR_SHIFT,
+		       regs + SNF_GF_CTL1);
+#endif
+		writel(nfi_spi->col_addr[0], regs + SNF_RD_CTL3);
+	}
+
+	writel(val, regs + SNF_RD_CTL2);
+
+	writel(NFI_CMD_DUMMY_RD, regs + NFI_CMD);
+
+	return ret;
+}
+
+static bool is_page_empty(struct nfi_base *nb, u8 *data, u8 *fdm,
+			  int sectors)
+{
+	u32 *data32 = (u32 *)data;
+	u32 *fdm32 = (u32 *)fdm;
+	u32 i, count = 0;
+
+	for (i = 0; i < nb->format.page_size >> 2; i++) {
+		if (data32[i] != 0xffffffff) {	/* erased word is all 0xFF */
+			count += zero_popcount(data32[i]);
+			if (count > 10) {
+				pr_debug("%d %d count:%d\n", __LINE__, i, count);
+				return false;
+			}
+		}
+	}
+
+	if (fdm) {
+		for (i = 0; i < (nb->nfi.fdm_size * sectors >> 2); i++)
+			if (fdm32[i] != 0xffffffff) {
+				count += zero_popcount(fdm32[i]);
+				if (count > 10) {
+					pr_debug("%d %d count:%d\n", __LINE__, i, count);
+					return false;
+				}
+			}
+	}
+
+	pr_debug("page %d is empty\n", nb->row);
+
+	return true;
+}
+
+static int rw_prepare(struct nfi_base *nb, int sectors, u8 *data,
+		      u8 *fdm,
+		      bool read)
+{
+	struct nfi_spi *nfi_spi = base_to_snfi(nb);
+	int ret;
+
+	ret = nfi_spi->parent->rw_prepare(nb, sectors, data, fdm, read);
+	if (ret)
+		return ret;
+
+	if (read)
+		ret = snfi_config_for_read(nb, sectors);
+	else
+		ret = snfi_config_for_write(nb, sectors);
+
+	return ret;
+}
+
+static int rw_wait_done(struct nfi_base *nb, int sectors, bool read)
+{
+	struct nfi_spi *nfi_spi = base_to_snfi(nb);
+	void *regs = nb->res.nfi_regs;
+	int ret;
+
+	ret = nfi_spi->parent->rw_wait_done(nb, sectors, read);
+	if (ret)
+		return ret;
+
+	ret = spi_wait_done(nb, NFI_TIMEOUT);
+	if (ret)
+		pr_warn("snfi wait done time out, status(0x%x) mask(0x%x)\n",
+			readl(regs + SNF_STA_CTL1), nfi_spi->snfi_status_mask);
+
+	return ret;
+}
+
+static void rw_complete(struct nfi_base *nb, u8 *data, u8 *fdm,
+			bool read)
+{
+	struct nfi_spi *nfi_spi = base_to_snfi(nb);
+	void *regs = nb->res.nfi_regs;
+	u32 val;
+
+	nfi_spi->parent->rw_complete(nb, data, fdm, read);
+
+	val = readl(regs + SNF_MISC_CTL);
+
+	if (read)
+		val &= ~DATARD_CUSTOM_EN;
+	else
+		val &= ~PG_LOAD_CUSTOM_EN;
+
+	writel(val, regs + SNF_MISC_CTL);
+
+	/* clear snfi status */
+	val = readl(regs + SNF_STA_CTL1);
+	val |= nfi_spi->snfi_status_mask;
+	writel(val, regs + SNF_STA_CTL1);
+	val &= ~nfi_spi->snfi_status_mask;
+	writel(val, regs + SNF_STA_CTL1);
+
+	nfi_spi->tx_count = 0;
+	nfi_spi->cur_cmd_idx = 0;
+	nfi_spi->cur_addr_idx = 0;
+	nfi_spi->snfi_status_mask = 0;
+	nfi_spi->mac_qpi_mode = 0;
+}
+
+static void set_nfi_base_funcs(struct nfi_base *nb)
+{
+	nb->nfi.reset = snfi_reset;
+	nb->nfi.set_timing = snfi_set_timing;
+	nb->nfi.wait_ready = snfi_wait_ready;
+
+	nb->nfi.send_cmd = snfi_send_command;
+	nb->nfi.send_addr = snfi_send_address;
+	nb->nfi.trigger = snfi_trigger;
+	nb->nfi.nfi_ctrl = snfi_ctrl;
+	nb->nfi.select_chip = snfi_select_chip;
+
+	nb->nfi.read_bytes = snfi_read_bytes;
+	nb->nfi.write_bytes = snfi_write_bytes;
+
+	nb->rw_prepare = rw_prepare;
+	nb->rw_wait_done = rw_wait_done;
+	nb->rw_complete = rw_complete;
+	nb->is_page_empty = is_page_empty;
+}
+
+struct nfi *nfi_extend_spi_init(struct nfi_base *nb)
+{
+	struct nfi_spi *nfi_spi;
+
+	nfi_spi = mem_alloc(1, sizeof(struct nfi_spi));
+	if (!nfi_spi) {
+		pr_err("snfi alloc memory fail @%s.\n", __func__);
+		return NULL;
+	}
+
+	memcpy(&nfi_spi->base, nb, sizeof(struct nfi_base));
+	nfi_spi->parent = nb;
+	nfi_spi->read_cache_mode = SNFI_RX_114;
+	nfi_spi->write_cache_mode = SNFI_TX_114;
+	nfi_spi->cur_cmd_idx = 0;
+
+	set_nfi_base_funcs(&nfi_spi->base);
+
+	/* Change nfi to spi mode */
+	writel(SPI_MODE, nb->res.nfi_regs + SNF_SNF_CNFG);
+
+	return &(nfi_spi->base.nfi);
+}
+
+void nfi_extend_spi_exit(struct nfi_base *nb)
+{
+	struct nfi_spi *nfi_spi = base_to_snfi(nb);
+
+	mem_free(nfi_spi->parent);
+	mem_free(nfi_spi);
+}
+
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi/nfi_spi.h b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi/nfi_spi.h
new file mode 100644
index 0000000..eb2a517
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi/nfi_spi.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#ifndef __NFI_SPI_H__
+#define __NFI_SPI_H__
+
+#define SPI_NAND_MAX_DELAY      6
+#define SPI_NAND_MAX_OP         4
+
+/*TODO - add comments */
+struct nfi_spi_delay {
+	u8 tCLK_SAM_DLY;
+	u8 tCLK_OUT_DLY;
+	u8 tCS_DLY;
+	u8 tWR_EN_DLY;
+	u8 tIO_IN_DLY[4];
+	u8 tIO_OUT_DLY[4];
+	u8 tREAD_LATCH_LATENCY;
+};
+
+/* SPI Nand structure */
+struct nfi_spi {
+	struct nfi_base base;
+	struct nfi_base *parent;
+
+	u32 snfi_status_mask;
+	u8 snfi_mode;
+	u8 tx_count;
+
+	u8 cmd[16];
+	u8 cur_cmd_idx;
+
+	u32 row_addr[SPI_NAND_MAX_OP];
+	u32 col_addr[SPI_NAND_MAX_OP];
+	u8 cur_addr_idx;
+
+	u8 read_cache_mode;
+	u8 write_cache_mode;
+	bool auto_erase;
+	bool mac_qpi_mode;
+};
+
+#endif /* __NFI_SPI_H__ */
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi/nfi_spi_regs.h b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi/nfi_spi_regs.h
new file mode 100644
index 0000000..ae56404
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi/nfi_spi_regs.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#ifndef __NFI_SPI_REGS_H__
+#define __NFI_SPI_REGS_H__
+
+#define SNF_MAC_CTL             0x500
+#define         WIP                     BIT(0)
+#define         WIP_READY               BIT(1)
+#define         SF_TRIG                 BIT(2)
+#define         SF_MAC_EN               BIT(3)
+#define         MAC_XIO_SEL             BIT(4)
+#define SNF_MAC_OUTL            0x504
+#define SNF_MAC_INL             0x508
+#define SNF_RD_CTL1             0x50c
+#define         PAGE_READ_CMD_SHIFT     24
+#define SNF_RD_CTL2             0x510
+#define         DATA_READ_CMD_MASK      GENMASK(7, 0)
+#define SNF_RD_CTL3             0x514
+#define SNF_GF_CTL1             0x518
+#define         GF_ADDR_SHIFT           16
+#define         GF_CMD_SHIFT            24
+#define SNF_GF_CTL3             0x520
+#define SNF_PG_CTL1             0x524
+#define         PG_EXE_CMD_SHIFT        16
+#define         PG_LOAD_CMD_SHIFT       8
+#define         PG_LOAD_CMD_MASK        GENMASK(15, 8)
+#define SNF_PG_CTL2             0x528
+#define SNF_PG_CTL3             0x52c
+#define SNF_ER_CTL              0x530
+#define         AUTO_ER_TRIGGER         BIT(0)
+#define         ER_CMD_MASK             GENMASK(15, 8)
+#define         ER_CMD_SHIFT            8
+#define SNF_ER_CTL2             0x534
+#define SNF_MISC_CTL            0x538
+#define         SW_RST                  BIT(28)
+#define         FIFO4_EN                BIT(24)
+#define         PG_LOAD_X4_EN           BIT(20)
+#define         X2_DATA_MODE            1
+#define         X4_DATA_MODE            2
+#define         DUAL_IO_MODE            5
+#define         QUAD_IO_MODE            6
+#define         READ_MODE_SHIFT         16
+#define         LATCH_LAT_SHIFT         8
+#define         LATCH_LAT_MASK          GENMASK(9, 8)
+#define         DARA_READ_MODE_MASK     GENMASK(18, 16)
+#define         SF2CS_SEL               BIT(13)
+#define         SF2CS_EN                BIT(12)
+#define         PG_LOAD_CUSTOM_EN       BIT(7)
+#define         DATARD_CUSTOM_EN        BIT(6)
+#define SNF_MISC_CTL2           0x53c
+#define         PG_LOAD_SHIFT           16
+#define SNF_DLY_CTL1            0x540
+#define SNF_DLY_CTL2            0x544
+#define SNF_DLY_CTL3            0x548
+#define SNF_DLY_CTL4            0x54c
+#define SNF_STA_CTL1            0x550
+#define         CUST_PROG_DONE          BIT(28)
+#define         CUST_READ_DONE          BIT(27)
+#define         AUTO_PROG_DONE          BIT(26)
+#define         AUTO_READ_DONE          BIT(25)
+#define         AUTO_BLKER_DONE         BIT(24)
+#define         SPI_STATE               GENMASK(3, 0)
+#define SNF_STA_CTL2            0x554
+#define SNF_STA_CTL3            0x558
+#define SNF_SNF_CNFG            0x55c
+#define         SPI_MODE                BIT(0)
+#define SNF_DEBUG_SEL           0x560
+#define SPI_GPRAM_ADDR          0x800
+
+#endif /* __NFI_SPI_REGS_H__ */
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi/nfiecc.c b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi/nfiecc.c
new file mode 100644
index 0000000..29109c9
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi/nfiecc.c
@@ -0,0 +1,569 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "nfiecc_regs.h"
+#include "nfiecc.h"
+
+#define NFIECC_IDLE_REG(op) \
+	((op) == ECC_ENCODE ? NFIECC_ENCIDLE : NFIECC_DECIDLE)
+#define         IDLE_MASK       1
+#define NFIECC_CTL_REG(op) \
+	((op) == ECC_ENCODE ? NFIECC_ENCCON : NFIECC_DECCON)
+#define NFIECC_IRQ_REG(op) \
+	((op) == ECC_ENCODE ? NFIECC_ENCIRQEN : NFIECC_DECIRQEN)
+#define NFIECC_ADDR(op) \
+	((op) == ECC_ENCODE ? NFIECC_ENCDIADDR : NFIECC_DECDIADDR)
+
+#define ECC_TIMEOUT     500000
+
+/* ecc strength that each IP supports */
+static const int ecc_strength_mt6880[] = {
+	4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24
+};
+
+static irqreturn_t nfiecc_irq_handler(int irq, void *data)
+{
+	struct nfiecc *ecc = data;
+	void *regs = ecc->res.regs;
+	u32 status;
+
+	nandx_irq_disable(ecc->res.irq_id);
+	status = readl(regs + NFIECC_DECIRQSTA) & ECC_IRQEN;
+	if (status) {
+		ecc->irq_status = status;
+		/*status = readl(regs + NFIECC_DECDONE);
+		if (!(status & ecc->config.sectors))
+			return NAND_IRQ_NONE;*/
+
+		pr_debug("%s ECC DEC irq complete status:%x\n",
+			 __func__, status);
+		/*
+		 * Clear decode IRQ status once again to ensure that
+		 * there will be no extra IRQ.
+		 */
+		readl(regs + NFIECC_DECIRQSTA);
+		ecc->config.sectors = 0;
+		nandx_event_complete(ecc->done);
+	} else {
+		status = readl(regs + NFIECC_ENCIRQSTA) & ECC_IRQEN;
+		if (!status)
+			return NAND_IRQ_NONE;
+
+		ecc->irq_status = status;
+
+		pr_debug("%s ECC ENC irq complete status:%x\n",
+			 __func__, status);
+
+		readl(regs + NFIECC_ENCIRQSTA);
+		nandx_event_complete(ecc->done);
+	}
+
+	nandx_irq_enable(ecc->res.irq_id);
+	return NAND_IRQ_HANDLED;
+}
+
+static inline int nfiecc_wait_idle(struct nfiecc *ecc)
+{
+	int op = ecc->config.op;
+	int ret, val;
+
+	ret = readl_poll_timeout_atomic(ecc->res.regs + NFIECC_IDLE_REG(op),
+					val, val & IDLE_MASK,
+					10, ECC_TIMEOUT);
+	if (ret)
+		pr_warn("%s not idle\n",
+			op == ECC_ENCODE ? "encoder" : "decoder");
+
+	return ret;
+}
+
+static inline int nfiecc_wait_irq(struct nfiecc *ecc, u32 timeout)
+{
+	u32 i = 0;
+	int ret = 0;
+
+	while (i++ < timeout) {
+		if (ecc->irq_status) {
+			pr_debug("%s IRQ done status:%x time:%duS\n",
+				 __func__, ecc->irq_status, i);
+			break;
+		}
+		udelay(1);
+	}
+	if (i > timeout) {
+		pr_err("%s IRQ timeout error:ecc_irq_en:%d nfi_irq_rb_en:%d\n",
+		       __func__, ecc->irq_en, ecc->page_irq_en);
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+static int nfiecc_wait_encode_done(struct nfiecc *ecc)
+{
+	int ret, val;
+
+	if (ecc->irq_en) {
+		nfiecc_wait_irq(ecc, 0x10000);
+		/* poll one time to avoid missing irq event */
+		ret = readl_poll_timeout_atomic(ecc->res.regs + NFIECC_ENCSTA,
+						val, val & ENC_FSM_IDLE, 1, 1);
+		if (!ret)
+			return 0;
+
+		/* irq done, if not, we can go on to poll status for a while */
+		ret = nandx_event_wait_complete(ecc->done, ECC_TIMEOUT);
+		if (ret)
+			return 0;
+	}
+
+	ret = readl_poll_timeout_atomic(ecc->res.regs + NFIECC_ENCSTA,
+					val, val & ENC_FSM_IDLE,
+					10, ECC_TIMEOUT);
+	if (ret)
+		pr_err("encode timeout\n");
+
+	return ret;
+}
+
+static int nfiecc_wait_decode_done(struct nfiecc *ecc)
+{
+	u32 secbit = BIT(ecc->config.sectors - 1);
+	void *regs = ecc->res.regs;
+	int ret, val;
+
+	if (ecc->irq_en) {
+		ret = nfiecc_wait_irq(ecc, 0x10000);
+		if (!ret)
+			return 0;
+
+		ret = nandx_event_wait_complete(ecc->done, ECC_TIMEOUT);
+		if (ret)
+			return 0;
+	}
+
+	ret = readl_poll_timeout_atomic(regs + NFIECC_DECDONE,
+					val, val & secbit,
+					10, ECC_TIMEOUT);
+	if (ret) {
+		pr_err("decode timeout\n");
+		return ret;
+	}
+
+	/* decode done does not stands for ecc all work done.
+	 * we need check syn, bma, chien, autoc all idle.
+	 * just check it when ECC_DECCNFG[13:12] is 3,
+	 * which means auto correct.
+	 */
+	ret = readl_poll_timeout_atomic(regs + NFIECC_DECFSM,
+					val, (val & FSM_MASK) == FSM_IDLE,
+					10, ECC_TIMEOUT);
+	if (ret)
+		pr_err("decode fsm(0x%x) is not idle\n",
+			   readl(regs + NFIECC_DECFSM));
+
+	return ret;
+}
+
+static int nfiecc_wait_done(struct nfiecc *ecc)
+{
+	if (ecc->config.op == ECC_ENCODE)
+		return nfiecc_wait_encode_done(ecc);
+
+	return nfiecc_wait_decode_done(ecc);
+}
+
+static void nfiecc_encode_config(struct nfiecc *ecc, u32 ecc_idx)
+{
+	struct nfiecc_config *config = &ecc->config;
+	u32 val;
+
+	val = ecc_idx | (config->mode << ecc->caps->ecc_mode_shift);
+
+	if (config->mode == ECC_DMA_MODE)
+		val |= ENC_BURST_EN;
+
+	val |= (config->len << 3) << ENCCNFG_MS_SHIFT;
+	writel(val, ecc->res.regs + NFIECC_ENCCNFG);
+}
+
+static void nfiecc_decode_config(struct nfiecc *ecc, u32 ecc_idx)
+{
+	struct nfiecc_config *config = &ecc->config;
+	u32 dec_sz = (config->len << 3) +
+		     config->strength * ecc->caps->parity_bits;
+	u32 val;
+
+	val = ecc_idx | (config->mode << ecc->caps->ecc_mode_shift);
+
+	if (config->mode == ECC_DMA_MODE)
+		val |= DEC_BURST_EN;
+
+	val |= (dec_sz << DECCNFG_MS_SHIFT) |
+	       (config->deccon << DEC_CON_SHIFT);
+	val |= DEC_EMPTY_EN;
+	writel(val, ecc->res.regs + NFIECC_DECCNFG);
+}
+
+static void nfiecc_config(struct nfiecc *ecc)
+{
+	u32 idx;
+
+	if (ecc->config.mode == ECC_DMA_MODE) {
+		if ((unsigned long)ecc->config.dma_addr & 0x3)
+			pr_err("encode address is not 4B aligned: 0x%x\n",
+			       (u32)(unsigned long)ecc->config.dma_addr);
+
+		writel((unsigned long)ecc->config.dma_addr,
+		       ecc->res.regs + NFIECC_ADDR(ecc->config.op));
+	}
+
+	for (idx = 0; idx < ecc->caps->ecc_strength_num; idx++) {
+		if (ecc->config.strength == ecc->caps->ecc_strength[idx])
+			break;
+	}
+
+	if (ecc->config.op == ECC_ENCODE)
+		nfiecc_encode_config(ecc, idx);
+	else
+		nfiecc_decode_config(ecc, idx);
+}
+
+static int nfiecc_enable(struct nfiecc *ecc)
+{
+	enum nfiecc_operation op = ecc->config.op;
+	void *regs = ecc->res.regs;
+
+	nfiecc_config(ecc);
+
+	writel(ECC_OP_EN, regs + NFIECC_CTL_REG(op));
+
+	if (ecc->irq_en) {
+		writel(ECC_IRQEN, regs + NFIECC_IRQ_REG(op));
+
+		if (ecc->page_irq_en)
+			writel(ECC_IRQEN | ECC_PG_IRQ_SEL,
+			       regs + NFIECC_IRQ_REG(op));
+		ecc->irq_status = 0;
+		nandx_event_init(ecc->done);
+	}
+
+	return 0;
+}
+
+static int nfiecc_disable(struct nfiecc *ecc)
+{
+	enum nfiecc_operation op = ecc->config.op;
+	void *regs = ecc->res.regs;
+
+	nfiecc_wait_idle(ecc);
+
+	writel(0, regs + NFIECC_IRQ_REG(op));
+	writel(~ECC_OP_EN, regs + NFIECC_CTL_REG(op));
+
+	return 0;
+}
+
+static int nfiecc_correct_data(struct nfiecc *ecc,
+			       struct nfiecc_status *status,
+			       u8 *data, u32 sector)
+{
+	u32 err, offset, i;
+	u32 loc, byteloc, bitloc;
+
+	status->corrected = 0;
+	status->failed = 0;
+
+	offset = (sector >> 2);
+	err = readl(ecc->res.regs + NFIECC_DECENUM(offset));
+	err >>= (sector % 4) * 8;
+	err &= ecc->caps->err_mask;
+
+	if (err == ecc->caps->err_mask) {
+		status->failed++;
+		return -ENANDREAD;
+	}
+
+	status->corrected += err;
+	status->bitflips = max(status->bitflips, err);
+
+	for (i = 0; i < err; i++) {
+		loc = readl(ecc->res.regs + NFIECC_DECEL(i >> 1));
+		loc >>= ((i & 0x1) << 4);
+		loc &= 0xFFFF;
+		byteloc = loc >> 3;
+		bitloc = loc & 0x7;
+		data[byteloc] ^= (1 << bitloc);
+	}
+
+	return 0;
+}
+
+static int nfiecc_fill_data(struct nfiecc *ecc, u8 *data)
+{
+	struct nfiecc_config *config = &ecc->config;
+	void *regs = ecc->res.regs;
+	int size, ret, i;
+	u32 val;
+
+	if (config->mode != ECC_PIO_MODE)
+		return 0;
+
+	if (config->op == ECC_ENCODE)
+		size = (config->len + 3) >> 2;
+	else {
+		size = config->strength * ecc->caps->parity_bits;
+		size = (size + 7) >> 3;
+		size += config->len;
+		size = (size + 3) >> 2;
+	}
+
+	for (i = 0; i < size; i++) {
+		ret = readl_poll_timeout_atomic(regs + NFIECC_PIO_DIRDY,
+						val, val & PIO_DI_RDY,
+						10, ECC_TIMEOUT);
+		if (ret)
+			return ret;
+
+		writel(*((u32 *)data + i), regs + NFIECC_PIO_DI);
+	}
+
+	return 0;
+}
+
+static int nfiecc_encode(struct nfiecc *ecc, u8 *data)
+{
+	struct nfiecc_config *config = &ecc->config;
+	u32 len, i, val = 0;
+	u8 *p;
+	int ret;
+
+	/* Under NFI mode, nothing need to do */
+	if (config->mode == ECC_NFI_MODE)
+		return 0;
+
+	ret = nfiecc_fill_data(ecc, data);
+	if (ret)
+		return ret;
+
+	ret = nfiecc_wait_encode_done(ecc);
+	if (ret)
+		return ret;
+
+	ret = nfiecc_wait_idle(ecc);
+	if (ret)
+		return ret;
+
+	/* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
+	len = (config->strength * ecc->caps->parity_bits + 7) >> 3;
+	p = data + config->len;
+
+	/* Write the parity bytes generated by the ECC back to the OOB region */
+	for (i = 0; i < len; i++) {
+		if ((i % 4) == 0)
+			val = readl(ecc->res.regs + NFIECC_ENCPAR(i / 4));
+
+		p[i] = (val >> ((i % 4) * 8)) & 0xff;
+	}
+
+	return 0;
+}
+
+static int nfiecc_decode(struct nfiecc *ecc, u8 *data)
+{
+	int ret;
+
+	/* Under NFI mode, nothing need to do */
+	if (ecc->config.mode == ECC_NFI_MODE)
+		return 0;
+
+	ret = nfiecc_fill_data(ecc, data);
+	if (ret)
+		return ret;
+
+	return nfiecc_wait_decode_done(ecc);
+}
+
+static int nfiecc_decode_status(struct nfiecc *ecc, u32 start_sector,
+				u32 sectors)
+{
+	void *regs = ecc->res.regs;
+	u32 i, val = 0, err;
+	u32 bitflips = 0;
+
+	for (i = start_sector; i < start_sector + sectors; i++) {
+		if (ecc->config.deccon == ECC_DEC_FER) {
+			if (readl(regs + NFIECC_DECFER) & (1 << i)) {
+				bitflips = 1;
+				/* $i is not larger than $MAX_SEC_NUMS */
+				ecc->last_decode_status[i] = bitflips;
+			}
+			continue;
+		}
+
+		if ((i % 4) == 0)
+			val = readl(regs + NFIECC_DECENUM(i / 4));
+
+		err = val >> ((i % 4) * 8);
+		err &= ecc->caps->err_mask;
+		ecc->last_decode_status[i] = err;
+
+		if (err == ecc->caps->err_mask)
+			pr_debug("sector %d is uncorrectable\n", i);
+
+		bitflips = max(bitflips, err);
+		/* if (bitflips)
+			pr_debug("Corrected bitflips:%d sectors:%d start_sector:%d\n",
+			bitflips, sectors, start_sector); */
+	}
+
+	if (bitflips == ecc->caps->err_mask)
+		return -ENANDREAD;
+
+	return bitflips;
+}
+
+static int nfiecc_adjust_strength(struct nfiecc *ecc, int strength)
+{
+	struct nfiecc_caps *caps = ecc->caps;
+	int i, count = caps->ecc_strength_num;
+
+	if (strength >= caps->ecc_strength[count - 1])
+		return caps->ecc_strength[count - 1];
+
+	if (strength < caps->ecc_strength[0])
+		return -EINVAL;
+
+	for (i = 1; i < count; i++) {
+		if (strength < caps->ecc_strength[i])
+			return caps->ecc_strength[i - 1];
+	}
+
+	return -EINVAL;
+}
+
+static int nfiecc_ctrl(struct nfiecc *ecc, int cmd, void *args)
+{
+	int ret = 0;
+
+	switch (cmd) {
+	case NFI_CTRL_ECC_IRQ:
+		ecc->irq_en = *(bool *)args;
+		pr_debug("ecc_irq_en :%d\n", ecc->irq_en);
+		break;
+
+	case NFI_CTRL_ECC_PAGE_IRQ:
+		ecc->page_irq_en = *(bool *)args;
+		pr_debug("ecc_page_irq_en:%d\n",
+			 ecc->page_irq_en);
+		break;
+
+	case NFI_CTRL_ECC_ERRNUM0:
+		*(u32 *)args = ecc->last_decode_status[0];
+		break;
+
+	case NFI_CTRL_ECC_GET_STATUS:
+		memcpy(args, &ecc->ecc_status, sizeof(struct nfiecc_status));
+		break;
+
+	default:
+		pr_err("cmd(%d) args(%d) not support.\n",
+		       cmd, *(u32 *)args);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int nfiecc_hw_init(struct nfiecc *ecc)
+{
+	int ret;
+
+	ret = nfiecc_wait_idle(ecc);
+	if (ret)
+		return ret;
+
+	writel(~ECC_OP_EN, ecc->res.regs + NFIECC_ENCCON);
+
+	ret = nfiecc_wait_idle(ecc);
+	if (ret)
+		return ret;
+
+	writel(~ECC_OP_EN, ecc->res.regs + NFIECC_DECCON);
+
+	writel(BIT(0), ecc->res.regs + NFIECC_DEBUG2);
+
+	return 0;
+}
+
+static struct nfiecc_caps nfiecc_caps_mt6880 = {
+	.err_mask = 0x1f,
+	.ecc_mode_shift = 5,
+	.parity_bits = 14,
+	.ecc_strength = ecc_strength_mt6880,
+	.ecc_strength_num = 11,
+};
+
+static struct nfiecc_caps *nfiecc_get_match_data(enum mtk_ic_version ic)
+{
+	/* NOTE: add other IC's data */
+	return &nfiecc_caps_mt6880;
+}
+
+struct nfiecc *nfiecc_init(struct nfiecc_resource *res)
+{
+	struct nfiecc *ecc;
+	int ret;
+
+	ecc = mem_alloc(1, sizeof(struct nfiecc));
+	if (!ecc)
+		return NULL;
+
+	ecc->res = *res;
+
+	ret = nandx_irq_register(res->dev, res->irq_id, nfiecc_irq_handler,
+				 "mtk_ecc", ecc);
+	if (ret) {
+		pr_err("ecc irq register failed!\n");
+		goto error;
+	}
+
+	ecc->irq_en = false;
+	ecc->page_irq_en = false;
+	ecc->done = nandx_event_create();
+	ecc->caps = nfiecc_get_match_data(res->ic_ver);
+
+	ecc->adjust_strength = nfiecc_adjust_strength;
+	ecc->enable = nfiecc_enable;
+	ecc->disable = nfiecc_disable;
+	ecc->decode = nfiecc_decode;
+	ecc->encode = nfiecc_encode;
+	ecc->wait_done = nfiecc_wait_done;
+	ecc->decode_status = nfiecc_decode_status;
+	ecc->correct_data = nfiecc_correct_data;
+	ecc->nfiecc_ctrl = nfiecc_ctrl;
+
+	ret = nfiecc_hw_init(ecc);
+	if (ret)
+		goto error;
+
+	return ecc;
+
+error:
+	mem_free(ecc);
+
+	return NULL;
+}
+
+void nfiecc_exit(struct nfiecc *ecc)
+{
+	nandx_irq_unregister(ecc->res.irq_id);
+	nandx_event_destroy(ecc->done);
+	mem_free(ecc);
+}
+
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi/nfiecc.h b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi/nfiecc.h
new file mode 100644
index 0000000..23a5751
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi/nfiecc.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#ifndef __NFIECC_H__
+#define __NFIECC_H__
+
+enum nfiecc_mode {
+	ECC_DMA_MODE,
+	ECC_NFI_MODE,
+	ECC_PIO_MODE
+};
+
+enum nfiecc_operation {
+	ECC_ENCODE,
+	ECC_DECODE
+};
+
+enum nfiecc_deccon {
+	ECC_DEC_FER = 1,
+	ECC_DEC_LOCATE = 2,
+	ECC_DEC_CORRECT = 3
+};
+
+struct nfiecc_resource {
+	int ic_ver;
+	void *dev;
+	void *regs;
+	int irq_id;
+
+};
+
+struct nfiecc_caps {
+	u32 err_mask;
+	u32 ecc_mode_shift;
+	u32 parity_bits;
+	const int *ecc_strength;
+	u32 ecc_strength_num;
+};
+
+#define MAX_SEC_NUMS 16
+
+struct nfiecc_config {
+	enum nfiecc_operation op;
+	enum nfiecc_mode mode;
+	enum nfiecc_deccon deccon;
+
+	void *dma_addr; /* DMA use only */
+	u32 strength;
+	u32 sectors;
+	u32 len;
+};
+
+struct nfiecc {
+	struct nfiecc_resource res;
+	struct nfiecc_config config;
+	struct nfiecc_caps *caps;
+	struct nfiecc_status ecc_status;
+
+	bool irq_en;
+
+	bool page_irq_en;
+	u32 irq_status;
+
+	u8 level_sel;
+
+	u32 last_decode_status[MAX_SEC_NUMS];
+
+	void *done;
+
+	int (*adjust_strength)(struct nfiecc *ecc, int strength);
+
+	int (*enable)(struct nfiecc *ecc);
+	int (*disable)(struct nfiecc *ecc);
+
+	int (*decode)(struct nfiecc *ecc, u8 *data);
+	int (*encode)(struct nfiecc *ecc, u8 *data);
+
+	int (*decode_status)(struct nfiecc *ecc, u32 start_sector, u32 sectors);
+	int (*correct_data)(struct nfiecc *ecc,
+						struct nfiecc_status *status,
+						u8 *data, u32 sector);
+	int (*wait_done)(struct nfiecc *ecc);
+
+	int (*nfiecc_ctrl)(struct nfiecc *ecc, int cmd, void *args);
+};
+
+struct nfiecc *nfiecc_init(struct nfiecc_resource *res);
+void nfiecc_exit(struct nfiecc *ecc);
+
+#endif /* __NFIECC_H__ */
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi/nfiecc_regs.h b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi/nfiecc_regs.h
new file mode 100644
index 0000000..d77131c
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/core/nfi/nfiecc_regs.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#ifndef __NFIECC_REGS_H__
+#define __NFIECC_REGS_H__
+
+#define NFIECC_ENCCON           0x000
+/* NFIECC_DECCON has same bit define */
+#define         ECC_OP_EN               BIT(0)
+#define NFIECC_ENCCNFG          0x004
+#define         ENCCNFG_MS_SHIFT        16
+#define         ENC_BURST_EN            BIT(8)
+#define NFIECC_ENCDIADDR        0x008
+#define NFIECC_ENCIDLE          0x00c
+#define NFIECC_ENCSTA           0x07c
+#define         ENC_FSM_IDLE            1
+#define NFIECC_ENCIRQEN         0x080
+/* NFIECC_DECIRQEN has same bit define */
+#define         ECC_IRQEN               BIT(0)
+#define         ECC_PG_IRQ_SEL          BIT(1)
+#define NFIECC_ENCIRQSTA        0x084
+#define         ENC_IRQSTA_GEN          BIT(0)
+#define NFIECC_PIO_DIRDY        0x090
+#define         PIO_DI_RDY              BIT(0)
+#define NFIECC_PIO_DI           0x094
+#define NFIECC_DECCON           0x100
+#define NFIECC_DECCNFG          0x104
+#define         DEC_BURST_EN            BIT(8)
+#define         DEC_EMPTY_EN            BIT(31)
+#define         DEC_CON_SHIFT           12
+#define         DECCNFG_MS_SHIFT        16
+#define NFIECC_DECDIADDR        0x108
+#define NFIECC_DECIDLE          0x10c
+#define NFIECC_DECFER           0x110
+#define NFIECC_DECENUM(x)       (0x114 + (x) * 4)
+#define NFIECC_DECDONE          0x124
+#define NFIECC_DECIRQEN         0x200
+#define NFIECC_DECIRQSTA        0x204
+#define NFIECC_DECFSM           0x208
+#define         FSM_MASK                0x3f3fff0f
+#define         FSM_IDLE                0x01011101
+#define NFIECC_BYPASS           0x20c
+#define         NFIECC_BYPASS_EN        BIT(0)
+#define NFIECC_DEBUG2           0x21c
+#define NFIECC_ENCPAR(x)        (0x300 + (x) * 4)
+#define NFIECC_DECEL(x)         (0x500 + (x) * 4)
+
+#endif /* __NFIECC_REGS_H__ */
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/driver/Nandx.mk b/src/kernel/linux/v4.19/drivers/mtd/nandx/driver/Nandx.mk
new file mode 100644
index 0000000..c9c24d0
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/driver/Nandx.mk
@@ -0,0 +1,31 @@
+#
+# Copyright (C) 2017 MediaTek Inc.
+# Licensed under either
+#     BSD Licence, (see NOTICE for more details)
+#     GNU General Public License, version 2.0, (see NOTICE for more details)
+#
+
+nandx-y += platform/$(NANDX_IC_VERSION)/nandx_platform.c
+
+nandx-$(NANDX_SIMULATOR_SUPPORT) += simulator/driver.c
+
+nandx-$(NANDX_CTP_SUPPORT) += ctp/ts_nand.c
+
+nandx-$(NANDX_VERIFY_SUPPORT) += libnt/nt_generic.c
+nandx-$(NANDX_VERIFY_SUPPORT) += libnt/nt_misc.c
+nandx-$(NANDX_VERIFY_SUPPORT) += libnt/nt_nfi_spi.c
+nandx-$(NANDX_VERIFY_SUPPORT) += libnt/nt_ecc.c
+nandx-$(NANDX_VERIFY_SUPPORT) += libnt/nt_base.c
+nandx-$(NANDX_VERIFY_SUPPORT) += libnt/nt_slc.c
+nandx-$(NANDX_VERIFY_SUPPORT) += libnt/nt_spi.c
+nandx-$(NANDX_VERIFY_SUPPORT) += libnt/nt_ops.c
+nandx-header-$(NANDX_VERIFY_SUPPORT) += libnt/nt_ops.h
+nandx-header-$(NANDX_VERIFY_SUPPORT) += libnt/nt_generic.h
+
+nandx-$(NANDX_BBT_SUPPORT) += bbt/bbt.c
+nandx-$(NANDX_BROM_SUPPORT) += brom/driver.c
+nandx-$(NANDX_KERNEL_SUPPORT) += kernel/driver.c
+nandx-$(NANDX_LK_SUPPORT) += lk/driver.c
+nandx-$(NANDX_UBOOT_SUPPORT) += uboot/driver.c
+nandx-$(NANDX_AOS_SUPPORT) += aos/driver.c
+nandx-$(NANDX_AOS_SUPPORT) += aos/ftl.c
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/driver/bbt/bbt.c b/src/kernel/linux/v4.19/drivers/mtd/nandx/driver/bbt/bbt.c
new file mode 100644
index 0000000..41a01f7
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/driver/bbt/bbt.c
@@ -0,0 +1,481 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "bbt.h"
+
+/* Not support: multi-chip */
+static u8 main_bbt_pattern[] = {'B', 'b', 't', '0' };
+static u8 mirror_bbt_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct bbt_manager g_bbt_manager = {
+	{	{{main_bbt_pattern, 4}, 0, BBT_INVALID_ADDR},
+		{{mirror_bbt_pattern, 4}, 0, BBT_INVALID_ADDR}
+	},
+	NAND_BBT_SCAN_MAXBLOCKS, NULL
+};
+
+static inline void set_bbt_mark(u8 *bbt, u32 block, u8 mark)
+{
+	u32 index, offset;
+
+	index = GET_ENTRY(block);
+	offset = GET_POSITION(block);
+
+	bbt[index] &= ~(BBT_ENTRY_MASK << offset);
+	bbt[index] |= (mark & BBT_ENTRY_MASK) << offset;
+	pr_debug("%s %d block:%d, bbt[%d]:0x%x, offset:%d, mark:%d\n",
+			 __func__, __LINE__, block, index, bbt[index], offset, mark);
+}
+
+static inline u8 get_bbt_mark(u8 *bbt, u32 block)
+{
+	u32 offset = GET_POSITION(block);
+	u32 index = GET_ENTRY(block);
+	u8 value = bbt[index];
+
+	return (value >> offset) & BBT_ENTRY_MASK;
+}
+
+static void mark_nand_bad(struct nandx_info *nand, u32 block)
+{
+	u8 *buf;
+	int ret;
+
+	buf = mem_alloc(1, nand->page_size + nand->oob_size);
+	if (buf == NULL) {
+		pr_err("%s, %d, memory alloc fail, pagesize:%d, oobsize:%d\n",
+		       __func__, __LINE__, nand->page_size, nand->oob_size);
+		return;
+	}
+	memset(buf, 0, nand->page_size + nand->oob_size);
+	ret = nandx_erase((u64)block * nand->block_size, nand->block_size);
+	if (ret < 0) {
+		pr_err("%s, %d\n", __func__, __LINE__);
+		mem_free(buf);
+		return;
+	}
+	ret = nandx_write(buf, buf + nand->page_size,
+			  (u64)block * nand->block_size, nand->page_size);
+	if (ret < 0)
+		pr_err("%s, %d\n", __func__, __LINE__);
+
+	mem_free(buf);
+}
+
+static inline bool is_bbt_data(u8 *buf, struct bbt_pattern *pattern)
+{
+	int i;
+
+	for (i = 0; i < pattern->len; i++) {
+		if (buf[i] != pattern->data[i])
+			return false;
+	}
+
+	return true;
+}
+
+static u64 get_bbt_address(struct nandx_info *nand, u8 *bbt,
+			   u64 mirror_addr,
+			   int max_blocks)
+{
+	u64 addr, end_addr;
+	u8 mark;
+
+	addr = nand->total_size;
+	end_addr = nand->total_size - nand->block_size * (u64)max_blocks;
+
+	while (addr > end_addr) {
+		addr -= nand->block_size;
+		mark = get_bbt_mark(bbt, div_down(addr, nand->block_size));
+
+		if (mark == BBT_BLOCK_WORN || mark == BBT_BLOCK_FACTORY_BAD)
+			continue;
+		if (addr != mirror_addr)
+			return addr;
+	}
+
+	return BBT_INVALID_ADDR;
+}
+
+static int read_bbt(struct bbt_desc *desc, u8 *bbt, u32 len)
+{
+	int ret;
+
+	ret = nandx_read(bbt, NULL, desc->bbt_addr + desc->pattern.len + 1,
+			 len);
+	if (ret < 0)
+		pr_err("nand_bbt: error reading BBT page, ret:%d\n", ret);
+
+	return ret < 0 ? ret : 0;
+}
+
+static void create_bbt(struct nandx_info *nand, u8 *bbt)
+{
+	u32 offset = 0, block = 0;
+
+	do {
+		if (nandx_is_bad_block(offset)) {
+			pr_info("Create bbt at bad block:%d\n", block);
+			set_bbt_mark(bbt, block, BBT_BLOCK_FACTORY_BAD);
+		}
+		block++;
+		offset += nand->block_size;
+	} while (offset < nand->total_size);
+}
+
+static int search_bbt(struct nandx_info *nand, struct bbt_desc *desc,
+		      int max_blocks)
+{
+	u64 addr, end_addr;
+	u8 *buf;
+	int ret;
+
+	buf = mem_alloc(1, nand->page_size);
+	if (buf == NULL) {
+		pr_err("%s, %d, mem alloc fail!!! len:%d\n",
+			   __func__, __LINE__, nand->page_size);
+		return -ENOMEM;
+	}
+
+	addr = nand->total_size;
+	end_addr = nand->total_size - (u64)max_blocks * nand->block_size;
+	while (addr > end_addr) {
+		addr -= nand->block_size;
+
+		nandx_read(buf, NULL, addr, nand->page_size);
+
+		if (is_bbt_data(buf, &desc->pattern)) {
+			desc->bbt_addr = addr;
+			desc->version = buf[desc->pattern.len];
+			pr_info("BBT is found at addr 0x%llx, version %d\n",
+				desc->bbt_addr, desc->version);
+			ret = 0;
+			break;
+		}
+		ret = -EFAULT;
+	}
+
+	mem_free(buf);
+	return ret;
+}
+
+static int save_bbt(struct nandx_info *nand, struct bbt_desc *desc,
+		    u8 *bbt)
+{
+	u32 page_size_mask, total_block;
+	int write_len;
+	u8 *buf;
+	int ret;
+
+	ret = nandx_erase(desc->bbt_addr, nand->block_size);
+	if (ret) {
+		pr_err("erase addr 0x%llx fail !!!, ret %d\n",
+			   desc->bbt_addr, ret);
+		return ret;
+	}
+
+	total_block = div_down(nand->total_size, nand->block_size);
+	write_len = GET_BBT_LENGTH(total_block) + desc->pattern.len + 1;
+	page_size_mask = nand->page_size - 1;
+	write_len = (write_len + page_size_mask) & (~page_size_mask);
+
+	buf = (u8 *)mem_alloc(1, write_len);
+	if (buf == NULL) {
+		pr_err("%s, %d, mem alloc fail!!! len:%d\n",
+			   __func__, __LINE__, write_len);
+		return -ENOMEM;
+	}
+	memset(buf, 0xFF, write_len);
+
+	memcpy(buf, desc->pattern.data, desc->pattern.len);
+	buf[desc->pattern.len] = desc->version;
+
+	memcpy(buf + desc->pattern.len + 1, bbt, GET_BBT_LENGTH(total_block));
+
+	ret = nandx_write(buf, NULL, desc->bbt_addr, write_len);
+
+	if (ret)
+		pr_err("nandx_write fail(%d), offset:0x%llx, len(%d)\n",
+			   ret, desc->bbt_addr, write_len);
+	mem_free(buf);
+
+	return ret;
+}
+
+static int write_bbt(struct nandx_info *nand, struct bbt_desc *main,
+		     struct bbt_desc *mirror, u8 *bbt, int max_blocks)
+{
+	int block;
+	int ret;
+
+	do {
+		if (main->bbt_addr == BBT_INVALID_ADDR) {
+			main->bbt_addr = get_bbt_address(nand, bbt,
+							 mirror->bbt_addr, max_blocks);
+			if (main->bbt_addr == BBT_INVALID_ADDR)
+				return -ENOSPC;
+		}
+
+		ret = save_bbt(nand, main, bbt);
+		if (!ret)
+			break;
+
+		block = div_down(main->bbt_addr, nand->block_size);
+		set_bbt_mark(bbt, block, BBT_BLOCK_WORN);
+		main->version++;
+		mark_nand_bad(nand, block);
+		main->bbt_addr = BBT_INVALID_ADDR;
+	} while (1);
+
+	return 0;
+}
+
+static void mark_bbt_region(struct nandx_info *nand, u8 *bbt,
+							int bbt_blocks)
+{
+	int total_block;
+	int block;
+	u8 mark;
+
+	total_block = div_down(nand->total_size, nand->block_size);
+	block = total_block - bbt_blocks;
+
+	while (bbt_blocks) {
+		mark = get_bbt_mark(bbt, block);
+		if (mark == BBT_BLOCK_GOOD)
+			set_bbt_mark(bbt, block, BBT_BLOCK_RESERVED);
+		block++;
+		bbt_blocks--;
+	}
+}
+
+static void unmark_bbt_region(struct nandx_info *nand, u8 *bbt,
+							  int bbt_blocks)
+{
+	int total_block;
+	int block;
+	u8 mark;
+
+	total_block = div_down(nand->total_size, nand->block_size);
+	block = total_block - bbt_blocks;
+
+	while (bbt_blocks) {
+		mark = get_bbt_mark(bbt, block);
+		if (mark == BBT_BLOCK_RESERVED)
+			set_bbt_mark(bbt, block, BBT_BLOCK_GOOD);
+		block++;
+		bbt_blocks--;
+	}
+}
+
+static int update_bbt(struct nandx_info *nand, struct bbt_desc *desc,
+		      u8 *bbt,
+		      int max_blocks)
+{
+	int ret = 0, i;
+
+	/* The reserved info is not stored in NAND*/
+	unmark_bbt_region(nand, bbt, max_blocks);
+
+	desc[0].version++;
+	for (i = 0; i < 2; i++) {
+		if (i > 0)
+			desc[i].version = desc[i - 1].version;
+
+		ret = write_bbt(nand, &desc[i], &desc[1 - i], bbt, max_blocks);
+		if (ret)
+			break;
+	}
+	mark_bbt_region(nand, bbt, max_blocks);
+
+	return ret;
+}
+
+int scan_bbt(struct nandx_info *nand)
+{
+	struct bbt_manager *manager = &g_bbt_manager;
+	struct bbt_desc *pdesc;
+	u32 total_block, len, i;
+	int valid_desc = 0;
+	int ret = 0;
+	u8 *bbt;
+
+	total_block = div_down(nand->total_size, nand->block_size);
+	len = GET_BBT_LENGTH(total_block);
+
+	pr_debug("nand info, total_size:0x%llx, block_size:0x%x,"
+			"page_size:0x%x, oob_size:%d, page_parity_size:%d,"
+			"total_block:%d, len:%d\n",
+			nand->total_size, nand->block_size, nand->page_size,
+			nand->oob_size, nand->page_parity_size, total_block, len);
+
+	if (manager->bbt == NULL) {
+		manager->bbt = (u8 *)mem_alloc(1, len);
+		if (manager->bbt == NULL) {
+			pr_err("%s, %d, mem alloc fail!!! len:%d\n",
+				   __func__, __LINE__, len);
+			return -ENOMEM;
+		}
+	}
+	bbt = manager->bbt;
+	memset(bbt, 0xFF, len);
+
+	/* scan bbt */
+	for (i = 0; i < 2; i++) {
+		pdesc = &manager->desc[i];
+		pdesc->bbt_addr = BBT_INVALID_ADDR;
+		pdesc->version = 0;
+		ret = search_bbt(nand, pdesc, manager->max_blocks);
+		if (!ret && (pdesc->bbt_addr != BBT_INVALID_ADDR))
+			valid_desc += 1 << i;
+	}
+
+	pdesc = &manager->desc[0];
+	if ((valid_desc == 0x3) && (pdesc[0].version != pdesc[1].version))
+		valid_desc = (pdesc[0].version > pdesc[1].version) ? 1 : 2;
+
+	/* read bbt */
+	for (i = 0; i < 2; i++) {
+		if (!(valid_desc & (1 << i)))
+			continue;
+		ret = read_bbt(&pdesc[i], bbt, len);
+		if (ret < 0) {
+			pdesc->bbt_addr = BBT_INVALID_ADDR;
+			pdesc->version = 0;
+			valid_desc &= ~(1 << i);
+		}
+		/* If two BBT version is same, only need to read the first bbt*/
+		if ((valid_desc == 0x3) &&
+		    (pdesc[0].version == pdesc[1].version))
+			break;
+	}
+
+	if (!valid_desc) {
+		create_bbt(nand, bbt);
+		pdesc[0].version = 1;
+		pdesc[1].version = 1;
+	}
+
+	pdesc[0].version = max(pdesc[0].version, pdesc[1].version);
+	pdesc[1].version = pdesc[0].version;
+
+	for (i = 0; i < 2; i++) {
+		if (valid_desc & (1 << i))
+			continue;
+
+		ret = write_bbt(nand, &pdesc[i], &pdesc[1 - i], bbt,
+				manager->max_blocks);
+		if (ret) {
+			pr_err("write bbt(%d) fail, ret:%d\n", i, ret);
+			manager->bbt = NULL;
+			return ret;
+		}
+	}
+
+	/* Prevent the bbt regions from erasing / writing */
+	mark_bbt_region(nand, manager->bbt, manager->max_blocks);
+
+	for (i = 0; i < total_block; i++) {
+		if (get_bbt_mark(manager->bbt, i) == BBT_BLOCK_WORN)
+			pr_info("Checked WORN bad blk: %d\n", i);
+		else if (get_bbt_mark(manager->bbt, i) == BBT_BLOCK_FACTORY_BAD)
+			pr_info("Checked Factory bad blk: %d\n", i);
+		else if (get_bbt_mark(manager->bbt, i) == BBT_BLOCK_RESERVED)
+			pr_info("Checked Reserved blk: %d\n", i);
+		else if (get_bbt_mark(manager->bbt, i) != BBT_BLOCK_GOOD)
+			pr_info("Checked unknown blk: %d\n", i);
+	}
+
+	return 0;
+}
+
+int bbt_mark_bad(struct nandx_info *nand, off_t offset)
+{
+	struct bbt_manager *manager = &g_bbt_manager;
+	u32 block = div_down(offset, nand->block_size);
+	int ret;
+
+	if (!g_bbt_manager.bbt)
+		return 0;
+
+	set_bbt_mark(manager->bbt, block, BBT_BLOCK_WORN);
+	mark_nand_bad(nand, block);
+
+	/* Update flash-based bad block table */
+	ret = update_bbt(nand, manager->desc, manager->bbt,
+			 manager->max_blocks);
+	pr_info("block %d, update result %d.\n", block, ret);
+
+	return ret;
+}
+
+int bbt_is_bad(struct nandx_info *nand, off_t offset)
+{
+	u32 block;
+
+	block = div_down(offset, nand->block_size);
+	if (!g_bbt_manager.bbt)
+		return 0;
+	else
+		return get_bbt_mark(g_bbt_manager.bbt, block) != BBT_BLOCK_GOOD;
+}
+
+int nand_unmarkbad_bbt(struct nandx_info *nand, off_t offset)
+{
+	struct bbt_manager *manager = &g_bbt_manager;
+	u32 block = div_down(offset, nand->block_size);
+	int ret = 0;
+
+	/* Unmark bad block in memory */
+	set_bbt_mark(manager->bbt, block, BBT_BLOCK_GOOD);
+
+	/* Update flash-based bad block table */
+	ret = update_bbt(nand, manager->desc, manager->bbt,
+			 manager->max_blocks);
+	pr_info("block %d, update result %d.\n", block, ret);
+
+	return ret;
+}
+
+void get_bbt_goodblocks_num(struct nandx_info *nand)
+{
+	u64 offset = nand->total_size;
+	u32 i;
+
+	for (i = 0; i < NAND_BBT_SCAN_MAXBLOCKS; i++) {
+		offset -= nand->block_size;
+		if (!nandx_is_bad_block(offset))
+			nand->bbt_goodblocks++;
+	}
+}
+
+u32 get_bad_block(struct nandx_info *nand, u32 *bb_worn,
+				  u32 *bb_factory, char *bb_buf)
+{
+	struct bbt_manager *manager = &g_bbt_manager;
+	u32 block = 0, len = 0;
+	u32 ofs, mask;
+
+	for (ofs = 0; ofs < nand->total_size; ofs += nand->block_size) {
+		block = div_down(ofs, nand->block_size);
+		mask = get_bbt_mark(manager->bbt, block);
+
+		if (mask == BBT_BLOCK_GOOD || mask == BBT_BLOCK_RESERVED)
+			continue;
+		if ((mask == BBT_BLOCK_WORN) && bb_worn)
+			(*bb_worn)++;
+		if ((mask == BBT_BLOCK_FACTORY_BAD) && bb_factory)
+			(*bb_factory)++;
+		if (bb_buf)
+			len += snprintf(bb_buf + len, PAGE_SIZE - len,
+							"%d\t\t\t%s\n",
+							ofs / nand->block_size,
+							(mask == BBT_BLOCK_WORN) ? "worn" : "factory");
+	}
+
+	return len;
+}
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/driver/kernel/driver.c b/src/kernel/linux/v4.19/drivers/mtd/nandx/driver/kernel/driver.c
new file mode 100644
index 0000000..bdc9df9
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/driver/kernel/driver.c
@@ -0,0 +1,1096 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 MediaTek Inc.
+ * Author SkyLake Huang <SkyLake.Huang@mediatek.com>
+ */
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/consumer.h>
+#include "nandx_core.h"
+#include "nandx_util.h"
+#include "bbt.h"
+#include "nandx_platform.h"
+#include "../../core/nand_chip.h"
+
+typedef int (*func_nandx_operation)(u8 *, u8 *, u64, size_t);
+static u8 *nandx_write_buffer, *nandx_read_buffer;
+
+#define	MAX_OOB_KEEP	200
+#define OOB_SIZE 10
+static u32 oob_counter;
+static loff_t oob_page[MAX_OOB_KEEP];
+static u8 oob_buf[OOB_SIZE];
+//for jffs2 only
+
+struct nandx_clk {
+	/* top_clock */
+	struct clk *nfi_clk_sel;
+	struct clk *snfi_clk_sel;
+	struct clk *nfi_clk_parent;
+	struct clk *snfi_clk_parent;
+	/* infra sys*/
+	struct clk *nfi_cg;
+	struct clk *snfi_cg;
+	struct clk *dma_cg;
+};
+
+struct nandx_nfc {
+	struct nandx_info info;
+	struct nandx_clk clk;
+	struct nfi_resource *res;
+
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *pins_drive_high;
+
+	struct mutex lock;
+};
+
+#ifdef CONFIG_MTD_NAND_MTK_WORN_BAD
+static struct nandx_worn_bad_test g_worn_bad_test = {0};
+#endif
+
+static void nandx_get_device(struct mtd_info *mtd)
+{
+	struct nandx_nfc *nfc = (struct nandx_nfc *)mtd->priv;
+
+	pm_runtime_get_sync(mtd->dev.parent);
+
+	mutex_lock(&nfc->lock);
+}
+
+static void nandx_release_device(struct mtd_info *mtd)
+{
+	struct nandx_nfc *nfc = (struct nandx_nfc *)mtd->priv;
+
+	mutex_unlock(&nfc->lock);
+
+	pm_runtime_mark_last_busy(mtd->dev.parent);
+	pm_runtime_put_autosuspend(mtd->dev.parent);
+}
+
+static int nandx_enable_clk(struct nandx_clk *clk)
+{
+	int ret;
+
+	ret = clk_prepare_enable(clk->dma_cg);
+	if (ret) {
+		pr_err("failed to enable dma cg\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(clk->nfi_cg);
+	if (ret) {
+		pr_err("failed to enable nfi cg\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(clk->nfi_clk_sel);
+	if (ret) {
+		pr_err("failed to enable nfi clk sel\n");
+		return ret;
+	}
+#ifdef CONFIG_MTD_NANDX_V2_SPI
+	ret = clk_prepare_enable(clk->snfi_cg);
+	if (ret) {
+		pr_err("failed to enable snfi cg\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(clk->snfi_clk_sel);
+	if (ret) {
+		pr_err("failed to enable snfi clk sel\n");
+		return ret;
+	}
+#endif
+	clk_set_parent(clk->nfi_clk_sel, clk->nfi_clk_parent);
+
+	clk_disable_unprepare(clk->nfi_clk_sel);
+
+	return 0;
+}
+
+static void nandx_disable_clk(struct nandx_clk *clk)
+{
+	clk_disable_unprepare(clk->dma_cg);
+	clk_disable_unprepare(clk->nfi_cg);
+}
+
+#ifdef CONFIG_MTD_NAND_MTK_WORN_BAD
+/**
+ * mtk_nfc_check_worn_bad_test - check the page addr is simulate worn
+ *
+ * This applies to check the page addr is simulate worn out or not.
+ */
+int mtk_nfc_check_worn_bad_test(void)
+{
+	if (g_worn_bad_test.sim_op_mode == WRITE_MODE ||
+		g_worn_bad_test.sim_op_mode == ERASE_MODE)
+		return g_worn_bad_test.sim_worn_page_addr;
+	else
+		return 0;
+}
+#else
+int mtk_nfc_check_worn_bad_test(void)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_MTD_NAND_MTK_WORN_BAD
+/**
+ * mtk_nfc_set_worn_op - set worn out page and worn out mode
+ * @mtd: MTD device structure
+ * @ofs: offset in a partition.
+ * @op_mode: worn out operation mode which is write mode or erase mode.
+ *
+ * This applies to simulate worn bad test for erase and program only.
+ */
+static int mtk_nfc_set_worn_op(struct mtd_info *mtd, loff_t ofs,
+							   u32 op_mode)
+{
+	struct mtd_info *master_mtd = mtd_get_part_master(mtd);
+	struct nandx_nfc *nfc = (struct nandx_nfc *)master_mtd->priv;
+	u32 page, page_shift;
+	u64 offset;
+
+	if (ofs < 0 || ofs >= mtd->size)
+		return -EINVAL;
+
+	page_shift = ffs((u32)nfc->info.page_size) - 1;
+	offset = mtd_get_part_offset(mtd);
+	page = (offset >> page_shift) + (ofs >> page_shift);
+
+	g_worn_bad_test.sim_worn_page_addr = page;
+	g_worn_bad_test.sim_op_mode = op_mode;
+
+	return 0;
+}
+
+int mtk_nfc_set_write_worn_page(struct mtd_info *mtd, loff_t ofs)
+{
+	return mtk_nfc_set_worn_op(mtd, ofs, WRITE_MODE);
+}
+EXPORT_SYMBOL_GPL(mtk_nfc_set_write_worn_page);
+
+int mtk_nfc_set_erase_worn_block(struct mtd_info *mtd, loff_t ofs)
+{
+	return mtk_nfc_set_worn_op(mtd, ofs, ERASE_MODE);
+}
+EXPORT_SYMBOL_GPL(mtk_nfc_set_erase_worn_block);
+#endif
+
+static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oob_region)
+{
+	struct nandx_nfc *nfc = (struct nandx_nfc *)mtd->priv;
+	u32 eccsteps;
+
+	eccsteps = div_down(mtd->writesize, mtd->ecc_step_size);
+
+	if (section >= eccsteps)
+		return -EINVAL;
+
+	oob_region->length = mtd->oobavail / eccsteps;
+	oob_region->offset =
+		(section + 1) * nfc->info.fdm_reg_size - oob_region->length;
+
+	return 0;
+}
+
+static int mtk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oob_region)
+{
+	struct nandx_nfc *nfc = (struct nandx_nfc *)mtd->priv;
+	u32 eccsteps;
+
+	if (section)
+		return -EINVAL;
+
+	eccsteps = div_down(mtd->writesize, mtd->ecc_step_size);
+	oob_region->offset = nfc->info.fdm_reg_size * eccsteps;
+	oob_region->length = mtd->oobsize - oob_region->offset;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops mtk_nfc_ooblayout_ops = {
+	.free = mtk_nfc_ooblayout_free,
+	.ecc = mtk_nfc_ooblayout_ecc,
+};
+
+struct nfc_compatible {
+	enum mtk_ic_version ic_ver;
+};
+
+static const struct nfc_compatible nfc_compats_mt6880 = {
+	.ic_ver = NANDX_MT6880,
+};
+
+static const struct of_device_id ic_of_match[] = {
+	{.compatible = "mediatek,mt6880-nfc", .data = &nfc_compats_mt6880},
+	{}
+};
+
+static const char *const part_types[] = {"gptpart", "ofpart", NULL};
+
+static int nand_operation(struct mtd_info *mtd, loff_t addr, size_t len,
+			  size_t *retlen, uint8_t *data, uint8_t *oob, bool read)
+{
+	struct nandx_split64 split = {0};
+	func_nandx_operation operation;
+	u64 block_oobs, val = 0, align; /* val was passed to nandx_split() uninitialized; TODO confirm intended value */
+	uint8_t *databuf, *oobbuf;
+	struct nandx_nfc *nfc;
+	bool readoob;
+	int ret = 0, ret_min = 0, ret_max = 0;
+
+	nfc = (struct nandx_nfc *)mtd->priv;
+
+	databuf = data;
+	oobbuf = oob;
+
+	readoob = data ? false : true;
+	block_oobs = div_up(mtd->erasesize, mtd->writesize) * mtd->oobavail;
+	align = readoob ? block_oobs : mtd->erasesize;
+
+	operation = read ? nandx_read : nandx_write;
+
+	nandx_split(&split, addr, len, val, align);
+
+	if (split.head_len) {
+		ret = operation((u8 *) databuf, oobbuf, addr, split.head_len);
+		if (ret < 0) {
+			pr_err("%s %d: read %d, ret %d\n", __func__,
+			       __LINE__, read, ret);
+			if (!read) {
+				if (ret == -ENANDWRITE)
+					ret = -EIO;
+
+				return ret;
+			}
+
+			if (read && (ret == -ENANDREAD))
+				ret = -EBADMSG;
+		}
+		ret_min = min_t(int, ret_min, ret);
+		ret_max = max_t(int, ret_max, ret);
+
+		if (databuf)
+			databuf += split.head_len;
+
+		if (oobbuf)
+			oobbuf += split.head_len;
+
+		addr += split.head_len;
+		*retlen += split.head_len;
+	}
+
+	if (split.body_len) {
+		while (div_up(split.body_len, align)) {
+			ret = operation((u8 *) databuf, oobbuf, addr, align);
+			if (ret < 0) {
+				pr_err("%s %d: read %d, ret %d\n", __func__,
+				       __LINE__, read, ret);
+				if (!read) {
+					if (ret == -ENANDWRITE)
+						ret = -EIO;
+
+					return ret;
+				}
+
+				if (read && (ret == -ENANDREAD))
+					ret = -EBADMSG;
+			}
+			ret_min = min_t(int, ret_min, ret);
+			ret_max = max_t(int, ret_max, ret);
+
+			if (databuf) {
+				databuf += mtd->erasesize;
+				split.body_len -= mtd->erasesize;
+				*retlen += mtd->erasesize;
+			}
+
+			if (oobbuf) {
+				oobbuf += block_oobs;
+				split.body_len -= block_oobs;
+				*retlen += block_oobs;
+			}
+
+			addr += mtd->erasesize;
+		}
+
+	}
+
+	if (split.tail_len) {
+		ret = operation((u8 *) databuf, oobbuf, addr, split.tail_len);
+		if (ret < 0) {
+			pr_err("%s %d: read %d, ret %d\n", __func__,
+			       __LINE__, read, ret);
+			if (!read) {
+				if (ret == -ENANDWRITE)
+					ret = -EIO;
+
+				return ret;
+                        }
+
+			if (read && (ret == -ENANDREAD))
+				ret = -EBADMSG;
+                }
+                ret_min = min_t(int, ret_min, ret);
+		ret_max = max_t(int, ret_max, ret);
+		*retlen += split.tail_len;
+	}
+
+	return ret_min < 0 ? ret_min : ret_max;
+}
+
+/*
+ * nand_read - MTD _read hook: read @len bytes at @from into @buf.
+ *
+ * Returns the bitflip count (>= 0) from nand_operation() or a negative
+ * error code. ECC statistics are folded into mtd->ecc_stats only when
+ * the status query succeeds (nandx_ioctl returns 0 on success).
+ */
+static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
+					 size_t *retlen, uint8_t *buf)
+{
+	struct nfiecc_status status = {0};
+	int ret;
+
+	nandx_get_device(mtd);
+	ret = nand_operation(mtd, from, len, retlen, buf, NULL, true);
+	if (ret >= (int)mtd->bitflip_threshold)
+		pr_err("read from: 0x%llx, len: %zu, bitflips: %d exceed threshold %d\n",
+			    from, len, ret, mtd->bitflip_threshold);
+
+	/* fold in ECC stats only if the query succeeded (status else stale) */
+	if (!nandx_ioctl(NFI_CTRL_ECC_GET_STATUS, &status)) {
+		mtd->ecc_stats.corrected += status.corrected;
+		mtd->ecc_stats.failed += status.failed;
+	}
+
+	nandx_release_device(mtd);
+	return ret;
+}
+
+/*
+ * nand_write - MTD _write hook: program @len bytes from @buf at offset @to.
+ *
+ * Takes the device via nandx_get_device() for the duration of the
+ * operation; the data path is shared with nand_read() through
+ * nand_operation() (read == false selects nandx_write).
+ */
+static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
+					  size_t *retlen, const uint8_t *buf)
+{
+	int ret;
+
+	nandx_get_device(mtd);
+	ret = nand_operation(mtd, to, len, retlen, (uint8_t *)buf,
+						 NULL, false);
+	nandx_release_device(mtd);
+	return ret;
+}
+
+/*
+ * nand_copy_from_oob - gather the "free" OOB bytes out of a raw OOB image.
+ * @dst: destination buffer, at least mtd->oobavail bytes
+ * @src: raw OOB as laid out by the controller (oobsize/eccsteps per sector)
+ * @len: number of free OOB bytes to copy
+ *
+ * Each ECC step holds oobsize_per_step raw bytes of which the free bytes
+ * sit after a @gap of non-available bytes; this packs those free bytes
+ * contiguously into @dst. Untouched tail bytes of @dst are left as 0xFF.
+ */
+static void nand_copy_from_oob(struct mtd_info *mtd, u8 *dst, u8 *src, u8 len)
+{
+	u32 eccsteps, gap;
+	u32 oobavail_per_step, oobsize_per_step;
+	int i;
+
+	eccsteps = div_down(mtd->writesize, mtd->ecc_step_size);
+	gap = (mtd->oobsize - mtd->oobavail) / eccsteps;
+	oobsize_per_step = mtd->oobsize / eccsteps;
+	oobavail_per_step = mtd->oobavail / eccsteps;
+
+	pr_debug("from eccsteps %d, gap %d, oobsize_per_step %d, oobavail_per_step %d, len %d\n",
+			eccsteps, gap, oobsize_per_step, oobavail_per_step, len);
+	memset(dst, 0xFF, mtd->oobavail)
;
+
+	for (i = 0; i < eccsteps ; i++) {
+		/* copy at most one step's worth of free bytes per iteration */
+		int copylen = (len < oobavail_per_step) ? len : oobavail_per_step;
+		memcpy(dst + i * oobavail_per_step , src + i * oobsize_per_step + gap, copylen);
+		len -= copylen;
+		if(len == 0)
+			break;
+	}
+}
+
+/*
+ * nand_copy_to_oob - scatter contiguous free OOB bytes into a raw OOB image.
+ * @dst: raw OOB buffer, at least mtd->oobsize bytes (pre-filled with 0xFF)
+ * @src: contiguous free OOB bytes supplied by the caller
+ * @len: number of free OOB bytes to place
+ *
+ * Inverse of nand_copy_from_oob(): distributes @src across the per-step
+ * free regions (offset @gap inside each oobsize_per_step chunk).
+ */
+static void nand_copy_to_oob(struct mtd_info *mtd, u8 *dst, u8 *src, u8 len)
+{
+	u32 eccsteps, gap;
+	u32 oobavail_per_step, oobsize_per_step;
+	int i;
+
+	eccsteps = div_down(mtd->writesize, mtd->ecc_step_size);
+	gap = (mtd->oobsize - mtd->oobavail) / eccsteps;
+	oobsize_per_step = mtd->oobsize / eccsteps;
+	oobavail_per_step = mtd->oobavail / eccsteps;
+
+	pr_debug("to eccsteps %d, gap %d, oobsize_per_step %d, oobavail_per_step %d, len %d\n",
+			eccsteps, gap, oobsize_per_step, oobavail_per_step, len);
+	memset(dst, 0xFF, mtd->oobsize);
+
+	for (i = 0; i < eccsteps ; i++) {
+		/* place at most one step's worth of free bytes per iteration */
+		int copylen = (len < oobavail_per_step) ? len : oobavail_per_step;
+		memcpy(dst + i * oobsize_per_step + gap, src + i * oobavail_per_step, copylen);
+		len -= copylen;
+		if(len == 0)
+			break;
+	}
+}
+
+/*
+ * nand_read_oob - MTD _read_oob hook: read page data and/or OOB bytes.
+ *
+ * With no oobbuf this degenerates to a plain nand_read(). Otherwise the
+ * page (and its raw OOB) is bounced through nandx_read_buffer and the
+ * free OOB bytes are repacked with nand_copy_from_oob().
+ */
+int nand_read_oob(struct mtd_info *mtd, loff_t from,
+				  struct mtd_oob_ops *ops)
+{
+	int ret = 0, i;	/* init: loop below may run zero times */
+	size_t retlen = 0;
+	u32 loop = div_up(ops->ooblen, mtd->oobavail);
+
+	if (ops->oobbuf == NULL)
+		return nand_read(mtd, from, ops->len, &ops->retlen, ops->datbuf);
+
+	/* lazily allocated bounce buffer: one page of data + one page for OOB */
+	if (nandx_read_buffer == NULL)
+		nandx_read_buffer = kmalloc(2 * mtd->writesize, GFP_KERNEL);
+	if (nandx_read_buffer == NULL)
+		return -ENOMEM;
+
+	pr_debug("from: 0x%llX read datbuf %p oobbuf %p len %zu, ooblen %zu\n",
+		from, ops->datbuf, ops->oobbuf, ops->len, ops->ooblen);
+
+	nandx_get_device(mtd);
+
+	if (ops->datbuf == NULL) {
+		int len = ops->ooblen;
+
+		/* OOB-only read: fetch whole pages, repack oobavail bytes each.
+		 * NOTE(review): 'from' is never advanced, so iterations > 0
+		 * re-read the same page — confirm callers never request
+		 * ooblen > oobavail. */
+		for (i = 0; i < loop; i++) {
+			int copylen = (len < mtd->oobavail) ? len : mtd->oobavail;
+
+			ret = nand_operation(mtd, from, mtd->writesize, &retlen,
+				nandx_read_buffer,
+				nandx_read_buffer + mtd->writesize, true);
+			if (ret < 0) {
+				pr_err("retlen %zu, ret %d\n", retlen, ret);
+				goto read_oob_end;
+			}
+			/* parenthesised: the offset belongs inside the cast */
+			pr_debug("%d len %d, OOB [%08x, %08x, %08x, %08x]\n",
+				i, copylen,
+				*(u32 *)(nandx_read_buffer + mtd->writesize + 0),
+				*(u32 *)(nandx_read_buffer + mtd->writesize + 4),
+				*(u32 *)(nandx_read_buffer + mtd->writesize + 8),
+				*(u32 *)(nandx_read_buffer + mtd->writesize + 12));
+			nand_copy_from_oob(mtd, ops->oobbuf + i * mtd->oobavail,
+				nandx_read_buffer + mtd->writesize, copylen);
+			len -= copylen;
+			ops->oobretlen += copylen;
+		}
+		goto read_oob_end;
+	} else {
+		memset(nandx_read_buffer + mtd->writesize, 0xFF, mtd->oobsize);
+		ret = nand_operation(mtd, from, mtd->writesize, &ops->retlen,
+			ops->datbuf,
+			nandx_read_buffer + mtd->writesize, true);
+		if (ret < 0) {
+			pr_err("retlen %zu, ret %d\n", retlen, ret);
+			goto read_oob_end;
+		}
+		nand_copy_from_oob(mtd, ops->oobbuf,
+			nandx_read_buffer + mtd->writesize, ops->ooblen);
+		ops->oobretlen += ops->ooblen;
+	}
+	/* removed stray dump_stack() debug leftover that spammed the log */
+
+read_oob_end:
+	nandx_release_device(mtd);
+	return ret;
+}
+
+/*
+ * nand_write_oob - MTD _write_oob hook: write page data and/or OOB bytes.
+ *
+ * OOB-only writes are not pushed to flash immediately: the bytes are
+ * cached in oob_buf/oob_page[] and merged into the next data write to
+ * the same page address.
+ */
+int nand_write_oob(struct mtd_info *mtd, loff_t to,
+				   struct mtd_oob_ops *ops)
+{
+	int ret;
+	int i;
+	size_t retlen = 0;	/* init: only used for logging below */
+	u8 *oobbuf = ops->oobbuf;
+
+	/* lazily allocated bounce buffer: one page of data + one page for OOB */
+	if (nandx_write_buffer == NULL)
+		nandx_write_buffer = kmalloc(2 * mtd->writesize, GFP_KERNEL);
+	if (nandx_write_buffer == NULL)
+		return -ENOMEM;
+
+	if (ops->len > mtd->writesize)
+		BUG();
+	if (ops->ooblen > mtd->oobavail)
+		BUG();
+
+	nandx_get_device(mtd);
+
+	if (ops->datbuf) {
+		if (oobbuf) {
+			nand_copy_to_oob(mtd, nandx_write_buffer, ops->oobbuf, ops->ooblen);
+			oobbuf = nandx_write_buffer;
+		} else {
+			/* merge a previously cached OOB-only write, if any.
+			 * NOTE(review): oob_buf is one shared buffer, so only
+			 * the most recently cached OOB data survives — confirm
+			 * callers never cache OOB for two pages at once. */
+			for (i = 0; i < MAX_OOB_KEEP; i++) {
+				if (oob_page[i] == to) {
+					nand_copy_to_oob(mtd, nandx_write_buffer, &oob_buf[0], OOB_SIZE);
+					oobbuf = nandx_write_buffer;
+					oob_page[i] = 0;
+					oob_counter--;
+					break;
+				}
+			}
+		}
+		ret = nand_operation(mtd, to,  ops->len, &ops->retlen, ops->datbuf, oobbuf, false);
+	} else if (ops->oobbuf) {
+		pr_debug("write datbuf %p oobbuf %p ooboffs %d, len %zu, ooblen %zu @0x%llX\n",
+			ops->datbuf, ops->oobbuf, ops->ooboffs, ops->len, ops->ooblen, to);
+
+		if (ops->ooblen > OOB_SIZE)
+			BUG();
+		/* cache the OOB bytes until the matching data write arrives */
+		for (i = 0; i < MAX_OOB_KEEP; i++) {
+			if (oob_page[i] == to) {
+				memset(&oob_buf[0], 0xFF, OOB_SIZE);
+				memcpy(&oob_buf[0], ops->oobbuf, ops->ooblen);
+				break;
+			} else if (oob_page[i] == 0) {
+				memset(&oob_buf[0], 0xFF, OOB_SIZE);
+				memcpy(&oob_buf[0], ops->oobbuf, ops->ooblen);
+				oob_page[i] = to;
+				oob_counter++;
+				break;
+			}
+		}
+		if (i == MAX_OOB_KEEP)
+			pr_err("cannot find empty oob slot\n");
+		ops->oobretlen = ops->ooblen;
+		ret = 0;
+		goto write_oob_end;
+	} else { /* both datbuf and oobbuf NULL: invalid request */
+		printk(KERN_ERR "TO:0x%llX write datbuf %p oobbuf %p len %zu, ooblen %zu\n",
+			to, ops->datbuf, ops->oobbuf, ops->len, ops->ooblen);
+
+		BUG();
+		/* dead code after BUG() that dereferenced the NULL oobbuf
+		 * has been removed; keep ret defined for the compiler */
+		ret = -EINVAL;
+	}
+
+write_oob_end:
+	nandx_release_device(mtd);
+	return ret;
+}
+
+/*
+ * nand_erase - MTD _erase hook: erase [instr->addr, +instr->len) block
+ * by block, refusing to touch blocks marked bad in the BBT.
+ */
+static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+	struct nandx_nfc *nfc;
+	u32 block_size;
+	int ret = 0;
+
+	nandx_get_device(mtd);
+
+	nfc = (struct nandx_nfc *)mtd->priv;
+	block_size = nfc->info.block_size;
+
+	while (instr->len) {
+		if (bbt_is_bad(&nfc->info, instr->addr)) {
+			pr_err("block(0x%llx) is bad, not erase\n",
+				instr->addr);
+			ret = -EIO;
+			goto erase_exit;
+		} else {
+			ret = nandx_erase(instr->addr, block_size);
+			if (ret < 0) {
+				/* log before bailing out; this print was
+				 * unreachable after the goto in the original */
+				pr_err("erase fail at blk %llu, ret:%d\n",
+					instr->addr, ret);
+				ret = -EIO;
+				goto erase_exit;
+			}
+		}
+		instr->addr += block_size;
+		instr->len -= block_size;
+	}
+
+	ret = 0;
+
+erase_exit:
+	nandx_release_device(mtd);
+
+	return ret;
+}
+
+/* nand_is_bad - MTD _block_isbad hook: query the BBT for the block at @ofs. */
+int nand_is_bad(struct mtd_info *mtd, loff_t ofs)
+{
+	struct nandx_nfc *chip = (struct nandx_nfc *)mtd->priv;
+	int bad;
+
+	nandx_get_device(mtd);
+	bad = bbt_is_bad(&chip->info, ofs);
+	nandx_release_device(mtd);
+
+	return bad;
+}
+
+/*
+ * nand_mark_bad - MTD _block_markbad hook: record the block at @ofs as
+ * bad in the bad block table.
+ */
+int nand_mark_bad(struct mtd_info *mtd, loff_t ofs)
+{
+	struct nandx_nfc *nfc;
+	int ret;
+
+	nfc = (struct nandx_nfc *)mtd->priv;
+	nandx_get_device(mtd);
+	/* demoted from pr_err: tracing a mark-bad call is not an error */
+	pr_debug("%s, %d\n", __func__, __LINE__);
+	ret = bbt_mark_bad(&nfc->info, ofs);
+
+	nandx_release_device(mtd);
+
+	return ret;
+}
+
+/* nand_sync - MTD _sync hook: flush pending operations via nandx_sync(). */
+void nand_sync(struct mtd_info *mtd)
+{
+	nandx_get_device(mtd);
+	nandx_sync();
+	nandx_release_device(mtd);
+}
+
+/*
+ * mtd_info_create - allocate and populate the mtd_info for this chip.
+ *
+ * Queries the nandx core for chip geometry (CORE_CTRL_NAND_INFO) and
+ * wires up all MTD callbacks. Returns NULL on allocation or query
+ * failure; the caller owns the returned mtd_info.
+ */
+static struct mtd_info *mtd_info_create(struct platform_device *pdev,
+										struct nandx_nfc *nfc)
+{
+	struct mtd_info *mtd;
+	int ret;
+
+	mtd = mem_alloc(1, sizeof(struct mtd_info));
+	if (!mtd)
+		return NULL;
+
+	ret = nandx_ioctl(CORE_CTRL_NAND_INFO, &nfc->info);
+	if (ret) {
+		pr_err("fail to get nand info (%d)!\n", ret);
+		mem_free(mtd);
+		return NULL;
+	}
+
+	mtd->priv = nfc;
+	mtd->owner = THIS_MODULE;
+	mtd->dev.parent = &pdev->dev;
+	mtd->name = "MTK-Nand";
+	mtd->writesize = nfc->info.page_size;
+	mtd->erasesize = nfc->info.block_size;
+	mtd->oobsize = nfc->info.oob_size;
+	mtd->size = nfc->info.total_size;
+	mtd->type = MTD_NANDFLASH;
+	mtd->flags = MTD_CAP_NANDFLASH;
+	mtd->_erase = nand_erase;
+	mtd->_point = NULL;
+	mtd->_unpoint = NULL;
+	mtd->_read = nand_read;
+	mtd->_write = nand_write;
+	mtd->_read_oob = nand_read_oob;
+	mtd->_write_oob = nand_write_oob;
+	mtd->_sync = nand_sync;
+	mtd->_lock = NULL;
+	mtd->_unlock = NULL;
+	mtd->_block_isbad = nand_is_bad;
+	mtd->_block_markbad = nand_mark_bad;
+	mtd->writebufsize = mtd->writesize;
+	/* 2 free OOB bytes per ECC sector — assumes the controller FDM
+	 * exposes exactly 2 bytes/sector; TODO confirm */
+	mtd->oobavail = 2 * (mtd->writesize / nfc->info.sector_size);
+
+	mtd_set_ooblayout(mtd, &mtk_nfc_ooblayout_ops);
+
+	mtd->ecc_strength = nfc->info.ecc_strength;
+	mtd->ecc_step_size = nfc->info.sector_size;
+
+	/* default bitflip warning threshold: 3/4 of the ECC strength */
+	if (!mtd->bitflip_threshold)
+		mtd->bitflip_threshold = (mtd->ecc_strength * 3) / 4;
+
+	return mtd;
+}
+
+/*
+ * get_platform_res - collect MMIO ranges, IRQs, pinctrl and clocks from
+ * the device tree and store them in a freshly allocated nfi_resource.
+ *
+ * Returns 0 on success; on failure frees the resource struct and
+ * returns a negative error code.
+ */
+static int get_platform_res(struct platform_device *pdev,
+							struct nandx_nfc *nfc)
+{
+	void __iomem *nfi_base, *ecc_base;
+	const struct of_device_id *of_id;
+	struct nfc_compatible *compat;
+	struct nfi_resource *res;
+	u32 nfi_irq, ecc_irq;
+	struct device *dev;
+	int ret = 0;
+
+	res = mem_alloc(1, sizeof(struct nfi_resource));
+	if (!res)
+		return -ENOMEM;
+
+	nfc->res = res;
+	dev = &pdev->dev;
+
+	/* NOTE(review): of_iomap/irq_of_parse_and_map results are not
+	 * checked for NULL/0 before use — confirm the DT always provides
+	 * both register ranges and both interrupts */
+	nfi_base = of_iomap(dev->of_node, 0);
+	ecc_base = of_iomap(dev->of_node, 1);
+	nfi_irq = irq_of_parse_and_map(dev->of_node, 0);
+	ecc_irq = irq_of_parse_and_map(dev->of_node, 1);
+
+	of_id = of_match_node(ic_of_match, pdev->dev.of_node);
+	if (!of_id) {
+		ret = -EINVAL;
+		goto freeres;
+	}
+	compat = (struct nfc_compatible *)of_id->data;
+
+	nfc->pinctrl = devm_pinctrl_get(dev);
+	nfc->pins_drive_high = pinctrl_lookup_state(nfc->pinctrl,
+						    "state_drive_high");
+
+	nfc->clk.dma_cg = devm_clk_get(dev, "dma_cg");
+	if (IS_ERR(nfc->clk.dma_cg)) {
+		pr_err("no dma cg\n");
+		ret = -EINVAL;
+		goto freeres;
+	}
+
+	nfc->clk.nfi_cg = devm_clk_get(dev, "nfi_cg");
+	if (IS_ERR(nfc->clk.nfi_cg)) {
+		pr_err("no nfi cg\n");
+		ret = -EINVAL;
+		goto freeres;
+	}
+#ifdef CONFIG_MTD_NANDX_V2_SPI
+	nfc->clk.snfi_cg = devm_clk_get(dev, "snfi_cg");
+	if (IS_ERR(nfc->clk.snfi_cg)) {
+		pr_err("no snfi cg\n");
+		ret = -EINVAL;
+		goto freeres;
+	}
+#endif
+	nfc->clk.nfi_clk_sel = devm_clk_get(dev, "nfi_clk_sel");
+	if (IS_ERR(nfc->clk.nfi_clk_sel)) {
+		pr_err("no nfi clk sel\n");
+		ret = -EINVAL;
+		goto freeres;
+	}
+#ifdef CONFIG_MTD_NANDX_V2_SPI
+	nfc->clk.snfi_clk_sel = devm_clk_get(dev, "snfi_clk_sel");
+	if (IS_ERR(nfc->clk.snfi_clk_sel)) {
+		pr_err("no snfi clk sel\n");
+		ret = -EINVAL;
+		goto freeres;
+	}
+#endif
+	nfc->clk.nfi_clk_parent = devm_clk_get(dev, "nfi_parent156m");
+	if (IS_ERR(nfc->clk.nfi_clk_parent)) {
+		pr_err("no nfi_clk_parent\n");
+		ret = -EINVAL;
+		goto freeres;
+	}
+
+	nand_get_resource(res);
+
+	res->ic_ver = (enum mtk_ic_version)(compat->ic_ver);
+	res->nfi_regs = (void *)nfi_base;
+	res->nfi_irq_id = nfi_irq;
+	res->ecc_regs = (void *)ecc_base;
+	res->ecc_irq_id = ecc_irq;
+	res->dev = dev;
+
+	return ret;
+
+freeres:
+	/* NOTE(review): nfi_base/ecc_base are not iounmap'd here — the
+	 * mappings leak on this error path; confirm and fix separately */
+	mem_free(res);
+
+	return ret;
+}
+
+/* sysfs: dump the raw NAND ID bytes as hex. */
+static ssize_t nand_ids_show(struct device *dev,
+							 struct device_attribute *attr, char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+	struct nandx_nfc *nfc;
+
+	nfc = (struct nandx_nfc *)mtd->priv;
+
+	/* PAGE_SIZE, not 8: a u64 id needs up to 16 hex digits + '\n' */
+	return snprintf(buf, PAGE_SIZE, "%llx\n", nfc->info.ids);
+}
+static DEVICE_ATTR(nand_ids, 0444, nand_ids_show, NULL);
+
+/* sysfs: number of good blocks recorded in the bad block table (hex). */
+static ssize_t bbt_goodblocks_show(struct device *dev,
+								   struct device_attribute *attr, char *buf)
+{
+	struct nandx_nfc *chip =
+		((struct mtd_info *)dev_get_drvdata(dev))->priv;
+
+	return snprintf(buf, PAGE_SIZE, "%x\n", chip->info.bbt_goodblocks);
+}
+static DEVICE_ATTR(bbt_goodblocks, 0444, bbt_goodblocks_show, NULL);
+
+/* sysfs: total bad block count (worn-out + factory-marked). */
+static ssize_t bb_total_show(struct device *dev,
+							 struct device_attribute *attr, char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+	struct nandx_nfc *chip = mtd->priv;
+	u32 worn = 0, factory = 0;
+
+	get_bad_block(&chip->info, &worn, &factory, NULL);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", worn + factory);
+}
+static DEVICE_ATTR(bb_total, 0444, bb_total_show, NULL);
+
+/* sysfs: dump the bad block table itself into @buf. */
+static ssize_t bbt_show(struct device *dev,
+						struct device_attribute *attr, char *buf)
+{
+	struct nandx_nfc *chip =
+		((struct mtd_info *)dev_get_drvdata(dev))->priv;
+
+	return get_bad_block(&chip->info, NULL, NULL, buf);
+}
+static DEVICE_ATTR(bbtshow, 0444, bbt_show, NULL);
+
+/* sysfs: count of worn-out (runtime-marked) bad blocks. */
+static ssize_t bb_worn_total_show(struct device *dev,
+								  struct device_attribute *attr, char *buf)
+{
+	struct nandx_nfc *chip =
+		((struct mtd_info *)dev_get_drvdata(dev))->priv;
+	u32 worn = 0;
+
+	get_bad_block(&chip->info, &worn, NULL, NULL);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", worn);
+}
+static DEVICE_ATTR(bb_worn_total, 0444, bb_worn_total_show, NULL);
+
+/* sysfs: count of factory-marked bad blocks. */
+static ssize_t bb_factory_total_show(struct device *dev,
+									 struct device_attribute *attr, char *buf)
+{
+	struct nandx_nfc *chip =
+		((struct mtd_info *)dev_get_drvdata(dev))->priv;
+	u32 factory = 0;
+
+	get_bad_block(&chip->info, NULL, &factory, NULL);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", factory);
+}
+static DEVICE_ATTR(bb_factory_total, 0444, bb_factory_total_show, NULL);
+
+/* sysfs attributes exported under the platform device. */
+static struct attribute *mtk_nand_attrs[] = {
+	&dev_attr_nand_ids.attr,
+	&dev_attr_bbt_goodblocks.attr,
+	&dev_attr_bb_total.attr,
+	&dev_attr_bbtshow.attr,
+	&dev_attr_bb_worn_total.attr,
+	&dev_attr_bb_factory_total.attr,
+	NULL,
+};
+
+static const struct attribute_group mtk_nand_attr_group = {
+	.attrs = mtk_nand_attrs,
+};
+
+/*
+ * nand_probe - platform driver probe: bring up clocks, initialise the
+ * nandx core, create the mtd_info, scan the BBT, register MTD
+ * partitions, enable runtime PM and export the sysfs attributes.
+ */
+static int nand_probe(struct platform_device *pdev)
+{
+	struct mtd_info *mtd;
+	struct nandx_nfc *nfc;
+	int arg;
+	int ret;
+	//u8 *buf;
+
+	nfc = mem_alloc(1, sizeof(struct nandx_nfc));
+	if (!nfc)
+		return -ENOMEM;
+
+	ret = get_platform_res(pdev, nfc);
+	if (ret)
+		goto release_nfc;
+
+	/* NOTE(review): overrides the platform default (NAND_MIN_OOB_REQ
+	 * is 16) — confirm 32 is intended here */
+	nfc->res->min_oob_req = 32;
+#ifdef CONFIG_MTD_NANDX_V2_SPI
+	nfc->res->nand_type = NAND_SPI;
+#else
+	nfc->res->nand_type = NAND_SLC;
+#endif
+
+	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+	if(ret) {
+		pr_err("fail to set dma mask (%d)!\n", ret);
+		goto release_res;
+	}
+
+	ret = nandx_enable_clk(&nfc->clk);
+	if (ret)
+		goto release_res;
+
+	ret = nandx_init(nfc->res);
+	if (ret) {
+		pr_err("nandx init error (%d)!\n", ret);
+		goto disable_clk;
+	}
+
+	/*if (!IS_ERR(nfc->pinctrl) && !IS_ERR(nfc->pins_drive_high)) {
+		clk_set_parent(nfc->clk.snfi_clk_sel, nfc->clk.snfi_parent_52m);
+		clk_set_parent(nfc->clk.nfi2x_clk_sel,
+			       nfc->clk.nfi2x_clk_parent);
+		pinctrl_select_state(nfc->pinctrl, nfc->pins_drive_high);
+
+		arg = 3;
+		ret = nandx_ioctl(SNFI_CTRL_DELAY_MODE, &arg);
+		if (ret)
+			goto release_res;
+	}*/
+	arg = 0xf;
+	nandx_ioctl(NFI_CTRL_IOCON, &arg);
+
+	/* enable DMA, ECC and bad-mark swapping in the controller */
+	arg = 1;
+	nandx_ioctl(NFI_CTRL_DMA, &arg);
+	nandx_ioctl(NFI_CTRL_ECC, &arg);
+	nandx_ioctl(NFI_CTRL_BAD_MARK_SWAP, &arg);
+
+	mtd = mtd_info_create(pdev, nfc);
+	if (!mtd) {
+		ret = -ENOMEM;
+		goto disable_clk;
+	}
+
+	mutex_init(&nfc->lock);
+
+	ret = scan_bbt(&nfc->info);
+	if (ret) {
+		pr_err("bbt init error (%d)!\n", ret);
+		goto release_mtd;
+	}
+
+	platform_set_drvdata(pdev, mtd);
+	mtd->priv = nfc;
+
+	ret = mtd_device_parse_register(mtd, part_types, NULL, NULL, 0);
+	if (ret) {
+		pr_err("mtd parse partition error! ret:%d\n", ret);
+		/* NOTE(review): unregistering after a failed register —
+		 * confirm this is safe/needed */
+		mtd_device_unregister(mtd);
+		goto release_mtd;
+	}
+
+	get_bbt_goodblocks_num(&nfc->info);
+
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	/* Add device attribute groups */
+	ret = sysfs_create_group(&pdev->dev.kobj, &mtk_nand_attr_group);
+	if (ret) {
+		pr_info("failed to create attribute group\n");
+		/* NOTE(review): this frees mtd while it is still registered
+		 * with the MTD core — confirm the intended error handling */
+		goto release_mtd;
+	}
+
+	/* NOTE(review): disabled code below has the NULL check inverted
+	 * (it populates oob_buf only when the allocation FAILED) — fix
+	 * before ever re-enabling */
+#if 0
+	buf = mem_alloc(MAX_OOB_KEEP, OOB_SIZE);
+	if(buf == NULL) {
+		int i;
+
+		for(i=0;i<MAX_OOB_KEEP;i++)
+			oob_buf[i] = buf + (i*OOB_SIZE);
+	} else {
+		pr_err("alloc oob_buf fail\n");
+		return -ENOMEM;
+	}
+#endif
+
+	return 0;
+
+release_mtd:
+	mem_free(mtd);
+disable_clk:
+	pm_runtime_disable(&pdev->dev);
+	nandx_disable_clk(&nfc->clk);
+release_res:
+	mem_free(nfc->res);
+release_nfc:
+	mem_free(nfc);
+
+	pr_err("%s: probe err %d\n", __func__, ret);
+	return ret;
+}
+
+/*
+ * nand_remove - platform driver remove: unregister the MTD device and
+ * free all driver-owned allocations.
+ *
+ * NOTE(review): clocks are disabled before pm_runtime_get_sync(), and
+ * pm_runtime_disable() is never called — confirm the intended runtime-PM
+ * teardown order.
+ */
+static int nand_remove(struct platform_device *pdev)
+{
+	struct mtd_info *mtd = platform_get_drvdata(pdev);
+	struct nandx_nfc *nfc;
+
+	mtd_device_unregister(mtd);
+	nfc = (struct nandx_nfc *)mtd->priv;
+	nandx_disable_clk(&nfc->clk);
+
+	pm_runtime_get_sync(&pdev->dev);
+
+	mem_free(nfc->res);
+	mem_free(nfc);
+	mem_free(mtd);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+/* Runtime-PM suspend: quiesce the nandx core, then gate its clocks. */
+static int nandx_runtime_suspend(struct device *dev)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+	struct nandx_nfc *nfc = (struct nandx_nfc *)mtd->priv;
+	int ret;
+
+	ret = nandx_suspend();
+	nandx_disable_clk(&nfc->clk);
+
+	return ret;
+}
+
+/* Runtime-PM resume: ungate the clocks, then restore the nandx core. */
+static int nandx_runtime_resume(struct device *dev)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+	struct nandx_nfc *nfc;
+	int ret;
+
+	nfc = (struct nandx_nfc *)mtd->priv;
+
+	ret = nandx_enable_clk(&nfc->clk);
+	if (ret)
+		return ret;
+
+	ret = nandx_resume();
+	return ret;
+}
+#endif
+
+/* System sleep reuses the runtime-PM callbacks via force_suspend/resume. */
+static const struct dev_pm_ops nfc_dev_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
+	SET_RUNTIME_PM_OPS(nandx_runtime_suspend, nandx_runtime_resume, NULL)
+};
+
+static struct platform_driver nand_driver = {
+	.probe = nand_probe,
+	.remove = nand_remove,
+	.driver = {
+		   .name = "mtk-nand",
+		   .owner = THIS_MODULE,
+		   .of_match_table = ic_of_match,
+		   .pm = &nfc_dev_pm_ops,
+	},
+};
+/* export the table actually used for matching so module autoload works
+ * (was mtk_nfc_id_table, which .of_match_table never references) */
+MODULE_DEVICE_TABLE(of, ic_of_match);
+
+/*
+ * Module init/exit boilerplate collapsed into the standard
+ * module_platform_driver() helper (registers/unregisters nand_driver).
+ */
+module_platform_driver(nand_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MTK Nand Flash Controller Driver");
+MODULE_AUTHOR("MediaTek");
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/driver/platform/mt6880/nandx_platform.c b/src/kernel/linux/v4.19/drivers/mtd/nandx/driver/platform/mt6880/nandx_platform.c
new file mode 100644
index 0000000..ef73d59
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/driver/platform/mt6880/nandx_platform.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 MediaTek Inc.
+ * Author SkyLake Huang <SkyLake.Huang@mediatek.com>
+ */
+
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "nandx_platform.h"
+
+/* Bitmask of NAND types this platform build supports (SLC only here). */
+int nand_types_support(void)
+{
+	return BIT(NAND_SLC);
+}
+
+#ifdef NANDX_DTS_SUPPORT
+/* Fill SoC-level defaults for MT6880; MMIO/IRQ resources come from DT. */
+void nand_get_resource(struct nfi_resource *res)
+{
+	res->ic_ver = NANDX_MT6880;
+
+	res->min_oob_req = NAND_MIN_OOB_REQ;
+
+	res->clock_1x = 156000000;
+	/* NOTE(review): clock_2x_num is 8 but clock_2x is NULL — confirm
+	 * whether the 2x clock table is intentionally absent */
+	res->clock_2x = NULL;
+	res->clock_2x_num = 8;
+
+	/* Sector size forced to 1024 bytes for MT6880 */
+	res->force_sector_size = 1024;
+}
+
+#else
+/* Static MT8512 resource table used when device tree is unavailable. */
+void nand_get_resource(struct nfi_resource *res)
+{
+	res->ic_ver = NANDX_MT8512;
+	res->dev = NULL;
+
+	res->ecc_regs = NAND_NFIECC_BASE;
+	res->ecc_irq_id = NAND_ECC_IRQ;
+
+	res->nfi_regs = NAND_NFI_BASE;
+	res->nfi_irq_id = NAND_NFI_IRQ;
+
+	res->clock_1x = 26000000;
+	res->clock_2x = NULL;
+	res->clock_2x_num = 0;
+
+	res->min_oob_req = NAND_MIN_OOB_REQ;
+
+	/* Sector size force to 512Byte for MT8512 */
+	res->force_sector_size = 512;
+	res->force_spare_size = 0;
+}
+
+/*
+ * nand_high_clock_sel - switch the SNFI/NFI clock muxes to faster parents
+ * via direct writes into the clock controller.
+ *
+ * NOTE(review): raw offsets into NAND_CLK_BASE (0xb8 clear, 0xb4 set,
+ * 0x4 update-enable) mirror the mux programming in nand_clock_init();
+ * confirm field meanings against the MT8512 clock spec.
+ */
+void nand_high_clock_sel(void)
+{
+	nandx_set_bits32(NAND_CLK_BASE + 0xb8, 0x7 << 8, 0x7 << 8);
+	nandx_set_bits32(NAND_CLK_BASE + 0xb4, 0x7 << 8, 0x5 << 8);
+	nandx_set_bits32(NAND_CLK_BASE + 0x4, 0x1 << 29, 0x1 << 29);
+
+	nandx_set_bits32(NAND_CLK_BASE + 0xb8, 0x7, 0x7);
+	nandx_set_bits32(NAND_CLK_BASE + 0xb4, 0x7, 0x2);
+	nandx_set_bits32(NAND_CLK_BASE + 0x4, 0x1 << 28, 0x1 << 28);
+}
+
+/*
+ * nand_hard_reset - pulse the controller reset through INFRACFG_AO.
+ *
+ * NOTE(review): presumably offset 0x130 asserts and 0x134 de-asserts
+ * the reset (bit 15) with a 5us hold — confirm against the infracfg
+ * register spec.
+ */
+void nand_hard_reset(void)
+{
+	u32 val;
+
+	val = readl(INFRACFG_AO_BASE + 0x130);
+	val |= BIT(15);
+	writel(val, INFRACFG_AO_BASE + 0x130);
+
+	udelay(5);
+
+	val = readl(INFRACFG_AO_BASE + 0x134);
+	val |= BIT(15);
+	writel(val, INFRACFG_AO_BASE + 0x134);
+}
+
+
+
+/*
+ * nand_gpio_init - program pinmux (and drive strength for group 0) for
+ * the selected NAND interface.
+ * @nand_type: NAND_SPI or NAND_SLC (SLC is unsupported on MT8512)
+ * @pinmux_group: which pin group to route the SPI-NAND signals onto
+ */
+void nand_gpio_init(int nand_type, int pinmux_group)
+{
+	pr_debug("gpio init, nand type:0x%x, pinmux group:0x%x.\n",
+		 nand_type, pinmux_group);
+
+	if (nand_type == NAND_SPI) {
+		switch (pinmux_group) {
+		case 0:
+			pr_info("Try to setting nand_spi gpio pinmux 0.\n");
+			nandx_set_bits32(GPIO_MODE7_ADDR,
+					 0xFFF << 18, 0x6DB << 18);
+			nandx_set_bits32(GPIO_MODE8_ADDR,
+					 (0x7 << 6) | 0x7, (0x3 << 6) | 0x3);
+			nandx_set_bits32(GPIO_DRV1_ADDR, 0xF << 16, 3 << 16);
+			nandx_set_bits32(GPIO_DRV2_ADDR, 0xF, 3);
+			nandx_set_bits32(GPIO_DRV2_ADDR, 0xF << 8, 3 << 8);
+			break;
+		case 1:
+			pr_info("Try to setting nand_spi gpio pinmux 1.\n");
+			nandx_set_bits32(GPIO_MODE1_ADDR,
+					 0x7fff << 15, 0x2492 << 15);
+			nandx_set_bits32(GPIO_MODE2_ADDR, 0x7 << 0, 0x2);
+			break;
+		default:
+			/* unknown group: leave pinmux untouched */
+			break;
+		}
+	} else if (nand_type == NAND_SLC)
+		pr_warn("SLC NOT support for MT8512 !\n");
+}
+
+/*
+ * The CLK of MT8512 SNAND is clk_spinfi_bclk_sel[2:0] at 0x1000_00b0[10:8]
+ * The range of spi nand clock is 26MHz to 124.8MHz.
+ * 0: top_ap_clk_ctrl_f26m_ck, 26MHz
+ * 1: univpll2_d8, 52MHz
+ * 2: univpll3_d4, 62.4MHz
+ * 3: syspll1_d8, 68.25MHz
+ * 4: syspll4_d2: 78MHz
+ * 5: syspll2_d4: 91MHz
+ * 6: univpll2_d4: 104MHz
+ * 7: univpll3_d2: 124.8MHz
+ * The CLK of MT8512 NAND is clk_nfi2x_sel[2:0] at 0x1000_00b0[2:0]
+ * But 8512 haven't support P-NAND
+ */
+
+/*
+ * nand_clock_init - select the SPI-NAND controller clock source.
+ * @nand_type: NAND_SPI or NAND_SLC (SLC path is still TODO)
+ * @sel: mux selector (0..7), see the frequency table above
+ * @rate: optional out-parameter receiving the resulting rate in Hz;
+ *        left untouched when NULL or when @sel is out of range
+ */
+void nand_clock_init(int nand_type, int sel, int *rate)
+{
+	/* rates in MHz for mux selectors 0..7 (see the comment block above) */
+	static const int rate_mhz[8] = { 26, 52, 62, 68, 78, 91, 104, 124 };
+
+	if ((nand_type != NAND_SPI) && (nand_type != NAND_SLC))
+		pr_err("nand type not support!\n");
+
+	pr_debug("%s sel:%d!\n", __func__, sel);
+
+	if (nand_type == NAND_SPI) {
+		nandx_set_bits32(NAND_CLK_BASE + 0xB8, 0x700, 0x7 << 8);
+		nandx_set_bits32(NAND_CLK_BASE + 0xB4, 0x700, sel << 8);
+		nandx_set_bits32(NAND_CLK_BASE + 0x4, BIT(29), BIT(29));
+
+		if (rate != NULL && sel >= 0 && sel < 8)
+			*rate = rate_mhz[sel] * 1000 * 1000;
+	}
+
+	/* TODO: setting for slc */
+}
+
+/* nand_cg_switch - clock-gate control stub for this platform. */
+void nand_cg_switch(int nand_type, int gate, bool enable)
+{
+	/* NOTE: all cg should be enabled in nand_clock_init. */
+}
+
+#endif
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/include/Nandx.mk b/src/kernel/linux/v4.19/drivers/mtd/nandx/include/Nandx.mk
new file mode 100644
index 0000000..f369ffe
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/include/Nandx.mk
@@ -0,0 +1,18 @@
+#
+# Copyright (C) 2017 MediaTek Inc.
+# Licensed under either
+#     BSD Licence, (see NOTICE for more details)
+#     GNU General Public License, version 2.0, (see NOTICE for more details)
+#
+
+nandx-header-y += internal/nandx_core.h
+nandx-header-y += internal/nandx_errno.h
+nandx-header-y += internal/nandx_util.h
+nandx-header-y += platform/$(NANDX_IC_VERSION)/nandx_platform.h
+nandx-header-$(NANDX_BBT_SUPPORT) += internal/bbt.h
+nandx-header-$(NANDX_SIMULATOR_SUPPORT) += simulator/nandx_os.h
+nandx-header-$(NANDX_CTP_SUPPORT) += ctp/nandx_os.h
+nandx-header-$(NANDX_LK_SUPPORT) += lk/nandx_os.h
+nandx-header-$(NANDX_KERNEL_SUPPORT) += kernel/nandx_os.h
+nandx-header-$(NANDX_UBOOT_SUPPORT) += uboot/nandx_os.h
+nandx-header-$(NANDX_AOS_SUPPORT) += aos/nandx_os.h
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/include/internal/bbt.h b/src/kernel/linux/v4.19/drivers/mtd/nandx/include/internal/bbt.h
new file mode 100644
index 0000000..e2a0b83
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/include/internal/bbt.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#ifndef __BBT_H__
+#define __BBT_H__
+
+#define BBT_BLOCK_GOOD      0x03
+#define BBT_BLOCK_WORN      0x02
+#define BBT_BLOCK_RESERVED      0x01
+#define BBT_BLOCK_FACTORY_BAD   0x00
+
+#define BBT_INVALID_ADDR 0
+/* The maximum number of blocks to scan for a bbt */
+#define NAND_BBT_SCAN_MAXBLOCKS 4
+#define NAND_BBT_USE_FLASH  0x00020000
+#define NAND_BBT_NO_OOB     0x00040000
+
+/* Search good / bad pattern on the first and the second page */
+#define NAND_BBT_SCAN2NDPAGE    0x00008000
+/* Search good / bad pattern on the last page of the eraseblock */
+#define NAND_BBT_SCANLASTPAGE   0x00010000
+
+#define NAND_DRAM_BUF_DATABUF_ADDR  (NAND_BUF_ADDR)
+
+struct bbt_pattern {
+	u8 *data;
+	int len;
+};
+
+struct bbt_desc {
+	struct bbt_pattern pattern;
+	u8 version;
+	u64 bbt_addr;/*0: invalid value; otherwise, valid value*/
+};
+
+struct bbt_manager {
+	/* main bbt descriptor and mirror descriptor */
+	struct bbt_desc desc[2];/* 0: main bbt; 1: mirror bbt */
+	int max_blocks;
+	u8 *bbt;
+};
+
+#define BBT_ENTRY_MASK      0x03
+#define BBT_ENTRY_SHIFT     2
+
+/* argument parenthesised so expressions like (a + b) expand correctly */
+#define GET_BBT_LENGTH(blocks) ((blocks) >> 2)
+#define GET_ENTRY(block) ((block) >> BBT_ENTRY_SHIFT)
+#define GET_POSITION(block) (((block) & BBT_ENTRY_MASK) * 2)
+#define GET_MARK_VALUE(block, mark) \
+	(((mark) & BBT_ENTRY_MASK) << GET_POSITION(block))
+
+int scan_bbt(struct nandx_info *nand);
+
+int bbt_mark_bad(struct nandx_info *nand, off_t offset);
+int nand_unmarkbad_bbt(struct nandx_info *nand, off_t offset);
+int bbt_is_bad(struct nandx_info *nand, off_t offset);
+void get_bbt_goodblocks_num(struct nandx_info *nand);
+u32 get_bad_block(struct nandx_info *nand, u32 *bb_worn,
+				  u32 *bb_factory, char *bb_buf);
+
+#endif /*__BBT_H__*/
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/include/internal/nandx_core.h b/src/kernel/linux/v4.19/drivers/mtd/nandx/include/internal/nandx_core.h
new file mode 100644
index 0000000..384ac5c
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/include/internal/nandx_core.h
@@ -0,0 +1,340 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#ifndef __NANDX_CORE_H__
+#define __NANDX_CORE_H__
+
+#define NAND_MIN_OOB_REQ     16
+
+#define NAND_MAX_FDM_SIZE    8
+#define NAND_FDM_ECC_SIZE    1 /* it's a default value */
+#define NAND_ECC_PARITY_BITS 14
+
+/**
+ * enum mtk_ic_version - identifies the specific IC; the IP layer uses it
+ * to load per-chip configuration.
+ */
+enum mtk_ic_version {
+	NANDX_MT8518,
+	NANDX_MT6880,
+};
+
+/**
+ * nand_type - indicates nand type
+ */
+enum nand_type {
+	NAND_SPI,
+	NAND_SLC,
+	NAND_MLC,
+	NAND_TLC
+};
+
+/**
+ * nandx_ioctl_cmd - operations supported by nandx
+ *
+ * @NFI_CTRL_DMA dma enable or not
+ * @NFI_CTRL_NFI_MODE customer/read/program/erase...
+ * @NFI_CTRL_ECC ecc enable or not
+ * @NFI_CTRL_ECC_MODE nfi/dma/pio
+ * @CHIP_CTRL_DRIVE_STRENGTH enum chip_ctrl_drive_strength
+ */
+enum nandx_ctrl_cmd {
+	CORE_CTRL_NAND_INFO,
+	CORE_CTRL_PERF_INFO,
+	CORE_CTRL_PERF_INFO_CLEAR,
+
+	NFI_CTRL_BASE_INFO,
+	SNFI_CTRL_BASE_INFO,
+
+	NFI_CTRL_NFI_IRQ, /*5*/
+	NFI_CTRL_ECC_IRQ,
+	NFI_CTRL_ECC_PAGE_IRQ,
+
+	NFI_CTRL_DMA,
+	NFI_BURST_EN,
+	NFI_ADDR_ALIGNMENT_EN, /*10*/
+	NFI_BYTE_RW_EN,
+
+	NFI_CRC_EN,
+	NFI_CTRL_RANDOMIZE,
+	NFI_CTRL_RANDOMIZE_SEL,
+
+	NFI_CTRL_IO_FORMAT, /*15*/
+
+	NFI_CTRL_ECC,
+	NFI_CTRL_ECC_MODE,
+	NFI_CTRL_ECC_DECODE_MODE,
+	NFI_CTRL_ECC_ERRNUM0,
+	NFI_CTRL_ECC_GET_STATUS, /*20*/
+
+	NFI_CTRL_BAD_MARK_SWAP,
+	NFI_CTRL_IOCON,
+
+	SNFI_CTRL_OP_MODE,
+	SNFI_CTRL_RX_MODE,
+	SNFI_CTRL_TX_MODE, /*25*/
+	SNFI_CTRL_DELAY_MODE,
+	SNFI_CTRL_4FIFO_EN,
+	SNFI_CTRL_GF_CONFIG,
+	SNFI_CTRL_SAMPLE_DELAY,
+	SNFI_CTRL_LATCH_LATENCY, /*30*/
+	SNFI_CTRL_MAC_QPI_MODE,
+
+	CHIP_CTRL_OPS_CACHE,
+	CHIP_CTRL_OPS_MULTI,
+	CHIP_CTRL_PSLC_MODE,
+	CHIP_CTRL_DRIVE_STRENGTH, /*35*/
+	CHIP_CTRL_DDR_MODE,
+
+	CHIP_CTRL_DEVICE_RESET,
+	CHIP_CTRL_ONDIE_ECC,
+	CHIP_CTRL_TIMING_MODE,
+
+	CHIP_CTRL_PERF_INFO, /*40*/
+	CHIP_CTRL_PERF_INFO_CLEAR,
+};
+
+struct nfiecc_status {
+	u32 corrected;
+	u32 failed;
+	u32 bitflips;
+};
+
+enum snfi_ctrl_op_mode {
+	SNFI_CUSTOM_MODE,
+	SNFI_AUTO_MODE,
+	SNFI_MAC_MODE
+};
+
+enum snfi_ctrl_rx_mode {
+	SNFI_RX_111,
+	SNFI_RX_112,
+	SNFI_RX_114,
+	SNFI_RX_122,
+	SNFI_RX_144
+};
+
+enum snfi_ctrl_tx_mode {
+	SNFI_TX_111,
+	SNFI_TX_114,
+};
+
+enum chip_ctrl_drive_strength {
+	CHIP_DRIVE_NORMAL,
+	CHIP_DRIVE_HIGH,
+	CHIP_DRIVE_MIDDLE,
+	CHIP_DRIVE_LOW
+};
+
+enum chip_ctrl_timing_mode {
+	CHIP_TIMING_MODE0,
+	CHIP_TIMING_MODE1,
+	CHIP_TIMING_MODE2,
+	CHIP_TIMING_MODE3,
+	CHIP_TIMING_MODE4,
+	CHIP_TIMING_MODE5,
+};
+
+/**
+ * page_performance - performance information of read/write a single page
+ */
+struct page_performance {
+	int rx_page_total_time;
+	int read_page_time;
+	int read_data_time;
+
+	int tx_page_total_time;
+	int write_page_time;
+	int write_data_time;
+};
+
+/**
+ * nand_performance - performance information
+ */
+struct nand_performance {
+	struct page_performance page_perf;
+
+	int read_speed;
+	int write_speed;
+	int erase_speed;
+};
+
+/**
+ * nandx_info - basic information
+ */
+struct nandx_info {
+	u32 max_io_count;
+	u32 min_write_pages;
+	u32 plane_num;
+	u32 oob_size;
+	u32 page_parity_size;
+	u32 page_size;
+	u32 block_size;
+	u64 total_size;
+	u32 fdm_reg_size;
+	u32 fdm_ecc_size;
+	u32 ecc_strength;
+	u32 sector_size;
+	u64 ids;
+	u32 bbt_goodblocks;
+};
+
+/**
+ * nfi_io_format - nfi format for accessing data
+ */
+struct nfi_io_format {
+	int fdm_size;
+	int fdm_ecc_size;
+	int sec_spare_size;
+	int sec_size;
+	int cus_sec_en;
+	int cus_sec_size;
+	int last_cus_sec_en;
+	int last_cus_sec_size;
+	int ecc_level_sel;
+};
+
+/**
+ * nfi_resource - the resource needed by nfi & ecc to do initialization
+ */
+struct nfi_resource {
+	int ic_ver;
+	void *dev;
+
+	void *ecc_regs;
+	int ecc_irq_id;
+
+	void *nfi_regs;
+	int nfi_irq_id;
+
+	u32 clock_1x;
+	u32 *clock_2x;
+	int clock_2x_num;
+
+	int min_oob_req;
+	int force_sector_size;
+	int force_spare_size;
+
+	u8 nand_type;
+};
+
+/**
+ * nandx_init - init all related modules below
+ *
+ * @res: basic resource of the project
+ *
+ * return 0 if init success, otherwise return negative error code
+ */
+int nandx_init(struct nfi_resource *res);
+
+/**
+ * nandx_exit - release resource those that obtained in init flow
+ */
+void nandx_exit(void);
+
+/**
+ * nandx_read - read data from nand; this function reads data and the
+ *   related oob from a specific address
+ *   if do multi_ops, set one operation per time, and call nandx_sync at last
+ *   in multi mode, not support page partial read
+ *   oob not support partial read
+ *
+ * @data: buf to receive data from nand
+ * @oob: buf to receive oob data from nand which related to data page
+ *   length of @oob should be oob size aligned; oob does not support partial read
+ * @offset: offset address on the whole flash
+ * @len: the length of @data that need to read
+ *
+ * if read success return 0, otherwise return negative error code
+ */
+int nandx_read(u8 *data, u8 *oob, u64 offset, size_t len);
+
+/**
+ * nandx_write -  write data to nand
+ *   this function can write data and related oob to specifical address
+ *   if do multi_ops, set one operation per time, and call nandx_sync at last
+ *
+ * @data: source data to be written to nand,
+ *   for multi operation, the length of @data should be page size aligned
+ * @oob: source oob related to the data page to be written to nand,
+ *   length of @oob should be oob size aligned
+ * @offset: offset address on the whole flash, the value should be start address
+ *   of a page
+ * @len: the length of @data that need to write,
+ *   for multi operation, the len should be page size aligned
+ *
+ * if write success return 0, otherwise return negative error code
+ * if return value > 0, it indicates that how many pages still need to write,
+ * and data has not been written to nand
+ * please call nandx_sync once pages are aligned to $nandx_info.min_write_pages
+ */
+int nandx_write(u8 *data, u8 *oob, u64 offset, size_t len);
+
+/**
+ * nandx_erase - erase an area of nand
+ *   if do multi_ops, set one operation per time, and call nandx_sync at last
+ *
+ * @offset: offset address on the flash
+ * @len: erase length which should be block size aligned
+ *
+ * if erase success return 0, otherwise return negative error code
+ */
+int nandx_erase(u64 offset, size_t len);
+
+/**
+ * nandx_sync - sync all operations to nand
+ *   when do multi_ops, this function will be called at last operation
+ *   when writing data, if the number of pages is not aligned
+ *   to $nandx_info.min_write_pages, this interface can be called to
+ *   force the write; 0xff is padded to the blank pages.
+ */
+int nandx_sync(void);
+
+/**
+ * nandx_is_bad_block - check if the block is bad
+ *   only check the flag that marked by the flash vendor
+ *
+ * @offset: offset address on the whole flash
+ *
+ * return true if the block is bad, otherwise return false
+ */
+bool nandx_is_bad_block(u64 offset);
+
+/**
+ * nandx_ioctl - set/get property of nand chip
+ *
+ * @cmd: parameter that defined in enum nandx_ioctl_cmd
+ * @arg: operate parameter
+ *
+ * return 0 if operate success, otherwise return negative error code
+ */
+int nandx_ioctl(int cmd, void *arg);
+
+/**
+ * nandx_suspend - suspend nand, and store some data
+ *
+ * return 0 if suspend success, otherwise return negative error code
+ */
+int nandx_suspend(void);
+
+/**
+ * nandx_resume - resume nand, and replay some data
+ *
+ * return 0 if resume success, otherwise return negative error code
+ */
+int nandx_resume(void);
+
+int nandx_write_raw_pages(u8 *data, u64 offset, size_t len);
+
+/**
+ * nandx_unit_test - unit test
+ *
+ * @offset: offset address on the whole flash
+ * @len: should be not larger than a block size, we only test a block per time
+ *
+ * return 0 if test success, otherwise return negative error code
+ */
+int nandx_unit_test(u64 offset, size_t len);
+
+#endif /* __NANDX_CORE_H__ */
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/include/internal/nandx_errno.h b/src/kernel/linux/v4.19/drivers/mtd/nandx/include/internal/nandx_errno.h
new file mode 100644
index 0000000..24a4b51
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/include/internal/nandx_errno.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#ifndef __NANDX_ERRNO_H__
+#define __NANDX_ERRNO_H__
+
+#ifndef EIO
+#define EIO             5       /* I/O error */
+#define ENOMEM          12      /* Out of memory */
+#define EFAULT          14      /* Bad address */
+#define EBUSY           16      /* Device or resource busy */
+#define ENODEV          19      /* No such device */
+#define EINVAL          22      /* Invalid argument */
+#define ENOSPC          28      /* No space left on device */
+/* Operation not supported on transport endpoint */
+#define EOPNOTSUPP      95
+#define ETIMEDOUT       110     /* Connection timed out */
+#endif
+
+#define ENANDFLIPS      1024    /* Too many bitflips, uncorrected */
+#define ENANDREAD       1025    /* Read fail, can't correct */
+#define ENANDWRITE      1026    /* Write fail */
+#define ENANDERASE      1027    /* Erase fail */
+#define ENANDBAD        1028    /* Bad block */
+#define ENANDWP         1029    /* Write protected */
+
+#define IS_NAND_ERR(err)        ((err) >= -ENANDBAD && (err) <= -ENANDFLIPS) /* NOTE(review): range excludes -ENANDWP (1029) -- confirm intended */
+
+#endif /* __NANDX_ERRNO_H__ */
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/include/internal/nandx_util.h b/src/kernel/linux/v4.19/drivers/mtd/nandx/include/internal/nandx_util.h
new file mode 100644
index 0000000..929bec4
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/include/internal/nandx_util.h
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ */
+
+#ifndef __NANDX_UTIL_H__
+#define __NANDX_UTIL_H__
+
+typedef unsigned char u8;
+typedef unsigned short u16;
+typedef unsigned int u32;
+typedef unsigned long long u64;
+
+enum nand_irq_return {
+	NAND_IRQ_NONE,
+	NAND_IRQ_HANDLED,
+};
+
+enum nand_dma_operation {
+	NDMA_FROM_DEV,	/* device to memory (mapped to DMA_FROM_DEVICE) */
+	NDMA_TO_DEV,	/* memory to device (mapped to DMA_TO_DEVICE) */
+};
+
+#include "nandx_errno.h"
+
+/*
+ * Compatible function
+ * used for preloader/lk/kernel environment
+ */
+#include "nandx_os.h"
+
+#ifndef BIT
+#define BIT(a)                  (1 << (a))	/* NOTE(review): 1UL would avoid UB at bit 31; kernel BIT() uses 1UL */
+#endif
+
+#ifndef min
+#define min(a, b)   (((a) > (b)) ? (b) : (a))	/* both arguments are evaluated twice */
+#define max(a, b)   (((a) < (b)) ? (b) : (a))	/* both arguments are evaluated twice */
+#endif
+
+#ifndef GENMASK
+#define GENMASK(h, l) \
+	(((~0UL) << (l)) & (~0UL >> ((sizeof(unsigned long) * 8) - 1 - (h))))
+#endif
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(array) (sizeof(array) / sizeof(array[0]))
+#endif
+
+#ifndef __weak
+#define __weak __attribute__((__weak__))
+#endif
+
+#ifndef __packed
+#define __packed __attribute__((__packed__))
+#endif
+
+#ifndef KB
+#define KB(x)   ((x) << 10)	/* int arithmetic: may overflow for large x */
+#define MB(x)   (KB(x) << 10)
+#define GB(x)   (MB(x) << 10)
+#endif
+
+#ifndef offsetof
+#define offsetof(type, member) ((size_t)&((type *)0)->member)
+#endif
+
+#ifndef NULL
+#define NULL (void *)0
+#endif
+/* Count set bits in x using the classic SWAR/multiply reduction */
+static inline u32 nandx_popcount(u32 x)
+{
+	u32 n = x - ((x >> 1) & 0x55555555);
+
+	n = (n & 0x33333333) + ((n >> 2) & 0x33333333);
+	n = (n + (n >> 4)) & 0x0F0F0F0F;
+	/* sum the four byte counts into the top byte, then shift down */
+	return (n * 0x01010101) >> 24;
+}
+
+#ifndef zero_popcount
+#define zero_popcount(x) (32 - nandx_popcount(x))
+#endif
+
+#ifndef do_div	/* fallback 64-by-32 divide when the OS layer lacks do_div */
+#define do_div(n, base) \
+	({ \
+		u32 __base = (base); \
+		u32 __rem; \
+		__rem = ((u64)(n)) % __base; \
+		(n) = ((u64)(n)) / __base; \
+		__rem; \
+	})
+#endif
+
+#define div_up(x, y) \
+	({ \
+		u64 __temp = ((x) + (y) - 1); \
+		do_div(__temp, (y)); \
+		__temp; \
+	})
+
+#define div_down(x, y) \
+	({ \
+		u64 __temp = (x); \
+		do_div(__temp, (y)); \
+		__temp; \
+	})
+
+#define div_round_up(x, y)      (div_up(x, y) * (y))	/* round x up to a multiple of y */
+#define div_round_down(x, y)    (div_down(x, y) * (y))	/* round x down to a multiple of y */
+
+#define reminder(x, y) /* sic: "remainder" -- evaluates to x % y */ \
+	({ \
+		u64 __temp = (x); \
+		do_div(__temp, (y)); \
+	})
+
+#ifndef round_up	/* bit-mask rounding: y must be a power of two */
+#define round_up(x, y)          ((((x) - 1) | ((y) - 1)) + 1)
+#define round_down(x, y)        ((x) & ~((y) - 1))
+#endif
+
+#ifndef readx_poll_timeout_atomic	/* NOTE(review): delay_us is accepted but never used -- pure busy-wait */
+#define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \
+	({ \
+		u64 end = get_current_time_us() + timeout_us; \
+		for (;;) { \
+			u64 now = get_current_time_us(); \
+			(val) = op(addr); \
+			if (cond) \
+				break; \
+			if (now > end) { \
+				(val) = op(addr); \
+				break; \
+			} \
+		} \
+		(cond) ? 0 : -ETIMEDOUT; \
+	})
+
+#define readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+	readx_poll_timeout_atomic(readl, addr, val, cond, delay_us, timeout_us)
+#define readw_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+	readx_poll_timeout_atomic(readw, addr, val, cond, delay_us, timeout_us)
+#define readb_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+	readx_poll_timeout_atomic(readb, addr, val, cond, delay_us, timeout_us)
+#endif
+
+struct nandx_split64 {
+	u64 head;	/* start offset of the unaligned leading part */
+	size_t head_len;	/* bytes up to the first aligned boundary (may be 0) */
+	u64 body;	/* start offset of the aligned bulk part */
+	size_t body_len;	/* length of the body, a whole multiple of the alignment */
+	u64 tail;	/* start offset of the trailing remainder */
+	size_t tail_len;	/* leftover bytes after the aligned body */
+};
+
+struct nandx_split32 {
+	u32 head;	/* start offset of the unaligned leading part */
+	u32 head_len;	/* bytes up to the first aligned boundary (may be 0) */
+	u32 body;	/* start offset of the aligned bulk part */
+	u32 body_len;	/* length of the body, a whole multiple of the alignment */
+	u32 tail;	/* start offset of the trailing remainder */
+	u32 tail_len;	/* leftover bytes after the aligned body */
+};
+
+#define nandx_split(split, offset, len, val, align) /* partition [offset, offset+len) into head/body/tail around align; val is scratch */ \
+	do { \
+		(split)->head = (offset); \
+		(val) = div_round_down((offset), (align)); \
+		(val) = (align) - ((offset) - (val)); \
+		if ((val) == (align)) \
+			(split)->head_len = 0; \
+		else if ((val) > (len)) \
+			(split)->head_len = len; \
+		else \
+			(split)->head_len = val; \
+		(split)->body = (offset) + (split)->head_len; \
+		(split)->body_len = div_round_down((len) - \
+						   (split)->head_len,\
+						   (align)); \
+		(split)->tail = (split)->body + (split)->body_len; \
+		(split)->tail_len = (len) - (split)->head_len - \
+				    (split)->body_len; \
+	} while (0)
+
+#ifndef container_of
+#define container_of(ptr, type, member) \
+	({const __typeof__(((type *)0)->member) * __mptr = (ptr); \
+		(type *)((char *)__mptr - offsetof(type, member)); })
+#endif
+
+/* Convert a CPU-order u32 to big-endian, probing host endianness at runtime */
+static inline u32 nandx_cpu_to_be32(u32 val)
+{
+	const u32 probe = 1;
+
+	if (*(const u8 *)&probe)	/* little-endian host: swap the bytes */
+		return ((val >> 24) & 0xff) | ((val >> 8) & 0xff00) |
+		       ((val & 0xff00) << 8) | ((val & 0xff) << 24);
+
+	return val;	/* big-endian host: already in BE order */
+}
+
+static inline void nandx_set_bits32(unsigned long addr, u32 mask,
+				    u32 val)
+{
+	u32 temp = readl((void *)addr);	/* read-modify-write of a 32-bit register */
+
+	temp &= ~(mask);	/* clear the masked field */
+	temp |= val;	/* val must already be shifted into the mask position */
+	writel(temp, (void *)addr);
+}
+
+#endif /* __NANDX_UTIL_H__ */
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/include/kernel/nandx_os.h b/src/kernel/linux/v4.19/drivers/mtd/nandx/include/kernel/nandx_os.h
new file mode 100644
index 0000000..fba33e4
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/include/kernel/nandx_os.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ */
+
+#ifndef __NANDX_OS_H__
+#define __NANDX_OS_H__
+
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/timekeeping.h>
+#include <linux/dma-mapping.h>
+#include <linux/compiler-gcc.h>
+#include <linux/delay.h>
+
+#define NANDX_PERFORMANCE_TRACE 0
+#define NANDX_PAGE_PERFORMANCE_TRACE 0
+
+#define NANDX_DTS_SUPPORT
+
+#define NANDX_BULK_IO_USE_DRAM 0
+
+static inline int nandx_irq_register(void *dev, int irq,
+				     void *irq_handler, char *name, void *data)
+{	/* thin wrapper over devm_request_irq(); 0x0 = no IRQF flags */
+	return devm_request_irq(dev, irq, (irq_handler_t)irq_handler, 0x0, name, data);
+}
+
+static inline int nandx_irq_unregister(int irq)
+{
+	return 0;	/* devm-managed IRQs are released automatically on driver detach */
+}
+
+static inline void nandx_irq_enable(int irq)
+{
+	/* intentionally a no-op in the kernel environment */
+}
+
+static inline void nandx_irq_disable(int irq)
+{
+	/* intentionally a no-op in the kernel environment */
+}
+
+static inline void *pmem_alloc(u32 count, u32 size)
+{	/* zeroed, DMA-capable memory; kcalloc also guards count*size overflow */
+	return kcalloc(count, size, GFP_KERNEL | GFP_DMA);
+}
+
+static inline void *mem_alloc(u32 count, u32 size)
+{	/* zeroed, virtually-contiguous memory */
+	return vzalloc(count * size);	/* NOTE(review): count * size is u32 math -- may overflow */
+}
+
+static inline void pmem_free(void *mem)
+{	/* release memory obtained from pmem_alloc() */
+	kfree(mem);	/* kfree(NULL) is a no-op */
+}
+
+static inline void mem_free(void *mem)
+{
+	/* vfree(NULL) is a documented no-op, so no NULL check is needed */
+	vfree(mem);
+}
+
+static inline void *nandx_event_create(void)
+{	/* zeroed completion object; release with nandx_event_destroy() */
+	return pmem_alloc(1, sizeof(struct completion));
+}
+
+static inline void nandx_event_destroy(void *event)
+{	/* event must come from nandx_event_create(); fixed "void * event" spacing */
+	pmem_free(event);
+}
+
+static inline void nandx_event_complete(void *event)
+{	/* signal the completion; fixed "void * event" spacing */
+	complete((struct completion *)event);
+}
+
+static inline void nandx_event_init(void *event)
+{	/* (re)arm the completion before waiting on it */
+	init_completion((struct completion *)event);
+}
+
+static inline int nandx_event_wait_complete(void *event, u32 timeout)
+{	/* timeout is in microseconds; returns 0 on timeout, >0 (jiffies left) on completion */
+	return wait_for_completion_timeout((struct completion *)event, usecs_to_jiffies(timeout));
+}
+
+static inline u64 get_current_time_us(void)
+{
+	return ktime_get_ns() >> 10;	/* NOTE(review): >>10 divides by 1024, ~2.4% short of true us -- confirm intended */
+}
+
+static inline u32 nandx_dma_map(void *dev, void *buf, u64 len,
+				enum nand_dma_operation op)
+{	/* NOTE(review): u32 return truncates a 64-bit dma_addr_t -- verify on LPAE/64-bit */
+	return dma_map_single(dev, buf, len,
+			      (op == NDMA_FROM_DEV) ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+}
+
+static inline void nandx_dma_unmap(void *dev, void *buf, void *addr,
+				   u64 len,
+				   enum nand_dma_operation op)
+{	/* addr is the device address returned by nandx_dma_map(); buf is unused here */
+	dma_unmap_single(dev, (dma_addr_t)addr, len,
+			 (op == NDMA_FROM_DEV) ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+}
+
+#endif /* __NANDX_OS_H__ */
diff --git a/src/kernel/linux/v4.19/drivers/mtd/nandx/include/platform/mt6880/nandx_platform.h b/src/kernel/linux/v4.19/drivers/mtd/nandx/include/platform/mt6880/nandx_platform.h
new file mode 100644
index 0000000..1c8bb96
--- /dev/null
+++ b/src/kernel/linux/v4.19/drivers/mtd/nandx/include/platform/mt6880/nandx_platform.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 MediaTek Inc.
+ * Author: SkyLake Huang <SkyLake.Huang@mediatek.com>
+ */
+
+#ifndef __NANDX_PLATFORM_H__
+#define __NANDX_PLATFORM_H__
+
+/* #define NANDX_RANDOM_SUPPORT */
+
+
+#ifndef NANDX_DTS_SUPPORT
+/****** controller resource defines *******/
+#define NAND_NFI_BASE        NFI_BASE
+#define NAND_NFIECC_BASE     NFIECC_BASE
+#define NAND_NFI_IRQ         119
+#define NAND_ECC_IRQ    118
+
+/***** other module resource defines *****/
+#define NAND_CLK_BASE        CKSYS_BASE
+
+#define NAND_CLK_SEL         (CKSYS_BASE + 0x4)
+#define SNAND_CLK_SEL        (CKSYS_BASE + 0xDC)
+
+#define NAND_GPIO_BASE       (IO_PHYS+0x5000)
+
+/* for SLC */
+#define NAND_GPIO_MODE1      (NAND_GPIO_BASE + 0x300)
+#define NAND_GPIO_MODE2      (NAND_GPIO_BASE + 0x310)
+#define NAND_GPIO_MODE3      (NAND_GPIO_BASE + 0x320)
+#define NAND_GPIO_PUPD_CTRL0 (NAND_GPIO_BASE + 0xE00)
+#define NAND_GPIO_PUPD_CTRL1 (NAND_GPIO_BASE + 0xE10)
+#define NAND_GPIO_PUPD_CTRL2 (NAND_GPIO_BASE + 0xE20)
+#define NAND_GPIO_PUPD_CTRL6 (NAND_GPIO_BASE + 0xE60)
+#define NAND_GPIO_DRV_MODE0  (NAND_GPIO_BASE + 0xD00)
+#define NAND_GPIO_DRV_MODE5  (NAND_GPIO_BASE + 0xD50)
+#define NAND_GPIO_DRV_MODE6  (NAND_GPIO_BASE + 0xD60)
+#define NAND_GPIO_DRV_MODE7  (NAND_GPIO_BASE + 0xD70)
+#define NAND_GPIO_TDSEL6_EN  (NAND_GPIO_BASE + 0xB60)
+#define NAND_GPIO_TDSEL7_EN  (NAND_GPIO_BASE + 0xB70)
+#define NAND_GPIO_RDSEL1_EN  (NAND_GPIO_BASE + 0xC10)
+#define NAND_GPIO_RDSELE_EN  (NAND_GPIO_BASE + 0xCE0)
+#define NAND_GPIO_RDSELF_EN  (NAND_GPIO_BASE + 0xCF0)
+
+/* SPI nand */
+#define NAND_GPIO_MODE17     (NAND_GPIO_BASE + 0x460)
+#define NAND_GPIO_MODE18     (NAND_GPIO_BASE + 0x470)
+#define NAND_GPIO_DRV_MODE5  (NAND_GPIO_BASE + 0xD50)	/* identical redefinition of the SLC value above; benign */
+#define NAND_GPIO_DRV_MODE6  (NAND_GPIO_BASE + 0xD60)	/* identical redefinition of the SLC value above; benign */
+#define NAND_GPIO_RDSELC_EN  (NAND_GPIO_BASE + 0xCC0)
+#define NAND_GPIO_RDSELD_EN  (NAND_GPIO_BASE + 0xCD0)
+
+/********** reserved buffer for test ******/
+#define NAND_BUF             0x5FA00000
+#define NAND_BUF_LEN         0x500000
+#define NAND_DATA            0x5FA00000
+#define NAND_DATA_R          0x5FB00000
+#define NAND_DATA_W          0x5FC00000
+#define NAND_DATA_LEN        0x400000
+#define NAND_OOB             0x5FE00000
+#define NAND_OOB_R           0x5FE00000
+#define NAND_OOB_W           0x5FE80000
+#define NAND_OOB_LEN         0x100000
+
+void nand_gpio_init(int nand_type, int pinmux_group);
+void nand_clock_init(int nand_type, int sel, int *rate);
+#endif
+void nand_get_resource(struct nfi_resource *res);	/* NOTE(review): struct nfi_resource is not declared in this header -- include or forward-declare it */
+int nand_types_support(void);
+
+#endif /* end of __NANDX_PLATFORM_H__ */
+