blob: dc3ebaf7af059e4ab1d1aec3f1daabc067475789 [file] [log] [blame]
b.liue9582032025-04-17 19:18:16 +08001From de8b6cf615be20b25d0f3c817866de2c0d46a704 Mon Sep 17 00:00:00 2001
2From: Sam Shih <sam.shih@mediatek.com>
3Date: Mon, 20 Apr 2020 17:10:05 +0800
4Subject: [PATCH 1/3] nand: add spi nand driver
5
6Add spi nand driver support for mt7622 based on nfi controller
7
8Signed-off-by: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
9---
10 drivers/mtd/Kconfig | 7 +
11 drivers/mtd/Makefile | 4 +
12 drivers/mtd/nand/raw/nand.c | 2 +
13 drivers/mtd/nandx/NOTICE | 52 +
14 drivers/mtd/nandx/Nandx.config | 17 +
15 drivers/mtd/nandx/Nandx.mk | 91 ++
16 drivers/mtd/nandx/README | 31 +
17 drivers/mtd/nandx/core/Nandx.mk | 38 +
18 drivers/mtd/nandx/core/core_io.c | 735 +++++++++
19 drivers/mtd/nandx/core/core_io.h | 39 +
20 drivers/mtd/nandx/core/nand/device_spi.c | 200 +++
21 drivers/mtd/nandx/core/nand/device_spi.h | 132 ++
22 drivers/mtd/nandx/core/nand/nand_spi.c | 526 +++++++
23 drivers/mtd/nandx/core/nand/nand_spi.h | 35 +
24 drivers/mtd/nandx/core/nand_base.c | 304 ++++
25 drivers/mtd/nandx/core/nand_base.h | 71 +
26 drivers/mtd/nandx/core/nand_chip.c | 272 ++++
27 drivers/mtd/nandx/core/nand_chip.h | 103 ++
28 drivers/mtd/nandx/core/nand_device.c | 285 ++++
29 drivers/mtd/nandx/core/nand_device.h | 608 ++++++++
30 drivers/mtd/nandx/core/nfi.h | 51 +
31 drivers/mtd/nandx/core/nfi/nfi_base.c | 1357 +++++++++++++++++
32 drivers/mtd/nandx/core/nfi/nfi_base.h | 95 ++
33 drivers/mtd/nandx/core/nfi/nfi_regs.h | 114 ++
34 drivers/mtd/nandx/core/nfi/nfi_spi.c | 689 +++++++++
35 drivers/mtd/nandx/core/nfi/nfi_spi.h | 44 +
36 drivers/mtd/nandx/core/nfi/nfi_spi_regs.h | 64 +
37 drivers/mtd/nandx/core/nfi/nfiecc.c | 510 +++++++
38 drivers/mtd/nandx/core/nfi/nfiecc.h | 90 ++
39 drivers/mtd/nandx/core/nfi/nfiecc_regs.h | 51 +
40 drivers/mtd/nandx/driver/Nandx.mk | 18 +
41 drivers/mtd/nandx/driver/bbt/bbt.c | 408 +++++
42 drivers/mtd/nandx/driver/uboot/driver.c | 574 +++++++
43 drivers/mtd/nandx/include/Nandx.mk | 16 +
44 drivers/mtd/nandx/include/internal/bbt.h | 62 +
45 .../mtd/nandx/include/internal/nandx_core.h | 250 +++
46 .../mtd/nandx/include/internal/nandx_errno.h | 40 +
47 .../mtd/nandx/include/internal/nandx_util.h | 221 +++
48 drivers/mtd/nandx/include/uboot/nandx_os.h | 78 +
49 include/configs/mt7622.h | 25 +
50 40 files changed, 8309 insertions(+)
51 create mode 100644 drivers/mtd/nandx/NOTICE
52 create mode 100644 drivers/mtd/nandx/Nandx.config
53 create mode 100644 drivers/mtd/nandx/Nandx.mk
54 create mode 100644 drivers/mtd/nandx/README
55 create mode 100644 drivers/mtd/nandx/core/Nandx.mk
56 create mode 100644 drivers/mtd/nandx/core/core_io.c
57 create mode 100644 drivers/mtd/nandx/core/core_io.h
58 create mode 100644 drivers/mtd/nandx/core/nand/device_spi.c
59 create mode 100644 drivers/mtd/nandx/core/nand/device_spi.h
60 create mode 100644 drivers/mtd/nandx/core/nand/nand_spi.c
61 create mode 100644 drivers/mtd/nandx/core/nand/nand_spi.h
62 create mode 100644 drivers/mtd/nandx/core/nand_base.c
63 create mode 100644 drivers/mtd/nandx/core/nand_base.h
64 create mode 100644 drivers/mtd/nandx/core/nand_chip.c
65 create mode 100644 drivers/mtd/nandx/core/nand_chip.h
66 create mode 100644 drivers/mtd/nandx/core/nand_device.c
67 create mode 100644 drivers/mtd/nandx/core/nand_device.h
68 create mode 100644 drivers/mtd/nandx/core/nfi.h
69 create mode 100644 drivers/mtd/nandx/core/nfi/nfi_base.c
70 create mode 100644 drivers/mtd/nandx/core/nfi/nfi_base.h
71 create mode 100644 drivers/mtd/nandx/core/nfi/nfi_regs.h
72 create mode 100644 drivers/mtd/nandx/core/nfi/nfi_spi.c
73 create mode 100644 drivers/mtd/nandx/core/nfi/nfi_spi.h
74 create mode 100644 drivers/mtd/nandx/core/nfi/nfi_spi_regs.h
75 create mode 100644 drivers/mtd/nandx/core/nfi/nfiecc.c
76 create mode 100644 drivers/mtd/nandx/core/nfi/nfiecc.h
77 create mode 100644 drivers/mtd/nandx/core/nfi/nfiecc_regs.h
78 create mode 100644 drivers/mtd/nandx/driver/Nandx.mk
79 create mode 100644 drivers/mtd/nandx/driver/bbt/bbt.c
80 create mode 100644 drivers/mtd/nandx/driver/uboot/driver.c
81 create mode 100644 drivers/mtd/nandx/include/Nandx.mk
82 create mode 100644 drivers/mtd/nandx/include/internal/bbt.h
83 create mode 100644 drivers/mtd/nandx/include/internal/nandx_core.h
84 create mode 100644 drivers/mtd/nandx/include/internal/nandx_errno.h
85 create mode 100644 drivers/mtd/nandx/include/internal/nandx_util.h
86 create mode 100644 drivers/mtd/nandx/include/uboot/nandx_os.h
87
88diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
89index 5e7571cf3d..34a59b44b9 100644
90--- a/drivers/mtd/Kconfig
91+++ b/drivers/mtd/Kconfig
92@@ -101,6 +101,13 @@ config HBMC_AM654
93 This is the driver for HyperBus controller on TI's AM65x and
94 other SoCs
95
96+config MTK_SPI_NAND
97+ tristate "Mediatek SPI Nand"
98+ depends on DM_MTD
99+ help
100+	  This option enables support for SPI NAND devices via the
101+	  Mediatek NFI controller.
102+
103 source "drivers/mtd/nand/Kconfig"
104
105 source "drivers/mtd/spi/Kconfig"
106diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
107index 318788c5e2..1df1031b23 100644
108--- a/drivers/mtd/Makefile
109+++ b/drivers/mtd/Makefile
110@@ -41,3 +41,7 @@ obj-$(CONFIG_$(SPL_TPL_)SPI_FLASH_SUPPORT) += spi/
111 obj-$(CONFIG_SPL_UBI) += ubispl/
112
113 endif
114+
115+ifeq ($(CONFIG_MTK_SPI_NAND), y)
116+include $(srctree)/drivers/mtd/nandx/Nandx.mk
117+endif
118diff --git a/drivers/mtd/nand/raw/nand.c b/drivers/mtd/nand/raw/nand.c
119index 026419e4e6..4be0c7d8f3 100644
120--- a/drivers/mtd/nand/raw/nand.c
121+++ b/drivers/mtd/nand/raw/nand.c
122@@ -91,8 +91,10 @@ static void nand_init_chip(int i)
123 if (board_nand_init(nand))
124 return;
125
126+#ifndef CONFIG_MTK_SPI_NAND
127 if (nand_scan(mtd, maxchips))
128 return;
129+#endif
130
131 nand_register(i, mtd);
132 }
133diff --git a/drivers/mtd/nandx/NOTICE b/drivers/mtd/nandx/NOTICE
134new file mode 100644
135index 0000000000..1a06ca3867
136--- /dev/null
137+++ b/drivers/mtd/nandx/NOTICE
138@@ -0,0 +1,52 @@
139+
140+/*
141+ * Nandx - Mediatek Common Nand Driver
142+ * Copyright (C) 2017 MediaTek Inc.
143+ *
144+ * Nandx is dual licensed: you can use it either under the terms of
145+ * the GPL, or the BSD license, at your option.
146+ *
147+ * a) This program is free software; you can redistribute it and/or modify
148+ * it under the terms of the GNU General Public License version 2 as
149+ * published by the Free Software Foundation.
150+ *
151+ * This library is distributed in the hope that it will be useful,
152+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
153+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
154+ * GNU General Public License for more details.
155+ *
156+ * This program is distributed in the hope that it will be useful,
157+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
158+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
159+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
160+ *
161+ * Alternatively,
162+ *
163+ * b) Redistribution and use in source and binary forms, with or
164+ * without modification, are permitted provided that the following
165+ * conditions are met:
166+ *
167+ * 1. Redistributions of source code must retain the above
168+ * copyright notice, this list of conditions and the following
169+ * disclaimer.
170+ * 2. Redistributions in binary form must reproduce the above
171+ * copyright notice, this list of conditions and the following
172+ * disclaimer in the documentation and/or other materials
173+ * provided with the distribution.
174+ *
175+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
176+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
177+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
178+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
179+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
180+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
181+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
182+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
183+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
184+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
185+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
186+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
187+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
188+ */
189+
190+####################################################################################################
191\ No newline at end of file
192diff --git a/drivers/mtd/nandx/Nandx.config b/drivers/mtd/nandx/Nandx.config
193new file mode 100644
194index 0000000000..35705ee28d
195--- /dev/null
196+++ b/drivers/mtd/nandx/Nandx.config
197@@ -0,0 +1,17 @@
198+NANDX_SIMULATOR_SUPPORT := n
199+NANDX_CTP_SUPPORT := n
200+NANDX_DA_SUPPORT := n
201+NANDX_PRELOADER_SUPPORT := n
202+NANDX_LK_SUPPORT := n
203+NANDX_KERNEL_SUPPORT := n
204+NANDX_BROM_SUPPORT := n
205+NANDX_UBOOT_SUPPORT := y
206+NANDX_BBT_SUPPORT := y
207+
208+NANDX_NAND_SPI := y
209+NANDX_NAND_SLC := n
210+NANDX_NAND_MLC := n
211+NANDX_NAND_TLC := n
212+NANDX_NFI_BASE := y
213+NANDX_NFI_ECC := y
214+NANDX_NFI_SPI := y
215diff --git a/drivers/mtd/nandx/Nandx.mk b/drivers/mtd/nandx/Nandx.mk
216new file mode 100644
217index 0000000000..f5a6f2a628
218--- /dev/null
219+++ b/drivers/mtd/nandx/Nandx.mk
220@@ -0,0 +1,91 @@
221+#
222+# Copyright (C) 2017 MediaTek Inc.
223+# Licensed under either
224+# BSD Licence, (see NOTICE for more details)
225+# GNU General Public License, version 2.0, (see NOTICE for more details)
226+#
227+
228+nandx_dir := $(shell dirname $(lastword $(MAKEFILE_LIST)))
229+include $(nandx_dir)/Nandx.config
230+
231+ifeq ($(NANDX_SIMULATOR_SUPPORT), y)
232+sim-obj :=
233+sim-inc :=
234+nandx-obj := sim-obj
235+nandx-prefix := .
236+nandx-postfix := %.o
237+sim-inc += -I$(nandx-prefix)/include/internal
238+sim-inc += -I$(nandx-prefix)/include/simulator
239+endif
240+
241+ifeq ($(NANDX_CTP_SUPPORT), y)
242+nandx-obj := C_SRC_FILES
243+nandx-prefix := $(nandx_dir)
244+nandx-postfix := %.c
245+INC_DIRS += $(nandx_dir)/include/internal
246+INC_DIRS += $(nandx_dir)/include/ctp
247+endif
248+
249+ifeq ($(NANDX_DA_SUPPORT), y)
250+nandx-obj := obj-y
251+nandx-prefix := $(nandx_dir)
252+nandx-postfix := %.o
253+INCLUDE_PATH += $(TOPDIR)/platform/$(CODE_BASE)/dev/nand/nandx/include/internal
254+INCLUDE_PATH += $(TOPDIR)/platform/$(CODE_BASE)/dev/nand/nandx/include/da
255+endif
256+
257+ifeq ($(NANDX_PRELOADER_SUPPORT), y)
258+nandx-obj := MOD_SRC
259+nandx-prefix := $(nandx_dir)
260+nandx-postfix := %.c
261+C_OPTION += -I$(MTK_PATH_PLATFORM)/src/drivers/nandx/include/internal
262+C_OPTION += -I$(MTK_PATH_PLATFORM)/src/drivers/nandx/include/preloader
263+endif
264+
265+ifeq ($(NANDX_LK_SUPPORT), y)
266+nandx-obj := MODULE_SRCS
267+nandx-prefix := $(nandx_dir)
268+nandx-postfix := %.c
269+GLOBAL_INCLUDES += $(nandx_dir)/include/internal
270+GLOBAL_INCLUDES += $(nandx_dir)/include/lk
271+endif
272+
273+ifeq ($(NANDX_KERNEL_SUPPORT), y)
274+nandx-obj := obj-y
275+nandx-prefix := nandx
276+nandx-postfix := %.o
277+ccflags-y += -I$(nandx_dir)/include/internal
278+ccflags-y += -I$(nandx_dir)/include/kernel
279+endif
280+
281+ifeq ($(NANDX_UBOOT_SUPPORT), y)
282+nandx-obj := obj-y
283+nandx-prefix := nandx
284+nandx-postfix := %.o
285+ccflags-y += -I$(nandx_dir)/include/internal
286+ccflags-y += -I$(nandx_dir)/include/uboot
287+endif
288+
289+nandx-y :=
290+include $(nandx_dir)/core/Nandx.mk
291+nandx-target := $(nandx-prefix)/core/$(nandx-postfix)
292+$(nandx-obj) += $(patsubst %.c, $(nandx-target), $(nandx-y))
293+
294+
295+nandx-y :=
296+include $(nandx_dir)/driver/Nandx.mk
297+nandx-target := $(nandx-prefix)/driver/$(nandx-postfix)
298+$(nandx-obj) += $(patsubst %.c, $(nandx-target), $(nandx-y))
299+
300+ifeq ($(NANDX_SIMULATOR_SUPPORT), y)
301+cc := gcc
302+CFLAGS += $(sim-inc)
303+
304+.PHONY:nandx
305+nandx: $(sim-obj)
306+ $(cc) $(sim-obj) -o nandx
307+
308+.PHONY:clean
309+clean:
310+ rm -rf $(sim-obj) nandx
311+endif
312diff --git a/drivers/mtd/nandx/README b/drivers/mtd/nandx/README
313new file mode 100644
314index 0000000000..0feaeaeb88
315--- /dev/null
316+++ b/drivers/mtd/nandx/README
317@@ -0,0 +1,31 @@
318+
319+ NAND2.0
320+ ===============================
321+
322+	NAND2.0 is a common nand driver which is designed for accessing
323+different types of NANDs (SLC, SPI-NAND, MLC, TLC) on various OSes. This
324+driver can work on most SoCs of Mediatek.
325+
326+	Although there is already a common nand driver, it doesn't cover
327+SPI-NAND, and it does not match our IC-Verification's requirement. We need
328+a driver that can be extended or cut down easily.
329+
330+	This driver is based on NANDX & SLC. We try to refactor structures,
331+and make them inheritable. We also refactor some operations' flows,
332+primarily for adding SPI-NAND support.
333+
334+ This driver's architecture is like:
335+
336+ Driver @LK/Uboot/DA... |IC verify/other purposes
337+ ----------------------------------------------------------------
338+ partition | BBM |
339+ -------------------------------------- | extend_core
340+ nandx_core/core_io |
341+ ----------------------------------------------------------------
342+ nand_chip/nand_base |
343+ -------------------------------------- | extend_nfi
344+ nand_device | nfi/nfi_base |
345+
346+	Any block of the above graph can be extended at will; if you
347+want to add a new feature to this code, please make sure that your code
348+follows the framework, and we would appreciate it.
349diff --git a/drivers/mtd/nandx/core/Nandx.mk b/drivers/mtd/nandx/core/Nandx.mk
350new file mode 100644
351index 0000000000..7a5661c044
352--- /dev/null
353+++ b/drivers/mtd/nandx/core/Nandx.mk
354@@ -0,0 +1,38 @@
355+#
356+# Copyright (C) 2017 MediaTek Inc.
357+# Licensed under either
358+# BSD Licence, (see NOTICE for more details)
359+# GNU General Public License, version 2.0, (see NOTICE for more details)
360+#
361+
362+nandx-y += nand_device.c
363+nandx-y += nand_base.c
364+nandx-y += nand_chip.c
365+nandx-y += core_io.c
366+
367+nandx-header-y += nand_device.h
368+nandx-header-y += nand_base.h
369+nandx-header-y += nand_chip.h
370+nandx-header-y += core_io.h
371+nandx-header-y += nfi.h
372+
373+nandx-$(NANDX_NAND_SPI) += nand/device_spi.c
374+nandx-$(NANDX_NAND_SPI) += nand/nand_spi.c
375+nandx-$(NANDX_NAND_SLC) += nand/device_slc.c
376+nandx-$(NANDX_NAND_SLC) += nand/nand_slc.c
377+
378+nandx-header-$(NANDX_NAND_SPI) += nand/device_spi.h
379+nandx-header-$(NANDX_NAND_SPI) += nand/nand_spi.h
380+nandx-header-$(NANDX_NAND_SLC) += nand/device_slc.h
381+nandx-header-$(NANDX_NAND_SLC) += nand/nand_slc.h
382+
383+nandx-$(NANDX_NFI_BASE) += nfi/nfi_base.c
384+nandx-$(NANDX_NFI_ECC) += nfi/nfiecc.c
385+nandx-$(NANDX_NFI_SPI) += nfi/nfi_spi.c
386+
387+nandx-header-$(NANDX_NFI_BASE) += nfi/nfi_base.h
388+nandx-header-$(NANDX_NFI_BASE) += nfi/nfi_regs.h
389+nandx-header-$(NANDX_NFI_ECC) += nfi/nfiecc.h
390+nandx-header-$(NANDX_NFI_ECC) += nfi/nfiecc_regs.h
391+nandx-header-$(NANDX_NFI_SPI) += nfi/nfi_spi.h
392+nandx-header-$(NANDX_NFI_SPI) += nfi/nfi_spi_regs.h
393diff --git a/drivers/mtd/nandx/core/core_io.c b/drivers/mtd/nandx/core/core_io.c
394new file mode 100644
395index 0000000000..716eeed38d
396--- /dev/null
397+++ b/drivers/mtd/nandx/core/core_io.c
398@@ -0,0 +1,735 @@
399+/*
400+ * Copyright (C) 2017 MediaTek Inc.
401+ * Licensed under either
402+ * BSD Licence, (see NOTICE for more details)
403+ * GNU General Public License, version 2.0, (see NOTICE for more details)
404+ */
405+
406+/*NOTE: switch cache/multi*/
407+#include "nandx_util.h"
408+#include "nandx_core.h"
409+#include "nand_chip.h"
410+#include "core_io.h"
411+
412+static struct nandx_desc *g_nandx;
413+
414+static inline bool is_sector_align(u64 val)
415+{
416+ return reminder(val, g_nandx->chip->sector_size) ? false : true;
417+}
418+
419+static inline bool is_page_align(u64 val)
420+{
421+ return reminder(val, g_nandx->chip->page_size) ? false : true;
422+}
423+
424+static inline bool is_block_align(u64 val)
425+{
426+ return reminder(val, g_nandx->chip->block_size) ? false : true;
427+}
428+
429+static inline u32 page_sectors(void)
430+{
431+ return div_down(g_nandx->chip->page_size, g_nandx->chip->sector_size);
432+}
433+
434+static inline u32 sector_oob(void)
435+{
436+ return div_down(g_nandx->chip->oob_size, page_sectors());
437+}
438+
439+static inline u32 sector_padded_size(void)
440+{
441+ return g_nandx->chip->sector_size + g_nandx->chip->sector_spare_size;
442+}
443+
444+static inline u32 page_padded_size(void)
445+{
446+ return page_sectors() * sector_padded_size();
447+}
448+
449+static inline u32 offset_to_padded_col(u64 offset)
450+{
451+ struct nandx_desc *nandx = g_nandx;
452+ u32 col, sectors;
453+
454+ col = reminder(offset, nandx->chip->page_size);
455+ sectors = div_down(col, nandx->chip->sector_size);
456+
457+ return col + sectors * nandx->chip->sector_spare_size;
458+}
459+
460+static inline u32 offset_to_row(u64 offset)
461+{
462+ return div_down(offset, g_nandx->chip->page_size);
463+}
464+
465+static inline u32 offset_to_col(u64 offset)
466+{
467+ return reminder(offset, g_nandx->chip->page_size);
468+}
469+
470+static inline u32 oob_upper_size(void)
471+{
472+ return g_nandx->ecc_en ? g_nandx->chip->oob_size :
473+ g_nandx->chip->sector_spare_size * page_sectors();
474+}
475+
476+static inline bool is_upper_oob_align(u64 val)
477+{
478+ return reminder(val, oob_upper_size()) ? false : true;
479+}
480+
481+#define prepare_op(_op, _row, _col, _len, _data, _oob) \
482+ do { \
483+ (_op).row = (_row); \
484+ (_op).col = (_col); \
485+ (_op).len = (_len); \
486+ (_op).data = (_data); \
487+ (_op).oob = (_oob); \
488+ } while (0)
489+
490+static int operation_multi(enum nandx_op_mode mode, u8 *data, u8 *oob,
491+ u64 offset, size_t len)
492+{
493+ struct nandx_desc *nandx = g_nandx;
494+ u32 row = offset_to_row(offset);
495+ u32 col = offset_to_padded_col(offset);
496+
497+ if (nandx->mode == NANDX_IDLE) {
498+ nandx->mode = mode;
499+ nandx->ops_current = 0;
500+ } else if (nandx->mode != mode) {
501+ pr_info("forbid mixed operations.\n");
502+ return -EOPNOTSUPP;
503+ }
504+
505+ prepare_op(nandx->ops[nandx->ops_current], row, col, len, data, oob);
506+ nandx->ops_current++;
507+
508+ if (nandx->ops_current == nandx->ops_multi_len)
509+ return nandx_sync();
510+
511+ return nandx->ops_multi_len - nandx->ops_current;
512+}
513+
514+static int operation_sequent(enum nandx_op_mode mode, u8 *data, u8 *oob,
515+ u64 offset, size_t len)
516+{
517+ struct nandx_desc *nandx = g_nandx;
518+ struct nand_chip *chip = nandx->chip;
519+ u32 row = offset_to_row(offset);
520+ func_chip_ops chip_ops;
521+ u8 *ref_data = data, *ref_oob = oob;
522+ int align, ops, row_step;
523+ int i, rem;
524+
525+ align = data ? chip->page_size : oob_upper_size();
526+ ops = data ? div_down(len, align) : div_down(len, oob_upper_size());
527+ row_step = 1;
528+
529+ switch (mode) {
530+ case NANDX_ERASE:
531+ chip_ops = chip->erase_block;
532+ align = chip->block_size;
533+ ops = div_down(len, align);
534+ row_step = chip->block_pages;
535+ break;
536+
537+ case NANDX_READ:
538+ chip_ops = chip->read_page;
539+ break;
540+
541+ case NANDX_WRITE:
542+ chip_ops = chip->write_page;
543+ break;
544+
545+ default:
546+ return -EINVAL;
547+ }
548+
549+ if (!data) {
550+ ref_data = nandx->head_buf;
551+ memset(ref_data, 0xff, chip->page_size);
552+ }
553+
554+ if (!oob) {
555+ ref_oob = nandx->head_buf + chip->page_size;
556+ memset(ref_oob, 0xff, oob_upper_size());
557+ }
558+
559+ for (i = 0; i < ops; i++) {
560+ prepare_op(nandx->ops[nandx->ops_current],
561+ row + i * row_step, 0, align, ref_data, ref_oob);
562+ nandx->ops_current++;
563+ /* if data or oob is null, nandx->head_buf or
564+ * nandx->head_buf + chip->page_size should not been used
565+ * so, here it is safe to use the buf.
566+ */
567+ ref_data = data ? ref_data + chip->page_size : nandx->head_buf;
568+ ref_oob = oob ? ref_oob + oob_upper_size() :
569+ nandx->head_buf + chip->page_size;
570+ }
571+
572+ if (nandx->mode == NANDX_WRITE) {
573+ rem = reminder(nandx->ops_current, nandx->min_write_pages);
574+ if (rem)
575+ return nandx->min_write_pages - rem;
576+ }
577+
578+ nandx->ops_current = 0;
579+ return chip_ops(chip, nandx->ops, ops);
580+}
581+
582+static int read_pages(u8 *data, u8 *oob, u64 offset, size_t len)
583+{
584+ struct nandx_desc *nandx = g_nandx;
585+ struct nand_chip *chip = nandx->chip;
586+ struct nandx_split64 split = {0};
587+ u8 *ref_data = data, *ref_oob;
588+ u32 row, col;
589+ int ret = 0, i, ops;
590+ u32 head_offset = 0;
591+ u64 val;
592+
593+ if (!data)
594+ return operation_sequent(NANDX_READ, NULL, oob, offset, len);
595+
596+ ref_oob = oob ? oob : nandx->head_buf + chip->page_size;
597+
598+ nandx_split(&split, offset, len, val, chip->page_size);
599+
600+ if (split.head_len) {
601+ row = offset_to_row(split.head);
602+ col = offset_to_col(split.head);
603+ prepare_op(nandx->ops[nandx->ops_current], row, 0,
604+ chip->page_size,
605+ nandx->head_buf, ref_oob);
606+ nandx->ops_current++;
607+
608+ head_offset = col;
609+
610+ ref_data += split.head_len;
611+ ref_oob = oob ? ref_oob + oob_upper_size() :
612+ nandx->head_buf + chip->page_size;
613+ }
614+
615+ if (split.body_len) {
616+ ops = div_down(split.body_len, chip->page_size);
617+ row = offset_to_row(split.body);
618+ for (i = 0; i < ops; i++) {
619+ prepare_op(nandx->ops[nandx->ops_current],
620+ row + i, 0, chip->page_size,
621+ ref_data, ref_oob);
622+ nandx->ops_current++;
623+ ref_data += chip->page_size;
624+ ref_oob = oob ? ref_oob + oob_upper_size() :
625+ nandx->head_buf + chip->page_size;
626+ }
627+ }
628+
629+ if (split.tail_len) {
630+ row = offset_to_row(split.tail);
631+ prepare_op(nandx->ops[nandx->ops_current], row, 0,
632+ chip->page_size, nandx->tail_buf, ref_oob);
633+ nandx->ops_current++;
634+ }
635+
636+ ret = chip->read_page(chip, nandx->ops, nandx->ops_current);
637+
638+ if (split.head_len)
639+ memcpy(data, nandx->head_buf + head_offset, split.head_len);
640+ if (split.tail_len)
641+ memcpy(ref_data, nandx->tail_buf, split.tail_len);
642+
643+ nandx->ops_current = 0;
644+ return ret;
645+}
646+
647+int nandx_read(u8 *data, u8 *oob, u64 offset, size_t len)
648+{
649+ struct nandx_desc *nandx = g_nandx;
650+
651+ if (!len || len > nandx->info.total_size)
652+ return -EINVAL;
653+ if (div_up(len, nandx->chip->page_size) > nandx->ops_len)
654+ return -EINVAL;
655+ if (!data && !oob)
656+ return -EINVAL;
657+ /**
658+ * as design, oob not support partial read
659+ * and, the length of oob buf should be oob size aligned
660+ */
661+ if (!data && !is_upper_oob_align(len))
662+ return -EINVAL;
663+
664+ if (g_nandx->multi_en) {
665+ /* as design, there only 2 buf for partial read,
666+ * if partial read allowed for multi read,
667+ * there are not enough buf
668+ */
669+ if (!is_sector_align(offset))
670+ return -EINVAL;
671+ if (data && !is_sector_align(len))
672+ return -EINVAL;
673+ return operation_multi(NANDX_READ, data, oob, offset, len);
674+ }
675+
676+ nandx->ops_current = 0;
677+ nandx->mode = NANDX_IDLE;
678+ return read_pages(data, oob, offset, len);
679+}
680+
681+static int write_pages(u8 *data, u8 *oob, u64 offset, size_t len)
682+{
683+ struct nandx_desc *nandx = g_nandx;
684+ struct nand_chip *chip = nandx->chip;
685+ struct nandx_split64 split = {0};
686+ int ret, rem, i, ops;
687+ u32 row, col;
688+ u8 *ref_oob = oob;
689+ u64 val;
690+
691+ nandx->mode = NANDX_WRITE;
692+
693+ if (!data)
694+ return operation_sequent(NANDX_WRITE, NULL, oob, offset, len);
695+
696+ if (!oob) {
697+ ref_oob = nandx->head_buf + chip->page_size;
698+ memset(ref_oob, 0xff, oob_upper_size());
699+ }
700+
701+ nandx_split(&split, offset, len, val, chip->page_size);
702+
703+ /*NOTE: slc can support sector write, here copy too many data.*/
704+ if (split.head_len) {
705+ row = offset_to_row(split.head);
706+ col = offset_to_col(split.head);
707+ memset(nandx->head_buf, 0xff, page_padded_size());
708+ memcpy(nandx->head_buf + col, data, split.head_len);
709+ prepare_op(nandx->ops[nandx->ops_current], row, 0,
710+ chip->page_size, nandx->head_buf, ref_oob);
711+ nandx->ops_current++;
712+
713+ data += split.head_len;
714+ ref_oob = oob ? ref_oob + oob_upper_size() :
715+ nandx->head_buf + chip->page_size;
716+ }
717+
718+ if (split.body_len) {
719+ row = offset_to_row(split.body);
720+ ops = div_down(split.body_len, chip->page_size);
721+ for (i = 0; i < ops; i++) {
722+ prepare_op(nandx->ops[nandx->ops_current],
723+ row + i, 0, chip->page_size, data, ref_oob);
724+ nandx->ops_current++;
725+ data += chip->page_size;
726+ ref_oob = oob ? ref_oob + oob_upper_size() :
727+ nandx->head_buf + chip->page_size;
728+ }
729+ }
730+
731+ if (split.tail_len) {
732+ row = offset_to_row(split.tail);
733+ memset(nandx->tail_buf, 0xff, page_padded_size());
734+ memcpy(nandx->tail_buf, data, split.tail_len);
735+ prepare_op(nandx->ops[nandx->ops_current], row, 0,
736+ chip->page_size, nandx->tail_buf, ref_oob);
737+ nandx->ops_current++;
738+ }
739+
740+ rem = reminder(nandx->ops_current, nandx->min_write_pages);
741+ if (rem)
742+ return nandx->min_write_pages - rem;
743+
744+ ret = chip->write_page(chip, nandx->ops, nandx->ops_current);
745+
746+ nandx->ops_current = 0;
747+ nandx->mode = NANDX_IDLE;
748+ return ret;
749+}
750+
751+int nandx_write(u8 *data, u8 *oob, u64 offset, size_t len)
752+{
753+ struct nandx_desc *nandx = g_nandx;
754+
755+ if (!len || len > nandx->info.total_size)
756+ return -EINVAL;
757+ if (div_up(len, nandx->chip->page_size) > nandx->ops_len)
758+ return -EINVAL;
759+ if (!data && !oob)
760+ return -EINVAL;
761+ if (!data && !is_upper_oob_align(len))
762+ return -EINVAL;
763+
764+ if (nandx->multi_en) {
765+ if (!is_page_align(offset))
766+ return -EINVAL;
767+ if (data && !is_page_align(len))
768+ return -EINVAL;
769+
770+ return operation_multi(NANDX_WRITE, data, oob, offset, len);
771+ }
772+
773+ return write_pages(data, oob, offset, len);
774+}
775+
776+int nandx_erase(u64 offset, size_t len)
777+{
778+ struct nandx_desc *nandx = g_nandx;
779+
780+ if (!len || len > nandx->info.total_size)
781+ return -EINVAL;
782+ if (div_down(len, nandx->chip->block_size) > nandx->ops_len)
783+ return -EINVAL;
784+ if (!is_block_align(offset) || !is_block_align(len))
785+ return -EINVAL;
786+
787+ if (g_nandx->multi_en)
788+ return operation_multi(NANDX_ERASE, NULL, NULL, offset, len);
789+
790+ nandx->ops_current = 0;
791+ nandx->mode = NANDX_IDLE;
792+ return operation_sequent(NANDX_ERASE, NULL, NULL, offset, len);
793+}
794+
795+int nandx_sync(void)
796+{
797+ struct nandx_desc *nandx = g_nandx;
798+ struct nand_chip *chip = nandx->chip;
799+ func_chip_ops chip_ops;
800+ int ret, i, rem;
801+
802+ if (!nandx->ops_current)
803+ return 0;
804+
805+ rem = reminder(nandx->ops_current, nandx->ops_multi_len);
806+ if (nandx->multi_en && rem) {
807+ ret = -EIO;
808+ goto error;
809+ }
810+
811+ switch (nandx->mode) {
812+ case NANDX_IDLE:
813+ return 0;
814+ case NANDX_ERASE:
815+ chip_ops = chip->erase_block;
816+ break;
817+ case NANDX_READ:
818+ chip_ops = chip->read_page;
819+ break;
820+ case NANDX_WRITE:
821+ chip_ops = chip->write_page;
822+ break;
823+ default:
824+ return -EINVAL;
825+ }
826+
827+ rem = reminder(nandx->ops_current, nandx->min_write_pages);
828+ if (!nandx->multi_en && nandx->mode == NANDX_WRITE && rem) {
829+ /* in one process of program, only allow 2 pages to do partial
830+ * write, here we supposed 1st buf would be used, and 2nd
831+ * buf should be not used.
832+ */
833+ memset(nandx->tail_buf, 0xff,
834+ chip->page_size + oob_upper_size());
835+ for (i = 0; i < rem; i++) {
836+ prepare_op(nandx->ops[nandx->ops_current],
837+ nandx->ops[nandx->ops_current - 1].row + 1,
838+ 0, chip->page_size, nandx->tail_buf,
839+ nandx->tail_buf + chip->page_size);
840+ nandx->ops_current++;
841+ }
842+ }
843+
844+ ret = chip_ops(nandx->chip, nandx->ops, nandx->ops_current);
845+
846+error:
847+ nandx->mode = NANDX_IDLE;
848+ nandx->ops_current = 0;
849+
850+ return ret;
851+}
852+
853+int nandx_ioctl(int cmd, void *arg)
854+{
855+ struct nandx_desc *nandx = g_nandx;
856+ struct nand_chip *chip = nandx->chip;
857+ int ret = 0;
858+
859+ switch (cmd) {
860+ case CORE_CTRL_NAND_INFO:
861+ *(struct nandx_info *)arg = nandx->info;
862+ break;
863+
864+ case CHIP_CTRL_OPS_MULTI:
865+ ret = chip->chip_ctrl(chip, cmd, arg);
866+ if (!ret)
867+ nandx->multi_en = *(bool *)arg;
868+ break;
869+
870+ case NFI_CTRL_ECC:
871+ ret = chip->chip_ctrl(chip, cmd, arg);
872+ if (!ret)
873+ nandx->ecc_en = *(bool *)arg;
874+ break;
875+
876+ default:
877+ ret = chip->chip_ctrl(chip, cmd, arg);
878+ break;
879+ }
880+
881+ return ret;
882+}
883+
884+bool nandx_is_bad_block(u64 offset)
885+{
886+ struct nandx_desc *nandx = g_nandx;
887+
888+ prepare_op(nandx->ops[0], offset_to_row(offset), 0,
889+ nandx->chip->page_size, nandx->head_buf,
890+ nandx->head_buf + nandx->chip->page_size);
891+
892+ return nandx->chip->is_bad_block(nandx->chip, nandx->ops, 1);
893+}
894+
895+int nandx_suspend(void)
896+{
897+ return g_nandx->chip->suspend(g_nandx->chip);
898+}
899+
900+int nandx_resume(void)
901+{
902+ return g_nandx->chip->resume(g_nandx->chip);
903+}
904+
905+int nandx_init(struct nfi_resource *res)
906+{
907+ struct nand_chip *chip;
908+ struct nandx_desc *nandx;
909+ int ret = 0;
910+
911+ if (!res)
912+ return -EINVAL;
913+
914+ chip = nand_chip_init(res);
915+ if (!chip) {
916+ pr_info("nand chip init fail.\n");
917+ return -EFAULT;
918+ }
919+
920+ nandx = (struct nandx_desc *)mem_alloc(1, sizeof(struct nandx_desc));
921+ if (!nandx)
922+ return -ENOMEM;
923+
924+ g_nandx = nandx;
925+
926+ nandx->chip = chip;
927+ nandx->min_write_pages = chip->min_program_pages;
928+ nandx->ops_multi_len = nandx->min_write_pages * chip->plane_num;
929+ nandx->ops_len = chip->block_pages * chip->plane_num;
930+ nandx->ops = mem_alloc(1, sizeof(struct nand_ops) * nandx->ops_len);
931+ if (!nandx->ops) {
932+ ret = -ENOMEM;
933+ goto ops_error;
934+ }
935+
936+#if NANDX_BULK_IO_USE_DRAM
937+ nandx->head_buf = NANDX_CORE_BUF_ADDR;
938+#else
939+ nandx->head_buf = mem_alloc(2, page_padded_size());
940+#endif
941+ if (!nandx->head_buf) {
942+ ret = -ENOMEM;
943+ goto buf_error;
944+ }
945+ nandx->tail_buf = nandx->head_buf + page_padded_size();
946+ memset(nandx->head_buf, 0xff, 2 * page_padded_size());
947+ nandx->multi_en = false;
948+ nandx->ecc_en = false;
949+ nandx->ops_current = 0;
950+ nandx->mode = NANDX_IDLE;
951+
952+ nandx->info.max_io_count = nandx->ops_len;
953+ nandx->info.min_write_pages = nandx->min_write_pages;
954+ nandx->info.plane_num = chip->plane_num;
955+ nandx->info.oob_size = chip->oob_size;
956+ nandx->info.page_parity_size = chip->sector_spare_size * page_sectors();
957+ nandx->info.page_size = chip->page_size;
958+ nandx->info.block_size = chip->block_size;
959+ nandx->info.total_size = chip->block_size * chip->block_num;
960+ nandx->info.fdm_ecc_size = chip->fdm_ecc_size;
961+ nandx->info.fdm_reg_size = chip->fdm_reg_size;
962+ nandx->info.ecc_strength = chip->ecc_strength;
963+ nandx->info.sector_size = chip->sector_size;
964+
965+ return 0;
966+
967+buf_error:
968+#if !NANDX_BULK_IO_USE_DRAM
969+ mem_free(nandx->head_buf);
970+#endif
971+ops_error:
972+ mem_free(nandx);
973+
974+ return ret;
975+}
976+
977+void nandx_exit(void)
978+{
979+ nand_chip_exit(g_nandx->chip);
980+#if !NANDX_BULK_IO_USE_DRAM
981+ mem_free(g_nandx->head_buf);
982+#endif
983+ mem_free(g_nandx->ops);
984+ mem_free(g_nandx);
985+}
986+
987+#ifdef NANDX_UNIT_TEST
988+static void dump_buf(u8 *buf, u32 len)
989+{
990+ u32 i;
991+
992+ pr_info("dump buf@0x%X start", (u32)buf);
993+ for (i = 0; i < len; i++) {
994+ if (!reminder(i, 16))
995+ pr_info("\n0x");
996+ pr_info("%x ", buf[i]);
997+ }
998+ pr_info("\ndump buf done.\n");
999+}
1000+
1001+int nandx_unit_test(u64 offset, size_t len)
1002+{
1003+ u8 *src_buf, *dst_buf;
1004+ u32 i, j;
1005+ int ret;
1006+
1007+ if (!len || len > g_nandx->chip->block_size)
1008+ return -EINVAL;
1009+
1010+#if NANDX_BULK_IO_USE_DRAM
1011+ src_buf = NANDX_UT_SRC_ADDR;
1012+ dst_buf = NANDX_UT_DST_ADDR;
1013+
1014+#else
1015+ src_buf = mem_alloc(1, g_nandx->chip->page_size);
1016+ if (!src_buf)
1017+ return -ENOMEM;
1018+ dst_buf = mem_alloc(1, g_nandx->chip->page_size);
1019+ if (!dst_buf) {
1020+ mem_free(src_buf);
1021+ return -ENOMEM;
1022+ }
1023+#endif
1024+
1025+ pr_info("%s: src_buf address 0x%x, dst_buf address 0x%x\n",
1026+ __func__, (int)((unsigned long)src_buf),
1027+ (int)((unsigned long)dst_buf));
1028+
1029+ memset(dst_buf, 0, g_nandx->chip->page_size);
1030+ pr_info("read page 0 data...!\n");
1031+ ret = nandx_read(dst_buf, NULL, 0, g_nandx->chip->page_size);
1032+ if (ret < 0) {
1033+ pr_info("read fail with ret %d\n", ret);
1034+ } else {
1035+ pr_info("read page success!\n");
1036+ }
1037+
1038+ for (i = 0; i < g_nandx->chip->page_size; i++) {
1039+ src_buf[i] = 0x5a;
1040+ }
1041+
1042+ ret = nandx_erase(offset, g_nandx->chip->block_size);
1043+ if (ret < 0) {
1044+ pr_info("erase fail with ret %d\n", ret);
1045+ goto error;
1046+ }
1047+
1048+ for (j = 0; j < g_nandx->chip->block_pages; j++) {
1049+ memset(dst_buf, 0, g_nandx->chip->page_size);
1050+ pr_info("check data after erase...!\n");
1051+ ret = nandx_read(dst_buf, NULL, offset, g_nandx->chip->page_size);
1052+ if (ret < 0) {
1053+ pr_info("read fail with ret %d\n", ret);
1054+ goto error;
1055+ }
1056+
1057+ for (i = 0; i < g_nandx->chip->page_size; i++) {
1058+ if (dst_buf[i] != 0xff) {
1059+ pr_info("read after erase, check fail @%d\n", i);
1060+ pr_info("all data should be 0xff\n");
1061+ ret = -ENANDERASE;
1062+ dump_buf(dst_buf, 128);
1063+ //goto error;
1064+ break;
1065+ }
1066+ }
1067+
1068+ pr_info("write data...!\n");
1069+ ret = nandx_write(src_buf, NULL, offset, g_nandx->chip->page_size);
1070+ if (ret < 0) {
1071+ pr_info("write fail with ret %d\n", ret);
1072+ goto error;
1073+ }
1074+
1075+ memset(dst_buf, 0, g_nandx->chip->page_size);
1076+ pr_info("read data...!\n");
1077+ ret = nandx_read(dst_buf, NULL, offset, g_nandx->chip->page_size);
1078+ if (ret < 0) {
1079+ pr_info("read fail with ret %d\n", ret);
1080+ goto error;
1081+ }
1082+
1083+ for (i = 0; i < g_nandx->chip->page_size; i++) {
1084+ if (dst_buf[i] != src_buf[i]) {
1085+ pr_info("read after write, check fail @%d\n", i);
1086+ pr_info("dst_buf should be same as src_buf\n");
1087+ ret = -EIO;
1088+ dump_buf(src_buf + i, 128);
1089+ dump_buf(dst_buf + i, 128);
1090+ break;
1091+ }
1092+ }
1093+
1094+ pr_err("%s %d %s@%d\n", __func__, __LINE__, ret?"Failed":"OK", j);
1095+ if (ret)
1096+ break;
1097+
1098+ offset += g_nandx->chip->page_size;
1099+ }
1100+
1101+ ret = nandx_erase(offset, g_nandx->chip->block_size);
1102+ if (ret < 0) {
1103+ pr_info("erase fail with ret %d\n", ret);
1104+ goto error;
1105+ }
1106+
1107+ memset(dst_buf, 0, g_nandx->chip->page_size);
1108+ ret = nandx_read(dst_buf, NULL, offset, g_nandx->chip->page_size);
1109+ if (ret < 0) {
1110+ pr_info("read fail with ret %d\n", ret);
1111+ goto error;
1112+ }
1113+
1114+ for (i = 0; i < g_nandx->chip->page_size; i++) {
1115+ if (dst_buf[i] != 0xff) {
1116+ pr_info("read after erase, check fail\n");
1117+ pr_info("all data should be 0xff\n");
1118+ ret = -ENANDERASE;
1119+ dump_buf(dst_buf, 128);
1120+ goto error;
1121+ }
1122+ }
1123+
1124+ return 0;
1125+
1126+error:
1127+#if !NANDX_BULK_IO_USE_DRAM
1128+ mem_free(src_buf);
1129+ mem_free(dst_buf);
1130+#endif
1131+ return ret;
1132+}
1133+#endif
1134diff --git a/drivers/mtd/nandx/core/core_io.h b/drivers/mtd/nandx/core/core_io.h
1135new file mode 100644
1136index 0000000000..edcb60908a
1137--- /dev/null
1138+++ b/drivers/mtd/nandx/core/core_io.h
1139@@ -0,0 +1,39 @@
1140+/*
1141+ * Copyright (C) 2017 MediaTek Inc.
1142+ * Licensed under either
1143+ * BSD Licence, (see NOTICE for more details)
1144+ * GNU General Public License, version 2.0, (see NOTICE for more details)
1145+ */
1146+
1147+#ifndef __CORE_IO_H__
1148+#define __CORE_IO_H__
1149+
1150+typedef int (*func_chip_ops)(struct nand_chip *, struct nand_ops *,
1151+ int);
1152+
1153+enum nandx_op_mode {
1154+ NANDX_IDLE,
1155+ NANDX_WRITE,
1156+ NANDX_READ,
1157+ NANDX_ERASE
1158+};
1159+
1160+struct nandx_desc {
1161+ struct nand_chip *chip;
1162+ struct nandx_info info;
1163+ enum nandx_op_mode mode;
1164+
1165+ bool multi_en;
1166+ bool ecc_en;
1167+
1168+ struct nand_ops *ops;
1169+ int ops_len;
1170+ int ops_multi_len;
1171+ int ops_current;
1172+ int min_write_pages;
1173+
1174+ u8 *head_buf;
1175+ u8 *tail_buf;
1176+};
1177+
1178+#endif /* __CORE_IO_H__ */
1179diff --git a/drivers/mtd/nandx/core/nand/device_spi.c b/drivers/mtd/nandx/core/nand/device_spi.c
1180new file mode 100644
1181index 0000000000..db338c28c2
1182--- /dev/null
1183+++ b/drivers/mtd/nandx/core/nand/device_spi.c
1184@@ -0,0 +1,200 @@
1185+/*
1186+ * Copyright (C) 2017 MediaTek Inc.
1187+ * Licensed under either
1188+ * BSD Licence, (see NOTICE for more details)
1189+ * GNU General Public License, version 2.0, (see NOTICE for more details)
1190+ */
1191+
1192+#include "nandx_util.h"
1193+#include "../nand_device.h"
1194+#include "device_spi.h"
1195+
1196+/* spi nand basic commands */
1197+static struct nand_cmds spi_cmds = {
1198+ .reset = 0xff,
1199+ .read_id = 0x9f,
1200+ .read_status = 0x0f,
1201+ .read_param_page = 0x03,
1202+ .set_feature = 0x1f,
1203+ .get_feature = 0x0f,
1204+ .read_1st = 0x13,
1205+ .read_2nd = -1,
1206+ .random_out_1st = 0x03,
1207+ .random_out_2nd = -1,
1208+ .program_1st = 0x02,
1209+ .program_2nd = 0x10,
1210+ .erase_1st = 0xd8,
1211+ .erase_2nd = -1,
1212+ .read_cache = 0x30,
1213+ .read_cache_last = 0x3f,
1214+ .program_cache = 0x02
1215+};
1216+
1217+/* spi nand extend commands */
1218+static struct spi_extend_cmds spi_extend_cmds = {
1219+ .die_select = 0xc2,
1220+ .write_enable = 0x06
1221+};
1222+
1223+/* start bit position of each addressing field */
1224+static struct nand_addressing spi_addressing = {
1225+ .row_bit_start = 0,
1226+ .block_bit_start = 0,
1227+ .plane_bit_start = 12,
1228+ .lun_bit_start = 0,
1229+};
1230+
1231+/* spi nand endurance */
1232+static struct nand_endurance spi_endurance = {
1233+ .pe_cycle = 100000,
1234+ .ecc_req = 1,
1235+ .max_bitflips = 1
1236+};
1237+
1238+/* array_busy, write_protect, erase_fail, program_fail */
1239+static struct nand_status spi_status[] = {
1240+ {.array_busy = BIT(0),
1241+ .write_protect = BIT(1),
1242+ .erase_fail = BIT(2),
1243+ .program_fail = BIT(3)}
1244+};
1245+
1246+/* array timing parameters, measured in microseconds (us) */
1247+static struct nand_array_timing spi_array_timing = {
1248+ .tRST = 500,
1249+ .tWHR = 1,
1250+ .tR = 25,
1251+ .tRCBSY = 25,
1252+ .tFEAT = 1,
1253+ .tPROG = 600,
1254+ .tPCBSY = 600,
1255+ .tBERS = 10000,
1256+ .tDBSY = 1
1257+};
1258+
1259+/* spi nand device table */
1260+static struct device_spi spi_nand[] = {
1261+ {
1262+ NAND_DEVICE("W25N01GV",
1263+ NAND_PACK_ID(0xef, 0xaa, 0x21, 0, 0, 0, 0, 0),
1264+ 3, 0, 3, 3,
1265+ 1, 1, 1, 1024, KB(128), KB(2), 64, 1,
1266+ &spi_cmds, &spi_addressing, &spi_status[0],
1267+ &spi_endurance, &spi_array_timing),
1268+ {
1269+ NAND_SPI_PROTECT(0xa0, 1, 2, 6),
1270+ NAND_SPI_CONFIG(0xb0, 4, 6, 0),
1271+ NAND_SPI_STATUS(0xc0, 4, 5),
1272+ NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
1273+ },
1274+ &spi_extend_cmds, 0xff, 0xff
1275+ },
1276+ {
1277+ NAND_DEVICE("MX35LF1G",
1278+ NAND_PACK_ID(0xc2, 0x12, 0x21, 0, 0, 0, 0, 0),
1279+ 2, 0, 3, 3,
1280+ 1, 1, 1, 1024, KB(128), KB(2), 64, 1,
1281+ &spi_cmds, &spi_addressing, &spi_status[0],
1282+ &spi_endurance, &spi_array_timing),
1283+ {
1284+ NAND_SPI_PROTECT(0xa0, 1, 2, 6),
1285+ NAND_SPI_CONFIG(0xb0, 4, 6, 1),
1286+ NAND_SPI_STATUS(0xc0, 4, 5),
1287+ NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
1288+ },
1289+ &spi_extend_cmds, 0xff, 0xff
1290+ },
1291+ {
1292+ NAND_DEVICE("MT29F4G01ABAFDWB",
1293+ NAND_PACK_ID(0x2c, 0x34, 0, 0, 0, 0, 0, 0),
1294+ 2, 0, 3, 3,
1295+ 1, 1, 1, 2048, KB(256), KB(4), 256, 1,
1296+ &spi_cmds, &spi_addressing, &spi_status[0],
1297+ &spi_endurance, &spi_array_timing),
1298+ {
1299+ NAND_SPI_PROTECT(0xa0, 1, 2, 6),
1300+ NAND_SPI_CONFIG(0xb0, 4, 6, 1),
1301+ NAND_SPI_STATUS(0xc0, 4, 5),
1302+ NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
1303+ },
1304+ &spi_extend_cmds, 0xff, 0xff
1305+ },
1306+ {
1307+ NAND_DEVICE("GD5F4GQ4UB",
1308+ NAND_PACK_ID(0xc8, 0xd4, 0, 0, 0, 0, 0, 0),
1309+ 2, 0, 3, 3,
1310+ 1, 1, 1, 2048, KB(256), KB(4), 256, 1,
1311+ &spi_cmds, &spi_addressing, &spi_status[0],
1312+ &spi_endurance, &spi_array_timing),
1313+ {
1314+ NAND_SPI_PROTECT(0xa0, 1, 2, 6),
1315+ NAND_SPI_CONFIG(0xb0, 4, 6, 1),
1316+ NAND_SPI_STATUS(0xc0, 4, 5),
1317+ NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
1318+ },
1319+ &spi_extend_cmds, 0xff, 0xff
1320+ },
1321+ {
1322+ NAND_DEVICE("TC58CVG2S0HRAIJ",
1323+ NAND_PACK_ID(0x98, 0xED, 0x51, 0, 0, 0, 0, 0),
1324+ 3, 0, 3, 3,
1325+ 1, 1, 1, 2048, KB(256), KB(4), 256, 1,
1326+ &spi_cmds, &spi_addressing, &spi_status[0],
1327+ &spi_endurance, &spi_array_timing),
1328+ {
1329+ NAND_SPI_PROTECT(0xa0, 1, 2, 6),
1330+ NAND_SPI_CONFIG(0xb0, 4, 6, 1),
1331+ NAND_SPI_STATUS(0xc0, 4, 5),
1332+ NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
1333+ },
1334+ &spi_extend_cmds, 0xff, 0xff
1335+ },
1336+ {
1337+ NAND_DEVICE("NO-DEVICE",
1338+ NAND_PACK_ID(0, 0, 0, 0, 0, 0, 0, 0), 0, 0, 0, 0,
1339+ 0, 0, 0, 0, 0, 0, 0, 1,
1340+ &spi_cmds, &spi_addressing, &spi_status[0],
1341+ &spi_endurance, &spi_array_timing),
1342+ {
1343+ NAND_SPI_PROTECT(0xa0, 1, 2, 6),
1344+ NAND_SPI_CONFIG(0xb0, 4, 6, 0),
1345+ NAND_SPI_STATUS(0xc0, 4, 5),
1346+ NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
1347+ },
1348+ &spi_extend_cmds, 0xff, 0xff
1349+ }
1350+};
1351+
1352+u8 spi_replace_rx_cmds(u8 mode)
1353+{
1354+ u8 rx_replace_cmds[] = {0x03, 0x3b, 0x6b, 0xbb, 0xeb};
1355+
1356+ return rx_replace_cmds[mode];
1357+}
1358+
1359+u8 spi_replace_tx_cmds(u8 mode)
1360+{
1361+ u8 tx_replace_cmds[] = {0x02, 0x32};
1362+
1363+ return tx_replace_cmds[mode];
1364+}
1365+
1366+u8 spi_replace_rx_col_cycle(u8 mode)
1367+{
1368+ u8 rx_replace_col_cycle[] = {3, 3, 3, 3, 4};
1369+
1370+ return rx_replace_col_cycle[mode];
1371+}
1372+
1373+u8 spi_replace_tx_col_cycle(u8 mode)
1374+{
1375+ u8 tx_replace_col_cycle[] = {2, 2};
1376+
1377+ return tx_replace_col_cycle[mode];
1378+}
1379+
1380+struct nand_device *nand_get_device(int index)
1381+{
1382+ return &spi_nand[index].dev;
1383+}
1384+
1385diff --git a/drivers/mtd/nandx/core/nand/device_spi.h b/drivers/mtd/nandx/core/nand/device_spi.h
1386new file mode 100644
1387index 0000000000..1676b61fc8
1388--- /dev/null
1389+++ b/drivers/mtd/nandx/core/nand/device_spi.h
1390@@ -0,0 +1,132 @@
1391+/*
1392+ * Copyright (C) 2017 MediaTek Inc.
1393+ * Licensed under either
1394+ * BSD Licence, (see NOTICE for more details)
1395+ * GNU General Public License, version 2.0, (see NOTICE for more details)
1396+ */
1397+
1398+#ifndef __DEVICE_SPI_H__
1399+#define __DEVICE_SPI_H__
1400+
1401+/*
1402+ * extend commands
1403+ * @die_select: select nand device die command
1404+ * @write_enable: enable write command before write data to spi nand
1405+ * spi nand device will auto to be disable after write done
1406+ */
1407+struct spi_extend_cmds {
1408+ short die_select;
1409+ short write_enable;
1410+};
1411+
1412+/*
1413+ * protection feature register
1414+ * @addr: register address
1415+ * @wp_en_bit: write protection enable bit
1416+ * @bp_start_bit: block protection mask start bit
1417+ * @bp_end_bit: block protection mask end bit
1418+ */
1419+struct feature_protect {
1420+ u8 addr;
1421+ u8 wp_en_bit;
1422+ u8 bp_start_bit;
1423+ u8 bp_end_bit;
1424+};
1425+
1426+/*
1427+ * configuration feature register
1428+ * @addr: register address
1429+ * @ecc_en_bit: in-die ecc enable bit
1430+ * @otp_en_bit: enter otp access mode bit
1431+ * @need_qe: quad io enable bit
1432+ */
1433+struct feature_config {
1434+ u8 addr;
1435+ u8 ecc_en_bit;
1436+ u8 otp_en_bit;
1437+ u8 need_qe;
1438+};
1439+
1440+/*
1441+ * status feature register
1442+ * @addr: register address
1443+ * @ecc_start_bit: ecc status mask start bit for error bits number
1444+ * @ecc_end_bit: ecc status mask end bit for error bits number
1445+ * note that:
1446+ * operations status (ex. array busy status) could see on struct nand_status
1447+ */
1448+struct feature_status {
1449+ u8 addr;
1450+ u8 ecc_start_bit;
1451+ u8 ecc_end_bit;
1452+};
1453+
1454+/*
1455+ * character feature register
1456+ * @addr: register address
1457+ * @die_sel_bit: die select bit
1458+ * @drive_start_bit: drive strength mask start bit
1459+ * @drive_end_bit: drive strength mask end bit
1460+ */
1461+struct feature_character {
1462+ u8 addr;
1463+ u8 die_sel_bit;
1464+ u8 drive_start_bit;
1465+ u8 drive_end_bit;
1466+};
1467+
1468+/*
1469+ * spi features
1470+ * @protect: protection feature register
1471+ * @config: configuration feature register
1472+ * @status: status feature register
1473+ * @character: character feature register
1474+ */
1475+struct spi_features {
1476+ struct feature_protect protect;
1477+ struct feature_config config;
1478+ struct feature_status status;
1479+ struct feature_character character;
1480+};
1481+
1482+/*
1483+ * device_spi
1484+ * configurations of spi nand device table
1485+ * @dev: base information of nand device
1486+ * @feature: feature information for spi nand
1487+ * @extend_cmds: extended the nand base commands
1488+ * @tx_mode_mask: tx mode mask for chip write (data out to device)
1489+ * @rx_mode_mask: rx mode mask for chip read (data in from device)
1490+ */
1491+struct device_spi {
1492+ struct nand_device dev;
1493+ struct spi_features feature;
1494+ struct spi_extend_cmds *extend_cmds;
1495+
1496+ u8 tx_mode_mask;
1497+ u8 rx_mode_mask;
1498+};
1499+
1500+#define NAND_SPI_PROTECT(addr, wp_en_bit, bp_start_bit, bp_end_bit) \
1501+ {addr, wp_en_bit, bp_start_bit, bp_end_bit}
1502+
1503+#define NAND_SPI_CONFIG(addr, ecc_en_bit, otp_en_bit, need_qe) \
1504+ {addr, ecc_en_bit, otp_en_bit, need_qe}
1505+
1506+#define NAND_SPI_STATUS(addr, ecc_start_bit, ecc_end_bit) \
1507+ {addr, ecc_start_bit, ecc_end_bit}
1508+
1509+#define NAND_SPI_CHARACTER(addr, die_sel_bit, drive_start_bit, drive_end_bit) \
1510+ {addr, die_sel_bit, drive_start_bit, drive_end_bit}
1511+
1512+static inline struct device_spi *device_to_spi(struct nand_device *dev)
1513+{
1514+ return container_of(dev, struct device_spi, dev);
1515+}
1516+
1517+u8 spi_replace_rx_cmds(u8 mode);
1518+u8 spi_replace_tx_cmds(u8 mode);
1519+u8 spi_replace_rx_col_cycle(u8 mode);
1520+u8 spi_replace_tx_col_cycle(u8 mode);
1521+
1522+#endif /* __DEVICE_SPI_H__ */
1523diff --git a/drivers/mtd/nandx/core/nand/nand_spi.c b/drivers/mtd/nandx/core/nand/nand_spi.c
1524new file mode 100644
1525index 0000000000..2ae03e1cf4
1526--- /dev/null
1527+++ b/drivers/mtd/nandx/core/nand/nand_spi.c
1528@@ -0,0 +1,526 @@
1529+/*
1530+ * Copyright (C) 2017 MediaTek Inc.
1531+ * Licensed under either
1532+ * BSD Licence, (see NOTICE for more details)
1533+ * GNU General Public License, version 2.0, (see NOTICE for more details)
1534+ */
1535+
1536+#include "nandx_util.h"
1537+#include "nandx_core.h"
1538+#include "../nand_chip.h"
1539+#include "../nand_device.h"
1540+#include "../nfi.h"
1541+#include "../nand_base.h"
1542+#include "device_spi.h"
1543+#include "nand_spi.h"
1544+
1545+#define READY_TIMEOUT 500000 /* us */
1546+
1547+static int nand_spi_read_status(struct nand_base *nand)
1548+{
1549+ struct device_spi *dev = device_to_spi(nand->dev);
1550+ u8 status;
1551+
1552+ nand->get_feature(nand, dev->feature.status.addr, &status, 1);
1553+
1554+ return status;
1555+}
1556+
1557+static int nand_spi_wait_ready(struct nand_base *nand, u32 timeout)
1558+{
1559+ u64 now, end;
1560+ int status;
1561+
1562+ end = get_current_time_us() + timeout;
1563+
1564+ do {
1565+ status = nand_spi_read_status(nand);
1566+ status &= nand->dev->status->array_busy;
1567+ now = get_current_time_us();
1568+
1569+ if (now > end)
1570+ break;
1571+ } while (status);
1572+
1573+ return status ? -EBUSY : 0;
1574+}
1575+
1576+static int nand_spi_set_op_mode(struct nand_base *nand, u8 mode)
1577+{
1578+ struct nand_spi *spi_nand = base_to_spi(nand);
1579+ struct nfi *nfi = nand->nfi;
1580+ int ret = 0;
1581+
1582+ if (spi_nand->op_mode != mode) {
1583+ ret = nfi->nfi_ctrl(nfi, SNFI_CTRL_OP_MODE, (void *)&mode);
1584+ spi_nand->op_mode = mode;
1585+ }
1586+
1587+ return ret;
1588+}
1589+
1590+static int nand_spi_set_config(struct nand_base *nand, u8 addr, u8 mask,
1591+ bool en)
1592+{
1593+ u8 configs = 0;
1594+
1595+ nand->get_feature(nand, addr, &configs, 1);
1596+
1597+ if (en)
1598+ configs |= mask;
1599+ else
1600+ configs &= ~mask;
1601+
1602+ nand->set_feature(nand, addr, &configs, 1);
1603+
1604+ configs = 0;
1605+ nand->get_feature(nand, addr, &configs, 1);
1606+
1607+ return (configs & mask) == en ? 0 : -EFAULT;
1608+}
1609+
1610+static int nand_spi_die_select(struct nand_base *nand, int *row)
1611+{
1612+ struct device_spi *dev = device_to_spi(nand->dev);
1613+ struct nfi *nfi = nand->nfi;
1614+ int lun_blocks, block_pages, lun, blocks;
1615+ int page = *row, ret = 0;
1616+ u8 param = 0, die_sel;
1617+
1618+ if (nand->dev->lun_num < 2)
1619+ return 0;
1620+
1621+ block_pages = nand_block_pages(nand->dev);
1622+ lun_blocks = nand_lun_blocks(nand->dev);
1623+ blocks = div_down(page, block_pages);
1624+ lun = div_down(blocks, lun_blocks);
1625+
1626+ if (dev->extend_cmds->die_select == -1) {
1627+ die_sel = (u8)(lun << dev->feature.character.die_sel_bit);
1628+ nand->get_feature(nand, dev->feature.character.addr, &param, 1);
1629+ param |= die_sel;
1630+ nand->set_feature(nand, dev->feature.character.addr, &param, 1);
1631+ param = 0;
1632+ nand->get_feature(nand, dev->feature.character.addr, &param, 1);
1633+ ret = (param & die_sel) ? 0 : -EFAULT;
1634+ } else {
1635+ nfi->reset(nfi);
1636+ nfi->send_cmd(nfi, dev->extend_cmds->die_select);
1637+ nfi->send_addr(nfi, lun, 0, 1, 0);
1638+ nfi->trigger(nfi);
1639+ }
1640+
1641+ *row = page - (lun_blocks * block_pages) * lun;
1642+
1643+ return ret;
1644+}
1645+
1646+static int nand_spi_select_device(struct nand_base *nand, int cs)
1647+{
1648+ struct nand_spi *spi = base_to_spi(nand);
1649+ struct nand_base *parent = spi->parent;
1650+
1651+ nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
1652+
1653+ return parent->select_device(nand, cs);
1654+}
1655+
1656+static int nand_spi_reset(struct nand_base *nand)
1657+{
1658+ struct nand_spi *spi = base_to_spi(nand);
1659+ struct nand_base *parent = spi->parent;
1660+
1661+ nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
1662+
1663+ parent->reset(nand);
1664+
1665+ return nand_spi_wait_ready(nand, READY_TIMEOUT);
1666+}
1667+
1668+static int nand_spi_read_id(struct nand_base *nand, u8 *id, int count)
1669+{
1670+ struct nand_spi *spi = base_to_spi(nand);
1671+ struct nand_base *parent = spi->parent;
1672+
1673+ nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
1674+
1675+ return parent->read_id(nand, id, count);
1676+}
1677+
1678+static int nand_spi_read_param_page(struct nand_base *nand, u8 *data,
1679+ int count)
1680+{
1681+ struct device_spi *dev = device_to_spi(nand->dev);
1682+ struct nand_spi *spi = base_to_spi(nand);
1683+ struct nfi *nfi = nand->nfi;
1684+ int sectors, value;
1685+ u8 param = 0;
1686+
1687+ sectors = div_round_up(count, nfi->sector_size);
1688+
1689+ nand->get_feature(nand, dev->feature.config.addr, &param, 1);
1690+ param |= BIT(dev->feature.config.otp_en_bit);
1691+ nand->set_feature(nand, dev->feature.config.addr, &param, 1);
1692+
1693+ param = 0;
1694+ nand->get_feature(nand, dev->feature.config.addr, &param, 1);
1695+ if (param & BIT(dev->feature.config.otp_en_bit)) {
1696+ value = 0;
1697+ nfi->nfi_ctrl(nfi, NFI_CTRL_ECC, &value);
1698+ nand->dev->col_cycle = spi_replace_rx_col_cycle(spi->rx_mode);
1699+ nand->read_page(nand, 0x01);
1700+ nand->read_data(nand, 0x01, 0, sectors, data, NULL);
1701+ }
1702+
1703+ param &= ~BIT(dev->feature.config.otp_en_bit);
1704+ nand->set_feature(nand, dev->feature.config.addr, &param, 1);
1705+
1706+ return 0;
1707+}
1708+
1709+static int nand_spi_set_feature(struct nand_base *nand, u8 addr,
1710+ u8 *param,
1711+ int count)
1712+{
1713+ struct nand_spi *spi = base_to_spi(nand);
1714+ struct nand_base *parent = spi->parent;
1715+
1716+ nand->write_enable(nand);
1717+
1718+ nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
1719+
1720+ return parent->set_feature(nand, addr, param, count);
1721+}
1722+
1723+static int nand_spi_get_feature(struct nand_base *nand, u8 addr,
1724+ u8 *param,
1725+ int count)
1726+{
1727+ struct nand_spi *spi = base_to_spi(nand);
1728+ struct nand_base *parent = spi->parent;
1729+
1730+ nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
1731+
1732+ return parent->get_feature(nand, addr, param, count);
1733+}
1734+
1735+static int nand_spi_addressing(struct nand_base *nand, int *row,
1736+ int *col)
1737+{
1738+ struct nand_device *dev = nand->dev;
1739+ int plane, block, block_pages;
1740+ int ret;
1741+
1742+ ret = nand_spi_die_select(nand, row);
1743+ if (ret)
1744+ return ret;
1745+
1746+ block_pages = nand_block_pages(dev);
1747+ block = div_down(*row, block_pages);
1748+
1749+ plane = block % dev->plane_num;
1750+ *col |= (plane << dev->addressing->plane_bit_start);
1751+
1752+ return 0;
1753+}
1754+
1755+static int nand_spi_read_page(struct nand_base *nand, int row)
1756+{
1757+ struct nand_spi *spi = base_to_spi(nand);
1758+ struct nand_base *parent = spi->parent;
1759+
1760+ if (spi->op_mode == SNFI_AUTO_MODE)
1761+ nand_spi_set_op_mode(nand, SNFI_AUTO_MODE);
1762+ else
1763+ nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
1764+
1765+ parent->read_page(nand, row);
1766+
1767+ return nand_spi_wait_ready(nand, READY_TIMEOUT);
1768+}
1769+
1770+static int nand_spi_read_data(struct nand_base *nand, int row, int col,
1771+ int sectors, u8 *data, u8 *oob)
1772+{
1773+ struct device_spi *dev = device_to_spi(nand->dev);
1774+ struct nand_spi *spi = base_to_spi(nand);
1775+ struct nand_base *parent = spi->parent;
1776+ int ret;
1777+
1778+ if ((spi->rx_mode == SNFI_RX_114 || spi->rx_mode == SNFI_RX_144) &&
1779+ dev->feature.config.need_qe)
1780+ nand_spi_set_config(nand, dev->feature.config.addr,
1781+ BIT(0), true);
1782+
1783+ nand->dev->col_cycle = spi_replace_rx_col_cycle(spi->rx_mode);
1784+
1785+ nand_spi_set_op_mode(nand, SNFI_CUSTOM_MODE);
1786+
1787+ ret = parent->read_data(nand, row, col, sectors, data, oob);
1788+ if (ret)
1789+ return -ENANDREAD;
1790+
1791+ if (spi->ondie_ecc) {
1792+ ret = nand_spi_read_status(nand);
1793+ ret &= GENMASK(dev->feature.status.ecc_end_bit,
1794+ dev->feature.status.ecc_start_bit);
1795+ ret >>= dev->feature.status.ecc_start_bit;
1796+ if (ret > nand->dev->endurance->ecc_req)
1797+ return -ENANDREAD;
1798+ else if (ret > nand->dev->endurance->max_bitflips)
1799+ return -ENANDFLIPS;
1800+ }
1801+
1802+ return 0;
1803+}
1804+
1805+static int nand_spi_write_enable(struct nand_base *nand)
1806+{
1807+ struct device_spi *dev = device_to_spi(nand->dev);
1808+ struct nfi *nfi = nand->nfi;
1809+ int status;
1810+
1811+ nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
1812+
1813+ nfi->reset(nfi);
1814+ nfi->send_cmd(nfi, dev->extend_cmds->write_enable);
1815+
1816+ nfi->trigger(nfi);
1817+
1818+ status = nand_spi_read_status(nand);
1819+ status &= nand->dev->status->write_protect;
1820+
1821+ return !status;
1822+}
1823+
1824+static int nand_spi_program_data(struct nand_base *nand, int row,
1825+ int col,
1826+ u8 *data, u8 *oob)
1827+{
1828+ struct device_spi *dev = device_to_spi(nand->dev);
1829+ struct nand_spi *spi = base_to_spi(nand);
1830+
1831+ if (spi->tx_mode == SNFI_TX_114 && dev->feature.config.need_qe)
1832+ nand_spi_set_config(nand, dev->feature.config.addr,
1833+ BIT(0), true);
1834+
1835+ nand_spi_set_op_mode(nand, SNFI_CUSTOM_MODE);
1836+
1837+ nand->dev->col_cycle = spi_replace_tx_col_cycle(spi->tx_mode);
1838+
1839+ return spi->parent->program_data(nand, row, col, data, oob);
1840+}
1841+
1842+static int nand_spi_program_page(struct nand_base *nand, int row)
1843+{
1844+ struct nand_spi *spi = base_to_spi(nand);
1845+ struct nand_device *dev = nand->dev;
1846+ struct nfi *nfi = nand->nfi;
1847+
1848+ if (spi->op_mode == SNFI_AUTO_MODE)
1849+ nand_spi_set_op_mode(nand, SNFI_AUTO_MODE);
1850+ else
1851+ nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
1852+
1853+ nfi->reset(nfi);
1854+ nfi->send_cmd(nfi, dev->cmds->program_2nd);
1855+ nfi->send_addr(nfi, 0, row, dev->col_cycle, dev->row_cycle);
1856+ nfi->trigger(nfi);
1857+
1858+ return nand_spi_wait_ready(nand, READY_TIMEOUT);
1859+}
1860+
1861+static int nand_spi_erase_block(struct nand_base *nand, int row)
1862+{
1863+ struct nand_spi *spi = base_to_spi(nand);
1864+ struct nand_base *parent = spi->parent;
1865+
1866+ nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
1867+
1868+ parent->erase_block(nand, row);
1869+
1870+ return nand_spi_wait_ready(nand, READY_TIMEOUT);
1871+}
1872+
1873+static int nand_chip_spi_ctrl(struct nand_chip *chip, int cmd,
1874+ void *args)
1875+{
1876+ struct nand_base *nand = chip->nand;
1877+ struct device_spi *dev = device_to_spi(nand->dev);
1878+ struct nand_spi *spi = base_to_spi(nand);
1879+ struct nfi *nfi = nand->nfi;
1880+ int ret = 0, value = *(int *)args;
1881+
1882+ switch (cmd) {
1883+ case CHIP_CTRL_ONDIE_ECC:
1884+ spi->ondie_ecc = (bool)value;
1885+ ret = nand_spi_set_config(nand, dev->feature.config.addr,
1886+ BIT(dev->feature.config.ecc_en_bit),
1887+ spi->ondie_ecc);
1888+ break;
1889+
1890+ case SNFI_CTRL_TX_MODE:
1891+ if (value < 0 || value > SNFI_TX_114)
1892+ return -EOPNOTSUPP;
1893+
1894+ if (dev->tx_mode_mask & BIT(value)) {
1895+ spi->tx_mode = value;
1896+ nand->dev->cmds->random_out_1st = spi_replace_tx_cmds(
1897+ spi->tx_mode);
1898+ ret = nfi->nfi_ctrl(nfi, cmd, args);
1899+ }
1900+
1901+ break;
1902+
1903+ case SNFI_CTRL_RX_MODE:
1904+ if (value < 0 || value > SNFI_RX_144)
1905+ return -EOPNOTSUPP;
1906+
1907+ if (dev->rx_mode_mask & BIT(value)) {
1908+ spi->rx_mode = value;
1909+ nand->dev->cmds->program_1st = spi_replace_rx_cmds(
1910+ spi->rx_mode);
1911+ ret = nfi->nfi_ctrl(nfi, cmd, args);
1912+ }
1913+
1914+ break;
1915+
1916+ case CHIP_CTRL_OPS_CACHE:
1917+ case CHIP_CTRL_OPS_MULTI:
1918+ case CHIP_CTRL_PSLC_MODE:
1919+ case CHIP_CTRL_DDR_MODE:
1920+ case CHIP_CTRL_DRIVE_STRENGTH:
1921+ case CHIP_CTRL_TIMING_MODE:
1922+ ret = -EOPNOTSUPP;
1923+ break;
1924+
1925+ default:
1926+ ret = nfi->nfi_ctrl(nfi, cmd, args);
1927+ break;
1928+ }
1929+
1930+ return ret;
1931+}
1932+
1933+int nand_chip_spi_resume(struct nand_chip *chip)
1934+{
1935+ struct nand_base *nand = chip->nand;
1936+ struct nand_spi *spi = base_to_spi(nand);
1937+ struct device_spi *dev = device_to_spi(nand->dev);
1938+ struct nfi *nfi = nand->nfi;
1939+ struct nfi_format format;
1940+ u8 mask;
1941+
1942+ nand->reset(nand);
1943+
1944+ mask = GENMASK(dev->feature.protect.bp_end_bit,
1945+ dev->feature.protect.bp_start_bit);
1946+ nand_spi_set_config(nand, dev->feature.config.addr, mask, false);
1947+ mask = BIT(dev->feature.config.ecc_en_bit);
1948+ nand_spi_set_config(nand, dev->feature.config.addr, mask,
1949+ spi->ondie_ecc);
1950+
1951+ format.page_size = nand->dev->page_size;
1952+ format.spare_size = nand->dev->spare_size;
1953+ format.ecc_req = nand->dev->endurance->ecc_req;
1954+
1955+ return nfi->set_format(nfi, &format);
1956+}
1957+
1958+static int nand_spi_set_format(struct nand_base *nand)
1959+{
1960+ struct nfi_format format = {
1961+ nand->dev->page_size,
1962+ nand->dev->spare_size,
1963+ nand->dev->endurance->ecc_req
1964+ };
1965+
1966+ return nand->nfi->set_format(nand->nfi, &format);
1967+}
1968+
1969+struct nand_base *nand_device_init(struct nand_chip *chip)
1970+{
1971+ struct nand_base *nand;
1972+ struct nand_spi *spi;
1973+ struct device_spi *dev;
1974+ int ret;
1975+ u8 mask;
1976+
1977+ spi = mem_alloc(1, sizeof(struct nand_spi));
1978+ if (!spi) {
1979+ pr_info("alloc nand_spi fail\n");
1980+ return NULL;
1981+ }
1982+
1983+ spi->ondie_ecc = false;
1984+ spi->op_mode = SNFI_CUSTOM_MODE;
1985+ spi->rx_mode = SNFI_RX_114;
1986+ spi->tx_mode = SNFI_TX_114;
1987+
1988+ spi->parent = chip->nand;
1989+ nand = &spi->base;
1990+ nand->dev = spi->parent->dev;
1991+ nand->nfi = spi->parent->nfi;
1992+
1993+ nand->select_device = nand_spi_select_device;
1994+ nand->reset = nand_spi_reset;
1995+ nand->read_id = nand_spi_read_id;
1996+ nand->read_param_page = nand_spi_read_param_page;
1997+ nand->set_feature = nand_spi_set_feature;
1998+ nand->get_feature = nand_spi_get_feature;
1999+ nand->read_status = nand_spi_read_status;
2000+ nand->addressing = nand_spi_addressing;
2001+ nand->read_page = nand_spi_read_page;
2002+ nand->read_data = nand_spi_read_data;
2003+ nand->write_enable = nand_spi_write_enable;
2004+ nand->program_data = nand_spi_program_data;
2005+ nand->program_page = nand_spi_program_page;
2006+ nand->erase_block = nand_spi_erase_block;
2007+
2008+ chip->chip_ctrl = nand_chip_spi_ctrl;
2009+ chip->nand_type = NAND_SPI;
2010+ chip->resume = nand_chip_spi_resume;
2011+
2012+ ret = nand_detect_device(nand);
2013+ if (ret)
2014+ goto err;
2015+
2016+ nand->select_device(nand, 0);
2017+
2018+ ret = nand_spi_set_format(nand);
2019+ if (ret)
2020+ goto err;
2021+
2022+ dev = (struct device_spi *)nand->dev;
2023+
2024+ nand->dev->cmds->random_out_1st =
2025+ spi_replace_rx_cmds(spi->rx_mode);
2026+ nand->dev->cmds->program_1st =
2027+ spi_replace_tx_cmds(spi->tx_mode);
2028+
2029+ mask = GENMASK(dev->feature.protect.bp_end_bit,
2030+ dev->feature.protect.bp_start_bit);
2031+ ret = nand_spi_set_config(nand, dev->feature.protect.addr, mask, false);
2032+ if (ret)
2033+ goto err;
2034+
2035+ mask = BIT(dev->feature.config.ecc_en_bit);
2036+ ret = nand_spi_set_config(nand, dev->feature.config.addr, mask,
2037+ spi->ondie_ecc);
2038+ if (ret)
2039+ goto err;
2040+
2041+ return nand;
2042+
2043+err:
2044+ mem_free(spi);
2045+ return NULL;
2046+}
2047+
2048+void nand_exit(struct nand_base *nand)
2049+{
2050+ struct nand_spi *spi = base_to_spi(nand);
2051+
2052+ nand_base_exit(spi->parent);
2053+ mem_free(spi);
2054+}
2055diff --git a/drivers/mtd/nandx/core/nand/nand_spi.h b/drivers/mtd/nandx/core/nand/nand_spi.h
2056new file mode 100644
2057index 0000000000..e55e4de6f7
2058--- /dev/null
2059+++ b/drivers/mtd/nandx/core/nand/nand_spi.h
2060@@ -0,0 +1,35 @@
2061+/*
2062+ * Copyright (C) 2017 MediaTek Inc.
2063+ * Licensed under either
2064+ * BSD Licence, (see NOTICE for more details)
2065+ * GNU General Public License, version 2.0, (see NOTICE for more details)
2066+ */
2067+
2068+#ifndef __NAND_SPI_H__
2069+#define __NAND_SPI_H__
2070+
2071+/*
2072+ * spi nand handler
2073+ * @base: spi nand base functions
2074+ * @parent: common parent nand base functions
2075+ * @tx_mode: spi bus width of transfer to device
2076+ * @rx_mode: spi bus width of transfer from device
2077+ * @op_mode: spi nand controller (NFI) operation mode
2078+ * @ondie_ecc: spi nand on-die ecc flag
2079+ */
2080+
2081+struct nand_spi {
2082+ struct nand_base base;
2083+ struct nand_base *parent;
2084+ u8 tx_mode;
2085+ u8 rx_mode;
2086+ u8 op_mode;
2087+ bool ondie_ecc;
2088+};
2089+
2090+static inline struct nand_spi *base_to_spi(struct nand_base *base)
2091+{
2092+ return container_of(base, struct nand_spi, base);
2093+}
2094+
2095+#endif /* __NAND_SPI_H__ */
2096diff --git a/drivers/mtd/nandx/core/nand_base.c b/drivers/mtd/nandx/core/nand_base.c
2097new file mode 100644
2098index 0000000000..65998e5460
2099--- /dev/null
2100+++ b/drivers/mtd/nandx/core/nand_base.c
2101@@ -0,0 +1,304 @@
2102+/*
2103+ * Copyright (C) 2017 MediaTek Inc.
2104+ * Licensed under either
2105+ * BSD Licence, (see NOTICE for more details)
2106+ * GNU General Public License, version 2.0, (see NOTICE for more details)
2107+ */
2108+
2109+#include "nandx_util.h"
2110+#include "nandx_core.h"
2111+#include "nand_chip.h"
2112+#include "nand_device.h"
2113+#include "nfi.h"
2114+#include "nand_base.h"
2115+
2116+static int nand_base_select_device(struct nand_base *nand, int cs)
2117+{
2118+ struct nfi *nfi = nand->nfi;
2119+
2120+ nfi->reset(nfi);
2121+
2122+ return nfi->select_chip(nfi, cs);
2123+}
2124+
2125+static int nand_base_reset(struct nand_base *nand)
2126+{
2127+ struct nfi *nfi = nand->nfi;
2128+ struct nand_device *dev = nand->dev;
2129+
2130+ nfi->reset(nfi);
2131+ nfi->send_cmd(nfi, dev->cmds->reset);
2132+ nfi->trigger(nfi);
2133+
2134+ return nfi->wait_ready(nfi, NAND_WAIT_POLLING, dev->array_timing->tRST);
2135+}
2136+
2137+static int nand_base_read_id(struct nand_base *nand, u8 *id, int count)
2138+{
2139+ struct nfi *nfi = nand->nfi;
2140+ struct nand_device *dev = nand->dev;
2141+
2142+ nfi->reset(nfi);
2143+ nfi->send_cmd(nfi, dev->cmds->read_id);
2144+ nfi->wait_ready(nfi, NAND_WAIT_POLLING, dev->array_timing->tWHR);
2145+ nfi->send_addr(nfi, 0, 0, 1, 0);
2146+
2147+ return nfi->read_bytes(nfi, id, count);
2148+}
2149+
2150+static int nand_base_read_param_page(struct nand_base *nand, u8 *data,
2151+ int count)
2152+{
2153+ struct nfi *nfi = nand->nfi;
2154+ struct nand_device *dev = nand->dev;
2155+
2156+ nfi->reset(nfi);
2157+ nfi->send_cmd(nfi, dev->cmds->read_param_page);
2158+ nfi->send_addr(nfi, 0, 0, 1, 0);
2159+
2160+ nfi->wait_ready(nfi, NAND_WAIT_POLLING, dev->array_timing->tR);
2161+
2162+ return nfi->read_bytes(nfi, data, count);
2163+}
2164+
2165+static int nand_base_set_feature(struct nand_base *nand, u8 addr,
2166+ u8 *param,
2167+ int count)
2168+{
2169+ struct nfi *nfi = nand->nfi;
2170+ struct nand_device *dev = nand->dev;
2171+
2172+ nfi->reset(nfi);
2173+ nfi->send_cmd(nfi, dev->cmds->set_feature);
2174+ nfi->send_addr(nfi, addr, 0, 1, 0);
2175+
2176+ nfi->write_bytes(nfi, param, count);
2177+
2178+ return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
2179+ dev->array_timing->tFEAT);
2180+}
2181+
2182+static int nand_base_get_feature(struct nand_base *nand, u8 addr,
2183+ u8 *param,
2184+ int count)
2185+{
2186+ struct nfi *nfi = nand->nfi;
2187+ struct nand_device *dev = nand->dev;
2188+
2189+ nfi->reset(nfi);
2190+ nfi->send_cmd(nfi, dev->cmds->get_feature);
2191+ nfi->send_addr(nfi, addr, 0, 1, 0);
2192+ nfi->wait_ready(nfi, NAND_WAIT_POLLING, dev->array_timing->tFEAT);
2193+
2194+ return nfi->read_bytes(nfi, param, count);
2195+}
2196+
2197+static int nand_base_read_status(struct nand_base *nand)
2198+{
2199+ struct nfi *nfi = nand->nfi;
2200+ struct nand_device *dev = nand->dev;
2201+ u8 status = 0;
2202+
2203+ nfi->reset(nfi);
2204+ nfi->send_cmd(nfi, dev->cmds->read_status);
2205+ nfi->wait_ready(nfi, NAND_WAIT_POLLING, dev->array_timing->tWHR);
2206+ nfi->read_bytes(nfi, &status, 1);
2207+
2208+ return status;
2209+}
2210+
2211+static int nand_base_addressing(struct nand_base *nand, int *row,
2212+ int *col)
2213+{
2214+ struct nand_device *dev = nand->dev;
2215+ int lun, plane, block, page, cs = 0;
2216+ int block_pages, target_blocks, wl = 0;
2217+ int icol = *col;
2218+
2219+ if (dev->target_num > 1) {
2220+ block_pages = nand_block_pages(dev);
2221+ target_blocks = nand_target_blocks(dev);
2222+ cs = div_down(*row, block_pages * target_blocks);
2223+ *row -= cs * block_pages * target_blocks;
2224+ }
2225+
2226+ nand->select_device(nand, cs);
2227+
2228+ block_pages = nand_block_pages(dev);
2229+ block = div_down(*row, block_pages);
2230+ page = *row - block * block_pages;
2231+ plane = reminder(block, dev->plane_num);
2232+ lun = div_down(block, nand_lun_blocks(dev));
2233+
2234+ wl |= (page << dev->addressing->row_bit_start);
2235+ wl |= (block << dev->addressing->block_bit_start);
2236+ wl |= (plane << dev->addressing->plane_bit_start);
2237+ wl |= (lun << dev->addressing->lun_bit_start);
2238+
2239+ *row = wl;
2240+ *col = icol;
2241+
2242+ return 0;
2243+}
2244+
2245+static int nand_base_read_page(struct nand_base *nand, int row)
2246+{
2247+ struct nfi *nfi = nand->nfi;
2248+ struct nand_device *dev = nand->dev;
2249+
2250+ nfi->reset(nfi);
2251+ nfi->send_cmd(nfi, dev->cmds->read_1st);
2252+ nfi->send_addr(nfi, 0, row, dev->col_cycle, dev->row_cycle);
2253+ nfi->send_cmd(nfi, dev->cmds->read_2nd);
2254+ nfi->trigger(nfi);
2255+
2256+ return nfi->wait_ready(nfi, NAND_WAIT_POLLING, dev->array_timing->tR);
2257+}
2258+
2259+static int nand_base_read_data(struct nand_base *nand, int row, int col,
2260+ int sectors, u8 *data, u8 *oob)
2261+{
2262+ struct nfi *nfi = nand->nfi;
2263+ struct nand_device *dev = nand->dev;
2264+
2265+ nfi->reset(nfi);
2266+ nfi->send_cmd(nfi, dev->cmds->random_out_1st);
2267+ nfi->send_addr(nfi, col, row, dev->col_cycle, dev->row_cycle);
2268+ nfi->send_cmd(nfi, dev->cmds->random_out_2nd);
2269+ nfi->wait_ready(nfi, NAND_WAIT_POLLING, dev->array_timing->tRCBSY);
2270+
2271+ return nfi->read_sectors(nfi, data, oob, sectors);
2272+}
2273+
2274+static int nand_base_write_enable(struct nand_base *nand)
2275+{
2276+ struct nand_device *dev = nand->dev;
2277+ int status;
2278+
2279+ status = nand_base_read_status(nand);
2280+ if (status & dev->status->write_protect)
2281+ return 0;
2282+
2283+ return -ENANDWP;
2284+}
2285+
2286+static int nand_base_program_data(struct nand_base *nand, int row,
2287+ int col,
2288+ u8 *data, u8 *oob)
2289+{
2290+ struct nfi *nfi = nand->nfi;
2291+ struct nand_device *dev = nand->dev;
2292+
2293+ nfi->reset(nfi);
2294+ nfi->send_cmd(nfi, dev->cmds->program_1st);
2295+ nfi->send_addr(nfi, 0, row, dev->col_cycle, dev->row_cycle);
2296+
2297+ return nfi->write_page(nfi, data, oob);
2298+}
2299+
2300+static int nand_base_program_page(struct nand_base *nand, int row)
2301+{
2302+ struct nfi *nfi = nand->nfi;
2303+ struct nand_device *dev = nand->dev;
2304+
2305+ nfi->reset(nfi);
2306+ nfi->send_cmd(nfi, dev->cmds->program_2nd);
2307+ nfi->trigger(nfi);
2308+
2309+ return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
2310+ dev->array_timing->tPROG);
2311+}
2312+
2313+static int nand_base_erase_block(struct nand_base *nand, int row)
2314+{
2315+ struct nfi *nfi = nand->nfi;
2316+ struct nand_device *dev = nand->dev;
2317+
2318+ nfi->reset(nfi);
2319+ nfi->send_cmd(nfi, dev->cmds->erase_1st);
2320+ nfi->send_addr(nfi, 0, row, 0, dev->row_cycle);
2321+ nfi->send_cmd(nfi, dev->cmds->erase_2nd);
2322+ nfi->trigger(nfi);
2323+
2324+ return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
2325+ dev->array_timing->tBERS);
2326+}
2327+
2328+static int nand_base_read_cache(struct nand_base *nand, int row)
2329+{
2330+ struct nfi *nfi = nand->nfi;
2331+ struct nand_device *dev = nand->dev;
2332+
2333+ nfi->reset(nfi);
2334+ nfi->send_cmd(nfi, dev->cmds->read_1st);
2335+ nfi->send_addr(nfi, 0, row, dev->col_cycle, dev->row_cycle);
2336+ nfi->send_cmd(nfi, dev->cmds->read_cache);
2337+ nfi->trigger(nfi);
2338+
2339+ return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
2340+ dev->array_timing->tRCBSY);
2341+}
2342+
2343+static int nand_base_read_last(struct nand_base *nand)
2344+{
2345+ struct nfi *nfi = nand->nfi;
2346+ struct nand_device *dev = nand->dev;
2347+
2348+ nfi->reset(nfi);
2349+ nfi->send_cmd(nfi, dev->cmds->read_cache_last);
2350+ nfi->trigger(nfi);
2351+
2352+ return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
2353+ dev->array_timing->tRCBSY);
2354+}
2355+
2356+static int nand_base_program_cache(struct nand_base *nand)
2357+{
2358+ struct nfi *nfi = nand->nfi;
2359+ struct nand_device *dev = nand->dev;
2360+
2361+ nfi->reset(nfi);
2362+ nfi->send_cmd(nfi, dev->cmds->program_cache);
2363+ nfi->trigger(nfi);
2364+
2365+ return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
2366+ dev->array_timing->tPCBSY);
2367+}
2368+
2369+struct nand_base *nand_base_init(struct nand_device *dev,
2370+ struct nfi *nfi)
2371+{
2372+ struct nand_base *nand;
2373+
2374+ nand = mem_alloc(1, sizeof(struct nand_base));
2375+ if (!nand)
2376+ return NULL;
2377+
2378+ nand->dev = dev;
2379+ nand->nfi = nfi;
2380+ nand->select_device = nand_base_select_device;
2381+ nand->reset = nand_base_reset;
2382+ nand->read_id = nand_base_read_id;
2383+ nand->read_param_page = nand_base_read_param_page;
2384+ nand->set_feature = nand_base_set_feature;
2385+ nand->get_feature = nand_base_get_feature;
2386+ nand->read_status = nand_base_read_status;
2387+ nand->addressing = nand_base_addressing;
2388+ nand->read_page = nand_base_read_page;
2389+ nand->read_data = nand_base_read_data;
2390+ nand->read_cache = nand_base_read_cache;
2391+ nand->read_last = nand_base_read_last;
2392+ nand->write_enable = nand_base_write_enable;
2393+ nand->program_data = nand_base_program_data;
2394+ nand->program_page = nand_base_program_page;
2395+ nand->program_cache = nand_base_program_cache;
2396+ nand->erase_block = nand_base_erase_block;
2397+
2398+ return nand;
2399+}
2400+
2401+void nand_base_exit(struct nand_base *base)
2402+{
2403+ nfi_exit(base->nfi);
2404+ mem_free(base);
2405+}
2406diff --git a/drivers/mtd/nandx/core/nand_base.h b/drivers/mtd/nandx/core/nand_base.h
2407new file mode 100644
2408index 0000000000..13217978e5
2409--- /dev/null
2410+++ b/drivers/mtd/nandx/core/nand_base.h
2411@@ -0,0 +1,71 @@
2412+/*
2413+ * Copyright (C) 2017 MediaTek Inc.
2414+ * Licensed under either
2415+ * BSD Licence, (see NOTICE for more details)
2416+ * GNU General Public License, version 2.0, (see NOTICE for more details)
2417+ */
2418+
2419+#ifndef __NAND_BASE_H__
2420+#define __NAND_BASE_H__
2421+
2422+/*
2423+ * nand base functions
2424+ * @dev: nand device information
2425+ * @nfi: nand host controller
2426+ * @select_device: select one nand device of multi nand on chip
2427+ * @reset: reset current nand device
2428+ * @read_id: read current nand id
2429+ * @read_param_page: read current nand parameters page
2430+ * @set_feature: configure the nand device feature
2431+ * @get_feature: get the nand device feature
2432+ * @read_status: read nand device status
2433+ * @addressing: addressing the address to nand device physical address
2434+ * @read_page: read page data to device cache register
2435+ * @read_data: read data from device cache register by bus protocol
2436+ * @read_cache: nand cache read operation for data output
2437+ * @read_last: nand cache read operation for last page output
2438+ * @write_enable: enable program/erase for nand, especially spi nand
2439+ * @program_data: program data to nand device cache register
2440+ * @program_page: program page data from nand device cache register to array
2441+ * @program_cache: nand cache program operation for data input
2442+ * @erase_block: erase nand block operation
2443+ */
2444+struct nand_base {
2445+ struct nand_device *dev;
2446+ struct nfi *nfi;
2447+ int (*select_device)(struct nand_base *nand, int cs);
2448+ int (*reset)(struct nand_base *nand);
2449+ int (*read_id)(struct nand_base *nand, u8 *id, int count);
2450+ int (*read_param_page)(struct nand_base *nand, u8 *data, int count);
2451+ int (*set_feature)(struct nand_base *nand, u8 addr, u8 *param,
2452+ int count);
2453+ int (*get_feature)(struct nand_base *nand, u8 addr, u8 *param,
2454+ int count);
2455+ int (*read_status)(struct nand_base *nand);
2456+ int (*addressing)(struct nand_base *nand, int *row, int *col);
2457+
2458+ int (*read_page)(struct nand_base *nand, int row);
2459+ int (*read_data)(struct nand_base *nand, int row, int col, int sectors,
2460+ u8 *data, u8 *oob);
2461+ int (*read_cache)(struct nand_base *nand, int row);
2462+ int (*read_last)(struct nand_base *nand);
2463+
2464+ int (*write_enable)(struct nand_base *nand);
2465+ int (*program_data)(struct nand_base *nand, int row, int col, u8 *data,
2466+ u8 *oob);
2467+ int (*program_page)(struct nand_base *nand, int row);
2468+ int (*program_cache)(struct nand_base *nand);
2469+
2470+ int (*erase_block)(struct nand_base *nand, int row);
2471+};
2472+
2473+struct nand_base *nand_base_init(struct nand_device *device,
2474+ struct nfi *nfi);
2475+void nand_base_exit(struct nand_base *base);
2476+
2477+struct nand_base *nand_device_init(struct nand_chip *nand);
2478+void nand_exit(struct nand_base *nand);
2479+
2480+int nand_detect_device(struct nand_base *nand);
2481+
2482+#endif /* __NAND_BASE_H__ */
2483diff --git a/drivers/mtd/nandx/core/nand_chip.c b/drivers/mtd/nandx/core/nand_chip.c
2484new file mode 100644
2485index 0000000000..02adc6f52e
2486--- /dev/null
2487+++ b/drivers/mtd/nandx/core/nand_chip.c
2488@@ -0,0 +1,272 @@
2489+/*
2490+ * Copyright (C) 2017 MediaTek Inc.
2491+ * Licensed under either
2492+ * BSD Licence, (see NOTICE for more details)
2493+ * GNU General Public License, version 2.0, (see NOTICE for more details)
2494+ */
2495+
2496+#include "nandx_util.h"
2497+#include "nandx_core.h"
2498+#include "nand_chip.h"
2499+#include "nand_device.h"
2500+#include "nfi.h"
2501+#include "nand_base.h"
2502+
2503+static int nand_chip_read_page(struct nand_chip *chip,
2504+ struct nand_ops *ops,
2505+ int count)
2506+{
2507+ struct nand_base *nand = chip->nand;
2508+ struct nand_device *dev = nand->dev;
2509+ int i, ret = 0;
2510+ int row, col, sectors;
2511+ u8 *data, *oob;
2512+
2513+ for (i = 0; i < count; i++) {
2514+ row = ops[i].row;
2515+ col = ops[i].col;
2516+
2517+ nand->addressing(nand, &row, &col);
2518+ ops[i].status = nand->read_page(nand, row);
2519+ if (ops[i].status < 0) {
2520+ ret = ops[i].status;
2521+ continue;
2522+ }
2523+
2524+ data = ops[i].data;
2525+ oob = ops[i].oob;
2526+ sectors = ops[i].len / chip->sector_size;
2527+ ops[i].status = nand->read_data(nand, row, col,
2528+ sectors, data, oob);
2529+ if (ops[i].status > 0)
2530+ ops[i].status = ops[i].status >=
2531+ dev->endurance->max_bitflips ?
2532+ -ENANDFLIPS : 0;
2533+
2534+ ret = min_t(int, ret, ops[i].status);
2535+ }
2536+
2537+ return ret;
2538+}
2539+
2540+static int nand_chip_write_page(struct nand_chip *chip,
2541+ struct nand_ops *ops,
2542+ int count)
2543+{
2544+ struct nand_base *nand = chip->nand;
2545+ struct nand_device *dev = nand->dev;
2546+ int i, ret = 0;
2547+ int row, col;
2548+ u8 *data, *oob;
2549+
2550+ for (i = 0; i < count; i++) {
2551+ row = ops[i].row;
2552+ col = ops[i].col;
2553+
2554+ nand->addressing(nand, &row, &col);
2555+
2556+ ops[i].status = nand->write_enable(nand);
2557+ if (ops[i].status) {
2558+ pr_debug("Write Protect at %x!\n", row);
2559+ ops[i].status = -ENANDWP;
2560+ return -ENANDWP;
2561+ }
2562+
2563+ data = ops[i].data;
2564+ oob = ops[i].oob;
2565+ ops[i].status = nand->program_data(nand, row, col, data, oob);
2566+ if (ops[i].status < 0) {
2567+ ret = ops[i].status;
2568+ continue;
2569+ }
2570+
2571+ ops[i].status = nand->program_page(nand, row);
2572+ if (ops[i].status < 0) {
2573+ ret = ops[i].status;
2574+ continue;
2575+ }
2576+
2577+ ops[i].status = nand->read_status(nand);
2578+ if (ops[i].status & dev->status->program_fail)
2579+ ops[i].status = -ENANDWRITE;
2580+
2581+ ret = min_t(int, ret, ops[i].status);
2582+ }
2583+
2584+ return ret;
2585+}
2586+
2587+static int nand_chip_erase_block(struct nand_chip *chip,
2588+ struct nand_ops *ops,
2589+ int count)
2590+{
2591+ struct nand_base *nand = chip->nand;
2592+ struct nand_device *dev = nand->dev;
2593+ int i, ret = 0;
2594+ int row, col;
2595+
2596+ for (i = 0; i < count; i++) {
2597+ row = ops[i].row;
2598+ col = ops[i].col;
2599+
2600+ nand->addressing(nand, &row, &col);
2601+
2602+ ops[i].status = nand->write_enable(nand);
2603+ if (ops[i].status) {
2604+ pr_debug("Write Protect at %x!\n", row);
2605+ ops[i].status = -ENANDWP;
2606+ return -ENANDWP;
2607+ }
2608+
2609+ ops[i].status = nand->erase_block(nand, row);
2610+ if (ops[i].status < 0) {
2611+ ret = ops[i].status;
2612+ continue;
2613+ }
2614+
2615+ ops[i].status = nand->read_status(nand);
2616+ if (ops[i].status & dev->status->erase_fail)
2617+ ops[i].status = -ENANDERASE;
2618+
2619+ ret = min_t(int, ret, ops[i].status);
2620+ }
2621+
2622+ return ret;
2623+}
2624+
2625+/* read first bad mark on spare */
2626+static int nand_chip_is_bad_block(struct nand_chip *chip,
2627+ struct nand_ops *ops,
2628+ int count)
2629+{
2630+ int i, ret, value;
2631+ int status = 0;
2632+ u8 *data, *tmp_buf;
2633+
2634+ tmp_buf = mem_alloc(1, chip->page_size);
2635+ if (!tmp_buf)
2636+ return -ENOMEM;
2637+
2638+ memset(tmp_buf, 0x00, chip->page_size);
2639+
2640+ /* Disable ECC */
2641+ value = 0;
2642+ ret = chip->chip_ctrl(chip, NFI_CTRL_ECC, &value);
2643+ if (ret)
2644+ goto out;
2645+
2646+ ret = chip->read_page(chip, ops, count);
2647+ if (ret)
2648+ goto out;
2649+
2650+ for (i = 0; i < count; i++) {
2651+ data = ops[i].data;
2652+
2653+ /* temp solution for mt7622, because of no bad mark swap */
2654+ if (!memcmp(data, tmp_buf, chip->page_size)) {
2655+ ops[i].status = -ENANDBAD;
2656+ status = -ENANDBAD;
2657+
2658+ } else {
2659+ ops[i].status = 0;
2660+ }
2661+ }
2662+
2663+ /* Enable ECC */
2664+ value = 1;
2665+ ret = chip->chip_ctrl(chip, NFI_CTRL_ECC, &value);
2666+ if (ret)
2667+ goto out;
2668+
2669+ mem_free(tmp_buf);
2670+ return status;
2671+
2672+out:
2673+ mem_free(tmp_buf);
2674+ return ret;
2675+}
2676+
2677+static int nand_chip_ctrl(struct nand_chip *chip, int cmd, void *args)
2678+{
2679+ return -EOPNOTSUPP;
2680+}
2681+
2682+static int nand_chip_suspend(struct nand_chip *chip)
2683+{
2684+ return 0;
2685+}
2686+
2687+static int nand_chip_resume(struct nand_chip *chip)
2688+{
2689+ return 0;
2690+}
2691+
2692+struct nand_chip *nand_chip_init(struct nfi_resource *res)
2693+{
2694+ struct nand_chip *chip;
2695+ struct nand_base *nand;
2696+ struct nfi *nfi;
2697+
2698+ chip = mem_alloc(1, sizeof(struct nand_chip));
2699+ if (!chip) {
2700+ pr_info("nand chip alloc fail!\n");
2701+ return NULL;
2702+ }
2703+
2704+ nfi = nfi_init(res);
2705+ if (!nfi) {
2706+ pr_info("nfi init fail!\n");
2707+ goto nfi_err;
2708+ }
2709+
2710+ nand = nand_base_init(NULL, nfi);
2711+ if (!nand) {
2712+ pr_info("nand base init fail!\n");
2713+ goto base_err;
2714+ }
2715+
2716+ chip->nand = (void *)nand;
2717+ chip->read_page = nand_chip_read_page;
2718+ chip->write_page = nand_chip_write_page;
2719+ chip->erase_block = nand_chip_erase_block;
2720+ chip->is_bad_block = nand_chip_is_bad_block;
2721+ chip->chip_ctrl = nand_chip_ctrl;
2722+ chip->suspend = nand_chip_suspend;
2723+ chip->resume = nand_chip_resume;
2724+
2725+ nand = nand_device_init(chip);
2726+ if (!nand)
2727+ goto nand_err;
2728+
2729+ chip->nand = (void *)nand;
2730+ chip->plane_num = nand->dev->plane_num;
2731+ chip->block_num = nand_total_blocks(nand->dev);
2732+ chip->block_size = nand->dev->block_size;
2733+ chip->block_pages = nand_block_pages(nand->dev);
2734+ chip->page_size = nand->dev->page_size;
2735+ chip->oob_size = nfi->fdm_size * div_down(chip->page_size,
2736+ nfi->sector_size);
2737+ chip->sector_size = nfi->sector_size;
2738+ chip->sector_spare_size = nfi->sector_spare_size;
2739+ chip->min_program_pages = nand->dev->min_program_pages;
2740+ chip->ecc_strength = nfi->ecc_strength;
2741+ chip->ecc_parity_size = nfi->ecc_parity_size;
2742+ chip->fdm_ecc_size = nfi->fdm_ecc_size;
2743+ chip->fdm_reg_size = nfi->fdm_size;
2744+
2745+ return chip;
2746+
2747+nand_err:
2748+ mem_free(nand);
2749+base_err:
2750+ nfi_exit(nfi);
2751+nfi_err:
2752+ mem_free(chip);
2753+ return NULL;
2754+}
2755+
2756+void nand_chip_exit(struct nand_chip *chip)
2757+{
2758+ nand_exit(chip->nand);
2759+ mem_free(chip);
2760+}
2761diff --git a/drivers/mtd/nandx/core/nand_chip.h b/drivers/mtd/nandx/core/nand_chip.h
2762new file mode 100644
2763index 0000000000..3e9c8e6ca3
2764--- /dev/null
2765+++ b/drivers/mtd/nandx/core/nand_chip.h
2766@@ -0,0 +1,103 @@
2767+/*
2768+ * Copyright (C) 2017 MediaTek Inc.
2769+ * Licensed under either
2770+ * BSD Licence, (see NOTICE for more details)
2771+ * GNU General Public License, version 2.0, (see NOTICE for more details)
2772+ */
2773+
2774+#ifndef __NAND_CHIP_H__
2775+#define __NAND_CHIP_H__
2776+
2777+enum nand_type {
2778+ NAND_SPI,
2779+ NAND_SLC,
2780+ NAND_MLC,
2781+ NAND_TLC
2782+};
2783+
2784+/*
2785+ * nand chip operation unit
2786+ * one nand_ops indicates one row operation
2787+ * @row: nand chip row address, like as nand row
2788+ * @col: nand chip column address, like as nand column
2789+ * @len: operate data length, min is sector_size,
2790+ * max is page_size and sector_size aligned
2791+ * @status: one operation result status
2792+ * @data: data buffer for operation
2793+ * @oob: oob buffer for operation, like as nand spare area
2794+ */
2795+struct nand_ops {
2796+ int row;
2797+ int col;
2798+ int len;
2799+ int status;
2800+ void *data;
2801+ void *oob;
2802+};
2803+
2804+/*
2805+ * nand chip descriptions
2806+ * a nand chip includes a nand controller and several identical nand devices
2807+ * @nand_type: the nand type on this chip,
2808+ * the chip maybe have several nand device and the type must be same
2809+ * @plane_num: the whole plane number on the chip
2810+ * @block_num: the whole block number on the chip
2811+ * @block_size: nand device block size
2812+ * @block_pages: nand device block has page number
2813+ * @page_size: nand device page size
2814+ * @oob_size: chip out of band size, like as nand spare size,
2815+ * but restricts this:
2816+ * the size is provided by nand controller(NFI),
2817+ * because NFI would use some nand spare size
2818+ * @min_program_pages: chip needs min pages per program operations
2819+ * one page as one nand_ops
2820+ * @sector_size: chip min read size
2821+ * @sector_spare_size: spare size for sector, is spare_size/page_sectors
2822+ * @ecc_strength: ecc strength per sector_size, used for ecc calculation
2823+ * @ecc_parity_size: ecc parity size for one sector_size data
2824+ * @nand: pointer to inherited struct nand_base
2825+ * @read_page: read %count pages on chip
2826+ * @write_page: write %count pages on chip
2827+ * @erase_block: erase %count blocks on chip, one block is one nand_ops
2828+ * it is better to set nand_ops.row to block start row
2829+ * @is_bad_block: judge the %count blocks on chip if they are bad
2830+ * by vendor specification
2831+ * @chip_ctrl: control the chip features by nandx_ctrl_cmd
2832+ * @suspend: suspend nand chip
2833+ * @resume: resume nand chip
2834+ */
2835+struct nand_chip {
2836+ int nand_type;
2837+ int plane_num;
2838+ int block_num;
2839+ int block_size;
2840+ int block_pages;
2841+ int page_size;
2842+ int oob_size;
2843+
2844+ int min_program_pages;
2845+ int sector_size;
2846+ int sector_spare_size;
2847+ int ecc_strength;
2848+ int ecc_parity_size;
2849+ u32 fdm_ecc_size;
2850+ u32 fdm_reg_size;
2851+
2852+ void *nand;
2853+
2854+ int (*read_page)(struct nand_chip *chip, struct nand_ops *ops,
2855+ int count);
2856+ int (*write_page)(struct nand_chip *chip, struct nand_ops *ops,
2857+ int count);
2858+ int (*erase_block)(struct nand_chip *chip, struct nand_ops *ops,
2859+ int count);
2860+ int (*is_bad_block)(struct nand_chip *chip, struct nand_ops *ops,
2861+ int count);
2862+ int (*chip_ctrl)(struct nand_chip *chip, int cmd, void *args);
2863+ int (*suspend)(struct nand_chip *chip);
2864+ int (*resume)(struct nand_chip *chip);
2865+};
2866+
2867+struct nand_chip *nand_chip_init(struct nfi_resource *res);
2868+void nand_chip_exit(struct nand_chip *chip);
2869+#endif /* __NAND_CHIP_H__ */
2870diff --git a/drivers/mtd/nandx/core/nand_device.c b/drivers/mtd/nandx/core/nand_device.c
2871new file mode 100644
2872index 0000000000..9f6764d1bc
2873--- /dev/null
2874+++ b/drivers/mtd/nandx/core/nand_device.c
2875@@ -0,0 +1,285 @@
2876+/*
2877+ * Copyright (C) 2017 MediaTek Inc.
2878+ * Licensed under either
2879+ * BSD Licence, (see NOTICE for more details)
2880+ * GNU General Public License, version 2.0, (see NOTICE for more details)
2881+ */
2882+
2883+#include "nandx_util.h"
2884+#include "nandx_core.h"
2885+#include "nand_chip.h"
2886+#include "nand_device.h"
2887+#include "nand_base.h"
2888+
2889+#define MAX_CHIP_DEVICE 4
2890+#define PARAM_PAGE_LEN 2048
2891+#define ONFI_CRC_BASE 0x4f4e
2892+
2893+static u16 nand_onfi_crc16(u16 crc, u8 const *p, size_t len)
2894+{
2895+ int i;
2896+
2897+ while (len--) {
2898+ crc ^= *p++ << 8;
2899+
2900+ for (i = 0; i < 8; i++)
2901+ crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
2902+ }
2903+
2904+ return crc;
2905+}
2906+
2907+static inline void decode_addr_cycle(u8 addr_cycle, u8 *row_cycle,
2908+ u8 *col_cycle)
2909+{
2910+ *row_cycle = addr_cycle & 0xf;
2911+ *col_cycle = (addr_cycle >> 4) & 0xf;
2912+}
2913+
2914+static int detect_onfi(struct nand_device *dev,
2915+ struct nand_onfi_params *onfi)
2916+{
2917+ struct nand_endurance *endurance = dev->endurance;
2918+ u16 size, i, crc16;
2919+ u8 *id;
2920+
2921+ size = sizeof(struct nand_onfi_params) - sizeof(u16);
2922+
2923+ for (i = 0; i < 3; i++) {
2924+ crc16 = nand_onfi_crc16(ONFI_CRC_BASE, (u8 *)&onfi[i], size);
2925+
2926+ if (onfi[i].signature[0] == 'O' &&
2927+ onfi[i].signature[1] == 'N' &&
2928+ onfi[i].signature[2] == 'F' &&
2929+ onfi[i].signature[3] == 'I' &&
2930+ onfi[i].crc16 == crc16)
2931+ break;
2932+
2933+ /* in some spi nand, onfi signature maybe "NAND" */
2934+ if (onfi[i].signature[0] == 'N' &&
2935+ onfi[i].signature[1] == 'A' &&
2936+ onfi[i].signature[2] == 'N' &&
2937+ onfi[i].signature[3] == 'D' &&
2938+ onfi[i].crc16 == crc16)
2939+ break;
2940+ }
2941+
2942+ if (i == 3)
2943+ return -ENODEV;
2944+
2945+ memcpy(dev->name, onfi[i].model, 20);
2946+ id = onfi[i].manufacturer;
2947+ dev->id = NAND_PACK_ID(id[0], id[1], id[2], id[3], id[4], id[5], id[6],
2948+ id[7]);
2949+ dev->id_len = MAX_ID_NUM;
2950+ dev->io_width = (onfi[i].features & 1) ? NAND_IO16 : NAND_IO8;
2951+ decode_addr_cycle(onfi[i].addr_cycle, &dev->row_cycle,
2952+ &dev->col_cycle);
2953+ dev->target_num = 1;
2954+ dev->lun_num = onfi[i].lun_num;
2955+ dev->plane_num = BIT(onfi[i].plane_address_bits);
2956+ dev->block_num = onfi[i].lun_blocks / dev->plane_num;
2957+ dev->block_size = onfi[i].block_pages * onfi[i].page_size;
2958+ dev->page_size = onfi[i].page_size;
2959+ dev->spare_size = onfi[i].spare_size;
2960+
2961+ endurance->ecc_req = onfi[i].ecc_req;
2962+ endurance->pe_cycle = onfi[i].valid_block_endurance;
2963+ endurance->max_bitflips = endurance->ecc_req >> 1;
2964+
2965+ return 0;
2966+}
2967+
2968+static int detect_jedec(struct nand_device *dev,
2969+ struct nand_jedec_params *jedec)
2970+{
2971+ struct nand_endurance *endurance = dev->endurance;
2972+ u16 size, i, crc16;
2973+ u8 *id;
2974+
2975+ size = sizeof(struct nand_jedec_params) - sizeof(u16);
2976+
2977+ for (i = 0; i < 3; i++) {
2978+ crc16 = nand_onfi_crc16(ONFI_CRC_BASE, (u8 *)&jedec[i], size);
2979+
2980+ if (jedec[i].signature[0] == 'J' &&
2981+ jedec[i].signature[1] == 'E' &&
2982+ jedec[i].signature[2] == 'S' &&
2983+ jedec[i].signature[3] == 'D' &&
2984+ jedec[i].crc16 == crc16)
2985+ break;
2986+ }
2987+
2988+ if (i == 3)
2989+ return -ENODEV;
2990+
2991+ memcpy(dev->name, jedec[i].model, 20);
2992+ id = jedec[i].manufacturer;
2993+ dev->id = NAND_PACK_ID(id[0], id[1], id[2], id[3], id[4], id[5], id[6],
2994+ id[7]);
2995+ dev->id_len = MAX_ID_NUM;
2996+ dev->io_width = (jedec[i].features & 1) ? NAND_IO16 : NAND_IO8;
2997+ decode_addr_cycle(jedec[i].addr_cycle, &dev->row_cycle,
2998+ &dev->col_cycle);
2999+ dev->target_num = 1;
3000+ dev->lun_num = jedec[i].lun_num;
3001+ dev->plane_num = BIT(jedec[i].plane_address_bits);
3002+ dev->block_num = jedec[i].lun_blocks / dev->plane_num;
3003+ dev->block_size = jedec[i].block_pages * jedec[i].page_size;
3004+ dev->page_size = jedec[i].page_size;
3005+ dev->spare_size = jedec[i].spare_size;
3006+
3007+ endurance->ecc_req = jedec[i].endurance_block0[0];
3008+ endurance->pe_cycle = jedec[i].valid_block_endurance;
3009+ endurance->max_bitflips = endurance->ecc_req >> 1;
3010+
3011+ return 0;
3012+}
3013+
3014+static struct nand_device *detect_parameters_page(struct nand_base
3015+ *nand)
3016+{
3017+ struct nand_device *dev = nand->dev;
3018+ void *params;
3019+ int ret;
3020+
3021+ params = mem_alloc(1, PARAM_PAGE_LEN);
3022+ if (!params)
3023+ return NULL;
3024+
3025+ memset(params, 0, PARAM_PAGE_LEN);
3026+ ret = nand->read_param_page(nand, params, PARAM_PAGE_LEN);
3027+ if (ret < 0) {
3028+ pr_info("read parameters page fail!\n");
3029+ goto error;
3030+ }
3031+
3032+ ret = detect_onfi(dev, params);
3033+ if (ret) {
3034+ pr_info("detect onfi device fail! try to detect jedec\n");
3035+ ret = detect_jedec(dev, params);
3036+ if (ret) {
3037+ pr_info("detect jedec device fail!\n");
3038+ goto error;
3039+ }
3040+ }
3041+
3042+ mem_free(params);
3043+ return dev;
3044+
3045+error:
3046+ mem_free(params);
3047+ return NULL;
3048+}
3049+
3050+static int read_device_id(struct nand_base *nand, int cs, u8 *id)
3051+{
3052+ int i;
3053+
3054+ nand->select_device(nand, cs);
3055+ nand->reset(nand);
3056+ nand->read_id(nand, id, MAX_ID_NUM);
3057+ pr_info("device %d ID: ", cs);
3058+
3059+ for (i = 0; i < MAX_ID_NUM; i++)
3060+ pr_info("%x ", id[i]);
3061+
3062+ pr_info("\n");
3063+
3064+ return 0;
3065+}
3066+
3067+static int detect_more_device(struct nand_base *nand, u8 *id)
3068+{
3069+ u8 id_ext[MAX_ID_NUM];
3070+ int i, j, target_num = 0;
3071+
3072+ for (i = 1; i < MAX_CHIP_DEVICE; i++) {
3073+ memset(id_ext, 0xff, MAX_ID_NUM);
3074+ read_device_id(nand, i, id_ext);
3075+
3076+ for (j = 0; j < MAX_ID_NUM; j++) {
3077+ if (id_ext[j] != id[j])
3078+ goto out;
3079+ }
3080+
3081+ target_num += 1;
3082+ }
3083+
3084+out:
3085+ return target_num;
3086+}
3087+
3088+static struct nand_device *scan_device_table(const u8 *id, int id_len)
3089+{
3090+ struct nand_device *dev;
3091+ int i = 0, j;
3092+ u8 ids[MAX_ID_NUM] = {0};
3093+
3094+ while (1) {
3095+ dev = nand_get_device(i);
3096+
3097+ if (!strcmp(dev->name, "NO-DEVICE"))
3098+ break;
3099+
3100+ if (id_len < dev->id_len) {
3101+ i += 1;
3102+ continue;
3103+ }
3104+
3105+ NAND_UNPACK_ID(dev->id, ids, MAX_ID_NUM);
3106+ for (j = 0; j < dev->id_len; j++) {
3107+ if (ids[j] != id[j])
3108+ break;
3109+ }
3110+
3111+ if (j == dev->id_len)
3112+ break;
3113+
3114+ i += 1;
3115+ }
3116+
3117+ return dev;
3118+}
3119+
3120+int nand_detect_device(struct nand_base *nand)
3121+{
3122+ struct nand_device *dev;
3123+ u8 id[MAX_ID_NUM] = { 0 };
3124+ int target_num = 0;
3125+
3126+ /* Get nand device default setting for reset/read_id */
3127+ nand->dev = scan_device_table(NULL, -1);
3128+
3129+ read_device_id(nand, 0, id);
3130+ dev = scan_device_table(id, MAX_ID_NUM);
3131+
3132+ if (!strcmp(dev->name, "NO-DEVICE")) {
3133+ pr_info("device scan fail\n");
3134+ return -ENODEV;
3135+ }
3136+
3137+ /* TODO: dead code - identical "NO-DEVICE" check above already returned -ENODEV; also has a null pointer issue in this function */
3138+ if (!strcmp(dev->name, "NO-DEVICE")) {
3139+ pr_info("device scan fail, detect parameters page\n");
3140+ dev = detect_parameters_page(nand);
3141+ if (!dev) {
3142+ pr_info("detect parameters fail\n");
3143+ return -ENODEV;
3144+ }
3145+ }
3146+
3147+ if (dev->target_num > 1)
3148+ target_num = detect_more_device(nand, id);
3149+
3150+ target_num += 1;
3151+ pr_debug("chip has target device num: %d\n", target_num);
3152+
3153+ if (dev->target_num != target_num)
3154+ dev->target_num = target_num;
3155+
3156+ nand->dev = dev;
3157+
3158+ return 0;
3159+}
3160+
3161diff --git a/drivers/mtd/nandx/core/nand_device.h b/drivers/mtd/nandx/core/nand_device.h
3162new file mode 100644
3163index 0000000000..e142cf529d
3164--- /dev/null
3165+++ b/drivers/mtd/nandx/core/nand_device.h
3166@@ -0,0 +1,608 @@
3167+/*
3168+ * Copyright (C) 2017 MediaTek Inc.
3169+ * Licensed under either
3170+ * BSD Licence, (see NOTICE for more details)
3171+ * GNU General Public License, version 2.0, (see NOTICE for more details)
3172+ */
3173+
3174+#ifndef __NAND_DEVICE_H__
3175+#define __NAND_DEVICE_H__
3176+
3177+/* onfi 3.2 */
3178+struct nand_onfi_params {
3179+ /* Revision information and features block. 0 */
3180+ /*
3181+ * Byte 0: 4Fh,
3182+ * Byte 1: 4Eh,
3183+ * Byte 2: 46h,
3184+ * Byte 3: 49h,
3185+ */
3186+ u8 signature[4];
3187+ /*
3188+ * 9-15 Reserved (0)
3189+ * 8 1 = supports ONFI version 3.2
3190+ * 7 1 = supports ONFI version 3.1
3191+ * 6 1 = supports ONFI version 3.0
3192+ * 5 1 = supports ONFI version 2.3
3193+ * 4 1 = supports ONFI version 2.2
3194+ * 3 1 = supports ONFI version 2.1
3195+ * 2 1 = supports ONFI version 2.0
3196+ * 1 1 = supports ONFI version 1.0
3197+ * 0 Reserved (0)
3198+ */
3199+ u16 revision;
3200+ /*
3201+ * 13-15 Reserved (0)
3202+ * 12 1 = supports external Vpp
3203+ * 11 1 = supports Volume addressing
3204+ * 10 1 = supports NV-DDR2
3205+ * 9 1 = supports EZ NAND
3206+ * 8 1 = supports program page register clear enhancement
3207+ * 7 1 = supports extended parameter page
3208+ * 6 1 = supports multi-plane read operations
3209+ * 5 1 = supports NV-DDR
3210+ * 4 1 = supports odd to even page Copyback
3211+ * 3 1 = supports multi-plane program and erase operations
3212+ * 2 1 = supports non-sequential page programming
3213+ * 1 1 = supports multiple LUN operations
3214+ * 0 1 = supports 16-bit data bus width
3215+ */
3216+ u16 features;
3217+ /*
3218+ * 13-15 Reserved (0)
3219+ * 12 1 = supports LUN Get and LUN Set Features
3220+ * 11 1 = supports ODT Configure
3221+ * 10 1 = supports Volume Select
3222+ * 9 1 = supports Reset LUN
3223+ * 8 1 = supports Small Data Move
3224+ * 7 1 = supports Change Row Address
3225+ * 6 1 = supports Change Read Column Enhanced
3226+ * 5 1 = supports Read Unique ID
3227+ * 4 1 = supports Copyback
3228+ * 3 1 = supports Read Status Enhanced
3229+ * 2 1 = supports Get Features and Set Features
3230+ * 1 1 = supports Read Cache commands
3231+ * 0 1 = supports Page Cache Program command
3232+ */
3233+ u16 opt_cmds;
3234+ /*
3235+ * 4-7 Reserved (0)
3236+ * 3 1 = supports Multi-plane Block Erase
3237+ * 2 1 = supports Multi-plane Copyback Program
3238+ * 1 1 = supports Multi-plane Page Program
3239+ * 0 1 = supports Random Data Out
3240+ */
3241+ u8 advance_cmds;
3242+ u8 reserved0[1];
3243+ u16 extend_param_len;
3244+ u8 param_page_num;
3245+ u8 reserved1[17];
3246+
3247+ /* Manufacturer information block. 32 */
3248+ u8 manufacturer[12];
3249+ u8 model[20];
3250+ u8 jedec_id;
3251+ u16 data_code;
3252+ u8 reserved2[13];
3253+
3254+ /* Memory organization block. 80 */
3255+ u32 page_size;
3256+ u16 spare_size;
3257+ u32 partial_page_size; /* obsolete */
3258+ u16 partial_spare_size; /* obsolete */
3259+ u32 block_pages;
3260+ u32 lun_blocks;
3261+ u8 lun_num;
3262+ /*
3263+ * 4-7 Column address cycles
3264+ * 0-3 Row address cycles
3265+ */
3266+ u8 addr_cycle;
3267+ u8 cell_bits;
3268+ u16 lun_max_bad_blocks;
3269+ u16 block_endurance;
3270+ u8 target_begin_valid_blocks;
3271+ u16 valid_block_endurance;
3272+ u8 page_program_num;
3273+ u8 partial_program_attr; /* obsolete */
3274+ u8 ecc_req;
3275+ /*
3276+ * 4-7 Reserved (0)
3277+ * 0-3 Number of plane address bits
3278+ */
3279+ u8 plane_address_bits;
3280+ /*
3281+ * 6-7 Reserved (0)
3282+ * 5 1 = lower bit XNOR block address restriction
3283+ * 4 1 = read cache supported
3284+ * 3 Address restrictions for cache operations
3285+ * 2 1 = program cache supported
3286+ * 1 1 = no block address restrictions
3287+ * 0 Overlapped / concurrent multi-plane support
3288+ */
3289+ u8 multi_plane_attr;
3290+ u8 ez_nand_support;
3291+ u8 reserved3[12];
3292+
3293+ /* Electrical parameters block. 128 */
3294+ u8 io_pin_max_capacitance;
3295+ /*
3296+ * 6-15 Reserved (0)
3297+ * 5 1 = supports timing mode 5
3298+ * 4 1 = supports timing mode 4
3299+ * 3 1 = supports timing mode 3
3300+ * 2 1 = supports timing mode 2
3301+ * 1 1 = supports timing mode 1
3302+ * 0 1 = supports timing mode 0, shall be 1
3303+ */
3304+ u16 sdr_timing_mode;
3305+ u16 sdr_program_cache_timing_mode; /* obsolete */
3306+ u16 tPROG;
3307+ u16 tBERS;
3308+ u16 tR;
3309+ u16 tCCS;
3310+ /*
3311+ * 7 Reserved (0)
3312+ * 6 1 = supports NV-DDR2 timing mode 8
3313+ * 5 1 = supports NV-DDR timing mode 5
3314+ * 4 1 = supports NV-DDR timing mode 4
3315+ * 3 1 = supports NV-DDR timing mode 3
3316+ * 2 1 = supports NV-DDR timing mode 2
3317+ * 1 1 = supports NV-DDR timing mode 1
3318+ * 0 1 = supports NV-DDR timing mode 0
3319+ */
3320+ u8 nvddr_timing_mode;
3321+ /*
3322+ * 7 1 = supports timing mode 7
3323+ * 6 1 = supports timing mode 6
3324+ * 5 1 = supports timing mode 5
3325+ * 4 1 = supports timing mode 4
3326+ * 3 1 = supports timing mode 3
3327+ * 2 1 = supports timing mode 2
3328+ * 1 1 = supports timing mode 1
3329+ * 0 1 = supports timing mode 0
3330+ */
3331+ u8 nvddr2_timing_mode;
3332+ /*
3333+ * 4-7 Reserved (0)
3334+ * 3 1 = device requires Vpp enablement sequence
3335+ * 2 1 = device supports CLK stopped for data input
3336+ * 1 1 = typical capacitance
3337+ * 0 tCAD value to use
3338+ */
3339+ u8 nvddr_fetures;
3340+ u16 clk_pin_capacitance;
3341+ u16 io_pin_capacitance;
3342+ u16 input_pin_capacitance;
3343+ u8 input_pin_max_capacitance;
3344+ /*
3345+ * 3-7 Reserved (0)
3346+ * 2 1 = supports 18 Ohm drive strength
3347+ * 1 1 = supports 25 Ohm drive strength
3348+ * 0 1 = supports driver strength settings
3349+ */
3350+ u8 drive_strength;
3351+ u16 tR_multi_plane;
3352+ u16 tADL;
3353+ u16 tR_ez_nand;
3354+ /*
3355+ * 6-7 Reserved (0)
3356+ * 5 1 = external VREFQ required for >= 200 MT/s
3357+ * 4 1 = supports differential signaling for DQS
3358+ * 3 1 = supports differential signaling for RE_n
3359+ * 2 1 = supports ODT value of 30 Ohms
3360+ * 1 1 = supports matrix termination ODT
3361+ * 0 1 = supports self-termination ODT
3362+ */
3363+ u8 nvddr2_features;
3364+ u8 nvddr2_warmup_cycles;
3365+ u8 reserved4[4];
3366+
3367+ /* vendor block. 164 */
3368+ u16 vendor_revision;
3369+ u8 vendor_spec[88];
3370+
3371+ /* CRC for Parameter Page. 254 */
3372+ u16 crc16;
3373+} __packed;
3374+
3375+/* JESD230-B */
3376+struct nand_jedec_params {
3377+ /* Revision information and features block. 0 */
3378+ /*
3379+ * Byte 0:4Ah
3380+ * Byte 1:45h
3381+ * Byte 2:53h
3382+ * Byte 3:44h
3383+ */
3384+ u8 signature[4];
3385+ /*
3386+ * 3-15: Reserved (0)
3387+ * 2: 1 = supports parameter page revision 1.0 and standard revision 1.0
3388+ * 1: 1 = supports vendor specific parameter page
3389+ * 0: Reserved (0)
3390+ */
3391+ u16 revision;
3392+ /*
3393+ * 9-15 Reserved (0)
3394+ * 8: 1 = supports program page register clear enhancement
3395+ * 7: 1 = supports external Vpp
3396+ * 6: 1 = supports Toggle Mode DDR
3397+ * 5: 1 = supports Synchronous DDR
3398+ * 4: 1 = supports multi-plane read operations
3399+ * 3: 1 = supports multi-plane program and erase operations
3400+ * 2: 1 = supports non-sequential page programming
3401+ * 1: 1 = supports multiple LUN operations
3402+ * 0: 1 = supports 16-bit data bus width
3403+ */
3404+ u16 features;
3405+ /*
3406+ * 11-23: Reserved (0)
3407+ * 10: 1 = supports Synchronous Reset
3408+ * 9: 1 = supports Reset LUN (Primary)
3409+ * 8: 1 = supports Small Data Move
3410+ * 7: 1 = supports Multi-plane Copyback Program (Primary)
3411+ * 6: 1 = supports Random Data Out (Primary)
3412+ * 5: 1 = supports Read Unique ID
3413+ * 4: 1 = supports Copyback
3414+ * 3: 1 = supports Read Status Enhanced (Primary)
3415+ * 2: 1 = supports Get Features and Set Features
3416+ * 1: 1 = supports Read Cache commands
3417+ * 0: 1 = supports Page Cache Program command
3418+ */
3419+ u8 opt_cmds[3];
3420+ /*
3421+ * 8-15: Reserved (0)
3422+ * 7: 1 = supports secondary Read Status Enhanced
3423+ * 6: 1 = supports secondary Multi-plane Block Erase
3424+ * 5: 1 = supports secondary Multi-plane Copyback Program
3425+ * 4: 1 = supports secondary Multi-plane Program
3426+ * 3: 1 = supports secondary Random Data Out
3427+ * 2: 1 = supports secondary Multi-plane Copyback Read
3428+ * 1: 1 = supports secondary Multi-plane Read Cache Random
3429+ * 0: 1 = supports secondary Multi-plane Read
3430+ */
3431+ u16 secondary_cmds;
3432+ u8 param_page_num;
3433+ u8 reserved0[18];
3434+
3435+ /* Manufacturer information block. 32*/
3436+ u8 manufacturer[12];
3437+ u8 model[20];
3438+ u8 jedec_id[6];
3439+ u8 reserved1[10];
3440+
3441+ /* Memory organization block. 80 */
3442+ u32 page_size;
3443+ u16 spare_size;
3444+ u8 reserved2[6];
3445+ u32 block_pages;
3446+ u32 lun_blocks;
3447+ u8 lun_num;
3448+ /*
3449+ * 4-7 Column address cycles
3450+ * 0-3 Row address cycles
3451+ */
3452+ u8 addr_cycle;
3453+ u8 cell_bits;
3454+ u8 page_program_num;
3455+ /*
3456+ * 4-7 Reserved (0)
3457+ * 0-3 Number of plane address bits
3458+ */
3459+ u8 plane_address_bits;
3460+ /*
3461+ * 3-7: Reserved (0)
3462+ * 2: 1= read cache supported
3463+ * 1: 1 = program cache supported
3464+ * 0: 1= No multi-plane block address restrictions
3465+ */
3466+ u8 multi_plane_attr;
3467+ u8 reserved3[38];
3468+
3469+ /* Electrical parameters block. 144 */
3470+ /*
3471+ * 6-15: Reserved (0)
3472+ * 5: 1 = supports 20 ns speed grade (50 MHz)
3473+ * 4: 1 = supports 25 ns speed grade (40 MHz)
3474+ * 3: 1 = supports 30 ns speed grade (~33 MHz)
3475+ * 2: 1 = supports 35 ns speed grade (~28 MHz)
3476+ * 1: 1 = supports 50 ns speed grade (20 MHz)
3477+ * 0: 1 = supports 100 ns speed grade (10 MHz)
3478+ */
3479+ u16 sdr_speed;
3480+ /*
3481+ * 8-15: Reserved (0)
3482+ * 7: 1 = supports 5 ns speed grade (200 MHz)
3483+ * 6: 1 = supports 6 ns speed grade (~166 MHz)
3484+ * 5: 1 = supports 7.5 ns speed grade (~133 MHz)
3485+ * 4: 1 = supports 10 ns speed grade (100 MHz)
3486+ * 3: 1 = supports 12 ns speed grade (~83 MHz)
3487+ * 2: 1 = supports 15 ns speed grade (~66 MHz)
3488+ * 1: 1 = supports 25 ns speed grade (40 MHz)
3489+ * 0: 1 = supports 30 ns speed grade (~33 MHz)
3490+ */
3491+ u16 toggle_ddr_speed;
3492+ /*
3493+ * 6-15: Reserved (0)
3494+ * 5: 1 = supports 10 ns speed grade (100 MHz)
3495+ * 4: 1 = supports 12 ns speed grade (~83 MHz)
3496+ * 3: 1 = supports 15 ns speed grade (~66 MHz)
3497+ * 2: 1 = supports 20 ns speed grade (50 MHz)
3498+ * 1: 1 = supports 30 ns speed grade (~33 MHz)
3499+ * 0: 1 = supports 50 ns speed grade (20 MHz)
3500+ */
3501+ u16 sync_ddr_speed;
3502+ u8 sdr_features;
3503+ u8 toggle_ddr_features;
3504+ /*
3505+ * 2-7: Reserved (0)
3506+ * 1: Device supports CK stopped for data input
3507+ * 0: tCAD value to use
3508+ */
3509+ u8 sync_ddr_features;
3510+ u16 tPROG;
3511+ u16 tBERS;
3512+ u16 tR;
3513+ u16 tR_multi_plane;
3514+ u16 tCCS;
3515+ u16 io_pin_capacitance;
3516+ u16 input_pin_capacitance;
3517+ u16 ck_pin_capacitance;
3518+ /*
3519+ * 3-7: Reserved (0)
3520+ * 2: 1 = supports 18 ohm drive strength
3521+ * 1: 1 = supports 25 ohm drive strength
3522+ * 0: 1 = supports 35ohm/50ohm drive strength
3523+ */
3524+ u8 drive_strength;
3525+ u16 tADL;
3526+ u8 reserved4[36];
3527+
3528+ /* ECC and endurance block. 208 */
3529+ u8 target_begin_valid_blocks;
3530+ u16 valid_block_endurance;
3531+ /*
3532+ * Byte 0: Number of bits ECC correctability
3533+ * Byte 1: Codeword size
3534+ * Byte 2-3: Bad blocks maximum per LUN
3535+ * Byte 4-5: Block endurance
3536+ * Byte 6-7: Reserved (0)
3537+ */
3538+ u8 endurance_block0[8];
3539+ u8 endurance_block1[8];
3540+ u8 endurance_block2[8];
3541+ u8 endurance_block3[8];
3542+ u8 reserved5[29];
3543+
3544+ /* Reserved. 272 */
3545+ u8 reserved6[148];
3546+
3547+ /* Vendor specific block. 420 */
3548+ u16 vendor_revision;
3549+ u8 vendor_spec[88];
3550+
3551+ /* CRC for Parameter Page. 510 */
3552+ u16 crc16;
3553+} __packed;
3554+
3555+/* parallel nand io width */
3556+enum nand_io_width {
3557+ NAND_IO8,
3558+ NAND_IO16
3559+};
3560+
3561+/* all supported nand timing types */
3562+enum nand_timing_type {
3563+ NAND_TIMING_SDR,
3564+ NAND_TIMING_SYNC_DDR,
3565+ NAND_TIMING_TOGGLE_DDR,
3566+ NAND_TIMING_NVDDR2
3567+};
3568+
3569+/* nand basic commands */
3570+struct nand_cmds {
3571+ short reset;
3572+ short read_id;
3573+ short read_status;
3574+ short read_param_page;
3575+ short set_feature;
3576+ short get_feature;
3577+ short read_1st;
3578+ short read_2nd;
3579+ short random_out_1st;
3580+ short random_out_2nd;
3581+ short program_1st;
3582+ short program_2nd;
3583+ short erase_1st;
3584+ short erase_2nd;
3585+ short read_cache;
3586+ short read_cache_last;
3587+ short program_cache;
3588+};
3589+
3590+/*
3591+ * addressing for nand physical address
3592+ * @row_bit_start: row address start bit
3593+ * @block_bit_start: block address start bit
3594+ * @plane_bit_start: plane address start bit
3595+ * @lun_bit_start: lun address start bit
3596+ */
3597+struct nand_addressing {
3598+ u8 row_bit_start;
3599+ u8 block_bit_start;
3600+ u8 plane_bit_start;
3601+ u8 lun_bit_start;
3602+};
3603+
3604+/*
3605+ * nand operations status
3606+ * @array_busy: indicates device array operation busy
3607+ * @write_protect: indicates the device cannot be wrote or erased
3608+ * @erase_fail: indicates erase operation fail
3609+ * @program_fail: indicates program operation fail
3610+ */
3611+struct nand_status {
3612+ u8 array_busy;
3613+ u8 write_protect;
3614+ u8 erase_fail;
3615+ u8 program_fail;
3616+};
3617+
3618+/*
3619+ * nand endurance information
3620+ * @pe_cycle: max program/erase cycle for nand stored data stability
3621+ * @ecc_req: ecc strength required for the nand, measured per 1KB
3622+ * @max_bitflips: bitflips is ecc corrected bits,
3623+ * max_bitflips is the threshold for nand stored data stability
3624+ * if corrected bits is over max_bitflips, stored data must be moved
3625+ * to another good block
3626+ */
3627+struct nand_endurance {
3628+ int pe_cycle;
3629+ int ecc_req;
3630+ int max_bitflips;
3631+};
3632+
3633+/* wait for nand busy type */
3634+enum nand_wait_type {
3635+ NAND_WAIT_IRQ,
3636+ NAND_WAIT_POLLING,
3637+ NAND_WAIT_TWHR2,
3638+};
3639+
3640+/* each nand array operations time */
3641+struct nand_array_timing {
3642+ u16 tRST;
3643+ u16 tWHR;
3644+ u16 tR;
3645+ u16 tRCBSY;
3646+ u16 tFEAT;
3647+ u16 tPROG;
3648+ u16 tPCBSY;
3649+ u16 tBERS;
3650+ u16 tDBSY;
3651+};
3652+
3653+/* nand sdr interface timing required */
3654+struct nand_sdr_timing {
3655+ u16 tREA;
3656+ u16 tREH;
3657+ u16 tCR;
3658+ u16 tRP;
3659+ u16 tWP;
3660+ u16 tWH;
3661+ u16 tWHR;
3662+ u16 tCLS;
3663+ u16 tALS;
3664+ u16 tCLH;
3665+ u16 tALH;
3666+ u16 tWC;
3667+ u16 tRC;
3668+};
3669+
3670+/* nand onfi ddr (nvddr) interface timing required */
3671+struct nand_onfi_timing {
3672+ u16 tCAD;
3673+ u16 tWPRE;
3674+ u16 tWPST;
3675+ u16 tWRCK;
3676+ u16 tDQSCK;
3677+ u16 tWHR;
3678+};
3679+
3680+/* nand toggle ddr (toggle 1.0) interface timing required */
3681+struct nand_toggle_timing {
3682+ u16 tCS;
3683+ u16 tCH;
3684+ u16 tCAS;
3685+ u16 tCAH;
3686+ u16 tCALS;
3687+ u16 tCALH;
3688+ u16 tWP;
3689+ u16 tWPRE;
3690+ u16 tWPST;
3691+ u16 tWPSTH;
3692+ u16 tCR;
3693+ u16 tRPRE;
3694+ u16 tRPST;
3695+ u16 tRPSTH;
3696+ u16 tCDQSS;
3697+ u16 tWHR;
3698+};
3699+
3700+/* nand basic device information */
3701+struct nand_device {
3702+ u8 *name;
3703+ u64 id;
3704+ u8 id_len;
3705+ u8 io_width;
3706+ u8 row_cycle;
3707+ u8 col_cycle;
3708+ u8 target_num;
3709+ u8 lun_num;
3710+ u8 plane_num;
3711+ int block_num;
3712+ int block_size;
3713+ int page_size;
3714+ int spare_size;
3715+ int min_program_pages;
3716+ struct nand_cmds *cmds;
3717+ struct nand_addressing *addressing;
3718+ struct nand_status *status;
3719+ struct nand_endurance *endurance;
3720+ struct nand_array_timing *array_timing;
3721+};
3722+
3723+#define NAND_DEVICE(_name, _id, _id_len, _io_width, _row_cycle, \
3724+ _col_cycle, _target_num, _lun_num, _plane_num, \
3725+ _block_num, _block_size, _page_size, _spare_size, \
3726+ _min_program_pages, _cmds, _addressing, _status, \
3727+ _endurance, _array_timing) \
3728+{ \
3729+ _name, _id, _id_len, _io_width, _row_cycle, \
3730+ _col_cycle, _target_num, _lun_num, _plane_num, \
3731+ _block_num, _block_size, _page_size, _spare_size, \
3732+ _min_program_pages, _cmds, _addressing, _status, \
3733+ _endurance, _array_timing \
3734+}
3735+
3736+#define MAX_ID_NUM sizeof(u64)
3737+
3738+#define NAND_PACK_ID(id0, id1, id2, id3, id4, id5, id6, id7) \
3739+ ( \
3740+ id0 | id1 << 8 | id2 << 16 | id3 << 24 | \
3741+ (u64)id4 << 32 | (u64)id5 << 40 | \
3742+ (u64)id6 << 48 | (u64)id7 << 56 \
3743+ )
3744+
3745+#define NAND_UNPACK_ID(id, ids, len) \
3746+ do { \
3747+ int _i; \
3748+ for (_i = 0; _i < len; _i++) \
3749+ ids[_i] = id >> (_i << 3) & 0xff; \
3750+ } while (0)
3751+
3752+static inline int nand_block_pages(struct nand_device *device)
3753+{
3754+ return div_down(device->block_size, device->page_size);
3755+}
3756+
3757+static inline int nand_lun_blocks(struct nand_device *device)
3758+{
3759+ return device->plane_num * device->block_num;
3760+}
3761+
3762+static inline int nand_target_blocks(struct nand_device *device)
3763+{
3764+ return device->lun_num * device->plane_num * device->block_num;
3765+}
3766+
3767+static inline int nand_total_blocks(struct nand_device *device)
3768+{
3769+ return device->target_num * device->lun_num * device->plane_num *
3770+ device->block_num;
3771+}
3772+
3773+struct nand_device *nand_get_device(int index);
3774+#endif /* __NAND_DEVICE_H__ */
3775diff --git a/drivers/mtd/nandx/core/nfi.h b/drivers/mtd/nandx/core/nfi.h
3776new file mode 100644
3777index 0000000000..ba84e73ccc
3778--- /dev/null
3779+++ b/drivers/mtd/nandx/core/nfi.h
3780@@ -0,0 +1,51 @@
3781+/*
3782+ * Copyright (C) 2017 MediaTek Inc.
3783+ * Licensed under either
3784+ * BSD Licence, (see NOTICE for more details)
3785+ * GNU General Public License, version 2.0, (see NOTICE for more details)
3786+ */
3787+
3788+#ifndef __NFI_H__
3789+#define __NFI_H__
3790+
3791+struct nfi_format {
3792+ int page_size;
3793+ int spare_size;
3794+ int ecc_req;
3795+};
3796+
3797+struct nfi {
3798+ int sector_size;
3799+ int sector_spare_size;
3800+ int fdm_size; /*for sector*/
3801+ int fdm_ecc_size;
3802+ int ecc_strength;
3803+ int ecc_parity_size; /*for sector*/
3804+
3805+ int (*select_chip)(struct nfi *nfi, int cs);
3806+ int (*set_format)(struct nfi *nfi, struct nfi_format *format);
3807+ int (*set_timing)(struct nfi *nfi, void *timing, int type);
3808+ int (*nfi_ctrl)(struct nfi *nfi, int cmd, void *args);
3809+
3810+ int (*reset)(struct nfi *nfi);
3811+ int (*send_cmd)(struct nfi *nfi, short cmd);
3812+ int (*send_addr)(struct nfi *nfi, int col, int row,
3813+ int col_cycle, int row_cycle);
3814+ int (*trigger)(struct nfi *nfi);
3815+
3816+ int (*write_page)(struct nfi *nfi, u8 *data, u8 *fdm);
3817+ int (*write_bytes)(struct nfi *nfi, u8 *data, int count);
3818+ int (*read_sectors)(struct nfi *nfi, u8 *data, u8 *fdm,
3819+ int sectors);
3820+ int (*read_bytes)(struct nfi *nfi, u8 *data, int count);
3821+
3822+ int (*wait_ready)(struct nfi *nfi, int type, u32 timeout);
3823+
3824+ int (*enable_randomizer)(struct nfi *nfi, u32 row, bool encode);
3825+ int (*disable_randomizer)(struct nfi *nfi);
3826+};
3827+
3828+struct nfi *nfi_init(struct nfi_resource *res);
3829+void nfi_exit(struct nfi *nfi);
3830+
3831+#endif /* __NFI_H__ */
3832diff --git a/drivers/mtd/nandx/core/nfi/nfi_base.c b/drivers/mtd/nandx/core/nfi/nfi_base.c
3833new file mode 100644
3834index 0000000000..d8679d7aa3
3835--- /dev/null
3836+++ b/drivers/mtd/nandx/core/nfi/nfi_base.c
3837@@ -0,0 +1,1357 @@
3838+/*
3839+ * Copyright (C) 2017 MediaTek Inc.
3840+ * Licensed under either
3841+ * BSD Licence, (see NOTICE for more details)
3842+ * GNU General Public License, version 2.0, (see NOTICE for more details)
3843+ */
3844+
3845+/**
3846+ * nfi_base.c - the base logic for nfi to access nand flash
3847+ *
3848+ * slc/mlc/tlc could use same code to access nand
3849+ * of course, there is still some work to do
3850+ * even for spi nand, there should be a chance to integrate code together
3851+ */
3852+
3853+#include "nandx_util.h"
3854+#include "nandx_core.h"
3855+#include "../nfi.h"
3856+#include "../nand_device.h"
3857+#include "nfi_regs.h"
3858+#include "nfiecc.h"
3859+#include "nfi_base.h"
3860+
3861+static const int spare_size_mt7622[] = {
3862+ 16, 26, 27, 28
3863+};
3864+
3865+#define RAND_SEED_SHIFT(op) \
3866+ ((op) == RAND_ENCODE ? ENCODE_SEED_SHIFT : DECODE_SEED_SHIFT)
3867+#define RAND_EN(op) \
3868+ ((op) == RAND_ENCODE ? RAN_ENCODE_EN : RAN_DECODE_EN)
3869+
3870+#define SS_SEED_NUM 128
3871+static u16 ss_randomizer_seed[SS_SEED_NUM] = {
3872+ 0x576A, 0x05E8, 0x629D, 0x45A3, 0x649C, 0x4BF0, 0x2342, 0x272E,
3873+ 0x7358, 0x4FF3, 0x73EC, 0x5F70, 0x7A60, 0x1AD8, 0x3472, 0x3612,
3874+ 0x224F, 0x0454, 0x030E, 0x70A5, 0x7809, 0x2521, 0x484F, 0x5A2D,
3875+ 0x492A, 0x043D, 0x7F61, 0x3969, 0x517A, 0x3B42, 0x769D, 0x0647,
3876+ 0x7E2A, 0x1383, 0x49D9, 0x07B8, 0x2578, 0x4EEC, 0x4423, 0x352F,
3877+ 0x5B22, 0x72B9, 0x367B, 0x24B6, 0x7E8E, 0x2318, 0x6BD0, 0x5519,
3878+ 0x1783, 0x18A7, 0x7B6E, 0x7602, 0x4B7F, 0x3648, 0x2C53, 0x6B99,
3879+ 0x0C23, 0x67CF, 0x7E0E, 0x4D8C, 0x5079, 0x209D, 0x244A, 0x747B,
3880+ 0x350B, 0x0E4D, 0x7004, 0x6AC3, 0x7F3E, 0x21F5, 0x7A15, 0x2379,
3881+ 0x1517, 0x1ABA, 0x4E77, 0x15A1, 0x04FA, 0x2D61, 0x253A, 0x1302,
3882+ 0x1F63, 0x5AB3, 0x049A, 0x5AE8, 0x1CD7, 0x4A00, 0x30C8, 0x3247,
3883+ 0x729C, 0x5034, 0x2B0E, 0x57F2, 0x00E4, 0x575B, 0x6192, 0x38F8,
3884+ 0x2F6A, 0x0C14, 0x45FC, 0x41DF, 0x38DA, 0x7AE1, 0x7322, 0x62DF,
3885+ 0x5E39, 0x0E64, 0x6D85, 0x5951, 0x5937, 0x6281, 0x33A1, 0x6A32,
3886+ 0x3A5A, 0x2BAC, 0x743A, 0x5E74, 0x3B2E, 0x7EC7, 0x4FD2, 0x5D28,
3887+ 0x751F, 0x3EF8, 0x39B1, 0x4E49, 0x746B, 0x6EF6, 0x44BE, 0x6DB7
3888+};
3889+
3890+#if 0
3891+static void dump_register(void *regs)
3892+{
3893+ int i;
3894+
3895+ pr_info("registers:\n");
3896+ for (i = 0; i < 0x600; i += 0x10) {
3897+ pr_info(" address 0x%X : %X %X %X %X\n",
3898+ (u32)((unsigned long)regs + i),
3899+ (u32)readl(regs + i),
3900+ (u32)readl(regs + i + 0x4),
3901+ (u32)readl(regs + i + 0x8),
3902+ (u32)readl(regs + i + 0xC));
3903+ }
3904+}
3905+#endif
3906+
3907+static int nfi_enable_randomizer(struct nfi *nfi, u32 row, bool encode)
3908+{
3909+ struct nfi_base *nb = nfi_to_base(nfi);
3910+ enum randomizer_op op = RAND_ENCODE;
3911+ void *regs = nb->res.nfi_regs;
3912+ u32 val;
3913+
3914+ if (!encode)
3915+ op = RAND_DECODE;
3916+
3917+ /* randomizer type and reseed type setup */
3918+ val = readl(regs + NFI_CNFG);
3919+ val |= CNFG_RAND_SEL | CNFG_RESEED_SEC_EN;
3920+ writel(val, regs + NFI_CNFG);
3921+
3922+ /* randomizer seed and type setup */
3923+ val = ss_randomizer_seed[row % SS_SEED_NUM] & RAN_SEED_MASK;
3924+ val <<= RAND_SEED_SHIFT(op);
3925+ val |= RAND_EN(op);
3926+ writel(val, regs + NFI_RANDOM_CNFG);
3927+
3928+ return 0;
3929+}
3930+
3931+static int nfi_disable_randomizer(struct nfi *nfi)
3932+{
3933+ struct nfi_base *nb = nfi_to_base(nfi);
3934+
3935+ writel(0, nb->res.nfi_regs + NFI_RANDOM_CNFG);
3936+
3937+ return 0;
3938+}
3939+
3940+static int nfi_irq_handler(int irq, void *data)
3941+{
3942+ struct nfi_base *nb = (struct nfi_base *) data;
3943+ void *regs = nb->res.nfi_regs;
3944+ u16 status, en;
3945+
3946+ status = readw(regs + NFI_INTR_STA);
3947+ en = readw(regs + NFI_INTR_EN);
3948+
3949+ if (!(status & en))
3950+ return NAND_IRQ_NONE;
3951+
3952+ writew(~status & en, regs + NFI_INTR_EN);
3953+
3954+ nandx_event_complete(nb->done);
3955+
3956+ return NAND_IRQ_HANDLED;
3957+}
3958+
3959+static int nfi_select_chip(struct nfi *nfi, int cs)
3960+{
3961+ struct nfi_base *nb = nfi_to_base(nfi);
3962+
3963+ writel(cs, nb->res.nfi_regs + NFI_CSEL);
3964+
3965+ return 0;
3966+}
3967+
3968+static inline void set_op_mode(void *regs, u32 mode)
3969+{
3970+ u32 val = readl(regs + NFI_CNFG);
3971+
3972+ val &= ~CNFG_OP_MODE_MASK;
3973+ val |= mode;
3974+
3975+ writel(val, regs + NFI_CNFG);
3976+}
3977+
3978+static int nfi_reset(struct nfi *nfi)
3979+{
3980+ struct nfi_base *nb = nfi_to_base(nfi);
3981+ void *regs = nb->res.nfi_regs;
3982+ int ret, val;
3983+
3984+ /* The NFI reset to reset all registers and force the NFI
3985+ * master be early terminated
3986+ */
3987+ writel(CON_FIFO_FLUSH | CON_NFI_RST, regs + NFI_CON);
3988+
3989+ /* check state of NFI internal FSM and NAND interface FSM */
3990+ ret = readl_poll_timeout_atomic(regs + NFI_MASTER_STA, val,
3991+ !(val & MASTER_BUS_BUSY),
3992+ 10, NFI_TIMEOUT);
3993+ if (ret)
3994+ pr_info("nfi reset timeout...\n");
3995+
3996+ writel(CON_FIFO_FLUSH | CON_NFI_RST, regs + NFI_CON);
3997+ writew(STAR_DE, regs + NFI_STRDATA);
3998+
3999+ return ret;
4000+}
4001+
4002+static void bad_mark_swap(struct nfi *nfi, u8 *buf, u8 *fdm)
4003+{
4004+ struct nfi_base *nb = nfi_to_base(nfi);
4005+ u32 start_sector = div_down(nb->col, nfi->sector_size);
4006+ u32 data_mark_pos;
4007+ u8 temp;
4008+
4009+ /* raw access, no need to do swap. */
4010+ if (!nb->ecc_en)
4011+ return;
4012+
4013+ if (!buf || !fdm)
4014+ return;
4015+
4016+ if (nb->bad_mark_ctrl.sector < start_sector ||
4017+ nb->bad_mark_ctrl.sector > start_sector + nb->rw_sectors)
4018+ return;
4019+
4020+ data_mark_pos = nb->bad_mark_ctrl.position +
4021+ (nb->bad_mark_ctrl.sector - start_sector) *
4022+ nfi->sector_size;
4023+
4024+ temp = *fdm;
4025+ *fdm = *(buf + data_mark_pos);
4026+ *(buf + data_mark_pos) = temp;
4027+}
4028+
4029+static u8 *fdm_shift(struct nfi *nfi, u8 *fdm, int sector)
4030+{
4031+ struct nfi_base *nb = nfi_to_base(nfi);
4032+ u8 *pos;
4033+
4034+ if (!fdm)
4035+ return NULL;
4036+
4037+ /* map the sector's FDM data to free oob:
4038+ * the beginning of the oob area stores the FDM data of bad mark sectors
4039+ */
4040+ if (sector < nb->bad_mark_ctrl.sector)
4041+ pos = fdm + (sector + 1) * nfi->fdm_size;
4042+ else if (sector == nb->bad_mark_ctrl.sector)
4043+ pos = fdm;
4044+ else
4045+ pos = fdm + sector * nfi->fdm_size;
4046+
4047+ return pos;
4048+
4049+}
4050+
4051+static void set_bad_mark_ctrl(struct nfi_base *nb)
4052+{
4053+ int temp, page_size = nb->format.page_size;
4054+
4055+ nb->bad_mark_ctrl.bad_mark_swap = bad_mark_swap;
4056+ nb->bad_mark_ctrl.fdm_shift = fdm_shift;
4057+
4058+ temp = nb->nfi.sector_size + nb->nfi.sector_spare_size;
4059+ nb->bad_mark_ctrl.sector = div_down(page_size, temp);
4060+ nb->bad_mark_ctrl.position = reminder(page_size, temp);
4061+}
4062+
4063+/* NOTE: validate page_size here in the future */
4064+static int setup_format(struct nfi_base *nb, int spare_idx)
4065+{
4066+ struct nfi *nfi = &nb->nfi;
4067+ u32 page_size = nb->format.page_size;
4068+ u32 val;
4069+
4070+ switch (page_size) {
4071+ case 512:
4072+ val = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512;
4073+ break;
4074+
4075+ case KB(2):
4076+ if (nfi->sector_size == 512)
4077+ val = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512;
4078+ else
4079+ val = PAGEFMT_512_2K;
4080+
4081+ break;
4082+
4083+ case KB(4):
4084+ if (nfi->sector_size == 512)
4085+ val = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512;
4086+ else
4087+ val = PAGEFMT_2K_4K;
4088+
4089+ break;
4090+
4091+ case KB(8):
4092+ if (nfi->sector_size == 512)
4093+ val = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512;
4094+ else
4095+ val = PAGEFMT_4K_8K;
4096+
4097+ break;
4098+
4099+ case KB(16):
4100+ val = PAGEFMT_8K_16K;
4101+ break;
4102+
4103+ default:
4104+ pr_info("invalid page len: %d\n", page_size);
4105+ return -EINVAL;
4106+ }
4107+
4108+ val |= spare_idx << PAGEFMT_SPARE_SHIFT;
4109+ val |= nfi->fdm_size << PAGEFMT_FDM_SHIFT;
4110+ val |= nfi->fdm_ecc_size << PAGEFMT_FDM_ECC_SHIFT;
4111+ writel(val, nb->res.nfi_regs + NFI_PAGEFMT);
4112+
4113+ if (nb->custom_sector_en) {
4114+ val = nfi->sector_spare_size + nfi->sector_size;
4115+ val |= SECCUS_SIZE_EN;
4116+ writel(val, nb->res.nfi_regs + NFI_SECCUS_SIZE);
4117+ }
4118+
4119+ return 0;
4120+}
4121+
4122+static int adjust_spare(struct nfi_base *nb, int *spare)
4123+{
4124+ int multi = nb->nfi.sector_size == 512 ? 1 : 2;
4125+ int i, count = nb->caps->spare_size_num;
4126+
4127+ if (*spare >= nb->caps->spare_size[count - 1] * multi) {
4128+ *spare = nb->caps->spare_size[count - 1] * multi;
4129+ return count - 1;
4130+ }
4131+
4132+ if (*spare < nb->caps->spare_size[0] * multi)
4133+ return -EINVAL;
4134+
4135+ for (i = 1; i < count; i++) {
4136+ if (*spare < nb->caps->spare_size[i] * multi) {
4137+ *spare = nb->caps->spare_size[i - 1] * multi;
4138+ return i - 1;
4139+ }
4140+ }
4141+
4142+ return -EINVAL;
4143+}
4144+
4145+static int nfi_set_format(struct nfi *nfi, struct nfi_format *format)
4146+{
4147+ struct nfi_base *nb = nfi_to_base(nfi);
4148+ struct nfiecc *ecc = nb->ecc;
4149+ int ecc_strength = format->ecc_req;
4150+ int min_fdm, min_ecc, max_ecc;
4151+ u32 temp, page_sectors;
4152+ int spare_idx = 0;
4153+
4154+ if (!nb->buf) {
4155+#if NANDX_BULK_IO_USE_DRAM
4156+ nb->buf = NANDX_NFI_BUF_ADDR;
4157+#else
4158+ nb->buf = mem_alloc(1, format->page_size + format->spare_size);
4159+#endif
4160+ if (!nb->buf)
4161+ return -ENOMEM;
4162+ }
4163+
4164+ nb->format = *format;
4165+
4166+ /* ToBeFixed: for spi nand, now sector size is 512,
4167+ * it should be same with slc.
4168+ */
4169+ nfi->sector_size = 512;
4170+ /* format->ecc_req is the requirement per 1KB */
4171+ ecc_strength >>= 1;
4172+
4173+ page_sectors = div_down(format->page_size, nfi->sector_size);
4174+ nfi->sector_spare_size = div_down(format->spare_size, page_sectors);
4175+
4176+ if (!nb->custom_sector_en) {
4177+ spare_idx = adjust_spare(nb, &nfi->sector_spare_size);
4178+ if (spare_idx < 0)
4179+ return -EINVAL;
4180+ }
4181+
4182+ /* calculate ecc strength and fdm size */
4183+ temp = (nfi->sector_spare_size - nb->caps->max_fdm_size) * 8;
4184+ min_ecc = div_down(temp, nb->caps->ecc_parity_bits);
4185+ min_ecc = ecc->adjust_strength(ecc, min_ecc);
4186+ if (min_ecc < 0)
4187+ return -EINVAL;
4188+
4189+ temp = div_up(nb->res.min_oob_req, page_sectors);
4190+ temp = (nfi->sector_spare_size - temp) * 8;
4191+ max_ecc = div_down(temp, nb->caps->ecc_parity_bits);
4192+ max_ecc = ecc->adjust_strength(ecc, max_ecc);
4193+ if (max_ecc < 0)
4194+ return -EINVAL;
4195+
4196+ temp = div_up(temp * nb->caps->ecc_parity_bits, 8);
4197+ temp = nfi->sector_spare_size - temp;
4198+ min_fdm = min_t(u32, temp, (u32)nb->caps->max_fdm_size);
4199+
4200+ if (ecc_strength > max_ecc) {
4201+ pr_info("required ecc strength %d, max supported %d\n",
4202+ ecc_strength, max_ecc);
4203+ nfi->ecc_strength = max_ecc;
4204+ nfi->fdm_size = min_fdm;
4205+ } else if (format->ecc_req < min_ecc) {
4206+ nfi->ecc_strength = min_ecc;
4207+ nfi->fdm_size = nb->caps->max_fdm_size;
4208+ } else {
4209+ ecc_strength = ecc->adjust_strength(ecc, ecc_strength);
4210+ if (ecc_strength < 0)
4211+ return -EINVAL;
4212+
4213+ nfi->ecc_strength = ecc_strength;
4214+ temp = div_up(ecc_strength * nb->caps->ecc_parity_bits, 8);
4215+ nfi->fdm_size = nfi->sector_spare_size - temp;
4216+ }
4217+
4218+ nb->page_sectors = div_down(format->page_size, nfi->sector_size);
4219+
4220+ /* some ICs have a fixed fdm_ecc_size; if not assigned, set to fdm_size */
4221+ nfi->fdm_ecc_size = nb->caps->fdm_ecc_size ? : nfi->fdm_size;
4222+
4223+ nfi->ecc_parity_size = div_up(nfi->ecc_strength *
4224+ nb->caps->ecc_parity_bits,
4225+ 8);
4226+ set_bad_mark_ctrl(nb);
4227+
4228+ pr_debug("sector_size: %d\n", nfi->sector_size);
4229+ pr_debug("sector_spare_size: %d\n", nfi->sector_spare_size);
4230+ pr_debug("fdm_size: %d\n", nfi->fdm_size);
4231+ pr_debug("fdm_ecc_size: %d\n", nfi->fdm_ecc_size);
4232+ pr_debug("ecc_strength: %d\n", nfi->ecc_strength);
4233+ pr_debug("ecc_parity_size: %d\n", nfi->ecc_parity_size);
4234+
4235+ return setup_format(nb, spare_idx);
4236+}
4237+
4238+static int nfi_ctrl(struct nfi *nfi, int cmd, void *args)
4239+{
4240+ struct nfi_base *nb = nfi_to_base(nfi);
4241+ int ret = 0;
4242+
4243+ switch (cmd) {
4244+ case NFI_CTRL_DMA:
4245+ nb->dma_en = *(bool *)args;
4246+ break;
4247+
4248+ case NFI_CTRL_AUTOFORMAT:
4249+ nb->auto_format = *(bool *)args;
4250+ break;
4251+
4252+ case NFI_CTRL_NFI_IRQ:
4253+ nb->nfi_irq_en = *(bool *)args;
4254+ break;
4255+
4256+ case NFI_CTRL_PAGE_IRQ:
4257+ nb->page_irq_en = *(bool *)args;
4258+ break;
4259+
4260+ case NFI_CTRL_BAD_MARK_SWAP:
4261+ nb->bad_mark_swap_en = *(bool *)args;
4262+ break;
4263+
4264+ case NFI_CTRL_ECC:
4265+ nb->ecc_en = *(bool *)args;
4266+ break;
4267+
4268+ case NFI_CTRL_ECC_MODE:
4269+ nb->ecc_mode = *(enum nfiecc_mode *)args;
4270+ break;
4271+
4272+ case NFI_CTRL_ECC_CLOCK:
4273+ /* NOTE: it seems there is nothing that needs to be done;
4274+ * if a new IC needs it, just add that logic
4275+ */
4276+ nb->ecc_clk_en = *(bool *)args;
4277+ break;
4278+
4279+ case NFI_CTRL_ECC_IRQ:
4280+ nb->ecc_irq_en = *(bool *)args;
4281+ break;
4282+
4283+ case NFI_CTRL_ECC_DECODE_MODE:
4284+ nb->ecc_deccon = *(enum nfiecc_deccon *)args;
4285+ break;
4286+
4287+ default:
4288+ pr_info("invalid arguments.\n");
4289+ ret = -EOPNOTSUPP;
4290+ break;
4291+ }
4292+
4293+ pr_debug("%s: set cmd(%d) to %d\n", __func__, cmd, *(int *)args);
4294+ return ret;
4295+}
4296+
4297+static int nfi_send_cmd(struct nfi *nfi, short cmd)
4298+{
4299+ struct nfi_base *nb = nfi_to_base(nfi);
4300+ void *regs = nb->res.nfi_regs;
4301+ int ret;
4302+ u32 val;
4303+
4304+ pr_debug("%s: cmd 0x%x\n", __func__, cmd);
4305+
4306+ if (cmd < 0)
4307+ return -EINVAL;
4308+
4309+ set_op_mode(regs, nb->op_mode);
4310+
4311+ writel(cmd, regs + NFI_CMD);
4312+
4313+ ret = readl_poll_timeout_atomic(regs + NFI_STA,
4314+ val, !(val & STA_CMD),
4315+ 5, NFI_TIMEOUT);
4316+ if (ret)
4317+ pr_info("send cmd 0x%x timeout\n", cmd);
4318+
4319+ return ret;
4320+}
4321+
4322+static int nfi_send_addr(struct nfi *nfi, int col, int row,
4323+ int col_cycle, int row_cycle)
4324+{
4325+ struct nfi_base *nb = nfi_to_base(nfi);
4326+ void *regs = nb->res.nfi_regs;
4327+ int ret;
4328+ u32 val;
4329+
4330+ pr_debug("%s: col 0x%x, row 0x%x, col_cycle 0x%x, row_cycle 0x%x\n",
4331+ __func__, col, row, col_cycle, row_cycle);
4332+
4333+ nb->col = col;
4334+ nb->row = row;
4335+
4336+ writel(col, regs + NFI_COLADDR);
4337+ writel(row, regs + NFI_ROWADDR);
4338+ writel(col_cycle | (row_cycle << ROW_SHIFT), regs + NFI_ADDRNOB);
4339+
4340+ ret = readl_poll_timeout_atomic(regs + NFI_STA,
4341+ val, !(val & STA_ADDR),
4342+ 5, NFI_TIMEOUT);
4343+ if (ret)
4344+ pr_info("send address timeout\n");
4345+
4346+ return ret;
4347+}
4348+
4349+static int nfi_trigger(struct nfi *nfi)
4350+{
4351+ /* Nothing need to do. */
4352+ return 0;
4353+}
4354+
4355+static inline int wait_io_ready(void *regs)
4356+{
4357+ u32 val;
4358+ int ret;
4359+
4360+ ret = readl_poll_timeout_atomic(regs + NFI_PIO_DIRDY,
4361+ val, val & PIO_DI_RDY,
4362+ 2, NFI_TIMEOUT);
4363+ if (ret)
4364+ pr_info("wait io ready timeout\n");
4365+
4366+ return ret;
4367+}
4368+
4369+static int wait_ready_irq(struct nfi_base *nb, u32 timeout)
4370+{
4371+ void *regs = nb->res.nfi_regs;
4372+ int ret;
4373+ u32 val;
4374+
4375+ writel(0xf1, regs + NFI_CNRNB);
4376+ nandx_event_init(nb->done);
4377+
4378+ writel(INTR_BUSY_RETURN_EN, (void *)(regs + NFI_INTR_EN));
4379+
4380+ /**
4381+ * check if nand is already ready, to
4382+ * avoid an issue caused by a missing irq-event.
4383+ */
4384+ val = readl(regs + NFI_STA);
4385+ if (val & STA_BUSY2READY) {
4386+ readl(regs + NFI_INTR_STA);
4387+ writel(0, (void *)(regs + NFI_INTR_EN));
4388+ return 0;
4389+ }
4390+
4391+ ret = nandx_event_wait_complete(nb->done, timeout);
4392+
4393+ writew(0, regs + NFI_CNRNB);
4394+ return ret;
4395+}
4396+
4397+static void wait_ready_twhr2(struct nfi_base *nb, u32 timeout)
4398+{
4399+ /* NOTE: this for tlc */
4400+}
4401+
4402+static int wait_ready_poll(struct nfi_base *nb, u32 timeout)
4403+{
4404+ void *regs = nb->res.nfi_regs;
4405+ int ret;
4406+ u32 val;
4407+
4408+ writel(0x21, regs + NFI_CNRNB);
4409+ ret = readl_poll_timeout_atomic(regs + NFI_STA, val,
4410+ val & STA_BUSY2READY,
4411+ 2, timeout);
4412+ writew(0, regs + NFI_CNRNB);
4413+
4414+ return ret;
4415+}
4416+
4417+static int nfi_wait_ready(struct nfi *nfi, int type, u32 timeout)
4418+{
4419+ struct nfi_base *nb = nfi_to_base(nfi);
4420+ int ret;
4421+
4422+ switch (type) {
4423+ case NAND_WAIT_IRQ:
4424+ if (nb->nfi_irq_en)
4425+ ret = wait_ready_irq(nb, timeout);
4426+ else
4427+ ret = -EINVAL;
4428+
4429+ break;
4430+
4431+ case NAND_WAIT_POLLING:
4432+ ret = wait_ready_poll(nb, timeout);
4433+ break;
4434+
4435+ case NAND_WAIT_TWHR2:
4436+ wait_ready_twhr2(nb, timeout);
4437+ ret = 0;
4438+ break;
4439+
4440+ default:
4441+ ret = -EINVAL;
4442+ break;
4443+ }
4444+
4445+ if (ret)
4446+ pr_info("%s: type 0x%x, timeout 0x%x\n",
4447+ __func__, type, timeout);
4448+
4449+ return ret;
4450+}
4451+
4452+static int enable_ecc_decode(struct nfi_base *nb, int sectors)
4453+{
4454+ struct nfi *nfi = &nb->nfi;
4455+ struct nfiecc *ecc = nb->ecc;
4456+
4457+ ecc->config.op = ECC_DECODE;
4458+ ecc->config.mode = nb->ecc_mode;
4459+ ecc->config.deccon = nb->ecc_deccon;
4460+ ecc->config.sectors = sectors;
4461+ ecc->config.len = nfi->sector_size + nfi->fdm_ecc_size;
4462+ ecc->config.strength = nfi->ecc_strength;
4463+
4464+ return ecc->enable(ecc);
4465+}
4466+
4467+static int enable_ecc_encode(struct nfi_base *nb)
4468+{
4469+ struct nfiecc *ecc = nb->ecc;
4470+ struct nfi *nfi = &nb->nfi;
4471+
4472+ ecc->config.op = ECC_ENCODE;
4473+ ecc->config.mode = nb->ecc_mode;
4474+ ecc->config.len = nfi->sector_size + nfi->fdm_ecc_size;
4475+ ecc->config.strength = nfi->ecc_strength;
4476+
4477+ return ecc->enable(ecc);
4478+}
4479+
4480+static void read_fdm(struct nfi_base *nb, u8 *fdm, int start_sector,
4481+ int sectors)
4482+{
4483+ void *regs = nb->res.nfi_regs;
4484+ int j, i = start_sector;
4485+ u32 vall, valm;
4486+ u8 *buf = fdm;
4487+
4488+ for (; i < start_sector + sectors; i++) {
4489+ if (nb->bad_mark_swap_en)
4490+ buf = nb->bad_mark_ctrl.fdm_shift(&nb->nfi, fdm, i);
4491+
4492+ vall = readl(regs + NFI_FDML(i));
4493+ valm = readl(regs + NFI_FDMM(i));
4494+
4495+ for (j = 0; j < nb->nfi.fdm_size; j++)
4496+ *buf++ = (j >= 4 ? valm : vall) >> ((j & 3) << 3);
4497+ }
4498+}
4499+
4500+static void write_fdm(struct nfi_base *nb, u8 *fdm)
4501+{
4502+ struct nfi *nfi = &nb->nfi;
4503+ void *regs = nb->res.nfi_regs;
4504+ u32 vall, valm;
4505+ int i, j;
4506+ u8 *buf = fdm;
4507+
4508+ for (i = 0; i < nb->page_sectors; i++) {
4509+ if (nb->bad_mark_swap_en)
4510+ buf = nb->bad_mark_ctrl.fdm_shift(nfi, fdm, i);
4511+
4512+ vall = 0;
4513+ for (j = 0; j < 4; j++)
4514+ vall |= (j < nfi->fdm_size ? *buf++ : 0xff) << (j * 8);
4515+ writel(vall, regs + NFI_FDML(i));
4516+
4517+ valm = 0;
4518+ for (j = 0; j < 4; j++)
4519+ valm |= (j < nfi->fdm_size ? *buf++ : 0xff) << (j * 8);
4520+ writel(valm, regs + NFI_FDMM(i));
4521+ }
4522+}
4523+
4524+/* NOTE: pio not use auto format */
4525+static int pio_rx_data(struct nfi_base *nb, u8 *data, u8 *fdm,
4526+ int sectors)
4527+{
4528+ struct nfiecc_status ecc_status;
4529+ struct nfi *nfi = &nb->nfi;
4530+ void *regs = nb->res.nfi_regs;
4531+ u32 val, bitflips = 0;
4532+ int len, ret, i;
4533+ u8 *buf;
4534+
4535+ val = readl(regs + NFI_CNFG) | CNFG_BYTE_RW;
4536+ writel(val, regs + NFI_CNFG);
4537+
4538+ len = nfi->sector_size + nfi->sector_spare_size;
4539+ len *= sectors;
4540+
4541+ for (i = 0; i < len; i++) {
4542+ ret = wait_io_ready(regs);
4543+ if (ret)
4544+ return ret;
4545+
4546+ nb->buf[i] = readb(regs + NFI_DATAR);
4547+ }
4548+
4549+ /* TODO: do error handle for autoformat setting of pio */
4550+ if (nb->ecc_en) {
4551+ for (i = 0; i < sectors; i++) {
4552+ buf = nb->buf + i * (nfi->sector_size +
4553+ nfi->sector_spare_size);
4554+ ret = nb->ecc->correct_data(nb->ecc, &ecc_status,
4555+ buf, i);
4556+ if (data)
4557+ memcpy(data + i * nfi->sector_size,
4558+ buf, nfi->sector_size);
4559+ if (fdm)
4560+ memcpy(fdm + i * nfi->fdm_size,
4561+ buf + nfi->sector_size, nfi->fdm_size);
4562+ if (ret) {
4563+ ret = nb->ecc->decode_status(nb->ecc, i, 1);
4564+ if (ret < 0)
4565+ return ret;
4566+
4567+ bitflips = max_t(int, (int)bitflips, ret);
4568+ }
4569+ }
4570+
4571+ return bitflips;
4572+ }
4573+
4574+ /* raw read: only data is non-null, and its length should be len */
4575+ if (data)
4576+ memcpy(data, nb->buf, len);
4577+
4578+ return 0;
4579+}
4580+
4581+static int pio_tx_data(struct nfi_base *nb, u8 *data, u8 *fdm,
4582+ int sectors)
4583+{
4584+ struct nfi *nfi = &nb->nfi;
4585+ void *regs = nb->res.nfi_regs;
4586+ u32 i, val;
4587+ int len, ret;
4588+
4589+ val = readw(regs + NFI_CNFG) | CNFG_BYTE_RW;
4590+ writew(val, regs + NFI_CNFG);
4591+
4592+ len = nb->ecc_en ? nfi->sector_size :
4593+ nfi->sector_size + nfi->sector_spare_size;
4594+ len *= sectors;
4595+
4596+ /* data shouldn't be null,
4597+ * and if ecc is enabled, fdm has been written in the prepare process
4598+ */
4599+ for (i = 0; i < len; i++) {
4600+ ret = wait_io_ready(regs);
4601+ if (ret)
4602+ return ret;
4603+ writeb(data[i], regs + NFI_DATAW);
4604+ }
4605+
4606+ return 0;
4607+}
4608+
4609+static bool is_page_empty(struct nfi_base *nb, u8 *data, u8 *fdm,
4610+ int sectors)
4611+{
4612+ u32 empty = readl(nb->res.nfi_regs + NFI_STA) & STA_EMP_PAGE;
4613+
4614+ if (empty) {
4615+ pr_info("empty page!\n");
4616+ return true;
4617+ }
4618+
4619+ return false;
4620+}
4621+
4622+static int rw_prepare(struct nfi_base *nb, int sectors, u8 *data,
4623+ u8 *fdm, bool read)
4624+{
4625+ void *regs = nb->res.nfi_regs;
4626+ u32 len = nb->nfi.sector_size * sectors;
4627+ bool irq_en = nb->dma_en && nb->nfi_irq_en;
4628+ void *dma_addr;
4629+ u32 val;
4630+ int ret;
4631+
4632+ nb->rw_sectors = sectors;
4633+
4634+ if (irq_en) {
4635+ nandx_event_init(nb->done);
4636+ writel(INTR_AHB_DONE_EN, regs + NFI_INTR_EN);
4637+ }
4638+
4639+ val = readw(regs + NFI_CNFG);
4640+ if (read)
4641+ val |= CNFG_READ_EN;
4642+ else
4643+ val &= ~CNFG_READ_EN;
4644+
4645+ /* by design, auto format is enabled when ecc is enabled */
4646+ if (nb->ecc_en) {
4647+ val |= CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
4648+
4649+ if (read)
4650+ ret = enable_ecc_decode(nb, sectors);
4651+ else
4652+ ret = enable_ecc_encode(nb);
4653+
4654+ if (ret) {
4655+ pr_info("%s: ecc enable %s fail!\n", __func__,
4656+ read ? "decode" : "encode");
4657+ return ret;
4658+ }
4659+ } else {
4660+ val &= ~(CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN);
4661+ }
4662+
4663+ if (!read && nb->bad_mark_swap_en)
4664+ nb->bad_mark_ctrl.bad_mark_swap(&nb->nfi, data, fdm);
4665+
4666+ if (!nb->ecc_en && read)
4667+ len += sectors * nb->nfi.sector_spare_size;
4668+
4669+ if (nb->dma_en) {
4670+ val |= CNFG_DMA_BURST_EN | CNFG_AHB;
4671+
4672+ if (read) {
4673+ dma_addr = (void *)(unsigned long)nandx_dma_map(
4674+ nb->res.dev, nb->buf,
4675+ (u64)len, NDMA_FROM_DEV);
4676+ } else {
4677+ memcpy(nb->buf, data, len);
4678+ dma_addr = (void *)(unsigned long)nandx_dma_map(
4679+ nb->res.dev, nb->buf,
4680+ (u64)len, NDMA_TO_DEV);
4681+ }
4682+
4683+ writel((unsigned long)dma_addr, (void *)regs + NFI_STRADDR);
4684+
4685+ nb->access_len = len;
4686+ nb->dma_addr = dma_addr;
4687+ }
4688+
4689+ if (nb->ecc_en && !read && fdm)
4690+ write_fdm(nb, fdm);
4691+
4692+ writew(val, regs + NFI_CNFG);
4693+ /* setup R/W sector number */
4694+ writel(sectors << CON_SEC_SHIFT, regs + NFI_CON);
4695+
4696+ return 0;
4697+}
4698+
4699+static void rw_trigger(struct nfi_base *nb, bool read)
4700+{
4701+ void *regs = nb->res.nfi_regs;
4702+ u32 val;
4703+
4704+ val = read ? CON_BRD : CON_BWR;
4705+ val |= readl(regs + NFI_CON);
4706+ writel(val, regs + NFI_CON);
4707+
4708+ writel(STAR_EN, regs + NFI_STRDATA);
4709+}
4710+
4711+static int rw_wait_done(struct nfi_base *nb, int sectors, bool read)
4712+{
4713+ void *regs = nb->res.nfi_regs;
4714+ bool irq_en = nb->dma_en && nb->nfi_irq_en;
4715+ int ret;
4716+ u32 val;
4717+
4718+ if (irq_en) {
4719+ ret = nandx_event_wait_complete(nb->done, NFI_TIMEOUT);
4720+ if (!ret) {
4721+ writew(0, regs + NFI_INTR_EN);
4722+ return ret;
4723+ }
4724+ }
4725+
4726+ if (read) {
4727+ ret = readl_poll_timeout_atomic(regs + NFI_BYTELEN, val,
4728+ ADDRCNTR_SEC(val) >=
4729+ (u32)sectors,
4730+ 2, NFI_TIMEOUT);
4731+ /* HW issue: if not wait ahb done, need polling bus busy */
4732+ if (!ret && !irq_en)
4733+ ret = readl_poll_timeout_atomic(regs + NFI_MASTER_STA,
4734+ val,
4735+ !(val &
4736+ MASTER_BUS_BUSY),
4737+ 2, NFI_TIMEOUT);
4738+ } else {
4739+ ret = readl_poll_timeout_atomic(regs + NFI_ADDRCNTR, val,
4740+ ADDRCNTR_SEC(val) >=
4741+ (u32)sectors,
4742+ 2, NFI_TIMEOUT);
4743+ }
4744+
4745+ if (ret) {
4746+ pr_info("do page %s timeout\n", read ? "read" : "write");
4747+ return ret;
4748+ }
4749+
4750+ if (read && nb->ecc_en) {
4751+ ret = nb->ecc->wait_done(nb->ecc);
4752+ if (ret)
4753+ return ret;
4754+
4755+ return nb->ecc->decode_status(nb->ecc, 0, sectors);
4756+ }
4757+
4758+ return 0;
4759+}
4760+
4761+static int rw_data(struct nfi_base *nb, u8 *data, u8 *fdm, int sectors,
4762+ bool read)
4763+{
4764+ if (read && nb->dma_en && nb->ecc_en && fdm)
4765+ read_fdm(nb, fdm, 0, sectors);
4766+
4767+ if (!nb->dma_en) {
4768+ if (read)
4769+ return pio_rx_data(nb, data, fdm, sectors);
4770+
4771+ return pio_tx_data(nb, data, fdm, sectors);
4772+ }
4773+
4774+ return 0;
4775+}
4776+
4777+static void rw_complete(struct nfi_base *nb, u8 *data, u8 *fdm,
4778+ bool read)
4779+{
4780+ int data_len = 0;
4781+ bool is_empty;
4782+
4783+ if (nb->dma_en) {
4784+ if (read) {
4785+ nandx_dma_unmap(nb->res.dev, nb->buf, nb->dma_addr,
4786+ (u64)nb->access_len, NDMA_FROM_DEV);
4787+
4788+ if (data) {
4789+ data_len = nb->rw_sectors * nb->nfi.sector_size;
4790+ memcpy(data, nb->buf, data_len);
4791+ }
4792+
4793+ if (fdm)
4794+ memcpy(fdm, nb->buf + data_len,
4795+ nb->access_len - data_len);
4796+
4797+ if (nb->read_status == -ENANDREAD) {
4798+ is_empty = nb->is_page_empty(nb, data, fdm,
4799+ nb->rw_sectors);
4800+ if (is_empty)
4801+ nb->read_status = 0;
4802+ }
4803+ } else {
4804+ nandx_dma_unmap(nb->res.dev, nb->buf, nb->dma_addr,
4805+ (u64)nb->access_len, NDMA_TO_DEV);
4806+ }
4807+ }
4808+
4809+ /* whether it's reading or writing, we always check if we need to swap;
4810+ * for write, we need to restore the data
4811+ */
4812+ if (nb->bad_mark_swap_en)
4813+ nb->bad_mark_ctrl.bad_mark_swap(&nb->nfi, data, fdm);
4814+
4815+ if (nb->ecc_en)
4816+ nb->ecc->disable(nb->ecc);
4817+
4818+ writel(0, nb->res.nfi_regs + NFI_CNFG);
4819+ writel(0, nb->res.nfi_regs + NFI_CON);
4820+}
4821+
4822+static int nfi_read_sectors(struct nfi *nfi, u8 *data, u8 *fdm,
4823+ int sectors)
4824+{
4825+ struct nfi_base *nb = nfi_to_base(nfi);
4826+ int bitflips = 0, ret;
4827+
4828+ pr_debug("%s: read page#%d\n", __func__, nb->row);
4829+ pr_debug("%s: data address 0x%x, fdm address 0x%x, sectors 0x%x\n",
4830+ __func__, (u32)((unsigned long)data),
4831+ (u32)((unsigned long)fdm), sectors);
4832+
4833+ nb->read_status = 0;
4834+
4835+ ret = nb->rw_prepare(nb, sectors, data, fdm, true);
4836+ if (ret)
4837+ return ret;
4838+
4839+ nb->rw_trigger(nb, true);
4840+
4841+ if (nb->dma_en) {
4842+ ret = nb->rw_wait_done(nb, sectors, true);
4843+ if (ret > 0)
4844+ bitflips = ret;
4845+ else if (ret == -ENANDREAD)
4846+ nb->read_status = -ENANDREAD;
4847+ else if (ret < 0)
4848+ goto complete;
4849+
4850+ }
4851+
4852+ ret = nb->rw_data(nb, data, fdm, sectors, true);
4853+ if (ret > 0)
4854+ ret = max_t(int, ret, bitflips);
4855+
4856+complete:
4857+ nb->rw_complete(nb, data, fdm, true);
4858+
4859+ if (nb->read_status == -ENANDREAD)
4860+ return -ENANDREAD;
4861+
4862+ return ret;
4863+}
4864+
4865+int nfi_write_page(struct nfi *nfi, u8 *data, u8 *fdm)
4866+{
4867+ struct nfi_base *nb = nfi_to_base(nfi);
4868+ u32 sectors = div_down(nb->format.page_size, nfi->sector_size);
4869+ int ret;
4870+
4871+ pr_debug("%s: data address 0x%x, fdm address 0x%x\n",
4872+ __func__, (int)((unsigned long)data),
4873+ (int)((unsigned long)fdm));
4874+
4875+ ret = nb->rw_prepare(nb, sectors, data, fdm, false);
4876+ if (ret)
4877+ return ret;
4878+
4879+ nb->rw_trigger(nb, false);
4880+
4881+ ret = nb->rw_data(nb, data, fdm, sectors, false);
4882+ if (ret)
4883+ return ret;
4884+
4885+ ret = nb->rw_wait_done(nb, sectors, false);
4886+
4887+ nb->rw_complete(nb, data, fdm, false);
4888+
4889+ return ret;
4890+}
4891+
4892+static int nfi_rw_bytes(struct nfi *nfi, u8 *data, int count, bool read)
4893+{
4894+ struct nfi_base *nb = nfi_to_base(nfi);
4895+ void *regs = nb->res.nfi_regs;
4896+ int i, ret;
4897+ u32 val;
4898+
4899+ for (i = 0; i < count; i++) {
4900+ val = readl(regs + NFI_STA) & NFI_FSM_MASK;
4901+ if (val != NFI_FSM_CUSTDATA) {
4902+ val = readw(regs + NFI_CNFG) | CNFG_BYTE_RW;
4903+ if (read)
4904+ val |= CNFG_READ_EN;
4905+ writew(val, regs + NFI_CNFG);
4906+
4907+ val = div_up(count, nfi->sector_size);
4908+ val = (val << CON_SEC_SHIFT) | CON_BRD | CON_BWR;
4909+ writel(val, regs + NFI_CON);
4910+
4911+ writew(STAR_EN, regs + NFI_STRDATA);
4912+ }
4913+
4914+ ret = wait_io_ready(regs);
4915+ if (ret)
4916+ return ret;
4917+
4918+ if (read)
4919+ data[i] = readb(regs + NFI_DATAR);
4920+ else
4921+ writeb(data[i], regs + NFI_DATAW);
4922+ }
4923+
4924+ writel(0, nb->res.nfi_regs + NFI_CNFG);
4925+
4926+ return 0;
4927+}
4928+
4929+static int nfi_read_bytes(struct nfi *nfi, u8 *data, int count)
4930+{
4931+ return nfi_rw_bytes(nfi, data, count, true);
4932+}
4933+
4934+static int nfi_write_bytes(struct nfi *nfi, u8 *data, int count)
4935+{
4936+ return nfi_rw_bytes(nfi, data, count, false);
4937+}
4938+
4939+/* As register map says, only when flash macro is idle,
4940+ * sw reset or nand interface change can be issued
4941+ */
4942+static inline int wait_flash_macro_idle(void *regs)
4943+{
4944+ u32 val;
4945+
4946+ return readl_poll_timeout_atomic(regs + NFI_STA, val,
4947+ val & FLASH_MACRO_IDLE, 2,
4948+ NFI_TIMEOUT);
4949+}
4950+
4951+#define ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt) \
4952+ ((tpoecs) << 28 | (tprecs) << 22 | (tc2r) << 16 | \
4953+ (tw2r) << 12 | (twh) << 8 | (twst) << 4 | (trlt))
4954+
4955+static int nfi_set_sdr_timing(struct nfi *nfi, void *timing, u8 type)
4956+{
4957+ struct nand_sdr_timing *sdr = (struct nand_sdr_timing *) timing;
4958+ struct nfi_base *nb = nfi_to_base(nfi);
4959+ void *regs = nb->res.nfi_regs;
4960+ u32 tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt, tstrobe;
4961+ u32 rate, val;
4962+ int ret;
4963+
4964+ ret = wait_flash_macro_idle(regs);
4965+ if (ret)
4966+ return ret;
4967+
4968+ /* turn clock rate into KHZ */
4969+ rate = nb->res.clock_1x / 1000;
4970+
4971+ tpoecs = max_t(u16, sdr->tALH, sdr->tCLH);
4972+ tpoecs = div_up(tpoecs * rate, 1000000);
4973+ tpoecs &= 0xf;
4974+
4975+ tprecs = max_t(u16, sdr->tCLS, sdr->tALS);
4976+ tprecs = div_up(tprecs * rate, 1000000);
4977+ tprecs &= 0x3f;
4978+
4979+ /* tc2r is in unit of 2T */
4980+ tc2r = div_up(sdr->tCR * rate, 1000000);
4981+ tc2r = div_down(tc2r, 2);
4982+ tc2r &= 0x3f;
4983+
4984+ tw2r = div_up(sdr->tWHR * rate, 1000000);
4985+ tw2r = div_down(tw2r, 2);
4986+ tw2r &= 0xf;
4987+
4988+ twh = max_t(u16, sdr->tREH, sdr->tWH);
4989+ twh = div_up(twh * rate, 1000000) - 1;
4990+ twh &= 0xf;
4991+
4992+ twst = div_up(sdr->tWP * rate, 1000000) - 1;
4993+ twst &= 0xf;
4994+
4995+ trlt = div_up(sdr->tRP * rate, 1000000) - 1;
4996+ trlt &= 0xf;
4997+
4998+ /* If tREA is bigger than tRP, setup strobe sel here */
4999+ if ((trlt + 1) * 1000000 / rate < sdr->tREA) {
5000+ tstrobe = sdr->tREA - (trlt + 1) * 1000000 / rate;
5001+ tstrobe = div_up(tstrobe * rate, 1000000);
5002+ val = readl(regs + NFI_DEBUG_CON1);
5003+ val &= ~STROBE_MASK;
5004+ val |= tstrobe << STROBE_SHIFT;
5005+ writel(val, regs + NFI_DEBUG_CON1);
5006+ }
5007+
5008+ /*
5009+ * ACCON: access timing control register
5010+ * -------------------------------------
5011+ * 31:28: tpoecs, minimum required time for CS post pulling down after
5012+ * accessing the device
5013+ * 27:22: tprecs, minimum required time for CS pre pulling down before
5014+ * accessing the device
5015+ * 21:16: tc2r, minimum required time from NCEB low to NREB low
5016+ * 15:12: tw2r, minimum required time from NWEB high to NREB low.
5017+ * 11:08: twh, write enable hold time
5018+ * 07:04: twst, write wait states
5019+ * 03:00: trlt, read wait states
5020+ */
5021+ val = ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt);
5022+ pr_info("acctiming: 0x%x\n", val);
5023+ writel(val, regs + NFI_ACCCON);
5024+
5025+ /* set NAND type */
5026+ writel(NAND_TYPE_ASYNC, regs + NFI_NAND_TYPE_CNFG);
5027+
5028+ return ret;
5029+}
5030+
5031+static int nfi_set_timing(struct nfi *nfi, void *timing, int type)
5032+{
5033+ switch (type) {
5034+ case NAND_TIMING_SDR:
5035+ return nfi_set_sdr_timing(nfi, timing, type);
5036+
5037+ /* NOTE: for mlc/tlc */
5038+ case NAND_TIMING_SYNC_DDR:
5039+ case NAND_TIMING_TOGGLE_DDR:
5040+ case NAND_TIMING_NVDDR2:
5041+ default:
5042+ return -EINVAL;
5043+ }
5044+
5045+ return 0;
5046+}
5047+
5048+static void set_nfi_funcs(struct nfi *nfi)
5049+{
5050+ nfi->select_chip = nfi_select_chip;
5051+ nfi->set_format = nfi_set_format;
5052+ nfi->nfi_ctrl = nfi_ctrl;
5053+ nfi->set_timing = nfi_set_timing;
5054+
5055+ nfi->reset = nfi_reset;
5056+ nfi->send_cmd = nfi_send_cmd;
5057+ nfi->send_addr = nfi_send_addr;
5058+ nfi->trigger = nfi_trigger;
5059+
5060+ nfi->write_page = nfi_write_page;
5061+ nfi->write_bytes = nfi_write_bytes;
5062+ nfi->read_sectors = nfi_read_sectors;
5063+ nfi->read_bytes = nfi_read_bytes;
5064+
5065+ nfi->wait_ready = nfi_wait_ready;
5066+
5067+ nfi->enable_randomizer = nfi_enable_randomizer;
5068+ nfi->disable_randomizer = nfi_disable_randomizer;
5069+}
5070+
5071+static struct nfi_caps nfi_caps_mt7622 = {
5072+ .max_fdm_size = 8,
5073+ .fdm_ecc_size = 1,
5074+ .ecc_parity_bits = 13,
5075+ .spare_size = spare_size_mt7622,
5076+ .spare_size_num = 4,
5077+};
5078+
5079+static struct nfi_caps *nfi_get_match_data(enum mtk_ic_version ic)
5080+{
5081+ /* NOTE: add other IC's data */
5082+ return &nfi_caps_mt7622;
5083+}
5084+
5085+static void set_nfi_base_params(struct nfi_base *nb)
5086+{
5087+ nb->ecc_en = false;
5088+ nb->dma_en = false;
5089+ nb->nfi_irq_en = false;
5090+ nb->ecc_irq_en = false;
5091+ nb->page_irq_en = false;
5092+ nb->ecc_clk_en = false;
5093+ nb->randomize_en = false;
5094+ nb->custom_sector_en = false;
5095+ nb->bad_mark_swap_en = false;
5096+
5097+ nb->op_mode = CNFG_CUSTOM_MODE;
5098+ nb->ecc_deccon = ECC_DEC_CORRECT;
5099+ nb->ecc_mode = ECC_NFI_MODE;
5100+
5101+ nb->done = nandx_event_create();
5102+ nb->caps = nfi_get_match_data(nb->res.ic_ver);
5103+
5104+ nb->set_op_mode = set_op_mode;
5105+ nb->is_page_empty = is_page_empty;
5106+
5107+ nb->rw_prepare = rw_prepare;
5108+ nb->rw_trigger = rw_trigger;
5109+ nb->rw_wait_done = rw_wait_done;
5110+ nb->rw_data = rw_data;
5111+ nb->rw_complete = rw_complete;
5112+}
5113+
5114+struct nfi *__weak nfi_extend_init(struct nfi_base *nb)
5115+{
5116+ return &nb->nfi;
5117+}
5118+
5119+void __weak nfi_extend_exit(struct nfi_base *nb)
5120+{
5121+ mem_free(nb);
5122+}
5123+
5124+struct nfi *nfi_init(struct nfi_resource *res)
5125+{
5126+ struct nfiecc_resource ecc_res;
5127+ struct nfi_base *nb;
5128+ struct nfiecc *ecc;
5129+ struct nfi *nfi;
5130+ int ret;
5131+
5132+ nb = mem_alloc(1, sizeof(struct nfi_base));
5133+ if (!nb) {
5134+ pr_info("nfi alloc memory fail @%s.\n", __func__);
5135+ return NULL;
5136+ }
5137+
5138+ nb->res = *res;
5139+
5140+ ret = nandx_irq_register(res->dev, res->nfi_irq_id, nfi_irq_handler,
5141+ "mtk_nand", nb);
5142+ if (ret) {
5143+ pr_info("nfi irq register failed!\n");
5144+ goto error;
5145+ }
5146+
5147+ /* fill ecc paras and init ecc */
5148+ ecc_res.ic_ver = nb->res.ic_ver;
5149+ ecc_res.dev = nb->res.dev;
5150+ ecc_res.irq_id = nb->res.ecc_irq_id;
5151+ ecc_res.regs = nb->res.ecc_regs;
5152+ ecc = nfiecc_init(&ecc_res);
5153+ if (!ecc) {
5154+ pr_info("nfiecc init fail.\n");
5155+ return NULL;
5156+ }
5157+
5158+ nb->ecc = ecc;
5159+
5160+ set_nfi_base_params(nb);
5161+ set_nfi_funcs(&nb->nfi);
5162+
5163+ /* Assign a temp sector size for reading ID & para page.
5164+ * We may assign new value later.
5165+ */
5166+ nb->nfi.sector_size = 512;
5167+
5168+ /* give a default timing; as discussed,
5169+ * this is the only thing we need to do for nfi init.
5170+ * if more is needed, we can add a function
5171+ */
5172+ writel(0x30C77FFF, nb->res.nfi_regs + NFI_ACCCON);
5173+
5174+ nfi = nfi_extend_init(nb);
5175+ if (nfi)
5176+ return nfi;
5177+
5178+error:
5179+ mem_free(nb);
5180+ return NULL;
5181+}
5182+
5183+void nfi_exit(struct nfi *nfi)
5184+{
5185+ struct nfi_base *nb = nfi_to_base(nfi);
5186+
5187+ nandx_event_destroy(nb->done);
5188+ nfiecc_exit(nb->ecc);
5189+#if !NANDX_BULK_IO_USE_DRAM
5190+ mem_free(nb->buf);
5191+#endif
5192+ nfi_extend_exit(nb);
5193+}
5194+
5195diff --git a/drivers/mtd/nandx/core/nfi/nfi_base.h b/drivers/mtd/nandx/core/nfi/nfi_base.h
5196new file mode 100644
5197index 0000000000..ae894eaa31
5198--- /dev/null
5199+++ b/drivers/mtd/nandx/core/nfi/nfi_base.h
5200@@ -0,0 +1,95 @@
5201+/*
5202+ * Copyright (C) 2017 MediaTek Inc.
5203+ * Licensed under either
5204+ * BSD Licence, (see NOTICE for more details)
5205+ * GNU General Public License, version 2.0, (see NOTICE for more details)
5206+ */
5207+
5208+#ifndef __NFI_BASE_H__
5209+#define __NFI_BASE_H__
5210+
5211+#define NFI_TIMEOUT 1000000
5212+
5213+enum randomizer_op {
5214+ RAND_ENCODE,
5215+ RAND_DECODE
5216+};
5217+
5218+struct bad_mark_ctrl {
5219+ void (*bad_mark_swap)(struct nfi *nfi, u8 *buf, u8 *fdm);
5220+ u8 *(*fdm_shift)(struct nfi *nfi, u8 *fdm, int sector);
5221+ u32 sector;
5222+ u32 position;
5223+};
5224+
5225+struct nfi_caps {
5226+ u8 max_fdm_size;
5227+ u8 fdm_ecc_size;
5228+ u8 ecc_parity_bits;
5229+ const int *spare_size;
5230+ u32 spare_size_num;
5231+};
5232+
5233+struct nfi_base {
5234+ struct nfi nfi;
5235+ struct nfi_resource res;
5236+ struct nfiecc *ecc;
5237+ struct nfi_format format;
5238+ struct nfi_caps *caps;
5239+ struct bad_mark_ctrl bad_mark_ctrl;
5240+
5241+ /* page_size + spare_size */
5242+ u8 *buf;
5243+
5244+ /* used for spi nand */
5245+ u8 cmd_mode;
5246+ u32 op_mode;
5247+
5248+ int page_sectors;
5249+
5250+ void *done;
5251+
5252+ /* for read/write */
5253+ int col;
5254+ int row;
5255+ int access_len;
5256+ int rw_sectors;
5257+ void *dma_addr;
5258+ int read_status;
5259+
5260+ bool dma_en;
5261+ bool nfi_irq_en;
5262+ bool page_irq_en;
5263+ bool auto_format;
5264+ bool ecc_en;
5265+ bool ecc_irq_en;
5266+ bool ecc_clk_en;
5267+ bool randomize_en;
5268+ bool custom_sector_en;
5269+ bool bad_mark_swap_en;
5270+
5271+ enum nfiecc_deccon ecc_deccon;
5272+ enum nfiecc_mode ecc_mode;
5273+
5274+ void (*set_op_mode)(void *regs, u32 mode);
5275+ bool (*is_page_empty)(struct nfi_base *nb, u8 *data, u8 *fdm,
5276+ int sectors);
5277+
5278+ int (*rw_prepare)(struct nfi_base *nb, int sectors, u8 *data, u8 *fdm,
5279+ bool read);
5280+ void (*rw_trigger)(struct nfi_base *nb, bool read);
5281+ int (*rw_wait_done)(struct nfi_base *nb, int sectors, bool read);
5282+ int (*rw_data)(struct nfi_base *nb, u8 *data, u8 *fdm, int sectors,
5283+ bool read);
5284+ void (*rw_complete)(struct nfi_base *nb, u8 *data, u8 *fdm, bool read);
5285+};
5286+
5287+static inline struct nfi_base *nfi_to_base(struct nfi *nfi)
5288+{
5289+ return container_of(nfi, struct nfi_base, nfi);
5290+}
5291+
5292+struct nfi *nfi_extend_init(struct nfi_base *nb);
5293+void nfi_extend_exit(struct nfi_base *nb);
5294+
5295+#endif /* __NFI_BASE_H__ */
5296diff --git a/drivers/mtd/nandx/core/nfi/nfi_regs.h b/drivers/mtd/nandx/core/nfi/nfi_regs.h
5297new file mode 100644
5298index 0000000000..ba4868acc8
5299--- /dev/null
5300+++ b/drivers/mtd/nandx/core/nfi/nfi_regs.h
5301@@ -0,0 +1,114 @@
5302+/*
5303+ * Copyright (C) 2017 MediaTek Inc.
5304+ * Licensed under either
5305+ * BSD Licence, (see NOTICE for more details)
5306+ * GNU General Public License, version 2.0, (see NOTICE for more details)
5307+ */
5308+
5309+#ifndef __NFI_REGS_H__
5310+#define __NFI_REGS_H__
5311+
5312+#define NFI_CNFG 0x000
5313+#define CNFG_AHB BIT(0)
5314+#define CNFG_READ_EN BIT(1)
5315+#define CNFG_DMA_BURST_EN BIT(2)
5316+#define CNFG_RESEED_SEC_EN BIT(4)
5317+#define CNFG_RAND_SEL BIT(5)
5318+#define CNFG_BYTE_RW BIT(6)
5319+#define CNFG_HW_ECC_EN BIT(8)
5320+#define CNFG_AUTO_FMT_EN BIT(9)
5321+#define CNFG_RAND_MASK GENMASK(5, 4)
5322+#define CNFG_OP_MODE_MASK GENMASK(14, 12)
5323+#define CNFG_IDLE_MOD 0
5324+#define CNFG_READ_MODE (1 << 12)
5325+#define CNFG_SINGLE_READ_MODE (2 << 12)
5326+#define CNFG_PROGRAM_MODE (3 << 12)
5327+#define CNFG_ERASE_MODE (4 << 12)
5328+#define CNFG_RESET_MODE (5 << 12)
5329+#define CNFG_CUSTOM_MODE (6 << 12)
5330+#define NFI_PAGEFMT 0x004
5331+#define PAGEFMT_SPARE_SHIFT 4
5332+#define PAGEFMT_FDM_ECC_SHIFT 12
5333+#define PAGEFMT_FDM_SHIFT 8
5334+#define PAGEFMT_SEC_SEL_512 BIT(2)
5335+#define PAGEFMT_512_2K 0
5336+#define PAGEFMT_2K_4K 1
5337+#define PAGEFMT_4K_8K 2
5338+#define PAGEFMT_8K_16K 3
5339+#define NFI_CON 0x008
5340+#define CON_FIFO_FLUSH BIT(0)
5341+#define CON_NFI_RST BIT(1)
5342+#define CON_BRD BIT(8)
5343+#define CON_BWR BIT(9)
5344+#define CON_SEC_SHIFT 12
5345+#define NFI_ACCCON 0x00c
5346+#define NFI_INTR_EN 0x010
5347+#define INTR_BUSY_RETURN_EN BIT(4)
5348+#define INTR_AHB_DONE_EN BIT(6)
5349+#define NFI_INTR_STA 0x014
5350+#define NFI_CMD 0x020
5351+#define NFI_ADDRNOB 0x030
5352+#define ROW_SHIFT 4
5353+#define NFI_COLADDR 0x034
5354+#define NFI_ROWADDR 0x038
5355+#define NFI_STRDATA 0x040
5356+#define STAR_EN 1
5357+#define STAR_DE 0
5358+#define NFI_CNRNB 0x044
5359+#define NFI_DATAW 0x050
5360+#define NFI_DATAR 0x054
5361+#define NFI_PIO_DIRDY 0x058
5362+#define PIO_DI_RDY 1
5363+#define NFI_STA 0x060
5364+#define STA_CMD BIT(0)
5365+#define STA_ADDR BIT(1)
5366+#define FLASH_MACRO_IDLE BIT(5)
5367+#define STA_BUSY BIT(8)
5368+#define STA_BUSY2READY BIT(9)
5369+#define STA_EMP_PAGE BIT(12)
5370+#define NFI_FSM_CUSTDATA (0xe << 16)
5371+#define NFI_FSM_MASK GENMASK(19, 16)
5372+#define NAND_FSM_MASK GENMASK(29, 23)
5373+#define NFI_ADDRCNTR 0x070
5374+#define CNTR_VALID_MASK GENMASK(16, 0)
5375+#define CNTR_MASK GENMASK(15, 12)
5376+#define ADDRCNTR_SEC_SHIFT 12
5377+#define ADDRCNTR_SEC(val) \
5378+ (((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
5379+#define NFI_STRADDR 0x080
5380+#define NFI_BYTELEN 0x084
5381+#define NFI_CSEL 0x090
5382+#define NFI_FDML(x) (0x0a0 + (x) * 8)
5383+#define NFI_FDMM(x) (0x0a4 + (x) * 8)
5384+#define NFI_DEBUG_CON1 0x220
5385+#define STROBE_MASK GENMASK(4, 3)
5386+#define STROBE_SHIFT 3
5387+#define ECC_CLK_EN BIT(11)
5388+#define AUTOC_SRAM_MODE BIT(12)
5389+#define BYPASS_MASTER_EN BIT(15)
5390+#define NFI_MASTER_STA 0x224
5391+#define MASTER_BUS_BUSY 0x3
5392+#define NFI_SECCUS_SIZE 0x22c
5393+#define SECCUS_SIZE_EN BIT(17)
5394+#define NFI_RANDOM_CNFG 0x238
5395+#define RAN_ENCODE_EN BIT(0)
5396+#define ENCODE_SEED_SHIFT 1
5397+#define RAN_DECODE_EN BIT(16)
5398+#define DECODE_SEED_SHIFT 17
5399+#define RAN_SEED_MASK 0x7fff
5400+#define NFI_EMPTY_THRESH 0x23c
5401+#define NFI_NAND_TYPE_CNFG 0x240
5402+#define NAND_TYPE_ASYNC 0
5403+#define NAND_TYPE_TOGGLE 1
5404+#define NAND_TYPE_SYNC 2
5405+#define NFI_ACCCON1 0x244
5406+#define NFI_DELAY_CTRL 0x248
5407+#define NFI_TLC_RD_WHR2 0x300
5408+#define TLC_RD_WHR2_EN BIT(12)
5409+#define TLC_RD_WHR2_MASK GENMASK(11, 0)
5410+#define SNF_SNF_CNFG 0x55c
5411+#define SPI_MODE_EN 1
5412+#define SPI_MODE_DIS 0
5413+
5414+#endif /* __NFI_REGS_H__ */
5415+
5416diff --git a/drivers/mtd/nandx/core/nfi/nfi_spi.c b/drivers/mtd/nandx/core/nfi/nfi_spi.c
5417new file mode 100644
5418index 0000000000..67cd0aaad9
5419--- /dev/null
5420+++ b/drivers/mtd/nandx/core/nfi/nfi_spi.c
5421@@ -0,0 +1,689 @@
5422+/*
5423+ * Copyright (C) 2017 MediaTek Inc.
5424+ * Licensed under either
5425+ * BSD Licence, (see NOTICE for more details)
5426+ * GNU General Public License, version 2.0, (see NOTICE for more details)
5427+ */
5428+
5429+#include "nandx_util.h"
5430+#include "nandx_core.h"
5431+#include "../nfi.h"
5432+#include "nfiecc.h"
5433+#include "nfi_regs.h"
5434+#include "nfi_base.h"
5435+#include "nfi_spi_regs.h"
5436+#include "nfi_spi.h"
5437+
5438+#define NFI_CMD_DUMMY_RD 0x00
5439+#define NFI_CMD_DUMMY_WR 0x80
5440+
5441+static struct nfi_spi_delay spi_delay[SPI_NAND_MAX_DELAY] = {
5442+ /*
5443+ * tCLK_SAM_DLY, tCLK_OUT_DLY, tCS_DLY, tWR_EN_DLY,
5444+ * tIO_IN_DLY[4], tIO_OUT_DLY[4], tREAD_LATCH_LATENCY
5445+ */
5446+ {0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 0},
5447+ {21, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 0},
5448+ {63, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 0},
5449+ {0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 1},
5450+ {21, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 1},
5451+ {63, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 1}
5452+};
5453+
5454+static inline struct nfi_spi *base_to_snfi(struct nfi_base *nb)
5455+{
5456+ return container_of(nb, struct nfi_spi, base);
5457+}
5458+
5459+static void snfi_mac_enable(struct nfi_base *nb)
5460+{
5461+ void *regs = nb->res.nfi_regs;
5462+ u32 val;
5463+
5464+ val = readl(regs + SNF_MAC_CTL);
5465+ val &= ~MAC_XIO_SEL;
5466+ val |= SF_MAC_EN;
5467+
5468+ writel(val, regs + SNF_MAC_CTL);
5469+}
5470+
5471+static void snfi_mac_disable(struct nfi_base *nb)
5472+{
5473+ void *regs = nb->res.nfi_regs;
5474+ u32 val;
5475+
5476+ val = readl(regs + SNF_MAC_CTL);
5477+ val &= ~(SF_TRIG | SF_MAC_EN);
5478+ writel(val, regs + SNF_MAC_CTL);
5479+}
5480+
5481+static int snfi_mac_trigger(struct nfi_base *nb)
5482+{
5483+ void *regs = nb->res.nfi_regs;
5484+ int ret;
5485+ u32 val;
5486+
5487+ val = readl(regs + SNF_MAC_CTL);
5488+ val |= SF_TRIG;
5489+ writel(val, regs + SNF_MAC_CTL);
5490+
5491+ ret = readl_poll_timeout_atomic(regs + SNF_MAC_CTL, val,
5492+ val & WIP_READY, 10,
5493+ NFI_TIMEOUT);
5494+ if (ret) {
5495+ pr_info("polling wip ready for read timeout\n");
5496+ return ret;
5497+ }
5498+
5499+ return readl_poll_timeout_atomic(regs + SNF_MAC_CTL, val,
5500+ !(val & WIP), 10,
5501+ NFI_TIMEOUT);
5502+}
5503+
5504+static int snfi_mac_op(struct nfi_base *nb)
5505+{
5506+ int ret;
5507+
5508+ snfi_mac_enable(nb);
5509+ ret = snfi_mac_trigger(nb);
5510+ snfi_mac_disable(nb);
5511+
5512+ return ret;
5513+}
5514+
5515+static void snfi_write_mac(struct nfi_spi *nfi_spi, u8 *data, int count)
5516+{
5517+ struct nandx_split32 split = {0};
5518+ u32 reg_offset = round_down(nfi_spi->tx_count, 4);
5519+ void *regs = nfi_spi->base.res.nfi_regs;
5520+ u32 data_offset = 0, i, val;
5521+ u8 *p_val = (u8 *)(&val);
5522+
5523+ nandx_split(&split, nfi_spi->tx_count, count, val, 4);
5524+
5525+ if (split.head_len) {
5526+ val = readl(regs + SPI_GPRAM_ADDR + reg_offset);
5527+
5528+ for (i = 0; i < split.head_len; i++)
5529+ p_val[split.head + i] = data[i];
5530+
5531+ writel(val, regs + SPI_GPRAM_ADDR + reg_offset);
5532+ }
5533+
5534+ if (split.body_len) {
5535+ reg_offset = split.body;
5536+ data_offset = split.head_len;
5537+
5538+ for (i = 0; i < split.body_len; i++) {
5539+ p_val[i & 3] = data[data_offset + i];
5540+
5541+ if ((i & 3) == 3) {
5542+ writel(val, regs + SPI_GPRAM_ADDR + reg_offset);
5543+ reg_offset += 4;
5544+ }
5545+ }
5546+ }
5547+
5548+ if (split.tail_len) {
5549+ reg_offset = split.tail;
5550+ data_offset += split.body_len;
5551+
5552+ for (i = 0; i < split.tail_len; i++) {
5553+ p_val[i] = data[data_offset + i];
5554+
5555+ if (i == split.tail_len - 1)
5556+ writel(val, regs + SPI_GPRAM_ADDR + reg_offset);
5557+ }
5558+ }
5559+}
5560+
5561+static void snfi_read_mac(struct nfi_spi *nfi_spi, u8 *data, int count)
5562+{
5563+ void *regs = nfi_spi->base.res.nfi_regs;
5564+ u32 reg_offset = round_down(nfi_spi->tx_count, 4);
5565+ struct nandx_split32 split = {0};
5566+ u32 data_offset = 0, i, val;
5567+ u8 *p_val = (u8 *)&val;
5568+
5569+ nandx_split(&split, nfi_spi->tx_count, count, val, 4);
5570+
5571+ if (split.head_len) {
5572+ val = readl(regs + SPI_GPRAM_ADDR + reg_offset);
5573+
5574+ for (i = 0; i < split.head_len; i++)
5575+ data[data_offset + i] = p_val[split.head + i];
5576+ }
5577+
5578+ if (split.body_len) {
5579+ reg_offset = split.body;
5580+ data_offset = split.head_len;
5581+
5582+ for (i = 0; i < split.body_len; i++) {
5583+ if ((i & 3) == 0) {
5584+ val = readl(regs + SPI_GPRAM_ADDR + reg_offset);
5585+ reg_offset += 4;
5586+ }
5587+
5588+ data[data_offset + i] = p_val[i % 4];
5589+ }
5590+ }
5591+
5592+ if (split.tail_len) {
5593+ reg_offset = split.tail;
5594+ data_offset += split.body_len;
5595+ val = readl(regs + SPI_GPRAM_ADDR + reg_offset);
5596+
5597+ for (i = 0; i < split.tail_len; i++)
5598+ data[data_offset + i] = p_val[i];
5599+ }
5600+}
5601+
5602+static int snfi_send_command(struct nfi *nfi, short cmd)
5603+{
5604+ struct nfi_base *nb = nfi_to_base(nfi);
5605+ struct nfi_spi *nfi_spi = base_to_snfi(nb);
5606+
5607+ if (cmd == -1)
5608+ return 0;
5609+
5610+ if (nfi_spi->snfi_mode == SNFI_MAC_MODE) {
5611+ snfi_write_mac(nfi_spi, (u8 *)&cmd, 1);
5612+ nfi_spi->tx_count++;
5613+ return 0;
5614+ }
5615+
5616+ nfi_spi->cmd[nfi_spi->cur_cmd_idx++] = cmd;
5617+ return 0;
5618+}
5619+
5620+static int snfi_send_address(struct nfi *nfi, int col, int row,
5621+ int col_cycle,
5622+ int row_cycle)
5623+{
5624+ struct nfi_base *nb = nfi_to_base(nfi);
5625+ struct nfi_spi *nfi_spi = base_to_snfi(nb);
5626+ u32 addr, cycle, temp;
5627+
5628+ nb->col = col;
5629+ nb->row = row;
5630+
5631+ if (nfi_spi->snfi_mode == SNFI_MAC_MODE) {
5632+ addr = row;
5633+ cycle = row_cycle;
5634+
5635+ if (!row_cycle) {
5636+ addr = col;
5637+ cycle = col_cycle;
5638+ }
5639+
5640+ temp = nandx_cpu_to_be32(addr) >> ((4 - cycle) << 3);
5641+ snfi_write_mac(nfi_spi, (u8 *)&temp, cycle);
5642+ nfi_spi->tx_count += cycle;
5643+ } else {
5644+ nfi_spi->row_addr[nfi_spi->cur_addr_idx++] = row;
5645+ nfi_spi->col_addr[nfi_spi->cur_addr_idx++] = col;
5646+ }
5647+
5648+ return 0;
5649+}
5650+
5651+static int snfi_trigger(struct nfi *nfi)
5652+{
5653+ struct nfi_base *nb = nfi_to_base(nfi);
5654+ struct nfi_spi *nfi_spi = base_to_snfi(nb);
5655+ void *regs = nb->res.nfi_regs;
5656+
5657+ writel(nfi_spi->tx_count, regs + SNF_MAC_OUTL);
5658+ writel(0, regs + SNF_MAC_INL);
5659+
5660+ nfi_spi->tx_count = 0;
5661+ nfi_spi->cur_cmd_idx = 0;
5662+ nfi_spi->cur_addr_idx = 0;
5663+
5664+ return snfi_mac_op(nb);
5665+}
5666+
5667+static int snfi_select_chip(struct nfi *nfi, int cs)
5668+{
5669+ struct nfi_base *nb = nfi_to_base(nfi);
5670+ void *regs = nb->res.nfi_regs;
5671+ u32 val;
5672+
5673+ val = readl(regs + SNF_MISC_CTL);
5674+
5675+ if (cs == 0) {
5676+ val &= ~SF2CS_SEL;
5677+ val &= ~SF2CS_EN;
5678+ } else if (cs == 1) {
5679+ val |= SF2CS_SEL;
5680+ val |= SF2CS_EN;
5681+ } else {
5682+ return -EIO;
5683+ }
5684+
5685+ writel(val, regs + SNF_MISC_CTL);
5686+
5687+ return 0;
5688+}
5689+
5690+static int snfi_set_delay(struct nfi_base *nb, u8 delay_mode)
5691+{
5692+ void *regs = nb->res.nfi_regs;
5693+ struct nfi_spi_delay *delay;
5694+ u32 val;
5695+
5696+	if (delay_mode >= SPI_NAND_MAX_DELAY)
5697+ return -EINVAL;
5698+
5699+ delay = &spi_delay[delay_mode];
5700+
5701+ val = delay->tIO_OUT_DLY[0] | delay->tIO_OUT_DLY[1] << 8 |
5702+ delay->tIO_OUT_DLY[2] << 16 |
5703+ delay->tIO_OUT_DLY[3] << 24;
5704+ writel(val, regs + SNF_DLY_CTL1);
5705+
5706+ val = delay->tIO_IN_DLY[0] | (delay->tIO_IN_DLY[1] << 8) |
5707+ delay->tIO_IN_DLY[2] << 16 |
5708+ delay->tIO_IN_DLY[3] << 24;
5709+ writel(val, regs + SNF_DLY_CTL2);
5710+
5711+ val = delay->tCLK_SAM_DLY | delay->tCLK_OUT_DLY << 8 |
5712+ delay->tCS_DLY << 16 |
5713+ delay->tWR_EN_DLY << 24;
5714+ writel(val, regs + SNF_DLY_CTL3);
5715+
5716+ writel(delay->tCS_DLY, regs + SNF_DLY_CTL4);
5717+
5718+ val = readl(regs + SNF_MISC_CTL);
5719+ val |= (delay->tREAD_LATCH_LATENCY) <<
5720+ LATCH_LAT_SHIFT;
5721+ writel(val, regs + SNF_MISC_CTL);
5722+
5723+ return 0;
5724+}
5725+
5726+static int snfi_set_timing(struct nfi *nfi, void *timing, int type)
5727+{
5728+ /* Nothing need to do. */
5729+ return 0;
5730+}
5731+
5732+static int snfi_wait_ready(struct nfi *nfi, int type, u32 timeout)
5733+{
5734+ /* Nothing need to do. */
5735+ return 0;
5736+}
5737+
5738+static int snfi_ctrl(struct nfi *nfi, int cmd, void *args)
5739+{
5740+ struct nfi_base *nb = nfi_to_base(nfi);
5741+ struct nfi_spi *nfi_spi = base_to_snfi(nb);
5742+ int ret = 0;
5743+
5744+ if (!args)
5745+ return -EINVAL;
5746+
5747+ switch (cmd) {
5748+ case NFI_CTRL_DMA:
5749+ nb->dma_en = *(bool *)args;
5750+ break;
5751+
5752+ case NFI_CTRL_NFI_IRQ:
5753+ nb->nfi_irq_en = *(bool *)args;
5754+ break;
5755+
5756+ case NFI_CTRL_ECC_IRQ:
5757+ nb->ecc_irq_en = *(bool *)args;
5758+ break;
5759+
5760+ case NFI_CTRL_PAGE_IRQ:
5761+ nb->page_irq_en = *(bool *)args;
5762+ break;
5763+
5764+ case NFI_CTRL_ECC:
5765+ nb->ecc_en = *(bool *)args;
5766+ break;
5767+
5768+ case NFI_CTRL_BAD_MARK_SWAP:
5769+ nb->bad_mark_swap_en = *(bool *)args;
5770+ break;
5771+
5772+ case NFI_CTRL_ECC_CLOCK:
5773+ nb->ecc_clk_en = *(bool *)args;
5774+ break;
5775+
5776+ case SNFI_CTRL_OP_MODE:
5777+ nfi_spi->snfi_mode = *(u8 *)args;
5778+ break;
5779+
5780+ case SNFI_CTRL_RX_MODE:
5781+ nfi_spi->read_cache_mode = *(u8 *)args;
5782+ break;
5783+
5784+ case SNFI_CTRL_TX_MODE:
5785+ nfi_spi->write_cache_mode = *(u8 *)args;
5786+ break;
5787+
5788+ case SNFI_CTRL_DELAY_MODE:
5789+ ret = snfi_set_delay(nb, *(u8 *)args);
5790+ break;
5791+
5792+ default:
5793+		pr_info("operation not supported.\n");
5794+ ret = -EOPNOTSUPP;
5795+ break;
5796+ }
5797+
5798+ return ret;
5799+}
5800+
5801+static int snfi_read_bytes(struct nfi *nfi, u8 *data, int count)
5802+{
5803+ struct nfi_base *nb = nfi_to_base(nfi);
5804+ struct nfi_spi *nfi_spi = base_to_snfi(nb);
5805+ void *regs = nb->res.nfi_regs;
5806+ int ret;
5807+
5808+ writel(nfi_spi->tx_count, regs + SNF_MAC_OUTL);
5809+ writel(count, regs + SNF_MAC_INL);
5810+
5811+ ret = snfi_mac_op(nb);
5812+ if (ret)
5813+ return ret;
5814+
5815+ snfi_read_mac(nfi_spi, data, count);
5816+
5817+ nfi_spi->tx_count = 0;
5818+
5819+ return 0;
5820+}
5821+
5822+static int snfi_write_bytes(struct nfi *nfi, u8 *data, int count)
5823+{
5824+ struct nfi_base *nb = nfi_to_base(nfi);
5825+ struct nfi_spi *nfi_spi = base_to_snfi(nb);
5826+ void *regs = nb->res.nfi_regs;
5827+
5828+ snfi_write_mac(nfi_spi, data, count);
5829+ nfi_spi->tx_count += count;
5830+
5831+ writel(0, regs + SNF_MAC_INL);
5832+ writel(nfi_spi->tx_count, regs + SNF_MAC_OUTL);
5833+
5834+ nfi_spi->tx_count = 0;
5835+
5836+ return snfi_mac_op(nb);
5837+}
5838+
5839+static int snfi_reset(struct nfi *nfi)
5840+{
5841+ struct nfi_base *nb = nfi_to_base(nfi);
5842+ struct nfi_spi *nfi_spi = base_to_snfi(nb);
5843+ void *regs = nb->res.nfi_regs;
5844+ u32 val;
5845+ int ret;
5846+
5847+ ret = nfi_spi->parent->nfi.reset(nfi);
5848+ if (ret)
5849+ return ret;
5850+
5851+ val = readl(regs + SNF_MISC_CTL);
5852+ val |= SW_RST;
5853+ writel(val, regs + SNF_MISC_CTL);
5854+
5855+ ret = readx_poll_timeout_atomic(readw, regs + SNF_STA_CTL1, val,
5856+ !(val & SPI_STATE), 50,
5857+ NFI_TIMEOUT);
5858+ if (ret) {
5859+ pr_info("spi state active in reset [0x%x] = 0x%x\n",
5860+ SNF_STA_CTL1, val);
5861+ return ret;
5862+ }
5863+
5864+ val = readl(regs + SNF_MISC_CTL);
5865+ val &= ~SW_RST;
5866+ writel(val, regs + SNF_MISC_CTL);
5867+
5868+ return 0;
5869+}
5870+
5871+static int snfi_config_for_write(struct nfi_base *nb, int count)
5872+{
5873+ struct nfi_spi *nfi_spi = base_to_snfi(nb);
5874+ void *regs = nb->res.nfi_regs;
5875+ u32 val;
5876+
5877+ nb->set_op_mode(regs, CNFG_CUSTOM_MODE);
5878+
5879+ val = readl(regs + SNF_MISC_CTL);
5880+
5881+ if (nfi_spi->write_cache_mode == SNFI_TX_114)
5882+ val |= PG_LOAD_X4_EN;
5883+
5884+ if (nfi_spi->snfi_mode == SNFI_CUSTOM_MODE)
5885+ val |= PG_LOAD_CUSTOM_EN;
5886+
5887+ writel(val, regs + SNF_MISC_CTL);
5888+
5889+ val = count * (nb->nfi.sector_size + nb->nfi.sector_spare_size);
5890+ writel(val << PG_LOAD_SHIFT, regs + SNF_MISC_CTL2);
5891+
5892+ val = readl(regs + SNF_PG_CTL1);
5893+
5894+ if (nfi_spi->snfi_mode == SNFI_CUSTOM_MODE)
5895+ val |= nfi_spi->cmd[0] << PG_LOAD_CMD_SHIFT;
5896+ else {
5897+ val |= nfi_spi->cmd[0] | nfi_spi->cmd[1] << PG_LOAD_CMD_SHIFT |
5898+ nfi_spi->cmd[2] << PG_EXE_CMD_SHIFT;
5899+
5900+ writel(nfi_spi->row_addr[1], regs + SNF_PG_CTL3);
5901+ writel(nfi_spi->cmd[3] << GF_CMD_SHIFT | nfi_spi->col_addr[2] <<
5902+ GF_ADDR_SHIFT, regs + SNF_GF_CTL1);
5903+ }
5904+
5905+ writel(val, regs + SNF_PG_CTL1);
5906+ writel(nfi_spi->col_addr[1], regs + SNF_PG_CTL2);
5907+
5908+ writel(NFI_CMD_DUMMY_WR, regs + NFI_CMD);
5909+
5910+ return 0;
5911+}
5912+
5913+static int snfi_config_for_read(struct nfi_base *nb, int count)
5914+{
5915+ struct nfi_spi *nfi_spi = base_to_snfi(nb);
5916+ void *regs = nb->res.nfi_regs;
5917+ u32 val;
5918+ int ret = 0;
5919+
5920+ nb->set_op_mode(regs, CNFG_CUSTOM_MODE);
5921+
5922+ val = readl(regs + SNF_MISC_CTL);
5923+ val &= ~DARA_READ_MODE_MASK;
5924+
5925+ switch (nfi_spi->read_cache_mode) {
5926+
5927+ case SNFI_RX_111:
5928+ break;
5929+
5930+ case SNFI_RX_112:
5931+ val |= X2_DATA_MODE << READ_MODE_SHIFT;
5932+ break;
5933+
5934+ case SNFI_RX_114:
5935+ val |= X4_DATA_MODE << READ_MODE_SHIFT;
5936+ break;
5937+
5938+ case SNFI_RX_122:
5939+ val |= DUAL_IO_MODE << READ_MODE_SHIFT;
5940+ break;
5941+
5942+ case SNFI_RX_144:
5943+ val |= QUAD_IO_MODE << READ_MODE_SHIFT;
5944+ break;
5945+
5946+ default:
5947+		pr_info("Not support this read operation: %d!\n",
5948+ nfi_spi->read_cache_mode);
5949+ ret = -EINVAL;
5950+ break;
5951+ }
5952+
5953+ if (nfi_spi->snfi_mode == SNFI_CUSTOM_MODE)
5954+ val |= DATARD_CUSTOM_EN;
5955+
5956+ writel(val, regs + SNF_MISC_CTL);
5957+
5958+ val = count * (nb->nfi.sector_size + nb->nfi.sector_spare_size);
5959+ writel(val, regs + SNF_MISC_CTL2);
5960+
5961+ val = readl(regs + SNF_RD_CTL2);
5962+
5963+ if (nfi_spi->snfi_mode == SNFI_CUSTOM_MODE) {
5964+ val |= nfi_spi->cmd[0];
5965+ writel(nfi_spi->col_addr[1], regs + SNF_RD_CTL3);
5966+ } else {
5967+ val |= nfi_spi->cmd[2];
5968+ writel(nfi_spi->cmd[0] << PAGE_READ_CMD_SHIFT |
5969+ nfi_spi->row_addr[0], regs + SNF_RD_CTL1);
5970+ writel(nfi_spi->cmd[1] << GF_CMD_SHIFT |
5971+ nfi_spi->col_addr[1] << GF_ADDR_SHIFT,
5972+ regs + SNF_GF_CTL1);
5973+ writel(nfi_spi->col_addr[2], regs + SNF_RD_CTL3);
5974+ }
5975+
5976+ writel(val, regs + SNF_RD_CTL2);
5977+
5978+ writel(NFI_CMD_DUMMY_RD, regs + NFI_CMD);
5979+
5980+ return ret;
5981+}
5982+
5983+static bool is_page_empty(struct nfi_base *nb, u8 *data, u8 *fdm,
5984+ int sectors)
5985+{
5986+ u32 *data32 = (u32 *)data;
5987+ u32 *fdm32 = (u32 *)fdm;
5988+ u32 i, count = 0;
5989+
5990+ for (i = 0; i < nb->format.page_size >> 2; i++) {
5991+		if (data32[i] != 0xffffffff) {
5992+ count += zero_popcount(data32[i]);
5993+ if (count > 10) {
5994+ pr_info("%s %d %d count:%d\n",
5995+ __func__, __LINE__, i, count);
5996+ return false;
5997+ }
5998+ }
5999+ }
6000+
6001+ if (fdm) {
6002+ for (i = 0; i < (nb->nfi.fdm_size * sectors >> 2); i++)
6003+ if (fdm32[i] != 0xffff) {
6004+ count += zero_popcount(fdm32[i]);
6005+ if (count > 10) {
6006+ pr_info("%s %d %d count:%d\n",
6007+ __func__, __LINE__, i, count);
6008+ return false;
6009+ }
6010+ }
6011+ }
6012+
6013+ return true;
6014+}
6015+
6016+static int rw_prepare(struct nfi_base *nb, int sectors, u8 *data,
6017+ u8 *fdm,
6018+ bool read)
6019+{
6020+ struct nfi_spi *nfi_spi = base_to_snfi(nb);
6021+ int ret;
6022+
6023+ ret = nfi_spi->parent->rw_prepare(nb, sectors, data, fdm, read);
6024+ if (ret)
6025+ return ret;
6026+
6027+ if (read)
6028+ ret = snfi_config_for_read(nb, sectors);
6029+ else
6030+ ret = snfi_config_for_write(nb, sectors);
6031+
6032+ return ret;
6033+}
6034+
6035+static void rw_complete(struct nfi_base *nb, u8 *data, u8 *fdm,
6036+ bool read)
6037+{
6038+ struct nfi_spi *nfi_spi = base_to_snfi(nb);
6039+ void *regs = nb->res.nfi_regs;
6040+ u32 val;
6041+
6042+ nfi_spi->parent->rw_complete(nb, data, fdm, read);
6043+
6044+ val = readl(regs + SNF_MISC_CTL);
6045+
6046+ if (read)
6047+ val &= ~DATARD_CUSTOM_EN;
6048+ else
6049+ val &= ~PG_LOAD_CUSTOM_EN;
6050+
6051+ writel(val, regs + SNF_MISC_CTL);
6052+
6053+ nfi_spi->tx_count = 0;
6054+ nfi_spi->cur_cmd_idx = 0;
6055+ nfi_spi->cur_addr_idx = 0;
6056+}
6057+
6058+static void set_nfi_base_funcs(struct nfi_base *nb)
6059+{
6060+ nb->nfi.reset = snfi_reset;
6061+ nb->nfi.set_timing = snfi_set_timing;
6062+ nb->nfi.wait_ready = snfi_wait_ready;
6063+
6064+ nb->nfi.send_cmd = snfi_send_command;
6065+ nb->nfi.send_addr = snfi_send_address;
6066+ nb->nfi.trigger = snfi_trigger;
6067+ nb->nfi.nfi_ctrl = snfi_ctrl;
6068+ nb->nfi.select_chip = snfi_select_chip;
6069+
6070+ nb->nfi.read_bytes = snfi_read_bytes;
6071+ nb->nfi.write_bytes = snfi_write_bytes;
6072+
6073+ nb->rw_prepare = rw_prepare;
6074+ nb->rw_complete = rw_complete;
6075+ nb->is_page_empty = is_page_empty;
6076+
6077+}
6078+
6079+struct nfi *nfi_extend_init(struct nfi_base *nb)
6080+{
6081+ struct nfi_spi *nfi_spi;
6082+
6083+ nfi_spi = mem_alloc(1, sizeof(struct nfi_spi));
6084+ if (!nfi_spi) {
6085+ pr_info("snfi alloc memory fail @%s.\n", __func__);
6086+ return NULL;
6087+ }
6088+
6089+ memcpy(&nfi_spi->base, nb, sizeof(struct nfi_base));
6090+ nfi_spi->parent = nb;
6091+
6092+ nfi_spi->read_cache_mode = SNFI_RX_114;
6093+ nfi_spi->write_cache_mode = SNFI_TX_114;
6094+
6095+ set_nfi_base_funcs(&nfi_spi->base);
6096+
6097+ /* Change nfi to spi mode */
6098+ writel(SPI_MODE, nb->res.nfi_regs + SNF_SNF_CNFG);
6099+
6100+ return &(nfi_spi->base.nfi);
6101+}
6102+
6103+void nfi_extend_exit(struct nfi_base *nb)
6104+{
6105+ struct nfi_spi *nfi_spi = base_to_snfi(nb);
6106+
6107+ mem_free(nfi_spi->parent);
6108+ mem_free(nfi_spi);
6109+}
6110+
6111diff --git a/drivers/mtd/nandx/core/nfi/nfi_spi.h b/drivers/mtd/nandx/core/nfi/nfi_spi.h
6112new file mode 100644
6113index 0000000000..a52255663a
6114--- /dev/null
6115+++ b/drivers/mtd/nandx/core/nfi/nfi_spi.h
6116@@ -0,0 +1,44 @@
6117+/*
6118+ * Copyright (C) 2017 MediaTek Inc.
6119+ * Licensed under either
6120+ * BSD Licence, (see NOTICE for more details)
6121+ * GNU General Public License, version 2.0, (see NOTICE for more details)
6122+ */
6123+
6124+#ifndef __NFI_SPI_H__
6125+#define __NFI_SPI_H__
6126+
6127+#define SPI_NAND_MAX_DELAY 6
6128+#define SPI_NAND_MAX_OP 4
6129+
6130+/*TODO - add comments */
6131+struct nfi_spi_delay {
6132+ u8 tCLK_SAM_DLY;
6133+ u8 tCLK_OUT_DLY;
6134+ u8 tCS_DLY;
6135+ u8 tWR_EN_DLY;
6136+ u8 tIO_IN_DLY[4];
6137+ u8 tIO_OUT_DLY[4];
6138+ u8 tREAD_LATCH_LATENCY;
6139+};
6140+
6141+/* SPI Nand structure */
6142+struct nfi_spi {
6143+ struct nfi_base base;
6144+ struct nfi_base *parent;
6145+
6146+ u8 snfi_mode;
6147+ u8 tx_count;
6148+
6149+ u8 cmd[SPI_NAND_MAX_OP];
6150+ u8 cur_cmd_idx;
6151+
6152+ u32 row_addr[SPI_NAND_MAX_OP];
6153+ u32 col_addr[SPI_NAND_MAX_OP];
6154+ u8 cur_addr_idx;
6155+
6156+ u8 read_cache_mode;
6157+ u8 write_cache_mode;
6158+};
6159+
6160+#endif /* __NFI_SPI_H__ */
6161diff --git a/drivers/mtd/nandx/core/nfi/nfi_spi_regs.h b/drivers/mtd/nandx/core/nfi/nfi_spi_regs.h
6162new file mode 100644
6163index 0000000000..77adf46782
6164--- /dev/null
6165+++ b/drivers/mtd/nandx/core/nfi/nfi_spi_regs.h
6166@@ -0,0 +1,64 @@
6167+/*
6168+ * Copyright (C) 2017 MediaTek Inc.
6169+ * Licensed under either
6170+ * BSD Licence, (see NOTICE for more details)
6171+ * GNU General Public License, version 2.0, (see NOTICE for more details)
6172+ */
6173+
6174+#ifndef __NFI_SPI_REGS_H__
6175+#define __NFI_SPI_REGS_H__
6176+
6177+#define SNF_MAC_CTL 0x500
6178+#define WIP BIT(0)
6179+#define WIP_READY BIT(1)
6180+#define SF_TRIG BIT(2)
6181+#define SF_MAC_EN BIT(3)
6182+#define MAC_XIO_SEL BIT(4)
6183+#define SNF_MAC_OUTL 0x504
6184+#define SNF_MAC_INL 0x508
6185+#define SNF_RD_CTL1 0x50c
6186+#define PAGE_READ_CMD_SHIFT 24
6187+#define SNF_RD_CTL2 0x510
6188+#define SNF_RD_CTL3 0x514
6189+#define SNF_GF_CTL1 0x518
6190+#define GF_ADDR_SHIFT 16
6191+#define GF_CMD_SHIFT 24
6192+#define SNF_GF_CTL3 0x520
6193+#define SNF_PG_CTL1 0x524
6194+#define PG_EXE_CMD_SHIFT 16
6195+#define PG_LOAD_CMD_SHIFT 8
6196+#define SNF_PG_CTL2 0x528
6197+#define SNF_PG_CTL3 0x52c
6198+#define SNF_ER_CTL 0x530
6199+#define SNF_ER_CTL2 0x534
6200+#define SNF_MISC_CTL 0x538
6201+#define SW_RST BIT(28)
6202+#define PG_LOAD_X4_EN BIT(20)
6203+#define X2_DATA_MODE 1
6204+#define X4_DATA_MODE 2
6205+#define DUAL_IO_MODE 5
6206+#define QUAD_IO_MODE 6
6207+#define READ_MODE_SHIFT 16
6208+#define LATCH_LAT_SHIFT 8
6209+#define LATCH_LAT_MASK GENMASK(9, 8)
6210+#define DARA_READ_MODE_MASK GENMASK(18, 16)
6211+#define SF2CS_SEL BIT(13)
6212+#define SF2CS_EN BIT(12)
6213+#define PG_LOAD_CUSTOM_EN BIT(7)
6214+#define DATARD_CUSTOM_EN BIT(6)
6215+#define SNF_MISC_CTL2 0x53c
6216+#define PG_LOAD_SHIFT 16
6217+#define SNF_DLY_CTL1 0x540
6218+#define SNF_DLY_CTL2 0x544
6219+#define SNF_DLY_CTL3 0x548
6220+#define SNF_DLY_CTL4 0x54c
6221+#define SNF_STA_CTL1 0x550
6222+#define SPI_STATE GENMASK(3, 0)
6223+#define SNF_STA_CTL2 0x554
6224+#define SNF_STA_CTL3 0x558
6225+#define SNF_SNF_CNFG 0x55c
6226+#define SPI_MODE BIT(0)
6227+#define SNF_DEBUG_SEL 0x560
6228+#define SPI_GPRAM_ADDR 0x800
6229+
6230+#endif /* __NFI_SPI_REGS_H__ */
6231diff --git a/drivers/mtd/nandx/core/nfi/nfiecc.c b/drivers/mtd/nandx/core/nfi/nfiecc.c
6232new file mode 100644
6233index 0000000000..14246fbc3e
6234--- /dev/null
6235+++ b/drivers/mtd/nandx/core/nfi/nfiecc.c
6236@@ -0,0 +1,510 @@
6237+/*
6238+ * Copyright (C) 2017 MediaTek Inc.
6239+ * Licensed under either
6240+ * BSD Licence, (see NOTICE for more details)
6241+ * GNU General Public License, version 2.0, (see NOTICE for more details)
6242+ */
6243+
6244+#include "nandx_util.h"
6245+#include "nandx_core.h"
6246+#include "nfiecc_regs.h"
6247+#include "nfiecc.h"
6248+
6249+#define NFIECC_IDLE_REG(op) \
6250+ ((op) == ECC_ENCODE ? NFIECC_ENCIDLE : NFIECC_DECIDLE)
6251+#define IDLE_MASK 1
6252+#define NFIECC_CTL_REG(op) \
6253+ ((op) == ECC_ENCODE ? NFIECC_ENCCON : NFIECC_DECCON)
6254+#define NFIECC_IRQ_REG(op) \
6255+ ((op) == ECC_ENCODE ? NFIECC_ENCIRQEN : NFIECC_DECIRQEN)
6256+#define NFIECC_ADDR(op) \
6257+ ((op) == ECC_ENCODE ? NFIECC_ENCDIADDR : NFIECC_DECDIADDR)
6258+
6259+#define ECC_TIMEOUT 500000
6260+
6261+/* ecc strength that each IP supports */
6262+static const int ecc_strength_mt7622[] = {
6263+ 4, 6, 8, 10, 12, 14, 16
6264+};
6265+
6266+static int nfiecc_irq_handler(void *data)
6267+{
6268+ struct nfiecc *ecc = data;
6269+ void *regs = ecc->res.regs;
6270+ u32 status;
6271+
6272+ status = readl(regs + NFIECC_DECIRQSTA) & DEC_IRQSTA_GEN;
6273+ if (status) {
6274+ status = readl(regs + NFIECC_DECDONE);
6275+ if (!(status & ecc->config.sectors))
6276+ return NAND_IRQ_NONE;
6277+
6278+ /*
6279+ * Clear decode IRQ status once again to ensure that
6280+ * there will be no extra IRQ.
6281+ */
6282+ readl(regs + NFIECC_DECIRQSTA);
6283+ ecc->config.sectors = 0;
6284+ nandx_event_complete(ecc->done);
6285+ } else {
6286+ status = readl(regs + NFIECC_ENCIRQSTA) & ENC_IRQSTA_GEN;
6287+ if (!status)
6288+ return NAND_IRQ_NONE;
6289+
6290+ nandx_event_complete(ecc->done);
6291+ }
6292+
6293+ return NAND_IRQ_HANDLED;
6294+}
6295+
6296+static inline int nfiecc_wait_idle(struct nfiecc *ecc)
6297+{
6298+ int op = ecc->config.op;
6299+ int ret, val;
6300+
6301+ ret = readl_poll_timeout_atomic(ecc->res.regs + NFIECC_IDLE_REG(op),
6302+ val, val & IDLE_MASK,
6303+ 10, ECC_TIMEOUT);
6304+ if (ret)
6305+ pr_info("%s not idle\n",
6306+ op == ECC_ENCODE ? "encoder" : "decoder");
6307+
6308+ return ret;
6309+}
6310+
6311+static int nfiecc_wait_encode_done(struct nfiecc *ecc)
6312+{
6313+ int ret, val;
6314+
6315+ if (ecc->ecc_irq_en) {
6316+ /* poll one time to avoid missing irq event */
6317+ ret = readl_poll_timeout_atomic(ecc->res.regs + NFIECC_ENCSTA,
6318+ val, val & ENC_FSM_IDLE, 1, 1);
6319+ if (!ret)
6320+ return 0;
6321+
6322+ /* irq done, if not, we can go on to poll status for a while */
6323+ ret = nandx_event_wait_complete(ecc->done, ECC_TIMEOUT);
6324+ if (ret)
6325+ return 0;
6326+ }
6327+
6328+ ret = readl_poll_timeout_atomic(ecc->res.regs + NFIECC_ENCSTA,
6329+ val, val & ENC_FSM_IDLE,
6330+ 10, ECC_TIMEOUT);
6331+ if (ret)
6332+ pr_info("encode timeout\n");
6333+
6334+ return ret;
6335+
6336+}
6337+
6338+static int nfiecc_wait_decode_done(struct nfiecc *ecc)
6339+{
6340+ u32 secbit = BIT(ecc->config.sectors - 1);
6341+ void *regs = ecc->res.regs;
6342+ int ret, val;
6343+
6344+ if (ecc->ecc_irq_en) {
6345+ ret = readl_poll_timeout_atomic(regs + NFIECC_DECDONE,
6346+ val, val & secbit, 1, 1);
6347+ if (!ret)
6348+ return 0;
6349+
6350+ ret = nandx_event_wait_complete(ecc->done, ECC_TIMEOUT);
6351+ if (ret)
6352+ return 0;
6353+ }
6354+
6355+ ret = readl_poll_timeout_atomic(regs + NFIECC_DECDONE,
6356+ val, val & secbit,
6357+ 10, ECC_TIMEOUT);
6358+ if (ret) {
6359+ pr_info("decode timeout\n");
6360+ return ret;
6361+ }
6362+
6363+	/* decode done does not stand for all ecc work being done.
6364+ * we need check syn, bma, chien, autoc all idle.
6365+ * just check it when ECC_DECCNFG[13:12] is 3,
6366+ * which means auto correct.
6367+ */
6368+ ret = readl_poll_timeout_atomic(regs + NFIECC_DECFSM,
6369+ val, (val & FSM_MASK) == FSM_IDLE,
6370+ 10, ECC_TIMEOUT);
6371+ if (ret)
6372+ pr_info("decode fsm(0x%x) is not idle\n",
6373+ readl(regs + NFIECC_DECFSM));
6374+
6375+ return ret;
6376+}
6377+
6378+static int nfiecc_wait_done(struct nfiecc *ecc)
6379+{
6380+ if (ecc->config.op == ECC_ENCODE)
6381+ return nfiecc_wait_encode_done(ecc);
6382+
6383+ return nfiecc_wait_decode_done(ecc);
6384+}
6385+
6386+static void nfiecc_encode_config(struct nfiecc *ecc, u32 ecc_idx)
6387+{
6388+ struct nfiecc_config *config = &ecc->config;
6389+ u32 val;
6390+
6391+ val = ecc_idx | (config->mode << ecc->caps->ecc_mode_shift);
6392+
6393+ if (config->mode == ECC_DMA_MODE)
6394+ val |= ENC_BURST_EN;
6395+
6396+ val |= (config->len << 3) << ENCCNFG_MS_SHIFT;
6397+ writel(val, ecc->res.regs + NFIECC_ENCCNFG);
6398+}
6399+
6400+static void nfiecc_decode_config(struct nfiecc *ecc, u32 ecc_idx)
6401+{
6402+ struct nfiecc_config *config = &ecc->config;
6403+ u32 dec_sz = (config->len << 3) +
6404+ config->strength * ecc->caps->parity_bits;
6405+ u32 val;
6406+
6407+ val = ecc_idx | (config->mode << ecc->caps->ecc_mode_shift);
6408+
6409+ if (config->mode == ECC_DMA_MODE)
6410+ val |= DEC_BURST_EN;
6411+
6412+ val |= (dec_sz << DECCNFG_MS_SHIFT) |
6413+ (config->deccon << DEC_CON_SHIFT);
6414+ val |= DEC_EMPTY_EN;
6415+ writel(val, ecc->res.regs + NFIECC_DECCNFG);
6416+}
6417+
6418+static void nfiecc_config(struct nfiecc *ecc)
6419+{
6420+ u32 idx;
6421+
6422+ for (idx = 0; idx < ecc->caps->ecc_strength_num; idx++) {
6423+ if (ecc->config.strength == ecc->caps->ecc_strength[idx])
6424+ break;
6425+ }
6426+
6427+ if (ecc->config.op == ECC_ENCODE)
6428+ nfiecc_encode_config(ecc, idx);
6429+ else
6430+ nfiecc_decode_config(ecc, idx);
6431+}
6432+
6433+static int nfiecc_enable(struct nfiecc *ecc)
6434+{
6435+ enum nfiecc_operation op = ecc->config.op;
6436+ void *regs = ecc->res.regs;
6437+
6438+ nfiecc_config(ecc);
6439+
6440+ writel(ECC_OP_EN, regs + NFIECC_CTL_REG(op));
6441+
6442+ if (ecc->ecc_irq_en) {
6443+ writel(ECC_IRQEN, regs + NFIECC_IRQ_REG(op));
6444+
6445+ if (ecc->page_irq_en)
6446+ writel(ECC_IRQEN | ECC_PG_IRQ_SEL,
6447+ regs + NFIECC_IRQ_REG(op));
6448+
6449+ nandx_event_init(ecc->done);
6450+ }
6451+
6452+ return 0;
6453+}
6454+
6455+static int nfiecc_disable(struct nfiecc *ecc)
6456+{
6457+ enum nfiecc_operation op = ecc->config.op;
6458+ void *regs = ecc->res.regs;
6459+
6460+ nfiecc_wait_idle(ecc);
6461+
6462+ writel(0, regs + NFIECC_IRQ_REG(op));
6463+ writel(~ECC_OP_EN, regs + NFIECC_CTL_REG(op));
6464+
6465+ return 0;
6466+}
6467+
6468+static int nfiecc_correct_data(struct nfiecc *ecc,
6469+ struct nfiecc_status *status,
6470+ u8 *data, u32 sector)
6471+{
6472+ u32 err, offset, i;
6473+ u32 loc, byteloc, bitloc;
6474+
6475+ status->corrected = 0;
6476+ status->failed = 0;
6477+
6478+ offset = (sector >> 2);
6479+ err = readl(ecc->res.regs + NFIECC_DECENUM(offset));
6480+ err >>= (sector % 4) * 8;
6481+ err &= ecc->caps->err_mask;
6482+
6483+ if (err == ecc->caps->err_mask) {
6484+ status->failed++;
6485+ return -ENANDREAD;
6486+ }
6487+
6488+ status->corrected += err;
6489+ status->bitflips = max_t(u32, status->bitflips, err);
6490+
6491+ for (i = 0; i < err; i++) {
6492+ loc = readl(ecc->res.regs + NFIECC_DECEL(i >> 1));
6493+ loc >>= ((i & 0x1) << 4);
6494+ byteloc = loc >> 3;
6495+ bitloc = loc & 0x7;
6496+ data[byteloc] ^= (1 << bitloc);
6497+ }
6498+
6499+ return 0;
6500+}
6501+
6502+static int nfiecc_fill_data(struct nfiecc *ecc, u8 *data)
6503+{
6504+ struct nfiecc_config *config = &ecc->config;
6505+ void *regs = ecc->res.regs;
6506+ int size, ret, i;
6507+ u32 val;
6508+
6509+ if (config->mode == ECC_DMA_MODE) {
6510+ if ((unsigned long)config->dma_addr & 0x3)
6511+ pr_info("encode address is not 4B aligned: 0x%x\n",
6512+ (u32)(unsigned long)config->dma_addr);
6513+
6514+ writel((unsigned long)config->dma_addr,
6515+ regs + NFIECC_ADDR(config->op));
6516+ } else if (config->mode == ECC_PIO_MODE) {
6517+ if (config->op == ECC_ENCODE) {
6518+ size = (config->len + 3) >> 2;
6519+ } else {
6520+ size = config->strength * ecc->caps->parity_bits;
6521+ size = (size + 7) >> 3;
6522+ size += config->len;
6523+ size >>= 2;
6524+ }
6525+
6526+ for (i = 0; i < size; i++) {
6527+ ret = readl_poll_timeout_atomic(regs + NFIECC_PIO_DIRDY,
6528+ val, val & PIO_DI_RDY,
6529+ 10, ECC_TIMEOUT);
6530+ if (ret)
6531+ return ret;
6532+
6533+ writel(*((u32 *)data + i), regs + NFIECC_PIO_DI);
6534+ }
6535+ }
6536+
6537+ return 0;
6538+}
6539+
6540+static int nfiecc_encode(struct nfiecc *ecc, u8 *data)
6541+{
6542+ struct nfiecc_config *config = &ecc->config;
6543+ u32 len, i, val = 0;
6544+ u8 *p;
6545+ int ret;
6546+
6547+ /* Under NFI mode, nothing need to do */
6548+ if (config->mode == ECC_NFI_MODE)
6549+ return 0;
6550+
6551+ ret = nfiecc_fill_data(ecc, data);
6552+ if (ret)
6553+ return ret;
6554+
6555+ ret = nfiecc_wait_encode_done(ecc);
6556+ if (ret)
6557+ return ret;
6558+
6559+ ret = nfiecc_wait_idle(ecc);
6560+ if (ret)
6561+ return ret;
6562+
6563+ /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
6564+ len = (config->strength * ecc->caps->parity_bits + 7) >> 3;
6565+ p = data + config->len;
6566+
6567+ /* Write the parity bytes generated by the ECC back to the OOB region */
6568+ for (i = 0; i < len; i++) {
6569+ if ((i % 4) == 0)
6570+ val = readl(ecc->res.regs + NFIECC_ENCPAR(i / 4));
6571+
6572+ p[i] = (val >> ((i % 4) * 8)) & 0xff;
6573+ }
6574+
6575+ return 0;
6576+}
6577+
6578+static int nfiecc_decode(struct nfiecc *ecc, u8 *data)
6579+{
6580+ int ret;
6581+
6582+ /* Under NFI mode, nothing need to do */
6583+ if (ecc->config.mode == ECC_NFI_MODE)
6584+ return 0;
6585+
6586+ ret = nfiecc_fill_data(ecc, data);
6587+ if (ret)
6588+ return ret;
6589+
6590+ return nfiecc_wait_decode_done(ecc);
6591+}
6592+
6593+static int nfiecc_decode_status(struct nfiecc *ecc, u32 start_sector,
6594+ u32 sectors)
6595+{
6596+ void *regs = ecc->res.regs;
6597+ u32 i, val = 0, err;
6598+ u32 bitflips = 0;
6599+
6600+ for (i = start_sector; i < start_sector + sectors; i++) {
6601+ if ((i % 4) == 0)
6602+ val = readl(regs + NFIECC_DECENUM(i / 4));
6603+
6604+ err = val >> ((i % 4) * 5);
6605+ err &= ecc->caps->err_mask;
6606+
6607+ if (err == ecc->caps->err_mask)
6608+			pr_err("sector %d is uncorrectable\n", i);
6609+
6610+ bitflips = max_t(u32, bitflips, err);
6611+ }
6612+
6613+ if (bitflips == ecc->caps->err_mask)
6614+ return -ENANDREAD;
6615+
6616+ if (bitflips)
6617+ pr_info("bitflips %d is corrected\n", bitflips);
6618+
6619+ return bitflips;
6620+}
6621+
6622+static int nfiecc_adjust_strength(struct nfiecc *ecc, int strength)
6623+{
6624+ struct nfiecc_caps *caps = ecc->caps;
6625+ int i, count = caps->ecc_strength_num;
6626+
6627+ if (strength >= caps->ecc_strength[count - 1])
6628+ return caps->ecc_strength[count - 1];
6629+
6630+ if (strength < caps->ecc_strength[0])
6631+ return -EINVAL;
6632+
6633+ for (i = 1; i < count; i++) {
6634+ if (strength < caps->ecc_strength[i])
6635+ return caps->ecc_strength[i - 1];
6636+ }
6637+
6638+ return -EINVAL;
6639+}
6640+
6641+static int nfiecc_ctrl(struct nfiecc *ecc, int cmd, void *args)
6642+{
6643+ int ret = 0;
6644+
6645+ switch (cmd) {
6646+ case NFI_CTRL_ECC_IRQ:
6647+ ecc->ecc_irq_en = *(bool *)args;
6648+ break;
6649+
6650+ case NFI_CTRL_ECC_PAGE_IRQ:
6651+ ecc->page_irq_en = *(bool *)args;
6652+ break;
6653+
6654+ default:
6655+ pr_info("invalid arguments.\n");
6656+ ret = -EINVAL;
6657+ break;
6658+ }
6659+
6660+ return ret;
6661+}
6662+
6663+static int nfiecc_hw_init(struct nfiecc *ecc)
6664+{
6665+ int ret;
6666+
6667+ ret = nfiecc_wait_idle(ecc);
6668+ if (ret)
6669+ return ret;
6670+
6671+ writel(~ECC_OP_EN, ecc->res.regs + NFIECC_ENCCON);
6672+
6673+ ret = nfiecc_wait_idle(ecc);
6674+ if (ret)
6675+ return ret;
6676+
6677+ writel(~ECC_OP_EN, ecc->res.regs + NFIECC_DECCON);
6678+
6679+ return 0;
6680+}
6681+
6682+static struct nfiecc_caps nfiecc_caps_mt7622 = {
6683+ .err_mask = 0x1f,
6684+ .ecc_mode_shift = 4,
6685+ .parity_bits = 13,
6686+ .ecc_strength = ecc_strength_mt7622,
6687+ .ecc_strength_num = 7,
6688+};
6689+
6690+static struct nfiecc_caps *nfiecc_get_match_data(enum mtk_ic_version ic)
6691+{
6692+ /* NOTE: add other IC's data */
6693+ return &nfiecc_caps_mt7622;
6694+}
6695+
6696+struct nfiecc *nfiecc_init(struct nfiecc_resource *res)
6697+{
6698+ struct nfiecc *ecc;
6699+ int ret;
6700+
6701+ ecc = mem_alloc(1, sizeof(struct nfiecc));
6702+ if (!ecc)
6703+ return NULL;
6704+
6705+ ecc->res = *res;
6706+
6707+ ret = nandx_irq_register(res->dev, res->irq_id, nfiecc_irq_handler,
6708+ "mtk-ecc", ecc);
6709+ if (ret) {
6710+ pr_info("ecc irq register failed!\n");
6711+ goto error;
6712+ }
6713+
6714+ ecc->ecc_irq_en = false;
6715+ ecc->page_irq_en = false;
6716+ ecc->done = nandx_event_create();
6717+ ecc->caps = nfiecc_get_match_data(res->ic_ver);
6718+
6719+ ecc->adjust_strength = nfiecc_adjust_strength;
6720+ ecc->enable = nfiecc_enable;
6721+ ecc->disable = nfiecc_disable;
6722+ ecc->decode = nfiecc_decode;
6723+ ecc->encode = nfiecc_encode;
6724+ ecc->wait_done = nfiecc_wait_done;
6725+ ecc->decode_status = nfiecc_decode_status;
6726+ ecc->correct_data = nfiecc_correct_data;
6727+ ecc->nfiecc_ctrl = nfiecc_ctrl;
6728+
6729+ ret = nfiecc_hw_init(ecc);
6730+ if (ret)
6731+ return NULL;
6732+
6733+ return ecc;
6734+
6735+error:
6736+ mem_free(ecc);
6737+
6738+ return NULL;
6739+}
6740+
6741+void nfiecc_exit(struct nfiecc *ecc)
6742+{
6743+ nandx_event_destroy(ecc->done);
6744+ mem_free(ecc);
6745+}
6746+
6747diff --git a/drivers/mtd/nandx/core/nfi/nfiecc.h b/drivers/mtd/nandx/core/nfi/nfiecc.h
6748new file mode 100644
6749index 0000000000..b02a5c3534
6750--- /dev/null
6751+++ b/drivers/mtd/nandx/core/nfi/nfiecc.h
6752@@ -0,0 +1,90 @@
6753+/*
6754+ * Copyright (C) 2017 MediaTek Inc.
6755+ * Licensed under either
6756+ * BSD Licence, (see NOTICE for more details)
6757+ * GNU General Public License, version 2.0, (see NOTICE for more details)
6758+ */
6759+
6760+#ifndef __NFIECC_H__
6761+#define __NFIECC_H__
6762+
6763+enum nfiecc_mode {
6764+ ECC_DMA_MODE,
6765+ ECC_NFI_MODE,
6766+ ECC_PIO_MODE
6767+};
6768+
6769+enum nfiecc_operation {
6770+ ECC_ENCODE,
6771+ ECC_DECODE
6772+};
6773+
6774+enum nfiecc_deccon {
6775+ ECC_DEC_FER = 1,
6776+ ECC_DEC_LOCATE = 2,
6777+ ECC_DEC_CORRECT = 3
6778+};
6779+
6780+struct nfiecc_resource {
6781+ int ic_ver;
6782+ void *dev;
6783+ void *regs;
6784+ int irq_id;
6785+
6786+};
6787+
6788+struct nfiecc_status {
6789+ u32 corrected;
6790+ u32 failed;
6791+ u32 bitflips;
6792+};
6793+
6794+struct nfiecc_caps {
6795+ u32 err_mask;
6796+ u32 ecc_mode_shift;
6797+ u32 parity_bits;
6798+ const int *ecc_strength;
6799+ u32 ecc_strength_num;
6800+};
6801+
6802+struct nfiecc_config {
6803+ enum nfiecc_operation op;
6804+ enum nfiecc_mode mode;
6805+ enum nfiecc_deccon deccon;
6806+
6807+ void *dma_addr; /* DMA use only */
6808+ u32 strength;
6809+ u32 sectors;
6810+ u32 len;
6811+};
6812+
6813+struct nfiecc {
6814+ struct nfiecc_resource res;
6815+ struct nfiecc_config config;
6816+ struct nfiecc_caps *caps;
6817+
6818+ bool ecc_irq_en;
6819+ bool page_irq_en;
6820+
6821+ void *done;
6822+
6823+ int (*adjust_strength)(struct nfiecc *ecc, int strength);
6824+ int (*enable)(struct nfiecc *ecc);
6825+ int (*disable)(struct nfiecc *ecc);
6826+
6827+ int (*decode)(struct nfiecc *ecc, u8 *data);
6828+ int (*encode)(struct nfiecc *ecc, u8 *data);
6829+
6830+ int (*decode_status)(struct nfiecc *ecc, u32 start_sector, u32 sectors);
6831+ int (*correct_data)(struct nfiecc *ecc,
6832+ struct nfiecc_status *status,
6833+ u8 *data, u32 sector);
6834+ int (*wait_done)(struct nfiecc *ecc);
6835+
6836+ int (*nfiecc_ctrl)(struct nfiecc *ecc, int cmd, void *args);
6837+};
6838+
6839+struct nfiecc *nfiecc_init(struct nfiecc_resource *res);
6840+void nfiecc_exit(struct nfiecc *ecc);
6841+
6842+#endif /* __NFIECC_H__ */
6843diff --git a/drivers/mtd/nandx/core/nfi/nfiecc_regs.h b/drivers/mtd/nandx/core/nfi/nfiecc_regs.h
6844new file mode 100644
6845index 0000000000..96564cf872
6846--- /dev/null
6847+++ b/drivers/mtd/nandx/core/nfi/nfiecc_regs.h
6848@@ -0,0 +1,51 @@
6849+/*
6850+ * Copyright (C) 2017 MediaTek Inc.
6851+ * Licensed under either
6852+ * BSD Licence, (see NOTICE for more details)
6853+ * GNU General Public License, version 2.0, (see NOTICE for more details)
6854+ */
6855+
6856+#ifndef __NFIECC_REGS_H__
6857+#define __NFIECC_REGS_H__
6858+
6859+#define NFIECC_ENCCON 0x000
6860+/* NFIECC_DECCON has same bit define */
6861+#define ECC_OP_EN BIT(0)
6862+#define NFIECC_ENCCNFG 0x004
6863+#define ENCCNFG_MS_SHIFT 16
6864+#define ENC_BURST_EN BIT(8)
6865+#define NFIECC_ENCDIADDR 0x008
6866+#define NFIECC_ENCIDLE 0x00c
6867+#define NFIECC_ENCSTA 0x02c
6868+#define ENC_FSM_IDLE 1
6869+#define NFIECC_ENCIRQEN 0x030
6870+/* NFIECC_DECIRQEN has same bit define */
6871+#define ECC_IRQEN BIT(0)
6872+#define ECC_PG_IRQ_SEL BIT(1)
6873+#define NFIECC_ENCIRQSTA 0x034
6874+#define ENC_IRQSTA_GEN BIT(0)
6875+#define NFIECC_PIO_DIRDY 0x080
6876+#define PIO_DI_RDY BIT(0)
6877+#define NFIECC_PIO_DI 0x084
6878+#define NFIECC_DECCON 0x100
6879+#define NFIECC_DECCNFG 0x104
6880+#define DEC_BURST_EN BIT(8)
6881+#define DEC_EMPTY_EN BIT(31)
6882+#define DEC_CON_SHIFT 12
6883+#define DECCNFG_MS_SHIFT 16
6884+#define NFIECC_DECDIADDR 0x108
6885+#define NFIECC_DECIDLE 0x10c
6886+#define NFIECC_DECENUM(x) (0x114 + (x) * 4)
6887+#define NFIECC_DECDONE 0x11c
6888+#define NFIECC_DECIRQEN 0x140
6889+#define NFIECC_DECIRQSTA 0x144
6890+#define DEC_IRQSTA_GEN BIT(0)
6891+#define NFIECC_DECFSM 0x14c
6892+#define FSM_MASK 0x7f0f0f0f
6893+#define FSM_IDLE 0x01010101
6894+#define NFIECC_BYPASS 0x20c
6895+#define NFIECC_BYPASS_EN BIT(0)
6896+#define NFIECC_ENCPAR(x) (0x010 + (x) * 4)
6897+#define NFIECC_DECEL(x) (0x120 + (x) * 4)
6898+
6899+#endif /* __NFIECC_REGS_H__ */
6900diff --git a/drivers/mtd/nandx/driver/Nandx.mk b/drivers/mtd/nandx/driver/Nandx.mk
6901new file mode 100644
6902index 0000000000..3fb93d37c5
6903--- /dev/null
6904+++ b/drivers/mtd/nandx/driver/Nandx.mk
6905@@ -0,0 +1,18 @@
6906+#
6907+# Copyright (C) 2017 MediaTek Inc.
6908+# Licensed under either
6909+# BSD Licence, (see NOTICE for more details)
6910+# GNU General Public License, version 2.0, (see NOTICE for more details)
6911+#
6912+
6913+nandx-$(NANDX_SIMULATOR_SUPPORT) += simulator/driver.c
6914+
6915+nandx-$(NANDX_CTP_SUPPORT) += ctp/ts_nand.c
6916+nandx-$(NANDX_CTP_SUPPORT) += ctp/nand_test.c
6917+nandx-header-$(NANDX_CTP_SUPPORT) += ctp/nand_test.h
6918+
6919+nandx-$(NANDX_BBT_SUPPORT) += bbt/bbt.c
6920+nandx-$(NANDX_BROM_SUPPORT) += brom/driver.c
6921+nandx-$(NANDX_KERNEL_SUPPORT) += kernel/driver.c
6922+nandx-$(NANDX_LK_SUPPORT) += lk/driver.c
6923+nandx-$(NANDX_UBOOT_SUPPORT) += uboot/driver.c
6924diff --git a/drivers/mtd/nandx/driver/bbt/bbt.c b/drivers/mtd/nandx/driver/bbt/bbt.c
6925new file mode 100644
6926index 0000000000..c9d4823e09
6927--- /dev/null
6928+++ b/drivers/mtd/nandx/driver/bbt/bbt.c
6929@@ -0,0 +1,408 @@
6930+/*
6931+ * Copyright (C) 2017 MediaTek Inc.
6932+ * Licensed under either
6933+ * BSD Licence, (see NOTICE for more details)
6934+ * GNU General Public License, version 2.0, (see NOTICE for more details)
6935+ */
6936+
6937+#include "nandx_util.h"
6938+#include "nandx_core.h"
6939+#include "bbt.h"
6940+
6941+/* Not support: multi-chip */
6942+static u8 main_bbt_pattern[] = {'B', 'b', 't', '0' };
6943+static u8 mirror_bbt_pattern[] = {'1', 't', 'b', 'B' };
6944+
6945+static struct bbt_manager g_bbt_manager = {
6946+ { {{main_bbt_pattern, 4}, 0, BBT_INVALID_ADDR},
6947+ {{mirror_bbt_pattern, 4}, 0, BBT_INVALID_ADDR}
6948+ },
6949+ NAND_BBT_SCAN_MAXBLOCKS, NULL
6950+};
6951+
6952+static inline void set_bbt_mark(u8 *bbt, int block, u8 mark)
6953+{
6954+ int index, offset;
6955+
6956+ index = GET_ENTRY(block);
6957+ offset = GET_POSITION(block);
6958+
6959+ bbt[index] &= ~(BBT_ENTRY_MASK << offset);
6960+ bbt[index] |= (mark & BBT_ENTRY_MASK) << offset;
6961+ pr_info("%s %d block:%d, bbt[%d]:0x%x, offset:%d, mark:%d\n",
6962+ __func__, __LINE__, block, index, bbt[index], offset, mark);
6963+}
6964+
6965+static inline u8 get_bbt_mark(u8 *bbt, int block)
6966+{
6967+ int offset = GET_POSITION(block);
6968+ int index = GET_ENTRY(block);
6969+ u8 value = bbt[index];
6970+
6971+ return (value >> offset) & BBT_ENTRY_MASK;
6972+}
6973+
6974+static void mark_nand_bad(struct nandx_info *nand, int block)
6975+{
6976+ u8 *buf;
6977+
6978+ buf = mem_alloc(1, nand->page_size + nand->oob_size);
6979+ if (!buf) {
6980+ pr_info("%s, %d, memory alloc fail, pagesize:%d, oobsize:%d\n",
6981+ __func__, __LINE__, nand->page_size, nand->oob_size);
6982+ return;
6983+ }
6984+ memset(buf, 0, nand->page_size + nand->oob_size);
6985+ nandx_erase(block * nand->block_size, nand->block_size);
6986+ nandx_write(buf, buf + nand->page_size, block * nand->block_size,
6987+ nand->page_size);
6988+ mem_free(buf);
6989+}
6990+
6991+static inline bool is_bbt_data(u8 *buf, struct bbt_pattern *pattern)
6992+{
6993+ int i;
6994+
6995+ for (i = 0; i < pattern->len; i++) {
6996+ if (buf[i] != pattern->data[i])
6997+ return false;
6998+ }
6999+
7000+ return true;
7001+}
7002+
7003+static u64 get_bbt_address(struct nandx_info *nand, u8 *bbt,
7004+ u64 mirror_addr,
7005+ int max_blocks)
7006+{
7007+ u64 addr, end_addr;
7008+ u8 mark;
7009+
7010+ addr = nand->total_size;
7011+ end_addr = nand->total_size - nand->block_size * max_blocks;
7012+
7013+ while (addr > end_addr) {
7014+ addr -= nand->block_size;
7015+ mark = get_bbt_mark(bbt, div_down(addr, nand->block_size));
7016+
7017+ if (mark == BBT_BLOCK_WORN || mark == BBT_BLOCK_FACTORY_BAD)
7018+ continue;
7019+ if (addr != mirror_addr)
7020+ return addr;
7021+ }
7022+
7023+ return BBT_INVALID_ADDR;
7024+}
7025+
7026+static int read_bbt(struct bbt_desc *desc, u8 *bbt, u32 len)
7027+{
7028+ int ret;
7029+
7030+ ret = nandx_read(bbt, NULL, desc->bbt_addr + desc->pattern.len + 1,
7031+ len);
7032+ if (ret < 0)
7033+ pr_info("nand_bbt: error reading BBT page, ret:-%x\n", ret);
7034+
7035+ return ret;
7036+}
7037+
7038+static void create_bbt(struct nandx_info *nand, u8 *bbt)
7039+{
7040+ u32 offset = 0, block = 0;
7041+
7042+ do {
7043+ if (nandx_is_bad_block(offset)) {
7044+ pr_info("Create bbt at bad block:%d\n", block);
7045+ set_bbt_mark(bbt, block, BBT_BLOCK_FACTORY_BAD);
7046+ }
7047+ block++;
7048+ offset += nand->block_size;
7049+ } while (offset < nand->total_size);
7050+}
7051+
7052+static int search_bbt(struct nandx_info *nand, struct bbt_desc *desc,
7053+ int max_blocks)
7054+{
7055+ u64 addr, end_addr;
7056+ u8 *buf;
7057+ int ret;
7058+
7059+ buf = mem_alloc(1, nand->page_size);
7060+ if (!buf) {
7061+ pr_info("%s, %d, mem alloc fail!!! len:%d\n",
7062+ __func__, __LINE__, nand->page_size);
7063+ return -ENOMEM;
7064+ }
7065+
7066+ addr = nand->total_size;
7067+ end_addr = nand->total_size - max_blocks * nand->block_size;
7068+ while (addr > end_addr) {
7069+ addr -= nand->block_size;
7070+
7071+ nandx_read(buf, NULL, addr, nand->page_size);
7072+
7073+ if (is_bbt_data(buf, &desc->pattern)) {
7074+ desc->bbt_addr = addr;
7075+ desc->version = buf[desc->pattern.len];
7076+ pr_info("BBT is found at addr 0x%llx, version %d\n",
7077+ desc->bbt_addr, desc->version);
7078+ ret = 0;
7079+ break;
7080+ }
7081+ ret = -EFAULT;
7082+ }
7083+
7084+ mem_free(buf);
7085+ return ret;
7086+}
7087+
7088+static int save_bbt(struct nandx_info *nand, struct bbt_desc *desc,
7089+ u8 *bbt)
7090+{
7091+ u32 page_size_mask, total_block;
7092+ int write_len;
7093+ u8 *buf;
7094+ int ret;
7095+
7096+ ret = nandx_erase(desc->bbt_addr, nand->block_size);
7097+ if (ret) {
7098+ pr_info("erase addr 0x%llx fail !!!, ret %d\n",
7099+ desc->bbt_addr, ret);
7100+ return ret;
7101+ }
7102+
7103+ total_block = div_down(nand->total_size, nand->block_size);
7104+ write_len = GET_BBT_LENGTH(total_block) + desc->pattern.len + 1;
7105+ page_size_mask = nand->page_size - 1;
7106+ write_len = (write_len + page_size_mask) & (~page_size_mask);
7107+
7108+ buf = (u8 *)mem_alloc(1, write_len);
7109+ if (!buf) {
7110+ pr_info("%s, %d, mem alloc fail!!! len:%d\n",
7111+ __func__, __LINE__, write_len);
7112+ return -ENOMEM;
7113+ }
7114+ memset(buf, 0xFF, write_len);
7115+
7116+ memcpy(buf, desc->pattern.data, desc->pattern.len);
7117+ buf[desc->pattern.len] = desc->version;
7118+
7119+ memcpy(buf + desc->pattern.len + 1, bbt, GET_BBT_LENGTH(total_block));
7120+
7121+ ret = nandx_write(buf, NULL, desc->bbt_addr, write_len);
7122+
7123+ if (ret)
7124+ pr_info("nandx_write fail(%d), offset:0x%llx, len(%d)\n",
7125+ ret, desc->bbt_addr, write_len);
7126+ mem_free(buf);
7127+
7128+ return ret;
7129+}
7130+
7131+static int write_bbt(struct nandx_info *nand, struct bbt_desc *main,
7132+ struct bbt_desc *mirror, u8 *bbt, int max_blocks)
7133+{
7134+ int block;
7135+ int ret;
7136+
7137+ do {
7138+ if (main->bbt_addr == BBT_INVALID_ADDR) {
7139+ main->bbt_addr = get_bbt_address(nand, bbt,
7140+ mirror->bbt_addr, max_blocks);
7141+ if (main->bbt_addr == BBT_INVALID_ADDR)
7142+ return -ENOSPC;
7143+ }
7144+
7145+ ret = save_bbt(nand, main, bbt);
7146+ if (!ret)
7147+ break;
7148+
7149+ block = div_down(main->bbt_addr, nand->block_size);
7150+ set_bbt_mark(bbt, block, BBT_BLOCK_WORN);
7151+ main->version++;
7152+ mark_nand_bad(nand, block);
7153+ main->bbt_addr = BBT_INVALID_ADDR;
7154+ } while (1);
7155+
7156+ return 0;
7157+}
7158+
7159+static void mark_bbt_region(struct nandx_info *nand, u8 *bbt, int bbt_blocks)
7160+{
7161+ int total_block;
7162+ int block;
7163+ u8 mark;
7164+
7165+ total_block = div_down(nand->total_size, nand->block_size);
7166+ block = total_block - bbt_blocks;
7167+
7168+ while (bbt_blocks) {
7169+ mark = get_bbt_mark(bbt, block);
7170+ if (mark == BBT_BLOCK_GOOD)
7171+ set_bbt_mark(bbt, block, BBT_BLOCK_RESERVED);
7172+ block++;
7173+ bbt_blocks--;
7174+ }
7175+}
7176+
7177+static void unmark_bbt_region(struct nandx_info *nand, u8 *bbt, int bbt_blocks)
7178+{
7179+ int total_block;
7180+ int block;
7181+ u8 mark;
7182+
7183+ total_block = div_down(nand->total_size, nand->block_size);
7184+ block = total_block - bbt_blocks;
7185+
7186+ while (bbt_blocks) {
7187+ mark = get_bbt_mark(bbt, block);
7188+ if (mark == BBT_BLOCK_RESERVED)
7189+ set_bbt_mark(bbt, block, BBT_BLOCK_GOOD);
7190+ block++;
7191+ bbt_blocks--;
7192+ }
7193+}
7194+
7195+static int update_bbt(struct nandx_info *nand, struct bbt_desc *desc,
7196+ u8 *bbt,
7197+ int max_blocks)
7198+{
7199+ int ret = 0, i;
7200+
7201+ /* The reserved info is not stored in NAND*/
7202+ unmark_bbt_region(nand, bbt, max_blocks);
7203+
7204+ desc[0].version++;
7205+ for (i = 0; i < 2; i++) {
7206+ if (i > 0)
7207+ desc[i].version = desc[i - 1].version;
7208+
7209+ ret = write_bbt(nand, &desc[i], &desc[1 - i], bbt, max_blocks);
7210+ if (ret)
7211+ break;
7212+ }
7213+ mark_bbt_region(nand, bbt, max_blocks);
7214+
7215+ return ret;
7216+}
7217+
7218+int scan_bbt(struct nandx_info *nand)
7219+{
7220+ struct bbt_manager *manager = &g_bbt_manager;
7221+ struct bbt_desc *pdesc;
7222+ int total_block, len, i;
7223+ int valid_desc = 0;
7224+ int ret = 0;
7225+ u8 *bbt;
7226+
7227+ total_block = div_down(nand->total_size, nand->block_size);
7228+ len = GET_BBT_LENGTH(total_block);
7229+
7230+ if (!manager->bbt) {
7231+ manager->bbt = (u8 *)mem_alloc(1, len);
7232+ if (!manager->bbt) {
7233+ pr_info("%s, %d, mem alloc fail!!! len:%d\n",
7234+ __func__, __LINE__, len);
7235+ return -ENOMEM;
7236+ }
7237+ }
7238+ bbt = manager->bbt;
7239+ memset(bbt, 0xFF, len);
7240+
7241+ /* scan bbt */
7242+ for (i = 0; i < 2; i++) {
7243+ pdesc = &manager->desc[i];
7244+ pdesc->bbt_addr = BBT_INVALID_ADDR;
7245+ pdesc->version = 0;
7246+ ret = search_bbt(nand, pdesc, manager->max_blocks);
7247+ if (!ret && (pdesc->bbt_addr != BBT_INVALID_ADDR))
7248+ valid_desc += 1 << i;
7249+ }
7250+
7251+ pdesc = &manager->desc[0];
7252+ if ((valid_desc == 0x3) && (pdesc[0].version != pdesc[1].version))
7253+ valid_desc = (pdesc[0].version > pdesc[1].version) ? 1 : 2;
7254+
7255+ /* read bbt */
7256+ for (i = 0; i < 2; i++) {
7257+ if (!(valid_desc & (1 << i)))
7258+ continue;
7259+ ret = read_bbt(&pdesc[i], bbt, len);
7260+ if (ret) {
7261+			pdesc[i].bbt_addr = BBT_INVALID_ADDR;
7262+			pdesc[i].version = 0;
7263+ valid_desc &= ~(1 << i);
7264+ }
7265+ /* If two BBT version is same, only need to read the first bbt*/
7266+ if ((valid_desc == 0x3) &&
7267+ (pdesc[0].version == pdesc[1].version))
7268+ break;
7269+ }
7270+
7271+ if (!valid_desc) {
7272+ create_bbt(nand, bbt);
7273+ pdesc[0].version = 1;
7274+ pdesc[1].version = 1;
7275+ }
7276+
7277+ pdesc[0].version = max_t(u8, pdesc[0].version, pdesc[1].version);
7278+ pdesc[1].version = pdesc[0].version;
7279+
7280+ for (i = 0; i < 2; i++) {
7281+ if (valid_desc & (1 << i))
7282+ continue;
7283+
7284+ ret = write_bbt(nand, &pdesc[i], &pdesc[1 - i], bbt,
7285+ manager->max_blocks);
7286+ if (ret) {
7287+ pr_info("write bbt(%d) fail, ret:%d\n", i, ret);
7288+ manager->bbt = NULL;
7289+ return ret;
7290+ }
7291+ }
7292+
7293+ /* Prevent the bbt regions from erasing / writing */
7294+ mark_bbt_region(nand, manager->bbt, manager->max_blocks);
7295+
7296+ for (i = 0; i < total_block; i++) {
7297+ if (get_bbt_mark(manager->bbt, i) == BBT_BLOCK_WORN)
7298+ pr_info("Checked WORN bad blk: %d\n", i);
7299+ else if (get_bbt_mark(manager->bbt, i) == BBT_BLOCK_FACTORY_BAD)
7300+ pr_info("Checked Factory bad blk: %d\n", i);
7301+ else if (get_bbt_mark(manager->bbt, i) == BBT_BLOCK_RESERVED)
7302+ pr_info("Checked Reserved blk: %d\n", i);
7303+ else if (get_bbt_mark(manager->bbt, i) != BBT_BLOCK_GOOD)
7304+ pr_info("Checked unknown blk: %d\n", i);
7305+ }
7306+
7307+ return 0;
7308+}
7309+
7310+int bbt_mark_bad(struct nandx_info *nand, off_t offset)
7311+{
7312+ struct bbt_manager *manager = &g_bbt_manager;
7313+ int block = div_down(offset, nand->block_size);
7314+ int ret = 0;
7315+
7316+ mark_nand_bad(nand, block);
7317+
7318+#if 0
7319+ set_bbt_mark(manager->bbt, block, BBT_BLOCK_WORN);
7320+
7321+ /* Update flash-based bad block table */
7322+ ret = update_bbt(nand, manager->desc, manager->bbt,
7323+ manager->max_blocks);
7324+#endif
7325+ pr_info("block %d, update result %d.\n", block, ret);
7326+
7327+ return ret;
7328+}
7329+
7330+int bbt_is_bad(struct nandx_info *nand, off_t offset)
7331+{
7332+ int block;
7333+
7334+ block = div_down(offset, nand->block_size);
7335+
7336+ return get_bbt_mark(g_bbt_manager.bbt, block) != BBT_BLOCK_GOOD;
7337+}
7338diff --git a/drivers/mtd/nandx/driver/uboot/driver.c b/drivers/mtd/nandx/driver/uboot/driver.c
7339new file mode 100644
7340index 0000000000..7bd3342452
7341--- /dev/null
7342+++ b/drivers/mtd/nandx/driver/uboot/driver.c
7343@@ -0,0 +1,574 @@
7344+/*
7345+ * Copyright (C) 2017 MediaTek Inc.
7346+ * Licensed under either
7347+ * BSD Licence, (see NOTICE for more details)
7348+ * GNU General Public License, version 2.0, (see NOTICE for more details)
7349+ */
7350+
7351+#include <common.h>
7352+#include <linux/io.h>
7353+#include <dm.h>
7354+#include <clk.h>
7355+#include <nand.h>
7356+#include <linux/iopoll.h>
7357+#include <linux/delay.h>
7358+#include <linux/mtd/nand.h>
7359+#include <linux/mtd/mtd.h>
7360+#include <linux/mtd/partitions.h>
7361+#include "nandx_core.h"
7362+#include "nandx_util.h"
7363+#include "bbt.h"
7364+
7365+typedef int (*func_nandx_operation)(u8 *, u8 *, u64, size_t);
7366+
7367+struct nandx_clk {
7368+ struct clk *nfi_clk;
7369+ struct clk *ecc_clk;
7370+ struct clk *snfi_clk;
7371+ struct clk *snfi_clk_sel;
7372+ struct clk *snfi_parent_50m;
7373+};
7374+
7375+struct nandx_nfc {
7376+ struct nandx_info info;
7377+ struct nandx_clk clk;
7378+ struct nfi_resource *res;
7379+
7380+ struct nand_chip *nand;
7381+ spinlock_t lock;
7382+};
7383+
7384+/* Default flash layout for MTK nand controller
7385+ * 64Bytes oob format.
7386+ */
7387+static struct nand_ecclayout eccoob = {
7388+ .eccbytes = 42,
7389+ .eccpos = {
7390+ 17, 18, 19, 20, 21, 22, 23, 24, 25,
7391+ 26, 27, 28, 29, 30, 31, 32, 33, 34,
7392+ 35, 36, 37, 38, 39, 40, 41
7393+ },
7394+ .oobavail = 16,
7395+ .oobfree = {
7396+ {
7397+ .offset = 0,
7398+ .length = 16,
7399+ },
7400+ }
7401+};
7402+
7403+static struct nandx_nfc *mtd_to_nfc(struct mtd_info *mtd)
7404+{
7405+ struct nand_chip *nand = mtd_to_nand(mtd);
7406+
7407+ return (struct nandx_nfc *)nand_get_controller_data(nand);
7408+}
7409+
7410+static int nandx_enable_clk(struct nandx_clk *clk)
7411+{
7412+ int ret;
7413+
7414+ ret = clk_enable(clk->nfi_clk);
7415+ if (ret) {
7416+ pr_info("failed to enable nfi clk\n");
7417+ return ret;
7418+ }
7419+
7420+ ret = clk_enable(clk->ecc_clk);
7421+ if (ret) {
7422+ pr_info("failed to enable ecc clk\n");
7423+ goto disable_nfi_clk;
7424+ }
7425+
7426+ ret = clk_enable(clk->snfi_clk);
7427+ if (ret) {
7428+ pr_info("failed to enable snfi clk\n");
7429+ goto disable_ecc_clk;
7430+ }
7431+
7432+ ret = clk_enable(clk->snfi_clk_sel);
7433+ if (ret) {
7434+ pr_info("failed to enable snfi clk sel\n");
7435+ goto disable_snfi_clk;
7436+ }
7437+
7438+ ret = clk_set_parent(clk->snfi_clk_sel, clk->snfi_parent_50m);
7439+ if (ret) {
7440+ pr_info("failed to set snfi parent 50MHz\n");
7441+ goto disable_snfi_clk;
7442+ }
7443+
7444+ return 0;
7445+
7446+disable_snfi_clk:
7447+ clk_disable(clk->snfi_clk);
7448+disable_ecc_clk:
7449+ clk_disable(clk->ecc_clk);
7450+disable_nfi_clk:
7451+ clk_disable(clk->nfi_clk);
7452+
7453+ return ret;
7454+}
7455+
7456+static void nandx_disable_clk(struct nandx_clk *clk)
7457+{
7458+ clk_disable(clk->ecc_clk);
7459+ clk_disable(clk->nfi_clk);
7460+ clk_disable(clk->snfi_clk);
7461+}
7462+
7463+static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
7464+ struct mtd_oob_region *oob_region)
7465+{
7466+ struct nandx_nfc *nfc = (struct nandx_nfc *)mtd_to_nfc(mtd);
7467+ u32 eccsteps;
7468+
7469+ eccsteps = div_down(mtd->writesize, mtd->ecc_step_size);
7470+
7471+ if (section >= eccsteps)
7472+ return -EINVAL;
7473+
7474+ oob_region->length = nfc->info.fdm_reg_size - nfc->info.fdm_ecc_size;
7475+ oob_region->offset = section * nfc->info.fdm_reg_size
7476+ + nfc->info.fdm_ecc_size;
7477+
7478+ return 0;
7479+}
7480+
7481+static int mtk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
7482+ struct mtd_oob_region *oob_region)
7483+{
7484+ struct nandx_nfc *nfc = (struct nandx_nfc *)mtd_to_nfc(mtd);
7485+ u32 eccsteps;
7486+
7487+ if (section)
7488+ return -EINVAL;
7489+
7490+ eccsteps = div_down(mtd->writesize, mtd->ecc_step_size);
7491+ oob_region->offset = nfc->info.fdm_reg_size * eccsteps;
7492+ oob_region->length = mtd->oobsize - oob_region->offset;
7493+
7494+ return 0;
7495+}
7496+
7497+static const struct mtd_ooblayout_ops mtk_nfc_ooblayout_ops = {
7498+ .rfree = mtk_nfc_ooblayout_free,
7499+ .ecc = mtk_nfc_ooblayout_ecc,
7500+};
7501+
7502+struct nfc_compatible {
7503+ enum mtk_ic_version ic_ver;
7504+
7505+ u32 clock_1x;
7506+ u32 *clock_2x;
7507+ int clock_2x_num;
7508+
7509+ int min_oob_req;
7510+};
7511+
7512+static const struct nfc_compatible nfc_compats_mt7622 = {
7513+ .ic_ver = NANDX_MT7622,
7514+ .clock_1x = 26000000,
7515+ .clock_2x = NULL,
7516+ .clock_2x_num = 8,
7517+ .min_oob_req = 1,
7518+};
7519+
7520+static const struct udevice_id ic_of_match[] = {
7521+ {.compatible = "mediatek,mt7622-nfc", .data = &nfc_compats_mt7622},
7522+ {}
7523+};
7524+
7525+static int nand_operation(struct mtd_info *mtd, loff_t addr, size_t len,
7526+ size_t *retlen, uint8_t *data, uint8_t *oob, bool read)
7527+{
7528+ struct nandx_split64 split = {0};
7529+ func_nandx_operation operation;
7530+	u64 block_oobs, val = 0, align;
7531+ uint8_t *databuf, *oobbuf;
7532+ struct nandx_nfc *nfc;
7533+ bool readoob;
7534+ int ret = 0;
7535+
7536+	nfc = mtd_to_nfc(mtd);
7537+ spin_lock(&nfc->lock);
7538+
7539+ databuf = data;
7540+ oobbuf = oob;
7541+
7542+ readoob = data ? false : true;
7543+ block_oobs = div_up(mtd->erasesize, mtd->writesize) * mtd->oobavail;
7544+ align = readoob ? block_oobs : mtd->erasesize;
7545+
7546+ operation = read ? nandx_read : nandx_write;
7547+
7548+ nandx_split(&split, addr, len, val, align);
7549+
7550+ if (split.head_len) {
7551+ ret = operation((u8 *) databuf, oobbuf, addr, split.head_len);
7552+
7553+ if (databuf)
7554+ databuf += split.head_len;
7555+
7556+ if (oobbuf)
7557+ oobbuf += split.head_len;
7558+
7559+ addr += split.head_len;
7560+ *retlen += split.head_len;
7561+ }
7562+
7563+ if (split.body_len) {
7564+ while (div_up(split.body_len, align)) {
7565+ ret = operation((u8 *) databuf, oobbuf, addr, align);
7566+
7567+ if (databuf) {
7568+ databuf += mtd->erasesize;
7569+ split.body_len -= mtd->erasesize;
7570+ *retlen += mtd->erasesize;
7571+ }
7572+
7573+ if (oobbuf) {
7574+ oobbuf += block_oobs;
7575+ split.body_len -= block_oobs;
7576+ *retlen += block_oobs;
7577+ }
7578+
7579+ addr += mtd->erasesize;
7580+ }
7581+
7582+ }
7583+
7584+ if (split.tail_len) {
7585+ ret = operation((u8 *) databuf, oobbuf, addr, split.tail_len);
7586+ *retlen += split.tail_len;
7587+ }
7588+
7589+ spin_unlock(&nfc->lock);
7590+
7591+ return ret;
7592+}
7593+
7594+static int mtk_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
7595+ size_t *retlen, u_char *buf)
7596+{
7597+ return nand_operation(mtd, from, len, retlen, buf, NULL, true);
7598+}
7599+
7600+static int mtk_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
7601+ size_t *retlen, const u_char *buf)
7602+{
7603+ return nand_operation(mtd, to, len, retlen, (uint8_t *)buf,
7604+ NULL, false);
7605+}
7606+
7607+int mtk_nand_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
7608+{
7609+ size_t retlen;
7610+	size_t retlen = 0;
7611+ return nand_operation(mtd, from, ops->ooblen, &retlen, NULL,
7612+ ops->oobbuf, true);
7613+}
7614+
7615+int mtk_nand_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
7616+{
7617+ size_t retlen;
7618+	size_t retlen = 0;
7619+ return nand_operation(mtd, to, ops->ooblen, &retlen, NULL,
7620+ ops->oobbuf, false);
7621+}
7622+
7623+static int mtk_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
7624+{
7625+ struct nandx_nfc *nfc;
7626+ u64 erase_len, erase_addr;
7627+ u32 block_size;
7628+ int ret = 0;
7629+
7630+ nfc = (struct nandx_nfc *)mtd_to_nfc(mtd);
7631+ block_size = nfc->info.block_size;
7632+ erase_len = instr->len;
7633+ erase_addr = instr->addr;
7634+ spin_lock(&nfc->lock);
7635+ instr->state = MTD_ERASING;
7636+
7637+ while (erase_len) {
7638+ if (mtk_nand_is_bad(mtd, erase_addr)) {
7639+ pr_info("block(0x%llx) is bad, not erase\n",
7640+ erase_addr);
7641+ instr->state = MTD_ERASE_FAILED;
7642+ goto erase_exit;
7643+ } else {
7644+ ret = nandx_erase(erase_addr, block_size);
7645+ if (ret < 0) {
7646+				instr->state = MTD_ERASE_FAILED;
7647+				pr_info("erase fail at blk %llu, ret:%d\n",
7648+					erase_addr, ret);
7649+				goto erase_exit;
7650+ }
7651+ }
7652+ erase_addr += block_size;
7653+ erase_len -= block_size;
7654+ }
7655+
7656+ instr->state = MTD_ERASE_DONE;
7657+
7658+erase_exit:
7659+ ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
7660+
7661+ spin_unlock(&nfc->lock);
7662+ /* Do mtd call back function */
7663+ if (!ret)
7664+ mtd_erase_callback(instr);
7665+
7666+ return ret;
7667+}
7668+
7669+int mtk_nand_is_bad(struct mtd_info *mtd, loff_t ofs)
7670+{
7671+ struct nandx_nfc *nfc;
7672+ int ret;
7673+
7674+ nfc = (struct nandx_nfc *)mtd_to_nfc(mtd);
7675+ spin_lock(&nfc->lock);
7676+
7677+ /*ret = bbt_is_bad(&nfc->info, ofs);*/
7678+ ret = nandx_is_bad_block(ofs);
7679+ spin_unlock(&nfc->lock);
7680+
7681+ if (ret) {
7682+		pr_info("nand block 0x%llx is bad, ret %d!\n", ofs, ret);
7683+ return 1;
7684+ } else {
7685+ return 0;
7686+ }
7687+}
7688+
7689+int mtk_nand_mark_bad(struct mtd_info *mtd, loff_t ofs)
7690+{
7691+ struct nandx_nfc *nfc;
7692+ int ret;
7693+
7694+ nfc = (struct nandx_nfc *)mtd_to_nfc(mtd);
7695+ spin_lock(&nfc->lock);
7696+ pr_info("%s, %d\n", __func__, __LINE__);
7697+ ret = bbt_mark_bad(&nfc->info, ofs);
7698+
7699+ spin_unlock(&nfc->lock);
7700+
7701+ return ret;
7702+}
7703+
7704+void mtk_nand_sync(struct mtd_info *mtd)
7705+{
7706+ nandx_sync();
7707+}
7708+
7709+static struct mtd_info *mtd_info_create(struct udevice *pdev,
7710+ struct nandx_nfc *nfc, struct nand_chip *nand)
7711+{
7712+ struct mtd_info *mtd = nand_to_mtd(nand);
7713+ int ret;
7714+
7715+ nand_set_controller_data(nand, nfc);
7716+
7717+ nand->flash_node = dev_of_offset(pdev);
7718+ nand->ecc.layout = &eccoob;
7719+
7720+ ret = nandx_ioctl(CORE_CTRL_NAND_INFO, &nfc->info);
7721+ if (ret) {
7722+ pr_info("fail to get nand info (%d)!\n", ret);
7723+ mem_free(mtd);
7724+ return NULL;
7725+ }
7726+
7727+ mtd->owner = THIS_MODULE;
7728+
7729+ mtd->name = "MTK-SNand";
7730+ mtd->writesize = nfc->info.page_size;
7731+ mtd->erasesize = nfc->info.block_size;
7732+ mtd->oobsize = nfc->info.oob_size;
7733+ mtd->size = nfc->info.total_size;
7734+ mtd->type = MTD_NANDFLASH;
7735+ mtd->flags = MTD_CAP_NANDFLASH;
7736+ mtd->_erase = mtk_nand_erase;
7737+ mtd->_read = mtk_nand_read;
7738+ mtd->_write = mtk_nand_write;
7739+ mtd->_read_oob = mtk_nand_read_oob;
7740+ mtd->_write_oob = mtk_nand_write_oob;
7741+ mtd->_sync = mtk_nand_sync;
7742+ mtd->_lock = NULL;
7743+ mtd->_unlock = NULL;
7744+ mtd->_block_isbad = mtk_nand_is_bad;
7745+ mtd->_block_markbad = mtk_nand_mark_bad;
7746+ mtd->writebufsize = mtd->writesize;
7747+
7748+ mtd_set_ooblayout(mtd, &mtk_nfc_ooblayout_ops);
7749+
7750+ mtd->ecc_strength = nfc->info.ecc_strength;
7751+ mtd->ecc_step_size = nfc->info.sector_size;
7752+
7753+ if (!mtd->bitflip_threshold)
7754+ mtd->bitflip_threshold = mtd->ecc_strength;
7755+
7756+ return mtd;
7757+}
7758+
7759+int board_nand_init(struct nand_chip *nand)
7760+{
7761+ struct udevice *dev;
7762+ struct mtd_info *mtd;
7763+ struct nandx_nfc *nfc;
7764+ int arg = 1;
7765+ int ret;
7766+
7767+ ret = uclass_get_device_by_driver(UCLASS_MTD,
7768+ DM_GET_DRIVER(mtk_snand_drv),
7769+ &dev);
7770+ if (ret) {
7771+ pr_err("Failed to get mtk_nand_drv. (error %d)\n", ret);
7772+ return ret;
7773+ }
7774+
7775+ nfc = dev_get_priv(dev);
7776+
7777+ ret = nandx_enable_clk(&nfc->clk);
7778+ if (ret) {
7779+ pr_err("failed to enable nfi clk (error %d)\n", ret);
7780+ return ret;
7781+ }
7782+
7783+ ret = nandx_init(nfc->res);
7784+ if (ret) {
7785+ pr_err("nandx init error (%d)!\n", ret);
7786+ goto disable_clk;
7787+ }
7788+
7789+ arg = 1;
7790+ nandx_ioctl(NFI_CTRL_DMA, &arg);
7791+ nandx_ioctl(NFI_CTRL_ECC, &arg);
7792+
7793+#ifdef NANDX_UNIT_TEST
7794+ nandx_unit_test(0x780000, 0x800);
7795+#endif
7796+
7797+ mtd = mtd_info_create(dev, nfc, nand);
7798+ if (!mtd) {
7799+ ret = -ENOMEM;
7800+ goto disable_clk;
7801+ }
7802+
7803+ spin_lock_init(&nfc->lock);
7804+#if 0
7805+ ret = scan_bbt(&nfc->info);
7806+ if (ret) {
7807+ pr_info("bbt init error (%d)!\n", ret);
7808+ goto disable_clk;
7809+ }
7810+#endif
7811+ return ret;
7812+
7813+disable_clk:
7814+ nandx_disable_clk(&nfc->clk);
7815+
7816+ return ret;
7817+}
7818+
7819+static int mtk_snand_ofdata_to_platdata(struct udevice *dev)
7820+{
7821+ struct nandx_nfc *nfc = dev_get_priv(dev);
7822+ struct nfc_compatible *compat;
7823+ struct nfi_resource *res;
7824+
7825+ int ret = 0;
7826+
7827+ res = mem_alloc(1, sizeof(struct nfi_resource));
7828+ if (!res)
7829+ return -ENOMEM;
7830+
7831+ nfc->res = res;
7832+
7833+ res->nfi_regs = (void *)dev_read_addr_index(dev, 0);
7834+ res->ecc_regs = (void *)dev_read_addr_index(dev, 1);
7835+ pr_debug("mtk snand nfi_regs:0x%x ecc_regs:0x%x\n",
7836+ res->nfi_regs, res->ecc_regs);
7837+
7838+ compat = (struct nfc_compatible *)dev_get_driver_data(dev);
7839+
7840+ res->ic_ver = (enum mtk_ic_version)(compat->ic_ver);
7841+ res->clock_1x = compat->clock_1x;
7842+ res->clock_2x = compat->clock_2x;
7843+ res->clock_2x_num = compat->clock_2x_num;
7844+
7845+ memset(&nfc->clk, 0, sizeof(struct nandx_clk));
7846+ nfc->clk.nfi_clk =
7847+ kmalloc(sizeof(*nfc->clk.nfi_clk), GFP_KERNEL);
7848+ nfc->clk.ecc_clk =
7849+ kmalloc(sizeof(*nfc->clk.ecc_clk), GFP_KERNEL);
7850+ nfc->clk.snfi_clk=
7851+ kmalloc(sizeof(*nfc->clk.snfi_clk), GFP_KERNEL);
7852+ nfc->clk.snfi_clk_sel =
7853+ kmalloc(sizeof(*nfc->clk.snfi_clk_sel), GFP_KERNEL);
7854+ nfc->clk.snfi_parent_50m =
7855+ kmalloc(sizeof(*nfc->clk.snfi_parent_50m), GFP_KERNEL);
7856+
7857+ if (!nfc->clk.nfi_clk || !nfc->clk.ecc_clk || !nfc->clk.snfi_clk ||
7858+ !nfc->clk.snfi_clk_sel || !nfc->clk.snfi_parent_50m) {
7859+ ret = -ENOMEM;
7860+ goto err;
7861+ }
7862+
7863+ ret = clk_get_by_name(dev, "nfi_clk", nfc->clk.nfi_clk);
7864+ if (IS_ERR(nfc->clk.nfi_clk)) {
7865+ ret = PTR_ERR(nfc->clk.nfi_clk);
7866+ goto err;
7867+ }
7868+
7869+ ret = clk_get_by_name(dev, "ecc_clk", nfc->clk.ecc_clk);
7870+ if (IS_ERR(nfc->clk.ecc_clk)) {
7871+ ret = PTR_ERR(nfc->clk.ecc_clk);
7872+ goto err;
7873+ }
7874+
7875+ ret = clk_get_by_name(dev, "snfi_clk", nfc->clk.snfi_clk);
7876+ if (IS_ERR(nfc->clk.snfi_clk)) {
7877+ ret = PTR_ERR(nfc->clk.snfi_clk);
7878+ goto err;
7879+ }
7880+
7881+ ret = clk_get_by_name(dev, "spinfi_sel", nfc->clk.snfi_clk_sel);
7882+ if (IS_ERR(nfc->clk.snfi_clk_sel)) {
7883+ ret = PTR_ERR(nfc->clk.snfi_clk_sel);
7884+ goto err;
7885+ }
7886+
7887+ ret = clk_get_by_name(dev, "spinfi_parent_50m", nfc->clk.snfi_parent_50m);
7888+ if (IS_ERR(nfc->clk.snfi_parent_50m))
7889+ pr_info("spinfi parent 50MHz is not configed\n");
7890+
7891+ return 0;
7892+err:
7893+ if (nfc->clk.nfi_clk)
7894+ kfree(nfc->clk.nfi_clk);
7895+ if (nfc->clk.snfi_clk)
7896+ kfree(nfc->clk.snfi_clk);
7897+ if (nfc->clk.ecc_clk)
7898+ kfree(nfc->clk.ecc_clk);
7899+ if (nfc->clk.snfi_clk_sel)
7900+ kfree(nfc->clk.snfi_clk_sel);
7901+ if (nfc->clk.snfi_parent_50m)
7902+ kfree(nfc->clk.snfi_parent_50m);
7903+
7904+ return ret;
7905+}
7906+
7907+U_BOOT_DRIVER(mtk_snand_drv) = {
7908+ .name = "mtk_snand",
7909+ .id = UCLASS_MTD,
7910+ .of_match = ic_of_match,
7911+ .ofdata_to_platdata = mtk_snand_ofdata_to_platdata,
7912+ .priv_auto_alloc_size = sizeof(struct nandx_nfc),
7913+};
7914+
7915+MODULE_LICENSE("GPL v2");
7916+MODULE_DESCRIPTION("MTK Nand Flash Controller Driver");
7917+MODULE_AUTHOR("MediaTek");
7918diff --git a/drivers/mtd/nandx/include/Nandx.mk b/drivers/mtd/nandx/include/Nandx.mk
7919new file mode 100644
7920index 0000000000..667402790e
7921--- /dev/null
7922+++ b/drivers/mtd/nandx/include/Nandx.mk
7923@@ -0,0 +1,16 @@
7924+#
7925+# Copyright (C) 2017 MediaTek Inc.
7926+# Licensed under either
7927+# BSD Licence, (see NOTICE for more details)
7928+# GNU General Public License, version 2.0, (see NOTICE for more details)
7929+#
7930+
7931+nandx-header-y += internal/nandx_core.h
7932+nandx-header-y += internal/nandx_errno.h
7933+nandx-header-y += internal/nandx_util.h
7934+nandx-header-$(NANDX_BBT_SUPPORT) += internal/bbt.h
7935+nandx-header-$(NANDX_SIMULATOR_SUPPORT) += simulator/nandx_os.h
7936+nandx-header-$(NANDX_CTP_SUPPORT) += ctp/nandx_os.h
7937+nandx-header-$(NANDX_LK_SUPPORT) += lk/nandx_os.h
7938+nandx-header-$(NANDX_KERNEL_SUPPORT) += kernel/nandx_os.h
7939+nandx-header-$(NANDX_UBOOT_SUPPORT) += uboot/nandx_os.h
7940diff --git a/drivers/mtd/nandx/include/internal/bbt.h b/drivers/mtd/nandx/include/internal/bbt.h
7941new file mode 100644
7942index 0000000000..4676def1f5
7943--- /dev/null
7944+++ b/drivers/mtd/nandx/include/internal/bbt.h
7945@@ -0,0 +1,62 @@
7946+/*
7947+ * Copyright (C) 2017 MediaTek Inc.
7948+ * Licensed under either
7949+ * BSD Licence, (see NOTICE for more details)
7950+ * GNU General Public License, version 2.0, (see NOTICE for more details)
7951+ */
7952+
7953+#ifndef __BBT_H__
7954+#define __BBT_H__
7955+
7956+#define BBT_BLOCK_GOOD 0x03
7957+#define BBT_BLOCK_WORN 0x02
7958+#define BBT_BLOCK_RESERVED 0x01
7959+#define BBT_BLOCK_FACTORY_BAD 0x00
7960+
7961+#define BBT_INVALID_ADDR 0
7962+/* The maximum number of blocks to scan for a bbt */
7963+#define NAND_BBT_SCAN_MAXBLOCKS 4
7964+#define NAND_BBT_USE_FLASH 0x00020000
7965+#define NAND_BBT_NO_OOB 0x00040000
7966+
7967+/* Search good / bad pattern on the first and the second page */
7968+#define NAND_BBT_SCAN2NDPAGE 0x00008000
7969+/* Search good / bad pattern on the last page of the eraseblock */
7970+#define NAND_BBT_SCANLASTPAGE 0x00010000
7971+
7972+#define NAND_DRAM_BUF_DATABUF_ADDR (NAND_BUF_ADDR)
7973+
7974+struct bbt_pattern {
7975+ u8 *data;
7976+ int len;
7977+};
7978+
7979+struct bbt_desc {
7980+ struct bbt_pattern pattern;
7981+ u8 version;
7982+ u64 bbt_addr;/*0: invalid value; otherwise, valid value*/
7983+};
7984+
7985+struct bbt_manager {
7986+ /* main bbt descriptor and mirror descriptor */
7987+ struct bbt_desc desc[2];/* 0: main bbt; 1: mirror bbt */
7988+ int max_blocks;
7989+ u8 *bbt;
7990+};
7991+
7992+#define BBT_ENTRY_MASK 0x03
7993+#define BBT_ENTRY_SHIFT 2
7994+
7995+#define GET_BBT_LENGTH(blocks) (blocks >> 2)
7996+#define GET_ENTRY(block) ((block) >> BBT_ENTRY_SHIFT)
7997+#define GET_POSITION(block) (((block) & BBT_ENTRY_MASK) * 2)
7998+#define GET_MARK_VALUE(block, mark) \
7999+ (((mark) & BBT_ENTRY_MASK) << GET_POSITION(block))
8000+
8001+int scan_bbt(struct nandx_info *nand);
8002+
8003+int bbt_mark_bad(struct nandx_info *nand, off_t offset);
8004+
8005+int bbt_is_bad(struct nandx_info *nand, off_t offset);
8006+
8007+#endif /*__BBT_H__*/
8008diff --git a/drivers/mtd/nandx/include/internal/nandx_core.h b/drivers/mtd/nandx/include/internal/nandx_core.h
8009new file mode 100644
8010index 0000000000..09aff72224
8011--- /dev/null
8012+++ b/drivers/mtd/nandx/include/internal/nandx_core.h
8013@@ -0,0 +1,250 @@
8014+/*
8015+ * Copyright (C) 2017 MediaTek Inc.
8016+ * Licensed under either
8017+ * BSD Licence, (see NOTICE for more details)
8018+ * GNU General Public License, version 2.0, (see NOTICE for more details)
8019+ */
8020+
8021+#ifndef __NANDX_CORE_H__
8022+#define __NANDX_CORE_H__
8023+
8024+/**
8025+ * mtk_ic_version - indicates the specific IC; the IP needs this to load some info
8026+ */
8027+enum mtk_ic_version {
8028+ NANDX_MT7622,
8029+};
8030+
8031+/**
8032+ * nandx_ioctl_cmd - operations supported by nandx
8033+ *
8034+ * @NFI_CTRL_DMA dma enable or not
8035+ * @NFI_CTRL_NFI_MODE customer/read/program/erase...
8036+ * @NFI_CTRL_ECC ecc enable or not
8037+ * @NFI_CTRL_ECC_MODE nfi/dma/pio
8038+ * @CHIP_CTRL_DRIVE_STRENGTH enum chip_ctrl_drive_strength
8039+ */
8040+enum nandx_ctrl_cmd {
8041+ CORE_CTRL_NAND_INFO,
8042+
8043+ NFI_CTRL_DMA,
8044+ NFI_CTRL_NFI_MODE,
8045+ NFI_CTRL_AUTOFORMAT,
8046+ NFI_CTRL_NFI_IRQ,
8047+ NFI_CTRL_PAGE_IRQ,
8048+ NFI_CTRL_RANDOMIZE,
8049+ NFI_CTRL_BAD_MARK_SWAP,
8050+
8051+ NFI_CTRL_ECC,
8052+ NFI_CTRL_ECC_MODE,
8053+ NFI_CTRL_ECC_CLOCK,
8054+ NFI_CTRL_ECC_IRQ,
8055+ NFI_CTRL_ECC_PAGE_IRQ,
8056+ NFI_CTRL_ECC_DECODE_MODE,
8057+
8058+ SNFI_CTRL_OP_MODE,
8059+ SNFI_CTRL_RX_MODE,
8060+ SNFI_CTRL_TX_MODE,
8061+ SNFI_CTRL_DELAY_MODE,
8062+
8063+ CHIP_CTRL_OPS_CACHE,
8064+ CHIP_CTRL_OPS_MULTI,
8065+ CHIP_CTRL_PSLC_MODE,
8066+ CHIP_CTRL_DRIVE_STRENGTH,
8067+ CHIP_CTRL_DDR_MODE,
8068+ CHIP_CTRL_ONDIE_ECC,
8069+ CHIP_CTRL_TIMING_MODE
8070+};
8071+
8072+enum snfi_ctrl_op_mode {
8073+ SNFI_CUSTOM_MODE,
8074+ SNFI_AUTO_MODE,
8075+ SNFI_MAC_MODE
8076+};
8077+
8078+enum snfi_ctrl_rx_mode {
8079+ SNFI_RX_111,
8080+ SNFI_RX_112,
8081+ SNFI_RX_114,
8082+ SNFI_RX_122,
8083+ SNFI_RX_144
8084+};
8085+
8086+enum snfi_ctrl_tx_mode {
8087+ SNFI_TX_111,
8088+ SNFI_TX_114,
8089+};
8090+
8091+enum chip_ctrl_drive_strength {
8092+ CHIP_DRIVE_NORMAL,
8093+ CHIP_DRIVE_HIGH,
8094+ CHIP_DRIVE_MIDDLE,
8095+ CHIP_DRIVE_LOW
8096+};
8097+
8098+enum chip_ctrl_timing_mode {
8099+ CHIP_TIMING_MODE0,
8100+ CHIP_TIMING_MODE1,
8101+ CHIP_TIMING_MODE2,
8102+ CHIP_TIMING_MODE3,
8103+ CHIP_TIMING_MODE4,
8104+ CHIP_TIMING_MODE5,
8105+};
8106+
8107+/**
8108+ * nandx_info - basic information
8109+ */
8110+struct nandx_info {
8111+ u32 max_io_count;
8112+ u32 min_write_pages;
8113+ u32 plane_num;
8114+ u32 oob_size;
8115+ u32 page_parity_size;
8116+ u32 page_size;
8117+ u32 block_size;
8118+ u64 total_size;
8119+ u32 fdm_reg_size;
8120+ u32 fdm_ecc_size;
8121+ u32 ecc_strength;
8122+ u32 sector_size;
8123+};
8124+
8125+/**
8126+ * nfi_resource - the resource needed by nfi & ecc to do initialization
8127+ */
8128+struct nfi_resource {
8129+ int ic_ver;
8130+ void *dev;
8131+
8132+ void *ecc_regs;
8133+ int ecc_irq_id;
8134+
8135+ void *nfi_regs;
8136+ int nfi_irq_id;
8137+
8138+ u32 clock_1x;
8139+ u32 *clock_2x;
8140+ int clock_2x_num;
8141+
8142+ int min_oob_req;
8143+};
8144+
8145+/**
8146+ * nandx_init - init all related modules below
8147+ *
8148+ * @res: basic resource of the project
8149+ *
8150+ * return 0 if init success, otherwise return negative error code
8151+ */
8152+int nandx_init(struct nfi_resource *res);
8153+
8154+/**
8155+ * nandx_exit - release resource those that obtained in init flow
8156+ */
8157+void nandx_exit(void);
8158+
8159+/**
8160+ * nandx_read - read data from nand; this function can read data and related
8161+ * oob from a specific address
8162+ * if do multi_ops, set one operation per time, and call nandx_sync at last
8163+ * in multi mode, page partial read is not supported
8164+ * oob does not support partial read
8165+ *
8166+ * @data: buf to receive data from nand
8167+ * @oob: buf to receive oob data from nand which related to data page
8168+ * length of @oob should oob size aligned, oob not support partial read
8169+ * @offset: offset address on the whole flash
8170+ * @len: the length of @data that need to read
8171+ *
8172+ * if read success return 0, otherwise return negative error code
8173+ */
8174+int nandx_read(u8 *data, u8 *oob, u64 offset, size_t len);
8175+
8176+/**
8177+ * nandx_write - write data to nand
8178+ * this function can write data and related oob to specifical address
8179+ * if do multi_ops, set one operation per time, and call nandx_sync at last
8180+ *
8181+ * @data: source data to be written to nand,
8182+ * for multi operation, the length of @data should be page size aligned
8183+ * @oob: source oob which related to data page to be written to nand,
8184+ * length of @oob should be oob size aligned
8185+ * @offset: offset address on the whole flash, the value should be start address
8186+ * of a page
8187+ * @len: the length of @data that need to write,
8188+ * for multi operation, the len should be page size aligned
8189+ *
8190+ * if write success return 0, otherwise return negative error code
8191+ * if return value > 0, it indicates how many pages still need to be written,
8192+ * and data has not been written to nand
8193+ * please call nandx_sync after pages aligned to $nandx_info.min_write_pages
8194+ */
8195+int nandx_write(u8 *data, u8 *oob, u64 offset, size_t len);
8196+
8197+/**
8198+ * nandx_erase - erase an area of nand
8199+ * if do multi_ops, set one operation per time, and call nandx_sync at last
8200+ *
8201+ * @offset: offset address on the flash
8202+ * @len: erase length which should be block size aligned
8203+ *
8204+ * if erase success return 0, otherwise return negative error code
8205+ */
8206+int nandx_erase(u64 offset, size_t len);
8207+
8208+/**
8209+ * nandx_sync - sync all operations to nand
8210+ * when do multi_ops, this function will be called at last operation
8211+ * when writing data, if the number of pages is not aligned
8212+ * by $nandx_info.min_write_pages, this interface could be called to do
8213+ * force write, 0xff will be padded to blanked pages.
8214+ */
8215+int nandx_sync(void);
8216+
8217+/**
8218+ * nandx_is_bad_block - check if the block is bad
8219+ * only check the flag that marked by the flash vendor
8220+ *
8221+ * @offset: offset address on the whole flash
8222+ *
8223+ * return true if the block is bad, otherwise return false
8224+ */
8225+bool nandx_is_bad_block(u64 offset);
8226+
8227+/**
8228+ * nandx_ioctl - set/get property of nand chip
8229+ *
8230+ * @cmd: parameter that is defined in enum nandx_ctrl_cmd
8231+ * @arg: operate parameter
8232+ *
8233+ * return 0 if operate success, otherwise return negative error code
8234+ */
8235+int nandx_ioctl(int cmd, void *arg);
8236+
8237+/**
8238+ * nandx_suspend - suspend nand, and store some data
8239+ *
8240+ * return 0 if suspend success, otherwise return negative error code
8241+ */
8242+int nandx_suspend(void);
8243+
8244+/**
8245+ * nandx_resume - resume nand, and replay some data
8246+ *
8247+ * return 0 if resume success, otherwise return negative error code
8248+ */
8249+int nandx_resume(void);
8250+
8251+#ifdef NANDX_UNIT_TEST
8252+/**
8253+ * nandx_unit_test - unit test
8254+ *
8255+ * @offset: offset address on the whole flash
8256+ * @len: should be not larger than a block size, we only test a block per time
8257+ *
8258+ * return 0 if test success, otherwise return negative error code
8259+ */
8260+int nandx_unit_test(u64 offset, size_t len);
8261+#endif
8262+
8263+#endif /* __NANDX_CORE_H__ */
8264diff --git a/drivers/mtd/nandx/include/internal/nandx_errno.h b/drivers/mtd/nandx/include/internal/nandx_errno.h
8265new file mode 100644
8266index 0000000000..51fb299c03
8267--- /dev/null
8268+++ b/drivers/mtd/nandx/include/internal/nandx_errno.h
8269@@ -0,0 +1,40 @@
8270+/*
8271+ * Copyright (C) 2017 MediaTek Inc.
8272+ * Licensed under either
8273+ * BSD Licence, (see NOTICE for more details)
8274+ * GNU General Public License, version 2.0, (see NOTICE for more details)
8275+ */
8276+
8277+#ifndef __NANDX_ERRNO_H__
8278+#define __NANDX_ERRNO_H__
8279+
8280+#ifndef EIO
8281+#define EIO 5 /* I/O error */
8282+#define ENOMEM 12 /* Out of memory */
8283+#define EFAULT 14 /* Bad address */
8284+#define EBUSY 16 /* Device or resource busy */
8285+#define ENODEV 19 /* No such device */
8286+#define EINVAL 22 /* Invalid argument */
8287+#define ENOSPC 28 /* No space left on device */
8288+/* Operation not supported on transport endpoint */
8289+#define EOPNOTSUPP 95
8290+#define ETIMEDOUT 110 /* Connection timed out */
8291+#endif
8292+
8293+#define ENANDFLIPS 1024 /* Too many bitflips, uncorrected */
8294+#define ENANDREAD 1025 /* Read fail, can't correct */
8295+#define ENANDWRITE 1026 /* Write fail */
8296+#define ENANDERASE 1027 /* Erase fail */
8297+#define ENANDBAD 1028 /* Bad block */
8298+#define ENANDWP 1029
8299+
8300+#define IS_NAND_ERR(err) ((err) >= -ENANDBAD && (err) <= -ENANDFLIPS)
8301+
8302+#ifndef MAX_ERRNO
8303+#define MAX_ERRNO 4096
8304+#define ERR_PTR(errno) ((void *)((long)errno))
8305+#define PTR_ERR(ptr) ((long)(ptr))
8306+#define IS_ERR(ptr) ((unsigned long)(ptr) > (unsigned long)-MAX_ERRNO)
8307+#endif
8308+
8309+#endif /* __NANDX_ERRNO_H__ */
8310diff --git a/drivers/mtd/nandx/include/internal/nandx_util.h b/drivers/mtd/nandx/include/internal/nandx_util.h
8311new file mode 100644
8312index 0000000000..1990b000ee
8313--- /dev/null
8314+++ b/drivers/mtd/nandx/include/internal/nandx_util.h
8315@@ -0,0 +1,221 @@
8316+/*
8317+ * Copyright (C) 2017 MediaTek Inc.
8318+ * Licensed under either
8319+ * BSD Licence, (see NOTICE for more details)
8320+ * GNU General Public License, version 2.0, (see NOTICE for more details)
8321+ */
8322+
8323+#ifndef __NANDX_UTIL_H__
8324+#define __NANDX_UTIL_H__
8325+
8326+typedef unsigned char u8;
8327+typedef unsigned short u16;
8328+typedef unsigned int u32;
8329+typedef unsigned long long u64;
8330+
8331+enum nand_irq_return {
8332+ NAND_IRQ_NONE,
8333+ NAND_IRQ_HANDLED,
8334+};
8335+
8336+enum nand_dma_operation {
8337+ NDMA_FROM_DEV,
8338+ NDMA_TO_DEV,
8339+};
8340+
8341+
8342+/*
8343+ * Compatible function
8344+ * used for preloader/lk/kernel environment
8345+ */
8346+#include "nandx_os.h"
8347+#include "nandx_errno.h"
8348+
8349+#ifndef BIT
8350+#define BIT(a) (1 << (a))
8351+#endif
8352+
8353+#ifndef min_t
8354+#define min_t(type, x, y) ({ \
8355+ type __min1 = (x); \
8356+ type __min2 = (y); \
8357+ __min1 < __min2 ? __min1 : __min2; })
8358+
8359+#define max_t(type, x, y) ({ \
8360+ type __max1 = (x); \
8361+ type __max2 = (y); \
8362+ __max1 > __max2 ? __max1 : __max2; })
8363+#endif
8364+
8365+#ifndef GENMASK
8366+#define GENMASK(h, l) \
8367+ (((~0UL) << (l)) & (~0UL >> ((sizeof(unsigned long) * 8) - 1 - (h))))
8368+#endif
8369+
8370+#ifndef __weak
8371+#define __weak __attribute__((__weak__))
8372+#endif
8373+
8374+#ifndef __packed
8375+#define __packed __attribute__((__packed__))
8376+#endif
8377+
8378+#ifndef KB
8379+#define KB(x) ((x) << 10)
8380+#define MB(x) (KB(x) << 10)
8381+#define GB(x) (MB(x) << 10)
8382+#endif
8383+
8384+#ifndef offsetof
8385+#define offsetof(type, member) ((size_t)&((type *)0)->member)
8386+#endif
8387+
8388+#ifndef NULL
8389+#define NULL (void *)0
8390+#endif
8391+static inline u32 nandx_popcount(u32 x)
8392+{
8393+ x = (x & 0x55555555) + ((x >> 1) & 0x55555555);
8394+ x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
8395+ x = (x & 0x0F0F0F0F) + ((x >> 4) & 0x0F0F0F0F);
8396+ x = (x & 0x00FF00FF) + ((x >> 8) & 0x00FF00FF);
8397+ x = (x & 0x0000FFFF) + ((x >> 16) & 0x0000FFFF);
8398+
8399+ return x;
8400+}
8401+
8402+#ifndef zero_popcount
8403+#define zero_popcount(x) (32 - nandx_popcount(x))
8404+#endif
8405+
8406+#ifndef do_div
8407+#define do_div(n, base) \
8408+ ({ \
8409+ u32 __base = (base); \
8410+ u32 __rem; \
8411+ __rem = ((u64)(n)) % __base; \
8412+ (n) = ((u64)(n)) / __base; \
8413+ __rem; \
8414+ })
8415+#endif
8416+
8417+#define div_up(x, y) \
8418+ ({ \
8419+ u64 __temp = ((x) + (y) - 1); \
8420+ do_div(__temp, (y)); \
8421+ __temp; \
8422+ })
8423+
8424+#define div_down(x, y) \
8425+ ({ \
8426+ u64 __temp = (x); \
8427+ do_div(__temp, (y)); \
8428+ __temp; \
8429+ })
8430+
8431+#define div_round_up(x, y) (div_up(x, y) * (y))
8432+#define div_round_down(x, y) (div_down(x, y) * (y))
8433+
8434+#define reminder(x, y) \
8435+ ({ \
8436+ u64 __temp = (x); \
8437+ do_div(__temp, (y)); \
8438+ })
8439+
8440+#ifndef round_up
8441+#define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)
8442+#define round_down(x, y) ((x) & ~((y) - 1))
8443+#endif
8444+
8445+#ifndef readx_poll_timeout_atomic
8446+#define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \
8447+ ({ \
8448+ u64 end = get_current_time_us() + timeout_us; \
8449+ for (;;) { \
8450+ u64 now = get_current_time_us(); \
8451+ (val) = op(addr); \
8452+ if (cond) \
8453+ break; \
8454+ if (now > end) { \
8455+ (val) = op(addr); \
8456+ break; \
8457+ } \
8458+ } \
8459+ (cond) ? 0 : -ETIMEDOUT; \
8460+ })
8461+
8462+#define readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
8463+ readx_poll_timeout_atomic(readl, addr, val, cond, delay_us, timeout_us)
8464+#define readw_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
8465+ readx_poll_timeout_atomic(readw, addr, val, cond, delay_us, timeout_us)
8466+#define readb_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
8467+ readx_poll_timeout_atomic(readb, addr, val, cond, delay_us, timeout_us)
8468+#endif
8469+
8470+struct nandx_split64 {
8471+ u64 head;
8472+ size_t head_len;
8473+ u64 body;
8474+ size_t body_len;
8475+ u64 tail;
8476+ size_t tail_len;
8477+};
8478+
8479+struct nandx_split32 {
8480+ u32 head;
8481+ u32 head_len;
8482+ u32 body;
8483+ u32 body_len;
8484+ u32 tail;
8485+ u32 tail_len;
8486+};
8487+
8488+#define nandx_split(split, offset, len, val, align) \
8489+ do { \
8490+ (split)->head = (offset); \
8491+ (val) = div_round_down((offset), (align)); \
8492+ (val) = (align) - ((offset) - (val)); \
8493+ if ((val) == (align)) \
8494+ (split)->head_len = 0; \
8495+ else if ((val) > (len)) \
8496+ (split)->head_len = len; \
8497+ else \
8498+ (split)->head_len = val; \
8499+ (split)->body = (offset) + (split)->head_len; \
8500+ (split)->body_len = div_round_down((len) - \
8501+ (split)->head_len,\
8502+ (align)); \
8503+ (split)->tail = (split)->body + (split)->body_len; \
8504+ (split)->tail_len = (len) - (split)->head_len - \
8505+ (split)->body_len; \
8506+ } while (0)
8507+
8508+#ifndef container_of
8509+#define container_of(ptr, type, member) \
8510+ ({const __typeof__(((type *)0)->member) * __mptr = (ptr); \
8511+ (type *)((char *)__mptr - offsetof(type, member)); })
8512+#endif
8513+
8514+static inline u32 nandx_cpu_to_be32(u32 val)
8515+{
8516+ u32 temp = 1;
8517+ u8 *p_temp = (u8 *)&temp;
8518+
8519+ if (*p_temp)
8520+ return ((val & 0xff) << 24) | ((val & 0xff00) << 8) |
8521+ ((val >> 8) & 0xff00) | ((val >> 24) & 0xff);
8522+
8523+ return val;
8524+}
8525+
8526+static inline void nandx_set_bits32(unsigned long addr, u32 mask,
8527+ u32 val)
8528+{
8529+ u32 temp = readl((void *)addr);
8530+
8531+ temp &= ~(mask);
8532+ temp |= val;
8533+ writel(temp, (void *)addr);
8534+}
8535+
8536+#endif /* __NANDX_UTIL_H__ */
8537diff --git a/drivers/mtd/nandx/include/uboot/nandx_os.h b/drivers/mtd/nandx/include/uboot/nandx_os.h
8538new file mode 100644
8539index 0000000000..8ea53378bf
8540--- /dev/null
8541+++ b/drivers/mtd/nandx/include/uboot/nandx_os.h
8542@@ -0,0 +1,78 @@
8543+/*
8544+ * Copyright (C) 2017 MediaTek Inc.
8545+ * Licensed under either
8546+ * BSD Licence, (see NOTICE for more details)
8547+ * GNU General Public License, version 2.0, (see NOTICE for more details)
8548+ */
8549+
8550+#ifndef __NANDX_OS_H__
8551+#define __NANDX_OS_H__
8552+
8553+#include <common.h>
8554+#include <dm.h>
8555+#include <clk.h>
8556+#include <asm/dma-mapping.h>
8557+#include <linux/io.h>
8558+#include <linux/err.h>
8559+#include <linux/errno.h>
8560+#include <linux/bitops.h>
8561+#include <linux/kernel.h>
8562+#include <linux/compiler-gcc.h>
8563+
8564+#define NANDX_BULK_IO_USE_DRAM 0
8565+
8566+#define nandx_event_create() NULL
8567+#define nandx_event_destroy(event)
8568+#define nandx_event_complete(event)
8569+#define nandx_event_init(event)
8570+#define nandx_event_wait_complete(event, timeout) true
8571+
8572+#define nandx_irq_register(dev, irq, irq_handler, name, data) NULL
8573+
8574+static inline void *mem_alloc(u32 count, u32 size)
8575+{
8576+ return kmalloc(count * size, GFP_KERNEL | __GFP_ZERO);
8577+}
8578+
8579+static inline void mem_free(void *mem)
8580+{
8581+ kfree(mem);
8582+}
8583+
8584+static inline u64 get_current_time_us(void)
8585+{
8586+ return timer_get_us();
8587+}
8588+
8589+static inline u32 nandx_dma_map(void *dev, void *buf, u64 len,
8590+ enum nand_dma_operation op)
8591+{
8592+ unsigned long addr = (unsigned long)buf;
8593+ u64 size;
8594+
8595+ size = ALIGN(len, ARCH_DMA_MINALIGN);
8596+
8597+ if (op == NDMA_FROM_DEV)
8598+ invalidate_dcache_range(addr, addr + size);
8599+ else
8600+ flush_dcache_range(addr, addr + size);
8601+
8602+ return addr;
8603+}
8604+
8605+static inline void nandx_dma_unmap(void *dev, void *buf, void *addr,
8606+ u64 len, enum nand_dma_operation op)
8607+{
8608+ u64 size;
8609+
8610+ size = ALIGN(len, ARCH_DMA_MINALIGN);
8611+
8612+ if (op != NDMA_FROM_DEV)
8613+ invalidate_dcache_range((unsigned long)addr, addr + size);
8614+ else
8615+ flush_dcache_range((unsigned long)addr, addr + size);
8616+
8617+ return addr;
8618+}
8619+
8620+#endif /* __NANDX_OS_H__ */
8621diff --git a/include/configs/mt7622.h b/include/configs/mt7622.h
8622index dfd506ed24..6d0c956484 100644
8623--- a/include/configs/mt7622.h
8624+++ b/include/configs/mt7622.h
8625@@ -11,6 +11,31 @@
8626
8627 #include <linux/sizes.h>
8628
8629+/* SPI Nand */
8630+#if defined(CONFIG_MTD_RAW_NAND)
8631+#define CONFIG_SYS_MAX_NAND_DEVICE 1
8632+#define CONFIG_SYS_NAND_BASE 0x1100d000
8633+
8634+#define ENV_BOOT_READ_IMAGE \
8635+ "boot_rd_img=" \
8636+ "nand read 0x4007ff28 0x380000 0x1400000" \
8637+ ";iminfo 0x4007ff28 \0"
8638+
8639+#define ENV_BOOT_WRITE_IMAGE \
8640+ "boot_wr_img=" \
8641+ "nand write 0x4007ff28 0x380000 0x1400000" \
8642+ ";iminfo 0x4007ff28 \0"
8643+
8644+#define ENV_BOOT_CMD \
8645+ "mtk_boot=run boot_rd_img;bootm;\0"
8646+
8647+#define CONFIG_EXTRA_ENV_SETTINGS \
8648+ ENV_BOOT_READ_IMAGE \
8649+ ENV_BOOT_CMD \
8650+ "bootcmd=run mtk_boot;\0"
8651+
8652+#endif
8653+
8654 #define CONFIG_SYS_MAXARGS 8
8655 #define CONFIG_SYS_BOOTM_LEN SZ_64M
8656 #define CONFIG_SYS_CBSIZE SZ_1K
8657--
86582.17.1
8659