[Feature]add MT2731_MP2_MR2_SVN388 baseline version
Change-Id: Ief04314834b31e27effab435d3ca8ba33b499059
diff --git a/src/bsp/lk/platform/mt8512/drivers/gic/mt_gic.S b/src/bsp/lk/platform/mt8512/drivers/gic/mt_gic.S
new file mode 100644
index 0000000..ec127d0
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/gic/mt_gic.S
@@ -0,0 +1,64 @@
+#include <asm.h>
+#include <arch/arm64/mmu.h>
+#include <arch/asm_macros.h>
+#include <kernel/vm.h>
+
+/*
+ * Register use:
+ * x0-x3 Arguments
+ * x9-x15 Scratch
+ * x19-x28 Globals
+ */
+
+#define GIC_DIST_BASE 0x0c000000
+#define GIC_RDIST_BASE 0x0C080000
+
+FUNCTION(mt_gic_el3_setup)
+	/* GIC V3 redistributor initialization (all CPUs) */
+	ldr x2, =GIC_RDIST_BASE
+	ldr w4, [x2, #0x14] // GICR_WAKER
+	bic w4, w4, #(1 << 1) // clear ProcessorSleep to wake the redistributor
+	str w4, [x2, #0x14] // GICR_WAKER
+
+1:
+	ldr w4, [x2, #0x14] // GICR_WAKER
+	tst w4, #(1 << 2) // test ChildrenAsleep
+	b.ne 1b // busy-wait (no timeout) until the redistributor is awake
+
+	/*
+	 * We need to set SCR_EL3.NS in order to see GICv3 non-secure state.
+	 * Restore SCR_EL3.NS again before exit.
+	 */
+	mov x0, #0x30 // RES1
+	orr x0, x0, #1 // Non-secure EL1
+	msr scr_el3, x0 /* NS=1; NOTE(review): no isb follows this msr -- confirm the SCR_EL3 write is synchronized before the ICC accesses below */
+
+	/*GIC V3 CPU interface initialization (all CPUs)*/
+	mrs x0, S3_6_C12_C12_5 // ICC_SRE_EL3
+	orr x0, x0, #1 // SRE: enable the system-register interface at EL3
+	orr x0, x0, #(1<<3) // EN: permit lower ELs to access ICC_SRE
+	msr S3_6_C12_C12_5, x0
+
+	mov x0, #0x1
+	msr S3_6_C12_C12_7, x0 // ICC_GRPEN1_EL3: enable Group 1 interrupts
+
+	mrs x0, S3_4_C12_C9_5 // ICC_SRE_EL2
+	orr x0, x0, #1 // SRE
+	orr x0, x0, #(1<<3) // EN
+	msr S3_4_C12_C9_5, x0
+
+	mov x0, #0xff
+	msr S3_0_C4_C6_0, x0 // ICC_PMR_EL1: unmask all interrupt priorities
+
+	/* Restore SCR_EL3 */
+	mrs x0, scr_el3
+	bic x0, x0, #1 // back to secure state
+	msr scr_el3, x0 /* NS=0; NOTE(review): an isb is normally required here as well -- confirm */
+
+	mov x0, #0x1
+	msr S3_0_C12_C12_7, x0 // ICC_GRPEN1_EL1
+
+	mov x0, #0x1
+	msr S3_0_C12_C12_5, x0 //Write the secure ICC_SRE_EL1 register
+
+	ret
diff --git a/src/bsp/lk/platform/mt8512/drivers/gic/mt_gic_v3.c b/src/bsp/lk/platform/mt8512/drivers/gic/mt_gic_v3.c
new file mode 100644
index 0000000..e8d255d
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/gic/mt_gic_v3.c
@@ -0,0 +1,793 @@
+/*
+ * Copyright (c) 2012-2015 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <debug.h>
+#include <err.h>
+#include <reg.h>
+#include <kernel/thread.h>
+#include <kernel/debug.h>
+#include <lk/init.h>
+#include <platform/interrupts.h>
+#include <platform/mt_irq.h>
+#include <platform/mt_gic_v3.h>
+#include <platform/mt_reg_base.h>
+#include <arch/ops.h>
+#include <platform/gic.h>
+#include <trace.h>
+
+#if WITH_LIB_SM
+#include <lib/sm.h>
+#include <lib/sm/sm_err.h>
+#endif
+
+#define LOCAL_TRACE 0
+
+#if ARCH_ARM
+#include <arch/arm.h>
+#define iframe arm_iframe
+#define IFRAME_PC(frame) ((frame)->pc)
+#endif
+#if ARCH_ARM64
+#include <arch/arm64.h>
+#define iframe arm64_iframe_short
+#define IFRAME_PC(frame) ((frame)->elr)
+#endif
+
+/* helpers for later ICC encode macros
+ * Indirect stringification. Doing two levels allows the parameter to be a
+ * macro itself. For example, compile with -DFOO=bar, __stringify(FOO)
+ * converts to "bar".
+ */
+#define __stringify_1(x) #x
+#define __stringify(x) __stringify_1(x)
+
+#define GIC_READ(a) readl(a)
+#define GIC_WRITE(a, v) writel(v, a)
+
+static status_t arm_gic_set_secure_locked(u_int irq, bool secure);
+static spin_lock_t gicd_lock;
+
+#if WITH_LIB_SM
+#define GICD_LOCK_FLAGS SPIN_LOCK_FLAG_IRQ_FIQ
+#else
+#define GICD_LOCK_FLAGS SPIN_LOCK_FLAG_INTERRUPTS
+#endif
+
+#define GIC_MAX_PER_CPU_INT 32
+
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#define GIC_REG_COUNT(bit_per_reg) DIV_ROUND_UP(MAX_INT, (bit_per_reg))
+#define DEFINE_GIC_SHADOW_REG(name, bit_per_reg, init_val, init_from) \
+ uint32_t (name)[GIC_REG_COUNT(bit_per_reg)] = { \
+ [(init_from / bit_per_reg) ... \
+ (GIC_REG_COUNT(bit_per_reg) - 1)] = (init_val) \
+ }
+
+__asm__ (
+" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n"
+" .equ __reg_num_x\\num, \\num\n"
+" .endr\n"
+" .equ __reg_num_xzr, 31\n"
+"\n"
+" .macro mrs_s, rt, sreg\n"
+" .inst 0xd5300000|(\\sreg)|(__reg_num_\\rt)\n"
+" .endm\n"
+"\n"
+" .macro msr_s, sreg, rt\n"
+" .inst 0xd5100000|(\\sreg)|(__reg_num_\\rt)\n"
+" .endm\n"
+);
+
+/* since gcc not support most ARMv8 ICC sysreg in asm,
+ * we learn Linux's way to encode them */
+#define sys_reg(op0, op1, crn, crm, op2) \
+ ((((op0)-2)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5))
+
+#if WITH_LIB_SM
+static bool arm_gic_non_secure_interrupts_frozen;
+
+static bool arm_gic_interrupt_change_allowed(int irq)
+{
+ if (!arm_gic_non_secure_interrupts_frozen)
+ return true;
+
+ TRACEF("change to interrupt %d ignored after booting ns\n", irq);
+ return false;
+}
+
+static void suspend_resume_fiq(bool resume_gicc, bool resume_gicd);
+#else
+static bool arm_gic_interrupt_change_allowed(int irq)
+{
+ return true;
+}
+
+static void suspend_resume_fiq(bool resume_gicc, bool resume_gicd)
+{
+}
+#endif
+
+/*******************************************************************************
+ * This function does some minimal GICv3 configuration. The Firmware itself does
+ * not fully support GICv3 at this time and relies on GICv2 emulation as
+ * provided by GICv3. This function allows software (like Linux) in later stages
+ * to use full GICv3 features.
+ ******************************************************************************/
+static void gicv3_cpuif_setup(void) /* NOTE(review): identical body to arm_gic_redist_init() and not called anywhere in this file -- candidate for removal */
+{
+	/* set all SGI/PPI as non-secure GROUP1 by default.
+	   rdist_base + 64K == SGI_base */
+	GIC_WRITE(GIC_REDIS_BASE+SZ_64K+GICE_V3_IGROUP0, 0xffffffff); // all 32 SGI/PPI -> Group 1
+	GIC_WRITE(GIC_REDIS_BASE+SZ_64K+GICE_V3_IGRPMOD0, 0x0); // group-modifier 0: plain Group 1
+}
+
+
+static void mt_git_dist_rwp(void) /* "git" looks like a typo for "gic"; name kept as-is for callers */
+{
+	/*
+	 * check GICD_CTLR.RWP for done check
+	 */
+	while (GIC_READ(GIC_DIST_BASE + GICD_CTLR) & GICD_CTLR_RWP) { // spin until the register write propagates; no timeout
+
+	}
+}
+
+struct int_handler_struct {
+ int_handler handler;
+ void *arg;
+};
+
+static struct int_handler_struct int_handler_table_per_cpu[GIC_MAX_PER_CPU_INT][SMP_MAX_CPUS];
+static struct int_handler_struct int_handler_table_shared[MAX_INT-GIC_MAX_PER_CPU_INT];
+
+static struct int_handler_struct *get_int_handler(unsigned int vector, uint cpu) /* SGI/PPI (<32) are banked per-cpu; SPIs share one table entry */
+{
+	if (vector < GIC_MAX_PER_CPU_INT)
+		return &int_handler_table_per_cpu[vector][cpu];
+	else
+		return &int_handler_table_shared[vector - GIC_MAX_PER_CPU_INT];
+}
+
+void register_int_handler(unsigned int vector, int_handler handler, void *arg) /* install handler for 'vector' on the current CPU; panics on out-of-range vector */
+{
+	struct int_handler_struct *h;
+	uint cpu = arch_curr_cpu_num();
+
+	spin_lock_saved_state_t state;
+
+	if (vector >= MAX_INT)
+		panic("register_int_handler: vector out of range %d\n", vector);
+
+	spin_lock_save(&gicd_lock, &state, GICD_LOCK_FLAGS);
+
+	if (arm_gic_interrupt_change_allowed(vector)) { // no-op once the non-secure side has booted (WITH_LIB_SM)
+		h = get_int_handler(vector, cpu);
+		h->handler = handler;
+		h->arg = arg;
+	}
+
+	spin_unlock_restore(&gicd_lock, state, GICD_LOCK_FLAGS);
+}
+
+#if WITH_LIB_SM
+static DEFINE_GIC_SHADOW_REG(gicd_igroupr, 32, ~0U, 0);
+#endif
+static DEFINE_GIC_SHADOW_REG(gicd_itargetsr, 4, 0x01010101, 32);
+
+static void gic_set_enable(uint vector, bool enable) /* enable/disable delivery of one interrupt via GICD_ISENABLER/ICENABLER */
+{
+	int reg = vector / 32;
+	uint32_t mask = 1U << (vector % 32); /* was 1ULL, silently truncated into uint32_t */
+
+	if (enable)
+		GIC_WRITE(GIC_DIST_BASE + GICD_ISENABLER + reg * 4, mask); /* was GIC_READ(...) = mask, the read macro abused as an lvalue */
+	else
+		GIC_WRITE(GIC_DIST_BASE + GICD_ICENABLER + reg * 4, mask);
+}
+
+static void arm_gic_init_percpu(uint level) /* per-CPU interface setup; all three writes previously used GIC_READ(...) as an lvalue */
+{
+#if WITH_LIB_SM
+	GIC_WRITE(GIC_REDIS_BASE + GICC_CTLR, 0xb); // enable GIC0 and select fiq mode for secure; NOTE(review): GICC_* offsets applied to the redistributor base -- confirm intended
+	GIC_WRITE(GIC_DIST_BASE + GICD_IGROUPR, ~0U); /* GICD_IGROUPR0 is banked */
+#else
+	GIC_WRITE(GIC_REDIS_BASE + GICC_CTLR, 1); // enable GIC0
+#endif
+	GIC_WRITE(GIC_REDIS_BASE + GICC_PMR, 0xFF); // unmask interrupts at all priority levels
+}
+
+LK_INIT_HOOK_FLAGS(arm_gic_init_percpu,
+ arm_gic_init_percpu,
+ LK_INIT_LEVEL_PLATFORM_EARLY, LK_INIT_FLAG_SECONDARY_CPUS);
+
+static void arm_gic_suspend_cpu(uint level) /* CPU-suspend hook: retarget/disable registered FIQs for this CPU */
+{
+	suspend_resume_fiq(false, false);
+}
+
+LK_INIT_HOOK_FLAGS(arm_gic_suspend_cpu, arm_gic_suspend_cpu,
+ LK_INIT_LEVEL_PLATFORM, LK_INIT_FLAG_CPU_SUSPEND);
+
+static void arm_gic_resume_cpu(uint level) /* CPU-resume hook: full re-init if the distributor lost state, else per-cpu only */
+{
+	spin_lock_saved_state_t state;
+	bool resume_gicd = false;
+
+	spin_lock_save(&gicd_lock, &state, GICD_LOCK_FLAGS);
+	if (!(GIC_READ(GIC_DIST_BASE + GICD_CTLR) & 1)) {
+		dprintf(SPEW, "%s: distributor is off, calling arm_gic_init instead\n", __func__); /* fixed "distibutor" typo */
+		arm_gic_init();
+		resume_gicd = true;
+	} else {
+		arm_gic_init_percpu(0);
+	}
+	spin_unlock_restore(&gicd_lock, state, GICD_LOCK_FLAGS);
+	suspend_resume_fiq(true, resume_gicd);
+}
+
+LK_INIT_HOOK_FLAGS(arm_gic_resume_cpu, arm_gic_resume_cpu,
+ LK_INIT_LEVEL_PLATFORM, LK_INIT_FLAG_CPU_RESUME);
+
+static int arm_gic_max_cpu(void) /* GICD_TYPER.CPUNumber: number of supported CPUs minus one; 0 means uniprocessor */
+{
+	return (GIC_READ(GIC_DIST_BASE + GICD_TYPER) >> 5) & 0x7;
+}
+
+/*******************************************************************************
+ * Enable secure interrupts and use FIQs to route them. Disable legacy bypass
+ * and set the priority mask register to allow all interrupts to trickle in.
+ ******************************************************************************/
+void arm_gic_redist_init(void) /* mark all SGI/PPI non-secure Group 1 in the redistributor's SGI frame */
+{
+	/* set all SGI/PPI as non-secure GROUP1 by default.
+	   rdist_base + 64K == SGI_base */
+	GIC_WRITE(GIC_REDIS_BASE+SZ_64K+GICE_V3_IGROUP0, 0xffffffff); // all 32 SGI/PPI -> Group 1
+	GIC_WRITE(GIC_REDIS_BASE+SZ_64K+GICE_V3_IGRPMOD0, 0x0); // group-modifier 0: plain Group 1
+}
+
+static void arm_gic_distif_init(void) /* one-time distributor init: group, trigger, target, priority, enables */
+{
+	unsigned int i, ctrl, irq_set;
+
+	/* Disable the distributor before going further */
+	ctrl = GIC_READ(GIC_DIST_BASE + GICD_CTLR);
+	ctrl &= ~(GICD_CTLR_ENABLE_GRP0 | GICD_CTLR_ENGRP1NS | GICD_CTLR_ENGRP1S);
+	GIC_WRITE(GIC_DIST_BASE + GICD_CTLR, ctrl);
+
+	mt_git_dist_rwp();
+
+	/*
+	 * Mark out non-secure SPI interrupts. The number of interrupts is
+	 * calculated as 32 * (IT_LINES + 1). We do 32 at a time.
+	 */
+	irq_set = (GIC_READ(GIC_DIST_BASE + GICD_TYPER)&IT_LINES_NO_MASK) + 1;
+	irq_set = irq_set * 32;
+
+	/* set all SPI as non-secure group1 by default,
+	 * index from 1, because GICE_V3_IGROUP0, GICE_V3_IGRPMOD0 are RES0,
+	 * equivalent function is provided by GICR_IGROUPR0, GICE_V3_IGRPMOD0,
+	 * which are both initialized in gic_cpuif_init() */
+	for (i = 32; i < irq_set; i += 32) { // i*4/32: one 32-bit register per 32 interrupts
+		GIC_WRITE(GIC_DIST_BASE+GICD_IGROUPR + i * 4 / 32, 0x0);
+		GIC_WRITE(GIC_DIST_BASE+GICD_IGRPMODR + i * 4 / 32, 0xffffffff);
+	}
+
+	/*
+	 * Set all global interrupts to be level triggered, active low.
+	 */
+	for (i = 32; i < irq_set; i += 16) { // i*4/16: 2 config bits per interrupt
+		GIC_WRITE(GIC_DIST_BASE + GICD_ICFGR + i * 4 / 16, 0);
+	}
+
+	/*
+	 * Set all global interrupts to this CPU only.
+	 */
+	if (arm_gic_max_cpu() > 0) {
+		/* Set external interrupts to target cpu 0 */
+		for (i = 32; i < irq_set; i += 4) {
+			GIC_WRITE(GIC_DIST_BASE + GICD_ITARGETSR + (i / 4) * 4, gicd_itargetsr[i / 4]); /* was GIC_READ(...) = ..., read macro abused as an lvalue */
+		}
+	}
+
+	/*
+	 * Set priority on all interrupts.
+	 */
+	for (i = 0; i < irq_set; i += 4) { // one byte of priority per interrupt
+		GIC_WRITE(GIC_DIST_BASE + GICD_IPRIORITYR + i * 4 / 4, 0xA0A0A0A0);
+	}
+
+	/*
+	 * Disable all interrupts.
+	 */
+	for (i = 0; i < irq_set; i += 32) {
+		GIC_WRITE(GIC_DIST_BASE + GICD_ICENABLER + i * 4 / 32, 0xFFFFFFFF);
+	}
+
+	/*
+	 * Clear all active status
+	 */
+	for (i = 0; i < irq_set; i += 32) {
+		GIC_WRITE(GIC_DIST_BASE + GICD_ICACTIVER + i * 4 / 32, 0xFFFFFFFF);
+	}
+
+	/*
+	 * Clear all pending status
+	 */
+	for (i = 0; i < irq_set; i += 32) {
+		GIC_WRITE(GIC_DIST_BASE + GICD_ICPENDR + i * 4 / 32, 0xFFFFFFFF);
+	}
+
+	/* enable all groups & ARE */
+	ctrl = GICD_CTLR_ENABLE_GRP0 | GICD_CTLR_ENGRP1NS | GICD_CTLR_ENGRP1S |
+		GICD_CTLR_ARE | GICD_CTLR_ARE_NS;
+	GIC_WRITE(GIC_DIST_BASE + GICD_CTLR, ctrl);
+
+	mt_git_dist_rwp();
+}
+
+void clear_sec_pol_ctl_en(void) /* zero every secure polarity-control enable register */
+{
+	unsigned int i;
+
+	/* total 19 polarity ctrl registers */
+	for (i = 0; i <= NR_INT_POL_CTL-1; i++) {
+		GIC_WRITE((SEC_POL_CTL_EN0 + (i * 4)), 0);
+	}
+}
+
+void arm_gic_init(void) /* full GIC bring-up: redistributor, distributor, per-cpu interface, polarity ctrl */
+{
+	LTRACEF("[LK GIC] before arm_gic_cpuif_setup\n");
+	arm_gic_redist_init();
+
+	LTRACEF("[LK GIC] before arm_gic_distif_init\n");
+	arm_gic_distif_init();
+
+#if WITH_LIB_SM
+	GIC_WRITE(GIC_DIST_BASE + GICD_CTLR, 3); // enable GIC0 ns interrupts (was GIC_READ used as an lvalue)
+	/*
+	 * Iterate through all IRQs and set them to non-secure
+	 * mode. This will allow the non-secure side to handle
+	 * all the interrupts we don't explicitly claim.
+	 */
+	for (u_int i = 32; i < MAX_INT; i += 32) { /* 'i' was used without a declaration here */
+		u_int reg = i / 32;
+		GIC_WRITE(GIC_DIST_BASE + GICD_IGROUPR + reg * 4, gicd_igroupr[reg]);
+	}
+#endif
+	arm_gic_init_percpu(0);
+
+	LTRACEF("[LK GIC] before clear_sec_pol_ctl_en\n");
+	clear_sec_pol_ctl_en();
+
+}
+
+static status_t arm_gic_set_secure_locked(u_int irq, bool secure) /* move irq between Group 0 (secure) and Group 1; caller holds gicd_lock */
+{
+#if WITH_LIB_SM
+	int reg = irq / 32;
+	uint32_t mask = 1U << (irq % 32); /* was 1ULL, silently truncated into uint32_t */
+
+	if (irq >= MAX_INT)
+		return ERR_INVALID_ARGS;
+
+	if (secure)
+		GIC_WRITE(GIC_DIST_BASE + GICD_IGROUPR + reg * 4, (gicd_igroupr[reg] &= ~mask)); /* was GIC_READ(...) = ... */
+	else
+		GIC_WRITE(GIC_DIST_BASE + GICD_IGROUPR + reg * 4, (gicd_igroupr[reg] |= mask));
+	LTRACEF("irq %d, secure %d, GICD_IGROUP%d = %x\n",
+		irq, secure, reg, GIC_READ(GIC_DIST_BASE + GICD_IGROUPR + reg * 4)); /* the closing paren was missing, a syntax error */
+#endif
+	return NO_ERROR;
+}
+
+static status_t arm_gic_set_target_locked(u_int irq, u_int cpu_mask, u_int enable_mask) /* update irq's CPU-target byte in GICD_ITARGETSR and the shadow copy; caller holds gicd_lock */
+{
+	u_int reg = irq / 4;
+	u_int shift = 8 * (irq % 4); /* one target byte per interrupt */
+	u_int old_val;
+	u_int new_val;
+
+	cpu_mask = (cpu_mask & 0xff) << shift;
+	enable_mask = (enable_mask << shift) & cpu_mask;
+
+	old_val = GIC_READ(GIC_DIST_BASE + GICD_ITARGETSR + reg * 4);
+	new_val = (gicd_itargetsr[reg] & ~cpu_mask) | enable_mask;
+	GIC_WRITE(GIC_DIST_BASE + GICD_ITARGETSR + reg * 4, gicd_itargetsr[reg] = new_val); /* was GIC_READ(...) = ... */
+	LTRACEF("irq %i, GICD_ITARGETSR%d %x => %x (got %x)\n",
+		irq, reg, old_val, new_val, GIC_READ(GIC_DIST_BASE + GICD_ITARGETSR + reg * 4)); /* readback was missing the * 4 byte-offset scaling */
+
+	return NO_ERROR;
+}
+
+static status_t arm_gic_get_priority(u_int irq) /* return the 8-bit priority field for irq from GICD_IPRIORITYR */
+{
+	u_int reg = irq / 4;
+	u_int shift = 8 * (irq % 4); /* one priority byte per interrupt */
+	return (GIC_READ(GIC_DIST_BASE + GICD_IPRIORITYR + reg * 4) >> shift) & 0xff;
+}
+
+static status_t arm_gic_set_priority_locked(u_int irq, uint8_t priority) /* read-modify-write irq's priority byte; caller holds gicd_lock */
+{
+	u_int reg = irq / 4;
+	u_int shift = 8 * (irq % 4);
+	u_int mask = 0xffU << shift; /* 0xffU: avoid shifting a signed 0xff into the sign bit when shift == 24 */
+	uint32_t regval;
+
+	regval = GIC_READ(GIC_DIST_BASE + GICD_IPRIORITYR + reg * 4); /* read was missing the * 4 byte-offset scaling */
+	LTRACEF("irq %i, old GICD_IPRIORITYR%d = %x\n", irq, reg, regval);
+	regval = (regval & ~mask) | ((uint32_t)priority << shift);
+	GIC_WRITE(GIC_DIST_BASE + GICD_IPRIORITYR + reg * 4, regval); /* was GIC_READ(...) = ... */
+	LTRACEF("irq %i, new GICD_IPRIORITYR%d = %x, req %x\n",
+		irq, reg, GIC_READ(GIC_DIST_BASE + GICD_IPRIORITYR + reg * 4), regval);
+
+	return NO_ERROR; /* was a bare 0; NO_ERROR for consistency with siblings */
+}
+
+status_t arm_gic_sgi(u_int irq, u_int flags, u_int cpu_mask) /* raise software-generated interrupt irq (0-15) via GICD_SGIR */
+{
+	u_int val =
+		((flags & ARM_GIC_SGI_FLAG_TARGET_FILTER_MASK) << 24) |
+		((cpu_mask & 0xff) << 16) |
+		((flags & ARM_GIC_SGI_FLAG_NS) ? (1U << 15) : 0) |
+		(irq & 0xf);
+
+	if (irq >= 16)
+		return ERR_INVALID_ARGS;
+
+	LTRACEF("GICD_SGIR: %x\n", val);
+
+	GIC_WRITE(GIC_DIST_BASE + GICD_SGIR, val); /* was GIC_READ(...) = val, the read macro abused as an lvalue */
+
+	return NO_ERROR;
+}
+
+status_t mask_interrupt(unsigned int vector) /* disable delivery of 'vector'; ERR_INVALID_ARGS if out of range */
+{
+	if (vector >= MAX_INT)
+		return ERR_INVALID_ARGS;
+
+	if (arm_gic_interrupt_change_allowed(vector)) // silently ignored after ns boot (WITH_LIB_SM)
+		gic_set_enable(vector, false);
+
+	return NO_ERROR;
+}
+
+status_t unmask_interrupt(unsigned int vector) /* enable delivery of 'vector'; ERR_INVALID_ARGS if out of range */
+{
+	if (vector >= MAX_INT)
+		return ERR_INVALID_ARGS;
+
+	if (arm_gic_interrupt_change_allowed(vector)) // silently ignored after ns boot (WITH_LIB_SM)
+		gic_set_enable(vector, true);
+
+	return NO_ERROR;
+}
+
+static uint64_t gicc_read_hppir1_el1(void) /* highest-priority pending Group 1 interrupt id (ICC_HPPIR1_EL1) */
+{
+	uint64_t val = 0;
+
+	__asm__ volatile("mrs_s %0, " __stringify(ICC_HPPIR1_EL1) : "=r" (val));
+
+	return val;
+}
+
+uint32_t arm_gic_get_pending_interrupt_id(void) /* truncates the 64-bit sysreg value to its 32-bit INTID */
+{
+	return gicc_read_hppir1_el1();
+}
+
+uint64_t gicc_read_iar1_el1(void) /* acknowledge a Group 1 interrupt (ICC_IAR1_EL1) */
+{
+	u64 irqstat;
+	__asm__ volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
+	return irqstat;
+}
+
+uint64_t arm_gic_acknowledge_irq(void) /* IRQ path: read IAR1 */
+{
+	return gicc_read_iar1_el1();
+}
+
+static void gicc_write_eoi1_el1(uint32_t irq) /* end-of-interrupt for Group 1 (ICC_EOIR1_EL1) */
+{
+	__asm__ volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq));
+}
+
+static void arm_gic_end_of_irq(uint32_t id)
+{
+	gicc_write_eoi1_el1(id);
+}
+
+uint64_t gicc_read_iar0_el1(void) /* acknowledge a Group 0 interrupt (ICC_IAR0_EL1), used for FIQs */
+{
+	u64 irqstat;
+	__asm__ volatile("mrs_s %0, " __stringify(ICC_IAR0_EL1) : "=r" (irqstat));
+	return irqstat;
+}
+
+uint64_t arm_gic_acknowledge_fiq(void) /* FIQ path: read IAR0 */
+{
+	return gicc_read_iar0_el1();
+}
+
+static void gicc_write_eoi0_el1(uint32_t irq) /* end-of-interrupt for Group 0 (ICC_EOIR0_EL1) */
+{
+	__asm__ volatile("msr_s " __stringify(ICC_EOIR0_EL1) ", %0" : : "r" (irq));
+}
+
+static void arm_gic_end_of_fiq(uint32_t id)
+{
+	gicc_write_eoi0_el1(id);
+}
+
+static
+enum handler_return __platform_irq(struct iframe *frame) /* acknowledge, dispatch to the registered handler, then EOI */
+{
+	// get the current vector
+	uint32_t iar = arm_gic_acknowledge_irq();
+	unsigned int vector = iar & 0x3ff;
+
+	if (vector >= 0x3fe) {
+		// spurious
+		return INT_NO_RESCHEDULE;
+	}
+
+	THREAD_STATS_INC(interrupts);
+	KEVLOG_IRQ_ENTER(vector);
+
+	uint cpu = arch_curr_cpu_num();
+
+	LTRACEF_LEVEL(2, "iar 0x%x cpu %u currthread %p vector %d pc 0x%lx\n", iar, cpu,
+		get_current_thread(), vector, (uintptr_t)IFRAME_PC(frame));
+
+	// deliver the interrupt
+	enum handler_return ret;
+
+	ret = INT_NO_RESCHEDULE;
+	struct int_handler_struct *handler = get_int_handler(vector, cpu);
+	if (handler->handler)
+		ret = handler->handler(handler->arg);
+
+	arm_gic_end_of_irq(iar); // EOI with the full IAR value, not just the masked vector
+
+	LTRACEF_LEVEL(2, "cpu %u exit %d\n", cpu, ret);
+
+	KEVLOG_IRQ_EXIT(vector);
+
+	return ret;
+}
+
+enum handler_return platform_irq(struct iframe *frame) /* SM build: claim secure-owned irqs here, forward the rest to the ns side */
+{
+#if WITH_LIB_SM
+	uint32_t ahppir = arm_gic_get_pending_interrupt_id();
+	uint32_t pending_irq = ahppir & 0x3ff;
+	struct int_handler_struct *h;
+	uint cpu = arch_curr_cpu_num();
+
+	LTRACEF("ahppir %d\n", ahppir);
+	if (pending_irq < MAX_INT && get_int_handler(pending_irq, cpu)->handler) {
+		enum handler_return ret = 0;
+		uint32_t irq;
+		uint8_t old_priority;
+		spin_lock_saved_state_t state;
+
+		spin_lock_save(&gicd_lock, &state, GICD_LOCK_FLAGS);
+
+		/* Temporarily raise the priority of the interrupt we want to
+		 * handle so another interrupt does not take its place before
+		 * we can acknowledge it.
+		 */
+		old_priority = arm_gic_get_priority(pending_irq);
+		arm_gic_set_priority_locked(pending_irq, 0); // 0 is the highest priority
+		DSB;
+		irq = arm_gic_acknowledge_irq() & 0x3ff;
+		arm_gic_set_priority_locked(pending_irq, old_priority);
+
+		spin_unlock_restore(&gicd_lock, state, GICD_LOCK_FLAGS);
+
+		LTRACEF("irq %d\n", irq);
+		if (irq < MAX_INT && (h = get_int_handler(pending_irq, cpu))->handler) // note: handler looked up by pending_irq, not the acked irq
+			ret = h->handler(h->arg);
+		else
+			TRACEF("unexpected irq %d != %d may get lost\n", irq, pending_irq);
+		arm_gic_end_of_fiq(irq); // secure interrupts are Group 0, so EOI on the FIQ path
+
+		return ret;
+	}
+	return sm_handle_irq();
+#else
+	return __platform_irq(frame);
+#endif
+}
+
+void platform_fiq(struct iframe *frame) /* FIQs are only expected when the secure monitor is present */
+{
+#if WITH_LIB_SM
+	sm_handle_fiq();
+#else
+	PANIC_UNIMPLEMENTED;
+#endif
+}
+
+#if WITH_LIB_SM
+static status_t arm_gic_get_next_irq_locked(u_int min_irq, bool per_cpu) /* next irq >= min_irq with a registered handler; SM_ERR_END_OF_INPUT when exhausted */
+{
+	u_int irq;
+	u_int max_irq = per_cpu ? GIC_MAX_PER_CPU_INT : MAX_INT;
+	uint cpu = arch_curr_cpu_num();
+
+	if (!per_cpu && min_irq < GIC_MAX_PER_CPU_INT) // shared scan skips the banked SGI/PPI range
+		min_irq = GIC_MAX_PER_CPU_INT;
+
+	for (irq = min_irq; irq < max_irq; irq++)
+		if (get_int_handler(irq, cpu)->handler)
+			return irq;
+
+	return SM_ERR_END_OF_INPUT;
+}
+
+long smc_intc_get_next_irq(smc32_args_t *args) /* SMC: enumerate secure-claimed irqs; freezes further ns-visible changes */
+{
+	status_t ret;
+	spin_lock_saved_state_t state;
+
+	spin_lock_save(&gicd_lock, &state, GICD_LOCK_FLAGS);
+
+	arm_gic_non_secure_interrupts_frozen = true; // one-way latch: later handler changes are refused
+	ret = arm_gic_get_next_irq_locked(args->params[0], args->params[1]);
+	LTRACEF("min_irq %d, per_cpu %d, ret %d\n",
+		args->params[0], args->params[1], ret);
+
+	spin_unlock_restore(&gicd_lock, state, GICD_LOCK_FLAGS);
+
+	return ret;
+}
+
+static u_long enabled_fiq_mask[BITMAP_NUM_WORDS(MAX_INT)];
+
+static void bitmap_update_locked(u_long *bitmap, u_int bit, bool set) /* set or clear one bit in a word-array bitmap; caller holds gicd_lock */
+{
+	u_long mask = 1UL << BITMAP_BIT_IN_WORD(bit);
+
+	bitmap += BITMAP_WORD(bit);
+	if (set)
+		*bitmap |= mask;
+	else
+		*bitmap &= ~mask;
+}
+
+long smc_intc_request_fiq(smc32_args_t *args) /* SMC: claim 'fiq' as a secure Group 0 interrupt and en/disable it */
+{
+	u_int fiq = args->params[0];
+	bool enable = args->params[1];
+	spin_lock_saved_state_t state;
+
+	dprintf(SPEW, "%s: fiq %d, enable %d\n", __func__, fiq, enable);
+	spin_lock_save(&gicd_lock, &state, GICD_LOCK_FLAGS);
+
+	arm_gic_set_secure_locked(fiq, true);
+	arm_gic_set_target_locked(fiq, ~0, ~0); // route to all CPUs
+	arm_gic_set_priority_locked(fiq, 0); // highest priority
+
+	gic_set_enable(fiq, enable);
+	bitmap_update_locked(enabled_fiq_mask, fiq, enable); // remember for suspend/resume retargeting
+
+	dprintf(SPEW, "%s: fiq %d, enable %d done\n", __func__, fiq, enable);
+
+	spin_unlock_restore(&gicd_lock, state, GICD_LOCK_FLAGS);
+
+	return NO_ERROR;
+}
+
+static u_int current_fiq[8] = { 0x3ff, 0x3ff, 0x3ff, 0x3ff, 0x3ff, 0x3ff, 0x3ff, 0x3ff };
+
+static bool update_fiq_targets(u_int cpu, bool enable, u_int triggered_fiq, bool resume_gicd) /* retarget every claimed fiq for 'cpu'; returns true if triggered_fiq is among them */
+{
+	u_int i, j;
+	u_long mask;
+	u_int fiq;
+	bool smp = arm_gic_max_cpu() > 0;
+	bool ret = false;
+
+	spin_lock(&gicd_lock); /* IRQs and FIQs are already masked */
+	for (i = 0; i < BITMAP_NUM_WORDS(MAX_INT); i++) {
+		mask = enabled_fiq_mask[i];
+		while (mask) {
+			j = _ffz(~mask); // index of lowest set bit
+			mask &= ~(1UL << j);
+			fiq = i * BITMAP_BITS_PER_WORD + j;
+			if (fiq == triggered_fiq)
+				ret = true;
+			LTRACEF("cpu %d, irq %i, enable %d\n", cpu, fiq, enable);
+			if (smp)
+				arm_gic_set_target_locked(fiq, 1U << cpu, enable ? ~0 : 0);
+			if (!smp || resume_gicd) // UP has no per-cpu targeting; toggle the enable bit instead
+				gic_set_enable(fiq, enable);
+		}
+	}
+	spin_unlock(&gicd_lock);
+	return ret;
+}
+
+static void suspend_resume_fiq(bool resume_gicc, bool resume_gicd) /* suspend/resume hook: (un)route all claimed fiqs for the current cpu */
+{
+	u_int cpu = arch_curr_cpu_num();
+
+	ASSERT(cpu < 8); // current_fiq[] is sized for 8 CPUs
+
+	update_fiq_targets(cpu, resume_gicc, ~0, resume_gicd);
+}
+
+status_t sm_intc_fiq_enter(void) /* ack the pending fiq, de-route it while it is being handled, record it per-cpu */
+{
+	u_int cpu = arch_curr_cpu_num();
+	u_int irq = arm_gic_acknowledge_fiq() & 0x3ff;
+	bool fiq_enabled;
+
+	ASSERT(cpu < 8);
+
+	LTRACEF("cpu %d, irq %i\n", cpu, irq);
+
+	if (irq >= 1020) { // 1020-1023 are special/spurious INTIDs
+		LTRACEF("spurious fiq: cpu %d, old %d, new %d\n", cpu, current_fiq[cpu], irq);
+		return ERR_NO_MSG;
+	}
+
+	fiq_enabled = update_fiq_targets(cpu, false, irq, false); // mask it until sm_intc_fiq_exit()
+	arm_gic_end_of_fiq(irq);
+
+	if (current_fiq[cpu] != 0x3ff) { // 0x3ff == no fiq in flight
+		dprintf(INFO, "more than one fiq active: cpu %d, old %d, new %d\n", cpu, current_fiq[cpu], irq);
+		return ERR_ALREADY_STARTED;
+	}
+
+	if (!fiq_enabled) {
+		dprintf(INFO, "got disabled fiq: cpu %d, new %d\n", cpu, irq);
+		return ERR_NOT_READY;
+	}
+
+	current_fiq[cpu] = irq;
+
+	return 0;
+}
+
+void sm_intc_fiq_exit(void) /* re-route the fiq recorded by sm_intc_fiq_enter() and clear the in-flight marker */
+{
+	u_int cpu = arch_curr_cpu_num();
+
+	ASSERT(cpu < 8);
+
+	LTRACEF("cpu %d, irq %i\n", cpu, current_fiq[cpu]);
+	if (current_fiq[cpu] == 0x3ff) { // nothing recorded by sm_intc_fiq_enter()
+		dprintf(INFO, "%s: no fiq active, cpu %d\n", __func__, cpu);
+		return;
+	}
+	update_fiq_targets(cpu, true, current_fiq[cpu], false);
+	current_fiq[cpu] = 0x3ff;
+}
+#endif
+
+/* vim: set ts=4 sw=4 noexpandtab: */
diff --git a/src/bsp/lk/platform/mt8512/drivers/i2c/mt_i2c.c b/src/bsp/lk/platform/mt8512/drivers/i2c/mt_i2c.c
new file mode 100644
index 0000000..1c0fa00
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/i2c/mt_i2c.c
@@ -0,0 +1,1397 @@
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+/**
+ * @file mt_i2c.c
+ * This i2c driver is used to control MediaTek I2C controller.\n
+ * It provides the interfaces which will be used in LK.
+ */
+
+/**
+ * @defgroup IP_group_i2c I2C
+ *
+ * @{
+ * @defgroup IP_group_i2c_external EXTERNAL
+ * The external API document for I2C. \n
+ *
+ * @{
+ * @defgroup IP_group_i2c_external_function 1.function
+ * External function in i2c driver.
+ * @defgroup IP_group_i2c_external_struct 2.structure
+ * none.
+ * @defgroup IP_group_i2c_external_typedef 3.typedef
+ * none.
+ * @defgroup IP_group_i2c_external_enum 4.enumeration
+ * none.
+ * @defgroup IP_group_i2c_external_def 5.define
+ * none.
+ * @}
+ *
+ * @defgroup IP_group_i2c_internal INTERNAL
+ * The internal API document for I2C. \n
+ *
+ * @{
+ * @defgroup IP_group_i2c_internal_function 1.function
+ * Internal function in i2c driver.
+ * @defgroup IP_group_i2c_internal_struct 2.structure
+ * Internal structure in i2c driver.
+ * @defgroup IP_group_i2c_internal_typedef 3.typedef
+ * none.
+ * @defgroup IP_group_i2c_internal_enum 4.enumeration
+ * Internal enumeration in i2c driver.
+ * @defgroup IP_group_i2c_internal_def 5.define
+ * Internal define in i2c driver.
+ * @}
+ * @}
+ */
+
+#include "platform/mt_i2c.h"
+
+
+#define DRV_Reg32(addr) (*(volatile u32 *)(addr))
+#define DRV_WriteReg32(addr,data) ((*(volatile u32 *)(addr)) = (u32)data)
+
+struct mtk_i2c *i2c_global;
+
+/**
+ * @brief i2c source clock.
+ */
+static uint32_t g_i2c_source_clock = MTK_I2C_SOURCE_CLK;
+
+/** @ingroup IP_group_i2c_internal_function
+ * @par Description
+ * Write data to i2c controller register.
+ * @param[in]
+ * i2c: mtk_i2c pointer, struct mtk_i2c contains register base\n
+ * address.
+ * @param[in]
+ * offset: register relative base offset value.
+ * @param[in]
+ * value: The value set to register.
+ * @return
+ * none.
+ * @par Boundary case and Limitation
+ * none.
+ * @par Error case and Error handling
+ * none.
+ * @par Call graph and Caller graph
+ * @par Refer to the source code
+ */
+static void i2c_writel(struct mtk_i2c *i2c, uint32_t offset,
+		       uint32_t value) /* MMIO write to i2c->base + offset */
+{
+	writel(value, (i2c->base + offset));
+}
+
+/** @ingroup IP_group_i2c_internal_function
+ * @par Description
+ * Read data from i2c controller register.
+ * @param[in]
+ * i2c: mtk_i2c pointer, struct mtk_i2c contains register base\n
+ * address.
+ * @param[in]
+ * offset: register relative base offset value.
+ * @return
+ * i2c controller register value.
+ * @par Boundary case and Limitation
+ * none.
+ * @par Error case and Error handling
+ * none.
+ * @par Call graph and Caller graph
+ * @par Refer to the source code
+ */
+static uint32_t i2c_readl(struct mtk_i2c *i2c, uint32_t offset) /* MMIO read from i2c->base + offset */
+{
+	return readl(i2c->base + offset);
+}
+
+/** @ingroup IP_group_i2c_internal_function
+ * @par Description
+ * Write data to DMA controller register.
+ * @param[in]
+ * i2c: mtk_i2c pointer, struct mtk_i2c contains DMA register\n
+ * base address.
+ * @param[in]
+ * offset: register relative base offset value.
+ * @param[in]
+ * value: The value set to register.
+ * @return
+ * none.
+ * @par Boundary case and Limitation
+ * none.
+ * @par Error case and Error handling
+ * none.
+ * @par Call graph and Caller graph
+ * @par Refer to the source code
+ */
+static inline void i2c_dma_writel(struct mtk_i2c *i2c, uint32_t offset,
+				  uint32_t value) /* MMIO write to the APDMA frame at i2c->dmabase + offset */
+{
+	writel(value, (i2c->dmabase + offset));
+}
+
+/** @ingroup IP_group_i2c_internal_function
+ * @par Description
+ * Read data from DMA controller register.
+ * @param[in]
+ * i2c: mtk_i2c pointer, struct mtk_i2c contains DMA register\n
+ * base address.
+ * @param[in]
+ * offset: register relative base offset value.
+ * @return
+ * DMA controller register value.
+ * @par Boundary case and Limitation
+ * none.
+ * @par Error case and Error handling
+ * none.
+ * @par Call graph and Caller graph
+ * @par Refer to the source code
+ */
+static inline uint32_t i2c_dma_readl(struct mtk_i2c *i2c, uint32_t offset) /* MMIO read from the APDMA frame at i2c->dmabase + offset */
+{
+	return readl(i2c->dmabase + offset);
+}
+
+/** @ingroup IP_group_i2c_internal_function
+ * @par Description
+ * Set gpio to i2c mode.
+ * @param[in]
+ * i2c: mtk_i2c pointer, struct mtk_i2c contains bus number\n
+ * information.
+ * @return
+ * 0, set gpio to i2c mode successfully.\n
+ * -EINVAL_I2C, invalid i2c bus id.
+ * @par Boundary case and Limitation
+ * none.
+ * @par Error case and Error handling
+ * 1. Invalid i2c bus number, return -EINVAL_I2C.
+ * @par Call graph and Caller graph
+ * @par Refer to the source code
+ */
+static int i2c_gpio_pinmux(struct mtk_i2c *i2c) /* select function mode 1 (I2C) for the bus's SDA/SCL pins via the pinmux register */
+{
+#ifndef CONFIG_MTK_FPGA
+	uint32_t gpio_reg;
+
+	switch (i2c->id) {
+	case 0:
+		///* I2C0_SDA */
+		//mtk_pmx_set_mode(110, 1);
+		///* I2C0_SCL */
+		//mtk_pmx_set_mode(111, 1);
+		gpio_reg = (readl(MTK_GPIO_I2C_BASE) &
+			(~(0x7 << MTK_GPIO_SDA0))) | (0x1 << MTK_GPIO_SDA0);
+		writel(gpio_reg, MTK_GPIO_I2C_BASE);
+		gpio_reg = (readl(MTK_GPIO_I2C_BASE) &
+			(~(0x7 << MTK_GPIO_SCL0))) | (0x1 << MTK_GPIO_SCL0);
+		writel(gpio_reg, MTK_GPIO_I2C_BASE);
+		break;
+	case 1:
+		///* I2C1_SDA */
+		//mtk_pmx_set_mode(108, 1);
+		///* I2C1_SCL */
+		//mtk_pmx_set_mode(109, 1);
+		gpio_reg = (readl(MTK_GPIO_I2C_BASE) &
+			(~(0x7 << MTK_GPIO_SDA1))) | (0x1 << MTK_GPIO_SDA1);
+		writel(gpio_reg, MTK_GPIO_I2C_BASE);
+		gpio_reg = (readl(MTK_GPIO_I2C_BASE) &
+			(~(0x7 << MTK_GPIO_SCL1))) | (0x1 << MTK_GPIO_SCL1);
+		writel(gpio_reg, MTK_GPIO_I2C_BASE);
+		break;
+	case 2:
+		///* I2C2_SDA */ /* comment said I2C1; this is bus 2 (pin 112) */
+		//mtk_pmx_set_mode(112, 1);
+		///* I2C2_SCL */ /* comment said I2C1; this is bus 2 (pin 113) */
+		//mtk_pmx_set_mode(113, 1);
+		gpio_reg = (readl(MTK_GPIO_I2C_BASE) &
+			(~(0x7 << MTK_GPIO_SDA2))) | (0x1 << MTK_GPIO_SDA2);
+		writel(gpio_reg, MTK_GPIO_I2C_BASE);
+		gpio_reg = (readl(MTK_GPIO_I2C_BASE) &
+			(~(0x7 << MTK_GPIO_SCL2))) | (0x1 << MTK_GPIO_SCL2);
+		writel(gpio_reg, MTK_GPIO_I2C_BASE);
+		break;
+	default:
+		I2CERR("invalid para: i2c->id=%d\n", i2c->id);
+		return -EINVAL_I2C;
+	}
+#endif
+	return 0;
+}
+
+/** @ingroup IP_group_i2c_internal_function
+ * @par Description
+ * Enable i2c clock.
+ * @param[in]
+ * i2c: mtk_i2c pointer, struct mtk_i2c contains i2c bus number.
+ * @return
+ * none.
+ * @par Boundary case and Limitation
+ * none.
+ * @par Error case and Error handling
+ * none.
+ * @par Call graph and Caller graph
+ * @par Refer to the source code
+ */
+static void i2c_clock_enable(struct mtk_i2c *i2c) /* ungate the bus clock and the APDMA clock (writing the CLR register clears the gate, i.e. enables) */
+{
+#ifndef CONFIG_MTK_FPGA
+	switch (i2c->id) {
+	case 0:
+		writel(MTK_I2C0_CLK_OFFSET, MTK_I2C_CLK_CLR);
+		break;
+	case 1:
+		writel(MTK_I2C1_CLK_OFFSET, MTK_I2C_CLK_CLR);
+		break;
+	case 2:
+		writel(MTK_I2C2_CLK_OFFSET, MTK_I2C_CLK_CLR);
+		break;
+	default:
+		I2CERR("i2c clk enable, invalid para: i2c->id=%d\n",i2c->id);
+	}
+
+	writel(MTK_APDMA_CLK_OFFSET, MTK_APDMA_CLK_CLR);
+#endif
+}
+
+/** @ingroup IP_group_i2c_internal_function
+ * @par Description
+ * Disable i2c clock.
+ * @param[in]
+ * i2c: mtk_i2c pointer, struct mtk_i2c contains i2c bus number.
+ * @return
+ * none.
+ * @par Boundary case and Limitation
+ * none.
+ * @par Error case and Error handling
+ * none.
+ * @par Call graph and Caller graph
+ * @par Refer to the source code
+ */
+static void i2c_clock_disable(struct mtk_i2c *i2c)
+{
+#ifndef CONFIG_MTK_FPGA
+	/* Gate this bus's clock by writing its bit to the CG set register. */
+	switch (i2c->id) {
+	case 0:
+		writel(MTK_I2C0_CLK_OFFSET, MTK_I2C_CLK_SET);
+		break;
+	case 1:
+		writel(MTK_I2C1_CLK_OFFSET, MTK_I2C_CLK_SET);
+		break;
+	case 2:
+		writel(MTK_I2C2_CLK_OFFSET, MTK_I2C_CLK_SET);
+		break;
+	default:
+		I2CERR("i2c clk disable, invalid para: i2c->id=%d\n", i2c->id);
+		/* Unknown bus: leave the shared APDMA clock alone. */
+		return;
+	}
+
+	/* Gate the shared APDMA engine clock as well. */
+	writel(MTK_APDMA_CLK_OFFSET, MTK_APDMA_CLK_SET);
+#endif
+}
+
+#ifdef CONFIG_MTK_IRQ
+static void mtk_i2c_irq(void)
+{
+	uint16_t restart_flag = 0;
+	uint16_t intr_stat;
+
+	/* For auto-restart transfers, RS_TRANSFER also ends a message. */
+	if (i2c_global->auto_restart)
+		restart_flag = I2C_RS_TRANSFER;
+
+	/* Read the pending bits, then ack them by writing them back. */
+	intr_stat = i2c_readl(i2c_global, OFFSET_INTR_STAT);
+	i2c_writel(i2c_global, OFFSET_INTR_STAT, intr_stat);
+
+	/*
+	 * when occurs ack error, i2c controller generate two interrupts
+	 * first is the ack error interrupt, then the complete interrupt
+	 * i2c->irq_stat need keep the two interrupt value.
+	 */
+	i2c_global->irq_stat |= intr_stat;
+
+	if (i2c_global->irq_stat & (I2C_TRANSAC_COMP | restart_flag))
+		i2c_global->msg_complete = true;
+}
+
+static void mtk_irq_init(struct mtk_i2c *i2c)
+{
+	/* Hook the shared handler onto this bus's GIC interrupt line. */
+	register_int_handler(i2c->irqnr, mtk_i2c_irq, "i2c irq handler");
+}
+#endif
+
+/** @ingroup IP_group_i2c_internal_function
+ * @par Description
+ * Get i2c bus base address, DMA base address and source clock.
+ * @param[out]
+ * i2c: mtk_i2c pointer, struct mtk_i2c contains register base\n
+ * address, DMA base address and bus number information.
+ * @return
+ * 0, set base address successfully.\n
+ * -EINVAL_I2C, invalid i2c bus id.
+ * @par Boundary case and Limitation
+ * none.
+ * @par Error case and Error handling
+ * 1. Invalid i2c bus number, return -EINVAL_I2C.
+ * @par Call graph and Caller graph
+ * @par Refer to the source code
+ */
+static int mtk_i2c_init_base(struct mtk_i2c *i2c)
+{
+	/* Resolve the bus id to its register base, DMA base and IRQ line. */
+	if (i2c->id == 0) {
+		i2c->base = MTK_I2C0_BASE;
+		i2c->dmabase = MTK_I2C0_DMA;
+		i2c->irqnr = MTK_I2C0_GIC_IRQ;
+	} else if (i2c->id == 1) {
+		i2c->base = MTK_I2C1_BASE;
+		i2c->dmabase = MTK_I2C1_DMA;
+		i2c->irqnr = MTK_I2C1_GIC_IRQ;
+	} else if (i2c->id == 2) {
+		i2c->base = MTK_I2C2_BASE;
+		i2c->dmabase = MTK_I2C2_DMA;
+		i2c->irqnr = MTK_I2C2_GIC_IRQ;
+	} else {
+		I2CERR("invalid para: i2c->id=%d\n", i2c->id);
+		return -EINVAL_I2C;
+	}
+
+	/* Default source clock and divider for this platform. */
+	i2c->clk = g_i2c_source_clock;
+	i2c->clk_src_div = MTK_I2C_CLK_DIV;
+
+#ifdef CONFIG_MTK_IRQ
+	/* Interrupt-driven mode: publish this context and register the IRQ. */
+	if (!i2c->poll_en) {
+		i2c_global = i2c;
+		mtk_irq_init(i2c);
+	}
+#endif
+
+	return 0;
+}
+
+/** @ingroup IP_group_i2c_internal_function
+ * @par Description
+ * Initialize i2c hardware, soft reset i2c controller, then\n
+ * configure io mode and control registers.
+ * @param[in]
+ * i2c: mtk_i2c pointer, struct mtk_i2c contains register base\n
+ * address, ioconfig and i2c hardware information.
+ * @return
+ * none.
+ * @par Boundary case and Limitation
+ * none.
+ * @par Error case and Error handling
+ * none.
+ * @par Call graph and Caller graph
+ * @par Refer to the source code
+ */
+static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
+{
+	uint16_t control_reg;
+
+	/* Soft-reset the controller unless the caller asked to keep the
+	 * current FIFO state (I2C_FIFO_FORCE). */
+	if (!(i2c->mode & I2C_FIFO_FORCE))
+		i2c_writel(i2c, OFFSET_SOFTRESET, I2C_SOFT_RST);
+
+	/* set ioconfig: push-pull or open-drain pad driving */
+	if (i2c->pushpull)
+		i2c_writel(i2c, OFFSET_IO_CONFIG, I2C_IO_CONFIG_PUSH_PULL);
+	else
+		i2c_writel(i2c, OFFSET_IO_CONFIG, I2C_IO_CONFIG_OPEN_DRAIN);
+
+	/* Base control setup: ACK-error detection plus the CLK_EXT feature
+	 * (presumably clock extension/stretching — named after the bit). */
+	control_reg = I2C_CONTROL_DEFAULT | I2C_CONTROL_ACKERR_DET_EN |
+		      I2C_CONTROL_CLK_EXT_EN;
+	i2c_writel(i2c, OFFSET_CONTROL, control_reg);
+
+	if (i2c->mode & I2C_DCM_ENABLE)
+		i2c_writel(i2c, OFFSET_DCM_EN, I2C_DCM_OPEN);
+
+	/* Inter-transfer delay: caller-supplied in continuous mode,
+	 * otherwise the driver default. */
+	if (i2c->mode & I2C_CONTI_TRANS)
+		i2c_writel(i2c, OFFSET_DELAY_LEN, i2c->delay_len);
+	else
+		i2c_writel(i2c, OFFSET_DELAY_LEN, I2C_DELAY_LEN);
+
+	/* Hard-reset the DMA engine, then clear the reset/flag state. */
+	i2c_dma_writel(i2c, OFFSET_DMA_RST, I2C_DMA_HARD_RST);
+	i2c_dma_writel(i2c, OFFSET_DMA_RST, I2C_DMA_CLR_FLAG);
+}
+
+/** @ingroup IP_group_i2c_internal_function
+ * @par Description
+ * Calculate i2c speed.\n
+ * Hardware design:\n
+ * i2c_bus_freq = source_clk / (2 * sample_cnt * step_cnt)\n
+ * The calculation want to pick the highest bus frequency that\n
+ * is still less than or equal to i2c->speed_hz. The\n
+ * calculation try to get sample_cnt and step_cnt.
+ * @param[in]
+ * clk_src: i2c module source clock.
+ * @param[in]
+ * target_speed: i2c target speed.
+ * @param[out]
+ * timing_step_cnt: i2c step_cnt value.
+ * @param[out]
+ * timing_sample_cnt: i2c sample_cnt value.
+ * @return
+ * 0, calculate speed successfully.\n
+ * -EINVAL_I2C, calculate speed fail.
+ * @par Boundary case and Limitation
+ * none.
+ * @par Error case and Error handling
+ * 1. Target speed is too low, calculate speed fail, return\n
+ * -EINVAL_I2C.
+ * @par Call graph and Caller graph
+ * @par Refer to the source code
+ */
+static int mtk_i2c_calculate_speed(unsigned int clk_src,
+				   unsigned int target_speed,
+				   unsigned int *timing_step_cnt,
+				   unsigned int *timing_sample_cnt)
+{
+	unsigned int sample;
+	unsigned int step;
+	unsigned int max_step;
+	unsigned int best_sample = MAX_SAMPLE_CNT_DIV;
+	unsigned int best_step;
+	unsigned int div_needed;
+	unsigned int best_product;
+	unsigned int product;
+
+	/* High-speed mode uses a smaller maximum step divider. */
+	if (target_speed > MAX_FS_PLUS_SPEED)
+		max_step = MAX_HS_STEP_CNT_DIV;
+	else
+		max_step = MAX_STEP_CNT_DIV;
+
+	best_step = max_step;
+
+	/*
+	 * bus_freq = clk_src / (2 * sample * step), so the smallest product
+	 * sample * step that still satisfies product >= div_needed gives the
+	 * fastest bus frequency not exceeding target_speed.
+	 */
+	div_needed = DIV_ROUND_UP(clk_src >> 1, target_speed);
+	best_product = MAX_SAMPLE_CNT_DIV * max_step;
+
+	/* Scan every legal sample divider and derive the matching step. */
+	for (sample = 1; sample <= MAX_SAMPLE_CNT_DIV; sample++) {
+		step = DIV_ROUND_UP(div_needed, sample);
+		product = step * sample;
+		if (step > max_step)
+			continue;
+
+		if (product < best_product) {
+			best_product = product;
+			best_sample = sample;
+			best_step = step;
+			if (best_product == div_needed)
+				break;	/* exact divide; cannot do better */
+		}
+	}
+
+	/* Even the best pair overshoots the requested speed: reject. */
+	if ((clk_src / (2 * best_sample * best_step)) > target_speed) {
+		I2CERR("Unsupported speed (%u KHz)\n", target_speed);
+		return -EINVAL_I2C;
+	}
+
+	/* Hardware registers hold count-minus-one values. */
+	*timing_step_cnt = best_step - 1;
+	*timing_sample_cnt = best_sample - 1;
+
+	return 0;
+}
+
+/** @ingroup IP_group_i2c_internal_function
+ * @par Description
+ * Calculate i2c speed and write sample_cnt, step_cnt to TIMING register.
+ * @param[out]
+ * i2c: mtk_i2c pointer, struct mtk_i2c contains i2c source clock,
+ * clock divide and speed.
+ * @return
+ * 0, set speed successfully.\n
+ * error code from mtk_i2c_calculate_speed().
+ * @par Boundary case and Limitation
+ * none.
+ * @par Error case and Error handling
+ * 1. If mtk_i2c_calculate_speed() fails, return its error code.
+ * @par Call graph and Caller graph
+ * @par Refer to the source code
+ */
+static int mtk_i2c_set_speed(struct mtk_i2c *i2c)
+{
+	bool low_power_clk = false;
+	unsigned int clk_src;
+	unsigned int step_cnt;
+	unsigned int sample_cnt;
+	unsigned int target_speed;
+	int ret;
+
+	/* Low-power source clocks cannot use the canned timing tables below. */
+	if ((i2c->clk == MTK_I2C_ULPOSC_DIV8) ||
+	    (i2c->clk == MTK_I2C_ULPOSC_DIV16) ||
+	    (i2c->clk == MTK_I2C_CLK_26M))
+		low_power_clk = true;
+
+	if (i2c->speed == 0)
+		i2c->speed = I2C_DEFAULT_SPEED;
+
+	/* Fast path: pre-computed divider/timing pairs for the three
+	 * standard speeds on a normal source clock. */
+	if ((!low_power_clk) && (i2c->speed == I2C_DEFAULT_SPEED)) {
+		i2c->clock_div_reg = I2C_CLK_DIV_100K;
+		i2c->timing_reg = I2C_TIMING_100K;
+		i2c->high_speed_reg = 0;
+	} else if ((!low_power_clk) && (i2c->speed == MAX_FS_MODE_SPEED)) {
+		i2c->clock_div_reg = I2C_CLK_DIV_400K;
+		i2c->timing_reg = I2C_TIMING_400K;
+		i2c->high_speed_reg = 0;
+	} else if ((!low_power_clk) && (i2c->speed == MAX_FS_PLUS_SPEED)) {
+		i2c->clock_div_reg = I2C_CLK_DIV_1000K;
+		i2c->timing_reg = I2C_TIMING_1000K;
+		i2c->high_speed_reg = 0;
+	} else {
+		/* Generic path: compute sample/step dividers from the
+		 * actual source clock. */
+		i2c->clock_div_reg = I2C_DEFAULT_CLK_DIV;
+
+		if (i2c->clk_src_div == 0)
+			i2c->clk_src_div = MTK_I2C_CLK_DIV;
+
+		/* NOTE(review): clk_src_div is scaled in place here; this
+		 * relies on the caller re-initializing it before each
+		 * transfer (mtk_i2c_init_base does). */
+		i2c->clk_src_div *= i2c->clock_div_reg;
+
+		clk_src = (i2c->clk) / (i2c->clk_src_div);
+		target_speed = i2c->speed;
+
+		if (target_speed > MAX_FS_PLUS_SPEED) {
+			/* Set master code speed register */
+			i2c->timing_reg = I2C_TIMING_400K;
+
+			/* Set the high speed mode register */
+			ret = mtk_i2c_calculate_speed(clk_src, target_speed,
+						      &step_cnt, &sample_cnt);
+			if (ret < 0)
+				return ret;
+
+			i2c->high_speed_reg = I2C_TIME_DEFAULT_VALUE |
+					      (sample_cnt << 12) |
+					      (step_cnt << 8);
+		} else {
+			ret = mtk_i2c_calculate_speed(clk_src, target_speed,
+						      &step_cnt, &sample_cnt);
+			if (ret < 0)
+				return ret;
+
+			i2c->timing_reg = (sample_cnt << 8) | step_cnt;
+
+			/* Disable the high speed transaction */
+			i2c->high_speed_reg = 0;
+		}
+	}
+
+	/* Program the computed dividers; CLOCK_DIV holds value minus one. */
+	i2c_writel(i2c, OFFSET_CLOCK_DIV, (i2c->clock_div_reg - 1));
+	i2c_writel(i2c, OFFSET_TIMING, i2c->timing_reg);
+	i2c_writel(i2c, OFFSET_HS, i2c->high_speed_reg);
+
+	return 0;
+}
+
+/** @ingroup IP_group_i2c_internal_function
+ * @par Description
+ * Dump i2c controller registers and DMA registers value.
+ * @param[in]
+ * i2c: mtk_i2c pointer, struct mtk_i2c contains register base\n
+ * address and DMA base address.
+ * @return
+ * none.
+ * @par Boundary case and Limitation
+ * none.
+ * @par Error case and Error handling
+ * none.
+ * @par Call graph and Caller graph
+ * @par Refer to the source code
+ */
+static void i2c_dump_info(struct mtk_i2c *i2c)
+{
+	/* Dump the software state first, then the live controller and DMA
+	 * registers — used from error paths to aid debugging. */
+	I2CERR("I2C structure:\n");
+	I2CERR("id=%d,dma_en=%x,auto_restart=%x,poll_en=%x,op=%x\n",
+	       i2c->id, i2c->dma_en, i2c->auto_restart, i2c->poll_en, i2c->op);
+	I2CERR("irq_stat=%x,source_clk=%d,clk_div=%d,speed=%d\n",
+	       i2c->irq_stat, i2c->clk, i2c->clk_src_div, i2c->speed);
+	I2CERR("filter_msg=%x,msg_complete=%x,addr=%x\n",
+	       i2c->filter_msg, i2c->msg_complete, i2c->addr);
+	I2CERR("mode=%x,irqnr=%x,timing_reg=%x,high_speed_reg=%x\n",
+	       i2c->mode, i2c->irqnr, i2c->timing_reg, i2c->high_speed_reg);
+	I2CERR("con_num=%d,delay_len=%x,ext_time=%x,scl_ratio=%x\n",
+	       i2c->con_num, i2c->delay_len, i2c->ext_time, i2c->scl_ratio);
+	I2CERR("hs_scl_ratio=%x,scl_mis_comp=%x,sta_stop_time=%x\n",
+	       i2c->hs_scl_ratio, i2c->scl_mis_comp, i2c->sta_stop_time);
+	I2CERR("hs_sta_stop_time=%x,sda_time=%x\n",
+	       i2c->hs_sta_stop_time, i2c->sda_time);
+
+	/* Controller registers (reading INTR_STAT here is informational;
+	 * it is write-1-to-clear, so reads do not modify it). */
+	I2CERR("I2C base address 0x%llx\n", i2c->base);
+	I2CERR("I2C register:\n");
+	I2CERR("SLAVE_ADDR=0x%x,INTR_MASK=0x%x,INTR_STAT=0x%x\n",
+	       (i2c_readl(i2c, OFFSET_SLAVE_ADDR)),
+	       (i2c_readl(i2c, OFFSET_INTR_MASK)),
+	       (i2c_readl(i2c, OFFSET_INTR_STAT)));
+	I2CERR("CONTROL=0x%x,TIMING=0x%x\n",
+	       (i2c_readl(i2c, OFFSET_CONTROL)),
+	       (i2c_readl(i2c, OFFSET_TIMING)));
+	I2CERR("TRANSFER_LEN=0x%x,TRANSAC_LEN=0x%x,DELAY_LEN=0x%x\n",
+	       (i2c_readl(i2c, OFFSET_TRANSFER_LEN)),
+	       (i2c_readl(i2c, OFFSET_TRANSAC_LEN)),
+	       (i2c_readl(i2c, OFFSET_DELAY_LEN)));
+	I2CERR("START=0x%x,EXT_CONF=0x%x,IO_CONFIG=0x%x\n",
+	       (i2c_readl(i2c, OFFSET_START)),
+	       (i2c_readl(i2c, OFFSET_EXT_CONF)),
+	       (i2c_readl(i2c, OFFSET_IO_CONFIG)));
+	I2CERR("FIFO_STAT1=0x%x,FIFO_STAT=0x%x,FIFO_THRESH=0x%x\n",
+	       (i2c_readl(i2c, OFFSET_FIFO_STAT1)),
+	       (i2c_readl(i2c, OFFSET_FIFO_STAT)),
+	       (i2c_readl(i2c, OFFSET_FIFO_THRESH)));
+	I2CERR("DEBUGSTAT=0x%x,TRANSFER_LEN_AUX=0x%x,CLOCK_DIV=0x%x\n",
+	       (i2c_readl(i2c, OFFSET_DEBUGSTAT)),
+	       (i2c_readl(i2c, OFFSET_TRANSFER_LEN_AUX)),
+	       (i2c_readl(i2c, OFFSET_CLOCK_DIV)));
+	I2CERR("HS=0x%x,SCL_HL_RATIO=0x%x,HS_SCL_HL_RATIO=0x%x\n",
+	       (i2c_readl(i2c, OFFSET_HS)),
+	       (i2c_readl(i2c, OFFSET_SCL_HL_RATIO)),
+	       (i2c_readl(i2c, OFFSET_HS_SCL_HL_RATIO)));
+	I2CERR("STA_STOP_AC_TIME=0x%x,HS_STA_STOP_AC_TIME=0x%x\n",
+	       (i2c_readl(i2c, OFFSET_STA_STOP_AC_TIME)),
+	       (i2c_readl(i2c, OFFSET_HS_STA_STOP_AC_TIME)));
+	I2CERR("SCL_MIS_COMP_POINT=0x%x,SDA_TIME=0x%x,FIFO_PAUSE=0x%x\n",
+	       (i2c_readl(i2c, OFFSET_SCL_MIS_COMP_POINT)),
+	       (i2c_readl(i2c, OFFSET_SDA_TIME)),
+	       (i2c_readl(i2c, OFFSET_FIFO_PAUSE)));
+
+	/* DMA engine registers for this controller. */
+	I2CERR("DMA base address 0x%llx\n", i2c->dmabase);
+	I2CERR("I2C DMA register:\n");
+	I2CERR("OFFSET_DMA_TX_MEM_ADDR=0x%x,OFFSET_DMA_RX_MEM_ADDR=0x%x\n",
+	       (i2c_dma_readl(i2c, OFFSET_DMA_TX_MEM_ADDR)),
+	       (i2c_dma_readl(i2c, OFFSET_DMA_RX_MEM_ADDR)));
+	I2CERR("OFFSET_DMA_TX_LEN=0x%x,OFFSET_DMA_RX_LEN=0x%x\n",
+	       (i2c_dma_readl(i2c, OFFSET_DMA_TX_LEN)),
+	       (i2c_dma_readl(i2c, OFFSET_DMA_RX_LEN)));
+	I2CERR("OFFSET_DMA_CON=0x%x,OFFSET_DMA_EN=0x%x\n",
+	       (i2c_dma_readl(i2c, OFFSET_DMA_CON)),
+	       (i2c_dma_readl(i2c, OFFSET_DMA_EN)));
+	I2CERR("OFFSET_DMA_INT_EN=0x%x,OFFSET_DMA_INT_FLAG=0x%x\n",
+	       (i2c_dma_readl(i2c, OFFSET_DMA_INT_EN)),
+	       (i2c_dma_readl(i2c, OFFSET_DMA_INT_FLAG)));
+}
+
+/** @ingroup IP_group_i2c_internal_function
+ * @par Description
+ * Configure i2c register and trigger transfer.
+ * @param[out]
+ * i2c: mtk_i2c pointer, struct mtk_i2c contains register base\n
+ * address, operation mode, interrupt status and i2c driver data.
+ * @param[out]
+ * msgs: i2c_msg pointer, struct i2c_msg contains slave\n
+ * address, operation mode, msg length and data buffer.
+ * @param[in]
+ * num: i2c_msg number.
+ * @param[in]
+ * left_num: left i2c_msg number.
+ * @return
+ * 0, i2c transfer successfully.\n
+ * -ETIMEDOUT_I2C, i2c transfer timeout.\n
+ * -EREMOTEIO_I2C, i2c receive data length does not equal to request data\n
+ * length.\n
+ * -ENXIO_I2C, i2c transfer ack error.
+ * @par Boundary case and Limitation
+ * none.
+ * @par Error case and Error handling
+ * 1. i2c transfer timeout, return -ETIMEDOUT_I2C.\n
+ * 2. i2c receive data length does not equal to request data\n
+ * length, return -EREMOTEIO_I2C.\n
+ * 3. i2c transfer ack error, return -ENXIO_I2C.
+ * @par Call graph and Caller graph
+ * @par Refer to the source code
+ */
+static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
+			       int num, int left_num)
+{
+	bool tmo = false;
+	bool trans_error = false;
+	uint8_t *data_buf = msgs->buf;
+	uint16_t data_len = msgs->len;
+	uint16_t read_len;
+	uint16_t addr_reg;
+	uint16_t start_reg;
+	uint16_t control_reg;
+	uint16_t restart_flag = 0;
+	uint32_t tmo_poll = I2C_POLL_VALUE;	/* busy-wait iteration budget */
+	int ret;
+
+	i2c->irq_stat = 0;
+
+	/* In auto-restart mode an RS_TRANSFER event also ends a message. */
+	if (i2c->auto_restart)
+		restart_flag = I2C_RS_TRANSFER;
+
+	/* Build CONTROL: start from the current value with the direction-
+	 * change and repeated-start bits cleared, then re-add as needed. */
+	control_reg = i2c_readl(i2c, OFFSET_CONTROL) &
+		      ~(I2C_CONTROL_DIR_CHANGE | I2C_CONTROL_RS);
+
+	if ((i2c->speed > MAX_FS_PLUS_SPEED) ||
+	    ((num > 1) && !(i2c->mode & I2C_MULTI_STOP)))
+		control_reg |= I2C_CONTROL_RS;
+
+	/* Write-then-read needs a direction change plus repeated start. */
+	if (i2c->op == I2C_MASTER_WRRD)
+		control_reg |= I2C_CONTROL_DIR_CHANGE | I2C_CONTROL_RS;
+
+	if (i2c->dma_en)
+		control_reg |= I2C_CONTROL_AYNCS_MODE | I2C_CONTROL_DMA_EN;
+
+	i2c_writel(i2c, OFFSET_CONTROL, control_reg);
+
+	/* set start condition */
+	if (i2c->mode & I2C_EXTEN_SET)
+		i2c_writel(i2c, OFFSET_EXT_CONF, i2c->ext_time);
+	else {
+		if (i2c->speed <= I2C_DEFAULT_SPEED)
+			i2c_writel(i2c, OFFSET_EXT_CONF, I2C_ST_START_CON);
+		else
+			i2c_writel(i2c, OFFSET_EXT_CONF, I2C_FS_START_CON);
+	}
+
+	/* set ac timing register */
+	if (i2c->mode & I2C_ACTIME_SET) {
+		i2c_writel(i2c, OFFSET_SCL_HL_RATIO, i2c->scl_ratio);
+		i2c_writel(i2c, OFFSET_HS_SCL_HL_RATIO, i2c->hs_scl_ratio);
+		i2c_writel(i2c, OFFSET_SCL_MIS_COMP_POINT, i2c->scl_mis_comp);
+		i2c_writel(i2c, OFFSET_STA_STOP_AC_TIME, i2c->sta_stop_time);
+		i2c_writel(i2c, OFFSET_HS_STA_STOP_AC_TIME,
+			   i2c->hs_sta_stop_time);
+		i2c_writel(i2c, OFFSET_SDA_TIME, i2c->sda_time);
+	}
+
+	/* 7-bit address goes in bits [7:1]; bit 0 selects read. */
+	addr_reg = msgs->addr << 1;
+	if (i2c->op == I2C_MASTER_RD)
+		addr_reg |= 0x1;
+
+	i2c_writel(i2c, OFFSET_SLAVE_ADDR, addr_reg);
+
+	/* clear interrupt status (write-1-to-clear) */
+	i2c_writel(i2c, OFFSET_INTR_STAT, I2C_RS_TRANSFER | I2C_ACKERR |
+		   I2C_TRANSAC_COMP);
+
+	if (!(i2c->mode & I2C_FIFO_FORCE))
+		i2c_writel(i2c, OFFSET_FIFO_ADDR_CLR, I2C_FIFO_ADDR_CLR);
+
+	/* Polling mode masks all interrupts; IRQ mode unmasks the ones the
+	 * handler needs. */
+	if (i2c->poll_en)
+		i2c_writel(i2c, OFFSET_INTR_MASK, 0);
+	else
+		i2c_writel(i2c, OFFSET_INTR_MASK, restart_flag | I2C_ACKERR |
+			   I2C_TRANSAC_COMP);
+
+	/* set transfer and transaction len */
+	if (i2c->op == I2C_MASTER_WRRD) {
+		i2c_writel(i2c, OFFSET_TRANSFER_LEN, msgs->len);
+		i2c_writel(i2c, OFFSET_TRANSFER_LEN_AUX, (msgs + 1)->len);
+		i2c_writel(i2c, OFFSET_TRANSAC_LEN, I2C_WRRD_TRANAC_VALUE);
+	} else {
+		if (i2c->mode & I2C_CONTI_TRANS) {
+			i2c_writel(i2c, OFFSET_TRANSFER_LEN, msgs->len);
+			i2c_writel(i2c, OFFSET_TRANSAC_LEN, i2c->con_num);
+
+			/* Total bytes = per-transaction len * repeat count. */
+			msgs->len *= i2c->con_num;
+		} else {
+			i2c_writel(i2c, OFFSET_TRANSFER_LEN, msgs->len);
+			i2c_writel(i2c, OFFSET_TRANSAC_LEN, num);
+		}
+	}
+
+	/* DMA path: program the engine; FIFO path: preload TX data. */
+	if (i2c->dma_en) {
+		if (i2c->op == I2C_MASTER_WR) {
+			i2c_dma_writel(i2c, OFFSET_DMA_INT_FLAG,
+				       I2C_DMA_INT_FLAG_NONE);
+			i2c_dma_writel(i2c, OFFSET_DMA_CON,
+				       I2C_DMA_CON_TX);
+			/* NOTE(review): buffers are passed as physical
+			 * 32-bit addresses via pointer truncation — assumes
+			 * identity-mapped, <4GB buffers; confirm. */
+			i2c_dma_writel(i2c, OFFSET_DMA_TX_MEM_ADDR,
+				       (uint32_t)(uint64_t)(msgs->buf));
+			i2c_dma_writel(i2c, OFFSET_DMA_TX_LEN,
+				       (uint32_t)(msgs->len));
+		} else if (i2c->op == I2C_MASTER_RD) {
+			i2c_dma_writel(i2c, OFFSET_DMA_INT_FLAG,
+				       I2C_DMA_INT_FLAG_NONE);
+			i2c_dma_writel(i2c, OFFSET_DMA_CON,
+				       I2C_DMA_CON_RX);
+			i2c_dma_writel(i2c, OFFSET_DMA_RX_MEM_ADDR,
+				       (uint32_t)(uint64_t)(msgs->buf));
+			i2c_dma_writel(i2c, OFFSET_DMA_RX_LEN,
+				       (uint32_t)(msgs->len));
+		} else if (i2c->op == I2C_MASTER_WRRD) {
+			i2c_dma_writel(i2c, OFFSET_DMA_INT_FLAG,
+				       I2C_DMA_CLR_FLAG);
+			i2c_dma_writel(i2c, OFFSET_DMA_CON,
+				       I2C_DMA_CLR_FLAG);
+			i2c_dma_writel(i2c, OFFSET_DMA_TX_MEM_ADDR,
+				       (uint32_t)(uint64_t)(msgs->buf));
+			i2c_dma_writel(i2c, OFFSET_DMA_RX_MEM_ADDR,
+				       (uint32_t)(uint64_t)((msgs + 1)->buf));
+			i2c_dma_writel(i2c, OFFSET_DMA_TX_LEN,
+				       (uint32_t)(msgs->len));
+			i2c_dma_writel(i2c, OFFSET_DMA_RX_LEN,
+				       (uint32_t)((msgs + 1)->len));
+		}
+
+		i2c_dma_writel(i2c, OFFSET_DMA_EN, I2C_DMA_START_EN);
+	} else {
+		/* FIFO mode: push outgoing bytes before triggering start. */
+		if (!(i2c->mode & I2C_FIFO_FORCE) &&
+		    (i2c->op != I2C_MASTER_RD)) {
+			data_buf = msgs->buf;
+			data_len = msgs->len;
+
+			while (data_len--)
+				i2c_writel(i2c, OFFSET_DATA_PORT,
+					   *(data_buf++));
+		}
+	}
+
+	/* Trigger the transaction; auto-restart adds the multi-message
+	 * trigger, and MUL_CNFG flags that more messages follow. */
+	if (!i2c->auto_restart) {
+		start_reg = I2C_TRANSAC_START;
+	} else {
+		start_reg = I2C_TRANSAC_START | I2C_RS_MUL_TRIG;
+		if (left_num >= 1)
+			start_reg |= I2C_RS_MUL_CNFG;
+	}
+
+	i2c_writel(i2c, OFFSET_START, start_reg);
+
+	/* Wait for completion: poll the status register directly, or spin
+	 * on the flag set by the IRQ handler. Both paths bound the wait
+	 * with tmo_poll iterations. */
+	if (i2c->poll_en) {
+		for (;;) {
+			i2c->irq_stat = i2c_readl(i2c, OFFSET_INTR_STAT);
+
+			if (i2c->irq_stat & (I2C_TRANSAC_COMP | restart_flag)) {
+				tmo = false;
+				if (i2c->irq_stat & I2C_ACKERR)
+					trans_error = true;
+				break;
+			}
+
+			tmo_poll--;
+			if (tmo_poll == 0) {
+				tmo = true;
+				break;
+			}
+		}
+	} else {
+		for (;;) {
+			if (i2c->msg_complete && (i2c->irq_stat &
+			    (I2C_TRANSAC_COMP | restart_flag))) {
+				tmo = false;
+				if (i2c->irq_stat & I2C_ACKERR)
+					trans_error = true;
+				break;
+			}
+
+			tmo_poll--;
+			if (tmo_poll == 0) {
+				tmo = true;
+				break;
+			}
+		}
+	}
+
+	/* clear interrupt mask */
+	i2c_writel(i2c, OFFSET_INTR_MASK, ~(restart_flag | I2C_ACKERR |
+		   I2C_TRANSAC_COMP));
+
+	if ((!tmo) && (!trans_error)) {
+		/* Successful FIFO read: drain received bytes, but only if
+		 * the FIFO holds exactly the expected count. */
+		if (!i2c->dma_en && i2c->op != I2C_MASTER_WR &&
+		    !(i2c->mode & I2C_FIFO_FORCE)) {
+			data_buf = (i2c->op == I2C_MASTER_RD) ?
+				   msgs->buf : (msgs + 1)->buf;
+			data_len = (i2c->op == I2C_MASTER_RD) ?
+				   msgs->len : (msgs + 1)->len;
+			read_len = i2c_readl(i2c, OFFSET_FIFO_STAT1)
+				   & 0x1f;
+
+			if (read_len == data_len) {
+				while (data_len--)
+					*(data_buf++) = i2c_readl(i2c,
+							OFFSET_DATA_PORT);
+			} else {
+				I2CERR("fifo read error!\n");
+				I2CERR("data_len %x, read_len %x\n",
+				       data_len, read_len);
+				if (i2c->filter_msg == false)
+					i2c_dump_info(i2c);
+				return -EREMOTEIO_I2C;
+			}
+		}
+	} else {
+		/* timeout or ACKERR */
+		if (tmo)
+			ret = -ETIMEDOUT_I2C;
+		else
+			ret = -ENXIO_I2C;
+
+		/* filter_msg suppresses diagnostics for expected failures
+		 * (e.g. bus probing). */
+		if (i2c->filter_msg == false) {
+			if (tmo) {
+				I2CERR("id=%d, addr: %x, transfer timeout\n",
+				       i2c->id, msgs->addr);
+			} else {
+				I2CERR("id=%d, addr: %x, I2C_ACKERR\n",
+				       i2c->id, msgs->addr);
+			}
+
+			i2c_dump_info(i2c);
+		}
+
+		return ret;
+	}
+
+	return 0;
+}
+
+/** @ingroup IP_group_i2c_internal_function
+ * @par Description
+ * Common i2c transfer API. Set i2c transfer mode according to i2c_msg\n
+ * information, then call mtk_i2c_do_transfer() to configure i2c register\n
+ * and trigger transfer.
+ * @param[out]
+ * i2c: mtk_i2c pointer, struct mtk_i2c contains register base\n
+ * address, operation mode, interrupt status and i2c driver data.
+ * @param[out]
+ * msgs: i2c_msg pointer, struct i2c_msg contains slave\n
+ * address, operation mode, msg length and data buffer.
+ * @param[in]
+ * num: i2c_msg number.
+ * @return
+ * i2c_msg number, i2c transfer successfully.\n
+ * -EINVAL_I2C, msg length is 0 or more than 16, msg data buffer is NULL,\n
+ * use DMA MODE or slave address more than 0x7f.\n
+ * error code from mtk_i2c_init_base().\n
+ * error code from mtk_i2c_set_speed().\n
+ * error code from mtk_i2c_do_transfer().
+ * @par Boundary case and Limitation
+ * none.
+ * @par Error case and Error handling
+ * 1. If msg length is 0 or more than 16, msg data buffer is NULL,\n
+ * use DMA MODE or slave address more than 0x7f, return -EINVAL_I2C.
+ * 2. If mtk_i2c_init_base() fails, return its error code.\n
+ * 3. If mtk_i2c_set_speed() fails, return its error code.\n
+ * 4. If mtk_i2c_do_transfer() fails, return its error code.
+ * @par Call graph and Caller graph
+ * @par Refer to the source code
+ */
+static int mtk_i2c_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs, int num)
+{
+	/* int (not uint8_t): 'num' is an int, and a narrower counter would
+	 * truncate / loop forever if a caller ever passed num > 255. */
+	int num_cnt;
+	int left_num = num;
+	int ret;
+
+	ret = mtk_i2c_init_base(i2c);
+	if (ret) {
+		I2CERR("Failed to init i2c base.\n");
+		return ret;
+	}
+
+	ret = i2c_gpio_pinmux(i2c);
+	if (ret) {
+		I2CERR("Failed to set gpio to i2c mode.\n");
+		return ret;
+	}
+
+	/* FIFO_FORCE mode leaves the clock state untouched (see err_exit). */
+	if (!(i2c->mode & I2C_FIFO_FORCE))
+		i2c_clock_enable(i2c);
+
+	mtk_i2c_init_hw(i2c);
+
+	ret = mtk_i2c_set_speed(i2c);
+	if (ret) {
+		I2CERR("Failed to set the speed.\n");
+		goto err_exit;
+	}
+
+	/* Validate every message: 7-bit address, non-NULL buffer, non-zero
+	 * length; switch to DMA when a message exceeds the FIFO size. */
+	for (num_cnt = 0; num_cnt < num; num_cnt++) {
+		if (((msgs + num_cnt)->addr) > 0x7f) {
+			I2CERR("i2c addr: msgs[%d]->addr(%x) > 0x7f, error!\n",
+			       num_cnt, ((msgs + num_cnt)->addr));
+			ret = -EINVAL_I2C;
+			goto err_exit;
+		}
+
+		if (!(msgs + num_cnt)->buf) {
+			I2CERR("msgs[%d]->buf is NULL.\n", num_cnt);
+			ret = -EINVAL_I2C;
+			goto err_exit;
+		}
+
+		if ((msgs + num_cnt)->len == 0) {
+			I2CERR("msgs[%d]->len == 0, error!\n", num_cnt);
+			ret = -EINVAL_I2C;
+			goto err_exit;
+		}
+
+		if (!(i2c->mode & I2C_FIFO_FORCE) &&
+		    (msgs + num_cnt)->len > I2C_FIFO_SIZE)
+			i2c->dma_en = true;
+	}
+
+	/* A single message, or a FIFO write+read pair to the same address,
+	 * can run without auto-restart (the pair becomes one WRRD). */
+	if ((num == 1) || ((!i2c->dma_en) && (num == 2) &&
+	    (!(msgs[0].flags & I2C_M_RD) && (msgs[1].flags & I2C_M_RD) &&
+	    (msgs[0].addr == msgs[1].addr)) && !(i2c->mode & I2C_MULTI_TRANS)))
+		i2c->auto_restart = false;
+	else
+		i2c->auto_restart = true;
+
+	while (left_num--) {
+		if (msgs->flags & I2C_M_RD)
+			i2c->op = I2C_MASTER_RD;
+		else
+			i2c->op = I2C_MASTER_WR;
+
+		if (!i2c->auto_restart) {
+			if (num == 2) {
+				/* combined two messages into one transaction */
+				i2c->op = I2C_MASTER_WRRD;
+				left_num--;
+			}
+		}
+
+		ret = mtk_i2c_do_transfer(i2c, msgs, num, left_num);
+		if (ret < 0)
+			goto err_exit;
+
+		msgs++;
+	}
+
+	ret = I2C_OK;
+
+err_exit:
+	if (!(i2c->mode & I2C_FIFO_FORCE))
+		i2c_clock_disable(i2c);
+
+	return ret;
+}
+
+/** @ingroup IP_group_i2c_external_function
+ * @par Description
+ * Initialize struct mtk_i2c and i2c_msg, then read data from\n
+ * slave device.
+ * @param[in]
+ * bus_num: i2c bus number.
+ * @param[in]
+ * device_addr: slave device 7bits address.
+ * @param[in]
+ * speed_khz: i2c transfer speed.
+ * @param[out]
+ * buffer: read data buffer pointer.
+ * @param[in]
+ * len: read data length.
+ * @return
+ * 0, i2c transfer successfully.\n
+ * error code from mtk_i2c_transfer().
+ * @par Boundary case and Limitation
+ * none.
+ * @par Error case and Error handling
+ * If mtk_i2c_transfer() fails, return its error code.
+ * @par Call graph and Caller graph
+ * @par Refer to the source code
+ */
+int mtk_i2c_read(uint8_t bus_num, uint8_t device_addr, uint32_t speed_khz,
+		 uint8_t *buffer, uint16_t len)
+{
+	int ret = I2C_OK;
+	struct i2c_msg msgs;
+	struct mtk_i2c i2c_mtk;
+	struct mtk_i2c *i2c = &i2c_mtk;
+
+	/* Polled, non-DMA, open-drain single-message read. */
+	memset(i2c, 0, sizeof(struct mtk_i2c));
+
+	i2c->poll_en = true;
+	i2c->dma_en = false;
+	i2c->auto_restart = false;
+	i2c->pushpull = false;
+	i2c->filter_msg = false;
+	i2c->id = bus_num;
+	i2c->addr = device_addr;
+	i2c->speed = speed_khz;
+	i2c->mode = 0;
+
+	msgs.addr = i2c->addr;
+	/* Use the named read flag instead of the magic value 1 — this is
+	 * the bit mtk_i2c_transfer() tests to pick the direction. */
+	msgs.flags = I2C_M_RD;
+	msgs.buf = buffer;
+	msgs.len = len;
+	ret = mtk_i2c_transfer(i2c, &msgs, 1);
+
+	if ((i2c->filter_msg == false) && (ret != I2C_OK))
+		I2CERR("mtk_i2c_read fails(%d).\n", ret);
+
+	return ret;
+}
+
+/** @ingroup IP_group_i2c_external_function
+ * @par Description
+ * Initialize struct mtk_i2c and i2c_msg, then write data to\n
+ * slave device.
+ * @param[in]
+ * bus_num: i2c bus number.
+ * @param[in]
+ * device_addr: slave device 7bits address.
+ * @param[in]
+ * speed_khz: i2c transfer speed.
+ * @param[in]
+ * buffer: write data buffer pointer.
+ * @param[in]
+ * len: write data length.
+ * @return
+ * 0, i2c transfer successfully.\n
+ * error code from mtk_i2c_transfer().
+ * @par Boundary case and Limitation
+ * none.
+ * @par Error case and Error handling
+ * If mtk_i2c_transfer() fails, return its error code.\n
+ * @par Call graph and Caller graph
+ * @par Refer to the source code
+ */
+int mtk_i2c_write(uint8_t bus_num, uint8_t device_addr, uint32_t speed_khz,
+		  uint8_t *buffer, uint16_t len)
+{
+	struct mtk_i2c i2c_mtk;
+	struct mtk_i2c *i2c = &i2c_mtk;
+	struct i2c_msg msgs;
+	int ret;
+
+	/* Polled, non-DMA, open-drain single-message write. */
+	memset(i2c, 0, sizeof(*i2c));
+	i2c->poll_en = true;
+	i2c->dma_en = false;
+	i2c->auto_restart = false;
+	i2c->pushpull = false;
+	i2c->filter_msg = false;
+	i2c->id = bus_num;
+	i2c->addr = device_addr;
+	i2c->speed = speed_khz;
+	i2c->mode = 0;
+
+	msgs.addr = device_addr;
+	msgs.flags = 0;		/* write direction */
+	msgs.buf = buffer;
+	msgs.len = len;
+
+	ret = mtk_i2c_transfer(i2c, &msgs, 1);
+	if ((i2c->filter_msg == false) && (ret != I2C_OK))
+		I2CERR("mtk_i2c_write fails(%d).\n", ret);
+
+	return ret;
+}
+
+/** @ingroup IP_group_i2c_external_function
+ * @par Description
+ * Initialize struct mtk_i2c and i2c_msg, first write data to\n
+ * slave device then read data from slave device.
+ * @param[in]
+ * bus_num: i2c bus number.
+ * @param[in]
+ * device_addr: slave device 7bits address.
+ * @param[in]
+ * speed_khz: i2c transfer speed.
+ * @param[in]
+ * write_buffer: write data buffer pointer.
+ * @param[out]
+ * read_buffer: read data buffer pointer.
+ * @param[in]
+ * write_len: write data length.
+ * @param[in]
+ * read_len: read data length.
+ * @return
+ * 0, i2c transfer successfully.\n
+ * error code from mtk_i2c_transfer().
+ * @par Boundary case and Limitation
+ * none.
+ * @par Error case and Error handling
+ * If mtk_i2c_transfer() fails, return its error code.\n
+ * @par Call graph and Caller graph
+ * @par Refer to the source code
+ */
+int mtk_i2c_write_read(uint8_t bus_num, uint8_t device_addr, uint32_t speed_khz,
+		       uint8_t *write_buffer, uint8_t *read_buffer,
+		       uint16_t write_len, uint16_t read_len)
+{
+	int ret = I2C_OK;
+	struct i2c_msg msgs[2];
+	struct mtk_i2c i2c_mtk;
+	struct mtk_i2c *i2c = &i2c_mtk;
+
+	/* Polled write-then-read pair; mtk_i2c_transfer combines it into a
+	 * single WRRD transaction with a repeated start. */
+	memset(i2c, 0, sizeof(struct mtk_i2c));
+
+	i2c->poll_en = true;
+	i2c->dma_en = false;
+	i2c->auto_restart = false;
+	i2c->pushpull = false;
+	i2c->filter_msg = false;
+	i2c->id = bus_num;
+	i2c->addr = device_addr;
+	i2c->speed = speed_khz;
+	i2c->mode = 0;
+
+	msgs[0].addr = i2c->addr;
+	msgs[0].flags = 0;	/* write direction */
+	msgs[0].buf = write_buffer;
+	msgs[0].len = write_len;
+
+	msgs[1].addr = i2c->addr;
+	/* Named read flag instead of magic 1 — same bit the transfer path
+	 * tests when pairing the two messages. */
+	msgs[1].flags = I2C_M_RD;
+	msgs[1].buf = read_buffer;
+	msgs[1].len = read_len;
+	ret = mtk_i2c_transfer(i2c, msgs, 2);
+
+	if ((i2c->filter_msg == false) && (ret != I2C_OK))
+		I2CERR("mtk_i2c_write_read fails(%d).\n", ret);
+
+	return ret;
+}
+
+/* Configure one i2c pad: select mode 1 (i2c function) in the pin's 3-bit
+ * mode field, enable the pull, select pull-up, and set the 2-bit pull
+ * resistance field. Note PULL_ENABLE and PULL_SEL use the same bit shift. */
+static void i2c_pin_setup(unsigned int mode_shift, unsigned int pull_shift,
+			  unsigned int resl_shift)
+{
+	uint32_t gpio_reg;
+
+	gpio_reg = (readl(MTK_GPIO_I2C_BASE) &
+		    (~(0x7 << mode_shift))) | (0x1 << mode_shift);
+	writel(gpio_reg, MTK_GPIO_I2C_BASE);
+
+	writel((readl(MTK_GPIO_I2C_PULL_ENABLE_BASE) |
+		(0x1 << pull_shift)),
+	       MTK_GPIO_I2C_PULL_ENABLE_BASE);
+	writel((readl(MTK_GPIO_I2C_PULL_SEL_BASE) |
+		(0x1 << pull_shift)),
+	       MTK_GPIO_I2C_PULL_SEL_BASE);
+	writel((readl(MTK_GPIO_I2C_RESL_BASE) |
+		(0x3 << resl_shift)),
+	       MTK_GPIO_I2C_RESL_BASE);
+}
+
+/* Pinmux and pull configuration for the SDA/SCL pads of all three i2c
+ * buses. Always returns 0. */
+int i2c_hw_init(void)
+{
+	/* I2C0 */
+	i2c_pin_setup(MTK_GPIO_SDA0, MTK_GPIO_PULL_ENABLE_SDA0,
+		      MTK_GPIO_RESL_SDA0);
+	i2c_pin_setup(MTK_GPIO_SCL0, MTK_GPIO_PULL_ENABLE_SCL0,
+		      MTK_GPIO_RESL_SCL0);
+
+	/* I2C1 */
+	i2c_pin_setup(MTK_GPIO_SDA1, MTK_GPIO_PULL_ENABLE_SDA1,
+		      MTK_GPIO_RESL_SDA1);
+	i2c_pin_setup(MTK_GPIO_SCL1, MTK_GPIO_PULL_ENABLE_SCL1,
+		      MTK_GPIO_RESL_SCL1);
+
+	/* I2C2 */
+	i2c_pin_setup(MTK_GPIO_SDA2, MTK_GPIO_PULL_ENABLE_SDA2,
+		      MTK_GPIO_RESL_SDA2);
+	i2c_pin_setup(MTK_GPIO_SCL2, MTK_GPIO_PULL_ENABLE_SCL2,
+		      MTK_GPIO_RESL_SCL2);
+
+	return 0;
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/bsp/lk/platform/mt8512/drivers/key/mtk_key.c b/src/bsp/lk/platform/mt8512/drivers/key/mtk_key.c
new file mode 100644
index 0000000..2c3066b
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/key/mtk_key.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <debug.h>
+#include <platform/mt_reg_base.h>
+#include <reg.h>
+#include <platform/mtk_key.h>
+
+bool check_download_key(void)
+{
+ if ((readl(SEJ_BASE)&0xF) == 0x3)
+ {
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+}
diff --git a/src/bsp/lk/platform/mt8512/drivers/mmc/mmc_core.c b/src/bsp/lk/platform/mt8512/drivers/mmc/mmc_core.c
new file mode 100644
index 0000000..d5dd36f
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/mmc/mmc_core.c
@@ -0,0 +1,1864 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*=======================================================================*/
+/* HEADER FILES */
+/*=======================================================================*/
+#include <config.h>
+#include <platform/msdc.h>
+#include <platform/mmc_core.h>
+#include <platform/mmc_ioctl.h>
+#include <lib/bio.h>
+#include <lib/heap.h>
+#include <lib/partition.h>
+#include <stdlib.h>
+#include <string.h>
+#include <err.h>
+#include <errno.h>
+#include <kernel/mutex.h>
+
/* Default command retry/timeout policy shared by the helpers below */
#define CMD_RETRIES (5)
#define CMD_TIMEOUT (100) /* 100ms */
/* Number of selectable pad-delay taps scanned during HS200 tuning */
#define PAD_DELAY_MAX 32

static int mmc_set_ext_csd(struct mmc_card *card, u8 addr, u8 value);
/* before DRAM k, malloc() is not ready, so define it globally */
struct mmc_host msdc_host0;
struct mmc_card emmc_card;

/* Block-device wrapper: one instance per exposed eMMC partition.
 * bdev is first so mmc_dev_t * can alias the struct bdev * handed to the
 * bio callbacks (see the casts in mmc_block_read/mmc_block_write). */
typedef struct {
    bdev_t bdev;
    u32 part_id;            /* EXT_CSD partition id this bdev maps to */
    struct mmc_host *host;
    struct mmc_card *card;
} mmc_dev_t;

/* Result of scanning one pad-delay bitmap: the longest run of passing taps */
struct msdc_delay_phase {
    u8 maxlen;              /* length of the best passing window */
    u8 start;               /* first tap of that window */
    u8 final_phase;         /* tap chosen inside the window (0xff = none) */
};

/* CSD TRAN_SPEED frequency-unit table (exponent part), in kbit/s steps */
static const unsigned int tran_exp[] = {
    10000, 100000, 1000000, 10000000,
    0, 0, 0, 0
};

/* TRAN_SPEED multiplier table for SD (values are x10) */
static const unsigned char tran_mant[] = {
    0, 10, 12, 13, 15, 20, 25, 30,
    35, 40, 45, 50, 55, 60, 70, 80,
};

/* TRAN_SPEED multiplier table for MMC (x10; differs at 2.6 and 5.2) */
static const unsigned char mmc_tran_mant[] = {
    0, 10, 12, 13, 15, 20, 26, 30,
    35, 40, 45, 52, 55, 60, 70, 80,
};
+
+static u32 unstuff_bits(u32 *resp, u32 start, u32 size)
+{
+ const u32 __mask = (1 << (size)) - 1;
+ const int __off = 3 - ((start) / 32);
+ const int __shft = (start) & 31;
+ u32 __res;
+
+ __res = resp[__off] >> __shft;
+ if ((size) + __shft >= 32)
+ __res |= resp[__off-1] << (32 - __shft);
+ return __res & __mask;
+}
+
+#define UNSTUFF_BITS(r,s,sz) unstuff_bits(r,s,sz)
+
+static int mmc_switch_part(mmc_dev_t *dev)
+{
+ int err = MMC_ERR_NONE;
+ struct mmc_card *card;
+ struct mmc_host *host;
+ u8 cfg;
+
+ host = dev->host;
+ if (host->curr_part == dev->part_id)
+ /* already set to specific partition */
+ return MMC_ERR_NONE;
+
+ if (dev->part_id > EXT_CSD_PART_CFG_GP_PART_4) {
+ dprintf(CRITICAL, "[MSDC] Unsupported partid: %u\n", dev->part_id);
+ return MMC_ERR_INVALID;
+ }
+
+ card = dev->card;
+ ASSERT(card);
+
+ cfg = card->ext_csd.part_cfg;
+ cfg = (cfg & ~0x7) | dev->part_id;
+ err = mmc_set_ext_csd(card, EXT_CSD_PART_CFG, cfg);
+ if (err)
+ dprintf(CRITICAL, "[MSDC] switch to part %u failed!\n", dev->part_id);
+ else
+ card->ext_csd.part_cfg = cfg;
+
+ return err;
+}
+
+static int mmc_cmd(struct mmc_host *host, struct mmc_command *cmd)
+{
+ int err;
+ int retry = cmd->retries;
+
+ do {
+ err = msdc_cmd(host, cmd);
+ if (err == MMC_ERR_NONE || cmd->opcode == MMC_CMD21) /* do not tuning CMD21 */
+ break;
+ } while (retry--);
+
+ return err;
+}
+
+static int mmc_app_cmd(struct mmc_host *host, struct mmc_command *cmd,
+ u32 rca, int retries)
+{
+ int err = MMC_ERR_FAILED;
+ struct mmc_command appcmd;
+
+ appcmd.opcode = MMC_CMD_APP_CMD;
+ appcmd.arg = rca << 16;
+ appcmd.rsptyp = RESP_R1;
+ appcmd.retries = CMD_RETRIES;
+ appcmd.timeout = CMD_TIMEOUT;
+
+ do {
+ err = mmc_cmd(host, &appcmd);
+
+ if (err == MMC_ERR_NONE)
+ err = mmc_cmd(host, cmd);
+ if (err == MMC_ERR_NONE)
+ break;
+ } while (retries--);
+
+ return err;
+}
+
+static u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
+{
+ int bit;
+
+ ocr &= host->ocr_avail;
+
+ bit = __builtin_ffs(ocr);
+ if (bit) {
+ bit -= 1;
+ ocr &= 3 << bit;
+ } else {
+ ocr = 0;
+ }
+ return ocr;
+}
+
+static inline int mmc_go_idle(struct mmc_host *host)
+{
+ struct mmc_command cmd = {
+ MMC_CMD_GO_IDLE_STATE, 0, RESP_NONE, {0}, CMD_TIMEOUT, CMD_RETRIES, 0
+ };
+ return mmc_cmd(host, &cmd);
+}
+
+static int mmc_send_if_cond(struct mmc_host *host, u32 ocr)
+{
+ struct mmc_command cmd;
+ int err;
+ static const u8 test_pattern = 0xAA;
+ u8 result_pattern;
+
+ /*
+ * To support SD 2.0 cards, we must always invoke SD_SEND_IF_COND
+ * before SD_APP_OP_COND. This command will harmlessly fail for
+ * SD 1.0 cards.
+ */
+
+ cmd.opcode = SD_CMD_SEND_IF_COND;
+ cmd.arg = ((ocr & 0xFF8000) != 0) << 8 | test_pattern;
+ cmd.rsptyp = RESP_R1;
+ cmd.retries = 0;
+ cmd.timeout = CMD_TIMEOUT;
+
+ err = mmc_cmd(host, &cmd);
+
+ if (err != MMC_ERR_NONE)
+ return err;
+
+ result_pattern = cmd.resp[0] & 0xFF;
+
+ if (result_pattern != test_pattern)
+ return MMC_ERR_INVALID;
+
+ return MMC_ERR_NONE;
+}
+
+/*
+ * return MMC_ERR_RETRY means that need re-send CMD1 in stage 2
+ */
+static int mmc_send_op_cond_once(struct mmc_host *host, u32 ocr, u32 *rocr)
+{
+ int i, err = 0;
+ struct mmc_command cmd = {
+ MMC_CMD_SEND_OP_COND, 0, RESP_R3, {0}, CMD_TIMEOUT, 0, 0
+ };
+
+ cmd.arg = ocr;
+
+ for (i = 1; i; i--) {
+ err = mmc_cmd(host, &cmd);
+ if (err)
+ break;
+
+ /* if we're just probing, do a single pass */
+ if (ocr == 0)
+ break;
+
+ if (cmd.resp[0] & MMC_CARD_BUSY)
+ break;
+
+ err = MMC_ERR_RETRY;
+ }
+
+ if (!err && rocr)
+ *rocr = cmd.resp[0];
+
+ return err;
+}
+
+static int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
+{
+ int i, err = 0;
+ struct mmc_command cmd = {
+ MMC_CMD_SEND_OP_COND, 0, RESP_R3, {0}, CMD_TIMEOUT, 0, 0
+ };
+
+ cmd.arg = ocr;
+
+ for (i = 100; i; i--) {
+ err = mmc_cmd(host, &cmd);
+ if (err)
+ break;
+
+ /* if we're just probing, do a single pass */
+ if (ocr == 0)
+ break;
+
+ if (cmd.resp[0] & MMC_CARD_BUSY)
+ break;
+
+ err = MMC_ERR_TIMEOUT;
+
+ spin(10000);
+
+ }
+
+ if (!err && rocr)
+ *rocr = cmd.resp[0];
+
+ return err;
+}
+
+static int mmc_send_app_op_cond_once(struct mmc_host *host, u32 ocr, u32 *rocr)
+{
+ struct mmc_command cmd;
+ int i, err = 0;
+
+ cmd.opcode = SD_ACMD_SEND_OP_COND;
+ cmd.arg = ocr;
+ cmd.rsptyp = RESP_R3;
+ cmd.retries = CMD_RETRIES;
+ cmd.timeout = CMD_TIMEOUT;
+
+ for (i = 1; i; i--) {
+ err = mmc_app_cmd(host, &cmd, 0, CMD_RETRIES);
+ if (err != MMC_ERR_NONE)
+ break;
+
+ if (cmd.resp[0] & MMC_CARD_BUSY || ocr == 0)
+ break;
+
+ err = MMC_ERR_RETRY;
+ }
+
+ if (rocr)
+ *rocr = cmd.resp[0];
+
+ return err;
+}
+
+static int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
+{
+ struct mmc_command cmd;
+ int i, err = 0;
+
+ cmd.opcode = SD_ACMD_SEND_OP_COND;
+ cmd.arg = ocr;
+ cmd.rsptyp = RESP_R3;
+ cmd.retries = CMD_RETRIES;
+ cmd.timeout = CMD_TIMEOUT;
+
+ for (i = 100; i; i--) {
+ err = mmc_app_cmd(host, &cmd, 0, CMD_RETRIES);
+ if (err != MMC_ERR_NONE)
+ break;
+
+ if (cmd.resp[0] & MMC_CARD_BUSY || ocr == 0)
+ break;
+
+ err = MMC_ERR_TIMEOUT;
+
+ spin(10000);
+ }
+
+ if (rocr)
+ *rocr = cmd.resp[0];
+
+ return err;
+}
+
+static int mmc_all_send_cid(struct mmc_host *host)
+{
+ struct mmc_command cmd = {
+ MMC_CMD_ALL_SEND_CID, 0, RESP_R2, {0}, CMD_TIMEOUT, CMD_RETRIES, 0
+ };
+ return mmc_cmd(host, &cmd);
+}
+
+static int mmc_send_relative_addr(struct mmc_host *host,
+ struct mmc_card *card, unsigned int *rca)
+{
+ int err;
+ struct mmc_command cmd;
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+
+ if (mmc_card_mmc(card)) { /* set rca */
+ cmd.opcode = MMC_CMD_SET_RELATIVE_ADDR;
+ cmd.arg = *rca << 16;
+ cmd.rsptyp = RESP_R1;
+ cmd.retries = CMD_RETRIES;
+ cmd.timeout = CMD_TIMEOUT;
+ } else { /* send rca */
+ cmd.opcode = SD_CMD_SEND_RELATIVE_ADDR;
+ cmd.arg = 0;
+ cmd.rsptyp = RESP_R6;
+ cmd.retries = CMD_RETRIES;
+ cmd.timeout = CMD_TIMEOUT;
+ }
+ err = mmc_cmd(host, &cmd);
+ if ((err == MMC_ERR_NONE) && !mmc_card_mmc(card))
+ *rca = cmd.resp[0] >> 16;
+
+ return err;
+}
+
+static int mmc_select_card(struct mmc_host *host, struct mmc_card *card)
+{
+ struct mmc_command cmd = {
+ MMC_CMD_SELECT_CARD, 0, RESP_R1B, {0}, CMD_TIMEOUT, CMD_RETRIES, 0
+ };
+ cmd.arg = card->rca << 16;
+ return mmc_cmd(host, &cmd);
+}
+
+static int mmc_send_status(struct mmc_host *host, struct mmc_card *card,
+ u32 *status)
+{
+ int err;
+ struct mmc_command cmd = {
+ MMC_CMD_SEND_STATUS, 0, RESP_R1, {0}, CMD_TIMEOUT, CMD_RETRIES, 0
+ };
+ cmd.arg = card->rca << 16;
+
+ err = mmc_cmd(host, &cmd);
+ if (err == MMC_ERR_NONE)
+ *status = cmd.resp[0];
+ return err;
+}
+
/*
 * CMD6 (SWITCH): write one byte of EXT_CSD, then poll CMD13 until the card
 * leaves the busy/programming state or a bounded retry count is exhausted.
 * On a successful PARTITION_CONFIG write, the cached active-partition id
 * on the host is refreshed.
 */
static int mmc_switch(struct mmc_host *host, struct mmc_card *card,
    u8 set, u8 index, u8 value)
{
    int err;
    u32 status = 0, count = 0;
    struct mmc_command cmd = {
        MMC_CMD_SWITCH, 0, RESP_R1B, {0}, CMD_TIMEOUT, CMD_RETRIES, 0
    };

    /* arg layout: [25:24] access mode (write byte), [23:16] EXT_CSD index,
     * [15:8] value, [2:0] cmd set */
    cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | (index << 16) |
        (value << 8) | set;

    err = mmc_cmd(host, &cmd);
    if (err != MMC_ERR_NONE)
        return err;

    /* wait for READY_FOR_DATA and exit of the PRG state (state 7) */
    do {
        err = mmc_send_status(host, card, &status);
        if (err) {
            dprintf(CRITICAL, "[eMMC] Fail to send status %d\n", err);
            break;
        }
        if (status & R1_SWITCH_ERROR) {
            dprintf(CRITICAL, "[eMMC] switch error. arg(0x%x)\n", cmd.arg);
            return MMC_ERR_FAILED;
        }
        /* bail out rather than hang forever on a stuck card */
        if (count++ >= 600000) {
            dprintf(CRITICAL, "[%s]: timeout happend, count=%d, status=0x%x\n",
                __func__, count, status);
            break;
        }
    } while (!(status & R1_READY_FOR_DATA) || (R1_CURRENT_STATE(status) == 7));

    /* keep the cached partition id in sync so redundant switches are skipped */
    if (!err && (index == EXT_CSD_PART_CFG))
        host->curr_part = value & 0x7;

    return err;
}
+
/*
 * CMD9 (SEND_CSD): fetch the 128-bit CSD of the addressed card, cache the
 * raw words in card->raw_csd and pre-decode a few fields (the full decode
 * is done later by mmc_decode_csd).
 */
static int mmc_read_csds(struct mmc_host *host, struct mmc_card *card)
{
    int err;
    struct mmc_command cmd = {
        MMC_CMD_SEND_CSD, 0, RESP_R2, {0}, CMD_TIMEOUT * 100, CMD_RETRIES, 0
    };

    cmd.arg = card->rca << 16;

    err = mmc_cmd(host, &cmd);
    if (err == MMC_ERR_NONE) {
        unsigned int e, m;
        /* spec version, TRAN_SPEED (exp/mantissa), C_SIZE capacity,
         * and READ_BL_LEN from the raw response words */
        card->csd.mmca_vsn = UNSTUFF_BITS(&cmd.resp[0], 122, 4);
        m = UNSTUFF_BITS(&cmd.resp[0], 99, 4);
        e = UNSTUFF_BITS(&cmd.resp[0], 96, 3);
        card->csd.max_dtr = tran_exp[e] * mmc_tran_mant[m];
        e = UNSTUFF_BITS(&cmd.resp[0], 47, 3);
        m = UNSTUFF_BITS(&cmd.resp[0], 62, 12);
        card->csd.capacity = (1 + m) << (e + 2);
        card->csd.read_blkbits = UNSTUFF_BITS(&cmd.resp[0], 80, 4);
        memcpy(&card->raw_csd, &cmd.resp[0], sizeof(u32) * 4);
    }

    return err;
}
+
/*
 * Decode the cached raw CSD (card->raw_csd) into card->csd, handling both
 * MMC (CSD_STRUCT_VER_*) and SD (structure version 0/1) layouts.
 * Returns 0 on success or MMC_ERR_INVALID for an unknown CSD structure.
 */
static int mmc_decode_csd(struct mmc_card *card)
{
    struct mmc_csd *csd = &card->csd;
    unsigned int e, m, csd_struct;
    u32 *resp = card->raw_csd;

    /* common part; some part are updated later according to spec. */
    csd_struct = unstuff_bits(resp, 126, 2);
    csd->csd_struct = csd_struct;

    /* For MMC
     * We only understand CSD structure v1.1 and v1.2.
     * v1.2 has extra information in bits 15, 11 and 10.
     */
    if ( ( mmc_card_mmc(card) &&
           ( csd_struct != CSD_STRUCT_VER_1_0 && csd_struct != CSD_STRUCT_VER_1_1
             && csd_struct != CSD_STRUCT_VER_1_2 && csd_struct != CSD_STRUCT_EXT_CSD )
         ) ||
         ( mmc_card_sd(card) && ( csd_struct != 0 && csd_struct!=1 ) )
       ) {
        dprintf(ALWAYS, "Unknown CSD ver %d\n", csd_struct);
        return MMC_ERR_INVALID;
    }

    /* TRAN_SPEED: frequency unit (exponent table) * mantissa table */
    m = unstuff_bits(resp, 99, 4);
    e = unstuff_bits(resp, 96, 3);
    csd->max_dtr = tran_exp[e] * tran_mant[m];

    /* update later according to spec. */
    csd->read_blkbits = unstuff_bits(resp, 80, 4);

    /* capacity = (C_SIZE + 1) << (C_SIZE_MULT + 2), in read blocks */
    e = unstuff_bits(resp, 47, 3);
    m = unstuff_bits(resp, 62, 12);
    csd->capacity = (1 + m) << (e + 2);

    //Specific part
    if (mmc_card_sd(card)) {
        switch (csd_struct) {
            case 0:
                break;
            case 1:
                /*
                 * This is a block-addressed SDHC card. Most
                 * interesting fields are unused and have fixed
                 * values. To avoid getting tripped by buggy cards,
                 * we assume those fixed values ourselves.
                 */
                mmc_card_set_blockaddr(card);

                /* CSD 2.0: capacity = (C_SIZE + 1) * 1024 blocks of 512B */
                m = unstuff_bits(resp, 48, 22);
                csd->capacity = (1 + m) << 10;

                csd->read_blkbits = 9;
                break;
        }
    } else {
        csd->mmca_vsn = unstuff_bits(resp, 122, 4);
    }

    return 0;
}
+
/*
 * Decode the interesting EXT_CSD bytes into card->ext_csd and derive the
 * fastest bus timing supported by both the card (CARD_TYPE byte) and the
 * host (caps bitmask).
 */
static void mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
    u32 caps = card->host->caps;
    u8 card_type = ext_csd[EXT_CSD_CARD_TYPE];

    /* SEC_COUNT: device size in 512B sectors, 4 little-endian bytes */
    card->ext_csd.sectors =
        ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
        ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
        ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
        ext_csd[EXT_CSD_SEC_CNT + 3] << 24;

    card->ext_csd.rev = ext_csd[EXT_CSD_REV];
    card->ext_csd.boot_info = ext_csd[EXT_CSD_BOOT_INFO];
    /* boot and RPMB partition sizes are given in 128KB multiples */
    card->ext_csd.boot_part_sz = ext_csd[EXT_CSD_BOOT_SIZE_MULT] * 128 * 1024;
    card->ext_csd.rpmb_sz = ext_csd[EXT_CSD_RPMB_SIZE_MULT] * 128 * 1024;

    /* non-zero sector count marks a sector-addressed (high-capacity) device */
    if (card->ext_csd.sectors)
        mmc_card_set_blockaddr(card);

    /* best timing first: HS400 > HS200 > DDR52 > HS52 > HS26 */
    if (caps & MMC_CAP_EMMC_HS400 &&
        card_type & EXT_CSD_CARD_TYPE_HS400_1_8V) {
        card->ext_csd.hs400_support = 1;
        card->ext_csd.hs_max_dtr = 200000000;
    } else if (caps & MMC_CAP_EMMC_HS200 &&
               card_type & EXT_CSD_CARD_TYPE_HS200_1_8V) {
        card->ext_csd.hs_max_dtr = 200000000;
    } else if (caps & MMC_CAP_DDR &&
               card_type & EXT_CSD_CARD_TYPE_DDR_52) {
        card->ext_csd.ddr_support = 1;
        card->ext_csd.hs_max_dtr = 52000000;
    } else if (caps & MMC_CAP_MMC_HIGHSPEED &&
               card_type & EXT_CSD_CARD_TYPE_52) {
        card->ext_csd.hs_max_dtr = 52000000;
    } else if (card_type & EXT_CSD_CARD_TYPE_26) {
        card->ext_csd.hs_max_dtr = 26000000;
    } else {
        /* MMC v4 spec says this cannot happen */
        dprintf(CRITICAL, "[eMMC] MMCv4 but HS unsupported\n");
    }

    card->ext_csd.part_cfg = ext_csd[EXT_CSD_PART_CFG];
    card->ext_csd.sec_support = ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
    card->ext_csd.reset_en = ext_csd[EXT_CSD_RST_N_FUNC];

    return;
}
+
/* Read and decode extended CSD. */
/*
 * CMD8 (SEND_EXT_CSD): DMA-read the 512-byte EXT_CSD block into a scratch
 * buffer and decode it into card->ext_csd, retrying with read tuning on
 * data errors. Cards older than CSD spec 4.x have no EXT_CSD and are
 * skipped with success.
 */
static int mmc_read_ext_csd(struct mmc_host *host, struct mmc_card *card)
{
    int err = MMC_ERR_NONE;
    u8 *ext_csd;
    int result = MMC_ERR_NONE;
    struct mmc_data data;
    addr_t base = host->base;   /* consumed by the MSDC_* register macros */
    struct mmc_command cmd = {
        MMC_CMD_SEND_EXT_CSD, 0, RESP_R1, {0}, CMD_TIMEOUT, CMD_RETRIES, 0
    };

    if (card->csd.mmca_vsn < CSD_SPEC_VER_4) {
        dprintf(CRITICAL, "[eMMC] MMCA_VSN: %d. Skip EXT_CSD\n",
            card->csd.mmca_vsn);
        return MMC_ERR_NONE;
    }

    /*
     * As the ext_csd is so large and mostly unused, we don't store the
     * raw block in mmc_card.
     */
    ext_csd = malloc(512);
    ASSERT(ext_csd);
    memset(ext_csd, 0, 512);

    msdc_reset_tune_counter(host);

    /* retry the command+data sequence until it succeeds or tuning gives up */
    do {
        MSDC_DMA_ON;
        MSDC_WRITE32(SDC_BLK_NUM, 1);
        host->blklen = 512;
        msdc_set_timeout(host, 100000000, 0);
        err = mmc_cmd(host, &cmd);
        if (err != MMC_ERR_NONE)
            goto out;

        data.cmd = &cmd;
        data.blks = 1;
        data.buf = ext_csd;
        data.timeout = 100;
        err = msdc_dma_transfer(host, &data);
        MSDC_DMA_OFF;
        if (err != MMC_ERR_NONE) {
            if (msdc_abort_handler(host, 1))
                dprintf(CRITICAL, "[eMMC] data abort failed\n");
            result = msdc_tune_read(host);
        }
    } while (err && result != MMC_ERR_READTUNEFAIL);
    msdc_reset_tune_counter(host);
    /* NOTE(review): decode also runs when the loop exits on a tuning
     * failure; callers then still see err != MMC_ERR_NONE — confirm intended */
    mmc_decode_ext_csd(card, ext_csd);

out:
    free(ext_csd);
    return err;
}
+
+static void mmc_set_clock(struct mmc_host *host, int state, unsigned int hz)
+{
+ if (hz >= host->f_max) {
+ hz = host->f_max;
+ } else if (hz < host->f_min) {
+ hz = host->f_min;
+ }
+ msdc_config_clock(host, state, hz);
+}
+
/*
 * Negotiate the data bus width with the card (ACMD6 for SD, EXT_CSD
 * BUS_WIDTH for MMC), falling back to narrower widths the host cannot
 * drive, then reconfigure the host controller to match.
 */
static int mmc_set_bus_width(struct mmc_host *host, struct mmc_card *card, int width)
{
    int err = MMC_ERR_NONE;
    u32 arg = 0;
    struct mmc_command cmd;

    if (mmc_card_sd(card)) {
        /* SD has no 8-bit mode; degrade to 4-bit. (This first `arg`
         * assignment is a dead store — it is overwritten below.) */
        if (width == HOST_BUS_WIDTH_8) {
            arg = SD_BUS_WIDTH_4;
            width = HOST_BUS_WIDTH_4;
        }

        if ((width == HOST_BUS_WIDTH_4) && (host->caps & MMC_CAP_4_BIT_DATA)) {
            arg = SD_BUS_WIDTH_4;
        } else {
            arg = SD_BUS_WIDTH_1;
            width = HOST_BUS_WIDTH_1;
        }

        /* ACMD6 (SET_BUS_WIDTH) */
        cmd.opcode = SD_ACMD_SET_BUSWIDTH;
        cmd.arg = arg;
        cmd.rsptyp = RESP_R1;
        cmd.retries = CMD_RETRIES;
        cmd.timeout = CMD_TIMEOUT;

        err = mmc_app_cmd(host, &cmd, card->rca, 0);
        if (err != MMC_ERR_NONE)
            goto out;

        msdc_config_bus(host, width);
    } else if (mmc_card_mmc(card)) {

        /* BUS_WIDTH only exists for MMC 4.x and later */
        if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
            goto out;

        /* step down until the host supports the width */
        if (width == HOST_BUS_WIDTH_8) {
            if (host->caps & MMC_CAP_8_BIT_DATA) {
                arg = EXT_CSD_BUS_WIDTH_8;
            } else {
                width = HOST_BUS_WIDTH_4;
            }
        }
        if (width == HOST_BUS_WIDTH_4) {
            if (host->caps & MMC_CAP_4_BIT_DATA) {
                arg = EXT_CSD_BUS_WIDTH_4;
            } else {
                width = HOST_BUS_WIDTH_1;
            }
        }
        if (width == HOST_BUS_WIDTH_1)
            arg = EXT_CSD_BUS_WIDTH_1;

        err = mmc_switch(host, card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH, arg);
        if (err != MMC_ERR_NONE) {
            dprintf(CRITICAL, "[eMMC] Switch to bus width(%d) failed\n", arg);
            goto out;
        }
        /* a plain width switch leaves DDR mode; clear the cached flag */
        mmc_card_clr_ddr(card);

        msdc_config_bus(host, width);
    }

out:
    return err;
}
+
+static u32 test_delay_bit(u32 delay, u32 bit)
+{
+ bit %= PAD_DELAY_MAX;
+ return delay & (1 << bit);
+}
+
+static int get_delay_len(u32 delay, u32 start_bit)
+{
+ u32 i;
+
+ for (i = 0; i < (PAD_DELAY_MAX - start_bit); i++) {
+ if (test_delay_bit(delay, start_bit + i) == 0)
+ return i;
+ }
+ return PAD_DELAY_MAX - start_bit;
+}
+
/*
 * Pick the best delay tap from a pass/fail bitmap: find the longest run of
 * passing taps and return a phase inside it. A final_phase of 0xff means
 * no tap passed at all.
 */
static struct msdc_delay_phase get_best_delay(u32 delay)
{
    int start = 0, len = 0;
    int start_final = 0, len_final = 0;
    u8 final_phase = 0xff;
    struct msdc_delay_phase delay_phase = { 0, };

    if (delay == 0) {
        dprintf(CRITICAL, "phase error: [map:%x]\n", delay);
        delay_phase.final_phase = final_phase;
        return delay_phase;
    }

    /* scan for the longest run of set bits */
    while (start < PAD_DELAY_MAX) {
        len = get_delay_len(delay, start);
        if (len_final < len) {
            start_final = start;
            len_final = len;
        }
        start += len ? len : 1;
        /* a long window near the origin is good enough; stop early */
        if (len >= 12 && start_final < 4)
            break;
    }

    /* The rule is that to find the smallest delay cell */
    if (start_final == 0)
        final_phase = (start_final + len_final / 3) % PAD_DELAY_MAX;
    else
        final_phase = (start_final + len_final / 2) % PAD_DELAY_MAX;
    dprintf(ALWAYS, "phase: [map:%x] [maxlen:%d] [final:%d]\n",
        delay, len_final, final_phase);

    delay_phase.maxlen = len_final;
    delay_phase.start = start_final;
    delay_phase.final_phase = final_phase;
    return delay_phase;
}
+
+static int mmc_hs200_tune_cmd(struct mmc_host *host, int *cmd_error)
+{
+ int err = MMC_ERR_NONE;
+ u8 *tune_data;
+ u16 data_len = host->caps & MMC_CAP_8_BIT_DATA ? 128: 64;
+ struct mmc_data data;
+ addr_t base = host->base;
+ struct mmc_command cmd = {
+ MMC_CMD21, 0, RESP_R1, {0}, CMD_TIMEOUT, 0, 0
+ };
+
+ tune_data = malloc(data_len);
+ ASSERT(tune_data);
+ memset(tune_data, 0, data_len);
+ *cmd_error = MMC_ERR_NONE;
+
+ msdc_reset_tune_counter(host);
+
+ MSDC_DMA_ON;
+ MSDC_WRITE32(SDC_BLK_NUM, 1);
+ host->blklen = data_len;
+ msdc_set_timeout(host, 100000000, 0);
+ err = mmc_cmd(host, &cmd);
+ if (err != MMC_ERR_NONE)
+ *cmd_error = err; /* still need receive data, or will impact the next cmd21 */
+
+ data.cmd = &cmd;
+ data.blks = 1;
+ data.buf = tune_data;
+ data.timeout = 100;
+ err = msdc_dma_transfer(host, &data);
+ MSDC_DMA_OFF;
+ msdc_reset_tune_counter(host);
+
+out:
+ free(tune_data);
+ return err;
+}
+
/*
 * HS200 command-response tuning: for each pad-delay tap, issue CMD21 three
 * times and mark the tap as passing only if all succeed. The rising clock
 * edge is scanned first; the falling edge is scanned only when the rising
 * window is too small. The better of the two windows is programmed.
 * Returns 0 on success or -EIO when no tap passed.
 */
static int msdc_tune_response(struct mmc_host *mmc)
{
    addr_t base = mmc->base;    /* consumed by the MSDC_* register macros */
    u32 rise_delay = 0, fall_delay = 0;
    struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
    u8 final_delay, final_maxlen;
    int cmd_err;
    int i, j;

    /* scan with response latched on the rising edge */
    MSDC_CLR_BIT32(MSDC_IOCON, MSDC_IOCON_RSPL);
    for (i = 0 ; i < PAD_DELAY_MAX; i++) {
        MSDC_SET_FIELD(MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLY, i);
        for (j = 0; j < 3; j++) {
            mmc_hs200_tune_cmd(mmc, &cmd_err);
            if (!cmd_err) {
                rise_delay |= (1 << i);
            } else {
                rise_delay &= ~(1 << i);
                break;
            }
        }
    }
    final_rise_delay = get_best_delay(rise_delay);
    /* if rising edge has enough margin, then do not scan falling edge */
    if (final_rise_delay.maxlen >= 12 ||
        (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4))
        goto skip_fall;

    /* scan with response latched on the falling edge */
    MSDC_SET_BIT32(MSDC_IOCON, MSDC_IOCON_RSPL);
    for (i = 0; i < PAD_DELAY_MAX; i++) {
        MSDC_SET_FIELD(MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLY, i);
        for (j = 0; j < 3; j++) {
            mmc_hs200_tune_cmd(mmc, &cmd_err);
            if (!cmd_err) {
                fall_delay |= (1 << i);
            } else {
                fall_delay &= ~(1 << i);
                break;
            }
        }
    }
    final_fall_delay = get_best_delay(fall_delay);

skip_fall:
    /* program the edge and tap with the widest passing window */
    final_maxlen = MAX(final_rise_delay.maxlen, final_fall_delay.maxlen);
    if (final_maxlen == final_rise_delay.maxlen) {
        MSDC_CLR_BIT32(MSDC_IOCON, MSDC_IOCON_RSPL);
        MSDC_SET_FIELD(MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLY,
            final_rise_delay.final_phase);
        final_delay = final_rise_delay.final_phase;
    } else {
        MSDC_SET_BIT32(MSDC_IOCON, MSDC_IOCON_RSPL);
        MSDC_SET_FIELD(MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLY,
            final_fall_delay.final_phase);
        final_delay = final_fall_delay.final_phase;
    }

    dprintf(ALWAYS, "Final cmd pad delay: %x\n", final_delay);
    return final_delay == 0xff ? -EIO : 0;
}
+
+static int mmc_select_hs200(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ int ret;
+
+ ret = mmc_set_bus_width(host, card, HOST_BUS_WIDTH_8);
+ if (ret != MMC_ERR_NONE) {
+ dprintf(CRITICAL, "failed to set bus width!\n");
+ return ret;
+ }
+
+ ret = mmc_switch(host, card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
+ EXT_CSD_HS_TIMEING_HS200);
+ if (ret != MMC_ERR_NONE) {
+ dprintf(CRITICAL, "failed to switch to hs200 mode!\n");
+ return ret;
+ }
+
+ mmc_card_set_hs200(card);
+ mmc_set_clock(host, card->state, card->ext_csd.hs_max_dtr);
+
+ return 0;
+}
+
/*
 * Promote the card from HS200 to HS400 using the spec-mandated sequence:
 * drop to high-speed at <= 52MHz, switch the bus to 8-bit DDR, switch
 * HS_TIMING to HS400, then raise the clock to the card's maximum.
 */
static int mmc_select_hs400(struct mmc_card *card)
{
    struct mmc_host *host = card->host;
    int ret;

    /* HS400 entry requires dropping to HS timing at a legal HS clock first */
    mmc_set_clock(host, card->state, 50000000);
    ret = mmc_switch(host, card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
        EXT_CSD_HS_TIMEING_HS);
    if (ret != MMC_ERR_NONE) {
        dprintf(CRITICAL, "switch to high-speed from hs200 failed, err:%d\n", ret);
        return ret;
    }

    /* 8-bit DDR bus is mandatory for HS400 */
    ret = mmc_switch(host, card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_8_DDR);
    if (ret != MMC_ERR_NONE) {
        dprintf(CRITICAL, "switch to bus width for hs400 failed, err:%d\n", ret);
        return ret;
    }

    ret = mmc_switch(host, card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
        EXT_CSD_HS_TIMEING_HS400);
    if (ret != MMC_ERR_NONE) {
        dprintf(CRITICAL, "switch to hs400 failed, err:%d\n", ret);
        return ret;
    }
    mmc_card_set_hs400(card);
    mmc_set_clock(host, card->state, card->ext_csd.hs_max_dtr);

    return ret;
}
+
+static int mmc_hs200_tuning(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ addr_t base = host->base;
+ int ret;
+
+ ret = msdc_tune_response(host);
+ if (ret == -EIO) {
+ dprintf(CRITICAL, "hs200 tuning cmd error!\n");
+ return ret;
+ }
+
+ if (host->caps & MMC_CAP_EMMC_HS400 && card->ext_csd.hs400_support) {
+ MSDC_WRITE32(EMMC50_PAD_DS_TUNE, 0x14029);
+ return MMC_ERR_NONE;
+ }
+
+ return MMC_ERR_NONE;
+}
+
+static int mmc_erase_start(struct mmc_card *card, u32 blknr)
+{
+ struct mmc_command cmd = {
+ MMC_CMD_ERASE_GROUP_START, 0, RESP_R1, {0}, CMD_TIMEOUT, 3, 0
+ };
+ if (mmc_card_sd(card))
+ cmd.opcode = MMC_CMD_ERASE_WR_BLK_START;
+ cmd.arg = blknr;
+ return mmc_cmd(card->host, &cmd);
+}
+
+static int mmc_erase_end(struct mmc_card *card, u32 blknr)
+{
+ struct mmc_command cmd = {
+ MMC_CMD_ERASE_GROUP_END, 0, RESP_R1, {0}, CMD_TIMEOUT, 3, 0
+ };
+ if (mmc_card_sd(card))
+ cmd.opcode = MMC_CMD_ERASE_WR_BLK_END;
+ cmd.arg = blknr;
+ return mmc_cmd(card->host, &cmd);
+}
+
/*
 * CMD38 (ERASE): execute the erase of the range set by the preceding
 * start/end commands, then poll CMD13 until the card leaves programming
 * state. `arg` selects the erase flavor (secure/trim/GC); SD always uses
 * a plain erase (arg forced to 0). Flavors the card does not advertise in
 * EXT_CSD SEC_FEATURE_SUPPORT are rejected.
 */
static int mmc_erase(struct mmc_card *card, u32 arg)
{
    int err;
    u32 status;
    struct mmc_command cmd = {
        MMC_CMD_ERASE, 0, RESP_R1B, {0}, CMD_TIMEOUT, 3, 0
    };
    if (mmc_card_sd(card))
        arg = 0;    /* SD knows only the plain erase */
    cmd.arg = arg;

    /* reject erase flavors the card did not advertise */
    if (arg & MMC_ERASE_SECURE_REQ) {
        if (!(card->ext_csd.sec_support & EXT_CSD_SEC_FEATURE_ER_EN))
            return MMC_ERR_INVALID;
    }
    if ((arg & MMC_ERASE_GC_REQ) || (arg & MMC_ERASE_TRIM)) {
        if (!(card->ext_csd.sec_support & EXT_CSD_SEC_FEATURE_GB_CL_EN))
            return MMC_ERR_INVALID;
    }

    err = mmc_cmd(card->host, &cmd);
    if (err)
        return err;

    /* wait for the erase to finish (card state 7 = programming) */
    do {
        err = mmc_send_status(card->host, card, &status);
        if (err)
            break;
        if (R1_STATUS(status) != 0)
            break;
    } while (R1_CURRENT_STATE(status) == 7);

    return err;
}
+
/*
 * TRIM the byte range [start_addr, start_addr + len) using the
 * start/end/erase command triplet. Addresses are converted to sector
 * units for high-capacity cards. len must cover at least one block.
 * NOTE(review): the %ld/%llx format specifiers assume size_t and off_t
 * widths match on this platform — confirm against the LK type definitions.
 */
static int mmc_do_trim(struct mmc_card *card, off_t start_addr, size_t len)
{
    int err = MMC_ERR_NONE;
    off_t end_addr;

    if (len < card->blklen) {
        dprintf(CRITICAL, "%s: invalid len: %ld\n", __func__, len);
        return MMC_ERR_INVALID;
    }

    /* last whole block inside the range, as a byte address */
    end_addr =((start_addr + len) / card->blklen - 1) * card->blklen;

    /* high-capacity cards address in 512B sectors, not bytes */
    if (mmc_card_highcaps(card)) {
        start_addr >>= MMC_BLOCK_BITS_SHFT;
        end_addr >>= MMC_BLOCK_BITS_SHFT;
    }

    err = mmc_erase_start(card, start_addr);
    if (err)
        goto error;

    err = mmc_erase_end(card, end_addr);
    if (err)
        goto error;

    err = mmc_erase(card, MMC_ERASE_TRIM);

error:
    if (err)
        dprintf(CRITICAL, "%s: erase range (0x%llx~0x%llx) failed,Err<%d>\n",
            __func__, start_addr, end_addr, err);

    return err;
}
+
+static int mmc_set_ext_csd(struct mmc_card *card, u8 addr, u8 value)
+{
+ int err;
+
+ /* can't write */
+ if (192 <= addr || !card)
+ return MMC_ERR_INVALID;
+
+ err = mmc_switch(card->host, card, EXT_CSD_CMD_SET_NORMAL, addr, value);
+
+ if (err == MMC_ERR_NONE)
+ err = mmc_read_ext_csd(card->host, card);
+
+ return err;
+}
+
+static int mmc_set_reset_func(struct mmc_card *card, u8 enable)
+{
+ int err = MMC_ERR_FAILED;
+
+ if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
+ goto out;
+
+ if (card->ext_csd.reset_en == 0) {
+ err = mmc_set_ext_csd(card, EXT_CSD_RST_N_FUNC, enable);
+ if (err == MMC_ERR_NONE)
+ card->ext_csd.reset_en = enable;
+ } else {
+ /* no need set */
+ return MMC_ERR_NONE;
+ }
+out:
+ return err;
+}
+
+static int mmc_set_boot_bus(struct mmc_card *card, u8 rst_bwidth, u8 mode, u8 bwidth)
+{
+ int err = MMC_ERR_FAILED;
+ u8 arg;
+
+ if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
+ goto out;
+
+ arg = mode | rst_bwidth | bwidth;
+
+ err = mmc_set_ext_csd(card, EXT_CSD_BOOT_BUS_WIDTH, arg);
+out:
+ return err;
+}
+
+static int mmc_set_part_config(struct mmc_card *card, u8 cfg)
+{
+ int err = MMC_ERR_FAILED;
+
+ if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
+ goto out;
+
+ err = mmc_set_ext_csd(card, EXT_CSD_PART_CFG, cfg);
+out:
+ return err;
+}
+
/*
 * Configure the eMMC boot behavior: select the boot partition (with the
 * given boot-ack setting) and then the boot bus mode/width. Requires an
 * MMC 4.3+ card that reports boot capability (BOOT_INFO, EXT_CSD rev 3).
 */
static int mmc_boot_config(struct mmc_card *card, u8 acken, u8 enpart, u8 buswidth, u8 busmode)
{
    int err = MMC_ERR_FAILED;
    u8 val;
    u8 rst_bwidth = 0;
    u8 cfg;

    if (card->csd.mmca_vsn < CSD_SPEC_VER_4 ||
        !card->ext_csd.boot_info || card->ext_csd.rev < 3)
        goto out;

    cfg = card->ext_csd.part_cfg;
    /* configure boot partition */
    val = acken | enpart | (cfg & 0x7);   /* keep the current access bits */
    err = mmc_set_part_config(card, val);
    if (err != MMC_ERR_NONE)
        goto out;
    else
        card->ext_csd.part_cfg = val;

    /* configure boot bus mode and width */
    /* retain the width after reset only for non-1-bit boot widths */
    rst_bwidth = (buswidth != EXT_CSD_BOOT_BUS_WIDTH_1 ? 1 : 0) << 2;
    dprintf(INFO, " =====Set boot Bus Width<%d>=======\n", buswidth);
    dprintf(INFO, " =====Set boot Bus mode<%d>=======\n", busmode);
    err = mmc_set_boot_bus(card, rst_bwidth, busmode, buswidth);
out:

    return err;
}
+
+static int emmc_boot_prepare(struct mmc_card *card)
+{
+ int err = MMC_ERR_NONE;
+ u8 buswidth = EXT_CSD_BOOT_BUS_WIDTH_1;
+
+ err = mmc_boot_config(card, EXT_CSD_PART_CFG_EN_ACK,
+ EXT_CSD_PART_CFG_EN_BOOT_PART_1,
+ buswidth, EXT_CSD_BOOT_BUS_MODE_DEFT);
+ if (err)
+ goto exit;
+
+ err = mmc_set_reset_func(card, 1);
+exit:
+ return err;
+}
+
/*
 * Read blkcnt blocks starting at blknr into dst. CRC errors trigger a
 * retry through the read-tuning path (and a clock step-down); tuning
 * failures abort. blknr is converted to a byte address for
 * standard-capacity cards.
 */
static int mmc_dev_bread(struct mmc_card *card, unsigned long blknr, u32 blkcnt, u8 *dst)
{
    struct mmc_host *host = card->host;
    u32 blksz = host->blklen;
    int tune = 0;
    int retry = 3;
    int err;
    unsigned long src;

    /* sector address for high-capacity cards, byte address otherwise */
    src = mmc_card_highcaps(card) ? blknr : blknr * blksz;

    do {
        if (!tune) {
            err = host->blk_read(host, (uchar *)dst, src, blkcnt);
        } else {
#ifdef FEATURE_MMC_RD_TUNING
            err = msdc_tune_bread(host, (uchar *)dst, src, blkcnt);
#endif
            /* NOTE(review): with FEATURE_MMC_RD_TUNING disabled, err here
             * is the previous attempt's value and only the clock is
             * lowered — confirm this fallback is intended */
            if (err && (host->sclk > (host->f_max >> 4)))
                mmc_set_clock(host, card->state, host->sclk >> 1);
        }
        if (err == MMC_ERR_NONE) {
            break;
        }

        /* CRC errors are retried with tuning; tuning failures are final */
        if (err == MMC_ERR_BADCRC || err == MMC_ERR_ACMD_RSPCRC || err == MMC_ERR_CMD_RSPCRC) {
            tune = 1;
            retry++;
        } else if (err == MMC_ERR_READTUNEFAIL || err == MMC_ERR_CMDTUNEFAIL) {
            dprintf(CRITICAL, "[eMMC] Fail to tuning,%s",
                (err == MMC_ERR_CMDTUNEFAIL) ?
                "cmd tune failed!\n" : "read tune failed!\n");
            break;
        }
    } while (retry--);

    return err;
}
+
/*
 * Write blkcnt blocks from src starting at blknr. After a successful
 * transfer, CMD13 is polled until the card finishes programming. CRC
 * errors trigger a retry through the write-tuning path (and a clock
 * step-down); tuning failures abort.
 */
static int mmc_dev_bwrite(struct mmc_card *card, unsigned long blknr,
    u32 blkcnt, const u8 *src)
{
    struct mmc_host *host = card->host;
    u32 blksz = host->blklen;
    u32 status;
    int tune = 0;
    int retry = 3;
    int err;
    unsigned long dst;

    /* sector address for high-capacity cards, byte address otherwise */
    dst = mmc_card_highcaps(card) ? blknr : blknr * blksz;

    do {
        if (!tune) {
            err = host->blk_write(host, dst, (uchar *)src, blkcnt);
        } else {
#ifdef FEATURE_MMC_WR_TUNING
            err = msdc_tune_bwrite(host, dst, (uchar *)src, blkcnt);
#endif
            /* NOTE(review): with FEATURE_MMC_WR_TUNING disabled, err here
             * is the previous attempt's value — confirm intended */
            if (err && (host->sclk > (host->f_max >> 4)))
                mmc_set_clock(host, card->state, host->sclk >> 1);
        }
        if (err == MMC_ERR_NONE) {
            /* wait for the card to finish programming (state 7) */
            do {
                err = mmc_send_status(host, card, &status);
                if (err) {
                    dprintf(CRITICAL, "[eMMC] Fail to send status %d\n", err);
                    break;
                }
            } while (!(status & R1_READY_FOR_DATA) ||
                     (R1_CURRENT_STATE(status) == 7));
            dprintf(INFO, "[eMMC] Write %d bytes (DONE)\n", blkcnt * blksz);
            break;
        }

        /* CRC errors are retried with tuning; tuning failures are final */
        if (err == MMC_ERR_BADCRC || err == MMC_ERR_ACMD_RSPCRC || err == MMC_ERR_CMD_RSPCRC) {
            tune = 1;
            retry++;
        } else if (err == MMC_ERR_WRITETUNEFAIL || err == MMC_ERR_CMDTUNEFAIL) {
            dprintf(CRITICAL, "[eMMC] Fail to tuning,%s",
                (err == MMC_ERR_CMDTUNEFAIL) ?
                "cmd tune failed!\n" : "write tune failed!\n");
            break;
        }
    } while (retry--);

    return err;
}
+
+static ssize_t mmc_block_read(struct bdev *dev, void *buf, bnum_t block,
+ uint count)
+{
+ mmc_dev_t *__dev = (mmc_dev_t *)dev;
+ struct mmc_host *host = __dev->host;
+ struct mmc_card *card = __dev->card;
+ u32 maxblks = host->max_phys_segs;
+ u32 leftblks, totalblks = count;
+ ssize_t ret = 0;
+
+ mutex_acquire(&host->lock);
+ if (mmc_switch_part(__dev)) {
+ ret = ERR_IO;
+ goto done;
+ }
+
+ do {
+ leftblks = ((count > maxblks) ? maxblks : count);
+ if (mmc_dev_bread(card, (unsigned long)block, leftblks, buf)) {
+ ret = ERR_IO;
+ goto done;
+ }
+ block += leftblks;
+ buf += maxblks * dev->block_size;
+ count -= leftblks;
+ } while (count);
+
+ if (dev->block_size * totalblks > 0x7fffffffU)
+ /* ssize_t is defined as signed, should take a look here */
+ dprintf(CRITICAL, "[MSDC] %s: WARN! The return size is overflow! 0x%lx\n",
+ __func__, dev->block_size * totalblks);
+
+done:
+ mutex_release(&host->lock);
+ return ret ? ret : (ssize_t)dev->block_size * totalblks;
+}
+
+static ssize_t mmc_block_write(struct bdev *dev, const void *buf, bnum_t block,
+ uint count)
+{
+ mmc_dev_t *__dev = (mmc_dev_t *)dev;
+ struct mmc_host *host = __dev->host;
+ struct mmc_card *card = __dev->card;
+ u32 maxblks = host->max_phys_segs;
+ u32 leftblks, totalblks = count;
+ ssize_t ret = 0;
+
+ mutex_acquire(&host->lock);
+ if (mmc_switch_part(__dev)) {
+ ret = ERR_IO;
+ goto done;
+ }
+
+ do {
+ leftblks = ((count > maxblks) ? maxblks : count);
+ if (mmc_dev_bwrite(card, (unsigned long)block, leftblks, buf)) {
+ ret = ERR_IO;
+ goto done;
+ }
+ block += leftblks;
+ buf = (u8 *)buf + maxblks * dev->block_size;
+ count -= leftblks;
+ } while (count);
+
+ if (dev->block_size * totalblks > 0x7fffffffU)
+ /* ssize_t is defined as signed, should take a look here */
+ dprintf(CRITICAL, "[MSDC] %s: WARN! The return size is overflow! 0x%lx\n",
+ __func__, dev->block_size * totalblks);
+
+done:
+ mutex_release(&host->lock);
+ return ret ? ret: (ssize_t)dev->block_size * totalblks;
+}
+
+static ssize_t mmc_wrap_erase(struct bdev *bdev, off_t offset, size_t len)
+{
+ mmc_dev_t *dev = (mmc_dev_t *)bdev;
+ struct mmc_host *host = dev->host;
+ ssize_t ret = 0;
+
+ mutex_acquire(&host->lock);
+ if (mmc_switch_part(dev)) {
+ ret = ERR_IO;
+ goto done;
+ }
+
+ /* ATTENTION:
+ * We use TRIM here, which is block-based(512B) wipping,
+ * If using ERASE here, please ensure the offset & size are
+ * erase-group aligned,
+ * OTHERWISE, some valid data may be wiped. refer to JEDEC spec:
+ * The Device will ignore all LSB's below the Erase Group size,
+ * effectively ROUNDING the address DOWN to the Erase Group boundary. */
+ ASSERT(dev && len);
+ if ((offset % MMC_BLOCK_SIZE) || (len % MMC_BLOCK_SIZE)) {
+ dprintf(CRITICAL, "%s: offset(0x%llx)/len(%lu) is not block-aligned!\n",
+ __func__, offset, len);
+ ret = ERR_IO;
+ goto done;
+ }
+
+ ASSERT(dev->card);
+ if (mmc_do_trim(dev->card, offset, len)) {
+ ret = ERR_IO;
+ goto done;
+ }
+
+done:
+ mutex_release(&host->lock);
+ return ret ? ret: (ssize_t)len;
+}
+
+static ssize_t mmc_rpmb_dummy_read(struct bdev *dev, void *buf, bnum_t block,
+ uint count)
+{
+ return 0;
+}
+
+static ssize_t mmc_rpmb_dummy_write(struct bdev *dev, const void *buf, bnum_t block,
+ uint count)
+{
+ return 0;
+}
+
+static ssize_t mmc_rpmb_dummy_erase(struct bdev *bdev, off_t offset, size_t len)
+{
+ return 0;
+}
+
+static int mmc_set_block_count(struct mmc_host *host, unsigned int blockcount,
+ bool is_rel_write)
+{
+ struct mmc_command cmd = {0};
+
+ cmd.opcode = MMC_CMD_SET_BLOCK_COUNT;
+ cmd.arg = blockcount & 0x0000FFFF;
+ if (is_rel_write)
+ cmd.arg |= 1 << 31;
+ cmd.rsptyp = RESP_R1;
+
+ return mmc_cmd(host, &cmd);
+}
+
/*
 * Execute one raw MMC command (optionally with a DMA data phase) on behalf
 * of an RPMB ioctl. Auto-command is disabled for the duration so CMD23 can
 * be issued explicitly; the previous autocmd setting is restored on exit.
 * arg->flags must be one of the RESP_NONE..RESP_R1B enum values.
 */
static int mmc_rpmb_ioctl_cmd(struct bdev *dev, struct mmc_ioc_cmd *arg)
{
    mmc_dev_t *__dev = (mmc_dev_t *)dev;
    struct mmc_host *host = __dev->host;
    //struct mmc_card *card = __dev->card;
    struct mmc_command cmd = {0};
    struct mmc_data data = {0};
    addr_t base = host->base;   /* used implicitly by the MSDC_* register macros */
    int ret = 0;
    int old_autocmd = msdc_get_autocmd(host);

    msdc_set_autocmd(host, 0);
    cmd.opcode = arg->opcode;
    cmd.arg = arg->arg;
    cmd.rsptyp = arg->flags; /* arg->flags must be type of enum of RESP_NONE ~ RESP_R1B */

    if (arg->blocks) {
        /* CMD23 first: bit 31 of write_flag requests a reliable write */
        ret = mmc_set_block_count(host, arg->blocks,
                                  arg->write_flag & (1 << 31));
        if (ret != MMC_ERR_NONE) {
            dprintf(CRITICAL, "mmc cmd23 failed!\n");
            goto out;
        }
    }

    if (arg->blocks) {
        /* data phase: program block count/size, send the command, then DMA */
        MSDC_DMA_ON;
        MSDC_WRITE32(SDC_BLK_NUM, arg->blocks);
        host->blklen = 512;
        msdc_set_timeout(host, 100000000, 0);
        ret = mmc_cmd(host, &cmd);
        if (ret != MMC_ERR_NONE) {
            dprintf(CRITICAL, "mmc cmd failed\n");
            goto out;
        }

        data.cmd = &cmd;
        data.blks = arg->blocks;
        data.buf = (uchar *)arg->data_ptr;
        data.timeout = 100;
        ret = msdc_dma_transfer(host, &data);
        MSDC_DMA_OFF;

    } else {
        /* command-only ioctl */
        ret = mmc_cmd(host, &cmd);
    }

out:
    msdc_set_autocmd(host, old_autocmd);
    return ret;
}
+
+static int mmc_rpmb_ioctl(struct bdev *dev, int request, void *argp)
+{
+ mmc_dev_t *__dev = (mmc_dev_t *)dev;
+ struct mmc_host *host = __dev->host;
+ int ret = 0;
+
+ mutex_acquire(&host->lock);
+ if (mmc_switch_part(__dev)) {
+ ret = ERR_IO;
+ goto done;
+ }
+
+ switch (request) {
+ case MMC_IOC_CMD:
+ ret = mmc_rpmb_ioctl_cmd(dev, (struct mmc_ioc_cmd *)argp);
+ break;
+ default:
+ ret = ERR_INVALID_ARGS;
+ break;
+ }
+
+done:
+ mutex_release(&host->lock);
+ return ret;
+}
+
/*
 * Stage-1 memory-card init: negotiate the operating voltage, reset the
 * card to idle, and fire the first operation-condition query.
 * Returns MMC_ERR_NONE, MMC_ERR_RETRY (stage 2 must keep polling
 * SEND_OP_COND -- not a failure), or an error code.
 */
static int mmc_init_mem_card_stage1(struct mmc_host *host,
                                    struct mmc_card *card, u32 ocr)
{
    int err;

    /*
     * Sanity check the voltages that the card claims to
     * support.
     */
    if (ocr & 0x7F)
        ocr &= ~0x7F;

    ocr = host->ocr = mmc_select_voltage(host, ocr);

    /*
     * Can we support the voltage(s) of the card(s)?
     */
    if (!host->ocr) {
        err = MMC_ERR_FAILED;
        goto out;
    }

    err = mmc_go_idle(host);
    if (err != MMC_ERR_NONE) {
        dprintf(CRITICAL, "[eMMC] Fail in GO_IDLE_STATE cmd\n");
        goto out;
    }

    /* send interface condition */
    /* NOTE(review): the result of mmc_send_if_cond is overwritten by the
     * op-cond call below -- presumably intentional (CMD8 may legally fail
     * on older cards); confirm. */
    if (mmc_card_sd(card))
        err = mmc_send_if_cond(host, ocr);

    /* host support HCS[30] */
    ocr |= (1 << 30);

    /* send operation condition */
    if (mmc_card_sd(card)) {
        err = mmc_send_app_op_cond_once(host, ocr, &card->ocr);
    } else {
        /* The extra bit indicates that we support high capacity */
        err = mmc_send_op_cond_once(host, ocr, &card->ocr);
    }

out:
    /* MMC_ERR_RETRY is not error */
    return err;
}
+
/*
 * Stage-2 memory-card init: finish the op-cond handshake (when
 * retry_opcond, keep polling until the card is ready), run the
 * CID/RCA/CSD identification sequence, select the card, and bring the
 * bus up to the fastest supported mode (HS400 -> HS200 -> High-Speed,
 * falling back on any tuning failure). Finally derive blklen/nblks and
 * mark the card ready.
 */
static int mmc_init_mem_card_stage2(struct mmc_host *host,
                                    struct mmc_card *card, bool retry_opcond)
{
    int err = MMC_ERR_NONE;
    u32 ocr = host->ocr;

    /* host support HCS[30] */
    ocr |= (1 << 30);

    if (retry_opcond) {
        /* send operation condition */
        if (mmc_card_sd(card)) {
            err = mmc_send_app_op_cond(host, ocr, &card->ocr);
        } else {
            /* The extra bit indicates that we support high capacity */
            err = mmc_send_op_cond(host, ocr, &card->ocr);
        }
    }

    if (err != MMC_ERR_NONE) {
        dprintf(INFO, "Fail in SEND_OP_COND cmd\n");
        goto out;
    }

    /* set hcs bit if a high-capacity card */
    card->state |= ((card->ocr >> 30) & 0x1) ? MMC_STATE_HIGHCAPS : 0;
    /* send cid */
    err = mmc_all_send_cid(host);
    if (err != MMC_ERR_NONE) {
        dprintf(CRITICAL, "[eMMC] Fail in SEND_CID cmd\n");
        goto out;
    }

    /* assign a rca */
    card->rca = 0x1;

    /* set/send rca */
    err = mmc_send_relative_addr(host, card, &card->rca);
    if (err != MMC_ERR_NONE) {
        dprintf(CRITICAL, "[eMMC] Fail in SEND_RCA cmd\n");
        goto out;
    }

    /* send csd */
    err = mmc_read_csds(host, card);
    if (err != MMC_ERR_NONE) {
        dprintf(CRITICAL, "[eMMC] Fail in SEND_CSD cmd\n");
        goto out;
    }
    mmc_decode_csd(card);

    /* select this card */
    err = mmc_select_card(host, card);
    if (err != MMC_ERR_NONE) {
        dprintf(CRITICAL, "[eMMC] Fail in select card cmd\n");
        goto out;
    }

    if (mmc_card_sd(card)) {
        /* set bus width */
        mmc_set_bus_width(host, card, HOST_BUS_WIDTH_4);
        /* compute bus speed: use default speed for SD */
        card->maxhz = 26000000;
        mmc_set_clock(host, card->state, card->maxhz);
    } else {

        /* send ext csd */
        err = mmc_read_ext_csd(host, card);
        if (err != MMC_ERR_NONE) {
            dprintf(CRITICAL, "[eMMC] Fail in SEND_EXT_CSD cmd\n");
            goto out;
        }
        /* Try the HS400 ladder (HS200 -> tuning -> HS400); any failure
         * falls through to the plain high-speed path at select_hs. */
        if (host->caps & MMC_CAP_EMMC_HS200 && host->caps & MMC_CAP_EMMC_HS400) {
            if (card->ext_csd.hs400_support) {
                err = mmc_select_hs200(card);
                if (err != MMC_ERR_NONE)
                    goto select_hs;
                err = mmc_hs200_tuning(card);
                if (err != MMC_ERR_NONE)
                    goto select_hs;
                err = mmc_select_hs400(card);
                if (err != MMC_ERR_NONE)
                    goto select_hs;
                else
                    goto card_init_done;
            }
        }

select_hs:
        /* activate high speed (if supported) */
        if ((card->ext_csd.hs_max_dtr != 0) && (host->caps & MMC_CAP_MMC_HIGHSPEED)) {
            mmc_set_clock(host, 0, host->f_min);
            err = mmc_switch(host, card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, 1);
            if (err == MMC_ERR_NONE) {
                dprintf(INFO, "[eMMC] Switched to High-Speed mode!\n");
                mmc_card_clear_hs200(card);
                mmc_card_clear_hs400(card);
                mmc_card_clear_ddr(card);
                mmc_card_set_highspeed(card);
                mmc_set_clock(host, card->state, 50000000);
                /* set bus width */
                mmc_set_bus_width(host, card, HOST_BUS_WIDTH_8);
            }
        }

card_init_done:
        /* compute bus speed: clamp to what the selected mode allows */
        card->maxhz = (unsigned int)-1;

        if (mmc_card_highspeed(card) || mmc_card_hs400(card)) {
            if (card->maxhz > card->ext_csd.hs_max_dtr)
                card->maxhz = card->ext_csd.hs_max_dtr;
        } else if (card->maxhz > card->csd.max_dtr) {
            card->maxhz = card->csd.max_dtr;
        }
    }

    if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
        /* The EXT_CSD sector count is in number or 512 byte sectors. */
        card->blklen = MMC_BLOCK_SIZE;
        card->nblks = card->ext_csd.sectors;
    } else {
        /* The CSD capacity field is in units of read_blkbits.
         * set_capacity takes units of 512 bytes.
         */
        card->blklen = MMC_BLOCK_SIZE;
        host->blklen = MMC_BLOCK_SIZE;
        card->nblks = card->csd.capacity << (card->csd.read_blkbits - 9);
    }

    dprintf(CRITICAL,"[eMMC/SD] Size: %d MB, Max.Speed: %d kHz, blklen(%d), nblks(%d), ro(%d)\n",
            ((card->nblks / 1024) * card->blklen) / 1024 , card->maxhz / 1000,
            card->blklen, card->nblks, mmc_card_readonly(card));

    card->ready = 1;

    dprintf(INFO, "[eMMC/SD] Initialized\n");
out:
    return err;
}
+
/*
 * Stage-1 card init: reset the card and detect its type, then hand off
 * to mmc_init_mem_card_stage1 for voltage negotiation.
 * A non-zero host_id probes SD first (CMD8 + ACMD41) and falls back to
 * MMC (CMD1); host 0 probes MMC only.
 * Returns MMC_ERR_NONE, MMC_ERR_RETRY (caller must re-poll op-cond in
 * stage 2), or an error code.
 */
static int mmc_init_card_stage1(struct mmc_host *host, struct mmc_card *card)
{
    int err;
    u32 ocr;

    dprintf(INFO, "[%s]: start\n", __func__);
    memset(card, 0, sizeof(struct mmc_card));

    mmc_card_set_present(card);
    mmc_card_set_host(card, host);
    mmc_card_set_unknown(card);

    err = mmc_go_idle(host);
    if (err != MMC_ERR_NONE) {
        dprintf(CRITICAL, "[eMMC] Fail in GO_IDLE_STATE cmd\n");
        goto out;
    }

    /* send interface condition */
    if (host->host_id)
        mmc_send_if_cond(host, host->ocr_avail);

    /* query operation condition */

    if (host->host_id) {
        /* SD first; if ACMD41 gets no answer, try MMC's CMD1 */
        err = mmc_send_app_op_cond(host, 0, &ocr);
        if (err != MMC_ERR_NONE) {
            err = mmc_send_op_cond(host, 0, &ocr);
            if (err != MMC_ERR_NONE) {
                dprintf(INFO, "Fail in MMC_CMD_SEND_OP_COND/SD_ACMD_SEND_OP_COND cmd\n");
                goto out;
            }
            mmc_card_set_mmc(card);
        } else {
            mmc_card_set_sd(card);
        }
    } else {
        /* host 0 is the eMMC slot: MMC only */
        err = mmc_send_op_cond(host, 0, &ocr);
        if (err != MMC_ERR_NONE) {
            dprintf(INFO, "Fail in MMC_CMD_SEND_OP_COND/SD_ACMD_SEND_OP_COND cmd\n");
            goto out;
        }
        mmc_card_set_mmc(card);
    }

    host->card = card;
    err = mmc_init_mem_card_stage1(host, card, ocr);

out:
    return err;
}
+
+static int mmc_init_card_stage2(struct mmc_host *host, struct mmc_card *card,
+ bool retry_opcond)
+{
+ int err;
+
+ err = mmc_init_mem_card_stage2(host, card, retry_opcond);
+ if (err) {
+ dprintf(CRITICAL, "[%s]: failed, err=%d\n", __func__, err);
+ return err;
+ }
+ host->card = card;
+ dprintf(INFO, "[%s]: finish successfully\n",__func__);
+ return 0;
+}
+
/* Initialize the host lock, then bring up the MSDC controller hardware. */
static inline int mmc_init_host(struct mmc_host *host)
{
    mutex_init(&host->lock);
    return msdc_init(host);
}
+
+static void mmc_bio_ops(const void *name, const int part_id, const int nblks,
+ struct mmc_host *host, struct mmc_card *card)
+{
+ mmc_dev_t *dev;
+
+ dev = malloc(sizeof(mmc_dev_t));
+ /* malloc fail */
+ ASSERT(dev);
+ /* construct the block device */
+ memset(dev, 0, sizeof(mmc_dev_t));
+
+ /* setup partition id*/
+ dev->part_id = part_id;
+ /* setup host */
+ dev->host = host;
+ /* setup card */
+ dev->card = card;
+ /* bio block device register */
+ bio_initialize_bdev(&dev->bdev, name,
+ card->blklen, nblks,
+ 0, NULL, BIO_FLAGS_NONE);
+ /* override our block device hooks */
+ if (part_id == EXT_CSD_PART_CFG_RPMB_PART) {
+ dev->bdev.read_block = mmc_rpmb_dummy_read;
+ dev->bdev.write_block = mmc_rpmb_dummy_write;
+ dev->bdev.erase = mmc_rpmb_dummy_erase;
+ dev->bdev.ioctl = mmc_rpmb_ioctl;
+ } else {
+ dev->bdev.read_block = mmc_block_read;
+ dev->bdev.write_block = mmc_block_write;
+ dev->bdev.erase = mmc_wrap_erase;
+ }
+ bio_register_device(&dev->bdev);
+ partition_publish(dev->bdev.name, 0x0);
+}
+
+struct mmc_card *emmc_init_stage1(bool *retry_opcond)
+{
+ int err = MMC_ERR_NONE;
+ struct mmc_host *host;
+ struct mmc_card *card;
+
+ host = &msdc_host0;
+ /* construct the block device */
+ memset(host, 0, sizeof(struct mmc_host));
+ host->host_id = 0;
+
+ card = &emmc_card;
+ /* construct the block device */
+ memset(card, 0, sizeof(struct mmc_card));
+
+ err = mmc_init_host(host);
+
+ if (err == MMC_ERR_NONE)
+ err = mmc_init_card_stage1(host, card);
+
+ if (err && err != MMC_ERR_RETRY) {
+ dprintf(CRITICAL, "failed in %s \n", __func__);
+ return NULL;
+ } else if (err == MMC_ERR_RETRY) {
+ *retry_opcond = true;
+ } else {
+ *retry_opcond = false;
+ }
+
+ return card;
+}
+
+int emmc_init_stage2(struct mmc_card *card, bool retry_opcond)
+{
+ int err = MMC_ERR_NONE;
+ struct mmc_host *host;
+ int boot_part_nblks = 0;
+ int rpmb_part_nblks = 0;
+
+ host = card->host;
+ err = mmc_init_card_stage2(host, card, retry_opcond);
+ /* mmc init fail */
+ ASSERT(err == MMC_ERR_NONE);
+
+ err = emmc_boot_prepare(card);
+ ASSERT(err == MMC_ERR_NONE);
+
+ mmc_bio_ops("mmc0", EXT_CSD_PART_CFG_DEFT_PART, card->nblks, host, card);
+ boot_part_nblks = card->ext_csd.boot_part_sz/card->blklen;
+ mmc_bio_ops("mmc0boot0", EXT_CSD_PART_CFG_BOOT_PART_1, boot_part_nblks,
+ host, card);
+ mmc_bio_ops("mmc0boot1", EXT_CSD_PART_CFG_BOOT_PART_2, boot_part_nblks,
+ host, card);
+ rpmb_part_nblks = card->ext_csd.rpmb_sz/card->blklen;
+ mmc_bio_ops("mmc0rpmb", EXT_CSD_PART_CFG_RPMB_PART, rpmb_part_nblks,
+ host, card);
+
+ return err;
+}
+
+int sdmmc_init(u8 host_id)
+{
+ int err = MMC_ERR_NONE;
+ struct mmc_host *host;
+ struct mmc_card *card;
+ bool retry_opcond;
+
+ printf("%s enter\n", __func__);
+
+ host = malloc(sizeof(struct mmc_host));
+ /* malloc fail */
+ if (!host) {
+ dprintf(INFO, "Failed to malloc host!\n");
+ err = -ENOMEM;
+ goto end;
+ }
+ /* construct the block device */
+ memset(host, 0, sizeof(struct mmc_host));
+ host->host_id = host_id;
+
+ card = malloc(sizeof(struct mmc_card));
+ /* malloc fail */
+ if (!card) {
+ dprintf(INFO, "Failed to malloc card!\n");
+ free(host);
+ err = -ENOMEM;
+ goto end;
+ }
+ /* construct the block device */
+ memset(card, 0, sizeof(struct mmc_card));
+
+ err = mmc_init_host(host);
+
+ if (err == MMC_ERR_NONE)
+ err = mmc_init_card_stage1(host, card);
+ /* mmc init fail */
+ if (err && err != MMC_ERR_RETRY) {
+ dprintf(INFO, "mmc_init_card fail!\n");
+ free(host);
+ free(card);
+ goto end;
+ } else if (err == MMC_ERR_RETRY) {
+ retry_opcond = true;
+ } else {
+ retry_opcond = false;
+ }
+
+ err = mmc_init_card_stage2(host, card, retry_opcond);
+ if (err != MMC_ERR_NONE) {
+ dprintf(INFO, "mmc_init_card fail!\n");
+ free(host);
+ free(card);
+ goto end;
+ }
+ mmc_bio_ops("sdmmc1", EXT_CSD_PART_CFG_DEFT_PART, card->nblks, host, card);
+
+end:
+ return err;
+}
diff --git a/src/bsp/lk/platform/mt8512/drivers/mmc/msdc.c b/src/bsp/lk/platform/mt8512/drivers/mmc/msdc.c
new file mode 100644
index 0000000..a5b57eb
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/mmc/msdc.c
@@ -0,0 +1,2037 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#define MSDC_DEBUG_KICKOFF
+
+#include <platform/msdc.h>
+#include <platform/mmc_core.h>
+#include <kernel/event.h>
+#include <kernel/vm.h>
+#include <platform/interrupts.h>
+#include <platform/mt_irq.h>
+#include <string.h>
+#include <assert.h>
+
+#define CMD_RETRIES (5)
+#define CMD_TIMEOUT (100) /* 100ms */
+
+#define PERI_MSDC_SRCSEL (0xc100000c)
+
+/* Tuning Parameter */
+#define DEFAULT_DEBOUNCE (8) /* 8 cycles */
+#define DEFAULT_DTOC (40) /* data timeout counter. 65536x40 sclk. */
+#define DEFAULT_WDOD (0) /* write data output delay. no delay. */
+#define DEFAULT_BSYDLY (8) /* card busy delay. 8 extend sclk */
+
+/* Declarations */
+static int msdc_send_cmd(struct mmc_host *host, struct mmc_command *cmd);
+static int msdc_wait_cmd_done(struct mmc_host *host, struct mmc_command *cmd);
+static int msdc_tune_cmdrsp(struct mmc_host *host, struct mmc_command *cmd);
+
/* Per-host MSDC tuning state cached by the driver. */
typedef struct {
    int autocmd;    /* active auto-command setting (see msdc_set_autocmd) */
    int rdsmpl;     /* read-data sample edge (see msdc_set_smpl) */
    int wdsmpl;     /* write-data/CRC-status sample edge */
    int rsmpl;      /* command-response sample edge */
    int start_bit;  /* start-bit detection mode (see msdc_set_startbit) */
} msdc_priv_t;
+
/* Map of the RESP_* enum to the controller's response-type field value. */
static int msdc_rsp[] = {
    0, /* RESP_NONE */
    1, /* RESP_R1 */
    2, /* RESP_R2 */
    3, /* RESP_R3 */
    4, /* RESP_R4 */
    1, /* RESP_R5 */
    1, /* RESP_R6 */
    1, /* RESP_R7 */
    7, /* RESP_R1b */
};
+
/*
 * Board-level capability description per MSDC host, indexed by host_id:
 * entry 0 is the 8-bit eMMC host, entry 1 the 4-bit host.
 */
struct msdc_cust {
    unsigned char clk_src;     /* host clock source */
    unsigned char hclk_src;    /* host clock source */
    unsigned char cmd_edge;    /* command latch edge */
    unsigned char data_edge;   /* data latch edge */
#define MSDC_SMPL_RISING (0)
#define MSDC_SMPL_FALLING (1)
#define MSDC_SMPL_SEPERATE (2)
    unsigned char clk_drv;     /* clock pad driving */
    unsigned char cmd_drv;     /* command pad driving */
    unsigned char dat_drv;     /* data pad driving */
    unsigned char rst_drv;     /* reset pin pad driving */
    unsigned char ds_drv;      /* ds pad driving */
    unsigned char data_pins;   /* data pins */
    unsigned int data_offset;  /* data address offset */
    unsigned int flags;        /* hardware capability flags */
#define MSDC_CD_PIN_EN (1 << 0)      /* card detection pin is wired */
#define MSDC_WP_PIN_EN (1 << 1)      /* write protection pin is wired */
#define MSDC_RST_PIN_EN (1 << 2)     /* emmc reset pin is wired */
#define MSDC_SDIO_IRQ (1 << 3)       /* use internal sdio irq (bus) */
#define MSDC_EXT_SDIO_IRQ (1 << 4)   /* use external sdio irq */
#define MSDC_REMOVABLE (1 << 5)      /* removable slot */
#define MSDC_SYS_SUSPEND (1 << 6)    /* suspended by system */
#define MSDC_HIGHSPEED (1 << 7)      /* high-speed mode support */
#define MSDC_UHS1 (1 << 8)           /* uhs-1 mode support */
#define MSDC_DDR (1 << 9)            /* ddr mode support */
#define MSDC_HS200 (1 << 10)         /* hs200 mode support(eMMC4.5) */
#define MSDC_HS400 (1 << 11)         /* hs200 mode support(eMMC5.0) */
} msdc_cap[2] = {
    {
        MSDC50_CLKSRC_DEFAULT,       /* host clock source */
        MSDC50_CLKSRC4HCLK_273MHZ,   /* host clock source */
        MSDC_SMPL_RISING,            /* command latch edge */
        MSDC_SMPL_RISING,            /* data latch edge */
        MSDC_DRVN_GEAR2,             /* clock pad driving */
        MSDC_DRVN_GEAR2,             /* command pad driving */
        MSDC_DRVN_GEAR2,             /* data pad driving */
        MSDC_DRVN_GEAR2,             /* rst pad driving */
        MSDC_DRVN_GEAR2,             /* ds pad driving */
        8,                           /* data pins */
        0,                           /* data address offset */
        MSDC_HIGHSPEED
    },

    {
        MSDC50_CLKSRC_DEFAULT,       /* host clock source */
        MSDC50_CLKSRC4HCLK_273MHZ,   /* host clock source */
        MSDC_SMPL_RISING,            /* command latch edge */
        MSDC_SMPL_RISING,            /* data latch edge */
        MSDC_DRVN_GEAR2,             /* clock pad driving */
        MSDC_DRVN_GEAR2,             /* command pad driving */
        MSDC_DRVN_GEAR2,             /* data pad driving */
        MSDC_DRVN_GEAR2,             /* rst pad driving */
        MSDC_DRVN_GEAR2,             /* ds pad driving */
        4,                           /* data pins */
        0,                           /* data address offset */
        MSDC_HIGHSPEED
    },
};
+
/* NOTE(review): presumably signalled by the MSDC IRQ handler with the
 * raw interrupt status latched into g_int_status -- the handler is
 * defined later in this file; confirm. */
static event_t msdc_int_event;
static u32 g_int_status = 0;
static msdc_priv_t msdc_priv;   /* single shared tuning-state instance */
+
+#ifndef FPGA_PLATFORM
/* Source-clock frequency table in Hz; assumed indexed by the clock-source
 * selector (clk_src) -- TODO confirm against the clock tree. */
static u32 hclks_msdc30[] = { 26000000, 208000000, 200000000, 156000000,
                              182000000, 156000000, 178280000
                            };
+/* add function for MSDC_PAD_CTL handle */
/* Schmitt-trigger pad setup: intentionally a no-op on this platform. */
static void msdc_set_smt(struct mmc_host *host, int smt)
{
    return;
}
+
/* pull up means that host driver the line to HIGH
 * pull down means that host driver the line to LOW */
static void msdc_pin_set(struct mmc_host *host)
{
    /* driver CLK/DAT pin */
    ASSERT(host);

    /* NOTE(review): bit 18 is the only one set to 0 here -- presumably
     * the CLK line, which must be pulled down; confirm against the
     * GPIO_PULLSEL2 bit map. */
    MSDC_SET_FIELD(GPIO_PULLSEL2, 0x1 << 22, 1);
    MSDC_SET_FIELD(GPIO_PULLSEL2, 0x1 << 21, 1);
    MSDC_SET_FIELD(GPIO_PULLSEL2, 0x1 << 20, 1);
    MSDC_SET_FIELD(GPIO_PULLSEL2, 0x1 << 19, 1);
    MSDC_SET_FIELD(GPIO_PULLSEL2, 0x1 << 18, 0);
    MSDC_SET_FIELD(GPIO_PULLSEL2, 0x1 << 17, 1);
    MSDC_SET_FIELD(GPIO_PULLSEL2, 0x1 << 16, 1);
    MSDC_SET_FIELD(GPIO_PULLSEL2, 0x1 << 15, 1);
    MSDC_SET_FIELD(GPIO_PULLSEL2, 0x1 << 14, 1);
    MSDC_SET_FIELD(GPIO_PULLSEL2, 0x1 << 13, 1);
    MSDC_SET_FIELD(GPIO_PULLSEL2, 0x1 << 12, 1);
}
+
+/* host can modify from 0-7 */
+static void msdc_set_driving(struct mmc_host *host, struct msdc_cust *msdc_cap)
+{
+ ASSERT(host);
+ ASSERT(msdc_cap);
+
+ if (host && msdc_cap) {
+ /*
+ * 0x10005740[19:16] --> PAD_MSDC0_DAT
+ * 0x10005750[7:4] --> PAD_MSDC0_CMD
+ * 0x10005750[11:8] --> PAD_MSDC0_CLK
+ *
+ */
+ MSDC_SET_FIELD(DRV4_CFG_ADDR, (0x7 << 8), msdc_cap->clk_drv); //CLK[11:8]
+ MSDC_SET_FIELD(DRV4_CFG_ADDR, (0x7 << 4), msdc_cap->cmd_drv); //CMD[7:4]
+ MSDC_SET_FIELD(DRV3_CFG_ADDR, (0x7 << 16), msdc_cap->dat_drv); //DAT[19:16]
+ }
+}
+
/* Mux the MSDC0 pins (CLK/CMD/RSTB/DAT0-7) to MSDC function (mode 1). */
static void msdc_set_pin_mode(struct mmc_host *host)
{
    ASSERT(host);
    /*
     * GPIO86---0x260[20:18]=1---MSDC0_DAT0
     * GPIO85---0x260[17:15]=1---MSDC0_DAT1
     * GPIO84---0x260[14:12]=1---MSDC0_DAT2
     * GPIO83---0x260[11:9]=1---MSDC0_DAT3
     * GPIO82---0x260[8:6]=1---MSDC0_CLK
     * GPIO81---0x260[5:3]=1---MSDC0_CMD
     * GPIO80---0x260[2:0]=1---MSDC0_RSTB
     * GPIO79---0x250[29:27]=1---MSDC0_DAT4
     * GPIO78---0x250[26:24]=1---MSDC0_DAT5
     * GPIO77---0x250[23:21]=1---MSDC0_DAT6
     * GPIO76---0x250[20:18]=1---MSDC0_DAT7
     */
    /* gpio register default value is ok, but We still set it */

    MSDC_SET_FIELD(GPIO_MODE7_ADDR, (0x7 << 27), 1);
    MSDC_SET_FIELD(GPIO_MODE7_ADDR, (0x7 << 24), 1);
    MSDC_SET_FIELD(GPIO_MODE7_ADDR, (0x7 << 21), 1);
    MSDC_SET_FIELD(GPIO_MODE7_ADDR, (0x7 << 18), 1);
    MSDC_SET_FIELD(GPIO_MODE8_ADDR, (0x7 << 18), 1);
    MSDC_SET_FIELD(GPIO_MODE8_ADDR, (0x7 << 15), 1);
    MSDC_SET_FIELD(GPIO_MODE8_ADDR, (0x7 << 12), 1);
    MSDC_SET_FIELD(GPIO_MODE8_ADDR, (0x7 << 9), 1);
    MSDC_SET_FIELD(GPIO_MODE8_ADDR, (0x7 << 6), 1);
    MSDC_SET_FIELD(GPIO_MODE8_ADDR, (0x7 << 3), 1);
    MSDC_SET_FIELD(GPIO_MODE8_ADDR, (0x7 << 0), 1);
}
+
+static void msdc_gpio_and_pad_init(struct mmc_host *host)
+{
+ /*set smt enable*/
+ msdc_set_smt(host, 1);
+
+ /*set pupd enable*/
+ msdc_pin_set(host);
+
+ /* set gpio to msdc mode*/
+ msdc_set_pin_mode(host);
+
+ /*set driving*/
+ msdc_set_driving(host, &msdc_cap[host->host_id]);
+
+}
+#endif
+
+#ifndef FPGA_PLATFORM
+
/*
 * Stand-in for the real PMIC register-access routine: reports success
 * without touching any hardware.
 */
static int pmic_config_interface(int reg, int val, int mask, int shift)
{
    (void)reg;
    (void)val;
    (void)mask;
    (void)shift;
    return 0;
}
+
+static void msdc_set_card_pwr(struct mmc_host *host, int on)
+{
+ unsigned int ret;
+
+ ret = pmic_config_interface(0xAB,0x7,0x7,4); /* VMCH=3.3V */
+
+ if (ret == 0) {
+ if (on) {
+ ret = pmic_config_interface(0xAB,0x1,0x1,0); /* VMCH_EN=1 */
+ } else {
+ ret = pmic_config_interface(0xAB,0x0,0x1,0); /* VMCH_EN=0 */
+ }
+ }
+
+ if (ret != 0) {
+ dprintf(CRITICAL, "PMIC: Set MSDC Host Power Fail\n");
+ } else {
+ spin(3000);
+ }
+}
+
+static void msdc_set_host_pwr(struct mmc_host *host, int on)
+{
+ unsigned int ret;
+
+ ret = pmic_config_interface(0xA7,0x7,0x7,4); /* VMC=3.3V */
+
+ if (ret == 0) {
+ if (on) {
+ ret = pmic_config_interface(0xA7,0x1,0x1,0); /* VMC_EN=1 */
+ } else {
+ ret = pmic_config_interface(0xA7,0x0,0x1,0); /* VMC_EN=0 */
+ }
+ }
+
+ if (ret != 0) {
+ dprintf(CRITICAL, "PMIC: Set MSDC Card Power Fail\n");
+ }
+}
+#else
+#define PWR_GPIO (0x10001E84)
+#define PWR_GPIO_EO (0x10001E88)
+
+#define PWR_MASK_EN (0x1 << 8)
+#define PWR_MASK_VOL_18 (0x1 << 9)
+#define PWR_MASK_VOL_33 (0x1 << 10)
+#define PWR_MASK_L4 (0x1 << 11)
+#define PWR_MSDC_DIR (PWR_MASK_EN | PWR_MASK_VOL_18 | PWR_MASK_VOL_33 | PWR_MASK_L4)
+
+#define MSDC0_PWR_MASK_EN (0x1 << 12)
+#define MSDC0_PWR_MASK_VOL_18 (0x1 << 13)
+#define MSDC0_PWR_MASK_VOL_33 (0x1 << 14)
+#define MSDC0_PWR_MASK_L4 (0x1 << 15)
+#define MSDC0_PWR_MSDC_DIR (MSDC0_PWR_MASK_EN | MSDC0_PWR_MASK_VOL_18 | MSDC0_PWR_MASK_VOL_33 | MSDC0_PWR_MASK_L4)
+
+static void msdc_clr_gpio(u32 bits)
+{
+ switch (bits) {
+ case MSDC0_PWR_MASK_EN:
+ /* check for set before */
+ MSDC_SET_FIELD(PWR_GPIO, (MSDC0_PWR_MASK_EN),0);
+ MSDC_SET_FIELD(PWR_GPIO_EO, (MSDC0_PWR_MASK_EN),0);
+ break;
+ case MSDC0_PWR_MASK_VOL_18:
+ /* check for set before */
+ MSDC_SET_FIELD(PWR_GPIO, (MSDC0_PWR_MASK_VOL_18|MSDC0_PWR_MASK_VOL_33), 0);
+ MSDC_SET_FIELD(PWR_GPIO_EO, (MSDC0_PWR_MASK_VOL_18|MSDC0_PWR_MASK_VOL_33), 0);
+ break;
+ case MSDC0_PWR_MASK_VOL_33:
+ /* check for set before */
+ MSDC_SET_FIELD(PWR_GPIO, (MSDC0_PWR_MASK_VOL_18|MSDC0_PWR_MASK_VOL_33), 0);
+ MSDC_SET_FIELD(PWR_GPIO_EO, (MSDC0_PWR_MASK_VOL_18|MSDC0_PWR_MASK_VOL_33), 0);
+ break;
+ case MSDC0_PWR_MASK_L4:
+ /* check for set before */
+ MSDC_SET_FIELD(PWR_GPIO, MSDC0_PWR_MASK_L4, 0);
+ MSDC_SET_FIELD(PWR_GPIO_EO, MSDC0_PWR_MASK_L4, 0);
+ break;
+ case PWR_MASK_EN:
+ /* check for set before */
+ MSDC_SET_FIELD(PWR_GPIO, PWR_MASK_EN,0);
+ MSDC_SET_FIELD(PWR_GPIO_EO, PWR_MASK_EN,0);
+ break;
+ case PWR_MASK_VOL_18:
+ /* check for set before */
+ MSDC_SET_FIELD(PWR_GPIO, (PWR_MASK_VOL_18|PWR_MASK_VOL_33), 0);
+ MSDC_SET_FIELD(PWR_GPIO_EO, (PWR_MASK_VOL_18|PWR_MASK_VOL_33), 0);
+ break;
+ case PWR_MASK_VOL_33:
+ /* check for set before */
+ MSDC_SET_FIELD(PWR_GPIO, (PWR_MASK_VOL_18|PWR_MASK_VOL_33), 0);
+ MSDC_SET_FIELD(PWR_GPIO_EO, (PWR_MASK_VOL_18|PWR_MASK_VOL_33), 0);
+ break;
+ case PWR_MASK_L4:
+ /* check for set before */
+ MSDC_SET_FIELD(PWR_GPIO, PWR_MASK_L4, 0);
+ MSDC_SET_FIELD(PWR_GPIO_EO, PWR_MASK_L4, 0);
+ break;
+ default:
+ dprintf(CRITICAL, "[%s:%s]invalid value: 0x%x\n", __FILE__, __func__, bits);
+ break;
+ }
+}
+
/*
 * FPGA power control: set one power-control GPIO (value and output
 * enable). The two voltage selects share a single 2-bit field: value 1
 * selects 1.8V, value 2 selects 3.3V, so the VOL_* cases cannot be
 * merged (unlike msdc_clr_gpio).
 */
static void msdc_set_gpio(u32 bits)
{
    switch (bits) {
    case MSDC0_PWR_MASK_EN:
        /* check for set before */
        MSDC_SET_FIELD(PWR_GPIO, MSDC0_PWR_MASK_EN,1);
        MSDC_SET_FIELD(PWR_GPIO_EO, MSDC0_PWR_MASK_EN,1);
        break;
    case MSDC0_PWR_MASK_VOL_18:
        /* field value 1 -> 1.8V */
        MSDC_SET_FIELD(PWR_GPIO, (MSDC0_PWR_MASK_VOL_18|MSDC0_PWR_MASK_VOL_33), 1);
        MSDC_SET_FIELD(PWR_GPIO_EO, (MSDC0_PWR_MASK_VOL_18|MSDC0_PWR_MASK_VOL_33), 1);
        break;
    case MSDC0_PWR_MASK_VOL_33:
        /* field value 2 -> 3.3V */
        MSDC_SET_FIELD(PWR_GPIO, (MSDC0_PWR_MASK_VOL_18|MSDC0_PWR_MASK_VOL_33), 2);
        MSDC_SET_FIELD(PWR_GPIO_EO, (MSDC0_PWR_MASK_VOL_18|MSDC0_PWR_MASK_VOL_33), 2);
        break;
    case MSDC0_PWR_MASK_L4:
        /* check for set before */
        MSDC_SET_FIELD(PWR_GPIO, MSDC0_PWR_MASK_L4, 1);
        MSDC_SET_FIELD(PWR_GPIO_EO, MSDC0_PWR_MASK_L4, 1);
        break;
    case PWR_MASK_EN:
        /* check for set before */
        MSDC_SET_FIELD(PWR_GPIO, PWR_MASK_EN,1);
        MSDC_SET_FIELD(PWR_GPIO_EO, PWR_MASK_EN,1);
        break;
    case PWR_MASK_VOL_18:
        /* field value 1 -> 1.8V */
        MSDC_SET_FIELD(PWR_GPIO, (PWR_MASK_VOL_18|PWR_MASK_VOL_33), 1);
        MSDC_SET_FIELD(PWR_GPIO_EO, (PWR_MASK_VOL_18|PWR_MASK_VOL_33), 1);
        break;
    case PWR_MASK_VOL_33:
        /* field value 2 -> 3.3V */
        MSDC_SET_FIELD(PWR_GPIO, (PWR_MASK_VOL_18|PWR_MASK_VOL_33), 2);
        MSDC_SET_FIELD(PWR_GPIO_EO, (PWR_MASK_VOL_18|PWR_MASK_VOL_33), 2);
        break;
    case PWR_MASK_L4:
        /* check for set before */
        MSDC_SET_FIELD(PWR_GPIO, PWR_MASK_L4, 1);
        MSDC_SET_FIELD(PWR_GPIO_EO, PWR_MASK_L4, 1);
        break;
    default:
        dprintf(CRITICAL, "[%s:%s]invalid value: 0x%x\n", __FILE__, __func__, bits);
        break;
    }
}
+
+static void msdc_set_card_pwr(struct mmc_host *host, int on)
+{
+ if (on) {
+ msdc_set_gpio(MSDC0_PWR_MASK_VOL_18);
+ msdc_set_gpio(MSDC0_PWR_MASK_L4);
+ msdc_set_gpio(MSDC0_PWR_MASK_EN);
+ } else {
+ msdc_clr_gpio(MSDC0_PWR_MASK_EN);
+ msdc_clr_gpio(MSDC0_PWR_MASK_VOL_18);
+ msdc_clr_gpio(MSDC0_PWR_MASK_L4);
+ }
+ spin(10000);
+}
+
/*
 * FPGA build: select the host I/O voltage -- level != 0 -> 1.8V,
 * level == 0 -> 3.3V -- and close the L4 load switch.
 * Both clr calls below clear the same shared 2-bit voltage field
 * (see msdc_clr_gpio), so the second one is redundant but harmless.
 */
static void msdc_set_host_level_pwr(struct mmc_host *host, u32 level)
{
    msdc_clr_gpio(PWR_MASK_VOL_18);
    msdc_clr_gpio(PWR_MASK_VOL_33);

    if (level) {
        msdc_set_gpio(PWR_MASK_VOL_18);
    } else {
        msdc_set_gpio(PWR_MASK_VOL_33);
    }
    msdc_set_gpio(PWR_MASK_L4);
}
+
/* FPGA build: host power is fixed at the 3.3V level (level 0);
 * the `on` argument is ignored. */
static void msdc_set_host_pwr(struct mmc_host *host, int on)
{
    msdc_set_host_level_pwr(host, 0);
}
+#endif
+
/*
 * Program the start-bit detection mode (MSDC_CFG_START_BIT) and mirror
 * it into the cached driver state.
 */
static void msdc_set_startbit(struct mmc_host *host, u8 start_bit)
{
    addr_t base = host->base;   /* used implicitly by the MSDC_* register macros */
    msdc_priv_t *priv = (msdc_priv_t *)host->priv;

    /* set start bit */
    MSDC_SET_FIELD(MSDC_CFG, MSDC_CFG_START_BIT, start_bit);
    priv->start_bit = start_bit;
    dprintf(INFO, "start bit = %d, MSDC_CFG[0x%x]\n", start_bit, MSDC_READ32(MSDC_CFG));
}
+
+#define TYPE_CMD_RESP_EDGE (0)
+#define TYPE_WRITE_CRC_EDGE (1)
+#define TYPE_READ_DATA_EDGE (2)
+#define TYPE_WRITE_DATA_EDGE (3)
+
/*
 * Configure the latch/sample edge for one signal class:
 *   TYPE_CMD_RESP_EDGE   - command response sampling
 *   TYPE_WRITE_CRC_EDGE  - write CRC-status sampling (DS pin in HS400)
 *   TYPE_READ_DATA_EDGE  - read data sampling (also sets start-bit mode)
 *   TYPE_WRITE_DATA_EDGE - write data sampling
 * mode is MSDC_SMPL_RISING/FALLING, or MSDC_SMPL_SEPERATE to program a
 * per-line edge from the static tables below. The chosen mode is cached
 * in host->priv. Invalid combinations are logged and ignored.
 */
static void msdc_set_smpl(struct mmc_host *host, u8 HS400, u8 mode, u8 type)
{
    addr_t base = host->base;   /* used implicitly by the MSDC_* register macros */
    int i=0;
    msdc_priv_t *priv = (msdc_priv_t *)host->priv;
    /* per-line edges used only in MSDC_SMPL_SEPERATE mode */
    static u8 read_data_edge[8] = {MSDC_SMPL_RISING, MSDC_SMPL_RISING, MSDC_SMPL_RISING, MSDC_SMPL_RISING,
                                   MSDC_SMPL_RISING, MSDC_SMPL_RISING, MSDC_SMPL_RISING, MSDC_SMPL_RISING
                                  };
    static u8 write_data_edge[4] = {MSDC_SMPL_RISING, MSDC_SMPL_RISING, MSDC_SMPL_RISING, MSDC_SMPL_RISING};

    switch (type) {
    case TYPE_CMD_RESP_EDGE:
        if (HS400) {
            // eMMC5.0 only output resp at CLK pin, so no need to select DS pin
            MSDC_SET_FIELD(EMMC50_CFG0, MSDC_EMMC50_CFG_PADCMD_LATCHCK, 0); //latch cmd resp at CLK pin
            MSDC_SET_FIELD(EMMC50_CFG0, MSDC_EMMC50_CFG_CMD_RESP_SEL, 0);//latch cmd resp at CLK pin
        }

        if (mode == MSDC_SMPL_RISING || mode == MSDC_SMPL_FALLING) {
            MSDC_SET_FIELD(MSDC_IOCON, MSDC_IOCON_RSPL, mode);
            priv->rsmpl = mode;
        } else {
            dprintf(CRITICAL, "[%s]: invalid resp parameter: HS400=%d, type=%d, mode=%d\n", __func__, HS400, type, mode);
        }
        break;

    case TYPE_WRITE_CRC_EDGE:
        if (HS400) {
            MSDC_SET_FIELD(EMMC50_CFG0, MSDC_EMMC50_CFG_CRC_STS_SEL, 1);//latch write crc status at DS pin
        } else {
            MSDC_SET_FIELD(EMMC50_CFG0, MSDC_EMMC50_CFG_CRC_STS_SEL, 0);//latch write crc status at CLK pin
        }

        if (mode == MSDC_SMPL_RISING || mode == MSDC_SMPL_FALLING) {
            if (HS400) {
                MSDC_SET_FIELD(EMMC50_CFG0, MSDC_EMMC50_CFG_CRC_STS_EDGE, mode);
            } else {
                MSDC_SET_FIELD(MSDC_IOCON, MSDC_IOCON_W_D_SMPL_SEL, 0);
                MSDC_SET_FIELD(MSDC_IOCON, MSDC_IOCON_W_D_SMPL, mode);
            }
            priv->wdsmpl = mode;
        } else if (mode == MSDC_SMPL_SEPERATE && !HS400) {
            MSDC_SET_FIELD(MSDC_IOCON, MSDC_IOCON_W_D0SPL, write_data_edge[0]); //only dat0 is for write crc status.
            priv->wdsmpl = mode;
        } else {
            dprintf(CRITICAL, "[%s]: invalid crc parameter: HS400=%d, type=%d, mode=%d\n", __func__, HS400, type, mode);
        }
        break;

    case TYPE_READ_DATA_EDGE:
        if (HS400) {
            msdc_set_startbit(host, START_AT_RISING_AND_FALLING); //for HS400, start bit is output both on rising and falling edge
            priv->start_bit = START_AT_RISING_AND_FALLING;
        } else {
            msdc_set_startbit(host, START_AT_RISING); //for the other mode, start bit is only output on rising edge. but DDR50 can try falling edge if error casued by pad delay
            priv->start_bit = START_AT_RISING;
        }
        if (mode == MSDC_SMPL_RISING || mode == MSDC_SMPL_FALLING) {
            MSDC_SET_FIELD(MSDC_IOCON, MSDC_IOCON_R_D_SMPL_SEL, 0);
            MSDC_SET_FIELD(MSDC_IOCON, MSDC_IOCON_R_D_SMPL, mode);
            priv->rdsmpl = mode;
        } else if (mode == MSDC_SMPL_SEPERATE) {
            MSDC_SET_FIELD(MSDC_IOCON, MSDC_IOCON_R_D_SMPL_SEL, 1);
            for (i=0; i<8; i++) {
                MSDC_SET_FIELD(MSDC_IOCON, (MSDC_IOCON_R_D0SPL << i), read_data_edge[i]);
            }
            priv->rdsmpl = mode;
        } else {
            dprintf(CRITICAL, "[%s]: invalid read parameter: HS400=%d, type=%d, mode=%d\n", __func__, HS400, type, mode);
        }
        break;

    case TYPE_WRITE_DATA_EDGE:
        MSDC_SET_FIELD(EMMC50_CFG0, MSDC_EMMC50_CFG_CRC_STS_SEL, 0);//latch write crc status at CLK pin

        if (mode == MSDC_SMPL_RISING|| mode == MSDC_SMPL_FALLING) {
            MSDC_SET_FIELD(MSDC_IOCON, MSDC_IOCON_W_D_SMPL_SEL, 0);
            MSDC_SET_FIELD(MSDC_IOCON, MSDC_IOCON_W_D_SMPL, mode);
            priv->wdsmpl = mode;
        } else if (mode == MSDC_SMPL_SEPERATE) {
            MSDC_SET_FIELD(MSDC_IOCON, MSDC_IOCON_W_D_SMPL_SEL, 1);
            for (i=0; i<4; i++) {
                MSDC_SET_FIELD(MSDC_IOCON, (MSDC_IOCON_W_D0SPL << i), write_data_edge[i]);//dat0~4 is for SDIO card.
            }
            priv->wdsmpl = mode;
        } else {
            dprintf(CRITICAL, "[%s]: invalid write parameter: HS400=%d, type=%d, mode=%d\n", __func__, HS400, type, mode);
        }
        break;

    default:
        dprintf(CRITICAL, "[%s]: invalid parameter: HS400=%d, type=%d, mode=%d\n", __func__, HS400, type, mode);
        break;
    }
}
+
+/*
+ * msdc_set_timeout() - program the data timeout counter (SDC_CFG_DTOC).
+ * @ns:   requested timeout in nanoseconds
+ * @clks: extra timeout expressed in bus-clock cycles
+ *
+ * The requested time is converted into units of 1048576 (2^20) source
+ * clock cycles; DDR clock modes (CKMOD >= 2) double the count since data
+ * toggles on both edges.  If the bus clock is not yet known (0), DTOC is
+ * programmed to 0.
+ *
+ * Fix: the log message claimed "65536 sclk cycles" while the value is
+ * computed in 2^20-cycle units (>> 20); the message now matches the code.
+ */
+void msdc_set_timeout(struct mmc_host *host, u32 ns, u32 clks)
+{
+    addr_t base = host->base;
+    u32 timeout, clk_ns;
+    u32 mode = 0;
+
+    if (host->cur_bus_clk == 0) {
+        timeout = 0;
+    } else {
+        clk_ns = 1000000000UL / host->cur_bus_clk;
+        timeout = (ns + clk_ns - 1) / clk_ns + clks;
+        timeout = (timeout + (1 << 20) - 1) >> 20; /* in 1048576 sclk cycle unit */
+        MSDC_GET_FIELD(MSDC_CFG, MSDC_CFG_CKMOD, mode);
+        timeout = mode >= 2 ? timeout * 2 : timeout; /* DDR mode doubles the clk cycles for data timeout */
+        timeout = timeout > 1 ? timeout - 1 : 0;
+        timeout = timeout > 255 ? 255 : timeout;     /* DTOC is an 8-bit field */
+    }
+    MSDC_SET_FIELD(SDC_CFG, SDC_CFG_DTOC, timeout);
+    dprintf(INFO, "[MSDC] Set read data timeout: %dns %dclks -> %d (1048576 sclk cycles)\n",
+            ns, clks, timeout + 1);
+}
+
+/* Record which auto-command mode (CMD12/CMD23/CMD19 bits) later data
+ * transfers should encode into SDC_CMD. */
+void msdc_set_autocmd(struct mmc_host *host, int cmd)
+{
+    ((msdc_priv_t *)host->priv)->autocmd = cmd;
+}
+
+/* Return the currently configured auto-command mode bits. */
+int msdc_get_autocmd(struct mmc_host *host)
+{
+    return ((msdc_priv_t *)host->priv)->autocmd;
+}
+
+/*
+ * msdc_abort() - soft-reset the MSDC host controller after an error.
+ *
+ * Resets the controller state machine, drains both FIFOs and clears all
+ * pending interrupt status bits.  Only the host is touched; card-side
+ * recovery is handled by msdc_abort_handler().
+ */
+static void msdc_abort(struct mmc_host *host)
+{
+    addr_t base = host->base;
+
+    dprintf(CRITICAL, "[MSDC] Abort: MSDC_FIFOCS=%xh MSDC_PS=%xh SDC_STS=%xh\n",
+            MSDC_READ32(MSDC_FIFOCS), MSDC_READ32(MSDC_PS), MSDC_READ32(SDC_STS));
+    /* reset controller */
+    MSDC_RESET();
+
+    /* clear fifo */
+    MSDC_CLR_FIFO();
+
+    /* make sure txfifo and rxfifo are empty (reset should have drained them) */
+    if (MSDC_TXFIFOCNT() != 0 || MSDC_RXFIFOCNT() != 0) {
+        dprintf(INFO, "[MSDC] Abort: TXFIFO(%d), RXFIFO(%d) != 0\n",
+                MSDC_TXFIFOCNT(), MSDC_RXFIFOCNT());
+    }
+
+    /* clear all interrupts */
+    MSDC_CLR_INT();
+}
+
+/*
+ * msdc_get_card_status() - issue SEND_STATUS (CMD13) to the selected card.
+ * @status: out parameter receiving the R1 card status word on success.
+ *
+ * Returns MMC_ERR_NONE on success, or the send/wait error code.
+ */
+static int msdc_get_card_status(struct mmc_host *host, u32 *status)
+{
+    int err;
+    struct mmc_command cmd;
+
+    cmd.opcode = MMC_CMD_SEND_STATUS;
+    cmd.arg = host->card->rca << 16;    /* RCA goes in the upper halfword */
+    cmd.rsptyp = RESP_R1;
+    cmd.retries = CMD_RETRIES;
+    cmd.timeout = CMD_TIMEOUT;
+
+    err = msdc_send_cmd(host, &cmd);
+    if (!err)
+        err = msdc_wait_cmd_done(host, &cmd);
+
+    if (err == MMC_ERR_NONE)
+        *status = cmd.resp[0];
+
+    return err;
+}
+
+/*
+ * msdc_abort_handler() - reset the host and drive the card back to the
+ * "tran" state (R1 CURRENT_STATE == 4).
+ * @abort_card: when non-zero, issue CMD12 if the card sits in the data
+ *              (5) or rcv (6) state.
+ *
+ * Returns 0 once the card reports "tran", 1 on unrecoverable failure.
+ *
+ * Fix: the state==7 ("prg", busy) and state!=4 fallback branches were
+ * nested inside the (state==5 || state==6) block and therefore
+ * unreachable; they are hoisted to the outer if/else chain so a busy
+ * card is polled instead of mis-handled.
+ */
+int msdc_abort_handler(struct mmc_host *host, int abort_card)
+{
+    struct mmc_command stop;
+    u32 status = 0;
+    u32 state = 0;
+
+    while (state != 4) { // until status to "tran"
+        msdc_abort(host);
+        if (msdc_get_card_status(host, &status)) {
+            dprintf(CRITICAL, "Get card status failed\n");
+            return 1;
+        }
+        state = R1_CURRENT_STATE(status);
+        dprintf(INFO, "check card state<%d>\n", state);
+        if (state == 5 || state == 6) {
+            dprintf(INFO, "state<%d> need cmd12 to stop\n", state);
+            if (abort_card) {
+                stop.opcode = MMC_CMD_STOP_TRANSMISSION;
+                stop.rsptyp = RESP_R1B;
+                stop.arg = 0;
+                stop.retries = CMD_RETRIES;
+                stop.timeout = CMD_TIMEOUT;
+                msdc_send_cmd(host, &stop);
+                msdc_wait_cmd_done(host, &stop); // don't tuning
+            }
+        } else if (state == 7) { // busy in programing
+            dprintf(INFO, "state<%d> card is busy\n", state);
+            spin(100000);
+        } else if (state != 4) {
+            dprintf(INFO, "state<%d> ??? \n", state);
+            return 1;
+        }
+    }
+    msdc_abort(host);
+    return 0;
+}
+
+/*
+ * msdc_intr_wait() - block until the MSDC ISR signals msdc_int_event,
+ * then return the interrupt status it latched into g_int_status.
+ * @intrs: interrupt bits the caller expects; anything outside this mask
+ *         is logged as unexpected but still returned.
+ *
+ * Returns 0 if the event wait itself fails (status is discarded).
+ */
+static u32 msdc_intr_wait(struct mmc_host *host, u32 intrs)
+{
+    u32 sts = 0;
+    u32 tmo = UINT_MAX;   /* effectively wait forever for the ISR */
+    int ret = 0;
+
+    /* warning that interrupts are not enabled */
+    ret = event_wait_timeout(&msdc_int_event, tmo);
+    if (ret != 0) {
+        addr_t base = host->base;
+        dprintf(CRITICAL, "[%s]: failed to get event INT=0x%x\n",
+                __func__, MSDC_READ32(MSDC_INT));
+        g_int_status = 0;
+        return 0;
+    }
+
+    /* consume the status the interrupt handler saved */
+    sts = g_int_status;
+    g_int_status = 0;
+
+    if (~intrs & sts)
+        dprintf(CRITICAL, "msdc_intr_wait Unexpected INT(0x%x)\n", ~intrs & sts);
+
+    return sts;
+}
+
+/*
+ * msdc_interrupt_handler() - MSDC IRQ entry point.
+ *
+ * Latches MSDC_INT into g_int_status, acknowledges it, masks all further
+ * interrupts, and wakes whoever is blocked in msdc_intr_wait().
+ */
+static enum handler_return msdc_interrupt_handler(void *arg)
+{
+    struct mmc_host *host = arg;
+    addr_t base = host->base;
+
+    /* Save & Clear the interrupt */
+    g_int_status = MSDC_READ32(MSDC_INT);
+    MSDC_WRITE32(MSDC_INT, g_int_status);   /* write-1-to-clear */
+    MSDC_WRITE32(MSDC_INTEN, 0);            /* mask until re-armed by the waiter */
+    host->intr_mask = 0;
+
+    /* MUST BE *false*! otherwise, schedule in interrupt */
+    event_signal(&msdc_int_event, false);
+
+    return INT_RESCHEDULE;
+}
+
+/*
+ * msdc_send_cmd() - encode @cmd into the raw SDC_CMD word and fire it.
+ *
+ * Folds the opcode, response type, block length, data direction/type,
+ * stop/go-irq flags and the configured auto-command (CMD12/CMD23/CMD19)
+ * into the command register, waits for the controller to go idle, then
+ * issues the command.  Completion and response collection happen later
+ * in msdc_wait_cmd_done().  Always returns MMC_ERR_NONE as written.
+ */
+static int msdc_send_cmd(struct mmc_host *host, struct mmc_command *cmd)
+{
+    msdc_priv_t *priv = (msdc_priv_t *)host->priv;
+    addr_t base = host->base;
+    u32 opcode = cmd->opcode;
+    u32 rsptyp = cmd->rsptyp;
+    u32 rawcmd;
+    u32 error = MMC_ERR_NONE;
+
+    /* rawcmd :
+     * vol_swt << 30 | auto_cmd << 28 | blklen << 16 | go_irq << 15 |
+     * stop << 14 | rw << 13 | dtype << 11 | rsptyp << 7 | brk << 6 | opcode
+     */
+    rawcmd = (opcode & ~(SD_CMD_BIT | SD_CMD_APP_BIT)) |
+             msdc_rsp[rsptyp] << 7 | host->blklen << 16;
+
+    /* per-opcode data-type / direction / auto-cmd encoding */
+    if (opcode == MMC_CMD_WRITE_MULTIPLE_BLOCK) {
+        rawcmd |= ((2 << 11) | (1 << 13));          /* multi-block, write */
+        if (priv->autocmd & MSDC_AUTOCMD12) {
+            rawcmd |= (1 << 28);
+        } else if (priv->autocmd & MSDC_AUTOCMD23) {
+            rawcmd |= (2 << 28);
+        }
+    } else if (opcode == MMC_CMD_WRITE_BLOCK || opcode == MMC_CMD50) {
+        rawcmd |= ((1 << 11) | (1 << 13));          /* single block, write */
+    } else if (opcode == MMC_CMD_READ_MULTIPLE_BLOCK) {
+        rawcmd |= (2 << 11);                        /* multi-block, read */
+        if (priv->autocmd & MSDC_AUTOCMD12) {
+            rawcmd |= (1 << 28);
+        } else if (priv->autocmd & MSDC_AUTOCMD23) {
+            rawcmd |= (2 << 28);
+        }
+    } else if (opcode == MMC_CMD_READ_SINGLE_BLOCK ||
+               opcode == SD_ACMD_SEND_SCR ||
+               opcode == SD_CMD_SWITCH ||
+               opcode == MMC_CMD_SEND_EXT_CSD ||
+               opcode == MMC_CMD_SEND_WRITE_PROT ||
+               opcode == MMC_CMD_SEND_WRITE_PROT_TYPE ||
+               opcode == MMC_CMD21) {
+        rawcmd |= (1 << 11);                        /* single block, read */
+    } else if (opcode == MMC_CMD_STOP_TRANSMISSION) {
+        rawcmd |= (1 << 14);                        /* stop command */
+        rawcmd &= ~(0x0FFF << 16);                  /* no block length */
+    } else if (opcode == SD_IO_RW_EXTENDED) {
+        if (cmd->arg & 0x80000000) /* R/W flag */
+            rawcmd |= (1 << 13);
+        if ((cmd->arg & 0x08000000) && ((cmd->arg & 0x1FF) > 1))
+            rawcmd |= (2 << 11); /* multiple block mode */
+        else
+            rawcmd |= (1 << 11);
+    } else if (opcode == SD_IO_RW_DIRECT) {
+        if ((cmd->arg & 0x80000000) && ((cmd->arg >> 9) & 0x1FFFF))/* I/O abt */
+            rawcmd |= (1 << 14);
+    } else if (opcode == SD_CMD_VOL_SWITCH) {
+        rawcmd |= (1 << 30);
+    } else if (opcode == SD_CMD_SEND_TUNING_BLOCK) {
+        rawcmd |= (1 << 11); /* CHECKME */
+        if (priv->autocmd & MSDC_AUTOCMD19)
+            rawcmd |= (3 << 28);
+    } else if (opcode == MMC_CMD_GO_IRQ_STATE) {
+        rawcmd |= (1 << 15);
+    } else if (opcode == MMC_CMD_WRITE_DAT_UNTIL_STOP) {
+        rawcmd |= ((1<< 13) | (3 << 11));
+    } else if (opcode == MMC_CMD_READ_DAT_UNTIL_STOP) {
+        rawcmd |= (3 << 11);
+    }
+
+    dprintf(INFO, "+[MSDC%d] CMD(%d): ARG(0x%x), RAW(0x%x), BLK_NUM(0x%x) RSP(%d)\n",host->host_id,
+            (opcode & ~(SD_CMD_BIT | SD_CMD_APP_BIT)), cmd->arg, rawcmd,
+            MSDC_READ32(SDC_BLK_NUM), rsptyp);
+
+    /* wait until the command path (and data path for data commands) is idle */
+    while (SDC_IS_CMD_BUSY());
+    if ((rsptyp == RESP_R1B) || (opcode == MMC_CMD_WRITE_MULTIPLE_BLOCK) ||
+        opcode == MMC_CMD_WRITE_BLOCK || opcode == MMC_CMD_READ_MULTIPLE_BLOCK ||
+        opcode == MMC_CMD_READ_SINGLE_BLOCK)
+        while (SDC_IS_BUSY());
+
+    SDC_SEND_CMD(rawcmd, cmd->arg);
+
+end: /* NOTE(review): label appears unused — no goto end in this function */
+    cmd->error = error;
+
+    return error;
+}
+
+/*
+ * msdc_wait_cmd_done() - poll MSDC_INT for command completion and fetch
+ * the response for @cmd.
+ *
+ * Handles the auto-cmd23 ready/error bits when enabled, copies the
+ * 128-bit R2 response (or the 32-bit response otherwise) into
+ * cmd->resp[], and on R1b waits for DAT0 to go high (card not busy).
+ *
+ * Returns MMC_ERR_NONE / MMC_ERR_BADCRC / MMC_ERR_TIMEOUT /
+ * MMC_ERR_ACMD_* / MMC_ERR_UNEXPECT, also mirrored into cmd->error.
+ */
+static int msdc_wait_cmd_done(struct mmc_host *host, struct mmc_command *cmd)
+{
+    addr_t base = host->base;
+    u32 rsptyp = cmd->rsptyp;
+    u32 status;
+    u32 opcode = (cmd->opcode & ~(SD_CMD_BIT | SD_CMD_APP_BIT));
+    u32 error = MMC_ERR_NONE;
+    u32 wints = MSDC_INT_CMDTMO | MSDC_INT_CMDRDY | MSDC_INT_RSPCRCERR |
+                MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR | MSDC_INT_ACMDTMO;
+    u32 *resp = &cmd->resp[0];
+    msdc_priv_t *priv = (msdc_priv_t *)host->priv;
+
+    while (1) {
+        /* Wait for interrupt coming */
+        while (((status = MSDC_READ32(MSDC_INT)) & wints) == 0);
+        MSDC_WRITE32(MSDC_INT, (status & wints));   /* ack the bits we consume */
+        if (~wints & status)
+            dprintf(CRITICAL, "msdc_wait_cmd_done Unexpected INT(0x%x)\n",
+                    ~wints & status);
+
+        if (status & MSDC_INT_CMDRDY)
+            break;
+        else if (status & MSDC_INT_RSPCRCERR) {
+            if (opcode != MMC_CMD21)    /* CRC errors are expected during tuning */
+                dprintf(CRITICAL, "[MSDC] cmd%d CRCERR! (0x%x)\n", opcode, status);
+            error = MMC_ERR_BADCRC;
+            goto err;
+        } else if (status & MSDC_INT_CMDTMO) {
+            dprintf(CRITICAL, "[MSDC] cmd%d TMO! (0x%x)\n", opcode, status);
+            error = MMC_ERR_TIMEOUT;
+            goto err;
+        } else if (priv->autocmd & MSDC_AUTOCMD23) {
+            if (status & MSDC_INT_ACMDRDY)
+                /* Autocmd rdy is set prior to cmd rdy */
+                continue;
+            else if (status & MSDC_INT_ACMDCRCERR) {
+                dprintf(CRITICAL, "[MSDC] autocmd23 CRCERR! (0x%x)\n", status);
+                error = MMC_ERR_ACMD_RSPCRC;
+                goto err;
+            } else if (status & MSDC_INT_ACMDTMO) {
+                dprintf(CRITICAL, "[MSDC] autocmd23 TMO! (0x%x)\n", status);
+                error = MMC_ERR_ACMD_TIMEOUT;
+                goto err;
+            }
+        } else {
+            dprintf(CRITICAL, "[MSDC] cmd%d UNEXPECT status! (0x%x)\n",
+                    opcode, status);
+            error = MMC_ERR_UNEXPECT;
+            goto err;
+        }
+    }
+
+    switch (rsptyp) {
+        case RESP_NONE:
+            dprintf(INFO, "-[MSDC] CMD(%d): RSP(%d)\n",
+                    opcode, rsptyp);
+            break;
+        case RESP_R2:
+            /* 128-bit response: registers hold it MSW-first */
+            *resp++ = MSDC_READ32(SDC_RESP3);
+            *resp++ = MSDC_READ32(SDC_RESP2);
+            *resp++ = MSDC_READ32(SDC_RESP1);
+            *resp++ = MSDC_READ32(SDC_RESP0);
+            dprintf(INFO, "-[MSDC] CMD(%d): RSP(%d) = 0x%x 0x%x 0x%x 0x%x\n",
+                    opcode, cmd->rsptyp, cmd->resp[0], cmd->resp[1],
+                    cmd->resp[2], cmd->resp[3]);
+            break;
+        default: /* Response types 1, 3, 4, 5, 6, 7(1b) */
+            cmd->resp[0] = MSDC_READ32(SDC_RESP0);
+            dprintf(INFO, "-[MSDC] CMD(%d): RSP(%d) = 0x%x\n",
+                    opcode, cmd->rsptyp, cmd->resp[0]);
+            break;
+    }
+
+err:
+    /* R1b: wait for the card to release DAT0 (busy-low) */
+    if (rsptyp == RESP_R1B)
+        while ((MSDC_READ32(MSDC_PS) & MSDC_PS_DAT0) != MSDC_PS_DAT0);
+
+    cmd->error = error;
+
+    return error;
+}
+
+/*
+ * msdc_cmd() - send @cmd and wait for its response, with error recovery.
+ *
+ * On failure (CMD21 tuning excepted): if the failed command carried data
+ * (SDC_CMD_DTYP set), the card is aborted back to "tran"; APP_CMD /
+ * SEND_IF_COND timeouts are returned as-is (expected during probing);
+ * otherwise the command is retried via response-edge tuning.
+ */
+int msdc_cmd(struct mmc_host *host, struct mmc_command *cmd)
+{
+    int err;
+
+    err = msdc_send_cmd(host, cmd);
+    if (err != MMC_ERR_NONE)
+        return err;
+
+    err = msdc_wait_cmd_done(host, cmd);
+
+    if (err && cmd->opcode != MMC_CMD21) {
+        addr_t base = host->base;
+        u32 tmp = MSDC_READ32(SDC_CMD);
+
+        /* check if data is used by the command or not */
+        if (tmp & SDC_CMD_DTYP) {
+            if (msdc_abort_handler(host, 1)) {
+                dprintf(CRITICAL, "[MSDC] abort failed\n");
+            }
+        }
+
+        /* probing commands: a timeout is a legitimate "no card" answer */
+        if (cmd->opcode == MMC_CMD_APP_CMD ||
+            cmd->opcode == SD_CMD_SEND_IF_COND) {
+            if (err == MMC_ERR_TIMEOUT)
+                return err;
+        }
+
+        err = msdc_tune_cmdrsp(host, cmd);
+    }
+
+    return err;
+}
+
+#ifdef MSDC_USE_DMA_MODE
+/* Clean + invalidate the cache range covering a DMA buffer; called both
+ * before (flush writes) and after (drop stale lines) a DMA transfer. */
+static void msdc_flush_membuf(void *buf, u32 len)
+{
+    arch_clean_invalidate_cache_range((addr_t)buf,len);
+}
+
+/*
+ * msdc_dma_wait_done() - sleep until the DMA transfer for @cmd finishes.
+ *
+ * Waits on the MSDC interrupt event; on XFER_COMPL also requires the
+ * auto-CMD12 completion bits for multi-block transfers when AUTOCMD12 is
+ * enabled.  A zero status from msdc_intr_wait() is treated as a timeout.
+ * The controller is reset on any error before returning.
+ */
+static int msdc_dma_wait_done(struct mmc_host *host, struct mmc_command *cmd)
+{
+    addr_t base = host->base;
+    msdc_priv_t *priv = (msdc_priv_t *)host->priv;
+    u32 status;
+    u32 error = MMC_ERR_NONE;
+    u32 wints = MSDC_INT_XFER_COMPL | MSDC_INT_DATTMO | MSDC_INT_DATCRCERR |
+                MSDC_INT_DXFER_DONE | MSDC_INT_DMAQ_EMPTY |
+                MSDC_INT_ACMDRDY | MSDC_INT_ACMDTMO | MSDC_INT_ACMDCRCERR |
+                MSDC_INT_CMDRDY | MSDC_INT_CMDTMO | MSDC_INT_RSPCRCERR;
+
+    /* Deliver it to irq handler */
+    host->intr_mask = wints;
+
+    do {
+        status = msdc_intr_wait(host, wints);
+
+        if (status & MSDC_INT_XFER_COMPL) {
+            if (mmc_op_multi(cmd->opcode) && (priv->autocmd & MSDC_AUTOCMD12)) {
+                /* acmd rdy should be checked after xfer_done been held */
+                if (status & MSDC_INT_ACMDRDY) {
+                    break;
+                } else if (status & MSDC_INT_ACMDTMO) {
+                    dprintf(CRITICAL, "[MSDC] ACMD12 timeout(%xh)\n", status);
+                    error = MMC_ERR_ACMD_TIMEOUT;
+                    goto end;
+                } else if (status & MSDC_INT_ACMDCRCERR) {
+                    dprintf(CRITICAL, "[MSDC] ACMD12 CRC error(%xh)\n", status);
+                    error = MMC_ERR_ACMD_RSPCRC;
+                    goto end;
+                }
+            } else
+                break;
+        }
+
+        /* status == 0 means msdc_intr_wait() itself gave up */
+        if (status == 0 || status & MSDC_INT_DATTMO) {
+            dprintf(CRITICAL, "[MSDC] DMA DAT timeout(%xh)\n", status);
+            error = MMC_ERR_TIMEOUT;
+            goto end;
+        } else if (status & MSDC_INT_DATCRCERR) {
+            dprintf(CRITICAL, "[MSDC] DMA DAT CRC error(%xh)\n", status);
+            error = MMC_ERR_BADCRC;
+            goto end;
+        } else {
+            dprintf(CRITICAL, "[MSDC] Unexpect status(0x%x)\n", status);
+            error = MMC_ERR_UNEXPECT;
+            goto end;
+        }
+    } while (1);
+
+end:
+    if (error)
+        MSDC_RESET();
+
+    return error;
+}
+
+/*
+ * msdc_dma_transfer() - run one BASIC-mode DMA transfer described by @data.
+ *
+ * Programs the physical buffer address (truncated to 32 bits — a warning
+ * is printed on 64-bit physical addresses), burst size, length and
+ * interrupt mask, flushes the cache, starts DMA and waits for completion.
+ * On error, recovers the card via msdc_abort_handler().  Always stops the
+ * DMA engine and waits for it to go idle before returning.
+ */
+int msdc_dma_transfer(struct mmc_host *host, struct mmc_data *data)
+{
+    addr_t base = host->base;
+    int err;
+    paddr_t pa;
+
+    /* Set dma timeout */
+    msdc_set_timeout(host, data->timeout * 1000000, 0);
+    /* DRAM address */
+#if WITH_KERNEL_VM
+    pa = kvaddr_to_paddr(data->buf);
+#else
+    pa = (paddr_t)(data->buf);
+#endif
+    if (sizeof(pa) > 4)
+        dprintf(INFO, "[MSDC] WARN: 64bit physical address!\n");
+    MSDC_WRITE32(MSDC_DMA_SA, (u32)pa);
+    MSDC_SET_FIELD(MSDC_DMA_CTRL, MSDC_DMA_CTRL_BURSTSZ, MSDC_DMA_BURST_64B);
+    /* BASIC_DMA mode */
+    MSDC_SET_FIELD(MSDC_DMA_CTRL, MSDC_DMA_CTRL_MODE, 0);
+    /* This is the last buffer */
+    MSDC_SET_FIELD(MSDC_DMA_CTRL, MSDC_DMA_CTRL_LASTBUF, 1);
+    /* Total transfer size */
+    MSDC_WRITE32(MSDC_DMA_LEN, data->blks * host->blklen);
+    /* Set interrupts bit */
+    MSDC_SET_BIT32(MSDC_INTEN,
+                   MSDC_INT_XFER_COMPL | MSDC_INT_DATTMO | MSDC_INT_DATCRCERR);
+    /* Clean & Invalidate cache */
+    msdc_flush_membuf(data->buf, data->blks * host->blklen);
+    /* Trigger DMA start */
+    MSDC_SET_FIELD(MSDC_DMA_CTRL, MSDC_DMA_CTRL_START, 1);
+    /* wait DMA transferring done */
+    err = msdc_dma_wait_done(host, data->cmd);
+    msdc_flush_membuf(data->buf, data->blks * host->blklen);
+    if (err) {
+        dprintf(CRITICAL, "[MSDC] DMA failed! err(%d)\n", err);
+        if (msdc_abort_handler(host, 1)) {
+            dprintf(CRITICAL, "[MSDC] eMMC cannot back to TRANS mode!\n");
+            return MMC_ERR_FAILED;
+        }
+    }
+
+    /* Check DMA status and stop DMA transfer */
+    MSDC_SET_FIELD(MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP, 1);
+    while (MSDC_READ32(MSDC_DMA_CFG) & MSDC_DMA_CFG_STS);
+
+    return err;
+}
+
+/*
+ * msdc_dma_rw() - read or write @nblks blocks at @blkaddr via DMA.
+ * @rd: true for a read (card -> @buf), false for a write (@buf -> card).
+ *
+ * Enables DMA mode, programs the block count, issues the single/multi
+ * read or write command, then hands the buffer to msdc_dma_transfer().
+ */
+static int msdc_dma_rw(struct mmc_host *host, u8 *buf, u32 blkaddr, u32 nblks, bool rd)
+{
+    int multi, err;
+    struct mmc_command cmd;
+    struct mmc_data data;
+    addr_t base = host->base;
+
+    ASSERT(nblks <= host->max_phys_segs);
+
+    dprintf(INFO, "[MSDC] %s data %d blks %s 0x%x\n",
+            rd ? "Read" : "Write", nblks, rd ? "from" : "to", blkaddr);
+
+    multi = nblks > 1 ? 1 : 0;
+    /* DMA and block number _MUST_BE_ set prior to issuing command */
+    MSDC_DMA_ON;
+    MSDC_WRITE32(SDC_BLK_NUM, nblks);
+
+    /* send read command */
+    if (rd)
+        cmd.opcode =
+            multi ? MMC_CMD_READ_MULTIPLE_BLOCK : MMC_CMD_READ_SINGLE_BLOCK;
+    else
+        cmd.opcode = multi ? MMC_CMD_WRITE_MULTIPLE_BLOCK : MMC_CMD_WRITE_BLOCK;
+    cmd.arg = blkaddr;
+    cmd.rsptyp = RESP_R1;
+    cmd.retries = 0;
+    cmd.timeout = CMD_TIMEOUT;
+
+    err = msdc_cmd(host, &cmd);
+    if (err != MMC_ERR_NONE)
+        return err;
+
+    data.cmd = &cmd;
+    data.blks = nblks;
+    data.buf = buf;
+    /* data timeout in ms: 100 for reads, 250 for writes */
+    if (rd)
+        data.timeout = 100;
+    else
+        data.timeout = 250;
+
+    err = msdc_dma_transfer(host, &data);
+    MSDC_DMA_OFF;
+
+    return err;
+}
+
+/* Block-read entry point (DMA build): read @nblks blocks at @src into @dst. */
+static int msdc_dma_bread(struct mmc_host *host, u8 *dst, u32 src, u32 nblks)
+{
+    return msdc_dma_rw(host, dst, src, nblks, true);
+}
+
+/* Block-write entry point (DMA build): write @nblks blocks from @src to @dst. */
+static int msdc_dma_bwrite(struct mmc_host *host, u32 dst, u8 *src, u32 nblks)
+{
+    return msdc_dma_rw(host, src, dst, nblks, false);
+}
+#else
+/*
+ * msdc_pio_read_word() - PIO-drain the RX FIFO into @ptr for a @size-byte
+ * read transfer, polling MSDC_INT for completion and errors.
+ *
+ * Returns MMC_ERR_NONE, MMC_ERR_BADCRC, MMC_ERR_TIMEOUT or
+ * MMC_ERR_ACMD_RSPCRC.
+ *
+ * Fix: in the tail path (size < MSDC_FIFO_THD, size > 3) @size was never
+ * decremented after each 32-bit FIFO read, so the inner while(size) loop
+ * spun forever and read past the data the FIFO held.  It now subtracts 4
+ * per word, mirroring msdc_pio_write_word().
+ */
+static int msdc_pio_read_word(struct mmc_host *host, u32 *ptr, u32 size)
+{
+    int err = MMC_ERR_NONE;
+    addr_t base = host->base;
+    u32 ints = MSDC_INT_DATCRCERR | MSDC_INT_DATTMO | MSDC_INT_XFER_COMPL;
+    u32 status;
+    u32 totalsz = size;
+    u8 done = 0;
+    u8 *u8ptr;
+    u32 dcrc=0;
+
+    while (1) {
+        status = MSDC_READ32(MSDC_INT);
+        MSDC_WRITE32(MSDC_INT, status);   /* ack everything we saw */
+        if (status & ~ints)
+            dprintf(CRITICAL, "msdc_pio_read_word Unexpected INT(0x%x)\n", status);
+        if (status & MSDC_INT_DATCRCERR) {
+            MSDC_GET_FIELD(SDC_DCRC_STS, SDC_DCRC_STS_POS|SDC_DCRC_STS_NEG, dcrc);
+            dprintf(CRITICAL, "[MSDC] DAT CRC error (0x%x), Left:%d/%d bytes, RXFIFO:%d,dcrc:0x%x\n",
+                    status, size, totalsz, MSDC_RXFIFOCNT(),dcrc);
+            err = MMC_ERR_BADCRC;
+            break;
+        } else if (status & MSDC_INT_DATTMO) {
+            dprintf(CRITICAL, "[MSDC] DAT TMO error (0x%x), Left: %d/%d bytes, RXFIFO:%d\n",
+                    status, size, totalsz, MSDC_RXFIFOCNT());
+            err = MMC_ERR_TIMEOUT;
+            break;
+        } else if (status & MSDC_INT_ACMDCRCERR) {
+            MSDC_GET_FIELD(SDC_DCRC_STS, SDC_DCRC_STS_POS|SDC_DCRC_STS_NEG, dcrc);
+            dprintf(CRITICAL, "[MSDC] AUTOCMD CRC error (0x%x), Left:%d/%d bytes, RXFIFO:%d,dcrc:0x%x\n",
+                    status, size, totalsz, MSDC_RXFIFOCNT(),dcrc);
+            err = MMC_ERR_ACMD_RSPCRC;
+            break;
+        } else if (status & MSDC_INT_XFER_COMPL) {
+            done = 1;   /* keep draining until size reaches 0 */
+        }
+
+        if (size == 0 && done)
+            break;
+
+        /* Note. RXFIFO count would be aligned to 4-bytes alignment size */
+        if ((size >= MSDC_FIFO_THD) && (MSDC_RXFIFOCNT() >= MSDC_FIFO_THD)) {
+            int left = MSDC_FIFO_THD >> 2;
+            do {
+                *ptr++ = MSDC_FIFO_READ32();
+            } while (--left);
+            size -= MSDC_FIFO_THD;
+            dprintf(INFO, "[MSDC] Read %d bytes, RXFIFOCNT: %d, Left: %d/%d\n",
+                    MSDC_FIFO_THD, MSDC_RXFIFOCNT(), size, totalsz);
+        } else if ((size < MSDC_FIFO_THD) && MSDC_RXFIFOCNT() >= size) {
+            while (size) {
+                if (size > 3) {
+                    *ptr++ = MSDC_FIFO_READ32();
+                    size -= 4;  /* fix: was missing, causing an endless loop */
+                } else {
+                    u8ptr = (u8 *)ptr;
+                    while (size --)
+                        *u8ptr++ = MSDC_FIFO_READ8();
+                }
+            }
+            dprintf(INFO, "[MSDC] Read left bytes, RXFIFOCNT: %d, Left: %d/%d\n",
+                    MSDC_RXFIFOCNT(), size, totalsz);
+        }
+    }
+
+    return err;
+}
+
+static int msdc_pio_read(struct mmc_host *host, u32 *ptr, u32 size)
+{
+ int err = msdc_pio_read_word(host, (u32 *)ptr, size);
+
+ if (err != MMC_ERR_NONE) {
+ msdc_abort(host); /* reset internal fifo and state machine */
+ dprintf(CRITICAL, "[MSDC] %d-bit PIO Read Error (%d)\n", 32, err);
+ }
+
+ return err;
+}
+
+/*
+ * msdc_pio_write_word() - PIO-feed the TX FIFO from @ptr for a @size-byte
+ * write transfer, polling MSDC_INT for completion and errors.
+ *
+ * Data is pushed in full-FIFO bursts while >= MSDC_FIFO_SZ bytes remain,
+ * then word-by-word (byte-by-byte for the final <4 bytes).  Returns
+ * MMC_ERR_NONE / MMC_ERR_BADCRC / MMC_ERR_TIMEOUT / MMC_ERR_ACMD_RSPCRC.
+ */
+static int msdc_pio_write_word(struct mmc_host *host, u32 *ptr, u32 size)
+{
+    int err = MMC_ERR_NONE;
+    addr_t base = host->base;
+    u32 ints = MSDC_INT_DATCRCERR | MSDC_INT_DATTMO | MSDC_INT_XFER_COMPL;
+    //u32 timeout = 250000;
+    u32 status;
+    u8 *u8ptr;
+    msdc_priv_t *priv = (msdc_priv_t *)host->priv;
+
+    while (1) {
+        status = MSDC_READ32(MSDC_INT);
+        MSDC_WRITE32(MSDC_INT, status);   /* ack everything we saw */
+        if (status & ~ints) {
+            dprintf(CRITICAL, "msdc_pio_write_word Unexpected INT(0x%x)\n", status);
+        }
+        if (status & MSDC_INT_DATCRCERR) {
+            dprintf(CRITICAL, "[MSDC] DAT CRC error (0x%x), Left DAT: %d bytes\n",
+                    status, size);
+            err = MMC_ERR_BADCRC;
+            break;
+        } else if (status & MSDC_INT_DATTMO) {
+            dprintf(CRITICAL, "[MSDC] DAT TMO error (0x%x), Left DAT: %d bytes, MSDC_FIFOCS=%xh\n",
+                    status, size, MSDC_READ32(MSDC_FIFOCS));
+            err = MMC_ERR_TIMEOUT;
+            break;
+        } else if (status & MSDC_INT_ACMDCRCERR) {
+            dprintf(CRITICAL, "[MSDC] AUTO CMD CRC error (0x%x), Left DAT: %d bytes\n",
+                    status, size);
+            err = MMC_ERR_ACMD_RSPCRC;
+            break;
+        } else if (status & MSDC_INT_XFER_COMPL) {
+            if (size == 0) {
+                dprintf(INFO, "[MSDC] all data flushed to card\n");
+                break;
+            } else
+                dprintf(INFO, "[MSDC]<CHECKME> XFER_COMPL before all data written\n");
+        }
+
+        /* everything queued; keep polling for XFER_COMPL */
+        if (size == 0)
+            continue;
+
+        if (size >= MSDC_FIFO_SZ) {
+            /* only refill when the FIFO has fully drained */
+            if (MSDC_TXFIFOCNT() == 0) {
+                int left = MSDC_FIFO_SZ >> 2;
+                do {
+                    MSDC_FIFO_WRITE32(*ptr);
+                    ptr++;
+                } while (--left);
+                size -= MSDC_FIFO_SZ;
+            }
+        } else if (size < MSDC_FIFO_SZ && MSDC_TXFIFOCNT() == 0) {
+            while (size ) {
+                if (size > 3) {
+                    MSDC_FIFO_WRITE32(*ptr);
+                    ptr++;
+                    size -= 4;
+                } else {
+                    /* sub-word tail goes in byte by byte */
+                    u8ptr = (u8 *)ptr;
+                    while (size --) {
+                        MSDC_FIFO_WRITE8(*u8ptr);
+                        u8ptr++;
+                    }
+                }
+            }
+        }
+    }
+
+    return err;
+}
+
+static int msdc_pio_write(struct mmc_host *host, u32 *ptr, u32 size)
+{
+ int err = msdc_pio_write_word(host, (u32 *)ptr, size);
+
+ if (err != MMC_ERR_NONE) {
+ msdc_abort(host); /* reset internal fifo and state machine */
+ dprintf(CRITICAL, "[MSDC] PIO Write Error (%d)\n", err);
+ }
+
+ return err;
+}
+
+/*
+ * msdc_pio_bread() - block-read entry point (PIO build): read @nblks
+ * blocks at @src into @dst.
+ *
+ * Issues CMD17/CMD18, drains the data by PIO, and on a data error aborts
+ * the card back to "tran".  Returns the data error if any, otherwise the
+ * command error.
+ *
+ * Cleanup: dropped the unused `priv` and `stop` locals.
+ */
+static int msdc_pio_bread(struct mmc_host *host, u8 *dst, u32 src, u32 nblks)
+{
+    addr_t base = host->base;
+    u32 blksz = host->blklen;
+    int err = MMC_ERR_NONE, derr = MMC_ERR_NONE;
+    int multi;
+    struct mmc_command cmd;
+    u32 *ptr = (u32 *)dst;
+
+    dprintf(INFO, "[MSDC] Read data %d bytes from 0x%x\n", nblks * blksz, src);
+
+    multi = nblks > 1 ? 1 : 0;
+
+    MSDC_WRITE32(SDC_BLK_NUM, nblks);
+    msdc_set_timeout(host, 100000000, 0);
+
+    /* send read command */
+    cmd.opcode = multi ? MMC_CMD_READ_MULTIPLE_BLOCK : MMC_CMD_READ_SINGLE_BLOCK;
+    cmd.rsptyp = RESP_R1;
+    cmd.arg = src;
+    cmd.retries = 0;
+    cmd.timeout = CMD_TIMEOUT;
+    err = msdc_cmd(host, &cmd);
+
+    if (err != MMC_ERR_NONE)
+        goto done;
+
+    err = derr = msdc_pio_read(host, (u32 *)ptr, nblks * blksz);
+
+done:
+    if (err != MMC_ERR_NONE) {
+        if (derr != MMC_ERR_NONE) {
+            dprintf(CRITICAL, "[MSDC] Read data error (%d)\n", derr);
+            if (msdc_abort_handler(host, 1))
+                dprintf(CRITICAL, "[MSDC] abort failed\n");
+        } else {
+            dprintf(CRITICAL, "[MSDC] Read error (%d)\n", err);
+        }
+    }
+    return (derr == MMC_ERR_NONE) ? err : derr;
+}
+
+/*
+ * msdc_pio_bwrite() - block-write entry point (PIO build): write @nblks
+ * blocks from @src to @dst.
+ *
+ * Issues CMD24/CMD25, feeds the data by PIO, and on a data error aborts
+ * the card back to "tran".  Returns the data error if any, otherwise the
+ * command error.
+ *
+ * Cleanup: dropped the unused `priv` and `stop` locals; the banner log
+ * is INFO (not CRITICAL) for consistency with msdc_pio_bread().
+ */
+static int msdc_pio_bwrite(struct mmc_host *host, u32 dst, u8 *src, u32 nblks)
+{
+    addr_t base = host->base;
+    int err = MMC_ERR_NONE, derr = MMC_ERR_NONE;
+    int multi;
+    u32 blksz = host->blklen;
+    struct mmc_command cmd;
+    u32 *ptr = (u32 *)src;
+
+    dprintf(INFO, "[MSDC] Write data %d bytes to 0x%x\n", nblks * blksz, dst);
+
+    multi = nblks > 1 ? 1 : 0;
+
+    MSDC_WRITE32(SDC_BLK_NUM, nblks);
+
+    /* No need since MSDC always waits 8 cycles for write data timeout */
+
+    /* send write command */
+    cmd.opcode = multi ? MMC_CMD_WRITE_MULTIPLE_BLOCK : MMC_CMD_WRITE_BLOCK;
+    cmd.rsptyp = RESP_R1;
+    cmd.arg = dst;
+    cmd.retries = 0;
+    cmd.timeout = CMD_TIMEOUT;
+    err = msdc_cmd(host, &cmd);
+
+    if (err != MMC_ERR_NONE)
+        goto done;
+
+    err = derr = msdc_pio_write(host, (u32 *)ptr, nblks * blksz);
+
+done:
+    if (err != MMC_ERR_NONE) {
+        if (derr != MMC_ERR_NONE) {
+            dprintf(CRITICAL, "[MSDC] Write data error (%d)\n", derr);
+            if (msdc_abort_handler(host, 1))
+                dprintf(CRITICAL, "[MSDC] abort failed\n");
+        } else {
+            dprintf(CRITICAL, "[MSDC] Write error (%d)\n", err);
+        }
+    }
+    return (derr == MMC_ERR_NONE) ? err : derr;
+}
+#endif
+
+
+/*
+ * msdc_config_clksrc() - record the clock-source selection and derive the
+ * host's source clock frequency.
+ *
+ * On silicon, MSDC0 runs from a 400 MHz source and the other hosts from
+ * 200 MHz; on FPGA the fixed MSDC_OP_SCLK is used.  Only bookkeeping —
+ * no hardware register is touched here.
+ */
+static void msdc_config_clksrc(struct mmc_host *host, u32 clksrc, u32 hclksrc)
+{
+    // modify the clock
+    ASSERT(host);
+    /*
+     * For MT2712, MSDC0 use 400Mhz(MSDCPLL) source clock
+     */
+    host->clksrc = clksrc;
+    host->hclksrc = hclksrc;
+#ifndef FPGA_PLATFORM
+    if (host->host_id == 0)
+        host->clk = 400 * 1000 * 1000;
+    else
+        host->clk = 200 * 1000 * 1000;
+#else
+    host->clk = MSDC_OP_SCLK;
+#endif
+
+    /* Chaotian, may need update this part of code */
+    dprintf(INFO, "[info][%s] pll_clk %u (%uMHz), pll_hclk %u\n",
+            __func__, host->clksrc, host->clk/1000000, host->hclksrc);
+}
+
+/*
+ * msdc_config_clock() - set the bus clock for the requested @state/@hz.
+ *
+ * Picks the CKMOD/CKDIV pair: HS400 (mode 3, fixed source/2), DDR
+ * (mode 2, extra internal /2), no-div (mode 1) or SDR divider (mode 0),
+ * clamping @hz into [f_min, f_max].  The resulting rate is stored in
+ * host->cur_bus_clk, and the function busy-waits for CKSTB before
+ * returning.
+ */
+void msdc_config_clock(struct mmc_host *host, int state, u32 hz)
+{
+    addr_t base = host->base;
+    u32 mode = 0;
+    u32 div;
+    u32 sclk;
+    u32 u4buswidth=0;
+
+    /* clamp the request into the host's supported range */
+    if (hz >= host->f_max) {
+        hz = host->f_max;
+    } else if (hz < host->f_min) {
+        hz = host->f_min;
+    }
+
+    msdc_config_clksrc(host, host->clksrc, host->hclksrc);
+    MSDC_CLR_BIT32(MSDC_CFG, MSDC_CFG_CKMOD_HS400);
+    MSDC_SET_BIT32(MSDC_PATCH_BIT2, MSDC_PB2_CFGCRCSTS);
+
+    if (state & MMC_STATE_HS400) {
+        mode = 0x3;
+        div = 0; /* we let hs400 mode fixed at 200Mhz */
+        sclk = host->clk >> 1;
+        MSDC_SET_BIT32(MSDC_CFG, MSDC_CFG_CKMOD_HS400);
+        MSDC_CLR_BIT32(MSDC_PATCH_BIT2, MSDC_PB2_CFGCRCSTS);
+    } else if (state&MMC_STATE_DDR) {
+        mode = 0x2; /* ddr mode and use divisor */
+        if (hz >= (host->clk >> 2)) {
+            div = 0; /* mean div = 1/2 */
+            sclk = host->clk >> 2; /* sclk = clk/div/2. 2: internal divisor */
+        } else {
+            div = (host->clk + ((hz << 2) - 1)) / (hz << 2);
+            sclk = (host->clk >> 2) / div;
+            div = (div >> 1); /* since there is 1/2 internal divisor */
+        }
+    } else if (hz >= host->clk) {
+        mode = 0x1; /* no divisor and divisor is ignored */
+        div = 0;
+        sclk = host->clk;
+    } else {
+        mode = 0x0; /* use divisor */
+        if (hz >= (host->clk >> 1)) {
+            div = 0; /* mean div = 1/2 */
+            sclk = host->clk >> 1; /* sclk = clk / 2 */
+        } else {
+            div = (host->clk + ((hz << 2) - 1)) / (hz << 2);
+            sclk = (host->clk >> 2) / div;
+        }
+    }
+    host->cur_bus_clk = sclk;
+
+    /* set clock mode and divisor */
+    MSDC_SET_FIELD(MSDC_CFG, (MSDC_CFG_CKMOD |MSDC_CFG_CKDIV),\
+                   (mode << 12) | div);
+    /* wait clock stable */
+    while (!(MSDC_READ32(MSDC_CFG) & MSDC_CFG_CKSTB));
+
+    MSDC_GET_FIELD(SDC_CFG,SDC_CFG_BUSWIDTH,u4buswidth);
+
+    dprintf(INFO,
+            "[MSDC] SET_CLK(%dkHz): SCLK(%dkHz) MODE(%d) DIV(%d) DS(%d) RS(%d) buswidth(%s)\n",
+            hz/1000, sclk/1000, mode, div, msdc_cap[host->host_id].data_edge,
+            msdc_cap[host->host_id].cmd_edge,
+            (u4buswidth == 0) ?
+            "1-bit" : (u4buswidth == 1) ?
+            "4-bits" : (u4buswidth == 2) ?
+            "8-bits" : "undefined");
+}
+
+/*
+ * msdc_config_bus() - program the data bus width (1/4/8 bits) into
+ * SDC_CFG and log the current clock configuration.
+ */
+void msdc_config_bus(struct mmc_host *host, u32 width)
+{
+    u32 val,mode, div;
+    addr_t base = host->base;
+
+    /* SDC_CFG_BUSWIDTH encoding: 0 = 1-bit, 1 = 4-bit, 2 = 8-bit */
+    val = (width == HOST_BUS_WIDTH_8) ? 2 :
+          (width == HOST_BUS_WIDTH_4) ? 1 : 0;
+
+    MSDC_SET_FIELD(SDC_CFG, SDC_CFG_BUSWIDTH, val);
+    MSDC_GET_FIELD(MSDC_CFG,MSDC_CFG_CKMOD,mode);
+    MSDC_GET_FIELD(MSDC_CFG,MSDC_CFG_CKDIV,div);
+
+    dprintf(INFO, "CLK (%dMHz), SCLK(%dkHz) MODE(%d) DIV(%d) buswidth(%u-bits)\n",
+            host->clk/1000000, host->cur_bus_clk/1000, mode, div, width);
+}
+
+/* Gate/ungate the host clock. Currently log-only on this platform
+ * (the clock is presumably always running — TODO confirm). */
+void msdc_clock(struct mmc_host *host, int on)
+{
+    /* Chaotian, may need update this part of code */
+    dprintf(INFO, "[MSDC] Turn %s %s clock \n", on ? "on" : "off", "host");
+}
+
+/* Host power control. Log-only on this platform. */
+static void msdc_host_power(struct mmc_host *host, int on)
+{
+    dprintf(INFO, "[MSDC] Turn %s %s power \n", on ? "on" : "off", "host");
+}
+
+/*
+ * msdc_card_power() - card power control.
+ *
+ * The early return deliberately disables the switching code below it:
+ * on this platform the card rail is always on.
+ */
+static void msdc_card_power(struct mmc_host *host, int on)
+{
+    dprintf(INFO, "[MSDC] Turn %s %s power \n", on ? "on" : "off", "card");
+    return; /* power always on, return directly */
+
+    /* intentionally dead code while the rail is fixed on */
+    if (on) {
+        msdc_set_card_pwr(host, 1);
+    } else {
+        msdc_set_card_pwr(host, 0);
+    }
+}
+
+/* Sequence host and card power: host rail before card rail on power-up,
+ * the reverse order on power-down. */
+void msdc_power(struct mmc_host *host, u8 mode)
+{
+    int power_up = (mode == MMC_POWER_ON) || (mode == MMC_POWER_UP);
+
+    if (!power_up) {
+        msdc_card_power(host, 0);
+        msdc_host_power(host, 0);
+        return;
+    }
+
+    msdc_host_power(host, 1);
+    msdc_card_power(host, 1);
+}
+
+/* Reset the read-tuning attempt counter used by the tuning paths. */
+void msdc_reset_tune_counter(struct mmc_host *host)
+{
+    host->time_read = 0;
+}
+
+#if defined(FEATURE_MMC_CM_TUNING)
+#ifndef FPGA_PLATFORM
+/*
+ * msdc_tune_cmdrsp() - retry @cmd while scanning the response sample
+ * edge (2 values) and the CMD pad RX delay (32 taps) until it succeeds.
+ *
+ * Returns the last command result; MMC_ERR_NONE once a working
+ * edge/delay combination is found.
+ *
+ * Fix: the final dprintf wrapped its format string and arguments in an
+ * extra pair of parentheses, turning them into a comma expression — only
+ * the trailing "PASS"/"FAIL" string ever reached dprintf as the format.
+ */
+int msdc_tune_cmdrsp(struct mmc_host *host, struct mmc_command *cmd)
+{
+    u32 base = host->base;
+    u32 rsmpl,cur_rsmpl, orig_rsmpl;
+    u32 rdly,cur_rdly, orig_rdly;
+    u8 hs400 = 0, orig_clkmode;
+    int result = MMC_ERR_CMDTUNEFAIL;
+
+    MSDC_SET_FIELD(EMMC_TOP_CMD, PAD_CMD_RD_RXDLY_SEL, 1);
+    MSDC_GET_FIELD(MSDC_IOCON, MSDC_IOCON_RSPL, orig_rsmpl);
+    MSDC_GET_FIELD(EMMC_TOP_CMD, PAD_CMD_RXDLY, orig_rdly);
+    MSDC_GET_FIELD(MSDC_CFG, MSDC_CFG_CKMOD, orig_clkmode);
+
+    hs400 = (orig_clkmode == 3) ? 1 : 0;
+    rdly = 0;
+
+    do {
+        for (rsmpl = 0; rsmpl < 2; rsmpl++) {
+            cur_rsmpl = (orig_rsmpl + rsmpl) % 2;
+            msdc_set_smpl(host, hs400, cur_rsmpl, TYPE_CMD_RESP_EDGE);
+            /* at identification clock rates, force rising-edge sampling */
+            if (host->cur_bus_clk <= 400000){
+                MSDC_SET_FIELD(MSDC_IOCON, MSDC_IOCON_RSPL, 0);
+            }
+            if (cmd->opcode != MMC_CMD_STOP_TRANSMISSION) {
+                result = msdc_send_cmd(host, cmd);
+                if(result == MMC_ERR_TIMEOUT)
+                    rsmpl--;    /* timeouts don't consume a sample-edge try */
+                if (result != MMC_ERR_NONE && cmd->opcode != MMC_CMD_STOP_TRANSMISSION){
+                    if(cmd->opcode == MMC_CMD_READ_MULTIPLE_BLOCK ||
+                       cmd->opcode == MMC_CMD_WRITE_MULTIPLE_BLOCK ||
+                       cmd->opcode == MMC_CMD_READ_SINGLE_BLOCK ||
+                       cmd->opcode == MMC_CMD_WRITE_BLOCK)
+                        msdc_abort_handler(host,1);
+                    continue;
+                }
+                result = msdc_wait_cmd_done(host, cmd);
+            } else if (cmd->opcode == MMC_CMD_STOP_TRANSMISSION){
+                result = MMC_ERR_NONE;
+                goto done;
+            }
+            else
+                result = MMC_ERR_BADCRC;
+
+            if (result == MMC_ERR_NONE) {
+                goto done;
+            }
+
+            if(cmd->opcode == MMC_CMD_READ_MULTIPLE_BLOCK ||
+               cmd->opcode == MMC_CMD_WRITE_MULTIPLE_BLOCK ||
+               cmd->opcode == MMC_CMD_READ_SINGLE_BLOCK ||
+               cmd->opcode == MMC_CMD_WRITE_BLOCK)
+                msdc_abort_handler(host,1);
+        }
+        cur_rdly = (orig_rdly + rdly + 1) % 32;
+        MSDC_SET_FIELD(EMMC_TOP_CMD, PAD_CMD_RXDLY, cur_rdly);
+    } while (++rdly < 32);
+
+done:
+    dprintf(INFO, "[SD%d] <TUNE_CMD%d><%s>@msdc_tune_cmdrsp\n",
+            host->host_id, (cmd->opcode & (~(SD_CMD_BIT | SD_CMD_APP_BIT))),
+            (result == MMC_ERR_NONE) ? "PASS" : "FAIL");
+    return result;
+}
+#else
+/*
+ * msdc_tune_cmdrsp() - FPGA variant: retry @cmd scanning only the two
+ * response sample edges (no pad RX delay taps on FPGA).
+ *
+ * Fix: the final dprintf wrapped its format string and arguments in an
+ * extra pair of parentheses, forming a comma expression — only the
+ * trailing "PASS"/"FAIL" string reached dprintf as the format.
+ */
+int msdc_tune_cmdrsp(struct mmc_host *host, struct mmc_command *cmd)
+{
+    u32 base = host->base;
+    u32 rsmpl,cur_rsmpl, orig_rsmpl;
+    u8 hs400 = 0, orig_clkmode;
+    int result = MMC_ERR_CMDTUNEFAIL;
+
+    MSDC_GET_FIELD(MSDC_IOCON, MSDC_IOCON_RSPL, orig_rsmpl);
+    MSDC_GET_FIELD(MSDC_CFG, MSDC_CFG_CKMOD, orig_clkmode);
+
+    hs400 = (orig_clkmode == 3) ? 1 : 0;
+
+    for (rsmpl = 0; rsmpl < 2; rsmpl++) {
+        cur_rsmpl = (orig_rsmpl + rsmpl) % 2;
+        msdc_set_smpl(host, hs400, cur_rsmpl, TYPE_CMD_RESP_EDGE);
+        /* at identification clock rates, force rising-edge sampling */
+        if (host->cur_bus_clk <= 400000){
+            MSDC_SET_FIELD(MSDC_IOCON, MSDC_IOCON_RSPL, 0);
+        }
+        if (cmd->opcode != MMC_CMD_STOP_TRANSMISSION) {
+            result = msdc_send_cmd(host, cmd);
+            if(result == MMC_ERR_TIMEOUT)
+                rsmpl--;    /* timeouts don't consume a sample-edge try */
+            if (result != MMC_ERR_NONE && cmd->opcode != MMC_CMD_STOP_TRANSMISSION){
+                if(cmd->opcode == MMC_CMD_READ_MULTIPLE_BLOCK ||
+                   cmd->opcode == MMC_CMD_WRITE_MULTIPLE_BLOCK ||
+                   cmd->opcode == MMC_CMD_READ_SINGLE_BLOCK ||
+                   cmd->opcode == MMC_CMD_WRITE_BLOCK)
+                    msdc_abort_handler(host,1);
+                continue;
+            }
+            result = msdc_wait_cmd_done(host, cmd);
+        } else if (cmd->opcode == MMC_CMD_STOP_TRANSMISSION){
+            result = MMC_ERR_NONE;
+            goto done;
+        }
+        else
+            result = MMC_ERR_BADCRC;
+
+        if (result == MMC_ERR_NONE) {
+            goto done;
+        }
+
+        if(cmd->opcode == MMC_CMD_READ_MULTIPLE_BLOCK ||
+           cmd->opcode == MMC_CMD_WRITE_MULTIPLE_BLOCK ||
+           cmd->opcode == MMC_CMD_READ_SINGLE_BLOCK ||
+           cmd->opcode == MMC_CMD_WRITE_BLOCK)
+            msdc_abort_handler(host,1);
+    }
+
+done:
+    dprintf(INFO, "[SD%d] <TUNE_CMD%d><%s>@msdc_tune_cmdrsp\n",
+            host->host_id, (cmd->opcode & (~(SD_CMD_BIT | SD_CMD_APP_BIT))),
+            (result == MMC_ERR_NONE) ? "PASS" : "FAIL");
+    return result;
+}
+#endif
+#endif
+
+#if defined(FEATURE_MMC_RD_TUNING)
+#ifndef FPGA_PLATFORM
+/*
+ * msdc_tune_bread() - retry a block read while scanning the read-data
+ * sample edge (2 values) and the DAT pad RX delay (32 taps).
+ *
+ * Succeeds only when the read completes AND the per-line CRC status
+ * (SDC_DCRC_STS) is clean; in non-DDR modes the falling-edge CRC bits
+ * are ignored.  Command-path failures abort the scan immediately.
+ *
+ * Fixes: `dcrc` is now initialized (it was read uninitialized in the
+ * final log when the scan bailed out before the first CRC check), and
+ * the final dprintf no longer wraps format+args in extra parentheses
+ * (a comma expression that passed only "PASS"/"FAIL" to dprintf).
+ */
+int msdc_tune_bread(struct mmc_host *host, uchar *dst, u32 src, u32 nblks)
+{
+    u32 base = host->base;
+    u32 cur_rxdly0, org_rxdly0;
+    u32 rxdly = 0;
+    u32 rdsmpl, cur_rdsmpl, orig_rdsmpl;
+    u32 dcrc = 0, ddr = 0;
+    u8 hs400 = 0;
+    u32 orig_clkmode;
+    int result = MMC_ERR_READTUNEFAIL;
+
+    MSDC_GET_FIELD(MSDC_CFG, MSDC_CFG_CKMOD, orig_clkmode);
+    ddr = (orig_clkmode == 2) ? 1 : 0;
+    hs400 = (orig_clkmode == 3) ? 1 : 0;
+
+    MSDC_GET_FIELD(MSDC_IOCON, MSDC_IOCON_R_D_SMPL, orig_rdsmpl);
+    MSDC_SET_BIT32(EMMC_TOP_CONTROL, PAD_DAT_RD_RXDLY_SEL);
+    MSDC_GET_FIELD(EMMC_TOP_CONTROL, PAD_DAT_RD_RXDLY, org_rxdly0);
+
+    do {
+        for (rdsmpl = 0; rdsmpl < 2; rdsmpl++) {
+            cur_rdsmpl = (orig_rdsmpl + rdsmpl) % 2;
+            msdc_set_smpl(host, hs400, cur_rdsmpl, TYPE_READ_DATA_EDGE);
+
+            result = host->blk_read(host, dst, src, nblks);
+            /* command-path errors can't be fixed by data-edge tuning */
+            if (result == MMC_ERR_CMDTUNEFAIL ||
+                result == MMC_ERR_CMD_RSPCRC ||
+                result == MMC_ERR_ACMD_RSPCRC)
+                goto done;
+
+            MSDC_GET_FIELD(SDC_DCRC_STS, SDC_DCRC_STS_POS|SDC_DCRC_STS_NEG, dcrc);
+            if (!ddr) dcrc &= ~SDC_DCRC_STS_NEG;
+
+            /* no crc error in this data line */
+            if (result == MMC_ERR_NONE && dcrc == 0) {
+                goto done;
+            } else {
+                result = MMC_ERR_BADCRC;
+            }
+        }
+
+        cur_rxdly0 = (org_rxdly0 + rxdly + 1) % 32;
+        MSDC_SET_FIELD(EMMC_TOP_CONTROL, PAD_DAT_RD_RXDLY, cur_rxdly0);
+
+    } while (++rxdly < 32);
+
+done:
+    dprintf(INFO, "[SD%d] <msdc_tune_bread<%s><cmd%d>@msdc_tune_bread\n",
+            host->host_id, (result == MMC_ERR_NONE && dcrc == 0) ? "PASS" : "FAIL",
+            (nblks == 1 ? 17 : 18));
+
+    return result;
+}
+#else
+/*
+ * msdc_tune_bread() - FPGA variant: retry a block read scanning only the
+ * two read-data sample edges (no pad RX delay taps on FPGA).
+ *
+ * Fixes: `dcrc` is now initialized (it was read uninitialized in the
+ * final log when the scan bailed out early), the unused `rxdly` local is
+ * gone, and the final dprintf no longer wraps format+args in extra
+ * parentheses (a comma expression that passed only "PASS"/"FAIL").
+ */
+int msdc_tune_bread(struct mmc_host *host, uchar *dst, u32 src, u32 nblks)
+{
+    u32 base = host->base;
+    u32 rdsmpl, cur_rdsmpl, orig_rdsmpl;
+    u32 dcrc = 0, ddr = 0;
+    u8 hs400 = 0;
+    u32 orig_clkmode;
+    int result = MMC_ERR_READTUNEFAIL;
+
+    MSDC_GET_FIELD(MSDC_CFG, MSDC_CFG_CKMOD, orig_clkmode);
+    ddr = (orig_clkmode == 2) ? 1 : 0;
+    hs400 = (orig_clkmode == 3) ? 1 : 0;
+
+    MSDC_GET_FIELD(MSDC_IOCON, MSDC_IOCON_R_D_SMPL, orig_rdsmpl);
+
+    for (rdsmpl = 0; rdsmpl < 2; rdsmpl++) {
+        cur_rdsmpl = (orig_rdsmpl + rdsmpl) % 2;
+        msdc_set_smpl(host, hs400, cur_rdsmpl, TYPE_READ_DATA_EDGE);
+
+        result = host->blk_read(host, dst, src, nblks);
+        /* command-path errors can't be fixed by data-edge tuning */
+        if (result == MMC_ERR_CMDTUNEFAIL ||
+            result == MMC_ERR_CMD_RSPCRC ||
+            result == MMC_ERR_ACMD_RSPCRC)
+            goto done;
+
+        MSDC_GET_FIELD(SDC_DCRC_STS, SDC_DCRC_STS_POS|SDC_DCRC_STS_NEG, dcrc);
+        if (!ddr) dcrc &= ~SDC_DCRC_STS_NEG;
+
+        /* no crc error in this data line */
+        if (result == MMC_ERR_NONE && dcrc == 0) {
+            goto done;
+        } else {
+            result = MMC_ERR_BADCRC;
+        }
+    }
+
+done:
+    dprintf(INFO, "[SD%d] <msdc_tune_bread<%s><cmd%d>@msdc_tune_bread\n",
+            host->host_id, (result == MMC_ERR_NONE && dcrc == 0) ? "PASS" : "FAIL",
+            (nblks == 1 ? 17 : 18));
+
+    return result;
+}
+#endif
+
+#define READ_TUNING_MAX_HS (2 * 32)
+#define READ_TUNING_MAX_UHS (2 * 32)
+#define READ_TUNING_MAX_UHS_CLKMOD1 (2 * 32)
+
+#ifndef FPGA_PLATFORM
+/*
+ * msdc_tune_read - advance the read tuning state by one step.
+ *
+ * Each call flips the data sampling edge; once both edges have been
+ * tried (cur_dsmpl >= 2) the PAD_DAT_RD_RXDLY delay is advanced by one.
+ * Returns MMC_ERR_READTUNEFAIL after the delay range is exhausted.
+ */
+int msdc_tune_read(struct mmc_host *host)
+{
+    u32 base = host->base;
+    u32 cur_rxdly0, org_rxdly0;
+    u32 cur_dsmpl, orig_dsmpl;
+    u32 dcrc = 0, ddr = 0; /* fix: dcrc was read uninitialized in the dprintf below */
+    u8 hs400 = 0;
+    u32 orig_clkmode;
+    int result = MMC_ERR_NONE;
+
+    MSDC_GET_FIELD(MSDC_CFG, MSDC_CFG_CKMOD, orig_clkmode);
+    ddr = (orig_clkmode == 2) ? 1 : 0;
+    hs400 = (orig_clkmode == 3) ? 1 : 0;
+
+    MSDC_GET_FIELD(MSDC_IOCON, MSDC_IOCON_R_D_SMPL, orig_dsmpl);
+    MSDC_SET_BIT32(EMMC_TOP_CONTROL, PAD_DAT_RD_RXDLY_SEL);
+    MSDC_GET_FIELD(EMMC_TOP_CONTROL, PAD_DAT_RD_RXDLY, org_rxdly0);
+    /* fix: cur_rxdly0 was incremented without ever being initialized (UB);
+     * base the delay stepping on the current register value instead. */
+    cur_rxdly0 = org_rxdly0;
+
+    cur_dsmpl = (orig_dsmpl + 1) ;
+    msdc_set_smpl(host, hs400, (cur_dsmpl % 2), TYPE_READ_DATA_EDGE);
+    ++(host->time_read);
+    if (cur_dsmpl >= 2){
+        cur_rxdly0++;
+        MSDC_SET_FIELD(EMMC_TOP_CONTROL, PAD_DAT_RD_RXDLY, cur_rxdly0 % 32);
+        if (cur_rxdly0 > 32)
+            result = MMC_ERR_READTUNEFAIL;
+    }
+
+    dprintf(INFO,("[SD%d] <msdc_tune_read <%s> @msdc_tune_bread\n",
+        host->host_id, (result == MMC_ERR_NONE && dcrc == 0) ?"PASS" : "FAIL"));
+    return result;
+}
+#else
+/*
+ * msdc_tune_read (FPGA build) - advance the read tuning state by one step.
+ *
+ * Only the sampling edge can be tuned on FPGA (no RX delay line); after
+ * both edges have been tried the function reports MMC_ERR_READTUNEFAIL.
+ */
+int msdc_tune_read(struct mmc_host *host)
+{
+    u32 base = host->base;
+    u32 cur_dsmpl, orig_dsmpl;
+    u32 dcrc = 0, ddr = 0; /* fix: dcrc was read uninitialized in the dprintf below */
+    u8 hs400 = 0;
+    u32 orig_clkmode;
+    int result = MMC_ERR_NONE;
+
+    MSDC_GET_FIELD(MSDC_CFG, MSDC_CFG_CKMOD, orig_clkmode);
+    ddr = (orig_clkmode == 2) ? 1 : 0;
+    hs400 = (orig_clkmode == 3) ? 1 : 0;
+
+    MSDC_GET_FIELD(MSDC_IOCON, MSDC_IOCON_R_D_SMPL, orig_dsmpl);
+
+    cur_dsmpl = (orig_dsmpl + 1) ;
+    msdc_set_smpl(host, hs400, (cur_dsmpl % 2), TYPE_READ_DATA_EDGE);
+    ++(host->time_read);
+    if (cur_dsmpl >= 2){
+        result = MMC_ERR_READTUNEFAIL;
+    }
+
+    dprintf(INFO,("[SD%d] <msdc_tune_read <%s> @msdc_tune_bread\n",
+        host->host_id, (result == MMC_ERR_NONE && dcrc == 0) ?"PASS" : "FAIL"));
+    return result;
+}
+#endif
+#endif /* end of FEATURE_MMC_RD_TUNING */
+
+#if defined(FEATURE_MMC_WR_TUNING)
+#ifndef FPGA_PLATFORM
+/* Chaotian, make read/write tune flow the same */
+int msdc_tune_bwrite(struct mmc_host *host, u32 dst, uchar *src, u32 nblks)
+{
+ u32 base = host->base;
+ u32 cur_rxdly0, org_rxdly0;
+ u32 rxdly = 0;
+ u32 rdsmpl, cur_rdsmpl, orig_rdsmpl;
+ u32 dcrc, ddr = 0;
+ u8 hs400 = 0;
+ u32 orig_clkmode;
+ int result = MMC_ERR_READTUNEFAIL;
+
+ MSDC_GET_FIELD(MSDC_CFG, MSDC_CFG_CKMOD, orig_clkmode);
+ ddr = (orig_clkmode == 2) ? 1 : 0;
+ hs400 = (orig_clkmode == 3) ? 1 : 0;
+
+ MSDC_GET_FIELD(MSDC_IOCON, MSDC_IOCON_R_D_SMPL, orig_rdsmpl);
+ MSDC_SET_BIT32(EMMC_TOP_CONTROL, PAD_DAT_RD_RXDLY_SEL);
+ MSDC_GET_FIELD(EMMC_TOP_CONTROL, PAD_DAT_RD_RXDLY, org_rxdly0);
+
+ do {
+ for (rdsmpl = 0; rdsmpl < 2; rdsmpl++) {
+ cur_rdsmpl = (orig_rdsmpl + rdsmpl) % 2;
+ msdc_set_smpl(host, hs400, cur_rdsmpl, TYPE_READ_DATA_EDGE);
+
+ result = host->blk_write(host, dst, src, nblks);
+ if (result == MMC_ERR_CMDTUNEFAIL ||
+ result == MMC_ERR_CMD_RSPCRC ||
+ result == MMC_ERR_ACMD_RSPCRC)
+ goto done;
+
+ MSDC_GET_FIELD(SDC_DCRC_STS, SDC_DCRC_STS_POS|SDC_DCRC_STS_NEG, dcrc);
+ if (!ddr) dcrc &= ~SDC_DCRC_STS_NEG;
+
+ /* no crc error in this data line */
+ if (result == MMC_ERR_NONE && dcrc == 0) {
+ goto done;
+ } else {
+ result = MMC_ERR_BADCRC;
+ }
+ }
+
+ cur_rxdly0 = (org_rxdly0 + rxdly + 1) % 32;
+ MSDC_SET_FIELD(EMMC_TOP_CONTROL, PAD_DAT_RD_RXDLY, cur_rxdly0);
+
+ } while (++rxdly < 32);
+
+done:
+ dprintf(INFO,("[SD%d] <msdc_tune_bread<%s><cmd%d>@msdc_tune_bread\n",
+ host->host_id, (result == MMC_ERR_NONE && dcrc == 0) ?"PASS" : "FAIL", (nblks == 1 ? 17 : 18)));
+
+ return result;
+}
+#else
+/*
+ * msdc_tune_bwrite (FPGA build) - tune only the data sampling edge.
+ *
+ * No RX delay line on FPGA; just the two sampling edges are tried.
+ * Return semantics match the silicon variant.
+ */
+int msdc_tune_bwrite(struct mmc_host *host, u32 dst, uchar *src, u32 nblks)
+{
+    u32 base = host->base;
+    u32 rdsmpl, cur_rdsmpl, orig_rdsmpl;
+    u32 dcrc = 0, ddr = 0; /* fix: dcrc is read at 'done' even on early bail-out */
+    u8 hs400 = 0;
+    u32 orig_clkmode;
+    int result = MMC_ERR_READTUNEFAIL;
+
+    MSDC_GET_FIELD(MSDC_CFG, MSDC_CFG_CKMOD, orig_clkmode);
+    ddr = (orig_clkmode == 2) ? 1 : 0;
+    hs400 = (orig_clkmode == 3) ? 1 : 0;
+
+    MSDC_GET_FIELD(MSDC_IOCON, MSDC_IOCON_R_D_SMPL, orig_rdsmpl);
+
+    for (rdsmpl = 0; rdsmpl < 2; rdsmpl++) {
+        cur_rdsmpl = (orig_rdsmpl + rdsmpl) % 2;
+        msdc_set_smpl(host, hs400, cur_rdsmpl, TYPE_READ_DATA_EDGE);
+
+        result = host->blk_write(host, dst, src, nblks);
+        /* command-stage failure: abort, data-path tuning cannot fix it */
+        if (result == MMC_ERR_CMDTUNEFAIL ||
+            result == MMC_ERR_CMD_RSPCRC ||
+            result == MMC_ERR_ACMD_RSPCRC)
+            goto done;
+
+        MSDC_GET_FIELD(SDC_DCRC_STS, SDC_DCRC_STS_POS|SDC_DCRC_STS_NEG, dcrc);
+        if (!ddr) dcrc &= ~SDC_DCRC_STS_NEG; /* NEG-edge CRC only relevant in DDR */
+
+        /* no crc error in this data line */
+        if (result == MMC_ERR_NONE && dcrc == 0) {
+            goto done;
+        } else {
+            result = MMC_ERR_BADCRC;
+        }
+    }
+
+done:
+    dprintf(INFO,("[SD%d] <msdc_tune_bwrite<%s><cmd%d>@msdc_tune_bwrite\n",
+        host->host_id, (result == MMC_ERR_NONE && dcrc == 0) ?"PASS" : "FAIL", (nblks == 1 ? 17 : 18)));
+
+    return result;
+}
+#endif
+#endif /* end of FEATURE_MMC_WR_TUNING */
+
+/*
+ * msdc_emmc_boot_stop - take the controller out of eMMC boot mode.
+ *
+ * Issues the boot-stop command sequence (steps 5-7 of the driver's boot
+ * exit procedure), polls EMMC_STS until the device leaves boot state
+ * (bounded at 1000 iterations of spin(1000) — presumably microseconds,
+ * i.e. ~1 s total; TODO confirm spin() units), then disables boot
+ * support and clears the latched status bits.
+ */
+void msdc_emmc_boot_stop(struct mmc_host *host)
+{
+    addr_t base = host->base;
+    u32 count = 0;
+
+    /* Step5. stop the boot mode */
+    MSDC_WRITE32(SDC_ARG, 0x00000000);
+    MSDC_WRITE32(SDC_CMD, 0x00001000);
+
+    MSDC_SET_FIELD(EMMC_CFG0, EMMC_CFG0_BOOTWDLY, 2);
+    MSDC_SET_BIT32(EMMC_CFG0, EMMC_CFG0_BOOTSTOP);
+    /* wait for the boot state machine to stop, but never hang forever */
+    while (MSDC_READ32(EMMC_STS) & EMMC_STS_BOOTUPSTATE) {
+        spin(1000);
+        count++;
+        if (count >= 1000) {
+            dprintf(ALWAYS, "Timeout to wait EMMC to leave boot state!\n");
+            break;
+        }
+    }
+
+    /* Step6. */
+    MSDC_CLR_BIT32(EMMC_CFG0, EMMC_CFG0_BOOTSUPP);
+
+    /* Step7. clear EMMC_STS bits (write-1-to-clear) — TODO confirm W1C semantics */
+    MSDC_WRITE32(EMMC_STS, MSDC_READ32(EMMC_STS));
+}
+
+/*
+ * msdc_init - one-time MSDC host controller initialization.
+ *
+ * Fills in @host (base addresses, clock sources, capabilities, transfer
+ * callbacks), stops eMMC boot mode, cycles card power, resets the
+ * controller, programs tuning/patch registers and leaves the bus at
+ * 1-bit width and minimum clock, ready for card enumeration.
+ * Always returns 0.
+ */
+int msdc_init(struct mmc_host *host)
+{
+    addr_t base = host->host_id ? MSDC1_BASE : MSDC0_BASE; /* only support MSDC0, MSDC1 */
+    addr_t top_baddr = host->host_id ? MSDC1_TOP_BASE : MSDC0_TOP_BASE;
+    msdc_priv_t *priv;
+
+    dprintf(INFO, "[%s]: Host controller intialization start \n", __func__);
+
+    priv = &msdc_priv;
+    memset(priv, 0, sizeof(msdc_priv_t));
+
+    host->base = base;
+    host->top_base = top_baddr;
+    host->clksrc = msdc_cap[host->host_id].clk_src;
+    host->hclksrc = msdc_cap[host->host_id].hclk_src;
+#ifndef FPGA_PLATFORM
+    host->f_max = hclks_msdc30[host->clksrc];
+#else
+    host->f_max = MSDC_MAX_SCLK;
+#endif
+
+    host->f_min = MSDC_MIN_SCLK;
+    host->blklen = 0;
+    host->priv = (void *)priv;
+    host->caps = MMC_CAP_MULTIWRITE;
+
+    /* translate per-board capability flags into MMC core capabilities */
+    if (msdc_cap[host->host_id].flags & MSDC_HIGHSPEED)
+        host->caps |= (MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED);
+    if (msdc_cap[host->host_id].flags & MSDC_DDR)
+        host->caps |= MMC_CAP_DDR;
+    if (msdc_cap[host->host_id].data_pins == 4)
+        host->caps |= MMC_CAP_4_BIT_DATA;
+    if (msdc_cap[host->host_id].data_pins == 8)
+        host->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
+    if (msdc_cap[host->host_id].flags & MSDC_HS200)
+        host->caps |= MMC_CAP_EMMC_HS200;
+    if (msdc_cap[host->host_id].flags & MSDC_HS400)
+        host->caps |= MMC_CAP_EMMC_HS400;
+
+    host->ocr_avail = MMC_VDD_32_33; /* TODO: To be customized */
+
+    /* Configure BASIC_DMA + AUTOCMD12 for better R/W performance
+     * NOTE: ACMD23 only support transferring size of up to 32M */
+    priv->autocmd = MSDC_AUTOCMD12;
+    if (priv->autocmd == MSDC_AUTOCMD23)
+        /* The maximal transferring size is size of *[15:0] number of blocks* */
+        host->max_phys_segs = 0xffff;
+    else
+        /* The maximal transferring size is size of DMA_LENGTH */
+        host->max_phys_segs = (UINT_MAX & ~511) >> MMC_BLOCK_BITS_SHFT;
+
+    priv->rdsmpl = msdc_cap[host->host_id].data_edge;
+    priv->wdsmpl = msdc_cap[host->host_id].data_edge;
+    priv->rsmpl = msdc_cap[host->host_id].cmd_edge;
+
+#ifdef MSDC_USE_DMA_MODE
+    host->blk_read = msdc_dma_bread;
+    host->blk_write = msdc_dma_bwrite;
+    dprintf(INFO, "Transfer method: DMA\n");
+#else
+    host->blk_read = msdc_pio_bread;
+    host->blk_write = msdc_pio_bwrite;
+    dprintf(INFO, "Transfer method: PIO\n");
+#endif
+
+    /* fix: dropped duplicate priv->rdsmpl / priv->rsmpl assignments that
+     * repeated the identical lines above with nothing changed in between */
+
+    /* disable EMMC boot mode */
+    msdc_emmc_boot_stop(host);
+
+    msdc_power(host, MMC_POWER_OFF);
+    msdc_power(host, MMC_POWER_ON);
+
+    /* set to SD/MMC mode */
+    MSDC_SET_FIELD(MSDC_CFG, MSDC_CFG_MODE, MSDC_SDMMC);
+    MSDC_SET_BIT32(MSDC_CFG, MSDC_CFG_PIO);
+    MSDC_SET_BIT32(MSDC_CFG, MSDC_CFG_CKPDN);
+
+    MSDC_RESET();
+    MSDC_CLR_FIFO();
+    MSDC_CLR_INT();
+
+    /* reset tuning parameter */
+    //MSDC_WRITE32(MSDC_PAD_CTL0, 0x0098000);
+    //MSDC_WRITE32(MSDC_PAD_CTL1, 0x00A0000);
+    //MSDC_WRITE32(MSDC_PAD_CTL2, 0x00A0000);
+    MSDC_WRITE32(MSDC_PAD_TUNE, 0x0000000);
+    MSDC_WRITE32(MSDC_DAT_RDDLY0, 0x00000000);
+    MSDC_WRITE32(MSDC_DAT_RDDLY1, 0x00000000);
+    MSDC_WRITE32(MSDC_IOCON, 0x00000000);
+
+    MSDC_SET_BIT32(MSDC_PAD_TUNE, MSDC_PAD_TUNE_DATRRDLYSEL);
+    MSDC_SET_BIT32(MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLYSEL);
+    MSDC_WRITE32(MSDC_PATCH_BIT0, 0x403c0046);
+    MSDC_WRITE32(MSDC_PATCH_BIT1, 0xFFFF4309);//High 16 bit = 0 mean Power KPI is on, enable ECO for write timeout issue
+    MSDC_SET_BIT32(EMMC50_CFG0, MSDC_EMMC50_CFG_CRCSTS_SEL);
+    MSDC_CLR_BIT32(SDC_FIFO_CFG, SDC_FIFO_CFG_WRVALIDSEL);
+    MSDC_CLR_BIT32(SDC_FIFO_CFG, SDC_FIFO_CFG_RDVALIDSEL);
+    MSDC_SET_BIT32(SDC_AVG_CFG0, SDC_RX_ENHANCE_EN);
+    MSDC_SET_BIT32(MSDC_PATCH_BIT2, MSDC_PB2_CFGCRCSTS);
+    MSDC_CLR_BIT32(MSDC_PATCH_BIT1, MSDC_BUSY_CHECK_SEL); /* disable busy check */
+    //MSDC_PATCH_BIT1YD:WRDAT_CRCS_TA_CNTR need fix to 3'001 by default,(<50MHz) (>=50MHz set 3'001 as initial value is OK for tunning)
+    //YD:CMD_RSP_TA_CNTR need fix to 3'001 by default(<50MHz)(>=50MHz set 3'001as initial value is OK for tunning)
+    /* 2012-01-07 using internal clock instead of feedback clock */
+    //MSDC_SET_BIT32(MSDC_PATCH_BIT0, MSDC_CKGEN_MSDC_CK_SEL);
+
+#ifdef MSDC_USE_PATCH_BIT2_TURNING_WITH_ASYNC
+    MSDC_SET_FIELD(MSDC_PATCH_BIT2, MSDC_PB2_CFGCRCSTS,1);
+    MSDC_SET_FIELD(MSDC_PATCH_BIT2, MSDC_PB2_CFGRESP,0);
+#else
+    MSDC_SET_FIELD(MSDC_PATCH_BIT2, MSDC_PB2_CFGCRCSTS,0);
+    MSDC_SET_FIELD(MSDC_PATCH_BIT2, MSDC_PB2_CFGRESP,1);
+#endif
+
+    /* enable wake up events */
+    //MSDC_SET_BIT32(SDC_CFG, SDC_CFG_INSWKUP);
+
+#ifndef FPGA_PLATFORM
+    msdc_gpio_and_pad_init(host);
+#endif
+    /* set sampling edge */
+    MSDC_SET_FIELD(MSDC_IOCON, MSDC_IOCON_RSPL, msdc_cap[host->host_id].cmd_edge);
+    MSDC_SET_FIELD(MSDC_IOCON, MSDC_IOCON_DSPL, msdc_cap[host->host_id].data_edge);
+
+    /* write crc timeout detection */
+    MSDC_SET_FIELD(MSDC_PATCH_BIT0, MSDC_PB0_DETWR_CRCTMO, 1);
+
+    msdc_set_startbit(host, START_AT_RISING);
+
+    /* start narrow and slow; the core widens/speeds up after enumeration */
+    msdc_config_bus(host, HOST_BUS_WIDTH_1);
+    msdc_config_clock(host, 0, MSDC_MIN_SCLK);
+    msdc_set_timeout(host, 100000000, 0);
+
+    /* disable SDIO func */
+    MSDC_SET_FIELD(SDC_CFG, SDC_CFG_SDIO, 0);
+    MSDC_SET_FIELD(SDC_CFG, SDC_CFG_SDIOIDE, 0);
+    MSDC_SET_FIELD(SDC_CFG, SDC_CFG_INSWKUP, 0);
+
+    /* Clear all interrupts first */
+    MSDC_CLR_INT();
+    MSDC_WRITE32(MSDC_INTEN, 0);
+
+#ifdef MSDC_USE_DMA_MODE
+    /* Register msdc irq */
+    mt_irq_set_sens(MT_MSDC0_IRQ_ID + host->host_id, LEVEL_SENSITIVE);
+    mt_irq_set_polarity(MT_MSDC0_IRQ_ID + host->host_id, MT65xx_POLARITY_LOW);
+    event_init(&msdc_int_event, false, EVENT_FLAG_AUTOUNSIGNAL);
+    register_int_handler(MT_MSDC0_IRQ_ID + host->host_id, msdc_interrupt_handler, host);
+    unmask_interrupt(MT_MSDC0_IRQ_ID + host->host_id);
+#endif
+
+    dprintf(INFO, "[%s]: Host controller intialization done\n", __func__);
+    return 0;
+}
+
diff --git a/src/bsp/lk/platform/mt8512/drivers/mmc/rules.mk b/src/bsp/lk/platform/mt8512/drivers/mmc/rules.mk
new file mode 100644
index 0000000..bb46477
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/mmc/rules.mk
@@ -0,0 +1,12 @@
+# Build rules for the MSDC MMC/SD host controller driver module.
+LOCAL_DIR := $(GET_LOCAL_DIR)
+MODULE := $(LOCAL_DIR)
+
+# Host controller glue (msdc.c) and the MMC protocol core (mmc_core.c).
+MODULE_SRCS += \
+	$(LOCAL_DIR)/msdc.c \
+	$(LOCAL_DIR)/mmc_core.c \
+
+# Block I/O and partition-table layers the driver registers with.
+MODULE_DEPS += \
+	lib/bio \
+	lib/partition \
+
+include make/module.mk
diff --git a/src/bsp/lk/platform/mt8512/drivers/nand/rules.mk b/src/bsp/lk/platform/mt8512/drivers/nand/rules.mk
new file mode 100644
index 0000000..b08c9a7
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nand/rules.mk
@@ -0,0 +1,26 @@
+# Build rules for the SLC NAND driver module.
+LOCAL_DIR := $(GET_LOCAL_DIR)
+MODULE := $(LOCAL_DIR)
+
+# NOTE(review): -Werror (and the specific -Werror= variants) are stripped
+# for this module only, so existing warnings in the vendor NAND code do
+# not break the build. Consider fixing the warnings and re-enabling.
+CFLAGS := $(filter-out -Werror, $(CFLAGS))
+GLOBAL_CFLAGS := $(filter-out -Werror, $(GLOBAL_CFLAGS))
+
+CFLAGS := $(filter-out -Werror=return-type, $(CFLAGS))
+GLOBAL_CFLAGS := $(filter-out -Werror=return-type, $(GLOBAL_CFLAGS))
+
+CFLAGS := $(filter-out -Werror=implicit-function-declaration, $(CFLAGS))
+GLOBAL_CFLAGS := $(filter-out -Werror=implicit-function-declaration, $(GLOBAL_CFLAGS))
+
+# Block I/O and partition-table layers the driver registers with.
+MODULE_DEPS += \
+	lib/bio \
+	lib/partition \
+
+# Core SLC driver, chip ID table, ECC, bad-block table, NFI controller, tests.
+MODULE_SRCS += \
+	$(LOCAL_DIR)/slc_bdev.c \
+	$(LOCAL_DIR)/slc/slc.c \
+	$(LOCAL_DIR)/slc/slc_ids.c \
+	$(LOCAL_DIR)/slc/ecc/ecc.c \
+	$(LOCAL_DIR)/slc/bbt/bbt.c \
+	$(LOCAL_DIR)/slc/nfi/nfi.c \
+	$(LOCAL_DIR)/slc/test/slc_test.c \
+
+include make/module.mk
diff --git a/src/bsp/lk/platform/mt8512/drivers/nand/slc/bbt/bbt.c b/src/bsp/lk/platform/mt8512/drivers/nand/slc/bbt/bbt.c
new file mode 100644
index 0000000..68df306
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nand/slc/bbt/bbt.c
@@ -0,0 +1,1433 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* Description:
+ *
+ * When nand_scan_bbt is called, then it tries to find the bad block table
+ * depending on the options in the BBT descriptor(s). If no flash based BBT
+ * (NAND_BBT_USE_FLASH) is specified then the device is scanned for factory
+ * marked good / bad blocks. This information is used to create a memory BBT.
+ * Once a new bad block is discovered then the "factory" information is updated
+ * on the device.
+ * If a flash based BBT is specified then the function first tries to find the
+ * BBT on flash. If a BBT is found then the contents are read and the memory
+ * based BBT is created. If a mirrored BBT is selected then the mirror is
+ * searched too and the versions are compared. If the mirror has a greater
+ * version number, then the mirror BBT is used to build the memory based BBT.
+ * If the tables are not versioned, then we "or" the bad block information.
+ * If one of the BBTs is out of date or does not exist it is (re)created.
+ * If no BBT exists at all then the device is scanned for factory marked
+ * good / bad blocks and the bad block tables are created.
+ *
+ * For manufacturer created BBTs like the one found on M-SYS DOC devices
+ * the BBT is searched and read but never created
+ *
+ * The auto generated bad block table is located in the last good blocks
+ * of the device. The table is mirrored, so it can be updated eventually.
+ * The table is marked in the OOB area with an ident pattern and a version
+ * number which indicates which of both tables is more up to date. If the NAND
+ * controller needs the complete OOB area for the ECC information then the
+ * option NAND_BBT_NO_OOB should be used (along with NAND_BBT_USE_FLASH, of
+ * course): it moves the ident pattern and the version byte into the data area
+ * and the OOB area will remain untouched.
+ *
+ * The table uses 2 bits per block
+ * 11b: block is good
+ * 00b: block is factory marked bad
+ * 01b, 10b: block is marked bad due to wear
+ *
+ * The memory bad block table uses the following scheme:
+ * 00b: block is good
+ * 01b: block is marked bad due to wear
+ * 10b: block is reserved (to protect the bbt area)
+ * 11b: block is factory marked bad
+ *
+ * Multichip devices like DOC store the bad block info per floor.
+ *
+ * Following assumptions are made:
+ * - bbts start at a page boundary, if autolocated on a block boundary
+ * - the space necessary for a bbt in FLASH does not exceed a block boundary
+ *
+ */
+#include "bbt.h"
+
+static int mtk_nand_update_bbt(struct mtk_nand_chip *chip, loff_t offs);
+
+/*
+ * Fetch the 2-bit in-memory BBT state for @block
+ * (00 good, 01 worn, 10 reserved, 11 factory bad).
+ */
+static inline uint8_t bbt_get_entry(struct mtk_nand_chip *chip, int block)
+{
+    int shift = (block & BBT_ENTRY_MASK) * 2;
+    return (chip->bbt[block >> BBT_ENTRY_SHIFT] >> shift) & BBT_ENTRY_MASK;
+}
+
+/* OR the 2-bit @mark into the in-memory BBT entry for @block. */
+static inline void bbt_mark_entry(struct mtk_nand_chip *chip, int block,
+        uint8_t mark)
+{
+    int shift = (block & BBT_ENTRY_MASK) * 2;
+    chip->bbt[block >> BBT_ENTRY_SHIFT] |= (uint8_t)((mark & BBT_ENTRY_MASK) << shift);
+}
+
+/*
+ * Match the BBT ident pattern at the very start of @buf (NO_OOB layout).
+ * Returns 0 on match, -1 otherwise.
+ */
+static int check_pattern_no_oob(uint8_t *buf, struct mtk_nand_bbt_descr *td)
+{
+    return nand_memcmp(buf, td->pattern, td->len) ? -1 : 0;
+}
+
+/**
+ * check_pattern - [GENERIC] check if a pattern is in the buffer
+ * @buf: the buffer to search
+ * @len: the length of buffer to search (unused here, kept for interface parity)
+ * @paglen: the pagelength (pattern sits at @paglen + td->offs in OOB layout)
+ * @td: search pattern descriptor
+ *
+ * Check for a pattern at the given place. Used to search bad block tables
+ * and good / bad block identifiers. Returns 0 on match, -1 otherwise.
+ */
+static int check_pattern(uint8_t *buf, int len, int paglen, struct mtk_nand_bbt_descr *td)
+{
+    if (td->options & NAND_BBT_NO_OOB)
+        return check_pattern_no_oob(buf, td);
+
+    return nand_memcmp(buf + paglen + td->offs, td->pattern, td->len) ? -1 : 0;
+}
+
+/**
+ * check_short_pattern - [GENERIC] check if a pattern is in the buffer
+ * @buf: the buffer to search
+ * @td: search pattern descriptor
+ *
+ * Same as check_pattern, but the pattern is expected at td->offs directly
+ * (no page offset) and there is no optional empty check.
+ * Returns 0 on match, -1 otherwise.
+ */
+static int check_short_pattern(uint8_t *buf, struct mtk_nand_bbt_descr *td)
+{
+    return nand_memcmp(buf + td->offs, td->pattern, td->len) ? -1 : 0;
+}
+
+/**
+ * add_marker_len - compute the length of the marker in data area
+ * @td: BBT descriptor used for computation
+ *
+ * The length will be 0 if the marker is located in OOB area; otherwise it
+ * is the ident-pattern length plus one version byte when versioning is on.
+ */
+static u32 add_marker_len(struct mtk_nand_bbt_descr *td)
+{
+    u32 marker = 0;
+
+    if (td->options & NAND_BBT_NO_OOB) {
+        marker = td->len;
+        if (td->options & NAND_BBT_VERSION)
+            marker++;
+    }
+    return marker;
+}
+
+/**
+ * read_bbt - [GENERIC] Read the bad block table starting from page
+ * @chip: NAND chip structure
+ * @buf: temporary buffer
+ * @page: the starting page
+ * @num: the number of bbt descriptors to read
+ * @td: the bbt description table
+ * @offs: block number offset in the table
+ *
+ * Read the bad block table starting from page and populate the in-memory
+ * table. Returns 0 on success or a negative error code from the read.
+ */
+static int read_bbt(struct mtk_nand_chip *chip, uint8_t *buf, int page, int num,
+        struct mtk_nand_bbt_descr *td, int offs)
+{
+    int ret = 0, i, j, act = 0;
+    size_t retlen, len, totlen;
+    loff_t from;
+    int bits = td->options & NAND_BBT_NRBITS_MSK;
+    uint8_t msk = (uint8_t)((1 << bits) - 1);
+    u32 marker_len;
+    int reserved_block_code = td->reserved_block_code;
+    struct mtk_nand_ops ops;
+
+    nand_debug("page:%d num:%d", page, num);
+
+    totlen = (num * bits) >> 3;
+    marker_len = add_marker_len(td);
+    from = ((loff_t)page) * chip->pagesize;
+
+    while (totlen) {
+        len = min(totlen, (size_t)(1 << chip->bbt_erase_shift));
+        if (marker_len) {
+            /*
+             * In case the BBT marker is not in the OOB area it
+             * will be just in the first page.
+             */
+            len -= marker_len;
+            from += marker_len;
+            marker_len = 0;
+        }
+        nand_memset(&ops, 0, sizeof(ops));
+        ops.mode = NAND_OPS_ECC_DMA_POLL;
+        ops.offset = (u64)from;
+        ops.len = (u64)max(len, chip->pagesize);
+        ops.datbuf = buf;
+
+        ret = mtk_nand_read(chip, &ops);
+        if (ret < 0) {
+            /* fix: was 'if (ret = -EBADMSG)' — assignment clobbered the
+             * error and made every failure look like an ECC error */
+            if (ret == -EBADMSG) {
+                nand_info("nand_bbt: ECC error in BBT at 0x%012llx",
+                        from & ~chip->pagesize);
+                return ret;
+            } else {
+                nand_info("nand_bbt: error reading BBT");
+                return ret;
+            }
+        }
+
+        /* Analyse data: each entry is 'bits' wide; msk == all-ones == good */
+        for (i = 0; i < len; i++) {
+            uint8_t dat = buf[i];
+            for (j = 0; j < 8; j += bits, act++) {
+                uint8_t tmp = (dat >> j) & msk;
+                if (tmp == msk)
+                    continue;
+                if (reserved_block_code && (tmp == reserved_block_code)) {
+                    nand_info("nand_read_bbt: reserved block at 0x%012llx",
+                            (loff_t)(offs + act) /chip->blocksize/* >> chip->bbt_erase_shift */);
+                    bbt_mark_entry(chip, offs + act,
+                            BBT_BLOCK_RESERVED);
+                    continue;
+                }
+                /*
+                 * Leave it for now, if it's matured we can
+                 * move this message to nand_debug.
+                 */
+                nand_debug("nand_read_bbt: bad block at 0x%x",
+                        (loff_t)(offs + act) /chip->blocksize/* >> chip->bbt_erase_shift */);
+                /* Factory marked bad or worn out? */
+                if (tmp == 0)
+                    bbt_mark_entry(chip, offs + act,
+                            BBT_BLOCK_FACTORY_BAD);
+                else
+                    bbt_mark_entry(chip, offs + act,
+                            BBT_BLOCK_WORN);
+            }
+        }
+        totlen -= len;
+        from += len;
+    }
+    return ret;
+}
+
+/**
+ * read_abs_bbt - [GENERIC] Read the bad block table starting at a given page
+ * @this: NAND chip structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @chip: read the table for a specific chip, -1 read all chips; applies only
+ *        if NAND_BBT_PERCHIP option is set
+ *
+ * Read the bad block table for all chips starting at a given page. We assume
+ * that the bbt bits are in consecutive order. Returns 0 on success or the
+ * first error from read_bbt().
+ */
+static int read_abs_bbt(struct mtk_nand_chip *this, uint8_t *buf, struct mtk_nand_bbt_descr *td, int chip)
+{
+    int res = 0, i;
+
+    if (td->options & NAND_BBT_PERCHIP) {
+        /* one table per physical chip: walk each chip's slice of the BBT */
+        int offs = 0;
+        for (i = 0; i < this->numchips; i++) {
+            if (chip == -1 || chip == i)
+                res = read_bbt(this, buf, td->pages[i],
+                        this->chipsize >> this->bbt_erase_shift,
+                        td, offs);
+            if (res)
+                return res;
+            offs += this->chipsize >> this->bbt_erase_shift;
+        }
+    } else {
+        /* single table covering the whole device */
+        res = read_bbt(this, buf, td->pages[0],
+                this->totalsize >> this->bbt_erase_shift, td, 0);
+        if (res)
+            return res;
+    }
+    return 0;
+}
+
+/* BBT marker is in the first page, no OOB */
+/*
+ * scan_read_data - read the BBT ident (and optional version byte) from
+ * the data area of the first page at @offs. Reads at least one full page
+ * (max(len, pagesize)) because the controller transfers whole pages.
+ * Returns the mtk_nand_read() result.
+ */
+static int scan_read_data(struct mtk_nand_chip *chip, uint8_t *buf, loff_t offs,
+        struct mtk_nand_bbt_descr *td)
+{
+    size_t retlen;
+    size_t len;
+    struct mtk_nand_ops ops;
+
+    len = td->len;
+    if (td->options & NAND_BBT_VERSION)
+        len++;   /* version byte follows the ident pattern */
+
+    nand_debug("len:0x%x, offs:0x%lx", len, offs);
+
+    nand_memset(&ops, 0, sizeof(ops));
+    ops.mode = NAND_OPS_ECC_DMA_POLL;
+    ops.offset = (u64)offs;
+    ops.len = (u64)max(len, chip->pagesize);
+    ops.datbuf = buf;
+
+    return mtk_nand_read(chip, &ops);
+}
+
+/**
+ * scan_read_oob - [GENERIC] Scan data+OOB region to buffer
+ * @chip: NAND chip structure
+ * @buf: temporary buffer
+ * @offs: offset at which to scan
+ * @len: length of data region to read
+ *
+ * Scan read data from data+OOB. Reads at least one full page
+ * (max(len, pagesize)). Returns the mtk_nand_read() result; the
+ * caller is expected to look at chip->oob_poi for the OOB bytes —
+ * NOTE(review): this implementation only fills the data buffer here,
+ * relying on the read op to populate OOB; confirm against mtk_nand_read.
+ */
+static int scan_read_oob(struct mtk_nand_chip *chip, uint8_t *buf, loff_t offs,
+        size_t len)
+{
+    struct mtk_nand_ops ops;
+
+    nand_debug("len:0x%x, offs:0x%lx", len, offs);
+
+    nand_memset(&ops, 0, sizeof(ops));
+    ops.mode = NAND_OPS_ECC_DMA_POLL;
+    ops.offset = (u64)offs;
+    ops.len = (u64)max(len, chip->pagesize);
+    ops.datbuf = buf;
+
+    return mtk_nand_read(chip, &ops);
+}
+
+/* Dispatch a BBT scan read to the no-OOB or data+OOB variant. */
+static int scan_read(struct mtk_nand_chip *chip, uint8_t *buf, loff_t offs,
+        size_t len, struct mtk_nand_bbt_descr *td)
+{
+    nand_debug("td->options:0x%x", td->options);
+    return (td->options & NAND_BBT_NO_OOB) ?
+        scan_read_data(chip, buf, offs, td) :
+        scan_read_oob(chip, buf, offs, len);
+}
+
+/* Scan write data with oob to flash */
+/*
+ * scan_write_bbt - write @len bytes of BBT data at @offs, together with
+ * the ident/version bytes in @oob (may be NULL for no-OOB layouts).
+ * Returns the mtk_nand_write() result.
+ */
+static int scan_write_bbt(struct mtk_nand_chip *chip, loff_t offs, size_t len,
+        uint8_t *buf, uint8_t *oob)
+{
+    struct mtk_nand_ops ops;
+
+    nand_debug("len:0x%x, offs:0x%lx", len, offs);
+
+    nand_memset(&ops, 0, sizeof(ops));
+    ops.mode = NAND_OPS_ECC_DMA_POLL;
+    ops.offset = offs;
+    ops.len = len;
+    ops.datbuf = buf;
+    ops.oobeccbuf = oob;
+
+    return mtk_nand_write(chip, &ops);
+}
+
+/*
+ * Offset of the BBT version byte within the scan buffer: directly at
+ * td->veroffs for NO_OOB layouts, otherwise past the page data (the
+ * version byte lives in OOB, which follows the page in the buffer).
+ */
+static u32 bbt_get_ver_offs(struct mtk_nand_chip *chip, struct mtk_nand_bbt_descr *td)
+{
+    if (td->options & NAND_BBT_NO_OOB)
+        return td->veroffs;
+    return td->veroffs + chip->pagesize;
+}
+
+/**
+ * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
+ * @this: NAND chip structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror (may be NULL)
+ *
+ * Read the version bytes of the primary and (if present) mirror bad block
+ * table so the caller can decide which copy is newer. We assume that the
+ * bbt bits are in consecutive order.
+ */
+static void read_abs_bbts(struct mtk_nand_chip *this, uint8_t *buf,
+        struct mtk_nand_bbt_descr *td, struct mtk_nand_bbt_descr *md)
+{
+    /* Read the primary version, if available */
+    if (td->options & NAND_BBT_VERSION) {
+        scan_read(this, buf, (loff_t)td->pages[0] * this->pagesize,
+                this->pagesize, td);
+        td->version[0] = buf[bbt_get_ver_offs(this, td)];
+        nand_info("Bad block table at page %d, version 0x%02X",
+                td->pages[0], td->version[0]);
+    }
+
+    /* Read the mirror version, if available */
+    if (md && (md->options & NAND_BBT_VERSION)) {
+        scan_read(this, buf, (loff_t)md->pages[0] * this->pagesize,
+                this->pagesize, md);
+        md->version[0] = buf[bbt_get_ver_offs(this, md)];
+        nand_info("Bad block table at page %d, version 0x%02X",
+                md->pages[0], md->version[0]);
+    }
+}
+
+/* Scan a given block partially */
+/*
+ * scan_block_fast - check the first @numpages pages of the block at @offs
+ * for a factory bad-block marker.
+ *
+ * Returns 1 if the block looks bad (marker found in chip->oob_poi, or the
+ * read reported an uncorrectable ECC error, which is treated as bad),
+ * 0 if the block looks good, or a negative error from the read.
+ */
+static int scan_block_fast(struct mtk_nand_chip *chip, struct mtk_nand_bbt_descr *bd,
+        loff_t offs, uint8_t *buf, int numpages)
+{
+    int j, ret;
+    struct mtk_nand_ops ops;
+
+    nand_memset(&ops, 0, sizeof(ops));
+    ops.mode = NAND_OPS_ECC_DMA_POLL;
+    ops.len = (u64)chip->pagesize;
+    ops.datbuf = buf;
+
+    nand_debug("numpages:0x%x, offs:%lx", numpages, offs);
+
+    for (j = 0; j < numpages; j++) {
+        ops.offset = (u64)offs;
+
+        /*
+         * Read the full oob until read_oob is fixed to handle single
+         * byte reads for 16 bit buswidth.
+         */
+        ret = mtk_nand_read(chip, &ops);
+        /* Ignore ECC errors when checking for BBM */
+        if (ret == -EBADMSG) {
+            nand_info("Found ECC at offs:0x%lx", offs);
+            return 1;
+        }
+
+        /* marker is checked in the OOB read back by the controller,
+         * not in the data buffer */
+        //if (check_short_pattern(buf, bd))
+        if (check_short_pattern(chip->oob_poi, bd))
+            return 1;
+
+        offs += chip->pagesize;
+    }
+    return 0;
+}
+
+/**
+ * create_bbt - [GENERIC] Create a bad block table by scanning the device
+ * @this: NAND chip structure
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
+ * @chip: create the table for a specific chip, -1 read all chips; applies only
+ *        if NAND_BBT_PERCHIP option is set
+ *
+ * Create a bad block table by scanning the device for the given good/bad
+ * block identify pattern. Returns 0 on success or a negative error code.
+ */
+static int create_bbt(struct mtk_nand_chip *this, uint8_t *buf,
+        struct mtk_nand_bbt_descr *bd, int chip)
+{
+    int i, numblocks, numpages;
+    int startblock;
+    loff_t from;
+
+    nand_info("Scanning device for bad blocks");
+
+    if (bd->options & NAND_BBT_SCAN2NDPAGE)
+        numpages = 2;
+    else
+        numpages = 1;
+
+    if (chip == -1) {
+        numblocks = this->totalsize/this->blocksize;
+        //startblock = 0;
+        //from = 0;
+        /* blocks 0..7 hold PL/LK boot images — deliberately not scanned.
+         * NOTE(review): 'from' stays 0 though, so the marker scan still
+         * starts at block 0 while marking begins at index 8 — confirm. */
+        startblock = 8; //from PL/LK address
+        from = 0;
+    } else {
+        if (chip >= this->numchips) {
+            nand_err("create_bbt(): chipnr (%d) > available chips (%d)",
+                    chip + 1, this->numchips);
+            return -EINVAL;
+        }
+        numblocks = this->totalsize/this->blocksize;
+        startblock = chip * numblocks;
+        numblocks += startblock;
+        from = (loff_t)startblock /this->blocksize/* >> this->bbt_erase_shift */;
+    }
+
+    if (this->bbt_options & NAND_BBT_SCANLASTPAGE)
+        from += this->blocksize - (this->pagesize * numpages);
+
+    for (i = startblock; i < numblocks; i++) {
+        int ret;
+
+        /* BUG_ON(bd->options & NAND_BBT_NO_OOB); */
+
+        ret = scan_block_fast(this, bd, from, buf, numpages);
+        if (ret < 0)
+            return ret;
+
+        if (ret) {
+            bbt_mark_entry(this, i, BBT_BLOCK_FACTORY_BAD);
+            /* fix: %x with an unsigned long argument is a printf
+             * format/argument mismatch (UB); use %lx */
+            nand_err("Bad eraseblock %d at 0x%lx",
+                    i, (unsigned long)from);
+            /* this->ecc_stats.badblocks++; */
+        }
+
+        from += this->blocksize;
+    }
+    return 0;
+}
+
+/**
+ * search_bbt - [GENERIC] scan the device for a specific bad block table
+ * @this: NAND chip structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ *
+ * Read the bad block table by searching for a given ident pattern. Search is
+ * performed either from the beginning up or from the end of the device
+ * downwards. The search starts always at the start of a block. If the option
+ * NAND_BBT_PERCHIP is given, each chip is searched for a bbt, which contains
+ * the bad block information of this chip. This is necessary to provide support
+ * for certain DOC devices.
+ *
+ * The bbt ident pattern resides in the oob area of the first page in a block.
+ */
+static int search_bbt(struct mtk_nand_chip *this, uint8_t *buf, struct mtk_nand_bbt_descr *td)
+{
+    int i, chips;
+    int startblock, block, dir;
+    int scanlen = this->pagesize + this->oobsize;
+    int bbtblocks;
+
+    nand_debug("td->options:0x%x", td->options);
+
+    /* Search direction top -> down? */
+    if (td->options & NAND_BBT_LASTBLOCK) {
+        startblock = (this->totalsize >> this->bbt_erase_shift) - 1;
+        dir = -1;
+    } else {
+        startblock = 0;
+        dir = 1;
+    }
+
+    /* Do we have a bbt per chip? */
+    if (td->options & NAND_BBT_PERCHIP) {
+        chips = this->numchips;
+        bbtblocks = this->chipsize >> this->bbt_erase_shift;
+        startblock &= bbtblocks - 1;  /* clamp to the first chip's range */
+    } else {
+        chips = 1;
+        bbtblocks = this->totalsize >> this->bbt_erase_shift;
+    }
+
+    for (i = 0; i < chips; i++) {
+        /* Reset version information */
+        td->version[i] = 0;
+        td->pages[i] = -1;   /* -1 == not found */
+        /* Scan the maximum number of blocks */
+        for (block = 0; block < td->maxblocks; block++) {
+
+            int actblock = startblock + dir * block;
+            loff_t offs = (loff_t)actblock << this->bbt_erase_shift;
+
+            /* Read first page */
+            scan_read(this, buf, offs, this->pagesize, td);
+            if (!check_pattern(buf, scanlen, this->pagesize, td)) {
+                td->pages[i] = actblock * this->page_per_block;
+                if (td->options & NAND_BBT_VERSION) {
+                    /* version byte sits right after the pattern */
+                    offs = bbt_get_ver_offs(this, td);
+                    td->version[i] = buf[offs];
+                }
+                break;
+            }
+        }
+        startblock += this->chipsize >> this->bbt_erase_shift;
+    }
+    /* Check, if we found a bbt for each requested chip */
+    for (i = 0; i < chips; i++) {
+        if (td->pages[i] == -1)
+            nand_err("Bad block table not found for chip %d", i);
+        else
+            nand_info("Bad block table found at page %d, version 0x%x",
+                    td->pages[i], td->version[i]);
+    }
+
+    return 0;
+}
+
+/**
+ * search_read_bbts - [GENERIC] scan the device for bad block table(s)
+ * @chip: NAND chip structure
+ * @buf: temporary buffer for one page incl. oob
+ * @td: descriptor for the primary bad block table
+ * @md: descriptor for the mirror bad block table (may be NULL)
+ *
+ * Look up the primary table and, when a mirror descriptor is given,
+ * the mirror table as well. The results (page numbers and versions)
+ * are recorded inside the descriptors by search_bbt().
+ */
+static void search_read_bbts(struct mtk_nand_chip *chip, uint8_t *buf,
+        struct mtk_nand_bbt_descr *td,
+        struct mtk_nand_bbt_descr *md)
+{
+    nand_debug("td->options:0x%x", td->options);
+
+    /* Primary table first, then the mirror if one is configured */
+    search_bbt(chip, buf, td);
+    if (md != NULL)
+        search_bbt(chip, buf, md);
+}
+
+/**
+ * write_bbt - [GENERIC] (Re)write the bad block table
+ * @this: NAND chip structure
+ * @buf: temporary buffer, large enough for one eraseblock plus oob
+ * @td: descriptor for the bad block table to write
+ * @md: descriptor for the bad block table mirror (may be NULL)
+ * @chipsel: selector for a specific chip, -1 for all
+ *
+ * (Re)write the bad block table for the selected chip(s).
+ *
+ * Return: 0 on success, -ENOSPC when no block is available to hold
+ * the table, -EINVAL for an unsupported bits-per-block setting, or
+ * the negative error code of the failing read/erase/write operation.
+ */
+static int write_bbt(struct mtk_nand_chip *this, uint8_t *buf,
+        struct mtk_nand_bbt_descr *td, struct mtk_nand_bbt_descr *md,
+        int chipsel)
+{
+    int i, res, chip = 0;
+    int bits, startblock, dir, page, offs, numblocks, sft, sftmsk;
+    int nrchips, pageoffs, ooboffs;
+    uint8_t msk[4];
+    uint8_t rcode = td->reserved_block_code;
+    size_t len = 0;
+    loff_t to;
+    struct mtk_nand_ops ops;
+
+    nand_debug("td->options:0x%x", td->options);
+
+    /* A reserved-block code of 0 would alias a good-block entry */
+    if (!rcode)
+        rcode = 0xff;
+    /* Write bad block table per chip rather than per device? */
+    if (td->options & NAND_BBT_PERCHIP) {
+        numblocks = (int)(this->chipsize / this->blocksize);
+        /* Full device write or specific chip? */
+        if (chipsel == -1) {
+            nrchips = this->numchips;
+        } else {
+            nrchips = chipsel + 1;
+            chip = chipsel;
+        }
+    } else {
+        numblocks = (int)(this->totalsize / this->blocksize);
+        nrchips = 1;
+    }
+
+    /* Loop through the chips */
+    for (; chip < nrchips; chip++) {
+        /*
+         * There was already a version of the table, reuse the page.
+         * This applies for absolute placement too, as we have the
+         * page nr. in td->pages.
+         */
+        if (td->pages[chip] != -1) {
+            page = td->pages[chip];
+            goto write;
+        }
+
+        /*
+         * Automatic placement of the bad block table. Search direction
+         * top -> down?
+         */
+        if (td->options & NAND_BBT_LASTBLOCK) {
+            startblock = numblocks * (chip + 1) - 1;
+            dir = -1;
+        } else {
+            startblock = chip * numblocks;
+            dir = 1;
+        }
+
+        for (i = 0; i < td->maxblocks; i++) {
+            int block = startblock + dir * i;
+            /* Skip blocks that are already known bad */
+            switch (bbt_get_entry(this, block)) {
+            case BBT_BLOCK_WORN:
+            case BBT_BLOCK_FACTORY_BAD:
+                continue;
+            }
+            page = block * this->page_per_block;
+            /* Check, if the block is used by the mirror table */
+            if (!md || md->pages[chip] != page)
+                goto write;
+        }
+        nand_err("No space left to write bad block table");
+        return -ENOSPC;
+write:
+
+        /*
+         * Set up shift count and masks for the flash table.
+         * msk maps a 2-bit memory-bbt entry to the bit pattern that is
+         * cleared in the flash table; reserved blocks (msk[2]) are
+         * stored with the reserved-block code.
+         */
+        bits = td->options & NAND_BBT_NRBITS_MSK;
+        msk[2] = ~rcode;
+        switch (bits) {
+        case 1:
+            sft = 3;
+            sftmsk = 0x07;
+            msk[0] = 0x00;
+            msk[1] = 0x01;
+            msk[3] = 0x01;
+            break;
+        case 2:
+            sft = 2;
+            sftmsk = 0x06;
+            msk[0] = 0x00;
+            msk[1] = 0x01;
+            msk[3] = 0x03;
+            break;
+        case 4:
+            sft = 1;
+            sftmsk = 0x04;
+            msk[0] = 0x00;
+            msk[1] = 0x0C;
+            msk[3] = 0x0f;
+            break;
+        case 8:
+            sft = 0;
+            sftmsk = 0x00;
+            msk[0] = 0x00;
+            msk[1] = 0x0F;
+            msk[3] = 0xff;
+            break;
+        default:
+            return -EINVAL;
+        }
+
+        to = ((loff_t)page) * this->pagesize;
+
+        /* Must we save the block contents? */
+        if (td->options & NAND_BBT_SAVECONTENT) {
+            /* Make it block aligned */
+            to &= ~(this->blocksize - 1);
+            /* NOTE(review): assumes blocksize == 1 << bbt_erase_shift - confirm */
+            len = 1 << this->bbt_erase_shift;
+
+            nand_memset(&ops, 0, sizeof(ops));
+            ops.mode = NAND_OPS_ECC_DMA_POLL;
+            ops.offset = (u64)to;
+            ops.len = (u64)this->blocksize;
+            ops.datbuf = buf;
+
+            res = mtk_nand_read(this, &ops);
+            if (res < 0) {
+                /* ECC errors are tolerated: the block is rewritten below */
+                nand_err("nand_bbt: ECC error while reading block for writing bad block table");
+            }
+            /*
+             * Read oob data into the tail of buf.
+             * NOTE(review): ops.datbuf is still set from the first read,
+             * so this presumably re-reads the main data as well - confirm
+             * against mtk_nand_read() semantics.
+             */
+            ops.oobecclen = (len / this->pagesize) * this->oobsize;
+            ops.oobeccbuf = &buf[len];
+            res = mtk_nand_read(this, &ops);
+            if (res < 0)
+                goto outerr;
+
+            /* Calc the byte offset in the buffer */
+            pageoffs = page - (int)(to / this->pagesize);
+            offs = pageoffs * this->pagesize;
+            /* Preset the bbt area with 0xff */
+            nand_memset(&buf[offs], 0xff, (size_t)(numblocks >> sft));
+            ooboffs = len + (pageoffs * this->oobsize);
+
+        } else if (td->options & NAND_BBT_NO_OOB) {
+            ooboffs = 0;
+            offs = td->len;
+            /* The version byte */
+            if (td->options & NAND_BBT_VERSION)
+                offs++;
+            /* Calc length */
+            len = (size_t)(numblocks >> sft);
+            len += offs;
+            /* Make it page aligned! */
+            len = ALIGN(len, this->pagesize);
+            /* Preset the buffer with 0xff */
+            nand_memset(buf, 0xff, len);
+            /* Pattern is located at the begin of first page */
+            nand_memcpy(buf, td->pattern, td->len);
+        } else {
+            /* Calc length */
+            len = (size_t)(numblocks >> sft);
+            /* Make it page aligned! */
+            len = ALIGN(len, this->pagesize);
+            /*
+             * Preset data and oob area with 0xff.
+             * Bug fix: this used nand_memcpy() with the integer 0xff as
+             * the *source pointer*, dereferencing an invalid address.
+             * nand_memset() is the intended call (cf. the equivalent
+             * memset() in Linux drivers/mtd/nand/nand_bbt.c).
+             */
+            nand_memset(buf, 0xff, len +
+                    (len / this->pagesize) * this->oobsize);
+            offs = 0;
+            ooboffs = len;
+            /* Pattern is located in oob area of first page */
+            nand_memcpy(&buf[ooboffs + td->offs], td->pattern, td->len);
+        }
+
+        if (td->options & NAND_BBT_VERSION)
+            buf[ooboffs + td->veroffs] = td->version[chip];
+
+        /* Walk through the memory table and serialize it into buf */
+        for (i = 0; i < numblocks; i++) {
+            uint8_t dat;
+            int sftcnt = (i << (3 - sft)) & sftmsk;
+            dat = bbt_get_entry(this, chip * numblocks + i);
+            /* Do not store the reserved bbt blocks! */
+            buf[offs + (i >> sft)] &= ~(msk[dat] << sftcnt);
+        }
+
+        nand_memset(&ops, 0, sizeof(ops));
+        ops.mode = NAND_OPS_ERASE_POLL;
+        ops.offset = to;
+        ops.len = 1 << this->bbt_erase_shift;
+        res = mtk_nand_erase(this, &ops);
+        if (res < 0)
+            goto outerr;
+
+        res = scan_write_bbt(this, to, len, buf,
+                td->options & NAND_BBT_NO_OOB ? NULL :
+                &buf[len]);
+        if (res < 0)
+            goto outerr;
+
+        /* %llx: loff_t is wider than int; the former 0x%x truncated it */
+        nand_info("Bad block table written to 0x%llx, version 0x%x",
+                (unsigned long long)to, td->version[chip]);
+
+        /* Mark it as used */
+        td->pages[chip] = page;
+    }
+    return 0;
+
+outerr:
+    nand_err("nand_bbt: error while writing bad block table %d", res);
+    return res;
+}
+
+/**
+ * nand_memory_bbt - [GENERIC] create a memory based bad block table
+ * @chip: NAND chip structure
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * Build the RAM-resident bbt by scanning every chip of the device
+ * (chipsel == -1) for factory/software bad block marks. Returns the
+ * result of create_bbt().
+ */
+static inline int nand_memory_bbt(struct mtk_nand_chip *chip, struct mtk_nand_bbt_descr *bd)
+{
+    /* scan the whole device; chip->databuf serves as scratch buffer */
+    return create_bbt(chip, chip->databuf, bd, -1);
+}
+
+/**
+ * check_create - [GENERIC] create and write bbt(s) if necessary
+ * @this: NAND chip structure
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function checks the results of the previous call to read_bbt and creates
+ * / updates the bbt(s) if necessary. Creation is necessary if no bbt was found
+ * for the chip/device. Update is necessary if one of the tables is missing or
+ * the version nr. of one table is less than the other.
+ *
+ * writeops is a bitmask: bit 0 requests a write of the primary table,
+ * bit 1 a write of the mirror table.
+ */
+static int check_create(struct mtk_nand_chip *this, uint8_t *buf, struct mtk_nand_bbt_descr *bd)
+{
+ int i, chips, writeops, create, chipsel, res, res2;
+ struct mtk_nand_bbt_descr *td = this->bbt_td;
+ struct mtk_nand_bbt_descr *md = this->bbt_md;
+ struct mtk_nand_bbt_descr *rd, *rd2;
+
+ nand_debug("td->options:0x%x", td->options);
+
+ /* Do we have a bbt per chip? */
+ if (td->options & NAND_BBT_PERCHIP)
+ chips = this->numchips;
+ else
+ chips = 1;
+
+ for (i = 0; i < chips; i++) {
+ writeops = 0;
+ create = 0;
+ rd = NULL; /* table to read back into memory */
+ rd2 = NULL; /* second table to read when versions are absent */
+ res = res2 = 0;
+ /* Per chip or per device? */
+ chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1;
+ /* Mirrored table available? */
+ if (md) {
+ if (td->pages[i] == -1 && md->pages[i] == -1) {
+ /* neither table found: create and write both */
+ create = 1;
+ writeops = 0x03;
+ } else if (td->pages[i] == -1) {
+ /* primary missing: restore it from the mirror */
+ rd = md;
+ writeops = 0x01;
+ } else if (md->pages[i] == -1) {
+ /* mirror missing: restore it from the primary */
+ rd = td;
+ writeops = 0x02;
+ } else if (td->version[i] == md->version[i]) {
+ /* same version: read primary; without version bytes read both */
+ rd = td;
+ if (!(td->options & NAND_BBT_VERSION))
+ rd2 = md;
+ } else if (((int8_t)(td->version[i] - md->version[i])) > 0) {
+ /* signed 8-bit compare handles version counter wrap-around */
+ rd = td;
+ writeops = 0x02;
+ } else {
+ rd = md;
+ writeops = 0x01;
+ }
+ } else {
+ if (td->pages[i] == -1) {
+ create = 1;
+ writeops = 0x01;
+ } else {
+ rd = td;
+ }
+ }
+
+ if (create) {
+ /* Create the bad block table by scanning the device? */
+ if (!(td->options & NAND_BBT_CREATE))
+ continue;
+
+ /* Create the table in memory by scanning the chip(s) */
+ if (!(this->bbt_options & NAND_BBT_CREATE_EMPTY))
+ create_bbt(this, buf, bd, chipsel);
+
+ td->version[i] = 1;
+ if (md)
+ md->version[i] = 1;
+ }
+
+ /* Read back first? */
+ if (rd) {
+ res = read_abs_bbt(this, buf, rd, chipsel);
+ if (res < 0) {
+ /* Mark table as invalid and redo this chip with it gone */
+ rd->pages[i] = -1;
+ rd->version[i] = 0;
+ i--;
+ continue;
+ }
+ }
+ /* If they weren't versioned, read both */
+ if (rd2) {
+ res2 = read_abs_bbt(this, buf, rd2, chipsel);
+ if (res2 < 0) {
+ /* Mark table as invalid and redo this chip with it gone */
+ rd2->pages[i] = -1;
+ rd2->version[i] = 0;
+ i--;
+ continue;
+ }
+ }
+
+ /* Scrub the flash table(s)? */
+ /* if (mtd_is_bitflip(res) || mtd_is_bitflip(res2))
+ writeops = 0x03; */
+
+ /* Update version numbers before writing */
+ if (md) {
+ td->version[i] = max(td->version[i], md->version[i]);
+ md->version[i] = td->version[i];
+ }
+
+ nand_debug("writeops:0x%x", writeops);
+ /* Write the bad block table to the device? */
+ if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
+ res = write_bbt(this, buf, td, md, chipsel);
+ if (res < 0)
+ return res;
+ }
+
+ /* Write the mirror bad block table to the device? */
+ if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
+ res = write_bbt(this, buf, md, td, chipsel);
+ if (res < 0)
+ return res;
+ }
+ }
+ return 0;
+}
+
+/**
+ * mark_bbt_region - [GENERIC] mark the bad block table regions
+ * @this: NAND chip structure
+ * @td: bad block table descriptor
+ *
+ * The bad block table regions are marked as "reserved" to prevent accidental
+ * erasures / writes. The regions are identified by the mark 0x02
+ * (BBT_BLOCK_RESERVED).
+ */
+static void mark_bbt_region(struct mtk_nand_chip *this, struct mtk_nand_bbt_descr *td)
+{
+ int i, j, chips, block, nrblocks, update;
+ uint8_t oldval;
+
+ nand_debug("td->options:0x%x", td->options);
+
+ /* Do we have a bbt per chip? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ chips = this->numchips;
+ nrblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ } else {
+ chips = 1;
+ nrblocks = (int)(this->totalsize >> this->bbt_erase_shift);
+ }
+
+ for (i = 0; i < chips; i++) {
+ /*
+ * Absolutely placed or read-only tables: reserve only the one
+ * block that actually holds the table (if any was found).
+ */
+ if ((td->options & NAND_BBT_ABSPAGE) ||
+ !(td->options & NAND_BBT_WRITE)) {
+ if (td->pages[i] == -1)
+ continue;
+ block = td->pages[i] / this->page_per_block;
+ oldval = bbt_get_entry(this, block);
+ bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
+ /* persist the new reservation if it was not stored before */
+ if ((oldval != BBT_BLOCK_RESERVED) &&
+ td->reserved_block_code)
+ mtk_nand_update_bbt(this, (loff_t)block <<
+ this->bbt_erase_shift);
+ continue;
+ }
+ /* Writeable tables: reserve the whole maxblocks search window */
+ update = 0;
+ if (td->options & NAND_BBT_LASTBLOCK)
+ block = ((i + 1) * nrblocks) - td->maxblocks;
+ else
+ block = i * nrblocks;
+ for (j = 0; j < td->maxblocks; j++) {
+ oldval = bbt_get_entry(this, block);
+ bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
+ if (oldval != BBT_BLOCK_RESERVED)
+ update = 1;
+ block++;
+ }
+ /*
+ * If we want reserved blocks to be recorded to flash, and some
+ * new ones have been marked, then we need to update the stored
+ * bbts. This should only happen once.
+ */
+ if (update && td->reserved_block_code)
+ mtk_nand_update_bbt(this, (loff_t)(block - 1) <<
+ this->bbt_erase_shift);
+ }
+}
+
+/**
+ * verify_bbt_descr - verify the bad block description
+ * @this: NAND chip structure
+ * @bd: the table descriptor to verify (NULL is ignored)
+ *
+ * Sanity-check a bbt descriptor against the chip geometry and the
+ * configured bbt options. Any violation triggers BUG_ON().
+ */
+static void verify_bbt_descr(struct mtk_nand_chip *this, struct mtk_nand_bbt_descr *bd)
+{
+    u32 pattern_len;
+    u32 bits;
+    u32 table_size;
+
+    if (bd == NULL)
+        return;
+    nand_debug("bd->options:0x%x", bd->options);
+
+    pattern_len = bd->len;
+    bits = bd->options & NAND_BBT_NRBITS_MSK;
+
+    /* In-band (no-oob) tables only make sense for flash based tables */
+    BUG_ON((this->bbt_options & NAND_BBT_NO_OOB) &&
+            !(this->bbt_options & NAND_BBT_USE_FLASH));
+    /* A bits-per-block count must be encoded in the options */
+    BUG_ON(bits == 0);
+
+    if (bd->options & NAND_BBT_VERSION)
+        pattern_len++;
+
+    if (bd->options & NAND_BBT_NO_OOB) {
+        BUG_ON(!(this->bbt_options & NAND_BBT_USE_FLASH));
+        BUG_ON(!(this->bbt_options & NAND_BBT_NO_OOB));
+        /* in-band pattern must start at offset 0 */
+        BUG_ON(bd->offs != 0);
+        if (bd->options & NAND_BBT_VERSION)
+            BUG_ON(bd->veroffs != bd->len);
+        BUG_ON(bd->options & NAND_BBT_SAVECONTENT);
+    }
+
+    /* The table (plus in-band pattern) must fit into one eraseblock */
+    table_size = (bd->options & NAND_BBT_PERCHIP) ?
+            (this->chipsize >> this->bbt_erase_shift) :
+            (this->totalsize >> this->bbt_erase_shift);
+    table_size = (table_size >> 3) * bits;
+    if (bd->options & NAND_BBT_NO_OOB)
+        table_size += pattern_len;
+    BUG_ON(table_size > (1 << this->bbt_erase_shift));
+}
+
+/**
+ * nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s)
+ * @this: NAND chip structure
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function checks, if a bad block table(s) is/are already available. If
+ * not it scans the device for manufacturer marked good / bad blocks and writes
+ * the bad block table(s) to the selected place.
+ *
+ * The bad block table memory is allocated here. It must be freed by calling
+ * the nand_free_bbt function.
+ *
+ * Returns 0 on success or a negative error code; on error this->bbt is
+ * freed and reset to NULL.
+ */
+static int nand_scan_bbt(struct mtk_nand_chip *this, struct mtk_nand_bbt_descr *bd)
+{
+ int len, res;
+ uint8_t *buf;
+ struct mtk_nand_bbt_descr *td = this->bbt_td;
+ struct mtk_nand_bbt_descr *md = this->bbt_md;
+
+ /* 2 bits per block => blocks/4 bytes; GNU "?:" keeps a 1-byte minimum */
+ len = (this->totalsize>> (this->bbt_erase_shift + 2)) ? : 1;
+
+ nand_debug("len:%x", len);
+ /*
+ * Allocate memory (2bit per block) and clear the memory bad block
+ * table.
+ */
+ this->bbt = nand_malloc(len);
+ if (!this->bbt)
+ return -ENOMEM;
+ nand_memset(this->bbt, 0, len);
+
+ /*
+ * If no primary table descriptor is given, scan the device to build a
+ * memory based bad block table.
+ */
+ if (!td) {
+ if ((res = nand_memory_bbt(this, bd))) {
+ nand_err("nand_bbt: can't scan flash and build the RAM-based BBT");
+ goto err;
+ }
+ return 0;
+ }
+ verify_bbt_descr(this, td);
+ verify_bbt_descr(this, md);
+
+ /* Allocate a temporary buffer for one page buffer incl. oob */
+ len = (1 << this->bbt_erase_shift);
+ len += (len / this->pagesize) * this->oobsize;
+ /* fixed DRAM scratch area; presumably >= len bytes - TODO confirm */
+ buf = NAND_DRAM_BUF_DATABUF_ADDR;/* nand_malloc(len); */
+ if (!buf) {
+ res = -ENOMEM;
+ goto err;
+ }
+
+ /* Is the bbt at a given page? */
+ if (td->options & NAND_BBT_ABSPAGE) {
+ read_abs_bbts(this, buf, td, md);
+ } else {
+ /* Search the bad block table using a pattern in oob */
+ search_read_bbts(this, buf, td, md);
+ }
+
+ res = check_create(this, buf, bd);
+ if (res)
+ goto err;
+
+ /* Prevent the bbt regions from erasing / writing */
+ mark_bbt_region(this, td);
+ if (md)
+ mark_bbt_region(this, md);
+
+ /* buf is a static buffer, nothing to free here */
+ /* nand_free(buf); */
+ return 0;
+
+err:
+ nand_free(this->bbt);
+ this->bbt = NULL;
+ return res;
+}
+
+/**
+ * mtk_nand_update_bbt - update bad block table(s)
+ * @this: NAND chip structure
+ * @offs: the byte offset of the newly marked block
+ *
+ * The function bumps the table version(s) and rewrites the bad block
+ * table(s) that carry the NAND_BBT_WRITE option. Returns 0 on success,
+ * -EINVAL when no memory bbt / descriptor exists, or the write_bbt()
+ * error code.
+ */
+static int mtk_nand_update_bbt(struct mtk_nand_chip *this, loff_t offs)
+{
+ int len, res = 0;
+ int chip, chipsel;
+ uint8_t *buf;
+ struct mtk_nand_bbt_descr *td = this->bbt_td;
+ struct mtk_nand_bbt_descr *md = this->bbt_md;
+
+ if (!this->bbt || !td)
+ return -EINVAL;
+
+ nand_debug("offs:0x%llx", offs);
+ /* Allocate a temporary buffer for one eraseblock incl. oob */
+ /* NOTE(review): len is only needed for the disabled nand_malloc() path */
+ len = (1 << this->bbt_erase_shift);
+ len += (len /this->pagesize) * this->oobsize;
+
+ /* fixed DRAM scratch area; presumably >= len bytes - TODO confirm */
+ buf = NAND_DRAM_BUF_DATABUF_ADDR;/* nand_malloc(len); */
+ if (!buf)
+ return -ENOMEM;
+
+ /* Do we have a bbt per chip? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ chip = (int)(offs / this->chipsize);
+ chipsel = chip;
+ } else {
+ chip = 0;
+ chipsel = -1;
+ }
+
+ /* bump the version so readers prefer the fresh table */
+ td->version[chip]++;
+ if (md)
+ md->version[chip]++;
+
+ /* Write the bad block table to the device? */
+ if (td->options & NAND_BBT_WRITE) {
+ res = write_bbt(this, buf, td, md, chipsel);
+ if (res < 0)
+ goto out;
+ }
+ /* Write the mirror bad block table to the device? */
+ if (md && (md->options & NAND_BBT_WRITE)) {
+ res = write_bbt(this, buf, md, td, chipsel);
+ }
+
+out:
+ /* buf is a static buffer, nothing to free here */
+ /* nand_free(buf); */
+ return res;
+}
+
+/*
+ * Define some generic bad / good block scan pattern which are used
+ * while scanning a device for factory marked good / bad blocks.
+ */
+/* A factory-good block carries 0xff at the bad block marker position(s) */
+static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+
+/* Generic flash bbt descriptors */
+/* "Bbt0" identifies the primary table, "1tbB" the mirror table */
+static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
+static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+/* Primary table: ident pattern at oob offset 8, version byte at offset 12 */
+static struct mtk_nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 8,
+ .len = 4,
+ .veroffs = 12,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+ .pattern = bbt_pattern
+};
+
+/* Mirror table: same layout as the primary, different ident pattern */
+static struct mtk_nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 8,
+ .len = 4,
+ .veroffs = 12,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+ .pattern = mirror_pattern
+};
+
+/* In-band (no-oob) variants: pattern at page start, version right after it */
+static struct mtk_nand_bbt_descr bbt_main_no_oob_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
+ | NAND_BBT_NO_OOB,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+ .pattern = bbt_pattern
+};
+
+static struct mtk_nand_bbt_descr bbt_mirror_no_oob_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
+ | NAND_BBT_NO_OOB,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+ .pattern = mirror_pattern
+};
+
+#define BADBLOCK_SCAN_MASK (~NAND_BBT_NO_OOB)
+/**
+ * nand_create_badblock_pattern - [INTERN] Creates a BBT descriptor structure
+ * @this: NAND chip to create the descriptor for
+ *
+ * Allocate and fill a mtk_nand_bbt_descr used for bad block marker
+ * detection, derived from @this->bbt_options and the chip's marker
+ * position. The descriptor is installed in this->badblock_pattern,
+ * which must be NULL on entry.
+ *
+ * Returns 0 on success, -EINVAL if a pattern is already installed, or
+ * -ENOMEM on allocation failure.
+ */
+static int nand_create_badblock_pattern(struct mtk_nand_chip *this)
+{
+    struct mtk_nand_bbt_descr *pattern;
+
+    if (this->badblock_pattern != NULL) {
+        nand_err("Bad block pattern already allocated; not replacing");
+        return -EINVAL;
+    }
+
+    pattern = nand_malloc(sizeof(*pattern));
+    if (pattern == NULL)
+        return -ENOMEM;
+
+    /* Inherit the scan options minus the in-band flag; mark as dynamic */
+    pattern->options = (this->bbt_options & BADBLOCK_SCAN_MASK) |
+            NAND_BBT_DYNAMICSTRUCT;
+    pattern->offs = this->badblockpos;
+    pattern->len = 1;
+    pattern->pattern = scan_ff_pattern;
+    this->badblock_pattern = pattern;
+
+    return 0;
+}
+
+/*
+ * Set the bad block marker/indicator (BBM/BBI) scan options according
+ * to heuristics keyed on manufacturer, cell type and page size.
+ */
+void mtk_nand_set_bbt_options(struct mtk_nand_chip *chip, u8 maf_id)
+{
+    int slc = nand_is_slc(chip);
+
+    /*
+     * Samsung and Hynix MLC parts keep the marker in the last page of
+     * each block; Micron 2KiB-page parts and SLC Samsung, Hynix,
+     * Toshiba, AMD/Spansion and Macronix parts use the first two
+     * pages. Everything else is scanned on the first page only.
+     */
+    if (!slc && (maf_id == NAND_MFR_SAMSUNG || maf_id == NAND_MFR_HYNIX)) {
+        chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
+        return;
+    }
+
+    if ((slc && (maf_id == NAND_MFR_SAMSUNG ||
+                 maf_id == NAND_MFR_HYNIX ||
+                 maf_id == NAND_MFR_TOSHIBA ||
+                 maf_id == NAND_MFR_AMD ||
+                 maf_id == NAND_MFR_MACRONIX)) ||
+        (chip->pagesize == 2048 && maf_id == NAND_MFR_MICRON))
+        chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
+}
+
+/**
+ * mtk_nand_default_bbt - [NAND Interface] Select a default bad block table for the device
+ * @chip: NAND chip structure
+ *
+ * This function selects the default bad block table support for the device and
+ * calls the nand_scan_bbt function. Afterwards it logs every block the
+ * resulting memory bbt classifies as worn, factory-bad or reserved.
+ */
+int mtk_nand_default_bbt(struct mtk_nand_chip *chip)
+{
+ int i, ret = 0;
+
+ /* Is a flash based bad block table requested? */
+ if (chip->bbt_options & NAND_BBT_USE_FLASH) {
+ /* Use the default pattern descriptors */
+ if (chip->bbt_options & NAND_BBT_NO_OOB) {
+ chip->bbt_td = &bbt_main_no_oob_descr;
+ chip->bbt_md = &bbt_mirror_no_oob_descr;
+ } else {
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+ }
+ } else {
+ /* RAM-only bbt: no flash descriptors */
+ chip->bbt_td = NULL;
+ chip->bbt_md = NULL;
+ }
+
+ if (!chip->badblock_pattern) {
+ ret = nand_create_badblock_pattern(chip);
+ if (ret)
+ return ret;
+ }
+
+ nand_debug("chip->bbt_options:%x", chip->bbt_options);
+
+ ret = nand_scan_bbt(chip, chip->badblock_pattern);
+
+ /* dump the classification of every block for diagnostics */
+ nand_info("BBT check total block:%d", chip->totalsize >> chip->bbt_erase_shift);
+ for (i = 0; i < chip->totalsize >> chip->bbt_erase_shift; i++) {
+ if (bbt_get_entry(chip, i) == BBT_BLOCK_WORN)
+ nand_info("Checked WORN bad blk: %d", i);
+ else if (bbt_get_entry(chip, i) == BBT_BLOCK_FACTORY_BAD)
+ nand_info("Checked Factory bad blk: %d", i);
+ else if (bbt_get_entry(chip, i) != BBT_BLOCK_GOOD)
+ nand_debug("Checked Reserved blk: %d", i);
+ }
+
+ return ret;
+}
+
+/**
+ * nand_isreserved_bbt - [NAND Interface] Check if a block is reserved
+ * @this: NAND chip structure
+ * @offs: byte offset in the device
+ *
+ * Returns non-zero when the block containing @offs is marked
+ * BBT_BLOCK_RESERVED (i.e. it holds a bad block table) in the memory bbt.
+ */
+int nand_isreserved_bbt(struct mtk_nand_chip *this, loff_t offs)
+{
+    int block = (int)(offs >> this->bbt_erase_shift);
+
+    return bbt_get_entry(this, block) == BBT_BLOCK_RESERVED;
+}
+
+/**
+ * mtk_nand_isbad_bbt - [NAND Interface] Check if a block is bad
+ * @this: NAND chip structure
+ * @block: block number to query
+ * @allowbbt: allow access to the bad block table region
+ *
+ * Returns 0 for a good block, 1 for a worn or factory-bad block; for
+ * reserved (bbt-holding) blocks returns 0 only when @allowbbt is set.
+ */
+int mtk_nand_isbad_bbt(struct mtk_nand_chip *this, int block, int allowbbt)
+{
+    int entry = bbt_get_entry(this, block);
+
+    nand_debug("bbt info for (block %d) 0x%02x\n",
+        block, entry);
+
+    if (entry == BBT_BLOCK_GOOD)
+        return 0;
+    if (entry == BBT_BLOCK_RESERVED)
+        return allowbbt ? 0 : 1;
+
+    /* BBT_BLOCK_WORN, BBT_BLOCK_FACTORY_BAD or anything unexpected */
+    return 1;
+}
+
+/**
+ * mtk_nand_markbad_bbt - [NAND Interface] Mark a block bad in the BBT
+ * @this: NAND chip structure
+ * @offs: byte offset of the bad block
+ *
+ * Mark the block worn in the memory bbt and, for flash based tables,
+ * push the update out to the device. Returns 0 on success or the
+ * error code from mtk_nand_update_bbt().
+ */
+int mtk_nand_markbad_bbt(struct mtk_nand_chip *this, loff_t offs)
+{
+    int block = (int)(offs >> this->bbt_erase_shift);
+
+    nand_info("block:%d", block);
+
+    /* Mark bad block in memory */
+    bbt_mark_entry(this, block, BBT_BLOCK_WORN);
+
+    /* Update flash-based bad block table */
+    if (this->bbt_options & NAND_BBT_USE_FLASH)
+        return mtk_nand_update_bbt(this, offs);
+
+    return 0;
+}
+
diff --git a/src/bsp/lk/platform/mt8512/drivers/nand/slc/bbt/bbt.h b/src/bsp/lk/platform/mt8512/drivers/nand/slc/bbt/bbt.h
new file mode 100644
index 0000000..8e1adf6
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nand/slc/bbt/bbt.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include "../slc.h"
+#include "../slc_os.h"
+#include "../nfi/nfi.h"
+#include "../ecc/ecc.h"
+
+#define BBT_BLOCK_GOOD 0x00
+#define BBT_BLOCK_WORN 0x01
+#define BBT_BLOCK_RESERVED 0x02
+#define BBT_BLOCK_FACTORY_BAD 0x03
+
+#define BBT_ENTRY_MASK 0x03
+#define BBT_ENTRY_SHIFT 2
+
+/* The maximum number of NAND chips in an array */
+#define NAND_MAX_CHIPS 2
+
+/**
+ * struct mtk_nand_bbt_descr - bad block table descriptor
+ * @options: options for this descriptor
+ * @pages: the page(s) where we find the bbt, used with option BBT_ABSPAGE
+ * when bbt is searched, then we store the found bbts pages here.
+ * It's an array and supports up to NAND_MAX_CHIPS chips now
+ * @offs: offset of the pattern in the oob area of the page
+ * @veroffs: offset of the bbt version counter in the oob area of the page
+ * @version: version read from the bbt page during scan
+ * @len: length of the pattern, if 0 no pattern check is performed
+ * @maxblocks: maximum number of blocks to search for a bbt. This number of
+ * blocks is reserved at the end of the device where the tables are
+ * written.
+ * @reserved_block_code: if non-0, this pattern denotes a reserved (rather than
+ * bad) block in the stored bbt
+ * @pattern: pattern to identify bad block table or factory marked good /
+ * bad blocks, can be NULL, if len = 0
+ *
+ * Descriptor for the bad block table marker and the descriptor for the
+ * pattern which identifies good and bad blocks. The assumption is made
+ * that the pattern and the version count are always located in the oob area
+ * of the first block.
+ */
+struct mtk_nand_bbt_descr {
+ int options;
+ int pages[NAND_MAX_CHIPS];
+ int offs;
+ int veroffs;
+ uint8_t version[NAND_MAX_CHIPS];
+ int len;
+ int maxblocks;
+ int reserved_block_code;
+ uint8_t *pattern;
+};
+
+/*
+ * NAND Flash Manufacturer ID Codes
+ */
+#define NAND_MFR_TOSHIBA 0x98
+#define NAND_MFR_SAMSUNG 0xec
+#define NAND_MFR_FUJITSU 0x04
+#define NAND_MFR_NATIONAL 0x8f
+#define NAND_MFR_RENESAS 0x07
+#define NAND_MFR_STMICRO 0x20
+#define NAND_MFR_HYNIX 0xad
+#define NAND_MFR_MICRON 0x2c
+#define NAND_MFR_AMD 0x01
+#define NAND_MFR_MACRONIX 0xc2
+#define NAND_MFR_EON 0x92
+#define NAND_MFR_SANDISK 0x45
+#define NAND_MFR_INTEL 0x89
+#define NAND_MFR_ATO 0x9b
+#define NAND_MFR_WINBOND 0xef
+
+/* Options for the bad block table descriptors */
+
+/* The number of bits used per block in the bbt on the device */
+#define NAND_BBT_NRBITS_MSK 0x0000000F
+#define NAND_BBT_1BIT 0x00000001
+#define NAND_BBT_2BIT 0x00000002
+#define NAND_BBT_4BIT 0x00000004
+#define NAND_BBT_8BIT 0x00000008
+/* The bad block table is in the last good block of the device */
+#define NAND_BBT_LASTBLOCK 0x00000010
+/* The bbt is at the given page, else we must scan for the bbt */
+#define NAND_BBT_ABSPAGE 0x00000020
+/* bbt is stored per chip on multichip devices */
+#define NAND_BBT_PERCHIP 0x00000080
+/* bbt has a version counter at offset veroffs */
+#define NAND_BBT_VERSION 0x00000100
+/* Create a bbt if none exists */
+#define NAND_BBT_CREATE 0x00000200
+/*
+ * Create an empty BBT with no vendor information. Vendor's information may be
+ * unavailable, for example, if the NAND controller has a different data and OOB
+ * layout or if this information is already purged. Must be used in conjunction
+ * with NAND_BBT_CREATE.
+ */
+#define NAND_BBT_CREATE_EMPTY 0x00000400
+/* Write bbt if necessary */
+#define NAND_BBT_WRITE 0x00002000
+/* Read and write back block contents when writing bbt */
+#define NAND_BBT_SAVECONTENT 0x00004000
+/* Search good / bad pattern on the first and the second page */
+#define NAND_BBT_SCAN2NDPAGE 0x00008000
+/* Search good / bad pattern on the last page of the eraseblock */
+#define NAND_BBT_SCANLASTPAGE 0x00010000
+/*
+ * Use a flash based bad block table. By default, OOB identifier is saved in
+ * OOB area. This option is passed to the default bad block table function.
+ */
+#define NAND_BBT_USE_FLASH 0x00020000
+/*
+ * Do not store flash based bad block table marker in the OOB area; store it
+ * in-band.
+ */
+#define NAND_BBT_NO_OOB 0x00040000
+/*
+ * Do not write new bad block markers to OOB; useful, e.g., when ECC covers
+ * entire spare area. Must be used with NAND_BBT_USE_FLASH.
+ */
+#define NAND_BBT_NO_OOB_BBM 0x00080000
+
+/*
+ * Flag set by nand_create_badblock_pattern(), marking that the descriptor
+ * was allocated dynamically and must be freed when the chip is released.
+ * Has no meaning in nand_chip.bbt_options.
+ */
+#define NAND_BBT_DYNAMICSTRUCT 0x80000000
+
+/* The maximum number of blocks to scan for a bbt */
+#define NAND_BBT_SCAN_MAXBLOCKS 4
+
+/*
+ * Constants for oob configuration
+ */
+#define NAND_SMALL_BADBLOCK_POS 5
+#define NAND_LARGE_BADBLOCK_POS 0
+
+
+/**
+ * struct mtk_bbt_info - [GENERIC] Bad Block Table data structure
+ * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry
+ * @badblockpos: [INTERN] position of the bad block marker in the oob area
+ * @options: options for this descriptor
+ * @bbt: [INTERN] bad block table pointer
+ * @isbad_bbt: function to determine if a block is bad
+ * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for
+ * initial bad block scan
+ * @priv: [OPTIONAL] pointer to private bbm data
+ */
+struct mtk_bbt_info {
+ int bbt_erase_shift;
+ int badblockpos;
+ int options;
+
+ /* 2 bits per block, see BBT_ENTRY_SHIFT/BBT_ENTRY_MASK above */
+ uint8_t *bbt;
+
+ /* int (*isbad_bbt)(struct mtk_nand_chip *chip, loff_t ofs, int allowbbt); */
+
+ /* TODO Add more NAND specific fields */
+ struct mtk_nand_bbt_descr *badblock_pattern;
+
+ void *priv;
+};
+
+struct mtk_nand_chip;
+
+extern int mtk_nand_default_bbt(struct mtk_nand_chip *chip);
+extern int mtk_nand_markbad_bbt(struct mtk_nand_chip *this, loff_t offs);
+extern int mtk_nand_isbad_bbt(struct mtk_nand_chip *this, int block, int allowbbt);
+extern void mtk_nand_set_bbt_options(struct mtk_nand_chip *chip, u8 maf_id);
diff --git a/src/bsp/lk/platform/mt8512/drivers/nand/slc/ecc/ecc.c b/src/bsp/lk/platform/mt8512/drivers/nand/slc/ecc/ecc.c
new file mode 100644
index 0000000..e6c24c4
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nand/slc/ecc/ecc.c
@@ -0,0 +1,632 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ecc.h"
+#include "../slc_os.h"
+
+#define ECC_IDLE_MASK NAND_BIT(0)
+#define ECC_IRQ_EN NAND_BIT(0)
+#define ECC_OP_ENABLE (1)
+#define ECC_OP_DISABLE (0)
+
+#define ECC_ENCCON (0x00)
+#define ECC_ENCCNFG (0x04)
+#define ECC_CNFG_4BIT (0)
+#define ECC_CNFG_6BIT (1)
+#define ECC_CNFG_8BIT (2)
+#define ECC_CNFG_10BIT (3)
+#define ECC_CNFG_12BIT (4)
+#define ECC_CNFG_14BIT (5)
+#define ECC_CNFG_16BIT (6)
+#define ECC_CNFG_18BIT (7)
+#define ECC_CNFG_20BIT (8)
+#define ECC_CNFG_22BIT (9)
+#define ECC_CNFG_24BIT (0xa)
+#define ECC_CNFG_28BIT (0xb)
+#define ECC_CNFG_32BIT (0xc)
+#define ECC_CNFG_36BIT (0xd)
+#define ECC_CNFG_40BIT (0xe)
+#define ECC_CNFG_44BIT (0xf)
+#define ECC_CNFG_48BIT (0x10)
+#define ECC_CNFG_52BIT (0x11)
+#define ECC_CNFG_56BIT (0x12)
+#define ECC_CNFG_60BIT (0x13)
+#define ECC_CNFG_68BIT (0x14)
+#define ECC_CNFG_72BIT (0x15)
+#define ECC_CNFG_80BIT (0x16)
+#define ECC_MODE_SHIFT (5)
+#define ECC_MS_SHIFT (16)
+#define ECC_ENCDIADDR (0x08)
+#define ECC_ENCIDLE (0x0c)
+#define ECC_ENCSTA (0x7c)
+#define ENC_IDLE NAND_BIT(0)
+#define ECC_ENCIRQ_EN (0x80)
+#define ECC_ENCIRQ_STA (0x84)
+#define PG_IRQ_SEL NAND_BIT(1)
+#define ECC_PIO_DIRDY (0x90)
+#define PIO_DI_RDY (0x01)
+#define ECC_PIO_DI (0x94)
+#define ECC_DECCON (0x100)
+#define ECC_DECCNFG (0x104)
+#define DEC_EMPTY_EN NAND_BIT(31)
+#define DEC_CON_SHIFT (12)
+#define ECC_DECDIADDR (0x108)
+#define ECC_DECIDLE (0x10c)
+#define ECC_DECENUM(x) (0x114 + (x) * sizeof(u32))
+#define ERR_MASK (0x1f)
+#define ECC_DECDONE (0x124)
+#define ECC_DECIRQ_EN (0x200)
+#define ECC_DECIRQ_STA (0x204)
+#define ECC_DECFSM (0x208)
+#define FSM_MASK (0x3f3fff0f)
+#define FSM_IDLE (0x01011101)
+#define ECC_BYPASS (0x20c)
+#define ECC_BYPASS_EN NAND_BIT(0)
+#ifdef MT8512_NFI
+#define ECC_ENCPAR(x) (0x300 + (x) * sizeof(u32))
+#define ECC_DECEL(x) (0x500 + (x) * sizeof(u32))
+#else
+#define ECC_ENCPAR(x) (0x10 + (x) * sizeof(u32))
+#define ECC_ENCPAR_EXT(x) (0x300 + (x) * sizeof(u32))
+
+#define ECC_DECEL(x) (0x128 + (x) * sizeof(u32))
+#define ECC_DECEL_EXT(x) (0x400 + (x) * sizeof(u32))
+#endif
+
+#define ECC_TIMEOUT (500000)
+
+#define ECC_IDLE_REG(op) ((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE)
+#define ECC_CTL_REG(op) ((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON)
+#define ECC_IRQ_REG(op) ((op) == ECC_ENCODE ? \
+ ECC_ENCIRQ_EN : ECC_DECIRQ_EN)
+
+
+/* Driver state for the MTK hardware ECC engine. */
+struct mtk_ecc {
+ nand_lock_t lock; /* held from mtk_ecc_enable() until mtk_ecc_disable() */
+ nand_completion_t done; /* signalled by the ECC interrupt handler */
+
+ u64 regs; /* ECC register base address */
+ u32 sectors; /* sector-done mask checked against ECC_DECDONE */
+ u8 *buffer; /* bounce buffer for non-4B-aligned DMA data */
+};
+
+/* Spin until the ECC PIO data register reports ready; log on timeout. */
+static inline void mtk_ecc_wait_ioready(struct mtk_ecc *ecc)
+{
+	int ready;
+
+	ready = check_with_timeout((nand_readl(ecc->regs + ECC_PIO_DIRDY) & PIO_DI_RDY),
+				   ECC_TIMEOUT);
+	if (!ready)
+		nand_err("ecc io not ready");
+}
+
+/*
+ * Program the encoder or decoder configuration registers from @config.
+ * ECC_ENCODE: message size is config->len bytes (written in bits).
+ * ECC_DECODE: message size additionally covers the parity bits, and
+ * ecc->sectors is primed for the completion check when config->sectors
+ * is set. DMA addresses must be 4-byte aligned; misalignment is logged.
+ */
+static void mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
+{
+	/*
+	 * Strengths supported by the engine; the array index is exactly the
+	 * ECC_CNFG_xBIT register encoding (0 -> 4 bits ... 0x16 -> 80 bits).
+	 */
+	static const u32 strengths[] = {
+		4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36,
+		40, 44, 48, 52, 56, 60, 68, 72, 80
+	};
+	u32 ecc_bit = ECC_CNFG_4BIT, dec_sz, enc_sz;
+	u32 reg;
+	size_t i;
+
+	for (i = 0; i < sizeof(strengths) / sizeof(strengths[0]); i++) {
+		if (config->strength == strengths[i]) {
+			ecc_bit = i;
+			break;
+		}
+	}
+	if (i == sizeof(strengths) / sizeof(strengths[0]))
+		nand_err("invalid strength %d, default to 4 bits",
+			 config->strength);
+
+	if (config->op == ECC_ENCODE) {
+		/* configure ECC encoder (in bits) */
+		enc_sz = config->len << 3;
+
+		reg = ecc_bit | (config->mode << ECC_MODE_SHIFT);
+		reg |= (enc_sz << ECC_MS_SHIFT);
+		nand_writel(reg, ecc->regs + ECC_ENCCNFG);
+
+		if (config->mode == ECC_DMA_MODE) {
+			if (config->addr & 0x3) {
+				/* fixed: message previously passed the address
+				 * without a matching format specifier */
+				nand_err("ecc encode address 0x%x is not 4B aligned !!",
+					 config->addr);
+			}
+			nand_writel(config->addr, ecc->regs + ECC_ENCDIADDR);
+		}
+
+	} else {
+		/* configure ECC decoder (in bits, message + parity) */
+		dec_sz = (config->len << 3) +
+			 config->strength * ECC_PARITY_BITS;
+
+		reg = ecc_bit | (config->mode << ECC_MODE_SHIFT);
+		reg |= (dec_sz << ECC_MS_SHIFT) | (config->deccon << DEC_CON_SHIFT);
+		reg |= DEC_EMPTY_EN;
+		nand_writel(reg, ecc->regs + ECC_DECCNFG);
+
+		if (config->mode == ECC_DMA_MODE) {
+			if (config->addr & 0x3) {
+				nand_err("ecc decode address 0x%x is not 4B aligned !!",
+					 config->addr);
+			}
+			nand_writel(config->addr, ecc->regs + ECC_DECDIADDR);
+		}
+
+		/* sector-done mask used by the IRQ handler / polling wait */
+		if (config->sectors)
+			ecc->sectors = 1 << (config->sectors - 1);
+	}
+}
+
+/* Spin until the encoder/decoder idle bit for @op is set; log on timeout. */
+static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
+				     enum mtk_ecc_operation op)
+{
+	const char *name = (op == ECC_ENCODE) ? "encoder" : "decoder";
+	int idle;
+
+	idle = check_with_timeout(nand_readl(ecc->regs + ECC_IDLE_REG(op)) & ECC_IDLE_MASK,
+				  ECC_TIMEOUT);
+	if (!idle)
+		nand_err("%s NOT idle", name);
+}
+
+#ifdef MTK_NAND_IRQ_EN
+/*
+ * Block until the ECC interrupt handler signals completion.
+ * Returns 0 on success or the non-zero error from
+ * nand_wait_for_completion_timeout() on timeout.
+ */
+static int mtk_ecc_irq_wait(struct mtk_ecc *ecc, u32 timeout)
+{
+ int ret;
+
+ ret = nand_wait_for_completion_timeout(&ecc->done, timeout);
+ if (ret != 0) {
+ nand_err("failed to get completion timeout");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * ECC interrupt handler shared by encode and decode.
+ * Decode completion is only signalled when ECC_DECDONE reports the
+ * sector mask stored in ecc->sectors; any encoder IRQ completes
+ * directly. The served operation's IRQ enable register is cleared
+ * before returning NAND_IRQ_HANDLED.
+ */
+static enum handler_return mtk_ecc_interrupt_handler(void *arg)
+{
+ struct mtk_ecc *ecc = arg;
+ enum mtk_ecc_operation op;
+ u32 dec, enc;
+
+ dec = nand_readw(ecc->regs + ECC_DECIRQ_STA) & ECC_IRQ_EN;
+ if (dec) {
+ op = ECC_DECODE;
+ dec = nand_readw(ecc->regs + ECC_DECDONE);
+ if (dec & ecc->sectors) {
+ /* consumed: clear the mask so a stale IRQ reads as spurious */
+ ecc->sectors = 0;
+ nand_complete(&ecc->done);
+ } else {
+ return NAND_IRQ_NONE;
+ }
+ } else {
+ enc = nand_readl(ecc->regs + ECC_ENCIRQ_STA) & ECC_IRQ_EN;
+ if (enc) {
+ op = ECC_ENCODE;
+ nand_complete(&ecc->done);
+ } else {
+ return NAND_IRQ_NONE;
+ }
+ }
+
+ nand_writel(0, ecc->regs + ECC_IRQ_REG(op));
+
+ return NAND_IRQ_HANDLED;
+}
+
+/* Initialize the completion object and hook the NFIECC interrupt line. */
+static int mtk_ecc_request_irq(struct mtk_ecc *ecc)
+{
+	nand_init_completion(&ecc->done);
+	mtk_nand_request_irq(NAND_NFIECC_IRQ_BIT_ID, &mtk_ecc_interrupt_handler, ecc);
+	return 0;
+}
+#endif
+
+/*
+ * Allocate and initialize the ECC engine context: allocate the 4B-aligned
+ * bounce buffer, quiesce encoder and decoder, init the lock, optionally
+ * hook the IRQ and clear the register bypass. On success *ext_ecc holds
+ * the new context and 0 is returned; -ENOMEM on allocation failure.
+ * (Dead "#if 1" wrapper around the buffer allocation removed.)
+ */
+int mtk_ecc_hw_init(struct mtk_ecc **ext_ecc)
+{
+	struct mtk_ecc *ecc;
+	u32 reg;
+	int ret = 0;
+
+	ecc = (struct mtk_ecc *)nand_malloc(sizeof(*ecc));
+	if (!ecc)
+		return -ENOMEM;
+
+	nand_memset(ecc, 0, sizeof(*ecc));
+
+	/* bounce buffer for DMA transfers whose source is not 4B aligned */
+	ecc->buffer = (u8 *)nand_memalign(4, ECC_MAX_CODESIZE);
+	if (!ecc->buffer) {
+		ret = -ENOMEM;
+		nand_err("failed to malloc ecc temp buffer %d", ECC_MAX_CODESIZE);
+		goto free_ecc;
+	}
+
+	*ext_ecc = ecc;
+
+	ecc->regs = NAND_NFIECC_BASE;
+
+	/* make sure both engines are idle and disabled before first use */
+	mtk_ecc_wait_idle(ecc, ECC_ENCODE);
+	nand_writew(ECC_OP_DISABLE, ecc->regs + ECC_ENCCON);
+
+	mtk_ecc_wait_idle(ecc, ECC_DECODE);
+	nand_writel(ECC_OP_DISABLE, ecc->regs + ECC_DECCON);
+
+	nand_lock_init(&ecc->lock);
+
+#ifdef MTK_NAND_IRQ_EN
+	/* register interrupt handler */
+	mtk_ecc_request_irq(ecc);
+#endif
+	/* disable ecc bypass */
+	reg = nand_readl(ecc->regs + ECC_BYPASS);
+	reg &= ~ECC_BYPASS_EN;
+	nand_writel(reg, ecc->regs + ECC_BYPASS);
+
+	return 0;
+
+free_ecc:
+	nand_free(ecc);
+	return ret;
+}
+
+
+/*
+ * Acquire the ECC lock, program @config and start the engine for
+ * config->op. The lock stays held until mtk_ecc_disable() releases it,
+ * so every successful call must be paired with mtk_ecc_disable().
+ * @polling: when zero, the operation's completion IRQ is also enabled.
+ * Always returns 0.
+ */
+int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config, int polling)
+{
+ enum mtk_ecc_operation op = config->op;
+
+ nand_lock(&ecc->lock);
+
+ mtk_ecc_wait_idle(ecc, op);
+ mtk_ecc_config(ecc, config);
+ nand_writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op));
+
+ if (!polling) {
+ nand_writew(ECC_IRQ_EN, ecc->regs + ECC_IRQ_REG(op));
+ }
+
+ return 0;
+}
+
+/* Stop whichever ECC operation is active, mask its IRQ and release the lock. */
+void mtk_ecc_disable(struct mtk_ecc *ecc)
+{
+	enum mtk_ecc_operation op;
+
+	/* the running engine's control register still reads back ENABLE */
+	if (nand_readw(ecc->regs + ECC_CTL_REG(ECC_ENCODE)) == ECC_OP_ENABLE)
+		op = ECC_ENCODE;
+	else
+		op = ECC_DECODE;
+
+	mtk_ecc_wait_idle(ecc, op);
+	nand_writew(0, ecc->regs + ECC_IRQ_REG(op));
+	nand_writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
+
+	nand_unlock(&ecc->lock);
+}
+
+/*
+ * Accumulate decoder statistics for the first @sectors sectors.
+ * Each ECC_DECENUM word packs four 5-bit error counts (8 bits apart);
+ * the all-ones value (ERR_MASK) marks an uncorrectable sector.
+ * Fills stats->corrected (total), stats->failed (uncorrectable count)
+ * and stats->bitflips (worst per-sector count).
+ */
+void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats,
+		       int sectors)
+{
+	u32 offset, i, err;
+	u32 bitflips = 0;
+
+	stats->corrected = 0;
+	stats->failed = 0;
+
+	/* cast avoids the signed/unsigned comparison in the loop condition */
+	for (i = 0; i < (u32)sectors; i++) {
+		offset = (i >> 2);
+		err = nand_readl(ecc->regs + ECC_DECENUM(offset));
+		err = err >> ((i % 4) * 8);
+		err &= ERR_MASK;
+		if (err == ERR_MASK) {
+			/* uncorrectable errors (log wording fixed) */
+			stats->failed++;
+			nand_err("sector %d is uncorrectable", i);
+			continue;
+		}
+
+		stats->corrected += err;
+		bitflips = max(bitflips, err);
+	}
+
+	stats->bitflips = bitflips;
+}
+
+/*
+ * Wait for the current ECC @op to finish.
+ * @polling: nonzero -> spin on the status registers; zero -> wait for the
+ * completion interrupt (only available with MTK_NAND_IRQ_EN).
+ * Returns 0 on success, -ETIMEDOUT on timeout or when interrupt-mode
+ * waiting is requested but interrupt support is compiled out.
+ */
+int mtk_ecc_wait_done(struct mtk_ecc *ecc, enum mtk_ecc_operation op, int polling)
+{
+	int ret = 0;
+
+	if (!polling) {
+#ifdef MTK_NAND_IRQ_EN
+		ret = mtk_ecc_irq_wait(ecc, ECC_TIMEOUT);
+		if (ret) {
+			/* fixed: the error branch previously fired on success
+			 * (if (!ret)) and -ETIMEDOUT was returned even when
+			 * the wait succeeded */
+			nand_err("mtk_ecc_wait_done timeout");
+			return -ETIMEDOUT;
+		}
+		return 0;
+#else
+		/* no IRQ support compiled in: interrupt wait cannot succeed */
+		return -ETIMEDOUT;
+#endif
+	} else {
+		if (op == ECC_ENCODE) {
+			if (!check_with_timeout((nand_readl(ecc->regs + ECC_ENCSTA) & ENC_IDLE), ECC_TIMEOUT)) {
+				nand_err("encoder timeout");
+				return -ETIMEDOUT;
+			}
+		} else {
+			if (!check_with_timeout((nand_readw(ecc->regs + ECC_DECDONE) & ecc->sectors), ECC_TIMEOUT)) {
+				nand_err("decoder timeout");
+				return -ETIMEDOUT;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Wait until the decoder state machines (syn/bma/chien/autoc) are idle.
+ * Decode-done alone does not mean the engine finished all work; this
+ * check matters when ECC_DECCNFG[13:12] == 3, i.e. auto-correct mode.
+ * Returns 0 when idle, -ETIMEDOUT otherwise.
+ */
+int mtk_ecc_wait_decode_fsm_idle(struct mtk_ecc *ecc)
+{
+ if (!check_with_timeout(((nand_readl(ecc->regs + ECC_DECFSM) & FSM_MASK) == FSM_IDLE), ECC_TIMEOUT)) {
+ nand_err("decode fsm(0x%x) is not idle", nand_readl(ecc->regs + ECC_DECFSM));
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * Encode @bytes of @data and append the generated parity bytes at
+ * data + bytes. DMA mode bounces non-4B-aligned buffers through
+ * ecc->buffer. Returns 0 on success or a negative error code.
+ */
+int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
+ u8 *data, u32 bytes, int polling)
+{
+ /* NOTE(review): the (u32) cast truncates 64-bit pointers; it is only
+ * used for the alignment test before the paddr conversion - confirm */
+ u32 addr = (u32)data;
+ u8 *p;
+ u8 *buf = data;
+ u32 len, i, val = 0;
+ int ret = 0;
+
+ /* encoder memory address should be 4B aligned */
+ if ((config->mode == ECC_DMA_MODE) && (addr & 0x3)) {
+ /* buf =(u8 *)NAND_DRAM_BUF_ECCDE_ADDR; */
+ buf = ecc->buffer;
+ nand_memcpy(buf, data, bytes);
+ }
+
+ addr = nand_kvaddr_to_paddr(buf);
+
+ if (config->mode == ECC_DMA_MODE)
+ nand_dma_map(buf, bytes, true, NULL);
+
+ config->op = ECC_ENCODE;
+ config->addr = addr;
+ config->len = bytes;
+ ret = mtk_ecc_enable(ecc, config, polling);
+ if (ret)
+ goto freebuf;
+
+ /* PIO path: mode != DMA here, so no bounce buffer and data == buf */
+ if (config->mode == ECC_PIO_MODE) {
+ for (i = 0; i < ((config->len + 3) >> 2); i++) {
+ mtk_ecc_wait_ioready(ecc);
+ nand_writel(*((u32 *)data + i), ecc->regs + ECC_PIO_DI);
+ }
+ }
+
+ ret = mtk_ecc_wait_done(ecc, ECC_ENCODE, polling);
+ if (ret)
+ goto timeout;
+
+ mtk_ecc_wait_idle(ecc, ECC_ENCODE);
+
+ /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
+ len = (config->strength * ECC_PARITY_BITS + 7) >> 3;
+ p = data + bytes;
+
+ /* write the parity bytes generated by the ECC back to the OOB region;
+ * four parity bytes are packed little-endian per ENCPAR word */
+ for (i = 0; i < len; i++) {
+ if ((i % 4) == 0) {
+#ifdef MT8512_NFI
+ val = nand_readl(ecc->regs + ECC_ENCPAR(i / 4));
+#else
+ if (i < 108)
+ val = nand_readl(ecc->regs + ECC_ENCPAR(i / 4));
+ else
+ val = nand_readl(ecc->regs + ECC_ENCPAR_EXT((i / 4) - 27));
+#endif
+ }
+ p[i] = (val >> ((i % 4) * 8)) & 0xff;
+ }
+timeout:
+ mtk_ecc_disable(ecc);
+freebuf:
+ if (config->mode == ECC_DMA_MODE) {
+ /* NOTE(review): mapped above with dir=true but unmapped here with
+ * dir=false - confirm the direction-flag semantics of nand_dma_map() */
+ nand_dma_unmap(buf, bytes, false, NULL);
+ }
+
+ return ret;
+}
+
+/*
+ * Decode @len data bytes plus parity and copy the corrected data back
+ * into @data when a bounce buffer was used.
+ * Returns 0 on success or a negative error code.
+ */
+int mtk_ecc_decode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
+ u8 *data, u32 len, int polling)
+{
+ struct mtk_ecc_stats stats;
+ u8* buf = data;
+ u32 addr = (u32)data, decodesize, i;
+ int ret;
+
+ /* total transfer = data + parity, rounded up to a 4B multiple */
+ decodesize = len + ((config->strength * ECC_PARITY_BITS + 7) >> 3);
+ if ((decodesize & 0x3)
+ || ((config->mode == ECC_DMA_MODE) && (addr & 0x3))) {
+ decodesize += 4 - (decodesize & 0x3);
+ /* buf = (u8 *)NAND_DRAM_BUF_ECCEN_ADDR; */
+ /* NOTE(review): in DMA mode the input is never copied into this
+ * bounce buffer before decoding (encode does memcpy) - confirm
+ * where the decoder's input data is expected to come from */
+ buf = ecc->buffer;
+ }
+ if (config->mode == ECC_DMA_MODE)
+ nand_dma_map(buf, decodesize, false, NULL);
+
+ addr = nand_kvaddr_to_paddr(buf);
+
+ config->op = ECC_DECODE;
+ config->addr = addr;
+ config->len = len;
+ ret = mtk_ecc_enable(ecc, config, polling);
+ if (ret)
+ goto freebuf;
+
+ /* PIO path: feed the full data+parity stream through the PIO register */
+ if (config->mode == ECC_PIO_MODE) {
+ for (i = 0; i < (decodesize >> 2); i++) {
+ mtk_ecc_wait_ioready(ecc);
+ *((u32 *)buf + i) = nand_readl(ecc->regs + ECC_PIO_DI);
+ }
+ }
+
+ stats.bitflips = 0;
+ ret = mtk_ecc_cpu_correct(ecc, &stats, buf, 0, polling);
+ if (ret)
+ goto disecc;
+
+ if (config->mode == ECC_DMA_MODE)
+ nand_dma_unmap(buf, decodesize, false, NULL);
+
+ if (buf != data)
+ nand_memcpy(data, buf, len);
+
+disecc:
+ /* NOTE(review): this error path skips nand_dma_unmap(), leaving the
+ * DMA mapping in place - confirm whether unmap is required on error */
+ mtk_ecc_disable(ecc);
+
+freebuf:
+
+ return ret;
+}
+
+/*
+ * Wait for decode completion of @sector, read the error count and error
+ * locations from the decoder and flip the faulty bits in @data.
+ * stats->failed is bumped (and nothing corrected) for an uncorrectable
+ * sector; stats->corrected and stats->bitflips are updated otherwise.
+ * stats->bitflips must be initialized by the caller (max-accumulated).
+ * Returns 0, or the error from mtk_ecc_wait_done().
+ */
+int mtk_ecc_cpu_correct(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats, u8 *data, u32 sector, int polling)
+{
+ u32 err, offset, i;
+ u32 loc, byteloc, bitloc;
+ int ret;
+
+ ecc->sectors = 1 << sector;
+ ret = mtk_ecc_wait_done(ecc, ECC_DECODE, polling);
+ if (ret)
+ return ret;
+
+ stats->corrected = 0;
+ stats->failed = 0;
+
+ /* four 5-bit error counts per DECENUM word, 8 bits apart */
+ offset = (sector >> 2);
+ err = nand_readl(ecc->regs + ECC_DECENUM(offset));
+ err = err >> ((sector % 4) * 8);
+ err &= ERR_MASK;
+ if (err == ERR_MASK) {
+ /* uncorrectable errors */
+ stats->failed++;
+ return 0;
+ }
+
+ stats->corrected += err;
+ stats->bitflips = max(stats->bitflips, err);
+
+ /* two 16-bit error locations are packed into each DECEL word */
+ for (i = 0; i < err; i++) {
+#ifdef MT8512_NFI
+ loc = nand_readl(ecc->regs + ECC_DECEL(i >> 1));
+#else
+ if (i < 60)
+ loc = nand_readl(ecc->regs + ECC_DECEL(i >> 1));
+ else
+ loc = nand_readl(ecc->regs + ECC_DECEL_EXT((i >> 1) - 30));
+#endif
+ loc >>= ((i & 0x1) << 4);
+ /* location is a bit index within the sector */
+ byteloc = loc >> 3;
+ bitloc = loc & 0x7;
+ data[byteloc] ^= (1 << bitloc);
+ }
+
+ return 0;
+}
+
+/*
+ * Round *p to a correction strength supported by the ECC engine.
+ * Values below the minimum are raised to 4; values between two supported
+ * strengths are rounded down; values above the maximum are capped at 80.
+ */
+void mtk_ecc_adjust_strength(u32 *p)
+{
+	/* static const: no per-call stack copy of the table */
+	static const u32 ecc[] = {
+		4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36,
+		40, 44, 48, 52, 56, 60, 68, 72, 80
+	};
+	size_t i;	/* size_t: avoids signed/unsigned comparison */
+
+	for (i = 0; i < sizeof(ecc) / sizeof(ecc[0]); i++) {
+		if (*p <= ecc[i]) {
+			if (!i)
+				*p = ecc[i];
+			else if (*p != ecc[i])
+				*p = ecc[i - 1];
+			return;
+		}
+	}
+
+	*p = ecc[sizeof(ecc) / sizeof(ecc[0]) - 1];
+}
+
diff --git a/src/bsp/lk/platform/mt8512/drivers/nand/slc/ecc/ecc.h b/src/bsp/lk/platform/mt8512/drivers/nand/slc/ecc/ecc.h
new file mode 100644
index 0000000..5bc10dd
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nand/slc/ecc/ecc.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include "../slc_os.h"
+
+#define ECC_PARITY_BITS (14)
+/* for SLC */
+#define ECC_MAX_CODESIZE (1024+128)
+
+struct mtk_ecc;
+
+/* Data path used by the engine: DMA master, NFI pipeline, or PIO register I/O. */
+enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1, ECC_PIO_MODE = 2};
+/* Direction of the ECC operation. */
+enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE};
+/* Decoder output level written into ECC_DECCNFG[13:12]; 3 = auto-correct. */
+enum mtk_ecc_deccon {ECC_DEC_FER = 1, ECC_DEC_LOCATE = 2, ECC_DEC_CORRECT = 3};
+
+/* Decoder statistics for one operation. */
+struct mtk_ecc_stats {
+ u32 corrected; /* total corrected bitflips over all sectors */
+ u32 bitflips; /* worst per-sector bitflip count seen */
+ u32 failed; /* number of sectors with uncorrectable errors */
+};
+
+/* Parameters for a single ECC encode/decode operation. */
+struct mtk_ecc_config {
+ enum mtk_ecc_operation op; /* ECC_ENCODE or ECC_DECODE */
+ enum mtk_ecc_mode mode; /* DMA / NFI / PIO data path */
+ enum mtk_ecc_deccon deccon; /* decoder output level (decode only) */
+ u32 addr; /* physical DMA address (DMA mode only) */
+ u32 strength; /* correction capability in bits per sector */
+ u32 sectors; /* sector count used to build the done mask (decode) */
+ u32 len; /* message length in bytes */
+};
+
+extern int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
+ u8 *data, u32 bytes, int polling);
+extern int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config, int polling);
+extern void mtk_ecc_disable(struct mtk_ecc *ecc);
+extern void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats, int sectors);
+extern int mtk_ecc_cpu_correct(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats, u8 *data, u32 sector, int polling);
+extern int mtk_ecc_wait_done(struct mtk_ecc *ecc, enum mtk_ecc_operation op, int polling);
+extern int mtk_ecc_hw_init(struct mtk_ecc **ext_ecc);
+extern int mtk_ecc_wait_decode_fsm_idle(struct mtk_ecc *ecc);
+extern int mtk_ecc_decode(struct mtk_ecc *ecc, struct mtk_ecc_config *config, u8 *data, u32 len, int polling);
diff --git a/src/bsp/lk/platform/mt8512/drivers/nand/slc/nfi/nfi.c b/src/bsp/lk/platform/mt8512/drivers/nand/slc/nfi/nfi.c
new file mode 100644
index 0000000..e61a266
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nand/slc/nfi/nfi.c
@@ -0,0 +1,1848 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nfi.h"
+
+/* NAND controller register definition */
+#define NFI_CNFG (0x00)
+#define CNFG_AHB NAND_BIT(0)
+#define CNFG_READ_EN NAND_BIT(1)
+#define CNFG_DMA_BURST_EN NAND_BIT(2)
+#define CNFG_RESEED_SEC_EN NAND_BIT(4)
+#define CNFG_RAND_SEL NAND_BIT(5)
+#define CNFG_RAND_MASK (3 << 4)
+#define CNFG_BYTE_RW NAND_BIT(6)
+#define CNFG_HW_ECC_EN NAND_BIT(8)
+#define CNFG_AUTO_FMT_EN NAND_BIT(9)
+#define CNFG_OP_READ (1 << 12)
+#define CNFG_OP_PROGRAM (3 << 12)
+#define CNFG_OP_CUST (6 << 12)
+#define CNFG_OP_MASK (7 << 12)
+#define NFI_PAGEFMT (0x04)
+#define PAGEFMT_FDM_ECC_SHIFT (12)
+#define PAGEFMT_FDM_SHIFT (8)
+#define PAGEFMT_SPARE_16 (0)
+#define PAGEFMT_SPARE_26 (1)
+#define PAGEFMT_SPARE_27 (2)
+#define PAGEFMT_SPARE_28 (3)
+#define PAGEFMT_SPARE_32 (4)
+#define PAGEFMT_SPARE_36 (5)
+#define PAGEFMT_SPARE_40 (6)
+#define PAGEFMT_SPARE_44 (7)
+#define PAGEFMT_SPARE_48 (8)
+#define PAGEFMT_SPARE_49 (9)
+#define PAGEFMT_SPARE_50 (0xa)
+#define PAGEFMT_SPARE_51 (0xb)
+#define PAGEFMT_SPARE_52 (0xc)
+#define PAGEFMT_SPARE_62 (0xd)
+#define PAGEFMT_SPARE_61 (0xe)
+#define PAGEFMT_SPARE_63 (0xf)
+#define PAGEFMT_SPARE_64 (0x10)
+#define PAGEFMT_SPARE_67 (0x11)
+#define PAGEFMT_SPARE_74 (0x12)
+#define PAGEFMT_SPARE_SHIFT (16)
+#define PAGEFMT_SEC_SEL_512 NAND_BIT(2)
+#define PAGEFMT_512_2K (0)
+#define PAGEFMT_2K_4K (1)
+#define PAGEFMT_4K_8K (2)
+#define PAGEFMT_8K_16K (3)
+/* NFI control */
+#define NFI_CON (0x08)
+#define CON_FIFO_FLUSH NAND_BIT(0)
+#define CON_NFI_RST NAND_BIT(1)
+#define CON_BRD NAND_BIT(8) /* burst read */
+#define CON_BWR NAND_BIT(9) /* burst write */
+#define CON_SEC_SHIFT (12)
+/* Timming control register */
+#define NFI_ACCCON (0x0c)
+#define NFI_INTR_EN (0x10)
+#define INTR_BUSY_RETURN_EN NAND_BIT(4)
+#define INTR_AHB_DONE_EN NAND_BIT(6)
+#define NFI_INTR_STA (0x14)
+#define NFI_CMD (0x20)
+#define NFI_ADDRNOB (0x30)
+#define NFI_COLADDR (0x34)
+#define NFI_ROWADDR (0x38)
+#define NFI_STRDATA (0x40)
+#define STAR_EN (1)
+#define STAR_DE (0)
+#define NFI_CNRNB (0x44)
+#define NFI_DATAW (0x50)
+#define NFI_DATAR (0x54)
+#define NFI_PIO_DIRDY (0x58)
+#define PIO_DI_RDY (0x01)
+#define NFI_STA (0x60)
+#define STA_CMD NAND_BIT(0)
+#define STA_ADDR NAND_BIT(1)
+#define STA_BUSY NAND_BIT(8)
+#define STA_EMP_PAGE NAND_BIT(12)
+#define NFI_FSM_CUSTDATA (0xe << 16)
+#define NFI_FSM_MASK (0xf << 16)
+#define NFI_ADDRCNTR (0x70)
+#define CNTR_MASK NAND_GENMASK(16, 12)
+#define ADDRCNTR_SEC_SHIFT (12)
+#define ADDRCNTR_SEC(val) \
+ (((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
+#define NFI_STRADDR (0x80)
+#define NFI_BYTELEN (0x84)
+#define NFI_CSEL (0x90)
+#define NFI_FDML(x) (0xa0 + (x) * sizeof(u32) * 2)
+#define NFI_FDMM(x) (0xa4 + (x) * sizeof(u32) * 2)
+#define NFI_FDM_MAX_SIZE (8)
+#define NFI_FDM_MIN_SIZE (1)
+#define NFI_DEBUG_CON1 (0x220)
+#define BYPASS_MASTER_EN NAND_BIT(15)
+#define NFI_MASTER_STA (0x224)
+#define MASTER_STA_MASK (0x3)
+#define NFI_RANDOM_CNFG (0x238)
+#define RAN_ENCODE_EN NAND_BIT(0)
+#define ENCODE_SEED_SHIFT (1)
+#define RAN_DECODE_EN NAND_BIT(16)
+#define DECODE_SEED_SHIFT (17)
+#define RAN_SEED_MASK (0x7fff)
+#define NFI_EMPTY_THRESH (0x23c)
+#define NFI_SNAND_CNFG (0x55c)
+
+#define MTK_RESET_TIMEOUT (1000000)
+#define MTK_MAX_SECTOR (16)
+#define MTK_NAND_MAX_NSELS (2)
+
+/* Exchange the bytes pointed to by @x and @y. */
+void swap(char *x, char *y)
+{
+	char hold = *x;
+
+	*x = *y;
+	*y = hold;
+}
+
+/* Constrain @fValue to [@fMin, @fMax]; the upper bound is checked first. */
+u32 clamp(u32 fValue, u32 fMin, u32 fMax)
+{
+	if (fValue > fMax)
+		return fMax;
+	if (fValue < fMin)
+		return fMin;
+	return fValue;
+}
+
+/* Recover the MTK-specific chip wrapper from the embedded generic chip. */
+static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct mtk_nand_chip *chip)
+{
+ return containerof(chip, struct mtk_nfc_nand_chip, chip);
+}
+
+/* Address of sector @i's data chunk in caller buffer @p (ecc_size stride). */
+static inline u8 *data_ptr(struct mtk_nand_chip *chip, const u8 *p, int i)
+{
+ return (u8 *)p + i * chip->ecc_size;
+}
+
+/* Raw per-sector length in the controller layout: data chunk + spare bytes. */
+static inline int mtk_data_len(struct mtk_nand_chip *chip)
+{
+	struct mtk_nfc_nand_chip *nand = to_mtk_nand(chip);
+
+	return nand->spare_per_sector + chip->ecc_size;
+}
+
+/* Address of sector @i's raw data inside the controller bounce buffer. */
+static inline u8 *mtk_data_ptr(struct mtk_nand_chip *chip, int i)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+ return nfc->buffer + i * mtk_data_len(chip);
+}
+
+/*
+ * FDM (free OOB) bytes of sector @i inside chip->oob_poi.
+ * The bad-mark sector's FDM is stored at the very start of the OOB and
+ * the sectors before it are shifted up by one slot.
+ */
+static inline u8 *oob_ptr(struct mtk_nand_chip *chip, int i)
+{
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ u8 *poi;
+
+ /* map the sector's FDM data to free oob:
+ * the beginning of the oob area stores the FDM data of bad mark sectors
+ */
+ if (i < mtk_nand->bad_mark.sec)
+ poi = chip->oob_poi + (i + 1) * mtk_nand->fdm.reg_size;
+ else if (i == mtk_nand->bad_mark.sec)
+ poi = chip->oob_poi;
+ else
+ poi = chip->oob_poi + i * mtk_nand->fdm.reg_size;
+
+ return poi;
+}
+
+/* Address of sector @i's OOB area (just after its data) in the bounce buffer. */
+static inline u8 *mtk_oob_ptr(struct mtk_nand_chip *chip, int i)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+ return nfc->buffer + i * mtk_data_len(chip) + chip->ecc_size;
+}
+
+/* Write a 32-bit NFI register at offset @reg. */
+static inline void nfi_writel(struct mtk_nfc *nfc, u32 val, u32 reg)
+{
+ nand_writel(val, nfc->regs + reg);
+}
+
+/* Write a 16-bit NFI register at offset @reg. */
+static inline void nfi_writew(struct mtk_nfc *nfc, u16 val, u32 reg)
+{
+ nand_writew(val, nfc->regs + reg);
+}
+
+/* Write an 8-bit NFI register at offset @reg. */
+static inline void nfi_writeb(struct mtk_nfc *nfc, u8 val, u32 reg)
+{
+ nand_writeb(val, nfc->regs + reg);
+}
+
+/* Read a 32-bit NFI register at offset @reg. */
+static inline u32 nfi_readl(struct mtk_nfc *nfc, u32 reg)
+{
+ return nand_readl(nfc->regs + reg);
+}
+
+/* Read a 16-bit NFI register at offset @reg. */
+static inline u16 nfi_readw(struct mtk_nfc *nfc, u32 reg)
+{
+ return nand_readw(nfc->regs + reg);
+}
+
+/* Read an 8-bit NFI register at offset @reg. */
+static inline u8 nfi_readb(struct mtk_nfc *nfc, u32 reg)
+{
+ return nand_readb(nfc->regs + reg);
+}
+
+/*
+ * Debug helper: dump the whole NFI/NFIECC register window plus the NFI
+ * clock and GPIO registers at hard-coded physical addresses.
+ * NOTE(review): the 0x10000xxx/0x10005xxx constants are raw SoC register
+ * addresses - confirm they are valid for every target this builds for.
+ */
+static void mtk_nfc_dump_reg(struct mtk_nfc *nfc)
+{
+ u32 i;
+
+ nand_info("nfi and nfiecc registers");
+ for (i = 0; i < 0x2000; i+=4) {
+ if (!(i % 16))
+ nand_info("\n0x%x: ", i);
+ nand_info("%x", nfi_readl(nfc, i));
+ }
+
+ nand_info("nfi clock setting");
+ nand_info("0x10000004 = 0x%x", *((volatile u32*)0x10000004));
+ nand_info("0x1000007c = 0x%x", *((volatile u32*)0x1000007c));
+ nand_info("0x10000024 = 0x%x", *((volatile u32*)0x10000024));
+ nand_info("0x1000003c = 0x%x", *((volatile u32*)0x1000003c));
+ nand_info("0x10000070 = 0x%x", *((volatile u32*)0x10000070));
+
+ nand_info("nfi gpio setting");
+ nand_info("0x10005320 = 0x%x", *((volatile u32*)0x10005320));
+ nand_info("0x10005330 = 0x%x", *((volatile u32*)0x10005330));
+ nand_info("0x10005340 = 0x%x", *((volatile u32*)0x10005340));
+ nand_info("0x10005460 = 0x%x", *((volatile u32*)0x10005460));
+ nand_info("0x10005470 = 0x%x", *((volatile u32*)0x10005470));
+ nand_info("0x10005480 = 0x%x", *((volatile u32*)0x10005480));
+ nand_info("0x10005e60 = 0x%x", *((volatile u32*)0x10005e60));
+ nand_info("0x10005d00 = 0x%x", *((volatile u32*)0x10005d00));
+ nand_info("0x10005d60 = 0x%x", *((volatile u32*)0x10005d60));
+ nand_info("0x10005d70 = 0x%x", *((volatile u32*)0x10005d70));
+ nand_info("0x10005c10 = 0x%x", *((volatile u32*)0x10005c10));
+ nand_info("0x10005c60 = 0x%x", *((volatile u32*)0x10005c60));
+ nand_info("0x10005c70 = 0x%x", *((volatile u32*)0x10005c70));
+
+}
+
+/*
+ * Reset the NFI controller: flush the FIFO, force a master reset, wait
+ * for any in-flight master transaction to drain, then reset again and
+ * clear the trigger register.
+ */
+static void mtk_nfc_hw_reset(struct mtk_nfc *nfc)
+{
+ /* reset all registers and force the NFI master to terminate */
+ nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
+
+ /* wait for the master to finish the last transaction */
+ if (!check_with_timeout(!(nfi_readl(nfc, NFI_MASTER_STA) & MASTER_STA_MASK),
+ MTK_RESET_TIMEOUT))
+ nand_err("NFI HW reset timeout!");
+
+ /* ensure any status register affected by the NFI master is reset */
+ nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
+ nfi_writew(nfc, STAR_DE, NFI_STRDATA);
+
+}
+
+/* Randomizer define */
+#define SS_SEED_NUM 128
+#define RAND_SEED_SHIFT(op) ((op) == RAND_ENCODE ? ENCODE_SEED_SHIFT : DECODE_SEED_SHIFT)
+#define RAND_EN(op) ((op) == RAND_ENCODE ? RAN_ENCODE_EN : RAN_DECODE_EN)
+/* Per-page randomizer seeds; indexed in mtk_nfc_randomizer_enable() by
+ * page % min(page_per_block, SS_SEED_NUM). */
+static u16 ss_randomizer_seed[SS_SEED_NUM] = {
+ 0x576A, 0x05E8, 0x629D, 0x45A3, 0x649C, 0x4BF0, 0x2342, 0x272E,
+ 0x7358, 0x4FF3, 0x73EC, 0x5F70, 0x7A60, 0x1AD8, 0x3472, 0x3612,
+ 0x224F, 0x0454, 0x030E, 0x70A5, 0x7809, 0x2521, 0x484F, 0x5A2D,
+ 0x492A, 0x043D, 0x7F61, 0x3969, 0x517A, 0x3B42, 0x769D, 0x0647,
+ 0x7E2A, 0x1383, 0x49D9, 0x07B8, 0x2578, 0x4EEC, 0x4423, 0x352F,
+ 0x5B22, 0x72B9, 0x367B, 0x24B6, 0x7E8E, 0x2318, 0x6BD0, 0x5519,
+ 0x1783, 0x18A7, 0x7B6E, 0x7602, 0x4B7F, 0x3648, 0x2C53, 0x6B99,
+ 0x0C23, 0x67CF, 0x7E0E, 0x4D8C, 0x5079, 0x209D, 0x244A, 0x747B,
+ 0x350B, 0x0E4D, 0x7004, 0x6AC3, 0x7F3E, 0x21F5, 0x7A15, 0x2379,
+ 0x1517, 0x1ABA, 0x4E77, 0x15A1, 0x04FA, 0x2D61, 0x253A, 0x1302,
+ 0x1F63, 0x5AB3, 0x049A, 0x5AE8, 0x1CD7, 0x4A00, 0x30C8, 0x3247,
+ 0x729C, 0x5034, 0x2B0E, 0x57F2, 0x00E4, 0x575B, 0x6192, 0x38F8,
+ 0x2F6A, 0x0C14, 0x45FC, 0x41DF, 0x38DA, 0x7AE1, 0x7322, 0x62DF,
+ 0x5E39, 0x0E64, 0x6D85, 0x5951, 0x5937, 0x6281, 0x33A1, 0x6A32,
+ 0x3A5A, 0x2BAC, 0x743A, 0x5E74, 0x3B2E, 0x7EC7, 0x4FD2, 0x5D28,
+ 0x751F, 0x3EF8, 0x39B1, 0x4E49, 0x746B, 0x6EF6, 0x44BE, 0x6DB7
+};
+
+/* Enable scrambling for this chip when the randomizer eFuse bit is blown. */
+static void mtk_nfc_randomizer_init(struct mtk_nand_chip *chip)
+{
+ /* check whether randomizer efuse is on */
+ if ((*EFUSE_RANDOM_CFG) & EFUSE_RANDOM_ENABLE)
+ chip->options |= NAND_NEED_SCRAMBLING;
+}
+
+/*
+ * Program and arm the NFI randomizer for @page before an encode/decode
+ * pass. No-op unless the chip needs scrambling. @repage disables
+ * per-sector reseeding. See mtk_nfc_randomizer_disable() for teardown.
+ */
+void mtk_nfc_randomizer_enable(struct mtk_nand_chip *chip, int page,
+ enum mtk_randomizer_operation rand, int repage)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ u32 reg = 0;
+ u32 loop = SS_SEED_NUM;
+
+ if (!(chip->options & NAND_NEED_SCRAMBLING))
+ return;
+
+ mtk_nfc_hw_reset(nfc);
+
+ /* randomizer type and reseed type setup */
+ reg = nfi_readl(nfc, NFI_CNFG) | CNFG_RAND_SEL;
+ if (repage)
+ reg &= ~CNFG_RESEED_SEC_EN;
+ else
+ reg |= CNFG_RESEED_SEC_EN;
+ nfi_writel(nfc, reg, NFI_CNFG);
+
+ /* seed selected per page within the block, direction-specific enable */
+ if (chip->page_per_block <= SS_SEED_NUM)
+ loop = chip->page_per_block;
+ reg = (ss_randomizer_seed[page % loop] & RAN_SEED_MASK) << RAND_SEED_SHIFT(rand);
+ reg |= RAND_EN(rand);
+
+ nfi_writel(nfc, reg, NFI_RANDOM_CNFG);
+}
+
+/* Disarm the randomizer and reset the NFI; no-op without scrambling. */
+void mtk_nfc_randomizer_disable(struct mtk_nand_chip *chip)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+ if (!(chip->options & NAND_NEED_SCRAMBLING))
+ return;
+
+ nfi_writel(nfc, 0, NFI_RANDOM_CNFG);
+
+ mtk_nfc_hw_reset(nfc);
+}
+
+/*
+ * Issue one command byte on the NFI bus and wait for the controller to
+ * leave the command state. Always returns 0; a timeout is only logged.
+ */
+static int mtk_nfc_send_command(struct mtk_nfc *nfc, u8 command)
+{
+	int done;
+
+	nfi_writel(nfc, command, NFI_CMD);
+
+	done = check_with_timeout(!(nfi_readl(nfc, NFI_STA) & STA_CMD), MTK_NAND_TIMEOUT);
+	if (!done)
+		nand_err("send cmd 0x%x timeout", command);
+
+	return 0;
+}
+
+/*
+ * Issue a one-byte column-address cycle on the NFI bus and wait for the
+ * controller to leave the address state. Always returns 0; a timeout is
+ * only logged.
+ */
+static int mtk_nfc_send_address(struct mtk_nfc *nfc, int addr)
+{
+	nfi_writel(nfc, addr, NFI_COLADDR);
+	nfi_writel(nfc, 0, NFI_ROWADDR);
+	nfi_writew(nfc, 1, NFI_ADDRNOB);
+
+	if (!check_with_timeout(!(nfi_readl(nfc, NFI_STA) & STA_ADDR), MTK_NAND_TIMEOUT)) {
+		/* fixed: the message previously said "cmd" for an address cycle */
+		nand_err("send address 0x%x timeout", addr);
+	}
+
+	return 0;
+}
+
+/*
+ * Block until the NFI interrupt handler signals completion, up to
+ * @timeout. Logs the enabled interrupt mask on failure.
+ * Returns 0 on success, the wait error otherwise.
+ */
+static int mtk_nfc_irq_wait(struct mtk_nfc *nfc, u32 timeout)
+{
+ int ret;
+
+ ret = nand_wait_for_completion_timeout(&nfc->done, timeout);
+ if (ret != 0) {
+ nand_err("failed to get event INT=0x%x",
+ nfi_readw(nfc, NFI_INTR_EN));
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * NFI interrupt handler: mask the asserted, enabled interrupt bits
+ * (acknowledge) and signal the waiter in mtk_nfc_irq_wait().
+ */
+static enum handler_return mtk_nfc_interrupt_handler(void *arg)
+{
+ struct mtk_nfc *nfc = arg;
+ u16 sta, ien;
+
+ sta = nfi_readw(nfc, NFI_INTR_STA);
+ ien = nfi_readw(nfc, NFI_INTR_EN);
+ if (!(sta & ien))
+ return NAND_IRQ_NONE;
+
+ /* disable the interrupt sources that just fired */
+ nfi_writew(nfc, ~sta & ien, NFI_INTR_EN);
+
+ /* MUST BE *false*! otherwise, schedule in interrupt */
+ nand_complete(&nfc->done);
+
+ return NAND_IRQ_HANDLED;
+}
+
+/*
+ * Prepare the NFI completion object.
+ * NOTE(review): the actual IRQ registration is commented out below, so
+ * nand_complete() is never invoked unless the handler is hooked
+ * elsewhere - confirm this is intentional for this baseline.
+ */
+static int mtk_nfc_request_irq(struct mtk_nfc *nfc)
+{
+ nand_init_completion(&nfc->done);
+ //mtk_nand_request_irq(NAND_NFI_IRQ_BIT_ID, &mtk_nfc_interrupt_handler, nfc);
+ return 0;
+}
+
+/*
+ * Program NFI_PAGEFMT and the ECC engine config for the attached chip:
+ * page-size field, sector-size select (512 vs 1KB), spare-per-sector
+ * encoding, and the FDM (in-band OOB) sizes.
+ *
+ * Returns 0 on success, -EINVAL for an unsupported page size or
+ * spare-per-sector value.
+ */
+static int mtk_nfc_hw_runtime_config(struct mtk_nand_chip *chip)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ u32 fmt, spare;
+
+ if (!chip->pagesize)
+ return -EINVAL;
+
+ /* nand_debug("spare_per_sector:%d, ecc_size:%d, acctiming:0x%x",
+ mtk_nand->spare_per_sector, mtk_nand->fdm.ecc_size, mtk_nand->acctiming); */
+
+ spare = mtk_nand->spare_per_sector;
+
+ /*
+ * The PAGEFMT page-size field is relative to the sector size: with
+ * 512B sectors use the (pagesize, 2*pagesize) encoding plus
+ * PAGEFMT_SEC_SEL_512; with 1KB sectors use the next encoding down.
+ */
+ switch (chip->pagesize) {
+ case 512:
+ fmt = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512;
+ break;
+ case KB(2):
+ if (chip->ecc_size == 512)
+ fmt = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512;
+ else
+ fmt = PAGEFMT_512_2K;
+ break;
+ case KB(4):
+ if (chip->ecc_size == 512)
+ fmt = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512;
+ else
+ fmt = PAGEFMT_2K_4K;
+ break;
+ case KB(8):
+ if (chip->ecc_size == 512)
+ fmt = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512;
+ else
+ fmt = PAGEFMT_4K_8K;
+ break;
+ case KB(16):
+ /* NOTE(review): no 512B-sector variant here — confirm 16K pages
+ * are only supported with 1KB sectors on this controller. */
+ fmt = PAGEFMT_8K_16K;
+ break;
+ default:
+ nand_err("invalid page len: %d", chip->pagesize);
+ return -EINVAL;
+ }
+
+ /*
+ * the hardware will double the value for this eccsize, so we need to
+ * halve it
+ */
+ if (chip->ecc_size == 1024)
+ spare >>= 1;
+
+ /* map the (possibly halved) spare size onto its PAGEFMT encoding */
+ switch (spare) {
+ case 16:
+ fmt |= (PAGEFMT_SPARE_16 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 26:
+ fmt |= (PAGEFMT_SPARE_26 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 27:
+ fmt |= (PAGEFMT_SPARE_27 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 28:
+ fmt |= (PAGEFMT_SPARE_28 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 32:
+ fmt |= (PAGEFMT_SPARE_32 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 36:
+ fmt |= (PAGEFMT_SPARE_36 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 40:
+ fmt |= (PAGEFMT_SPARE_40 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 44:
+ fmt |= (PAGEFMT_SPARE_44 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 48:
+ fmt |= (PAGEFMT_SPARE_48 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 49:
+ fmt |= (PAGEFMT_SPARE_49 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 50:
+ fmt |= (PAGEFMT_SPARE_50 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 51:
+ fmt |= (PAGEFMT_SPARE_51 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 52:
+ fmt |= (PAGEFMT_SPARE_52 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 62:
+ fmt |= (PAGEFMT_SPARE_62 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 63:
+ fmt |= (PAGEFMT_SPARE_63 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 64:
+ fmt |= (PAGEFMT_SPARE_64 << PAGEFMT_SPARE_SHIFT);
+ break;
+ default:
+ nand_err("invalid spare per sector %d", spare);
+ return -EINVAL;
+ }
+
+ fmt |= mtk_nand->fdm.reg_size << PAGEFMT_FDM_SHIFT;
+ fmt |= mtk_nand->fdm.ecc_size << PAGEFMT_FDM_ECC_SHIFT;
+ nfi_writel(nfc, fmt, NFI_PAGEFMT);
+
+ /* ECC covers the sector data plus the ECC-protected part of the FDM */
+ nfc->ecc_cfg.strength = chip->ecc_strength;
+ nfc->ecc_cfg.len = chip->ecc_size + mtk_nand->fdm.ecc_size;
+
+ return 0;
+}
+
+/*
+ * Select chip 'chip_num' (drive its CE via NFI_CSEL) and reprogram the
+ * page-format/ECC config for it.  Negative chip numbers and reselecting
+ * the already-active chip are no-ops.
+ */
+static void mtk_nfc_select_chip(struct mtk_nand_chip *chip, int chip_num)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+ if ((chip_num < 0) || (chip_num == chip->activechip))
+ return;
+
+ if(chip_num > 0)
+ nand_info("chip_num:%d", chip_num);
+
+ /* only record the new active chip if reconfiguration succeeded */
+ if (!mtk_nfc_hw_runtime_config(chip)) {
+ chip->activechip = chip_num;
+ }
+
+ /* NOTE(review): CSEL is written even if runtime config failed — confirm */
+ nfi_writel(nfc, chip_num, NFI_CSEL);
+}
+
+/* Return 1 when the NAND device is ready (NFI status not busy), else 0. */
+static int mtk_nfc_dev_ready(struct mtk_nand_chip *chip)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+ return (nfi_readl(nfc, NFI_STA) & STA_BUSY) ? 0 : 1;
+}
+
+/*
+ * Arm the busy-return interrupt and sleep until the device leaves the
+ * busy state.  Returns 0 on success, -ETIMEDOUT if the interrupt never
+ * arrives (the interrupt enable is cleared again on that path).
+ */
+static int mtk_nfc_wait_busy_irq(struct mtk_nand_chip *chip)
+{
+ int ret;
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+ /* set wait busy interrupt */
+ nfi_writew(nfc, INTR_BUSY_RETURN_EN, NFI_INTR_EN);
+
+ /* wait interrupt */
+ ret = mtk_nfc_irq_wait(nfc, MTK_NAND_TIMEOUT);
+ /*
+ * mtk_nfc_irq_wait() returns 0 on success and non-zero on timeout;
+ * the original "if (!ret)" reported a timeout exactly when the wait
+ * had succeeded (and returned 0 on real timeouts).
+ */
+ if (ret) {
+ nand_err("wait busy interrupt timeout");
+ nfi_writew(nfc, 0, NFI_INTR_EN);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * Low-level NAND control hook: dispatch an address cycle (NAND_ALE) or
+ * a command cycle (NAND_CLE).  A command cycle first resets the NFI and
+ * switches it into custom-operation mode, preserving the randomizer
+ * bits via CNFG_RAND_MASK.
+ */
+static void mtk_nfc_cmd_ctrl(struct mtk_nand_chip *chip, int dat, unsigned int ctrl)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ u16 reg;
+
+ //nand_debug("ctrl:0x%x dat:0x%x", ctrl, dat);
+ if (ctrl & NAND_ALE) {
+ mtk_nfc_send_address(nfc, dat);
+ } else if (ctrl & NAND_CLE) {
+ mtk_nfc_hw_reset(nfc);
+
+ reg = nfi_readw(nfc, NFI_CNFG);
+ reg &= CNFG_RAND_MASK;
+ reg |= CNFG_OP_CUST;
+ nfi_writew(nfc, reg, NFI_CNFG);
+ mtk_nfc_send_command(nfc, dat);
+ }
+
+ //nand_debug("ctrl:0x%x dat:0x%x", ctrl, dat);
+
+}
+
+/*
+ * Spin until the PIO data port is ready (PIO_DI_RDY).  On timeout the
+ * relevant NFI registers are dumped for diagnosis; the caller is not
+ * informed and will read/write the port regardless.
+ */
+static inline void mtk_nfc_wait_ioready(struct mtk_nfc *nfc)
+{
+ if (!check_with_timeout((nfi_readl(nfc, NFI_PIO_DIRDY) & PIO_DI_RDY), MTK_NAND_TIMEOUT)) {
+ nand_err("data not ready");
+ nand_err("cntr 0x%x cnfg 0x%x fmt 0x%x con 0x%x",
+ nfi_readl(nfc, NFI_BYTELEN), nfi_readl(nfc, NFI_CNFG),
+ nfi_readl(nfc, NFI_PAGEFMT), nfi_readl(nfc, NFI_CON));
+ }
+}
+
+/*
+ * PIO-read a single byte from the NAND data port.  If the NFI state
+ * machine is not already in custom-data mode, configure byte-wide reads
+ * and start a burst sized to the maximum sector count so consecutive
+ * calls keep streaming without re-arming.
+ */
+static inline u8 mtk_nfc_read_byte(struct mtk_nand_chip *chip)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ u32 reg;
+
+ /* after each byte read, the NFI_STA reg is reset by the hardware */
+ reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
+ if (reg != NFI_FSM_CUSTDATA) {
+ reg = nfi_readw(nfc, NFI_CNFG);
+ reg |= CNFG_BYTE_RW | CNFG_READ_EN;
+ nfi_writew(nfc, reg, NFI_CNFG);
+
+ /*
+ * set to max sector to allow the HW to continue reading over
+ * unaligned accesses
+ */
+ reg = (MTK_MAX_SECTOR << CON_SEC_SHIFT) | CON_BRD;
+ nfi_writel(nfc, reg, NFI_CON);
+
+ /* trigger to fetch data */
+ nfi_writew(nfc, STAR_EN, NFI_STRDATA);
+ }
+
+ mtk_nfc_wait_ioready(nfc);
+
+ return nfi_readb(nfc, NFI_DATAR);
+}
+
+/* PIO-read 'len' consecutive bytes from the NAND data port into 'buf'. */
+static void mtk_nfc_read_buf(struct mtk_nand_chip *chip, u8 *buf, int len)
+{
+ u8 *end = buf + len;
+
+ while (buf < end)
+ *buf++ = mtk_nfc_read_byte(chip);
+}
+
+/*
+ * PIO-write a single byte to the NAND data port.  Mirrors
+ * mtk_nfc_read_byte(): arm a byte-wide write burst once when the state
+ * machine is not in custom-data mode, then stream bytes.
+ */
+static void mtk_nfc_write_byte(struct mtk_nand_chip *chip, u8 byte)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ u32 reg;
+
+ reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
+
+ if (reg != NFI_FSM_CUSTDATA) {
+ reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW;
+ nfi_writew(nfc, reg, NFI_CNFG);
+
+ reg = MTK_MAX_SECTOR << CON_SEC_SHIFT | CON_BWR;
+ nfi_writel(nfc, reg, NFI_CON);
+
+ nfi_writew(nfc, STAR_EN, NFI_STRDATA);
+ }
+
+ mtk_nfc_wait_ioready(nfc);
+ nfi_writeb(nfc, byte, NFI_DATAW);
+}
+
+/* PIO-write 'len' consecutive bytes from 'buf' to the NAND data port. */
+static void mtk_nfc_write_buf(struct mtk_nand_chip *chip, const u8 *buf, int len)
+{
+ const u8 *end = buf + len;
+
+ while (buf < end)
+ mtk_nfc_write_byte(chip, *buf++);
+}
+
+/*
+ * HW-ECC-encode one sector: the data plus its FDM region, in either DMA
+ * or PIO mode.  Returns the result of mtk_ecc_encode().
+ */
+static int mtk_nfc_sector_encode(struct mtk_nand_chip *chip, u8 *data, int dma, int polling)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+
+ nfc->ecc_cfg.mode = dma ? ECC_DMA_MODE : ECC_PIO_MODE;
+ nfc->ecc_cfg.op = ECC_ENCODE;
+
+ return mtk_ecc_encode(nfc->ecc, &nfc->ecc_cfg, data,
+ chip->ecc_size + mtk_nand->fdm.reg_size, polling);
+}
+
+/* bad_mark.bm_swap stub for layouts that need no bad-block-mark swap. */
+static void mtk_nfc_no_bad_mark_swap(struct mtk_nand_chip *a, u8 *b, int c)
+{
+ /* nop */
+}
+
+/*
+ * Swap the buffer byte sitting at the factory bad-block-mark position
+ * with the first OOB byte, so the mark survives the controller's page
+ * layout.  Sector stride depends on raw (data+spare) vs ECC view.
+ */
+static void mtk_nfc_bad_mark_swap(struct mtk_nand_chip *chip, u8 *buf, int raw)
+{
+ struct mtk_nfc_nand_chip *nand = to_mtk_nand(chip);
+ u32 stride = raw ? mtk_data_len(chip) : chip->ecc_size;
+ u32 bad_pos = nand->bad_mark.pos + nand->bad_mark.sec * stride;
+
+ swap(chip->oob_poi, buf + bad_pos);
+}
+
+/*
+ * Lay out a subpage write in nfc->buffer using the controller's raw
+ * page format (per-sector data + FDM), then software-encode ECC for the
+ * sectors that fall inside [offset, offset+len).
+ *
+ * Returns 0 on success or a negative mtk_nfc_sector_encode() error.
+ */
+static int mtk_nfc_format_subpage(struct mtk_nand_chip *chip, u32 offset,
+ u32 len, const u8 *buf, int dma, int polling)
+{
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ u32 start, end;
+ int i, ret;
+
+ start = offset / chip->ecc_size;
+ end = DIV_ROUND_UP(offset + len, chip->ecc_size);
+
+ nand_memset(nfc->buffer, 0xff, chip->pagesize + chip->oobsize);
+ for (i = 0; i < chip->ecc_steps; i++) {
+ /* data is copied for every sector; only in-range sectors also
+ * get FDM contents and an ECC encode below */
+ nand_memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
+ chip->ecc_size);
+
+ if (start > i || i >= end)
+ continue;
+
+ if (i == mtk_nand->bad_mark.sec)
+ mtk_nand->bad_mark.bm_swap(chip, nfc->buffer, 1);
+
+ nand_memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
+
+ /* program the CRC back to the OOB */
+ ret = mtk_nfc_sector_encode(chip, mtk_data_ptr(chip, i), dma, polling);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Lay out a full page in nfc->buffer using the controller's raw format:
+ * per-sector data (0xff-filled when buf is NULL) followed by the FDM
+ * bytes from chip->oob_poi, with the bad-block-mark swap applied.
+ */
+static void mtk_nfc_format_page(struct mtk_nand_chip *chip, const u8 *buf)
+{
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ u32 i;
+
+ nand_memset(nfc->buffer, 0xff, chip->pagesize + chip->oobsize);
+ for (i = 0; i < chip->ecc_steps; i++) {
+ if (buf)
+ nand_memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
+ chip->ecc_size);
+
+ if (i == mtk_nand->bad_mark.sec)
+ mtk_nand->bad_mark.bm_swap(chip, nfc->buffer, 1);
+
+ nand_memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
+ }
+}
+
+/*
+ * Copy the FDM (in-band OOB) bytes captured by the controller into
+ * chip->oob_poi for 'sectors' sectors starting at 'start'.  Each sector
+ * exposes up to 8 bytes split across the FDML (low 4) and FDMM (high 4)
+ * registers, unpacked a byte at a time.
+ */
+static inline void mtk_nfc_read_fdm(struct mtk_nand_chip *chip, u32 start,
+ u32 sectors)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ u32 vall, valm;
+ u8 *oobptr;
+ u32 i, j;
+
+ for (i = 0; i < sectors; i++) {
+ oobptr = oob_ptr(chip, start + i);
+ vall = nfi_readl(nfc, NFI_FDML(i));
+ valm = nfi_readl(nfc, NFI_FDMM(i));
+
+ for (j = 0; j < fdm->reg_size; j++)
+ oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);
+ }
+}
+
+/*
+ * Load the per-sector FDM registers from chip->oob_poi before a page
+ * program.  Bytes beyond fdm->reg_size are padded with 0xff (erased
+ * value); low 4 bytes go to FDML, high 4 to FDMM.
+ */
+static inline void mtk_nfc_write_fdm(struct mtk_nand_chip *chip)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ u32 vall, valm;
+ u8 *oobptr;
+ u32 i, j;
+
+ for (i = 0; i < chip->ecc_steps; i++) {
+ oobptr = oob_ptr(chip, i);
+ vall = 0;
+ valm = 0;
+ for (j = 0; j < 8; j++) {
+ if (j < 4)
+ vall |= (j < fdm->reg_size ? oobptr[j] : 0xff)
+ << (j * 8);
+ else
+ valm |= (j < fdm->reg_size ? oobptr[j] : 0xff)
+ << ((j - 4) * 8);
+ }
+ nfi_writel(nfc, vall, NFI_FDML(i));
+ nfi_writel(nfc, valm, NFI_FDMM(i));
+ }
+}
+
+/*
+ * Push one page of data to the NFI for programming, via AHB DMA or PIO.
+ * In DMA mode the buffer is mapped for device access and the AHB-done
+ * interrupt is optionally used; in PIO mode data is fed word-by-word
+ * (byte-by-byte when the length is not 4-byte aligned).  Finally polls
+ * ADDRCNTR until all sectors have been transferred.
+ *
+ * Returns 0 on success or -ETIMEDOUT.
+ */
+static int mtk_nfc_do_write_page(struct mtk_nand_chip *chip,
+ const u8 *buf, int page, int len, int raw, int dma, int polling)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ u32 *buf32 = (u32 *)buf;
+ u32 addr, reg, i;
+ u32 data_len = chip->ecc_size;
+ int ret = 0, byterw;
+
+ addr = nand_kvaddr_to_paddr(buf);
+ if (dma) {
+ reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AHB | CNFG_DMA_BURST_EN;
+ nfi_writew(nfc, reg, NFI_CNFG);
+ nand_dma_map(buf, len, true, NULL);
+ }
+
+ nfi_writel(nfc, chip->ecc_steps << CON_SEC_SHIFT, NFI_CON);
+ nfi_writel(nfc, addr, NFI_STRADDR);
+
+ if (dma && (!polling)) {
+ nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
+ }
+
+ reg = nfi_readl(nfc, NFI_CON) | CON_BWR;
+ nfi_writel(nfc, reg, NFI_CON);
+ nfi_writew(nfc, STAR_EN, NFI_STRDATA);
+
+ if (!dma) {
+ /* PIO path: raw writes include the spare area per sector */
+ if (raw)
+ data_len = mtk_data_len(chip);
+ data_len *= chip->ecc_steps;
+
+ if (data_len & 0x3) {
+ reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW;
+ nfi_writew(nfc, reg, NFI_CNFG);
+ byterw = 1;
+ } else {
+ data_len >>= 2; /* word count */
+ byterw = 0;
+ }
+
+ for (i = 0; i < data_len; i++) {
+ mtk_nfc_wait_ioready(nfc);
+ if (!byterw)
+ nfi_writel(nfc, buf32[i],NFI_DATAW);
+ else
+ nfi_writeb(nfc, buf[i], NFI_DATAW);
+ }
+ }
+
+ if (dma && (!polling)) {
+#ifdef MTK_NAND_IRQ_EN
+ ret = mtk_nfc_irq_wait(nfc, MTK_NAND_TIMEOUT);
+#endif
+ /*
+ * NOTE(review): mtk_nfc_irq_wait() returns 0 on success, so this
+ * "!ret" condition fires on the SUCCESS path — and always fires
+ * when MTK_NAND_IRQ_EN is not defined (ret stays 0).  Looks
+ * inverted; confirm against the IRQ-mode call sites.
+ */
+ if (!ret) {
+ nand_err("program ahb done timeout");
+ nfi_writew(nfc, 0, NFI_INTR_EN);
+ ret = -ETIMEDOUT;
+ goto timeout;
+ }
+ }
+
+ if (!check_with_timeout(ADDRCNTR_SEC(nfi_readl(nfc, NFI_ADDRCNTR)) >= chip->ecc_steps,
+ MTK_NAND_TIMEOUT))
+ nand_err("do page write timeout");
+
+timeout:
+ if (dma)
+ nand_dma_unmap(buf, len, false, NULL);
+
+ nfi_writel(nfc, 0, NFI_CON);
+
+ return ret;
+}
+
+/*
+ * Program one page.  Non-raw: enable auto-format + HW ECC, copy the
+ * caller's data into nfc->buffer (with bad-mark swap) and stage the OOB
+ * in the FDM registers.  Raw: write the caller's pre-formatted buffer
+ * (data + spare) untouched.  The ECC engine is disabled again after a
+ * non-raw write.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int mtk_nfc_write_page(struct mtk_nand_chip *chip,
+ const u8 *buf, int page, int raw, int dma, int polling)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ u32 len;
+ const u8 *bufpoi;
+ u32 reg;
+ int ret;
+
+ if (!raw) {
+ /* OOB => FDM: from register, ECC: from HW */
+ reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AUTO_FMT_EN;
+ nfi_writew(nfc, reg | CNFG_HW_ECC_EN, NFI_CNFG);
+
+ nfc->ecc_cfg.op = ECC_ENCODE;
+ nfc->ecc_cfg.mode = ECC_NFI_MODE;
+ ret = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg, polling);
+ if (ret) {
+ /* clear NFI config */
+ reg = nfi_readw(nfc, NFI_CNFG);
+ reg &= ~(CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
+ nfi_writew(nfc, reg, NFI_CNFG);
+
+ return ret;
+ }
+
+ nand_memcpy(nfc->buffer, buf, chip->pagesize);
+ mtk_nand->bad_mark.bm_swap(chip, nfc->buffer, raw);
+ bufpoi = nfc->buffer;
+
+ /* write OOB into the FDM registers (OOB area in MTK NAND) */
+ mtk_nfc_write_fdm(chip);
+ } else {
+ bufpoi = buf;
+ }
+
+ len = chip->pagesize + (raw ? chip->oobsize : 0);
+ ret = mtk_nfc_do_write_page(chip, bufpoi, page, len, raw, dma, polling);
+
+ if (!raw)
+ mtk_ecc_disable(nfc->ecc);
+
+ return ret;
+}
+
+/* ECC page write, DMA transfer, polled completion. */
+static int mtk_nfc_write_page_ecc_dma_polling(struct mtk_nand_chip *chip, const u8 *buf,
+ int page)
+{
+ return mtk_nfc_write_page(chip, buf, page, 0, 1, 1);
+}
+
+/* ECC page write, DMA transfer, interrupt-driven completion. */
+static int mtk_nfc_write_page_ecc_dma_irq(struct mtk_nand_chip *chip, const u8 *buf,
+ int page)
+{
+ return mtk_nfc_write_page(chip, buf, page, 0, 1, 0);
+}
+
+/* ECC page write, PIO transfer, polled completion. */
+static int mtk_nfc_write_page_ecc_pio_polling(struct mtk_nand_chip *chip, const u8 *buf,
+ int page)
+{
+ return mtk_nfc_write_page(chip, buf, page, 0, 0, 1);
+}
+
+/* ECC page write, PIO transfer, interrupt-driven completion. */
+static int mtk_nfc_write_page_ecc_pio_irq(struct mtk_nand_chip *chip, const u8 *buf,
+ int page)
+{
+ return mtk_nfc_write_page(chip, buf, page, 0, 0, 0);
+}
+
+/* Raw page write (formats data+OOB into nfc->buffer first), DMA, polled. */
+static int mtk_nfc_write_page_raw_dma_polling(struct mtk_nand_chip *chip,
+ const u8 *buf, int pg)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+ mtk_nfc_format_page(chip, buf);
+ return mtk_nfc_write_page(chip, nfc->buffer, pg, 1, 1, 1);
+}
+
+/* Raw page write (formats data+OOB into nfc->buffer first), DMA, IRQ. */
+static int mtk_nfc_write_page_raw_dma_irq(struct mtk_nand_chip *chip,
+ const u8 *buf, int pg)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+ mtk_nfc_format_page(chip, buf);
+ return mtk_nfc_write_page(chip, nfc->buffer, pg, 1, 1, 0);
+}
+
+/* Raw page write (formats data+OOB into nfc->buffer first), PIO, polled. */
+static int mtk_nfc_write_page_raw_pio_polling(struct mtk_nand_chip *chip,
+ const u8 *buf, int pg)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+ mtk_nfc_format_page(chip, buf);
+ return mtk_nfc_write_page(chip, nfc->buffer, pg, 1, 0, 1);
+}
+
+/* Raw page write (formats data+OOB into nfc->buffer first), PIO, IRQ. */
+static int mtk_nfc_write_page_raw_pio_irq(struct mtk_nand_chip *chip,
+ const u8 *buf, int pg)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+ mtk_nfc_format_page(chip, buf);
+ return mtk_nfc_write_page(chip, nfc->buffer, pg, 1, 0, 0);
+}
+
+/* Subpage write: soft-encode ECC for the touched sectors, then raw-write
+ * the formatted buffer.  DMA transfer, polled completion. */
+static int mtk_nfc_write_subpage_ecc_dma_polling(struct mtk_nand_chip *chip, u32 offset,
+ u32 data_len, const u8 *buf, int page)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ int ret;
+
+ ret = mtk_nfc_format_subpage(chip, offset, data_len, buf, 1, 1);
+ if (ret < 0)
+ return ret;
+
+ /* use the data in the private buffer (now with FDM and CRC) */
+ return mtk_nfc_write_page(chip, nfc->buffer, page, 1, 1, 1);
+}
+
+/* Subpage write: soft-encode ECC for the touched sectors, then raw-write
+ * the formatted buffer.  DMA transfer, interrupt-driven completion. */
+static int mtk_nfc_write_subpage_ecc_dma_irq(struct mtk_nand_chip *chip, u32 offset,
+ u32 data_len, const u8 *buf, int page)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ int ret;
+
+ ret = mtk_nfc_format_subpage(chip, offset, data_len, buf, 1, 0);
+ if (ret < 0)
+ return ret;
+
+ /* use the data in the private buffer (now with FDM and CRC) */
+ return mtk_nfc_write_page(chip, nfc->buffer, page, 1, 1, 0);
+}
+
+/* Subpage write: soft-encode ECC for the touched sectors, then raw-write
+ * the formatted buffer.  PIO transfer, polled completion. */
+static int mtk_nfc_write_subpage_ecc_pio_polling(struct mtk_nand_chip *chip, u32 offset,
+ u32 data_len, const u8 *buf, int page)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ int ret;
+
+ ret = mtk_nfc_format_subpage(chip, offset, data_len, buf, 0, 1);
+ if (ret < 0)
+ return ret;
+
+ /* use the data in the private buffer (now with FDM and CRC) */
+ return mtk_nfc_write_page(chip, nfc->buffer, page, 1, 0, 1);
+}
+
+/* Subpage write: soft-encode ECC for the touched sectors, then raw-write
+ * the formatted buffer.  PIO transfer, interrupt-driven completion. */
+static int mtk_nfc_write_subpage_ecc_pio_irq(struct mtk_nand_chip *chip, u32 offset,
+ u32 data_len, const u8 *buf, int page)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ int ret;
+
+ ret = mtk_nfc_format_subpage(chip, offset, data_len, buf, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ /* use the data in the private buffer (now with FDM and CRC) */
+ return mtk_nfc_write_page(chip, nfc->buffer, page, 1, 0, 0);
+}
+
+/*
+ * Fold the ECC engine's decode statistics into chip->stats after a read.
+ * If the controller flagged the page as erased (STA_EMP_PAGE), the data
+ * and FDM buffers are filled with 0xff and 0 bitflips are reported.
+ *
+ * Returns the max bitflip count across the read sectors.
+ * NOTE(review): has the side effect of accumulating chip->stats — a
+ * second call for the same read double-counts.
+ */
+static int mtk_nfc_update_ecc_stats(struct mtk_nand_chip *chip, u8 *buf, u32 sectors)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_ecc_stats stats;
+ int rc, i; /* note: signed i compared against u32 sectors below */
+
+ rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
+ if (rc) {
+ nand_memset(buf, 0xff, sectors * chip->ecc_size);
+ for (i = 0; i < sectors; i++)
+ nand_memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size);
+ return 0;
+ }
+
+ mtk_ecc_get_stats(nfc->ecc, &stats, sectors);
+ chip->stats.corrected += stats.corrected;
+ chip->stats.failed += stats.failed;
+
+ return stats.bitflips;
+}
+
+/*
+ * Core page/subpage read.  Computes the sector window covering
+ * [data_offs, data_offs+readlen), optionally seeks with RNDOUT, then
+ * transfers via AHB DMA (HW ECC auto-format) or PIO (per-sector CPU ECC
+ * correction).  Returns max bitflips on success or a negative error.
+ */
+static int mtk_nfc_read_subpage(struct mtk_nand_chip *chip,
+ u32 data_offs, u32 readlen,
+ u8 *bufpoi, int page, int raw, int dma, int polling)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_ecc_stats stats;
+ u32 spare = mtk_nand->spare_per_sector;
+ u32 column, sectors, start, end, reg;
+ u32 addr, i, j;
+ int bitflips = 0;
+ u32 len, correct = 0, fail = 0;
+ u8 *buf;
+ u32 *buf32;
+ u32 data_len = chip->ecc_size;
+ int rc, byterw;
+
+ start = data_offs / chip->ecc_size;
+ end = DIV_ROUND_UP(data_offs + readlen, chip->ecc_size);
+
+ sectors = end - start;
+ column = start * (chip->ecc_size + spare);
+
+ len = sectors * chip->ecc_size + ((raw || !dma) ? sectors * spare : 0);
+ /* NOTE(review): the per-sector stride here uses "sectors * spare"
+ * where "spare" alone looks intended — harmless when start == 0
+ * (every full-page caller), suspect for mid-page subpage reads. */
+ buf = bufpoi + start * (chip->ecc_size + ((raw || !dma) ? sectors * spare : 0));
+ buf32 = (u32 *)buf;
+
+ if (column != 0)
+ chip->cmdfunc(chip, NAND_CMD_RNDOUT, column, -1);
+
+ addr = nand_kvaddr_to_paddr(buf);
+
+ reg = nfi_readw(nfc, NFI_CNFG);
+ reg |= CNFG_READ_EN;
+ if (dma)
+ reg |= CNFG_DMA_BURST_EN | CNFG_AHB;
+ if (!raw) {
+ reg |= CNFG_HW_ECC_EN;
+ if (dma)
+ reg |= CNFG_AUTO_FMT_EN;
+ nfi_writew(nfc, reg, NFI_CNFG);
+
+ nfc->ecc_cfg.mode = ECC_NFI_MODE;
+ nfc->ecc_cfg.sectors = sectors;
+ nfc->ecc_cfg.op = ECC_DECODE;
+ /* DMA: engine corrects in place; PIO: engine only locates errors
+ * and the CPU patches them via mtk_ecc_cpu_correct() below */
+ if (dma) {
+ nfc->ecc_cfg.deccon = ECC_DEC_CORRECT;
+ } else {
+ nfc->ecc_cfg.deccon = ECC_DEC_LOCATE;
+ }
+ rc = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg, polling);
+ if (rc) {
+ nand_err("ecc enable failed");
+ /* clear NFI_CNFG */
+ reg &= ~(CNFG_DMA_BURST_EN | CNFG_AHB | CNFG_READ_EN |
+ CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
+ nfi_writew(nfc, reg, NFI_CNFG);
+ /* error handle */
+ return rc;
+ }
+ } else {
+ nfi_writew(nfc, reg, NFI_CNFG);
+ }
+
+ if (dma)
+ nand_dma_map(buf, len, false, NULL);
+
+ nfi_writel(nfc, sectors << CON_SEC_SHIFT, NFI_CON);
+ nfi_writel(nfc, addr, NFI_STRADDR);
+
+ if (dma && (!polling)) {
+ nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
+ }
+ reg = nfi_readl(nfc, NFI_CON) | CON_BRD;
+ nfi_writel(nfc, reg, NFI_CON);
+ nfi_writew(nfc, STAR_EN, NFI_STRDATA);
+
+ if (!dma) {
+ /* PIO path: read each sector (data + spare), word-wise when the
+ * sector length is 4-byte aligned, byte-wise otherwise */
+ data_len = mtk_data_len(chip);
+
+ if (data_len & 0x3) {
+ reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW;
+ nfi_writew(nfc, reg, NFI_CNFG);
+ byterw = 1;
+ } else {
+ data_len >>= 2;
+ byterw = 0;
+ }
+ if (!raw) {
+ stats.bitflips = 0;
+ /* snapshot stats so an empty page can roll them back */
+ correct = chip->stats.corrected;
+ fail = chip->stats.failed;
+ }
+ for (i = 0; i < sectors; i++) {
+ for (j = 0; j < data_len; j++) {
+ mtk_nfc_wait_ioready(nfc);
+ if (!byterw)
+ *(buf32 + (i * data_len) + j) = nfi_readl(nfc, NFI_DATAR);
+ else
+ *(buf + (i * data_len) + j) = nfi_readb(nfc, NFI_DATAR);
+ }
+ if (!raw) {
+ rc = mtk_ecc_cpu_correct(nfc->ecc, &stats, buf +
+ (i * (byterw ? data_len : (data_len << 2))), i, polling);
+ if (rc < 0)
+ goto disecc;
+ chip->stats.corrected += stats.corrected;
+ chip->stats.failed += stats.failed;
+ if (stats.failed) {
+ nand_info("sectoer %d uncorrect", i);
+ }
+ }
+ }
+ if (!raw) {
+ bitflips = stats.bitflips;
+ rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
+ if (rc) {
+ nand_info("page %d is empty", page);
+ nand_memset(buf, 0xff, sectors * mtk_data_len(chip));
+ bitflips = 0;
+ chip->stats.corrected = correct;
+ chip->stats.failed = fail;
+ }
+ }
+ }
+
+ if (dma && (!polling)) {
+ rc = mtk_nfc_irq_wait(nfc, MTK_NAND_TIMEOUT);
+ /* NOTE(review): mtk_nfc_irq_wait() returns 0 on success, so this
+ * logs "timeout" on the success path — looks inverted, confirm. */
+ if (!rc) {
+ nand_err("read ahb/dma done timeout");
+ }
+ }
+
+ if (!check_with_timeout(ADDRCNTR_SEC(nfi_readl(nfc, NFI_BYTELEN)) >= sectors,
+ MTK_NAND_TIMEOUT)) {
+ nand_err("subpage done timeout %d", nfi_readl(nfc, NFI_BYTELEN));
+ nand_err("cnfg 0x%x fmt 0x%x\n con 0x%x", nfi_readl(nfc, NFI_CNFG),
+ nfi_readl(nfc, NFI_PAGEFMT), nfi_readl(nfc, NFI_CON));
+ bitflips = -EIO;
+ } else {
+ if ((!raw) && dma) {
+ bitflips = 0;
+ rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE, polling);
+ if (!rc)
+ rc = mtk_ecc_wait_decode_fsm_idle(nfc->ecc);
+ /* NOTE(review): mtk_nfc_update_ecc_stats() is called twice on
+ * this path (here and again after read_fdm); each call adds
+ * to chip->stats, so corrected/failed are double-counted.
+ * Upstream calls it once — confirm and drop one call. */
+ bitflips = rc < 0 ? -ETIMEDOUT :
+ mtk_nfc_update_ecc_stats(chip, buf, sectors);
+ nand_dma_unmap(buf, len, false, NULL);
+ mtk_nfc_read_fdm(chip, start, sectors);
+ bitflips = rc < 0 ? -ETIMEDOUT :
+ mtk_nfc_update_ecc_stats(chip, buf, sectors);
+ }
+ }
+
+ if (raw)
+ goto done;
+
+disecc:
+ mtk_ecc_disable(nfc->ecc);
+
+ if (!dma)
+ goto done;
+
+ if (clamp(mtk_nand->bad_mark.sec, start, end) == mtk_nand->bad_mark.sec)
+ mtk_nand->bad_mark.bm_swap(chip, bufpoi, raw);
+done:
+ nfi_writel(nfc, 0, NFI_CON);
+
+ return bitflips;
+}
+
+/* ECC subpage read, DMA transfer, polled completion. */
+static int mtk_nfc_read_subpage_ecc_dma_polling(struct mtk_nand_chip *chip, u32 off,
+ u32 len, u8 *p, int pg)
+{
+ return mtk_nfc_read_subpage(chip, off, len, p, pg, 0, 1, 1);
+}
+
+/* ECC subpage read, DMA transfer, interrupt-driven completion. */
+static int mtk_nfc_read_subpage_ecc_dma_irq(struct mtk_nand_chip *chip, u32 off,
+ u32 len, u8 *p, int pg)
+{
+ return mtk_nfc_read_subpage(chip, off, len, p, pg, 0, 1, 0);
+}
+
+/*
+ * ECC subpage read via PIO (polled): read into nfc->buffer, then unpack
+ * per-sector FDM bytes and data into chip->oob_poi and 'p'.
+ * Returns max bitflips or a negative error from the core read.
+ */
+static int mtk_nfc_read_subpage_ecc_pio_polling(struct mtk_nand_chip *chip, u32 off,
+ u32 len, u8 *p, int pg)
+{
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ u32 sectors, start, end;
+ int i, ret;
+
+ start = off / chip->ecc_size;
+ end = DIV_ROUND_UP(off + len, chip->ecc_size);
+ sectors = end - start; /* note: computed but unused */
+
+ nand_memset(nfc->buffer, 0xff, chip->pagesize + chip->oobsize);
+ ret = mtk_nfc_read_subpage(chip, off, len, nfc->buffer, pg, 0, 0, 1);
+ if (ret < 0)
+ return ret;
+
+ for (i = start; i < end; i++) {
+ nand_memcpy(oob_ptr(chip, i), mtk_oob_ptr(chip, i), fdm->reg_size);
+
+ if (i == mtk_nand->bad_mark.sec)
+ mtk_nand->bad_mark.bm_swap(chip, nfc->buffer, 1);
+
+ if (p)
+ nand_memcpy(data_ptr(chip, p, i), mtk_data_ptr(chip, i),
+ chip->ecc_size);
+ }
+
+ return ret;
+}
+
+/*
+ * ECC subpage read via PIO (interrupt-driven): read into nfc->buffer,
+ * then unpack per-sector FDM bytes and data into chip->oob_poi and 'p'.
+ * Returns max bitflips or a negative error from the core read.
+ */
+static int mtk_nfc_read_subpage_ecc_pio_irq(struct mtk_nand_chip *chip, u32 off,
+ u32 len, u8 *p, int pg)
+{
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ u32 sectors, start, end;
+ int i, ret;
+
+ start = off / chip->ecc_size;
+ end = DIV_ROUND_UP(off + len, chip->ecc_size);
+ sectors = end - start; /* note: computed but unused */
+
+ nand_memset(nfc->buffer, 0xff, chip->pagesize + chip->oobsize);
+ ret = mtk_nfc_read_subpage(chip, off, len, nfc->buffer, pg, 0, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ for (i = start; i < end; i++) {
+ nand_memcpy(oob_ptr(chip, i), mtk_oob_ptr(chip, i), fdm->reg_size);
+
+ if (i == mtk_nand->bad_mark.sec)
+ mtk_nand->bad_mark.bm_swap(chip, nfc->buffer, 1);
+
+ if (p)
+ nand_memcpy(data_ptr(chip, p, i), mtk_data_ptr(chip, i),
+ chip->ecc_size);
+ }
+
+ return ret;
+}
+
+/* ECC full-page read, DMA transfer, polled completion. */
+static int mtk_nfc_read_page_ecc_dma_polling(struct mtk_nand_chip *chip, u8 *p,
+ int pg)
+{
+ return mtk_nfc_read_subpage(chip, 0, chip->pagesize, p, pg, 0, 1, 1);
+}
+
+/* ECC full-page read, DMA transfer, interrupt-driven completion. */
+static int mtk_nfc_read_page_ecc_dma_irq(struct mtk_nand_chip *chip, u8 *p,
+ int pg)
+{
+ return mtk_nfc_read_subpage(chip, 0, chip->pagesize, p, pg, 0, 1, 0);
+}
+
+/*
+ * ECC full-page read via PIO (polled): read into nfc->buffer, then
+ * unpack FDM bytes and data for every sector.
+ */
+static int mtk_nfc_read_page_ecc_pio_polling(struct mtk_nand_chip *chip, u8 *p,
+ int pg)
+{
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ int i, ret;
+
+ nand_memset(nfc->buffer, 0xff, chip->pagesize + chip->oobsize);
+ ret = mtk_nfc_read_subpage(chip, 0, chip->pagesize, nfc->buffer, pg, 0, 0, 1);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < chip->ecc_steps; i++) {
+ nand_memcpy(oob_ptr(chip, i), mtk_oob_ptr(chip, i), fdm->reg_size);
+
+ if (i == mtk_nand->bad_mark.sec)
+ mtk_nand->bad_mark.bm_swap(chip, nfc->buffer, 1);
+
+ if (p)
+ nand_memcpy(data_ptr(chip, p, i), mtk_data_ptr(chip, i),
+ chip->ecc_size);
+ }
+
+ return ret;
+}
+
+/*
+ * ECC full-page read via PIO (interrupt-driven): read into nfc->buffer,
+ * then unpack FDM bytes and data for every sector.
+ */
+static int mtk_nfc_read_page_ecc_pio_irq(struct mtk_nand_chip *chip, u8 *p,
+ int pg)
+{
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ int i, ret;
+
+ nand_memset(nfc->buffer, 0xff, chip->pagesize + chip->oobsize);
+ ret = mtk_nfc_read_subpage(chip, 0, chip->pagesize, nfc->buffer, pg, 0, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < chip->ecc_steps; i++) {
+ nand_memcpy(oob_ptr(chip, i), mtk_oob_ptr(chip, i), fdm->reg_size);
+
+ if (i == mtk_nand->bad_mark.sec)
+ mtk_nand->bad_mark.bm_swap(chip, nfc->buffer, 1);
+
+ if (p)
+ nand_memcpy(data_ptr(chip, p, i), mtk_data_ptr(chip, i),
+ chip->ecc_size);
+ }
+
+ return ret;
+}
+
+/*
+ * Raw full-page read (no HW ECC), DMA transfer, polled completion:
+ * read into nfc->buffer, then unpack FDM bytes and data per sector.
+ */
+static int mtk_nfc_read_page_raw_dma_polling(struct mtk_nand_chip *chip,
+ u8 *buf, int page)
+{
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ int i, ret;
+
+ nand_memset(nfc->buffer, 0xff, chip->pagesize + chip->oobsize);
+ ret = mtk_nfc_read_subpage(chip, 0, chip->pagesize, nfc->buffer,
+ page, 1, 1, 1);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < chip->ecc_steps; i++) {
+ nand_memcpy(oob_ptr(chip, i), mtk_oob_ptr(chip, i), fdm->reg_size);
+
+ if (i == mtk_nand->bad_mark.sec)
+ mtk_nand->bad_mark.bm_swap(chip, nfc->buffer, 1);
+
+ if (buf)
+ nand_memcpy(data_ptr(chip, buf, i), mtk_data_ptr(chip, i),
+ chip->ecc_size);
+ }
+
+ return ret;
+}
+
+/*
+ * Raw full-page read (no HW ECC), DMA transfer, interrupt-driven:
+ * read into nfc->buffer, then unpack FDM bytes and data per sector.
+ */
+static int mtk_nfc_read_page_raw_dma_irq(struct mtk_nand_chip *chip,
+ u8 *buf, int page)
+{
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ int i, ret;
+
+ nand_memset(nfc->buffer, 0xff, chip->pagesize + chip->oobsize);
+ ret = mtk_nfc_read_subpage(chip, 0, chip->pagesize, nfc->buffer,
+ page, 1, 1, 0);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < chip->ecc_steps; i++) {
+ nand_memcpy(oob_ptr(chip, i), mtk_oob_ptr(chip, i), fdm->reg_size);
+
+ if (i == mtk_nand->bad_mark.sec)
+ mtk_nand->bad_mark.bm_swap(chip, nfc->buffer, 1);
+
+ if (buf)
+ nand_memcpy(data_ptr(chip, buf, i), mtk_data_ptr(chip, i),
+ chip->ecc_size);
+ }
+
+ return ret;
+}
+
+/*
+ * Raw full-page read (no HW ECC), PIO transfer, polled completion:
+ * read into nfc->buffer, then unpack FDM bytes and data per sector.
+ */
+static int mtk_nfc_read_page_raw_pio_polling(struct mtk_nand_chip *chip,
+ u8 *buf, int page)
+{
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ int i, ret;
+
+ nand_memset(nfc->buffer, 0xff, chip->pagesize + chip->oobsize);
+ ret = mtk_nfc_read_subpage(chip, 0, chip->pagesize, nfc->buffer,
+ page, 1, 0, 1);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < chip->ecc_steps; i++) {
+ nand_memcpy(oob_ptr(chip, i), mtk_oob_ptr(chip, i), fdm->reg_size);
+
+ if (i == mtk_nand->bad_mark.sec)
+ mtk_nand->bad_mark.bm_swap(chip, nfc->buffer, 1);
+
+ if (buf)
+ nand_memcpy(data_ptr(chip, buf, i), mtk_data_ptr(chip, i),
+ chip->ecc_size);
+ }
+
+ return ret;
+}
+
+/*
+ * Raw full-page read (no HW ECC), PIO transfer, interrupt-driven:
+ * read into nfc->buffer, then unpack FDM bytes and data per sector.
+ */
+static int mtk_nfc_read_page_raw_pio_irq(struct mtk_nand_chip *chip,
+ u8 *buf, int page)
+{
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ int i, ret;
+
+ nand_memset(nfc->buffer, 0xff, chip->pagesize + chip->oobsize);
+ ret = mtk_nfc_read_subpage(chip, 0, chip->pagesize, nfc->buffer,
+ page, 1, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < chip->ecc_steps; i++) {
+ nand_memcpy(oob_ptr(chip, i), mtk_oob_ptr(chip, i), fdm->reg_size);
+
+ if (i == mtk_nand->bad_mark.sec)
+ mtk_nand->bad_mark.bm_swap(chip, nfc->buffer, 1);
+
+ if (buf)
+ nand_memcpy(data_ptr(chip, buf, i), mtk_data_ptr(chip, i),
+ chip->ecc_size);
+ }
+
+ return ret;
+}
+
+/*
+ * Program NFI access timing (NFI_ACCCON).  Uses the chip's detected
+ * acctiming when available, falling back to a conservative default.
+ * For the two known fast timings, also delay the read strobe by one
+ * cycle via NFI_DEBUG_CON1.
+ */
+static void mtk_nfc_set_timing(struct mtk_nand_chip *chip)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ u32 reg;
+
+ /*
+ * ACCON: access timing control register
+ * -------------------------------------
+ * 31:28: minimum required time for CS post pulling down after accessing
+ * the device
+ * 27:22: minimum required time for CS pre pulling down before accessing
+ * the device
+ * 21:16: minimum required time from NCEB low to NREB low
+ * 15:12: minimum required time from NWEB high to NREB low.
+ * 11:08: write enable hold time
+ * 07:04: write wait states
+ * 03:00: read wait states
+ */
+ if (chip->acctiming) {
+ nfi_writel(nfc, chip->acctiming, NFI_ACCCON);
+ if ((chip->acctiming == 0x10804111)
+ || (chip->acctiming == 0x10804122)){
+ /* Set strobe_sel to delay 1 cycle for NRE */
+ reg = nfi_readl(nfc, NFI_DEBUG_CON1);
+ reg &= ~(0x3 << 3);
+ reg |= (0x1 << 3);
+ nfi_writel(nfc, reg, NFI_DEBUG_CON1);
+ }
+ } else
+ nfi_writel(nfc, 0x10804222, NFI_ACCCON);
+}
+
+/*
+ * One-time controller bring-up: switch the IP from SPI-NAND to NFI
+ * mode, select CE0, disable the bypass master, program safe access
+ * timing and ready/busy polling, set a default page format, then reset
+ * the NFI and clear/disable all interrupts.
+ */
+static void mtk_nfc_hw_init(struct mtk_nand_chip *chip)
+{
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ u32 reg;
+
+ /* Change to NFI mode */
+ nfi_writel(nfc, 0, NFI_SNAND_CNFG);
+
+ nfi_writel(nfc, 0, NFI_CSEL);
+
+ /* disable bypass_master_en */
+ reg = nfi_readl(nfc, NFI_DEBUG_CON1);
+ reg &= ~BYPASS_MASTER_EN;
+ nfi_writel(nfc, reg, NFI_DEBUG_CON1);
+ /*
+ * ACCON: access timing control register
+ * -------------------------------------
+ * 31:28: minimum required time for CS post pulling down after accessing
+ * the device
+ * 27:22: minimum required time for CS pre pulling down before accessing
+ * the device
+ * 21:16: minimum required time from NCEB low to NREB low
+ * 15:12: minimum required time from NWEB high to NREB low.
+ * 11:08: write enable hold time
+ * 07:04: write wait states
+ * 03:00: read wait states
+ */
+ if (chip->acctiming)
+ nfi_writel(nfc, chip->acctiming, NFI_ACCCON);
+ else
+ nfi_writel(nfc, 0x30C77FFF, NFI_ACCCON);
+
+ /*
+ * CNRNB: nand ready/busy register
+ * -------------------------------
+ * 7:4: timeout register for polling the NAND busy/ready signal
+ * 0 : poll the status of the busy/ready signal after [7:4]*16 cycles.
+ */
+ nfi_writew(nfc, 0xf1, NFI_CNRNB);
+ nfi_writew(nfc, PAGEFMT_8K_16K, NFI_PAGEFMT);
+
+ mtk_nfc_hw_reset(nfc);
+
+ /* read-to-clear pending interrupt status, then mask everything */
+ nfi_readl(nfc, NFI_INTR_STA);
+ nfi_writel(nfc, 0, NFI_INTR_EN);
+}
+
+/*
+ * Size the FDM (in-band OOB) region: whatever spare space per sector is
+ * left after ECC parity bytes, capped at the hardware maximum.  The
+ * ECC-protected FDM portion comes from the chip table, likewise capped.
+ */
+static void mtk_nfc_set_fdm(struct mtk_nfc_fdm *fdm, struct mtk_nand_chip *nand)
+{
+ struct mtk_nfc_nand_chip *chip = to_mtk_nand(nand);
+ u32 ecc_bytes;
+
+ /* parity bytes per sector for the configured correction strength */
+ ecc_bytes = DIV_ROUND_UP(nand->ecc_strength * ECC_PARITY_BITS, 8);
+
+ fdm->reg_size = chip->spare_per_sector - ecc_bytes;
+ if (fdm->reg_size > NFI_FDM_MAX_SIZE)
+ fdm->reg_size = NFI_FDM_MAX_SIZE;
+
+ /* bad block mark storage */
+ fdm->ecc_size = nand->fdm_ecc_size > NFI_FDM_MAX_SIZE ? NFI_FDM_MAX_SIZE : nand->fdm_ecc_size;
+}
+
+/*
+ * Pick the bad-block-mark swap strategy and compute which sector/offset
+ * holds the factory mark.  The no-swap branch is currently compiled out
+ * (condition hard-coded to 0; originally "pagesize == 512").
+ */
+static void mtk_nfc_set_bad_mark_ctl(struct mtk_nfc_bad_mark_ctl *bm_ctl,
+ struct mtk_nand_chip *nand)
+{
+ /* mt8561 no swap */
+ if (0) { /*(nand->pagesize == 512)*/
+ bm_ctl->bm_swap = mtk_nfc_no_bad_mark_swap;
+ } else {
+ bm_ctl->bm_swap = mtk_nfc_bad_mark_swap;
+ bm_ctl->sec = nand->pagesize / mtk_data_len(nand);
+ bm_ctl->pos = nand->pagesize % mtk_data_len(nand);
+ }
+}
+
+/*
+ * Round the chip's available spare-per-sector down to the nearest value
+ * the PAGEFMT spare encoding supports (values above the table max clamp
+ * to 64).  For 1KB sectors the value is halved for the lookup and
+ * doubled again afterwards, matching the hardware's doubling.
+ *
+ * NOTE(review): a spare size below the table minimum (16) matches the
+ * "i == 0, *sps < spare[0]" case and is left unadjusted — confirm such
+ * chips cannot reach this path.
+ */
+static void mtk_nfc_set_spare_per_sector(u32 *sps, struct mtk_nand_chip *nand)
+{
+ u32 spare[] = {16, 26, 27, 28, 32, 36, 40, 44,
+ 48, 49, 50, 51, 52, 62, 63, 64
+ };
+ u32 eccsteps, i;
+
+ eccsteps = nand->pagesize / nand->ecc_size;
+ *sps = nand->oobsize / eccsteps;
+
+ nand_debug("pagesize:%d, oobsize:%d, ecc_size:%d",
+ nand->pagesize, nand->oobsize, nand->ecc_size);
+
+ if (nand->ecc_size == 1024)
+ *sps >>= 1;
+
+ /* pick the largest supported value <= *sps */
+ for (i = 0; i < sizeof(spare) / sizeof(u32); i++) {
+ if (*sps <= spare[i]) {
+ if (*sps == spare[i])
+ *sps = spare[i];
+ else if (i != 0)
+ *sps = spare[i - 1];
+ break;
+ }
+ }
+
+ if (i >= sizeof(spare) / sizeof(u32))
+ *sps = spare[sizeof(spare) / sizeof(u32) - 1];
+
+ if (nand->ecc_size == 1024)
+ *sps <<= 1;
+}
+
+/* Log every discovered chip geometry/configuration field (debug aid). */
+static void dump_nand_info(struct mtk_nand_chip *chip)
+{
+ nand_info("------------dump nand info ------------\n");
+ nand_info("totalsize 0x%llx\n", chip->totalsize);
+ nand_info("chipsize 0x%llx\n", chip->chipsize);
+ nand_info("pagesize 0x%x\n", chip->pagesize);
+ nand_info("oobsize 0x%x\n", chip->oobsize);
+ nand_info("blocksize 0x%x\n", chip->blocksize);
+ nand_info("ecc_size 0x%x\n", chip->ecc_size);
+ nand_info("ecc_strength 0x%x\n", chip->ecc_strength);
+ nand_info("ecc_steps 0x%x\n", chip->ecc_steps);
+ nand_info("subpagesize 0x%x\n", chip->subpagesize);
+ nand_info("fdm_ecc_size 0x%x\n", chip->fdm_ecc_size);
+ nand_info("bits_per_cell 0x%x\n", chip->bits_per_cell);
+ nand_info("page_per_chip 0x%x\n", chip->page_per_chip);
+ nand_info("page_per_block 0x%x\n", chip->page_per_block);
+ nand_info("chip_delay 0x%x\n", chip->chip_delay);
+ nand_info("acctiming 0x%x\n", chip->acctiming);
+ nand_info("options 0x%x\n", chip->options);
+ nand_info("numchips 0x%x\n", chip->numchips);
+ nand_info("activechip 0x%x\n", chip->activechip);
+ nand_info("bbt_options 0x%x\n", chip->bbt_options);
+ nand_info("badblockpos 0x%x\n", chip->badblockpos);
+ nand_info("badblockbits 0x%x\n", chip->badblockbits);
+ nand_info("bbt_erase_shift 0x%x\n", chip->bbt_erase_shift);
+ nand_info("lbasize 0x%x\n", chip->lbasize);
+ nand_info("lbacnt 0x%x\n\n", chip->lbacnt);
+}
+
+/*
+ * Read-modify-write a 32-bit GPIO register at 'addr': clear the bits in
+ * 'field', then OR in 'val'.
+ */
+void nand_gpio_cfg_bit32(u64 addr, u32 field , u32 val)
+{
+ volatile u32 *reg = (volatile u32 *)(addr);
+ u32 tmp = *reg;
+
+ tmp &= ~field;
+ tmp |= val;
+ *reg = tmp;
+}
+
+#define NFI_GPIO_CFG_BIT32(reg,field,val) nand_gpio_cfg_bit32(reg, field, val)
+
+/*
+ * Route and configure the NFI pins: pinmux modes, pull-up/down, drive
+ * strength, TDSEL/RDSEL. The exact field values come from the platform
+ * pinmux tables -- NOTE(review): values assumed correct per datasheet,
+ * confirm against the MT8512 GPIO spec before changing.
+ */
+static void mtk_nfc_gpio_init(void)
+{
+/* Nand GPIO register define */
+#define NFI_GPIO_BASE (IO_PHYS+0x5000)
+/* For NFI GPIO setting *//* NCLE */
+#define NFI_GPIO_MODE1 (NFI_GPIO_BASE + 0x300)
+/* NCEB1/NCEB0/NREB */
+#define NFI_GPIO_MODE2 (NFI_GPIO_BASE + 0x310)
+/* NRNB/NREB_C/NDQS_C */
+#define NFI_GPIO_MODE3 (NFI_GPIO_BASE + 0x320)
+#define NFI_GPIO_PUPD_CTRL0 (NFI_GPIO_BASE + 0xE00)
+#define NFI_GPIO_PUPD_CTRL1 (NFI_GPIO_BASE + 0xE10)
+#define NFI_GPIO_PUPD_CTRL2 (NFI_GPIO_BASE + 0xE20)
+#define NFI_GPIO_PUPD_CTRL6 (NFI_GPIO_BASE + 0xE60)
+/* Driving strength */
+#define NFI_GPIO_DRV_MODE0 (NFI_GPIO_BASE + 0xD00)
+#define NFI_GPIO_DRV_MODE6 (NFI_GPIO_BASE + 0xD60)
+#define NFI_GPIO_DRV_MODE7 (NFI_GPIO_BASE + 0xD70)
+//TDSEL,
+#define NFI_GPIO_TDSEL6_EN (NFI_GPIO_BASE + 0xB60)
+#define NFI_GPIO_TDSEL7_EN (NFI_GPIO_BASE + 0xB70)
+//RDSEL, no need for 1.8V
+#define NFI_GPIO_RDSEL1_EN (NFI_GPIO_BASE + 0xC10)
+#define NFI_GPIO_RDSELE_EN (NFI_GPIO_BASE + 0xCE0)
+#define NFI_GPIO_RDSELF_EN (NFI_GPIO_BASE + 0xCF0)
+
+ /* pinmux: select NFI functions on the NAND pins (3-bit mode fields) */
+ NFI_GPIO_CFG_BIT32(NFI_GPIO_MODE1, 0x7FFF,
+     (1 << 0) | (1 << 3) | (1 << 6) | (2 << 9) | (2 << 12));
+ NFI_GPIO_CFG_BIT32(NFI_GPIO_MODE2, 0x7FFF,
+     (2 << 0) | (2 << 3) | (2 << 6) | (2 << 9) | (2 << 12));
+ NFI_GPIO_CFG_BIT32(NFI_GPIO_MODE3, 0xFFF,
+     (2 << 0) | (2 << 3) | (2 << 6) | (2 << 9));
+ /* pull-up/down selection for the data/control lines */
+ NFI_GPIO_CFG_BIT32(NFI_GPIO_PUPD_CTRL6, (0xFFF << 4), (0x111 << 4));
+ NFI_GPIO_CFG_BIT32(NFI_GPIO_PUPD_CTRL1, 0xFFFF, 0x6666);
+ NFI_GPIO_CFG_BIT32(NFI_GPIO_PUPD_CTRL2, 0xFFF, 0x616);
+ NFI_GPIO_CFG_BIT32(NFI_GPIO_PUPD_CTRL0, 0xFFFF, 0x6666);
+ /*only for 3.3V */
+ NFI_GPIO_CFG_BIT32(NFI_GPIO_DRV_MODE6, (0xFF << 8), (0x11 << 8));
+ NFI_GPIO_CFG_BIT32(NFI_GPIO_DRV_MODE7, 0xFFF, 0x111);
+ NFI_GPIO_CFG_BIT32(NFI_GPIO_TDSEL6_EN, (0xFF << 8), (0xAA << 8));
+ NFI_GPIO_CFG_BIT32(NFI_GPIO_TDSEL7_EN, 0xFFF, 0xAAA);
+ /*only for 3.3v */
+ NFI_GPIO_CFG_BIT32(NFI_GPIO_RDSEL1_EN, (0x3F << 6), (0xC << 6));
+ NFI_GPIO_CFG_BIT32(NFI_GPIO_RDSELE_EN, 0x3F3F, 0xC0C);
+ NFI_GPIO_CFG_BIT32(NFI_GPIO_RDSELF_EN, 0x3F3F, 0xC0C);
+}
+
+/* Select the NFI source clock: field [2:0] of NFI_CLK_SEL1 = 4 (215MHz). */
+static void mtk_nfc_clk_init(void)
+{
+/* Nand clock select register define */
+#define NFI_CLK_SEL1 (IO_PHYS+0x4)
+ volatile u32 *sel = (volatile u32 *)(NFI_CLK_SEL1);
+ u32 val = *sel;
+
+ /* 215MHz */
+ val = (val & ~0x7) | 0x4;
+ *sel = val;
+}
+
+/* Global handle to the probed NAND chip; set once init succeeds. */
+struct mtk_nand_chip *g_nand_chip;
+
+/*
+ * Allocate controller/chip state, install the chip operation callbacks,
+ * bring up pins/clock/NFI hardware, scan the flash, and size the ECC/FDM
+ * layout. On success *ext_nand points at the initialized chip and 0 is
+ * returned; on failure a negative errno is returned and allocations freed.
+ *
+ * Fix: on chip allocation failure "ret = -ENOMEM" was placed AFTER
+ * "goto free_nfc", so it was unreachable and the function returned 0
+ * (success) with a dangling *ext_nand. Set ret before jumping.
+ */
+int mtk_nfc_nand_chip_init(struct mtk_nand_chip **ext_nand)
+{
+ struct mtk_nfc *nfc;
+ struct mtk_nfc_nand_chip *chip;
+ struct mtk_nand_chip *nand;
+ int ret = 0;
+
+ nfc = (struct mtk_nfc *)nand_malloc(sizeof(*nfc));
+ if (!nfc)
+  return -ENOMEM;
+ nand_memset(nfc, 0, sizeof(*nfc));
+ nfc->regs = NAND_NFI_BASE;
+
+ chip = (struct mtk_nfc_nand_chip *)nand_malloc(sizeof(*chip));
+ if (!chip) {
+  ret = -ENOMEM; /* must be set before the jump */
+  goto free_nfc;
+ }
+ nand_memset(chip, 0, sizeof(*chip));
+
+ nand_debug("nfc->regs:0x%lx nfc:0x%x chip:0x%x NAND_NFI_BASE:0x%x NFI_BASE:0x%x IO_PHYS:0x%x\n",
+    (u32)nfc->regs, (u32)nfc, (u32)chip, NAND_NFI_BASE, NFI_BASE, IO_PHYS);
+
+#if 0
+ /* register interrupt handler */
+ mtk_nfc_request_irq(nfc);
+#endif
+
+ nand = &chip->chip;
+ *ext_nand = nand;
+
+ nand_set_controller_data(nand, nfc);
+
+ /* basic chip access callbacks */
+ nand->dev_ready = mtk_nfc_dev_ready;
+ nand->wait_busy_irq = mtk_nfc_wait_busy_irq;
+ nand->select_chip = mtk_nfc_select_chip;
+ nand->write_byte = mtk_nfc_write_byte;
+ nand->write_buf = mtk_nfc_write_buf;
+ nand->read_byte = mtk_nfc_read_byte;
+ nand->read_buf = mtk_nfc_read_buf;
+ nand->cmd_ctrl = mtk_nfc_cmd_ctrl;
+
+ /* page/subpage write paths: {ecc,raw} x {dma,pio} x {irq,polling} */
+ nand->write_page_ecc_dma_irq = mtk_nfc_write_page_ecc_dma_irq;
+ nand->write_page_ecc_dma_polling = mtk_nfc_write_page_ecc_dma_polling;
+ nand->write_page_ecc_pio_irq = mtk_nfc_write_page_ecc_pio_irq;
+ nand->write_page_ecc_pio_polling = mtk_nfc_write_page_ecc_pio_polling;
+ nand->write_page_raw_dma_irq = mtk_nfc_write_page_raw_dma_irq;
+ nand->write_page_raw_dma_polling = mtk_nfc_write_page_raw_dma_polling;
+ nand->write_page_raw_pio_irq = mtk_nfc_write_page_raw_pio_irq;
+ nand->write_page_raw_pio_polling = mtk_nfc_write_page_raw_pio_polling;
+ nand->write_subpage_ecc_dma_irq = mtk_nfc_write_subpage_ecc_dma_irq;
+ nand->write_subpage_ecc_dma_polling = mtk_nfc_write_subpage_ecc_dma_polling;
+ nand->write_subpage_ecc_pio_irq = mtk_nfc_write_subpage_ecc_pio_irq;
+ nand->write_subpage_ecc_pio_polling = mtk_nfc_write_subpage_ecc_pio_polling;
+
+ /* page/subpage read paths, same matrix as the write paths */
+ nand->read_subpage_ecc_dma_irq = mtk_nfc_read_subpage_ecc_dma_irq;
+ nand->read_subpage_ecc_dma_polling = mtk_nfc_read_subpage_ecc_dma_polling;
+ nand->read_subpage_ecc_pio_irq = mtk_nfc_read_subpage_ecc_pio_irq;
+ nand->read_subpage_ecc_pio_polling = mtk_nfc_read_subpage_ecc_pio_polling;
+ nand->read_page_ecc_dma_irq = mtk_nfc_read_page_ecc_dma_irq;
+ nand->read_page_ecc_dma_polling = mtk_nfc_read_page_ecc_dma_polling;
+ nand->read_page_ecc_pio_irq = mtk_nfc_read_page_ecc_pio_irq;
+ nand->read_page_ecc_pio_polling = mtk_nfc_read_page_ecc_pio_polling;
+ nand->read_page_raw_dma_irq = mtk_nfc_read_page_raw_dma_irq;
+ nand->read_page_raw_dma_polling = mtk_nfc_read_page_raw_dma_polling;
+ nand->read_page_raw_pio_irq = mtk_nfc_read_page_raw_pio_irq;
+ nand->read_page_raw_pio_polling = mtk_nfc_read_page_raw_pio_polling;
+
+ mtk_nfc_gpio_init();
+ mtk_nfc_clk_init();
+
+#ifndef MT8512_NFI
+ mtk_nfc_randomizer_init(nand);
+#endif
+
+ mtk_nfc_hw_init(nand);
+
+ ret = mtk_nand_scan(nand, 1 /*MTK_NAND_MAX_NSELS*/);
+ if (ret)
+  goto free_chip;
+
+ mtk_nfc_set_spare_per_sector(&chip->spare_per_sector, nand);
+ mtk_nfc_set_fdm(&chip->fdm, nand);
+ mtk_nfc_set_bad_mark_ctl(&chip->bad_mark, nand);
+ mtk_nfc_set_timing(nand);
+
+ ret = mtk_ecc_hw_init(&nfc->ecc);
+ if (ret)
+  goto free_chip;
+
+ /* nfc->buffer = (u8 *)NAND_DRAM_BUF_NFCBUF_ADDR; */
+ nfc->buffer = (u8 *)nand_memalign(4, nand->pagesize + nand->oobsize);
+ if (!nfc->buffer) {
+  ret = -ENOMEM;
+  goto free_chip;
+ }
+
+ nand_lock_init(&nfc->lock);
+ g_nand_chip = nand;
+ nand_info("nand chip init done.\n");
+
+ dump_nand_info(nand);
+
+ return 0;
+
+free_chip:
+ nand_free(chip);
+free_nfc:
+ nand_free(nfc);
+
+ return ret;
+}
+
diff --git a/src/bsp/lk/platform/mt8512/drivers/nand/slc/nfi/nfi.h b/src/bsp/lk/platform/mt8512/drivers/nand/slc/nfi/nfi.h
new file mode 100644
index 0000000..64dcfbb
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nand/slc/nfi/nfi.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include "../slc.h"
+#include "../slc_os.h"
+#include "../bbt/bbt.h"
+#include "../ecc/ecc.h"
+
+/* Forward declarations (fix: mtk_nand_bbt_descr was declared three times). */
+struct mtk_nand_chip;
+struct mtk_ecc_stats;
+struct mtk_nand_bbt_descr;
+
+/*
+ * used to do bad mark byte swap
+ */
+struct mtk_nfc_bad_mark_ctl {
+ /* swap callback; 'raw' selects raw vs ECC layout handling */
+ void (*bm_swap)(struct mtk_nand_chip *chip, u8 *buf, int raw);
+ u32 sec; /* sector index holding the bad block mark */
+ u32 pos; /* byte position of the mark within that sector */
+};
+
+/*
+ * FDM: region used to store free OOB data
+ */
+struct mtk_nfc_fdm {
+ u32 reg_size; /* free OOB bytes per sector (spare minus ECC parity) */
+ u32 ecc_size; /* ECC-protected portion of the FDM region */
+};
+
+/* Per-controller state for the NFI NAND flash controller. */
+struct mtk_nfc {
+ nand_lock_t lock;  /* serializes all controller access */
+ nand_completion_t done; /* signaled by the IRQ paths */
+ struct mtk_ecc_config ecc_cfg;
+ struct mtk_ecc *ecc;
+
+ void *regs;  /* NFI register base */
+ u8 *buffer;  /* bounce buffer, pagesize + oobsize bytes */
+};
+
+/* Controller-specific wrapper around the generic chip description. */
+struct mtk_nfc_nand_chip {
+ struct mtk_nand_chip chip; /* must be first: to_mtk_nand() depends on it */
+ struct mtk_nfc_bad_mark_ctl bad_mark;
+ struct mtk_nfc_fdm fdm;
+
+ u32 spare_per_sector; /* chosen spare size per ECC sector */
+};
+
+/* Probe and initialize the NFC; on success *ext_nand points at the chip. */
+extern int mtk_nfc_nand_chip_init(struct mtk_nand_chip **ext_nand);
+
+/* Randomizer direction: encode on program, decode on read. */
+enum mtk_randomizer_operation {RAND_ENCODE, RAND_DECODE};
+extern void mtk_nfc_randomizer_disable(struct mtk_nand_chip *chip);
+extern void mtk_nfc_randomizer_enable(struct mtk_nand_chip *chip, int page,
+  enum mtk_randomizer_operation rand, int repage);
diff --git a/src/bsp/lk/platform/mt8512/drivers/nand/slc/slc.c b/src/bsp/lk/platform/mt8512/drivers/nand/slc/slc.c
new file mode 100644
index 0000000..6f96de6
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nand/slc/slc.c
@@ -0,0 +1,838 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "slc.h"
+#include "nfi/nfi.h"
+#include "bbt/bbt.h"
+#include "test/slc_test.h"
+
+/* 1-based index of the least significant set bit; 0 if x == 0. */
+static inline int nand_ffs(unsigned int x)
+{
+ return __builtin_ffs(x);
+}
+
+/*
+ * True if x is not subpage-aligned. NOTE: expands using the caller's local
+ * 'chip' variable; only valid inside functions with one in scope.
+ */
+#define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
+
+/* total NAND size in bytes -- NOTE(review): not updated in this file, confirm owner */
+u64 g_nand_size = 0;
+
+static int mtk_nand_do_read_ops(struct mtk_nand_chip *chip, struct mtk_nand_ops *ops);
+
+/*
+ * Report whether 'buf' is DMA-capable DRAM. Currently always 1; the
+ * original virtual-address check is kept commented for reference.
+ */
+static int mtk_nand_is_dram_buf(u8* buf)
+{
+ //return (buf < NAND_DRAM_BASE_VIRT) ? 0 : 1;
+ return 1;
+}
+
+/* Acquire exclusive access to the NFC controller (blocks on the lock). */
+static int mtk_nand_get_controller(struct mtk_nand_chip *chip)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+ nand_lock(&nfc->lock);
+
+ return 0;
+}
+
+/* Release the controller lock taken by mtk_nand_get_controller(). */
+static int mtk_nand_release_controller(struct mtk_nand_chip *chip)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+ nand_unlock(&nfc->lock);
+
+ return 0;
+}
+
+/*
+ * Issue NAND_CMD_STATUS, wait until the device is ready (IRQ or polling
+ * per 'polling', 1s timeout), and return the status byte read back.
+ */
+static int mtk_nand_wait_func(struct mtk_nand_chip *chip, int polling)
+{
+ int status;
+ unsigned long timeo = 1000000; /* microseconds */
+
+ chip->cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
+
+ if (!polling) {
+  if (chip->wait_busy_irq(chip))
+   nand_err("nand dev ready timeout");
+ } else {
+  if (!check_with_timeout(chip->dev_ready(chip), timeo))
+   nand_err("nand dev ready timeout");
+ }
+
+ status = (int)chip->read_byte(chip);
+
+ return status;
+}
+
+void mtk_nand_wait_ready(struct mtk_nand_chip *chip)
+{
+ unsigned long timeo = 1000000;
+
+ if (!check_with_timeout(chip->dev_ready(chip), timeo))
+ nand_err("nand dev ready timeout");
+
+}
+
+
+/* Return 1 if the device reports write protection, 0 otherwise. */
+static int mtk_nand_check_wp(struct mtk_nand_chip *chip)
+{
+ /* Check the WP bit */
+ chip->cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
+ return (chip->read_byte(chip) & NAND_STATUS_WP) ? 0 : 1;
+}
+
+/*
+ * Factory bad block check for the block at byte offset 'ofs': read the
+ * bad-block-mark byte from the first (or last, per bbt_options) page's OOB,
+ * and optionally the following page too (NAND_BBT_SCAN2NDPAGE).
+ * Returns nonzero if the block is marked bad.
+ */
+static int mtk_nand_block_bad(struct mtk_nand_chip *chip, u64 ofs)
+{
+ int page, res = 0, i = 0;
+ u16 bad;
+
+ if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
+  ofs += chip->blocksize - chip->pagesize;
+
+ page = (int)(ofs / chip->pagesize) % chip->page_per_chip;
+
+ do {
+  chip->cmdfunc(chip, NAND_CMD_READOOB, chip->badblockpos, page);
+  bad = chip->read_byte(chip);
+
+  /* a good block keeps 0xFF at the bad block position */
+  if (chip->badblockbits == 8)
+   res = bad != 0xFF;
+
+  ofs += chip->pagesize;
+  page = (int)(ofs / chip->pagesize) % chip->page_per_chip;
+  i++;
+ } while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
+
+ return res;
+}
+
+/*
+ * Check whether the block containing 'page' (a page number) is bad.
+ * Prefers the in-memory BBT; without one, performs an ECC read of the
+ * block's first page (safe with the randomizer on) and inspects the
+ * bad-block-mark byte in the decoded OOB.
+ */
+static int mtk_nand_block_checkbad(struct mtk_nand_chip *chip, u32 page)
+{
+ struct mtk_nand_ops ops;
+ int ret = 0;
+
+ /* block align */
+ page = page / chip->page_per_block * chip->page_per_block;
+
+ if (chip->bbt) {
+  ret = mtk_nand_isbad_bbt(chip, page / chip->page_per_block, 1);
+  return ret;
+ }
+#if 0
+ /* be careful of randomizer on case, may get a wrong feedback by chip->read_byte */
+ if (!(chip->options & NAND_NEED_SCRAMBLING)) {
+  ret = chip->block_bad(chip, (u64)page * chip->pagesize);
+ } else
+#endif
+ {
+  nand_memset(&ops, 0, sizeof(ops));
+  ops.mode = NAND_OPS_ECC_DMA_POLL;
+  ops.offset = (u64)page * chip->pagesize;
+  ops.len = chip->pagesize;
+  ops.datbuf = chip->databuf;
+  mtk_nand_do_read_ops(chip, &ops);
+  ret = (chip->oob_poi[chip->badblockpos] != 0xFF);
+ }
+
+ return ret;
+}
+
+/*
+ * Public bad block query. 'page' is a PAGE NUMBER (not a byte offset).
+ * Uses the BBT when available; otherwise takes the controller lock and
+ * reads the block's bad-block marker from flash.
+ */
+int mtk_nand_block_isbad(struct mtk_nand_chip *nand, u32 page)
+{
+ int ret = 0;
+
+ if (!nand->bbt) {
+  mtk_nand_get_controller(nand);
+  ret = mtk_nand_block_checkbad(nand, page);
+  mtk_nand_release_controller(nand);
+ } else {
+  ret = mtk_nand_isbad_bbt(nand, page/nand->page_per_block, 1);
+ }
+
+ return ret;
+}
+
+/*
+ * Send a RESET command to chip number 'chipnr' after the power-on delay.
+ * Always returns 0.
+ */
+int nand_reset(struct mtk_nand_chip *chip, int chipnr)
+{
+ /* power on sequence delay */
+ mtk_nand_udelay(300);
+
+ /*
+  * The CS line has to be released before we can apply the new NAND
+  * interface settings, hence this weird ->select_chip() dance.
+  */
+ chip->select_chip(chip, chipnr);
+ chip->cmdfunc(chip, NAND_CMD_RESET, -1, -1);
+ chip->select_chip(chip, -1);
+
+ return 0;
+}
+
+/*
+ * Return 1 for opcodes that take a single 8-bit column address cycle
+ * (READID/PARAM/GET_FEATURES/SET_FEATURES), 0 for everything else.
+ */
+static inline int mtk_nand_opcode_8bits(unsigned int command)
+{
+ return command == NAND_CMD_READID ||
+        command == NAND_CMD_PARAM ||
+        command == NAND_CMD_GET_FEATURES ||
+        command == NAND_CMD_SET_FEATURES;
+}
+
+/*
+ * Issue a large-page NAND command sequence: command latch, optional
+ * column/page address cycles, then the second command byte where the
+ * protocol requires one (READSTART/RNDOUTSTART), and finally a ready
+ * wait for read-class commands. The cmd_ctrl call order is protocol
+ * mandated -- do not reorder.
+ */
+static void mtk_nand_command_lp(struct mtk_nand_chip *chip, unsigned int command,
+      int column, int page_addr)
+{
+ /* Emulate NAND_CMD_READOOB */
+ if (command == NAND_CMD_READOOB) {
+  column += chip->pagesize;
+  command = NAND_CMD_READ0;
+ }
+
+ /* Command latch cycle */
+ chip->cmd_ctrl(chip, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+
+ if (column != -1 || page_addr != -1) {
+  int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
+
+  /* Serially input address */
+  if (column != -1) {
+   chip->cmd_ctrl(chip, column, ctrl);
+   ctrl &= ~NAND_CTRL_CHANGE;
+
+   /* Only output a single addr cycle for 8bits opcodes. */
+   if (!mtk_nand_opcode_8bits(command))
+    chip->cmd_ctrl(chip, column >> 8, ctrl);
+  }
+  if (page_addr != -1) {
+   chip->cmd_ctrl(chip, page_addr, ctrl);
+   chip->cmd_ctrl(chip, page_addr >> 8,
+          NAND_NCE | NAND_ALE);
+   /* One more address cycle for devices > 128MiB */
+   if (chip->chipsize > (128 << 20))
+    chip->cmd_ctrl(chip, page_addr >> 16,
+           NAND_NCE | NAND_ALE);
+  }
+ }
+ chip->cmd_ctrl(chip, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+
+ /*
+  * Program and erase have their own busy handlers status, sequential
+  * in and status need no delay.
+  */
+ switch (command) {
+ case NAND_CMD_CACHEDPROG:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_STATUS:
+  return;
+
+ case NAND_CMD_RNDOUT:
+  /* No ready / busy check necessary */
+  chip->cmd_ctrl(chip, NAND_CMD_RNDOUTSTART,
+         NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+  chip->cmd_ctrl(chip, NAND_CMD_NONE,
+         NAND_NCE | NAND_CTRL_CHANGE);
+  return;
+
+ case NAND_CMD_READ0:
+  chip->cmd_ctrl(chip, NAND_CMD_READSTART,
+         NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+  chip->cmd_ctrl(chip, NAND_CMD_NONE,
+         NAND_NCE | NAND_CTRL_CHANGE);
+
+  /* This applies to read commands: fall through to the ready wait */
+ default:
+  break;
+ }
+
+ //nand_debug("command:0x%x column:0x%x page_addr:0x%x", command, column, page_addr);
+
+ mtk_nand_wait_ready(chip);
+
+ //nand_debug("command:0x%x column:0x%x page_addr:0x%x", command, column, page_addr);
+
+}
+
+/*
+ * Mark the block containing 'page' (a page number) bad in the BBT; a no-op
+ * if the block is already bad. Returns 0 on success or if already bad.
+ *
+ * Fixes: mtk_nand_block_isbad() takes a page number but was being passed
+ * a byte offset (page * pagesize), so the wrong block was checked; the
+ * u32 multiply for the BBT offset could also overflow before the u64 cast.
+ */
+int mtk_nand_block_markbad(struct mtk_nand_chip *chip, u32 page)
+{
+ int ret = 0;
+
+ if (mtk_nand_block_isbad(chip, page)) {
+  return 0;
+ } else {
+  /* Mark block bad in BBT */
+  if (chip->bbt) {
+   ret = mtk_nand_markbad_bbt(chip, (u64)page * chip->pagesize);
+  }
+ }
+
+ return ret;
+}
+
+/* IRQ stub: LK uses polling, so NFI interrupts are ignored entirely. */
+void lk_nand_irq_handler(unsigned int irq)
+{
+ (void)irq; /* unused */
+}
+
+/*
+ * Install default callbacks and parameters on a freshly-zeroed chip
+ * structure before probing: command/wait functions, bad block handling,
+ * and (when NAND_BBT_SUPPORT is set) flash-based BBT scanning.
+ */
+static void mtk_nand_set_defaults(struct mtk_nand_chip *chip)
+{
+ /* chip_delay setup set 20us if not */
+ chip->chip_delay = 20;
+
+ /* command function*/
+ chip->cmdfunc = mtk_nand_command_lp;
+
+ /* wait function */
+ chip->waitfunc = mtk_nand_wait_func;
+
+ /* bad block check */
+ chip->block_bad = mtk_nand_block_isbad;
+ /* bad block mark */
+ chip->block_markbad = mtk_nand_block_markbad;
+
+ /* scan bbt, disable bbt here */
+ /* chip->scan_bbt = mtk_nand_default_bbt; */
+
+ /* variable default value */
+ chip->badblockbits = 8;
+ chip->badblockpos = 0;
+
+ /* no chip selected yet */
+ chip->activechip = -1;
+
+ /* scan bbt */
+ if (chip->options & NAND_BBT_SUPPORT) {
+  chip->scan_bbt = mtk_nand_default_bbt;
+  /* BBT options setting, must align for all drivers */
+  chip->bbt_options |= (NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB);
+ }
+}
+
+/*
+ * Identify the flash: read the 8-byte ID, match it against the
+ * nand_flash_devs table, probe up to 'maxchips' for identical chips in an
+ * array, then fill in all geometry/ECC/timing fields from the table entry
+ * and allocate the page+OOB bounce buffer.
+ * Returns 0 on success, -ENODEV if unknown, -ENOMEM on allocation failure.
+ *
+ * Fix: the final nand_info printed u64 chip->totalsize with "%x"
+ * (undefined behavior / truncation); now uses "%llx" to match
+ * dump_nand_info().
+ */
+int mtk_nand_flash_get(struct mtk_nand_chip *chip, int maxchips)
+{
+ u32 i;
+ u8 id_data[8];
+ struct mtk_nand_flash_dev *type = nand_flash_devs;
+
+ nand_reset(chip, 0);
+
+ /* Select the device */
+ chip->select_chip(chip, 0);
+
+ /* Send the command for reading device ID */
+ chip->cmdfunc(chip, NAND_CMD_READID, 0x00, -1);
+
+ /* Read entire ID string */
+ for (i = 0; i < 8; i++) {
+  id_data[i] = chip->read_byte(chip);
+ }
+ nand_info("nand id: %x %x %x %x %x %x",
+    id_data[0], id_data[1], id_data[2],id_data[3], id_data[4], id_data[5]);
+
+ /* look the ID up in the supported-device table */
+ for (; type->name != NULL; type++) {
+  if (!nand_strncmp(type->id, id_data, type->id_len)) {
+   nand_info("nand found [%s]", type->name);
+   break;
+  }
+ }
+
+ chip->select_chip(chip, -1);
+ if (!type->name) {
+  return -ENODEV;
+ }
+
+ chip->numchips = 1;
+
+ /* Check for a chip array */
+ for (i = 1; i < (u32)maxchips; i++) {
+  /* See comment in nand_get_flash_type for reset */
+  nand_reset(chip, i);
+
+  chip->select_chip(chip, i);
+  /* Send the command for reading device ID */
+  chip->cmdfunc(chip, NAND_CMD_READID, 0x00, -1);
+  /* Read manufacturer and device IDs */
+  if (id_data[0] != chip->read_byte(chip) ||
+      id_data[1] != chip->read_byte(chip)) {
+   chip->select_chip(chip, -1);
+   break;
+  }
+  nand_info("chip %d is found", i);
+  chip->select_chip(chip, -1);
+  chip->numchips++;
+ }
+
+ /* set nand chip parameters */
+ chip->pagesize = type->pagesize;
+ chip->oobsize = type->oobsize;
+ chip->bits_per_cell = type->bits_per_cell;
+ /* KB to B */
+ chip->chipsize = ((u64)type->chipsize) << 10;
+ chip->blocksize = type->erasesize;
+ chip->bbt_erase_shift = nand_ffs(type->erasesize) - 1; /* log2 for power-of-two sizes */
+ chip->bbt_options |= type->bbt_options;
+ chip->options |= type->options;
+ chip->ecc_size = type->ecc_size;
+ chip->ecc_strength = type->ecc_strength;
+ chip->fdm_ecc_size = type->fdmeccsize;
+
+ /* i is the number of identical chips detected above */
+ chip->totalsize = i * chip->chipsize;
+
+ chip->acctiming = type->acctiming;
+
+ nand_info("chip acctiming %x should equal type->acctiming %x\n",
+    chip->acctiming, type->acctiming);
+
+ chip->ecc_steps = chip->pagesize / chip->ecc_size;
+ if (nand_is_slc(chip)) {
+  if (chip->ecc_steps == 2)
+   chip->subpagesize = chip->pagesize / 2;
+  else if (chip->ecc_steps > 2)
+   chip->subpagesize = chip->pagesize / 4;
+  else
+   chip->subpagesize = chip->pagesize;
+ }
+ chip->page_per_block = chip->blocksize / chip->pagesize;
+ chip->page_per_chip = chip->chipsize / chip->pagesize;
+
+ chip->lbasize = chip->pagesize;
+ /* change lbacnt if want to reserve blocks */
+ chip->lbacnt = chip->totalsize / chip->lbasize;
+
+ chip->databuf = (u8 *)nand_memalign(4, chip->pagesize + chip->oobsize);
+ if (!chip->databuf)
+  return -ENOMEM;
+ chip->oob_poi = chip->databuf + chip->pagesize;
+
+ mtk_nand_set_bbt_options(chip, id_data[0]);
+
+ nand_info("pagesize:%d, oobsize:%d, blocksize:0x%x totalsize:0x%llx",
+    chip->pagesize, chip->oobsize, chip->blocksize, chip->totalsize);
+
+ return 0;
+}
+
+/*
+ * Probe the NAND: install defaults, then identify the flash and fill in
+ * its geometry. BBT scanning is deferred to mtk_nand_init().
+ * Returns 0 on success or a negative errno from mtk_nand_flash_get().
+ */
+int mtk_nand_scan(struct mtk_nand_chip *chip, int maxchips)
+{
+ int ret;
+
+ /* Set the defaults */
+ mtk_nand_set_defaults(chip);
+
+ ret = mtk_nand_flash_get(chip, maxchips);
+ if (ret) {
+  nand_err("no nand device found");
+  return ret;
+ }
+
+ /* ret = chip->scan_bbt(chip); */
+
+ return ret;
+}
+
+/* Placeholder: pack caller OOB data into the ECC-protected FDM region.
+ * Fix: "()" declares unspecified parameters in C; use "(void)". */
+static int mtk_nand_fill_ecc_oob(void)
+{
+ return 0;
+}
+
+/* Placeholder: pack caller OOB data into the free (unprotected) region.
+ * Fix: "()" declares unspecified parameters in C; use "(void)". */
+static int mtk_nand_fill_free_oob(void)
+{
+ return 0;
+}
+
+/* Placeholder: copy ECC-protected OOB data out to the caller's buffer.
+ * Fix: "()" declares unspecified parameters in C; use "(void)". */
+static int mtk_nand_transfer_ecc_oob(void)
+{
+ return 0;
+}
+
+/* Placeholder: copy free OOB data out to the caller's buffer.
+ * Fix: "()" declares unspecified parameters in C; use "(void)". */
+static int mtk_nand_transfer_free_oob(void)
+{
+ return 0;
+}
+
+/*
+ * Core read loop: read ops->len bytes starting at byte offset ops->offset,
+ * page by page, using the transfer mode in ops->mode (ECC/raw x DMA/PIO x
+ * IRQ/polling). Unaligned head/tail pages bounce through chip->databuf.
+ * Crosses chip boundaries by re-selecting the next chip.
+ * Returns the max corrected bitflip count, or -EBADMSG on an
+ * uncorrectable ECC error, or a negative errno from the transfer.
+ */
+static int mtk_nand_do_read_ops(struct mtk_nand_chip *chip, struct mtk_nand_ops *ops)
+{
+ int chipnr, page, realpage, col, bytes, aligned;
+ u8 *buf, *oob_ecc, *oob_free, *bufpoi;
+ u64 readlen = ops->len, from = ops->offset;
+ u32 max_bitflips = 0;
+ u32 ecc_failures = chip->stats.failed; /* snapshot to detect new failures */
+ int ret = 0, ecc_fail = 0;
+
+ chipnr = (int)(from / chip->chipsize);
+ chip->select_chip(chip, chipnr);
+
+ realpage = (int)(from / chip->pagesize);
+ page = realpage % chip->page_per_chip;
+
+ col = (int)(from & (chip->pagesize - 1));
+
+ buf = ops->datbuf;
+ oob_ecc = ops->oobeccbuf;
+ oob_free = ops->oobfreebuf;
+
+ nand_debug("realpage:0x%x col:0x%x", realpage, col);
+
+ while (1) {
+  bytes = min(chip->pagesize - col, readlen);
+  aligned = (bytes == chip->pagesize);
+  /* workaround for dma to sram */
+  if (!mtk_nand_is_dram_buf(buf))
+   aligned = 0;
+  bufpoi = aligned ? buf : chip->databuf;
+
+  /* send read page command */
+  nand_debug("[nand] read page %d chip %d", page, chipnr);
+  #ifndef MT8512_NFI
+  mtk_nfc_randomizer_enable(chip, page, RAND_DECODE, 0);
+  #endif
+  chip->cmdfunc(chip, NAND_CMD_READ0, 0x00, page);
+
+  /* dispatch on transfer mode; subpage path for partial pages */
+  if (!aligned) {
+   if (ops->mode == NAND_OPS_ECC_DMA_IRQ)
+    ret = chip->read_subpage_ecc_dma_irq(chip, col, bytes, bufpoi, page);
+   else if (ops->mode == NAND_OPS_ECC_DMA_POLL)
+    ret = chip->read_subpage_ecc_dma_polling(chip, col, bytes, bufpoi, page);
+   else if (ops->mode == NAND_OPS_ECC_PIO_IRQ)
+    ret = chip->read_subpage_ecc_pio_irq(chip, col, bytes, bufpoi, page);
+   else if (ops->mode == NAND_OPS_ECC_PIO_POLL)
+    ret = chip->read_subpage_ecc_pio_polling(chip, col, bytes, bufpoi, page);
+  } else {
+   if (ops->mode == NAND_OPS_RAW_DMA_IRQ)
+    ret = chip->read_page_raw_dma_irq(chip, bufpoi, page);
+   else if (ops->mode == NAND_OPS_RAW_DMA_POLL)
+    ret = chip->read_page_raw_dma_polling(chip, bufpoi, page);
+   else if (ops->mode == NAND_OPS_RAW_PIO_IRQ)
+    ret = chip->read_page_raw_pio_irq(chip, bufpoi, page);
+   else if (ops->mode == NAND_OPS_RAW_PIO_POLL)
+    ret = chip->read_page_raw_pio_polling(chip, bufpoi, page);
+   else if (ops->mode == NAND_OPS_ECC_DMA_IRQ)
+    ret = chip->read_page_ecc_dma_irq(chip, bufpoi, page);
+   else if (ops->mode == NAND_OPS_ECC_DMA_POLL)
+    ret = chip->read_page_ecc_dma_polling(chip, bufpoi, page);
+   else if (ops->mode == NAND_OPS_ECC_PIO_IRQ)
+    ret = chip->read_page_ecc_pio_irq(chip, bufpoi, page);
+   else if (ops->mode == NAND_OPS_ECC_PIO_POLL)
+    ret = chip->read_page_ecc_pio_polling(chip, bufpoi, page);
+  }
+  #ifndef MT8512_NFI
+  mtk_nfc_randomizer_disable(chip);
+  #endif
+  if (ret < 0)
+   break;
+
+  max_bitflips = max(max_bitflips, ret);
+
+  /* any new uncorrectable failure aborts the whole read */
+  if (chip->stats.failed - ecc_failures) {
+   ecc_fail = 1;
+   break;
+  }
+
+  if (!aligned)
+   nand_memcpy(buf, chip->databuf + col, bytes);
+  /*
+   * NOTE(review): these conditions look inverted (transfer when the
+   * buffer is NULL?); harmless today since both helpers are stubs,
+   * but confirm intent before implementing them.
+   */
+  if (!oob_ecc)
+   mtk_nand_transfer_ecc_oob();
+  else if (!oob_free)
+   mtk_nand_transfer_free_oob();
+
+  nand_debug("page:0x%x data[0~7] %2x %2x %2x %2x %2x %2x %2x %2x",
+     page, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]);
+
+  readlen -= bytes;
+  buf += bytes;
+
+  if (!readlen)
+   break;
+
+  /* For subsequent reads align to page boundary */
+  col = 0;
+  /* Increment page address */
+  realpage++;
+
+  page = realpage % chip->page_per_chip;
+  /* Check, if we cross a chip boundary */
+  if (!page) {
+   chipnr++;
+   chip->select_chip(chip, -1);
+   chip->select_chip(chip, chipnr);
+  }
+ }
+ chip->select_chip(chip, -1);
+
+ if (ecc_fail) {
+  nand_err("uncorrect error at page:0x%x", page);
+  return -EBADMSG;
+ }
+
+ return max_bitflips;
+}
+
+/* Public read entry point: run the read loop under the controller lock. */
+int mtk_nand_read(struct mtk_nand_chip *chip, struct mtk_nand_ops *ops)
+{
+ int result;
+
+ mtk_nand_get_controller(chip);
+ result = mtk_nand_do_read_ops(chip, ops);
+ mtk_nand_release_controller(chip);
+
+ return result;
+}
+
+/*
+ * Core write loop: program ops->len bytes at byte offset ops->offset,
+ * page by page, using the transfer mode in ops->mode. Offset and length
+ * must be subpage-aligned. Partial pages are padded with 0xFF in
+ * chip->databuf. IRQ modes switch waitfunc to non-polling. On a program
+ * failure the block is marked bad in the BBT.
+ * Returns 0 on success or a negative errno.
+ */
+static int mtk_nand_do_write_ops(struct mtk_nand_chip *chip, struct mtk_nand_ops *ops)
+{
+ int chipnr, realpage, page, col, bytes, aligned;
+ u32 writelen = ops->len;
+ u64 to = ops->offset;
+ u8 *buf = ops->datbuf;
+ u8 *oob_ecc = ops->oobeccbuf;
+ u8 *oob_free = ops->oobfreebuf;
+ u8 *bufpoi;
+ int ret = 0, status, polling_wait = 1;
+
+ /* Reject writes, which are not subpage aligned */
+ if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
+  nand_err("attempt to write non page aligned data (offset 0x%llx, len 0x%llx)", to, ops->len);
+  return -EINVAL;
+ }
+
+ col = to & (chip->pagesize - 1);
+ chipnr = (int)(to / chip->chipsize);
+ chip->select_chip(chip, chipnr);
+
+ /* Check, if it is write protected */
+ if (mtk_nand_check_wp(chip)) {
+  ret = -EIO;
+  nand_err("write protected!");
+  goto err_out;
+ }
+
+ realpage = (int)(to / chip->pagesize);
+ page = realpage % chip->page_per_chip;
+
+ while (1) {
+  bytes = min(chip->pagesize - col, writelen);
+  aligned = (bytes == chip->pagesize);
+  /* workaround for dma to sram */
+  if (!mtk_nand_is_dram_buf(buf))
+   aligned = 0;
+  bufpoi = aligned ? buf : chip->databuf;
+
+  /* partial page: pad the bounce buffer with 0xFF (no-op when programmed) */
+  if (!aligned) {
+   nand_memset(chip->databuf, 0xff, chip->pagesize);
+   nand_memcpy(chip->databuf + col, buf, bytes);
+  }
+  nand_memset(chip->oob_poi, 0xff, chip->oobsize);
+  /* NOTE(review): conditions look inverted, see mtk_nand_do_read_ops */
+  if (!oob_ecc)
+   mtk_nand_fill_ecc_oob();
+  else if (!oob_free)
+   mtk_nand_fill_free_oob();
+
+  /* nand_debug("[nand] write page %d chip %d", page, chipnr); */
+  #ifndef MT8512_NFI
+  mtk_nfc_randomizer_enable(chip, page, RAND_ENCODE, 0);
+  #endif
+  chip->cmdfunc(chip, NAND_CMD_SEQIN, 0x00, page);
+
+  /* dispatch on transfer mode; IRQ variants use the IRQ-based wait below */
+  if (!aligned) {
+   if (ops->mode == NAND_OPS_ECC_DMA_IRQ) {
+    polling_wait = 0;
+    ret = chip->write_subpage_ecc_dma_irq(chip, col, bytes, bufpoi, page);
+   } else if (ops->mode == NAND_OPS_ECC_DMA_POLL)
+    ret = chip->write_subpage_ecc_dma_polling(chip, col, bytes, bufpoi, page);
+   else if (ops->mode == NAND_OPS_ECC_PIO_IRQ) {
+    polling_wait = 0;
+    ret = chip->write_subpage_ecc_pio_irq(chip, col, bytes, bufpoi, page);
+   } else if (ops->mode == NAND_OPS_ECC_PIO_POLL)
+    ret = chip->write_subpage_ecc_pio_polling(chip, col, bytes, bufpoi, page);
+  } else {
+   if (ops->mode == NAND_OPS_RAW_DMA_IRQ) {
+    polling_wait = 0;
+    ret = chip->write_page_raw_dma_irq(chip, bufpoi, page);
+   } else if (ops->mode == NAND_OPS_RAW_DMA_POLL)
+    ret = chip->write_page_raw_dma_polling(chip, bufpoi, page);
+   else if (ops->mode == NAND_OPS_RAW_PIO_IRQ) {
+    polling_wait = 0;
+    ret = chip->write_page_raw_pio_irq(chip, bufpoi, page);
+   } else if (ops->mode == NAND_OPS_RAW_PIO_POLL)
+    ret = chip->write_page_raw_pio_polling(chip, bufpoi, page);
+   else if (ops->mode == NAND_OPS_ECC_DMA_IRQ) {
+    polling_wait = 0;
+    ret = chip->write_page_ecc_dma_irq(chip, bufpoi, page);
+   } else if (ops->mode == NAND_OPS_ECC_DMA_POLL)
+    ret = chip->write_page_ecc_dma_polling(chip, bufpoi, page);
+   else if (ops->mode == NAND_OPS_ECC_PIO_IRQ) {
+    polling_wait = 0;
+    ret = chip->write_page_ecc_pio_irq(chip, bufpoi, page);
+   } else if (ops->mode == NAND_OPS_ECC_PIO_POLL)
+    ret = chip->write_page_ecc_pio_polling(chip, bufpoi, page);
+  }
+  #ifndef MT8512_NFI
+  mtk_nfc_randomizer_disable(chip);
+  #endif
+  if (ret < 0)
+   break;
+
+  chip->cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
+  status = chip->waitfunc(chip, polling_wait);
+  if (status & NAND_STATUS_FAIL) {
+   ret = -EIO;
+   nand_err("write failed at page 0x%x status:0x%x", realpage, status);
+   goto err_out;
+  }
+
+  writelen -= bytes;
+  if (!writelen)
+   break;
+
+  col = 0;
+  buf += bytes;
+  realpage++;
+
+  page = realpage % chip->page_per_chip;
+  /* Check, if we cross a chip boundary */
+  if (!page) {
+   chipnr++;
+   chip->select_chip(chip, -1);
+   chip->select_chip(chip, chipnr);
+  }
+ }
+
+err_out:
+ chip->select_chip(chip, -1);
+ if (ret < 0) {
+  /* NOTE(review): page*pagesize is a u32 multiply; may overflow >4GB parts */
+  if (!mtk_nand_block_checkbad(chip, page))
+   mtk_nand_markbad_bbt(chip, page*chip->pagesize);
+ }
+
+ return ret;
+}
+
+/* Public write entry point: run the write loop under the controller lock. */
+int mtk_nand_write(struct mtk_nand_chip *chip, struct mtk_nand_ops *ops)
+{
+ int result;
+
+ mtk_nand_get_controller(chip);
+ result = mtk_nand_do_write_ops(chip, ops);
+ mtk_nand_release_controller(chip);
+
+ return result;
+}
+
+/*
+ * Core erase loop: erase ops->len bytes at byte offset ops->offset, one
+ * block at a time. Offset and length must be block-aligned. A block
+ * already marked bad is logged but still erased. On an erase failure the
+ * block is marked bad in the BBT.
+ * Returns 0 on success or a negative errno.
+ */
+static int mtk_nand_do_erase_ops(struct mtk_nand_chip *chip, struct mtk_nand_ops *ops)
+{
+ u64 offset = ops->offset;
+ u64 eraselen = ops->len;
+ int page, status, ret = 0, chipnr, polling_wait = 0;
+
+ if ((offset % chip->blocksize) || (eraselen % chip->blocksize)) {
+  nand_err("erase is not aligned (off 0x%llx, len 0x%llx)", offset, eraselen);
+  return -EINVAL;
+ }
+
+ page = (int)(offset / chip->pagesize);
+ chipnr = (int)(offset / chip->chipsize);
+
+ nand_debug("page:0x%x, chipnr:0x%x", page, chipnr);
+
+ chip->select_chip(chip, chipnr);
+
+ /* Check, if it is write protected */
+ if (mtk_nand_check_wp(chip)) {
+  ret = -EIO;
+  nand_err("write protected!");
+  goto err_out;
+ }
+
+ while (1) {
+  /* warn but proceed: caller may deliberately erase a bad block */
+  if (mtk_nand_block_checkbad(chip, page)) {
+   nand_err("attempt to erase bad block at page 0x%x", page);
+  }
+
+  nand_debug("[nand] erase page %d chip %d", page, chipnr);
+  chip->cmdfunc(chip, NAND_CMD_ERASE1, -1, (page % chip->page_per_chip));
+  chip->cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);
+  if (ops->mode == NAND_OPS_ERASE_IRQ)
+   polling_wait = 0;
+  else if (ops->mode == NAND_OPS_ERASE_POLL)
+   polling_wait = 1;
+  status = chip->waitfunc(chip, polling_wait);
+
+  if (status & NAND_STATUS_FAIL) {
+   ret = -EIO;
+   nand_err("erase failed at page 0x%x status:0x%x", page, status);
+   goto err_out;
+  }
+
+  eraselen -= chip->blocksize;
+  if (!eraselen)
+   break;
+  page += chip->page_per_block;
+
+  /* Check, if we cross a chip boundary */
+  if (eraselen && !(page % chip->page_per_chip)) {
+   chipnr++;
+   chip->select_chip(chip, -1);
+   chip->select_chip(chip, chipnr);
+  }
+ }
+err_out:
+ chip->select_chip(chip, -1);
+ if (ret < 0) {
+  if (!mtk_nand_block_checkbad(chip, page))
+   mtk_nand_markbad_bbt(chip, page*chip->pagesize);
+ }
+
+ return ret;
+}
+
+/* Public erase entry point: run the erase loop under the controller lock. */
+int mtk_nand_erase(struct mtk_nand_chip *chip, struct mtk_nand_ops *ops)
+{
+ int result;
+
+ mtk_nand_get_controller(chip);
+ result = mtk_nand_do_erase_ops(chip, ops);
+ mtk_nand_release_controller(chip);
+
+ return result;
+}
+
+
+/*
+ * Top-level NAND bring-up: initialize the controller/chip, then run the
+ * optional unit test and BBT scan. Returns 0 on success, negative errno
+ * on failure.
+ *
+ * Fix: the return value of mtk_nfc_nand_chip_init() was ignored, so on a
+ * probe failure 'chip' could be dereferenced while invalid.
+ */
+int mtk_nand_init(struct mtk_nand_chip **ext_nand)
+{
+ struct mtk_nand_chip *chip;
+ int ret;
+
+ ret = mtk_nfc_nand_chip_init(ext_nand);
+ if (ret)
+  return ret; /* do not touch chip when init failed */
+
+ chip = *ext_nand;
+
+#if MTK_NAND_UNIT_TEST
+ mtk_nand_chip_test(chip);
+#endif
+ if (chip->scan_bbt)
+  ret = chip->scan_bbt(chip);
+
+ return ret;
+}
+
diff --git a/src/bsp/lk/platform/mt8512/drivers/nand/slc/slc.h b/src/bsp/lk/platform/mt8512/drivers/nand/slc/slc.h
new file mode 100644
index 0000000..b9a94c5
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nand/slc/slc.h
@@ -0,0 +1,262 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+#include "slc_os.h"
+#include "ecc/ecc.h"
+
+/* Select the chip by setting nCE to low */
+#define NAND_NCE 0x01
+/* Select the command latch by setting CLE to high */
+#define NAND_CLE 0x02
+/* Select the address latch by setting ALE to high */
+#define NAND_ALE 0x04
+
+#define NAND_CTRL_CLE (NAND_NCE | NAND_CLE)
+#define NAND_CTRL_ALE (NAND_NCE | NAND_ALE)
+#define NAND_CTRL_CHANGE 0x80
+
+/*
+ * Standard NAND flash commands
+ */
+#define NAND_CMD_READ0 0
+#define NAND_CMD_READ1 1
+#define NAND_CMD_RNDOUT 5
+#define NAND_CMD_PAGEPROG 0x10
+#define NAND_CMD_READOOB 0x50
+#define NAND_CMD_ERASE1 0x60
+#define NAND_CMD_STATUS 0x70
+#define NAND_CMD_SEQIN 0x80
+#define NAND_CMD_RNDIN 0x85
+#define NAND_CMD_READID 0x90
+#define NAND_CMD_ERASE2 0xd0
+#define NAND_CMD_PARAM 0xec
+#define NAND_CMD_GET_FEATURES 0xee
+#define NAND_CMD_SET_FEATURES 0xef
+#define NAND_CMD_RESET 0xff
+
+#define NAND_CMD_LOCK 0x2a
+#define NAND_CMD_UNLOCK1 0x23
+#define NAND_CMD_UNLOCK2 0x24
+
+/* Extended commands for large page devices */
+#define NAND_CMD_READSTART 0x30
+#define NAND_CMD_RNDOUTSTART 0xE0
+#define NAND_CMD_CACHEDPROG 0x15
+
+#define NAND_CMD_NONE -1
+
+/* Status bits */
+#define NAND_STATUS_FAIL 0x01
+#define NAND_STATUS_FAIL_N1 0x02
+#define NAND_STATUS_TRUE_READY 0x20
+#define NAND_STATUS_READY 0x40
+#define NAND_STATUS_WP 0x80
+
+/* chip options definition */
+/*
+ * Some MLC NANDs need data scrambling to limit bitflips caused by repeated
+ * patterns.
+ */
+#define NAND_NEED_SCRAMBLING 0x00002000
+#define NAND_BBT_SUPPORT 0x00000001
+
+#define NAND_MAX_ID_LEN 8
+
+/*
+ * Static description of one supported NAND part.  Entries live in
+ * nand_flash_devs[] (slc_ids.c) and are matched by READID bytes.
+ */
+struct mtk_nand_flash_dev {
+ const char *name;
+ u8 id[NAND_MAX_ID_LEN]; /* READID bytes to match */
+ u8 id_len; /* number of significant id bytes */
+
+ /* unit: KByte */
+ u32 chipsize;
+ u32 erasesize;
+ /* NOTE(review): table entries pass pagesize/oobsize in bytes (2048/64,
+ * etc.) -- the KByte comment above appears to cover only chipsize and
+ * erasesize; confirm and tighten the unit comment. */
+ u32 pagesize;
+ u16 oobsize;
+ u32 fdmeccsize;
+ u8 bits_per_cell; /* 1 = SLC (see nand_is_slc()) */
+
+ /* customized setting if need */
+ u32 acctiming; /* NFI access-timing register value */
+ u32 ecc_size; /* ECC sector size, bytes */
+ u32 ecc_strength; /* correctable bits per sector */
+ u32 bbt_options;
+ u32 options;
+};
+
+/*
+ * mtk_nand_ops.mode values.  Read/write modes select the data path
+ * (RAW bypasses the ECC engine, ECC engages it), the transfer engine
+ * (DMA or PIO) and the completion style (polling or interrupt).  The
+ * last two select the completion style for erase operations.
+ */
+enum {
+ NAND_OPS_RAW_DMA_POLL = 0,
+ NAND_OPS_RAW_DMA_IRQ,
+ NAND_OPS_RAW_PIO_POLL,
+ NAND_OPS_RAW_PIO_IRQ,
+ NAND_OPS_ECC_DMA_POLL,
+ NAND_OPS_ECC_DMA_IRQ,
+ NAND_OPS_ECC_PIO_POLL,
+ NAND_OPS_ECC_PIO_IRQ,
+
+ NAND_OPS_ERASE_POLL,
+ NAND_OPS_ERASE_IRQ,
+};
+
+/*
+ * One read/write/erase request.  offset and len are byte-addressed from
+ * the start of the device (callers pass page*pagesize / block*blocksize;
+ * see slc_test.c).
+ */
+struct mtk_nand_ops {
+ u32 mode; /* one of the NAND_OPS_* values above */
+ u64 offset; /* byte offset from device start */
+ u64 len; /* byte count */
+ u8 *datbuf; /* main-data buffer */
+ /* ecc protected oob data */
+ u8 *oobeccbuf;
+ u32 oobecclen;
+ /* ecc unprotected oob data */
+ u8 *oobfreebuf;
+ u32 oobfreelen;
+};
+
+/*
+ * Runtime state and operation table for one attached NAND device.
+ * The function pointers are installed by the NFC driver at init time;
+ * geometry fields are derived from the matched mtk_nand_flash_dev.
+ */
+struct mtk_nand_chip {
+ /* Low-level controller hooks: byte/buffer I/O, chip-enable control,
+ * command/address cycles and ready-busy handling. */
+ u8 (*read_byte)(struct mtk_nand_chip *nand);
+ void (*write_byte)(struct mtk_nand_chip *nand, u8 byte);
+ void (*write_buf)(struct mtk_nand_chip *nand, const u8 *buf, int len);
+ void (*read_buf)(struct mtk_nand_chip *nand, u8 *buf, int len);
+ void (*select_chip)(struct mtk_nand_chip *nand, int chip);
+ void (*cmd_ctrl)(struct mtk_nand_chip *nand, int dat, unsigned int ctrl);
+ int (*dev_ready)(struct mtk_nand_chip *nand);
+ int (*wait_busy_irq)(struct mtk_nand_chip *nand);
+ void (*cmdfunc)(struct mtk_nand_chip *nand, unsigned command, int column,
+ int page_addr);
+ int(*waitfunc)(struct mtk_nand_chip *this, int polling);
+ int (*scan_bbt)(struct mtk_nand_chip *chip);
+
+ /* Bad-block query/mark hooks (page-addressed). */
+ int (*block_bad)(struct mtk_nand_chip *chip, u32 page);
+ int (*block_markbad)(struct mtk_nand_chip *chip, u32 page);
+
+ /* Page program hooks: one per (ecc/raw) x (dma/pio) x (poll/irq)
+ * combination, mirroring the NAND_OPS_* modes. */
+ int (*write_page_ecc_dma_polling)(struct mtk_nand_chip *chip, const u8 *buf,
+ int page);
+ int (*write_page_ecc_dma_irq)(struct mtk_nand_chip *chip, const u8 *buf,
+ int page);
+ int (*write_page_ecc_pio_polling)(struct mtk_nand_chip *chip, const u8 *buf,
+ int page);
+ int (*write_page_ecc_pio_irq)(struct mtk_nand_chip *chip, const u8 *buf,
+ int page);
+ int (*write_page_raw_dma_polling)(struct mtk_nand_chip *chip, const u8 *buf,
+ int page);
+ int (*write_page_raw_dma_irq)(struct mtk_nand_chip *chip, const u8 *buf,
+ int page);
+ int (*write_page_raw_pio_polling)(struct mtk_nand_chip *chip, const u8 *buf,
+ int page);
+ int (*write_page_raw_pio_irq)(struct mtk_nand_chip *chip, const u8 *buf,
+ int page);
+ int (*write_subpage_ecc_dma_polling)(struct mtk_nand_chip *chip, u32 offset,
+ u32 data_len, const u8 *buf, int page);
+ int (*write_subpage_ecc_dma_irq)(struct mtk_nand_chip *chip, u32 offset,
+ u32 data_len, const u8 *buf, int page);
+ int (*write_subpage_ecc_pio_polling)(struct mtk_nand_chip *chip, u32 offset,
+ u32 data_len, const u8 *buf, int page);
+ int (*write_subpage_ecc_pio_irq)(struct mtk_nand_chip *chip, u32 offset,
+ u32 data_len, const u8 *buf, int page);
+
+ /* Page read hooks, same matrix as the write hooks above. */
+ int (*read_subpage_ecc_dma_polling)(struct mtk_nand_chip *chip, u32 off,
+ u32 len, u8 *p, int pg);
+ int (*read_subpage_ecc_dma_irq)(struct mtk_nand_chip *chip, u32 off,
+ u32 len, u8 *p, int pg);
+ int (*read_subpage_ecc_pio_polling)(struct mtk_nand_chip *chip, u32 off,
+ u32 len, u8 *p, int pg);
+ int (*read_subpage_ecc_pio_irq)(struct mtk_nand_chip *chip, u32 off,
+ u32 len, u8 *p, int pg);
+ int (*read_page_ecc_dma_polling)(struct mtk_nand_chip *chip, u8 *p, int pg);
+ int (*read_page_ecc_dma_irq)(struct mtk_nand_chip *chip, u8 *p, int pg);
+ int (*read_page_ecc_pio_polling)(struct mtk_nand_chip *chip, u8 *p, int pg);
+ int (*read_page_ecc_pio_irq)(struct mtk_nand_chip *chip, u8 *p, int pg);
+ int (*read_page_raw_dma_polling)(struct mtk_nand_chip *chip, u8 *buf, int page);
+ int (*read_page_raw_dma_irq)(struct mtk_nand_chip *chip, u8 *buf, int page);
+ int (*read_page_raw_pio_polling)(struct mtk_nand_chip *chip, u8 *buf, int page);
+ int (*read_page_raw_pio_irq)(struct mtk_nand_chip *chip, u8 *buf, int page);
+
+ /* nand device information */
+ u64 totalsize;
+ /* unit: Byte */
+ u64 chipsize;
+ u32 pagesize;
+ u32 oobsize;
+ u32 blocksize;
+ u32 ecc_size;
+ u32 ecc_strength;
+ u32 ecc_steps;
+ u32 subpagesize;
+ u32 fdm_ecc_size;
+ u8 bits_per_cell; /* 1 = SLC */
+ u32 page_per_chip;
+ u32 page_per_block;
+ int chip_delay;
+ u32 acctiming;
+ u32 options;
+ u8 numchips;
+ int activechip; /* currently selected die, -1 = none */
+
+ /* Driver-owned bounce buffers. */
+ u8 *databuf;
+ u8 *oob_poi;
+
+ /* BBT related */
+ u32 bbt_options;
+ int badblockpos;
+ int badblockbits;
+ u32 bbt_erase_shift;
+
+ u8 *bbt;
+ struct mtk_nand_bbt_descr *bbt_td;
+ struct mtk_nand_bbt_descr *bbt_md;
+ struct mtk_nand_bbt_descr *badblock_pattern;
+
+ /* block device information if need */
+ u32 lbasize;
+ u32 lbacnt;
+
+ struct mtk_ecc_stats stats;
+
+ /* Controller-private data; use nand_get/set_controller_data(). */
+ void *priv;
+};
+
+/* Fetch the controller-private state stashed on the chip handle. */
+static inline void *nand_get_controller_data(struct mtk_nand_chip *chip)
+{
+    void *data;
+
+    data = chip->priv;
+    return data;
+}
+
+/* Attach controller-private state to the chip handle (see the
+ * matching nand_get_controller_data()). */
+static inline void nand_set_controller_data(struct mtk_nand_chip *chip, void *priv)
+{
+ chip->priv = priv;
+}
+
+/* True when the part stores one bit per cell (SLC); anything else is MLC+. */
+static inline bool nand_is_slc(struct mtk_nand_chip *chip)
+{
+    bool one_bit_per_cell;
+
+    one_bit_per_cell = (chip->bits_per_cell == 1);
+    return one_bit_per_cell;
+}
+
+extern struct mtk_nand_flash_dev nand_flash_devs[];
+
+extern int mtk_nand_erase(struct mtk_nand_chip *chip, struct mtk_nand_ops *ops);
+extern int mtk_nand_write(struct mtk_nand_chip *chip, struct mtk_nand_ops *ops);
+extern int mtk_nand_read(struct mtk_nand_chip *chip, struct mtk_nand_ops *ops);
+extern int mtk_nand_block_isbad(struct mtk_nand_chip *nand, u32 page);
+extern int mtk_nand_init(struct mtk_nand_chip **ext_nand);
+extern int mtk_nand_scan(struct mtk_nand_chip *chip, int maxchips);
+extern void lk_nand_irq_handler(unsigned int irq);
+extern int nand_get_alignment(void);
+extern int nand_write_img(u64 addr, void *data, u32 img_sz, u32 partition_size, int img_type);
diff --git a/src/bsp/lk/platform/mt8512/drivers/nand/slc/slc_ids.c b/src/bsp/lk/platform/mt8512/drivers/nand/slc/slc_ids.c
new file mode 100644
index 0000000..f75b0e4
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nand/slc/slc_ids.c
@@ -0,0 +1,103 @@
+/*
+* Copyright (c) 2017 MediaTek Inc.
+*
+* Permission is hereby granted, free of charge, to any person obtaining
+* a copy of this software and associated documentation files
+* (the "Software"), to deal in the Software without restriction,
+* including without limitation the rights to use, copy, modify, merge,
+* publish, distribute, sublicense, and/or sell copies of the Software,
+* and to permit persons to whom the Software is furnished to do so,
+* subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be
+* included in all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+#include "slc.h"
+#include "slc_os.h"
+
+#define NAND_OPTIONS_NONE 0
+#define NFI_DEFAULT_ACTIMING 0x10804011
+
+/*
+ * Supported parts, matched against the READID bytes at probe time.
+ * Positional field order per struct mtk_nand_flash_dev:
+ *   name, id[8], id_len, chipsize (KByte), erasesize (KByte),
+ *   pagesize (bytes), oobsize (bytes), fdmeccsize, bits_per_cell,
+ *   acctiming, ecc_size, ecc_strength, bbt_options, options.
+ * The array is terminated by the {NULL} sentinel entry.
+ */
+struct mtk_nand_flash_dev nand_flash_devs[] = {
+ /* MXIC */
+ {
+ "MX30LF2G18AC", {0xc2, 0xda, 0x90, 0x95, 0x6, 0, 0, 0},
+ 5, KB(256), KB(128), 2048, 64, 1, 1, NFI_DEFAULT_ACTIMING, 1024, 12,
+ NAND_OPTIONS_NONE, NAND_OPTIONS_NONE
+ },
+ {
+ "MX30LF4G18AC", {0xc2, 0xdc, 0x90, 0x95, 0x56, 0, 0, 0},
+ 5, KB(512), KB(128), 2048, 64, 1, 1, NFI_DEFAULT_ACTIMING, 1024, 12,
+ NAND_OPTIONS_NONE, NAND_OPTIONS_NONE
+ },
+ {
+ "MX60LF8G18AC", {0xc2, 0xd3, 0xd1, 0x95, 0x5a, 0x00},
+ 5, KB(1024), KB(128), 2048, 64, 1, 1, NFI_DEFAULT_ACTIMING, 1024, 12,
+ NAND_OPTIONS_NONE, NAND_OPTIONS_NONE
+ },
+ /* Micron */
+ {
+ "MT29F2G08ABAEA", {0x2c, 0xda, 0x90, 0x95, 0x06, 0, 0, 0},
+ 5, KB(256), KB(128), 2048, 64, 1, 1, NFI_DEFAULT_ACTIMING, 1024, 12,
+ NAND_OPTIONS_NONE, NAND_OPTIONS_NONE
+ },
+ {
+ "MT29F4G08ABAEA", {0x2c, 0xdc, 0x90, 0xa6, 0x54, 0x00},
+ 5, KB(512), KB(512), 4096, 224, 1, 1, NFI_DEFAULT_ACTIMING, 1024, 24,
+ NAND_OPTIONS_NONE, NAND_OPTIONS_NONE
+ },
+ {
+ "MT29F8G08ABABA", {0x2C, 0x38, 0x00, 0x26, 0x85, 0x0, 0},
+ 5, KB(1024), KB(512), 4096, 224, 1, 1, NFI_DEFAULT_ACTIMING, 1024, 24,
+ NAND_OPTIONS_NONE, NAND_OPTIONS_NONE
+ },
+ /* NOTE(review): chipsize KB(256) looks low for an 8Gb part -- the
+ * sibling MT29F8G08ABABA uses KB(1024); verify against the datasheet. */
+ {
+ "MT29F8G08ABACA", {0x2c, 0xd3, 0x90, 0xa6, 0x64, 0x00},
+ 5, KB(256), KB(256), 4096, 224, 1, 1, NFI_DEFAULT_ACTIMING, 1024, 24,
+ NAND_OPTIONS_NONE, NAND_OPTIONS_NONE
+ },
+ {
+ "MT29F16G08ADBCA", {0x2c, 0xa5, 0xd1, 0x26, 0x68, 0, 0, 0},
+ 5, KB(2048), KB(256), 4096, 224, 1, 1, NFI_DEFAULT_ACTIMING, 1024, 24,
+ NAND_OPTIONS_NONE, NAND_OPTIONS_NONE
+ },
+ /* Toshiba */
+ {
+ "TC58NYG1S3HBAI6", {0x98, 0xaa, 0x90, 0x15, 0x76, 0x16, 0, 0},
+ 6, KB(256), KB(128), 2048, 128, 1, 1, NFI_DEFAULT_ACTIMING, 1024, 24,
+ NAND_OPTIONS_NONE, NAND_OPTIONS_NONE
+ },
+ {
+ "TC58BVG1S3HTA00", {0x98, 0xda, 0x90, 0x15, 0xf6, 0x00, 0, 0},
+ 5, KB(256), KB(128), 2048, 64, 1, 1, NFI_DEFAULT_ACTIMING, 1024, 12,
+ NAND_OPTIONS_NONE, NAND_OPTIONS_NONE
+ },
+ {
+ "TC58NVG2S0HTA00", {0x98, 0xdc, 0x90, 0x26, 0x76, 0x16, 0, 0},
+ 6, KB(512), KB(256), 4096, 256, 1, 1, NFI_DEFAULT_ACTIMING, 1024, 24,
+ NAND_OPTIONS_NONE, NAND_OPTIONS_NONE
+ },
+ /* Samsung */
+ {
+ "K9F2G08U0D", {0xec, 0xda, 0x10, 0x95, 0x46, 0, 0, 0},
+ 5, KB(256), KB(128), 2048, 64, 1, 1, NFI_DEFAULT_ACTIMING, 1024, 12,
+ NAND_OPTIONS_NONE, NAND_OPTIONS_NONE
+ },
+ /* ESMT */
+ {
+ "F59D4G81A-45TG-18V", {0xc8, 0xac, 0x90, 0x15, 0x54, 0, 0, 0},
+ 5, KB(512), KB(128), 2048, 64, 1, 1, NFI_DEFAULT_ACTIMING, 1024, 4,
+ NAND_OPTIONS_NONE, NAND_OPTIONS_NONE
+ },
+
+ {NULL}
+};
+
diff --git a/src/bsp/lk/platform/mt8512/drivers/nand/slc/slc_os.h b/src/bsp/lk/platform/mt8512/drivers/nand/slc/slc_os.h
new file mode 100644
index 0000000..725159c
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nand/slc/slc_os.h
@@ -0,0 +1,330 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#pragma once
+
+/* System related head file*/
+#include <platform/mt_irq.h>
+#include <platform/interrupts.h>
+#include <platform/mt_reg_base.h>
+#include <platform/timer.h>
+#include <platform/mtk_timer.h>
+//#include <platform/mt8516.h>
+#include <kernel/mutex.h>
+#include <kernel/event.h>
+//#include <kernel/vm.h>
+#include <arch/ops.h>
+#include <sys/types.h>
+#include <platform.h>
+#include <reg.h>
+#include <string.h>
+#include <errno.h>
+#include <malloc.h>
+#include <stdbool.h>
+#include <assert.h>
+#include <kernel/vm.h>
+#ifdef MTK_GPT_SCHEME_SUPPORT
+#include <partition.h>
+#endif
+#include <debug.h>
+
+#define MT8512_NFI
+
+/* Error codes */
+#ifndef EIO
+#define EIO 5 /* I/O error */
+#define ENOMEM 12 /* Out of memory */
+#define EFAULT 14 /* Bad address */
+#define EBUSY 16 /* Device or resource busy */
+#define EINVAL 22 /* Invalid argument */
+#define ENOSPC 28 /* No space left on device */
+#define EBADMSG 77 /* Trying to read unreadable message */
+#define ETIMEDOUT 110 /* Connection timed out */
+#endif
+
+/* Data types define */
+typedef signed char int8_t;
+typedef unsigned char uint8_t;
+typedef unsigned char u8;
+typedef unsigned short u16;
+typedef unsigned int u32;
+typedef unsigned long long u64;
+
+#ifndef loff_t
+typedef u64 loff_t;
+#endif
+#ifndef status_t
+typedef int status_t;
+#endif
+#ifndef bool
+typedef char bool;
+#endif
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#ifndef false
+#define false 0
+#define true 1
+#endif
+
+#ifndef BUG_ON
+#define BUG_ON(cond) assert(!(cond))
+#endif
+
+/* Common define */
+#define NAND_BIT(nr) (1UL << (nr))
+#define NAND_GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> ((sizeof(unsigned long) * 8) - 1 - (h))))
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+
+/*
+ * NOTE(review): here 'A' is an alignment MASK (alignment - 1), not the
+ * alignment itself: ALIGN(len, 3) rounds len up to a multiple of 4.
+ * Confirm all callers pass a mask before reworking this.
+ * Fix: arguments fully parenthesized so expression operands expand safely
+ * (CERT PRE01-C); semantics are unchanged.
+ */
+#define ALIGN(S,A) (((S) + (A)) & ~(A))
+
+/* Fully parenthesized; note 'a'/'b' are still evaluated twice. */
+#define max(a, b) ((a) > (b) ? (a) : (b))
+#define min(a, b) ((a) > (b) ? (b) : (a))
+//#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
+
+#if 0//ndef containerof
+#define containerof(ptr, type, member) \
+ ((type *)((unsigned long)(ptr) - __builtin_offsetof(type, member)))
+#endif
+
+/* Generic polling timeout for check_with_timeout(), usecs. */
+#define MTK_NAND_TIMEOUT (500000)
+
+#define KB(x) ((x) * 1024UL)
+#define MB(x) (KB(x) * 1024UL)
+
+/*
+ * wait until cond gets true or timeout.
+ *
+ * cond : C expression to wait
+ * timeout : usecs
+ *
+ * Returns:
+ * 0 : if cond = false after timeout elapsed.
+ * 1 : if cond = true after timeout elapsed,
+ * or the remain usecs if cond = true before timeout elapsed.
+ */
+#define nand_time size_t
+
+/* Mutex/lock related */
+#define nand_lock_t mutex_t
+
+/* completion related */
+#define nand_completion_t event_t
+
+/* Platform tick source, in the units nand_time carries (LK current_time()). */
+static inline nand_time nand_current_time(void)
+{
+    nand_time now = current_time();
+
+    return now;
+}
+
+/* Aligned allocation shim; release the buffer with nand_free(). */
+static inline void *nand_memalign(size_t boundary, size_t size)
+{
+    void *p;
+
+    p = memalign(boundary, size);
+    return p;
+}
+
+/*
+ * Allocate @size bytes of zero-initialized memory (one calloc element).
+ * @size: Number of bytes to allocate
+ * Returns NULL on failure; release with nand_free().
+ */
+static inline void *nand_malloc(size_t size)
+{
+ return calloc(1, size);
+}
+
+/*
+ * Free a buffer allocated by nand_malloc()/nand_memalign().
+ * @buf: Buffer to free; free(NULL) is a harmless no-op.
+ */
+static inline void nand_free(void *buf)
+{
+ free(buf);
+}
+
+/* memcpy pass-through; returns dest, as the libc call does. */
+static inline void *nand_memcpy(void *dest, const void *src, u64 n)
+{
+    void *ret;
+
+    ret = memcpy(dest, src, n);
+    return ret;
+}
+
+/* strncmp pass-through kept for OS-abstraction symmetry. */
+static inline int nand_strncmp(char const *cs, char const *ct, size_t count)
+{
+    int diff;
+
+    diff = strncmp(cs, ct, count);
+    return diff;
+}
+
+/* memcmp pass-through: 0 when the first 'count' bytes match. */
+static inline int nand_memcmp(const void *cs, const void *ct, size_t count)
+{
+    int diff;
+
+    diff = memcmp(cs, ct, count);
+    return diff;
+}
+
+/* memset pass-through; returns s, as the libc call does. */
+static inline void *nand_memset(void *s, int c, u64 n)
+{
+    void *ret;
+
+    ret = memset(s, c, n);
+    return ret;
+}
+
+/* Abort the system with a NAND-tagged panic message.  Debug aid only;
+ * this never returns. */
+static inline void nand_abort(char *s)
+{
+ panic("Nand abort: %s\n", s);
+}
+
+/*
+ * NOTE(review): locking is currently stubbed out -- the mutex calls are
+ * commented away, so all three helpers are no-ops.  That is only safe
+ * while the NAND path runs single-threaded in LK; confirm before relying
+ * on concurrent access (e.g. the *_IRQ operation modes).
+ */
+static inline void nand_lock_init(nand_lock_t *m)
+{
+ //mutex_init(m);
+}
+
+/* No-op; always reports success. */
+static inline status_t nand_lock(nand_lock_t *m)
+{
+ return 0;//mutex_acquire(m);
+}
+
+/* No-op; always reports success. */
+static inline status_t nand_unlock(nand_lock_t *m)
+{
+ return 0;//mutex_release(m);
+}
+
+/* Completion wrappers over LK events: start unsignaled, auto-unsignal
+ * when a waiter is released. */
+static inline void nand_init_completion(nand_completion_t *x)
+{
+ event_init(x, false, EVENT_FLAG_AUTOUNSIGNAL);
+}
+
+/* Release one waiter; 'false' = do not reschedule immediately. */
+static inline void nand_complete(nand_completion_t *x)
+{
+ event_signal(x, false);
+}
+
+/* Block until signaled or 't' elapses.  NOTE(review): 't' is passed in
+ * LK event-timeout units -- confirm these match the units callers use. */
+static inline status_t nand_wait_for_completion_timeout(nand_completion_t *x, nand_time t)
+{
+ return event_wait_timeout(x, t);
+}
+
+/*
+ * Translate a kernel-virtual buffer address to the 32-bit physical
+ * address programmed into the NFI DMA engine.  Without a kernel VM the
+ * address is used as-is.
+ * NOTE(review): the cast truncates to 32 bits -- safe only while all
+ * DMA-able DRAM sits below 4GB; confirm for this SoC's memory map.
+ */
+static inline u32 nand_kvaddr_to_paddr(const u8 *buf)
+{
+ u32 addr;
+
+#ifdef WITH_KERNEL_VM
+ addr = (u32)kvaddr_to_paddr(buf);
+#else
+ addr = (u32)(unsigned long)buf;
+#endif
+
+ return addr;
+}
+
+/*
+ * Cache maintenance around DMA.  'flag' presumably selects the transfer
+ * direction: true = buffer flows to the device (clean only), false =
+ * device writes the buffer (clean + invalidate) -- TODO confirm against
+ * callers.  Always returns 0; 'arg' is unused.
+ */
+static inline u32 nand_dma_map(const u8 *buf, size_t len, bool flag, void *arg)
+{
+ if (flag)
+ arch_clean_cache_range((addr_t)buf, (size_t)len);
+ else
+ arch_clean_invalidate_cache_range((addr_t)buf, (size_t)len);
+ return 0;
+}
+
+/* Post-DMA mirror of nand_dma_map(); same direction flag semantics. */
+static inline void nand_dma_unmap(const u8 *buf, size_t len, bool flag, void *arg)
+{
+ if (flag)
+ arch_clean_cache_range((addr_t)buf, (size_t)len);
+ else
+ arch_clean_invalidate_cache_range((addr_t)buf, (size_t)len);
+}
+
+/*
+ * Spin until 'cond' becomes true or 'timeout' elapses (contract comment
+ * above nand_time).  Returns 0 on timeout; otherwise the remaining time
+ * (minimum 1) so the result is truthy on success.  'cond' is re-evaluated
+ * on every pass, so it must be free of side effects.
+ * NOTE(review): the contract says usecs, but nand_current_time() wraps
+ * LK current_time() -- confirm the units actually agree.
+ */
+#define check_with_timeout(cond, timeout) \
+({ \
+ nand_time __ret; \
+ if (cond) { \
+ __ret = timeout; \
+ } else { \
+ nand_time __end = nand_current_time() + timeout; \
+ for (;;) { \
+ nand_time __now = nand_current_time(); \
+ if (cond) { \
+ __ret = (__end > __now) ? (__end - __now) : 1; \
+ break; \
+ } \
+ if (__end <= __now) { \
+ __ret = 0; \
+ break; \
+ } \
+ } \
+ } \
+ __ret; \
+})
+
+#define mtk_nand_udelay(a) udelay(a)
+#define mtk_nand_mdelay(a) mdelay(a)
+
+/* Nand print info related */
+#define NAND_DEBUG_FLAG 0
+
+#if NAND_DEBUG_FLAG
+#define nand_debug(fmt, ...) dprintf(CRITICAL, "NAND debug::%s %d: " fmt "\n",\
+ __func__, __LINE__, ##__VA_ARGS__)
+
+#define nand_info(fmt, ...) dprintf(CRITICAL, "NAND info::%s %d: " fmt "\n",\
+ __func__, __LINE__, ##__VA_ARGS__)
+#else
+#define nand_debug(fmt, ...) do {} while (0)
+#define nand_info(fmt, ...) do {} while (0)
+#endif
+
+/* Nand error messages */
+#define nand_err(fmt, ...) dprintf(CRITICAL, "NAND error::%s %d: " fmt "\n",\
+ __func__, __LINE__, ##__VA_ARGS__)
+
+/* Nand register RW function re-define */
+#define nand_readb(a) (*(volatile u8 * const)(a))
+#define nand_readw(a) (*(volatile u16 * const)(a))
+#define nand_readl(a) (*(volatile u32 * const)(a))
+
+#define nand_writeb(v, a) (*(volatile u8 * const)(a)) = (v)
+#define nand_writew(v, a) (*(volatile u16 * const)(a)) = (v)
+#define nand_writel(v, a) (*(volatile u32 * const)(a)) = (v)
+
+/* Nand Base register define */
+#define NAND_NFI_BASE NFI_BASE
+#define NAND_NFIECC_BASE NFIECC_BASE
+//#define NAND_DRAM_BASE_VIRT DRAM_BASE_VIRT
+//#define NAND_NFI_IRQ_BIT_ID NFI_IRQ_BIT_ID
+//#define NAND_NFIECC_IRQ_BIT_ID NFIECC_IRQ_BIT_ID
+
+#define NAND_IRQ_NONE INT_NO_RESCHEDULE
+#define NAND_IRQ_HANDLED INT_RESCHEDULE
+
+/* reserve 1M dram buffer for system memory issue */
+/* unsigned char g_data_buf[16384+2048]; */
+#define NAND_DRAM_BUF_DATABUF_ADDR (NAND_BUF_ADDR)
+#define NAND_DRAM_BUF_DATABUF_SIZE (4096+256)
+#define NAND_DRAM_BUF_NFCBUF_ADDR (NAND_DRAM_BUF_DATABUF_ADDR + NAND_DRAM_BUF_DATABUF_SIZE)
+#define NAND_DRAM_BUF_NFCBUF_SIZE (4096+256)
+#define NAND_DRAM_BUF_ECCDE_ADDR (NAND_DRAM_BUF_NFCBUF_ADDR + NAND_DRAM_BUF_NFCBUF_SIZE)
+#define NAND_DRAM_BUF_ECCDE_SIZE (4096+256)
+#define NAND_DRAM_BUF_ECCEN_ADDR (NAND_DRAM_BUF_ECCDE_ADDR + NAND_DRAM_BUF_ECCDE_SIZE)
+#define NAND_DRAM_BUF_ECCEN_SIZE (4096+256)
+#define NAND_DRAM_BUF_BAD_MAP_ADDR (NAND_DRAM_BUF_ECCEN_ADDR + NAND_DRAM_BUF_ECCEN_SIZE)
+#define NAND_DRAM_BUF_BAD_MAP_SIZE (16384)
+
+/* Nand EFUSE register define */
+#define EFUSE_RANDOM_CFG ((volatile u32 *)(IO_PHYS+0x9020))
+#define EFUSE_RANDOM_ENABLE 0x00001000
diff --git a/src/bsp/lk/platform/mt8512/drivers/nand/slc/test/slc_test.c b/src/bsp/lk/platform/mt8512/drivers/nand/slc/test/slc_test.c
new file mode 100644
index 0000000..20480ad
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nand/slc/test/slc_test.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "../slc.h"
+#include "../slc_os.h"
+
+/*
+ * Destructive erase/write/read-back self-test over the first 10 blocks.
+ * Bad blocks are reported but still exercised.  Only invoked when
+ * MTK_NAND_UNIT_TEST is enabled.
+ *
+ * Fix: buf_w was leaked when the buf_r allocation failed; also dropped
+ * the redundant NULL guards before nand_free() and the commented-out
+ * alternate loop variants.
+ */
+void mtk_nand_chip_test(struct mtk_nand_chip *chip)
+{
+    struct mtk_nand_ops ops;
+    u8 *buf_w, *buf_r;
+    int total_block, i, j;
+    int ret = 0;
+
+    total_block = chip->totalsize/chip->blocksize;
+    nand_info("mtk_nand_unit_test start total_block: %d", total_block);
+
+    buf_w = nand_malloc(chip->pagesize * 2);
+    if (buf_w == NULL) {
+        nand_err("malloc buf_w failed: %d \n", chip->pagesize);
+        return;
+    }
+
+    buf_r = nand_malloc(chip->pagesize * 2);
+    if (buf_r == NULL) {
+        nand_err("malloc buf_r failed: %d \n", chip->pagesize);
+        /* Fix: do not leak the write buffer on this error path. */
+        nand_free(buf_w);
+        return;
+    }
+
+    /* Deterministic fill pattern (value truncated to a byte). */
+    for (i = 0; i < chip->pagesize*2; i++)
+        buf_w[i] = i;
+
+    for (i = 0; i < 10; i++) {
+        if (mtk_nand_block_isbad(chip, i*chip->page_per_block)) {
+            nand_info("check bad blk: %d", i);
+        }
+
+        nand_info("test blk: %d", i);
+
+        /* Erase the whole block before programming it. */
+        nand_memset(&ops, 0, sizeof(ops));
+        ops.mode = NAND_OPS_ERASE_POLL;
+        ops.offset = (u64)(i * chip->blocksize);
+        ops.len = chip->blocksize;
+
+        ret = mtk_nand_erase(chip, &ops);
+        if (ret) {
+            nand_err("Erase failed at blk: %d", i);
+            continue;
+        }
+
+        /* Program, read back and compare every page of the block. */
+        for (j = i*chip->page_per_block;
+             j < i*chip->page_per_block + chip->page_per_block; j++) {
+
+            nand_memset(&ops, 0, sizeof(ops));
+            ops.mode = NAND_OPS_ECC_DMA_POLL;
+            ops.offset = (u64)(j * chip->pagesize);
+            ops.len = (u64)chip->pagesize;
+            ops.datbuf = buf_w;
+
+            ret = mtk_nand_write(chip, &ops);
+            if (ret) {
+                nand_err("Write failed at blk:%d, page:%d", i, j);
+                break;
+            }
+
+            nand_memset(&ops, 0, sizeof(ops));
+            nand_memset(buf_r, 0x5A, chip->pagesize);
+            ops.mode = NAND_OPS_ECC_DMA_POLL;
+            ops.offset = (u64)(j * chip->pagesize);
+            ops.len = (u64)chip->pagesize;
+            ops.datbuf = buf_r;
+
+            ret = mtk_nand_read(chip, &ops);
+            if (ret) {
+                nand_err("Read failed at blk:%d page:%d", i, j);
+                break;
+            }
+
+            /* compare the read buf and write buf */
+            if (nand_memcmp(buf_r, buf_w, chip->pagesize)) {
+                nand_err("compare failed! addr:0x%x, buf_r:0x%x, %x, %x, %x, %x buf_w:0x%x, %x, %x, %x, %x",
+                    (int)ops.offset, buf_r[0], buf_r[1], buf_r[2], buf_r[3], buf_r[4]
+                    , buf_w[0], buf_w[1], buf_w[2], buf_w[3], buf_w[4]);
+            }
+        }
+
+        /* Leave the block erased when done. */
+        nand_memset(&ops, 0, sizeof(ops));
+        ops.mode = NAND_OPS_ERASE_POLL;
+        ops.offset = (u64)(i * chip->blocksize);
+        ops.len = chip->blocksize;
+        ret = mtk_nand_erase(chip, &ops);
+        if (ret) {
+            nand_err("Erase failed at blk: %d", i);
+            continue;
+        }
+    }
+
+    nand_info("mtk_nand_chip_test start end");
+
+    /* nand_free(NULL) would be a no-op; both buffers are valid here. */
+    nand_free(buf_r);
+    nand_free(buf_w);
+}
+
diff --git a/src/bsp/lk/platform/mt8512/drivers/nand/slc/test/slc_test.h b/src/bsp/lk/platform/mt8512/drivers/nand/slc/test/slc_test.h
new file mode 100644
index 0000000..1d56085
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nand/slc/test/slc_test.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#define MTK_NAND_UNIT_TEST 0
+
+void mtk_nand_chip_test(struct mtk_nand_chip *chip);
diff --git a/src/bsp/lk/platform/mt8512/drivers/nand/slc_bdev.c b/src/bsp/lk/platform/mt8512/drivers/nand/slc_bdev.c
new file mode 100644
index 0000000..4f6edd0
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nand/slc_bdev.c
@@ -0,0 +1,792 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <platform/mtk_timer.h>
+#include <lib/bio.h>
+#include <lib/partition.h>
+#include <malloc.h>
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <err.h>
+#include <errno.h>
+#include <pow2.h>
+#include <bits.h>
+#include <platform.h>
+#include <platform/nand.h>
+#include <trace.h>
+#include "slc/slc.h"
+#include "slc/slc_os.h"
+#include "slc/test/slc_test.h"
+
+#define LOCAL_TRACE 0
+
+/* global variable define */
+struct mtk_nand_chip *chip;
+
+struct nand_bdev {
+ struct bdev dev;
+ int firstblk;
+ int blkcnt;
+ /* this flag enables users to handler bad blocks */
+ int check_bad;
+};
+
+static struct nand_devices {
+ /* Bad block bitmap */
+ unsigned long *bitmap;
+ /* Bad block mapping table */
+ unsigned short *blkmap;
+} nand_devs;
+
+static int nand_reg_subdev(struct bdev *bdev);
+static int nand_unreg_subdev(struct bdev *bdev);
+static bnum_t blk_map(struct bdev *dev, bnum_t page);
+
+/* Convert a chip-absolute byte offset to a block index. */
+static inline int byte2blk(struct mtk_nand_chip *chip, off_t offset)
+{
+ return offset / chip->blocksize;
+}
+
+/* Convert a block index to its chip-absolute byte offset. */
+static inline off_t blk2byte(struct mtk_nand_chip *chip, int blk)
+{
+ return blk * chip->blocksize;
+}
+
+/* Convert a page (LBA) number to the block that contains it. */
+static inline int pg2blk(struct mtk_nand_chip *chip, int page)
+{
+ return page / chip->page_per_block;
+}
+
+/* Convert a block index to its first page (LBA) number. */
+static inline int blk2pg(struct mtk_nand_chip *chip, int blk)
+{
+ return blk * chip->page_per_block;
+}
+
+/* partition range check */
+static size_t nand_trim_range(struct bdev *dev, off_t offset, size_t len)
+{
+ struct nand_bdev *ndev = (struct nand_bdev *)dev;
+ /* end block, last block of the partition */
+ int eb, peb;
+
+ eb = byte2blk(chip, len + offset - 1);
+ peb = ndev->firstblk + ndev->blkcnt - 1;
+
+ /* bio_read/write/erase has checked argument "offset" */
+ if (eb > peb)
+ return 0;
+
+ return len;
+}
+
+/* Calculate the absolute address: translate a partition-relative byte
+ * offset into a chip-absolute one by adding the partition's first
+ * block. Fix: cast to the derived struct nand_bdev * (the original
+ * cast to struct bdev * was an incompatible-pointer assignment).
+ */
+static off_t abs_addr(struct bdev *dev, off_t offset)
+{
+ struct nand_bdev *ndev = (struct nand_bdev *)dev;
+
+ return offset + blk2byte(chip, ndev->firstblk);
+}
+
+/* Write "blks" pages from buf starting at page number "blknr".
+ * blknr is a physical page address (bad-block mapping is the caller's
+ * job, see blk_map()).
+ * NOTE(review): the result of mtk_nand_write() is ignored, so a failed
+ * program still reports the full byte count -- confirm intended.
+ */
+static ssize_t nand_bwrite(bdev_t *bdev, void *buf, u32 blknr, u32 blks)
+{
+ struct mtk_nand_ops ops;
+
+ dprintf(INFO, "[%s] %s %s lba %d(%d) pagecnt %d\n", __func__,
+ bdev->name, bdev->label, pg2blk(chip, blknr),
+ ((struct nand_bdev *)bdev)->firstblk, blks);
+ memset(&ops, 0, sizeof(ops));
+ ops.mode = NAND_OPS_ECC_DMA_POLL;
+ ops.offset = (u64)blknr * chip->pagesize;
+ ops.len = (u64)blks * chip->pagesize;
+ ops.datbuf = buf;
+
+ mtk_nand_write(chip, &ops);
+
+ return (ssize_t)blks * chip->pagesize;
+}
+
+/* Read "blks" pages into buf starting at physical page "blknr".
+ * Mirror of nand_bwrite(); ECC + DMA polling mode.
+ * NOTE(review): the result of mtk_nand_read() is ignored, so an
+ * uncorrectable read still reports the full byte count -- confirm.
+ */
+static ssize_t nand_bread(bdev_t *bdev, void *buf, u32 blknr, u32 blks)
+{
+ struct mtk_nand_ops ops;
+
+ dprintf(INFO, "[%s] %s %s lba %d(%d) pagecnt %d\n", __func__,
+ bdev->name, bdev->label, pg2blk(chip, blknr),
+ ((struct nand_bdev *)bdev)->firstblk, blks);
+ memset(&ops, 0, sizeof(ops));
+ ops.mode = NAND_OPS_ECC_DMA_POLL;
+ ops.offset = (u64)blknr * chip->pagesize;
+ ops.len = (u64)blks * chip->pagesize;
+ ops.datbuf = buf;
+
+ mtk_nand_read(chip, &ops);
+
+ return (ssize_t)blks * chip->pagesize;
+}
+
+/* Read "len" bytes at partition-relative "offset" into _buf.
+ * Unaligned head and tail (and cache-unaligned callers) are staged
+ * through a 16-byte-aligned bounce buffer; each logical page is
+ * translated to a good physical page via blk_map().
+ * Returns bytes read, or a negative error code.
+ * Fixes vs. original: the bounce buffer is now allocated after the
+ * range check, so the early "return 0" path no longer leaks it, and
+ * the size_t "len" is printed with %zx instead of %x.
+ */
+static ssize_t nand_bio_read(struct bdev *dev, void *_buf, off_t offset, size_t len)
+{
+ uint8_t *buf = (uint8_t *)_buf;
+ ssize_t bytes_read = 0;
+ bnum_t lba, pba;
+ ssize_t err = 0;
+ uint32_t page_per_blk = chip->blocksize / chip->pagesize;
+ uint8_t *temp;
+
+ offset = abs_addr(dev, offset);
+ len = nand_trim_range(dev, offset, len);
+ if (len == 0)
+ return 0;
+
+ /* allocate only after the early-return path above (no leak) */
+ temp = memalign(16, dev->block_size);
+ if (temp == NULL)
+ return ERR_NO_MEMORY;
+
+ /* find the starting block */
+ lba = offset / dev->block_size;
+ dprintf(INFO, "nand_bio_read: page 0x%x, len 0x%zx\n", lba, len);
+
+ /* handle partial first block */
+ if ((offset % dev->block_size) != 0) {
+ /* Convert to physical address */
+ pba = blk_map(dev, lba);
+ /* read in the block */
+ err = nand_bread(dev, temp, pba, 1);
+ if (err < 0) {
+ goto err;
+ } else if ((size_t)err != dev->block_size) {
+ err = ERR_IO;
+ goto err;
+ }
+
+ /* copy what we need */
+ size_t block_offset = offset % dev->block_size;
+ size_t tocopy = MIN(dev->block_size - block_offset, len);
+ memcpy(buf, temp + block_offset, tocopy);
+
+ /* increment our buffers */
+ buf += tocopy;
+ len -= tocopy;
+ bytes_read += tocopy;
+ lba++;
+ }
+
+ // If the device requires alignment AND our buffer is not already aligned.
+ bool requires_alignment =
+ (dev->flags & BIO_FLAG_CACHE_ALIGNED_READS) &&
+ (IS_ALIGNED((size_t)buf, CACHE_LINE) == false);
+ /* handle middle blocks */
+ if (requires_alignment) {
+ while (len >= dev->block_size) {
+ /* Convert to physical address */
+ pba = blk_map(dev, lba);
+ /* do the middle reads */
+ err = nand_bread(dev, temp, pba, 1);
+ if (err < 0) {
+ goto err;
+ } else if ((size_t)err != dev->block_size) {
+ err = ERR_IO;
+ goto err;
+ }
+ memcpy(buf, temp, dev->block_size);
+
+ buf += dev->block_size;
+ len -= dev->block_size;
+ bytes_read += dev->block_size;
+ lba++;
+ }
+ } else {
+ uint32_t num_blocks = divpow2(len, dev->block_shift);
+ uint32_t read_blks;
+
+ while (num_blocks) {
+ /* never cross a physical block boundary in one call,
+ * each block may map to a different physical block */
+ read_blks = page_per_blk - (lba % page_per_blk);
+ read_blks = num_blocks > read_blks ? read_blks : num_blocks;
+
+ /* Convert to physical address */
+ pba = blk_map(dev, lba);
+
+ err = nand_bread(dev, buf, pba, read_blks);
+ if (err < 0) {
+ goto err;
+ } else if ((size_t)err != dev->block_size * read_blks) {
+ err = ERR_IO;
+ goto err;
+ }
+ buf += err;
+ len -= err;
+ bytes_read += err;
+ lba += read_blks;
+ num_blocks -= read_blks;
+ }
+ }
+
+ /* handle partial last block */
+ if (len > 0) {
+ /* Convert to physical address */
+ pba = blk_map(dev, lba);
+ /* read the block */
+ err = nand_bread(dev, temp, pba, 1);
+ if (err < 0) {
+ goto err;
+ } else if ((size_t)err != dev->block_size) {
+ err = ERR_IO;
+ goto err;
+ }
+
+ /* copy the partial block from our temp buffer */
+ memcpy(buf, temp, len);
+
+ bytes_read += len;
+ }
+
+err:
+ free(temp);
+
+ /* return error or bytes read */
+ return (err >= 0) ? bytes_read : err;
+}
+
+/* Write "len" bytes from _buf at partition-relative "offset".
+ * Partial head/tail blocks use read-modify-write through an aligned
+ * bounce buffer; each logical page is translated to a good physical
+ * page via blk_map(). Returns bytes written, or a negative error.
+ * Fixes vs. original: the bounce buffer is now allocated after the
+ * range check, so the early "return 0" path no longer leaks it, and
+ * the size_t "len" is printed with %zx instead of %x.
+ */
+static ssize_t nand_bio_write(struct bdev *dev, const void *_buf, off_t offset, size_t len)
+{
+ const uint8_t *buf = (const uint8_t *)_buf;
+ ssize_t bytes_written = 0;
+ bnum_t lba, pba;
+ uint32_t page_per_blk = chip->blocksize / chip->pagesize;
+ ssize_t err = 0;
+ uint8_t *temp;
+
+ offset = abs_addr(dev, offset);
+ len = nand_trim_range(dev, offset, len);
+ if (len == 0)
+ return 0;
+
+ /* allocate only after the early-return path above (no leak) */
+ temp = memalign(16, dev->block_size);
+ if (temp == NULL)
+ return ERR_NO_MEMORY;
+
+ /* find the starting block */
+ lba = offset / dev->block_size;
+ dprintf(CRITICAL, "nand_bio_write: page 0x%x, len 0x%zx\n", lba, len);
+
+ /* handle partial first block */
+ if ((offset % dev->block_size) != 0) {
+ /* Convert to physical address */
+ pba = blk_map(dev, lba);
+ /* read-modify-write the partial block */
+ err = nand_bread(dev, temp, pba, 1);
+ if (err < 0) {
+ goto err;
+ } else if ((size_t)err != dev->block_size) {
+ err = ERR_IO;
+ goto err;
+ }
+
+ /* copy what we need */
+ size_t block_offset = offset % dev->block_size;
+ size_t tocopy = MIN(dev->block_size - block_offset, len);
+ memcpy(temp + block_offset, buf, tocopy);
+
+ /* write it back out */
+ err = nand_bwrite(dev, temp, pba, 1);
+ if (err < 0) {
+ goto err;
+ } else if ((size_t)err != dev->block_size) {
+ err = ERR_IO;
+ goto err;
+ }
+
+ /* increment our buffers */
+ buf += tocopy;
+ len -= tocopy;
+ bytes_written += tocopy;
+ lba++;
+ }
+
+
+ // If the device requires alignment AND our buffer is not already aligned.
+ bool requires_alignment =
+ (dev->flags & BIO_FLAG_CACHE_ALIGNED_WRITES) &&
+ (IS_ALIGNED((size_t)buf, CACHE_LINE) == false);
+
+ /* handle middle blocks */
+ if (requires_alignment) {
+ while (len >= dev->block_size) {
+ /* Convert to physical address */
+ pba = blk_map(dev, lba);
+ /* bounce each block through the aligned buffer */
+ memcpy(temp, buf, dev->block_size);
+ err = nand_bwrite(dev, temp, pba, 1);
+ if (err < 0) {
+ goto err;
+ } else if ((size_t)err != dev->block_size) {
+ err = ERR_IO;
+ goto err;
+ }
+
+ buf += dev->block_size;
+ len -= dev->block_size;
+ bytes_written += dev->block_size;
+ lba++;
+ }
+ } else {
+ uint32_t num_blocks = divpow2(len, dev->block_shift);
+ uint32_t write_blks;
+
+ while (num_blocks) {
+ /* never cross a physical block boundary in one call,
+ * each block may map to a different physical block */
+ write_blks = page_per_blk - (lba % page_per_blk);
+ write_blks = num_blocks > write_blks ? write_blks : num_blocks;
+
+ /* Convert to physical address */
+ pba = blk_map(dev, lba);
+
+ err = nand_bwrite(dev, buf, pba, write_blks);
+ if (err < 0) {
+ goto err;
+ } else if ((size_t)err != dev->block_size * write_blks) {
+ err = ERR_IO;
+ goto err;
+ }
+ DEBUG_ASSERT((size_t)err == (write_blks * dev->block_size));
+
+ buf += err;
+ len -= err;
+ bytes_written += err;
+ lba += write_blks;
+ num_blocks -= write_blks;
+ }
+ }
+
+ /* handle partial last block */
+ if (len > 0) {
+ /* Convert to physical address */
+ pba = blk_map(dev, lba);
+ /* read the block */
+ err = nand_bread(dev, temp, pba, 1);
+ if (err < 0) {
+ goto err;
+ } else if ((size_t)err != dev->block_size) {
+ err = ERR_IO;
+ goto err;
+ }
+
+ /* copy the partial block from our temp buffer */
+ memcpy(temp, buf, len);
+
+ /* write it back out */
+ err = nand_bwrite(dev, temp, pba, 1);
+ if (err < 0) {
+ goto err;
+ } else if ((size_t)err != dev->block_size) {
+ err = ERR_IO;
+ goto err;
+ }
+
+ bytes_written += len;
+ }
+
+err:
+ free(temp);
+ /* return error or bytes written */
+ return (err >= 0) ? bytes_written : err;
+}
+
+/* Erase "len" bytes at partition-relative "offset".
+ * Both offset and len must be block-aligned. Blocks already marked bad
+ * are silently skipped (and not counted in the returned byte total).
+ * Returns the number of bytes actually erased, or a negative error.
+ */
+static ssize_t nand_erase(bdev_t *dev, off_t offset, size_t len)
+{
+ struct mtk_nand_ops ops;
+ u32 blocks;
+ ssize_t erase_len = 0;
+
+ len = bio_trim_range(dev, offset, len);
+
+ offset = abs_addr(dev, offset);
+ len = nand_trim_range(dev, offset, len);
+ if (len == 0)
+ return 0;
+
+ if (offset % chip->blocksize)
+ return ERR_INVALID_ARGS;
+ if (len % chip->blocksize)
+ return ERR_INVALID_ARGS;
+
+ blocks = len / chip->blocksize;
+
+ memset(&ops, 0, sizeof(ops));
+ ops.mode = NAND_OPS_ERASE_POLL;
+ ops.offset = offset;
+ ops.len = chip->blocksize;
+
+ dprintf(INFO, "[%s] offset %d(%d) len %dbytes\n", __func__,
+ byte2blk(chip, offset), byte2blk(chip, ops.offset), len);
+
+ /* erase block by block, skipping known-bad blocks */
+ while (blocks) {
+ int ret;
+ if (!mtk_nand_block_isbad(chip, (u32)(ops.offset / chip->pagesize))) {
+ ret = (ssize_t)mtk_nand_erase(chip, &ops);
+ if (ret < 0)
+ return ret;
+ erase_len += chip->blocksize;
+ }
+ ops.offset += chip->blocksize;
+ blocks--;
+ }
+
+ return erase_len;
+}
+
+/* Force-erase every block of the whole chip, including blocks marked
+ * bad (factory reset path, reached via NAND_IOCTL_FORCE_FORMAT_ALL).
+ * NOTE(review): the per-block mtk_nand_erase() result is captured in
+ * "ret" but never checked, so failures are silently counted as erased
+ * -- confirm this is intended for the force path.
+ */
+static ssize_t nand_force_erase(bdev_t *dev)
+{
+ struct mtk_nand_ops ops;
+ u32 blocks;
+ ssize_t erase_len = 0, ret;
+
+ blocks = chip->totalsize / chip->blocksize;
+
+ memset(&ops, 0, sizeof(ops));
+ ops.mode = NAND_OPS_ERASE_POLL;
+ ops.offset = 0;
+ ops.len = chip->blocksize;
+
+ dprintf(INFO, "[%s] force format whole chip blocks:%d\n",
+ __func__, blocks);
+
+ while (blocks) {
+ ret = (ssize_t)mtk_nand_erase(chip, &ops);
+
+ erase_len += chip->blocksize;
+ ops.offset += chip->blocksize;
+ blocks--;
+ }
+
+ return erase_len;
+}
+
+/* Run the destructive whole-chip unit test when MTK_NAND_UNIT_TEST is
+ * enabled; otherwise a no-op. Always returns the chip's total size.
+ */
+static ssize_t nand_force_chip_test(bdev_t *dev)
+{
+
+#if MTK_NAND_UNIT_TEST
+ mtk_nand_chip_test(chip);
+#endif
+ return chip->totalsize;
+}
+
+/* Dispatch the NAND-specific bio ioctls (erase geometry, subdevice
+ * registration, bad-block queries, force format/test).
+ * Returns NO_ERROR, a request-specific value, or ERR_NOT_SUPPORTED.
+ */
+static int nand_bio_ioctl(struct bdev *bdev, int request, void *argp)
+{
+ int ret = NO_ERROR;
+
+ LTRACEF("dev %p, request %d, argp %p\n", bdev, request, argp);
+
+ switch (request) {
+ case NAND_IOCTL_GET_ERASE_SIZE:
+ if (likely(argp))
+ *(unsigned int *)argp = chip->blocksize;
+ else
+ ret = ERR_NOT_SUPPORTED;
+ break;
+ case NAND_IOCTL_REGISTER_SUBDEV:
+ ret = nand_reg_subdev(bdev);
+ break;
+ case NAND_IOCTL_UNREGISTER_SUBDEV:
+ ret = nand_unreg_subdev(bdev);
+ break;
+ case NAND_IOCTL_CHECK_BAD_BLOCK:
+ /* caller opts in to doing its own bad-block handling */
+ ((struct nand_bdev *)bdev)->check_bad = 1;
+ break;
+ case NAND_IOCTL_FORCE_FORMAT_ALL:
+ ret = nand_force_erase(bdev);
+ break;
+ case NAND_IOCTL_FORCE_TEST_ALL:
+ ret = nand_force_chip_test(bdev);
+ break;
+ case NAND_IOCTL_IS_BAD_BLOCK:
+ {
+ /* argp is the partition-relative byte offset to test */
+ off_t offset = *(off_t *)argp;
+ struct nand_bdev *ndev = (struct nand_bdev *)bdev;
+ int blk;
+
+ blk = ndev->firstblk + byte2blk(chip, offset);
+ return bitmap_test(nand_devs.bitmap, blk);
+ }
+ default:
+ return ERR_NOT_SUPPORTED;
+ }
+
+ return ret;
+}
+
+/* Scan all blocks and create the bad block bitmap.
+ * Returns 0 on success or -ENOMEM.
+ * Fix: the original allocated blkcnt / 8 bytes, which under-allocates
+ * when blkcnt is not a multiple of the bitmap word size, letting
+ * bitmap_set() write past the end of the buffer. Round up to whole
+ * unsigned-long words instead.
+ */
+static int construct_bitmap(void)
+{
+ uint32_t block;
+ uint32_t blkcnt = pg2blk(chip, chip->lbacnt);
+ size_t mapsize = ((blkcnt + BITMAP_BITS_PER_WORD - 1) /
+ BITMAP_BITS_PER_WORD) * sizeof(unsigned long);
+
+ nand_devs.bitmap = (unsigned long *)malloc(mapsize);
+ if (!nand_devs.bitmap) {
+ dprintf(CRITICAL, "no enough memory for nand bitmap\n");
+ return -ENOMEM;
+ }
+
+ memset(nand_devs.bitmap, 0, mapsize);
+
+ /* Skip the block0 */
+ for (block = 1; block < blkcnt; block++) {
+ if (unlikely(mtk_nand_block_isbad(chip, blk2pg(chip, block)))) {
+ bitmap_set(nand_devs.bitmap, block);
+ dprintf(ALWAYS, "block%d is a bad block!\n", block);
+ }
+ }
+
+ return 0;
+}
+
+/* Find the first good (not marked bad) physical block at or after pba
+ * by scanning the bad-block bitmap for a zero bit.
+ * Returns the good block number, or -EIO when none is found within
+ * the search window.
+ * NOTE(review): maxbits is derived from chip->lbacnt (a page count),
+ * not the block count -- confirm the intended search-window unit.
+ */
+static int find_good_blk(int pba)
+{
+ /* Bad block ratio should be less than 2%, */
+ int maxbits = chip->lbacnt * 2 / 100;
+ /* bit offset within a WORD */
+ int offs_wd = BITMAP_BIT_IN_WORD(pba);
+ /* WORD offset in the bitmap */
+ int offs = BITMAP_WORD(pba);
+ unsigned long bitmap;
+ int retblk = 0;
+
+ if (offs_wd) {
+ /* in the middle of a WORD: mask off the bits below pba so
+ * ffz() cannot pick a block before the requested one */
+ bitmap = nand_devs.bitmap[offs];
+ bitmap |= ~(~0UL << offs_wd);
+ retblk = bitmap_ffz(&bitmap, BITMAP_BITS_PER_WORD);
+ if (retblk >= 0) {
+ /* we found a good block */
+ pba += (retblk - offs_wd);
+ } else {
+ /* Keep searching good bits in WORDs behind */
+ offs++;
+ retblk = bitmap_ffz(&nand_devs.bitmap[offs], maxbits);
+ if (retblk >= 0) {
+ /* found a good block */
+ pba += (BITMAP_BITS_PER_WORD - offs_wd + retblk);
+ }
+ }
+ } else {
+ /* word-aligned start: scan from this WORD directly */
+ retblk = bitmap_ffz(&nand_devs.bitmap[offs], maxbits);
+ if (retblk >= 0) {
+ /* found a good block */
+ pba = offs * BITMAP_BITS_PER_WORD + retblk;
+ }
+ }
+
+ if (retblk < 0) {
+ dprintf(CRITICAL, "Too many bad blocks!\n");
+ return -EIO;
+ } else
+ return pba;
+}
+
+/* Build the logical-to-physical block mapping table for one partition:
+ * each logical block gets the next good physical block at or after it.
+ * Returns 0 on success or -EIO when good blocks run out.
+ * NOTE(review): when the partition end is reached early, the remaining
+ * blkmap entries stay 0; blk_map() treats a 0 entry (for index != 0)
+ * as "good blocks exhausted" -- confirm this interplay is intended.
+ */
+static int construct_bmap(struct nand_bdev *dev)
+{
+ int lba, pba;
+ unsigned short *blkmap;
+
+ pba = dev->firstblk;
+ blkmap = &nand_devs.blkmap[dev->firstblk];
+ for (lba = 0; lba < dev->blkcnt; lba++) {
+ pba = find_good_blk(pba);
+ if (pba < 0)
+ return -EIO;
+ blkmap[lba] = pba;
+ pba++;
+ if (pba >= dev->firstblk + dev->blkcnt)
+ /* end of partition */
+ break;
+ }
+
+ return 0;
+}
+
+/* Set a nand_bdev's block range from its start LBA/count and build its
+ * bad-block mapping. If dev is NULL a detached nand_bdev is allocated
+ * (used for the MBR region in nand_init_device()).
+ * Returns 0 on success or -ENOMEM.
+ * NOTE(review): the return value of construct_bmap() is ignored, so a
+ * partition with too many bad blocks still reports success -- confirm.
+ */
+static int initialize_nand_bdev(struct bdev *dev, bnum_t start_lba, bnum_t lbacnt)
+{
+ struct nand_bdev *ndev = (struct nand_bdev *)dev;
+
+ if (!ndev) {
+ ndev = malloc(sizeof(struct nand_bdev));
+ if (!ndev) {
+ dprintf(CRITICAL, "no enough memory for nand_bdev\n");
+ return -ENOMEM;
+ }
+ memset(ndev, 0, sizeof(struct nand_bdev));
+ }
+
+ ndev->firstblk = pg2blk(chip, start_lba);
+ ndev->blkcnt = pg2blk(chip, lbacnt);
+
+ construct_bmap(ndev);
+
+ dprintf(INFO, "[%s] dev %s %s 1st %d len %d\n", __func__,
+ ndev->dev.name, ndev->dev.label,
+ ndev->firstblk, ndev->blkcnt);
+
+ return 0;
+}
+
+/* Allocate a nand_bdev, wire up the bio callbacks (read/write/erase/
+ * ioctl) and register it with the bio layer.
+ * Returns the embedded bdev, or NULL on allocation failure.
+ */
+static struct bdev *nand_reg_bio_dev(const char *name, size_t lbasize,
+ bnum_t lbacnt, const char *label)
+{
+ struct nand_bdev *ndev;
+ struct bdev *dev;
+
+ ndev = malloc(sizeof(struct nand_bdev));
+ if (!ndev) {
+ dprintf(CRITICAL, "no enough memory for nand_bdev\n");
+ return NULL;
+ }
+
+ memset(ndev, 0, sizeof(struct nand_bdev));
+ dev = &ndev->dev;
+ bio_initialize_bdev(dev, name, lbasize, lbacnt, 0, NULL, BIO_FLAGS_NONE);
+ dev->read = nand_bio_read;
+ dev->write = nand_bio_write;
+ dev->read_block = nand_bread;
+ dev->write_block = nand_bwrite;
+ dev->erase = nand_erase;
+ dev->erase_byte = 0xff;
+ dev->ioctl = nand_bio_ioctl;
+ if (label)
+ dev->label = strdup(label);
+ bio_register_device(dev);
+ dprintf(INFO, "[%s] %s %s %ld %d\n", __func__, name, label,
+ lbasize, lbacnt);
+
+ return dev;
+}
+
+/* We register BIO device instead of BIO subdev device here,
+ * it is a easy way to manage the partitions.
+ * Walks the published subdevices ("<name>p1" ... up to 128), replaces
+ * each with a full nand bio device that has its own bad-block mapping,
+ * then unregisters the original subdev.
+ * Returns 0 on success or a negative error code.
+ */
+static int nand_reg_subdev(struct bdev *bdev)
+{
+ int i, ret, start_lba;
+ struct bdev *dev, *new;
+ char devname[32];
+
+ for (i = 0; i < 128; i++) {
+ sprintf(devname, "%sp%d", bdev->name, i + 1);
+ dev = bio_open(devname);
+ if (!dev)
+ break;
+
+ /* find the start_lba in structure subdev_t */
+ /* NOTE(review): this reaches into subdev_t's memory layout
+ * (bdev + parent pointer + erase geometry) by raw pointer
+ * arithmetic; it breaks silently if lib/bio's subdev_t
+ * layout changes -- confirm against the bio implementation.
+ */
+ start_lba = *(bnum_t *)((unsigned long)&dev[1] + sizeof(bdev_t *) +
+ sizeof(bio_erase_geometry_info_t));
+ new = nand_reg_bio_dev(devname, dev->block_size, dev->block_count,
+ dev->label);
+ if (!new)
+ return -EIO;
+
+ dprintf(INFO, "[%s] %s %s\n", __func__, new->name, new->label);
+ ret = initialize_nand_bdev(new, start_lba, dev->block_count);
+ if (ret)
+ return ret;
+
+ /* Remove the subdevs from the list */
+ bio_unregister_device(dev);
+ bio_close(dev);
+ }
+
+ return 0;
+}
+
+/* Free a nand bio device created by nand_reg_bio_dev() (label string
+ * was strdup'ed there). The caller is expected to have unregistered
+ * it from the bio layer already. Always returns 0.
+ */
+static int nand_unreg_subdev(struct bdev *bdev)
+{
+ struct nand_bdev *ndev = (struct nand_bdev *)bdev;
+
+ dprintf(INFO, "[%s] %s %s\n", __func__, bdev->name, bdev->label);
+ free(ndev->dev.label);
+ free(ndev);
+
+ return 0;
+}
+
+/* Convert logical address to physical block address.
+ * "page" is a logical page number; the containing block is remapped
+ * through nand_devs.blkmap and the in-block page offset is preserved.
+ * NOTE(review): -EIO is returned through the unsigned bnum_t return
+ * type, so callers see a huge page number rather than a negative
+ * error -- confirm how callers are meant to detect this.
+ */
+static bnum_t blk_map(struct bdev *dev, bnum_t page)
+{
+ struct nand_bdev *ndev = (struct nand_bdev *)dev;
+ int remain, blk;
+ int index = pg2blk(chip, page);
+
+ /* User check bad block itself, just return the page address */
+ if (ndev->check_bad)
+ return page;
+
+ blk = nand_devs.blkmap[index];
+ remain = page & (chip->page_per_block - 1);
+
+ if (index && blk == 0) {
+ /* the mapping address is 0, means the good block run out */
+ dprintf(CRITICAL, "%s(%s): Too many bad blocks, can not erase/program\n",
+ ndev->dev.name, ndev->dev.label);
+ return -EIO;
+ }
+
+ return blk2pg(chip, blk) + remain;
+}
+
+/* Top-level NAND block-device init: bring up the controller, scan the
+ * bad-block bitmap, place the logical-to-physical block map in the
+ * dedicated DRAM buffer, register the "nand0" bio device (minus the
+ * blocks reserved for BBT and SGPT) and publish its partitions.
+ * Returns 0 on success or a negative error code.
+ */
+int nand_init_device()
+{
+ struct bdev *dev;
+ u32 lbacnt;
+ int ret;
+
+ ret = mtk_nand_init(&chip);
+ if (ret) {
+ dprintf(CRITICAL, "nand device init error (%d)!\n", ret);
+ return ret;
+ }
+
+ ret = construct_bitmap();
+ if (ret)
+ return ret;
+
+ /* bad block mapping table */
+ nand_devs.blkmap = (unsigned short *)NAND_DRAM_BUF_BAD_MAP_ADDR;
+ memset(nand_devs.blkmap, 0, NAND_DRAM_BUF_BAD_MAP_SIZE);
+
+ /* 4blocks for bbt scan, 4blocks reserved for SGPT */
+ lbacnt = chip->lbacnt - (8 * chip->blocksize / chip->pagesize);
+ dev = nand_reg_bio_dev("nand0", chip->lbasize, lbacnt, NULL);
+ if (!dev)
+ return -EIO;
+
+ /* nand0 */
+ ret = initialize_nand_bdev(dev, 0, lbacnt);
+ if (ret)
+ return ret;
+
+ /* MBR: NOTE(review) this allocates a detached nand_bdev inside
+ * initialize_nand_bdev() that is never registered or freed --
+ * confirm whether only its blkmap side effect is wanted. */
+ ret = initialize_nand_bdev(NULL, 0, 384);
+ if (ret)
+ return ret;
+
+ partition_publish("nand0", 0);
+
+ dprintf(CRITICAL, "nand device init done.\n");
+
+ return 0;
+}
+
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/NOTICE b/src/bsp/lk/platform/mt8512/drivers/nandx/NOTICE
new file mode 100644
index 0000000..1a06ca3
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/NOTICE
@@ -0,0 +1,52 @@
+
+/*
+ * Nandx - Mediatek Common Nand Driver
+ * Copyright (C) 2017 MediaTek Inc.
+ *
+ * Nandx is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ * a) This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
+ *
+ * Alternatively,
+ *
+ * b) Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+####################################################################################################
\ No newline at end of file
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/Nandx.config b/src/bsp/lk/platform/mt8512/drivers/nandx/Nandx.config
new file mode 100644
index 0000000..9871354
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/Nandx.config
@@ -0,0 +1,16 @@
+NANDX_SIMULATOR_SUPPORT := n
+NANDX_CTP_SUPPORT := n
+NANDX_DA_SUPPORT := n
+NANDX_PRELOADER_SUPPORT := n
+NANDX_LK_SUPPORT := y
+NANDX_KERNEL_SUPPORT := n
+NANDX_BROM_SUPPORT := n
+NANDX_BBT_SUPPORT := y
+
+NANDX_NAND_SPI := y
+NANDX_NAND_SLC := n
+NANDX_NAND_MLC := n
+NANDX_NAND_TLC := n
+NANDX_NFI_BASE := y
+NANDX_NFI_ECC := y
+NANDX_NFI_SPI := y
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/Nandx.mk b/src/bsp/lk/platform/mt8512/drivers/nandx/Nandx.mk
new file mode 100644
index 0000000..f930c56
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/Nandx.mk
@@ -0,0 +1,83 @@
+#
+# Copyright (C) 2017 MediaTek Inc.
+# Licensed under either
+# BSD Licence, (see NOTICE for more details)
+# GNU General Public License, version 2.0, (see NOTICE for more details)
+#
+
+nandx_dir := $(shell dirname $(lastword $(MAKEFILE_LIST)))
+include $(nandx_dir)/Nandx.config
+
+ifeq ($(NANDX_SIMULATOR_SUPPORT), y)
+sim-obj :=
+sim-inc :=
+nandx-obj := sim-obj
+nandx-prefix := .
+nandx-postfix := %.o
+sim-inc += -I$(nandx-prefix)/include/internal
+sim-inc += -I$(nandx-prefix)/include/simulator
+endif
+
+ifeq ($(NANDX_CTP_SUPPORT), y)
+nandx-obj := C_SRC_FILES
+nandx-prefix := $(nandx_dir)
+nandx-postfix := %.c
+INC_DIRS += $(nandx_dir)/include/internal
+INC_DIRS += $(nandx_dir)/include/ctp
+endif
+
+ifeq ($(NANDX_DA_SUPPORT), y)
+nandx-obj := obj-y
+nandx-prefix := $(nandx_dir)
+nandx-postfix := %.o
+INCLUDE_PATH += $(TOPDIR)/platform/$(CODE_BASE)/dev/nand/nandx/include/internal
+INCLUDE_PATH += $(TOPDIR)/platform/$(CODE_BASE)/dev/nand/nandx/include/da
+endif
+
+ifeq ($(NANDX_PRELOADER_SUPPORT), y)
+nandx-obj := MOD_SRC
+nandx-prefix := $(nandx_dir)
+nandx-postfix := %.c
+C_OPTION += -I$(MTK_PATH_PLATFORM)/src/drivers/nandx/include/internal
+C_OPTION += -I$(MTK_PATH_PLATFORM)/src/drivers/nandx/include/preloader
+endif
+
+ifeq ($(NANDX_LK_SUPPORT), y)
+nandx-obj := MODULE_SRCS
+nandx-prefix := $(nandx_dir)
+nandx-postfix := %.c
+GLOBAL_INCLUDES += $(nandx_dir)/include/internal
+GLOBAL_INCLUDES += $(nandx_dir)/include/lk
+endif
+
+ifeq ($(NANDX_KERNEL_SUPPORT), y)
+nandx-obj := obj-y
+nandx-prefix := nandx
+nandx-postfix := %.o
+ccflags-y += -I$(nandx_dir)/include/internal
+ccflags-y += -I$(nandx_dir)/include/kernel
+endif
+
+nandx-y :=
+include $(nandx_dir)/core/Nandx.mk
+nandx-target := $(nandx-prefix)/core/$(nandx-postfix)
+$(nandx-obj) += $(patsubst %.c, $(nandx-target), $(nandx-y))
+
+
+nandx-y :=
+include $(nandx_dir)/driver/Nandx.mk
+nandx-target := $(nandx-prefix)/driver/$(nandx-postfix)
+$(nandx-obj) += $(patsubst %.c, $(nandx-target), $(nandx-y))
+
+ifeq ($(NANDX_SIMULATOR_SUPPORT), y)
+cc := gcc
+CFLAGS += $(sim-inc)
+
+.PHONY:nandx
+nandx: $(sim-obj)
+ $(cc) $(sim-obj) -o nandx
+
+.PHONY:clean
+clean:
+ rm -rf $(sim-obj) nandx
+endif
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/README b/src/bsp/lk/platform/mt8512/drivers/nandx/README
new file mode 100644
index 0000000..0feaeae
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/README
@@ -0,0 +1,31 @@
+
+ NAND2.0
+ ===============================
+
+ NAND2.0 is a common nand driver designed for accessing
+different types of NAND (SLC, SPI-NAND, MLC, TLC) on various OSes. This
+driver works on most Mediatek SoCs.
+
+ Although there is already a common nand driver, it does not cover
+SPI-NAND and does not match our IC-Verification requirements. We need
+a driver that can be extended or cut down easily.
+
+ This driver is based on NANDX & SLC. We refactored the structures
+to make them inheritable, and reworked some operation flows,
+principally to add SPI-NAND support.
+
+ This driver's architecture is like:
+
+ Driver @LK/Uboot/DA... |IC verify/other purposes
+ ----------------------------------------------------------------
+ partition | BBM |
+ -------------------------------------- | extend_core
+ nandx_core/core_io |
+ ----------------------------------------------------------------
+ nand_chip/nand_base |
+ -------------------------------------- | extend_nfi
+ nand_device | nfi/nfi_base |
+
+ Any block of above graph can be extended at your will, if you
+want add new feature into this code, please make sure that your code
+would follow the framework, and we will be appreciated about it.
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/core/Nandx.mk b/src/bsp/lk/platform/mt8512/drivers/nandx/core/Nandx.mk
new file mode 100644
index 0000000..8f998fe
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/core/Nandx.mk
@@ -0,0 +1,38 @@
+#
+# Copyright (C) 2017 MediaTek Inc.
+# Licensed under either
+# BSD Licence, (see NOTICE for more details)
+# GNU General Public License, version 2.0, (see NOTICE for more details)
+#
+
+nandx-y += nand_device.c
+nandx-y += nand_base.c
+nandx-y += nand_chip.c
+nandx-y += core_io.c
+
+nandx-header-y += nand_device.h
+nandx-header-y += nand_base.h
+nandx-header-y += nand_chip.h
+nandx-header-y += core_io.h
+nandx-header-y += nfi.h
+
+nandx-$(NANDX_NAND_SPI) += nand/device_spi.c
+nandx-$(NANDX_NAND_SPI) += nand/nand_spi.c
+nandx-$(NANDX_NAND_SLC) += nand/device_slc.c
+nandx-$(NANDX_NAND_SLC) += nand/nand_slc.c
+
+nandx-header-$(NANDX_NAND_SPI) += nand/device_spi.h
+nandx-header-$(NANDX_NAND_SPI) += nand/nand_spi.h
+nandx-header-$(NANDX_NAND_SLC) += nand/device_slc.h
+nandx-header-$(NANDX_NAND_SLC) += nand/nand_slc.h
+
+nandx-$(NANDX_NFI_BASE) += nfi/nfi_base.c
+nandx-$(NANDX_NFI_ECC) += nfi/nfiecc.c
+nandx-$(NANDX_NFI_SPI) += nfi/nfi_spi.c
+
+nandx-header-$(NANDX_NFI_BASE) += nfi/nfi_base.h
+nandx-header-$(NANDX_NFI_BASE) += nfi/nfi_regs.h
+nandx-header-$(NANDX_NFI_ECC) += nfi/nfiecc.h
+nandx-header-$(NANDX_NFI_ECC) += nfi/nfiecc_regs.h
+nandx-header-$(NANDX_NFI_SPI) += nfi/nfi_spi.h
+nandx-header-$(NANDX_NFI_SPI) += nfi/nfi_spi_regs.h
\ No newline at end of file
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/core/core_io.c b/src/bsp/lk/platform/mt8512/drivers/nandx/core/core_io.c
new file mode 100644
index 0000000..cdfb0f3
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/core/core_io.c
@@ -0,0 +1,691 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+/*NOTE: switch cache/multi*/
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "nand_chip.h"
+#include "core_io.h"
+
+static struct nandx_desc *g_nandx;
+
+/* True when @val is a multiple of the chip's ECC sector size. */
+static inline bool is_sector_align(u64 val)
+{
+	return reminder(val, g_nandx->chip->sector_size) ? false : true;
+}
+
+/* True when @val is a multiple of the chip's page size. */
+static inline bool is_page_align(u64 val)
+{
+	return reminder(val, g_nandx->chip->page_size) ? false : true;
+}
+
+/* True when @val is a multiple of the chip's erase block size. */
+static inline bool is_block_align(u64 val)
+{
+	return reminder(val, g_nandx->chip->block_size) ? false : true;
+}
+
+/* Number of ECC sectors that make up one page. */
+static inline u32 page_sectors(void)
+{
+	return div_down(g_nandx->chip->page_size, g_nandx->chip->sector_size);
+}
+
+/* OOB (spare) bytes exposed to the upper layer per sector. */
+static inline u32 sector_oob(void)
+{
+	return div_down(g_nandx->chip->oob_size, page_sectors());
+}
+
+/* Raw sector size on flash: data plus its spare area. */
+static inline u32 sector_padded_size(void)
+{
+	return g_nandx->chip->sector_size + g_nandx->chip->sector_spare_size;
+}
+
+/* Raw page size on flash: all sectors including spare areas. */
+static inline u32 page_padded_size(void)
+{
+	return page_sectors() * sector_padded_size();
+}
+
+/*
+ * Convert a byte offset into a raw (padded) column inside its page:
+ * the in-page column plus the spare bytes of every preceding sector.
+ */
+static inline u32 offset_to_padded_col(u64 offset)
+{
+	struct nandx_desc *nandx = g_nandx;
+	u32 col, sectors;
+
+	col = reminder(offset, nandx->chip->page_size);
+	sectors = div_down(col, nandx->chip->sector_size);
+
+	return col + sectors * nandx->chip->sector_spare_size;
+}
+
+/* Page (row) index that contains byte @offset. */
+static inline u32 offset_to_row(u64 offset)
+{
+	return div_down(offset, g_nandx->chip->page_size);
+}
+
+/* Byte column of @offset inside its page (spare bytes excluded). */
+static inline u32 offset_to_col(u64 offset)
+{
+	return reminder(offset, g_nandx->chip->page_size);
+}
+
+/*
+ * OOB bytes per page as seen by callers: the logical oob size with ECC
+ * on, or the full raw spare area with ECC off.
+ */
+static inline u32 oob_upper_size(void)
+{
+	return g_nandx->ecc_en ? g_nandx->chip->oob_size :
+	       g_nandx->chip->sector_spare_size * page_sectors();
+}
+
+/* True when @val is a multiple of the caller-visible per-page oob size. */
+static inline bool is_upper_oob_align(u64 val)
+{
+	return reminder(val, oob_upper_size()) ? false : true;
+}
+
+/* Fill one nand_ops slot; a macro so _op can be an lvalue struct. */
+#define prepare_op(_op, _row, _col, _len, _data, _oob) \
+	do { \
+		(_op).row = (_row); \
+		(_op).col = (_col); \
+		(_op).len = (_len); \
+		(_op).data = (_data); \
+		(_op).oob = (_oob); \
+	} while (0)
+
+/*
+ * operation_multi - queue one multi-plane operation
+ * @mode: NANDX_READ/NANDX_WRITE/NANDX_ERASE; mixing modes in one batch
+ *        is rejected
+ * @data/@oob: caller buffers for this slot
+ * @offset/@len: chip byte offset and length of this slot
+ *
+ * Returns the result of nandx_sync() when the batch becomes full,
+ * otherwise the number of slots still free (positive), or a negative
+ * error code.
+ */
+static int operation_multi(enum nandx_op_mode mode, u8 *data, u8 *oob,
+			   u64 offset, size_t len)
+{
+	struct nandx_desc *nandx = g_nandx;
+	u32 row = offset_to_row(offset);
+	u32 col = offset_to_padded_col(offset);
+
+	/* first op of a batch latches the mode and resets the queue */
+	if (nandx->mode == NANDX_IDLE) {
+		nandx->mode = mode;
+		nandx->ops_current = 0;
+	} else if (nandx->mode != mode) {
+		pr_info("forbid mixed operations.\n");
+		return -EOPNOTSUPP;
+	}
+
+	prepare_op(nandx->ops[nandx->ops_current], row, col, len, data, oob);
+	nandx->ops_current++;
+
+	if (nandx->ops_current == nandx->ops_multi_len)
+		return nandx_sync();
+
+	return nandx->ops_multi_len - nandx->ops_current;
+}
+
+/*
+ * operation_sequent - run an aligned, whole-unit operation sequence
+ * @mode: NANDX_READ/NANDX_WRITE/NANDX_ERASE
+ * @data: page data buffer, or NULL for oob-only access
+ * @oob: oob buffer, or NULL for data-only access
+ * @offset: chip byte offset (expected unit-aligned by callers)
+ * @len: total byte length
+ *
+ * Missing data/oob buffers are substituted with 0xff-filled scratch
+ * from head_buf so prepare_op() always has valid pointers.
+ * For writes, ops are accumulated until min_write_pages is reached;
+ * the positive return value is the page count still owed by the
+ * caller before the batch is flushed.
+ */
+static int operation_sequent(enum nandx_op_mode mode, u8 *data, u8 *oob,
+			     u64 offset, size_t len)
+{
+	struct nandx_desc *nandx = g_nandx;
+	struct nand_chip *chip = nandx->chip;
+	u32 row = offset_to_row(offset);
+	func_chip_ops chip_ops;
+	u8 *ref_data = data, *ref_oob = oob;
+	int align, ops, row_step;
+	int i, rem;
+
+	align = data ? chip->page_size : oob_upper_size();
+	ops = data ? div_down(len, align) : div_down(len, oob_upper_size());
+	row_step = 1;
+
+	switch (mode) {
+	case NANDX_ERASE:
+		chip_ops = chip->erase_block;
+		align = chip->block_size;
+		ops = div_down(len, align);
+		row_step = chip->block_pages;
+		break;
+
+	case NANDX_READ:
+		chip_ops = chip->read_page;
+		break;
+
+	case NANDX_WRITE:
+		chip_ops = chip->write_page;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if (!data) {
+		ref_data = nandx->head_buf;
+		memset(ref_data, 0xff, chip->page_size);
+	}
+
+	if (!oob) {
+		ref_oob = nandx->head_buf + chip->page_size;
+		memset(ref_oob, 0xff, oob_upper_size());
+	}
+
+	for (i = 0; i < ops; i++) {
+		prepare_op(nandx->ops[nandx->ops_current],
+			   row + i * row_step, 0, align, ref_data, ref_oob);
+		nandx->ops_current++;
+		/* if data or oob is NULL, nandx->head_buf (resp.
+		 * nandx->head_buf + chip->page_size) is not consumed by
+		 * the chip op, so it is safe to reuse it for every slot.
+		 */
+		ref_data = data ? ref_data + chip->page_size : nandx->head_buf;
+		ref_oob = oob ? ref_oob + oob_upper_size() :
+			  nandx->head_buf + chip->page_size;
+	}
+
+	if (nandx->mode == NANDX_WRITE) {
+		rem = reminder(nandx->ops_current, nandx->min_write_pages);
+		if (rem)
+			return nandx->min_write_pages - rem;
+	}
+
+	nandx->ops_current = 0;
+	return chip_ops(chip, nandx->ops, ops);
+}
+
+/*
+ * read_pages - read an arbitrary byte range, bouncing unaligned edges
+ * @data: destination buffer (may be NULL: oob-only, delegated to
+ *        operation_sequent)
+ * @oob: oob destination, or NULL to discard oob into scratch
+ * @offset/@len: chip byte range
+ *
+ * The range is split into head (partial first page, read via
+ * head_buf), whole-page body, and tail (partial last page, read via
+ * tail_buf); the partial parts are copied out after the chip read.
+ */
+static int read_pages(u8 *data, u8 *oob, u64 offset, size_t len)
+{
+	struct nandx_desc *nandx = g_nandx;
+	struct nand_chip *chip = nandx->chip;
+	struct nandx_split64 split = {0};
+	u8 *ref_data = data, *ref_oob;
+	u32 row, col;
+	int ret = 0, i, ops;
+	u32 head_offset = 0;
+	u64 val;
+
+	if (!data)
+		return operation_sequent(NANDX_READ, NULL, oob, offset, len);
+
+	ref_oob = oob ? oob : nandx->head_buf + chip->page_size;
+
+	/* NOTE(review): `val` is passed uninitialized — presumably
+	 * nandx_split() is a macro using it as scratch output; confirm
+	 * against nandx_util.h.
+	 */
+	nandx_split(&split, offset, len, val, chip->page_size);
+
+	if (split.head_len) {
+		row = offset_to_row(split.head);
+		col = offset_to_col(split.head);
+		prepare_op(nandx->ops[nandx->ops_current], row, 0,
+			   chip->page_size,
+			   nandx->head_buf, ref_oob);
+		nandx->ops_current++;
+
+		head_offset = col;
+
+		ref_data += split.head_len;
+		ref_oob = oob ? ref_oob + oob_upper_size() :
+			  nandx->head_buf + chip->page_size;
+	}
+
+	if (split.body_len) {
+		ops = div_down(split.body_len, chip->page_size);
+		row = offset_to_row(split.body);
+		for (i = 0; i < ops; i++) {
+			prepare_op(nandx->ops[nandx->ops_current],
+				   row + i, 0, chip->page_size,
+				   ref_data, ref_oob);
+			nandx->ops_current++;
+			ref_data += chip->page_size;
+			ref_oob = oob ? ref_oob + oob_upper_size() :
+				  nandx->head_buf + chip->page_size;
+		}
+	}
+
+	if (split.tail_len) {
+		row = offset_to_row(split.tail);
+		prepare_op(nandx->ops[nandx->ops_current], row, 0,
+			   chip->page_size, nandx->tail_buf, ref_oob);
+		nandx->ops_current++;
+	}
+
+	ret = chip->read_page(chip, nandx->ops, nandx->ops_current);
+
+	/* copy the requested slices out of the bounce buffers */
+	if (split.head_len)
+		memcpy(data, nandx->head_buf + head_offset, split.head_len);
+	if (split.tail_len)
+		memcpy(ref_data, nandx->tail_buf, split.tail_len);
+
+	nandx->ops_current = 0;
+	return ret;
+}
+
+/*
+ * nandx_read - public entry: read data and/or oob from the chip
+ * @data: data destination, or NULL for an oob-only read
+ * @oob: oob destination, or NULL
+ * @offset/@len: chip byte range
+ *
+ * Returns 0 on success, a positive free-slot count in multi mode, or
+ * a negative error code.
+ */
+int nandx_read(u8 *data, u8 *oob, u64 offset, size_t len)
+{
+	struct nandx_desc *nandx = g_nandx;
+
+	if (!len || len > nandx->info.total_size)
+		return -EINVAL;
+	if (div_up(len, nandx->chip->page_size) > nandx->ops_len)
+		return -EINVAL;
+	if (!data && !oob)
+		return -EINVAL;
+	/*
+	 * as designed, oob does not support partial read, and the
+	 * length of the oob buf must be oob-size aligned
+	 */
+	if (!data && !is_upper_oob_align(len))
+		return -EINVAL;
+
+	/* consistency fix: use the local descriptor (as nandx_write
+	 * does) instead of re-reading the global g_nandx
+	 */
+	if (nandx->multi_en) {
+		/* as designed, there are only 2 bufs for partial read;
+		 * allowing partial multi reads would need more
+		 */
+		if (!is_sector_align(offset))
+			return -EINVAL;
+		if (data && !is_sector_align(len))
+			return -EINVAL;
+		return operation_multi(NANDX_READ, data, oob, offset, len);
+	}
+
+	nandx->ops_current = 0;
+	nandx->mode = NANDX_IDLE;
+	return read_pages(data, oob, offset, len);
+}
+
+/*
+ * write_pages - write an arbitrary byte range, padding unaligned edges
+ * @data: source buffer (may be NULL: oob-only, delegated to
+ *        operation_sequent)
+ * @oob: oob source, or NULL to write 0xff oob
+ * @offset/@len: chip byte range
+ *
+ * Head/tail partial pages are padded with 0xff in the bounce buffers
+ * before programming. A positive return is the number of pages the
+ * caller must still supply before min_write_pages is met and the
+ * batch is flushed.
+ */
+static int write_pages(u8 *data, u8 *oob, u64 offset, size_t len)
+{
+	struct nandx_desc *nandx = g_nandx;
+	struct nand_chip *chip = nandx->chip;
+	struct nandx_split64 split = {0};
+	int ret, rem, i, ops;
+	u32 row, col;
+	u8 *ref_oob = oob;
+	u64 val;
+
+	nandx->mode = NANDX_WRITE;
+
+	if (!data)
+		return operation_sequent(NANDX_WRITE, NULL, oob, offset, len);
+
+	if (!oob) {
+		ref_oob = nandx->head_buf + chip->page_size;
+		memset(ref_oob, 0xff, oob_upper_size());
+	}
+
+	/* NOTE(review): `val` is scratch for the nandx_split() macro —
+	 * confirm against nandx_util.h.
+	 */
+	nandx_split(&split, offset, len, val, chip->page_size);
+
+	/*NOTE: slc can support sector write, here copy too many data.*/
+	if (split.head_len) {
+		row = offset_to_row(split.head);
+		col = offset_to_col(split.head);
+		memset(nandx->head_buf, 0xff, page_padded_size());
+		memcpy(nandx->head_buf + col, data, split.head_len);
+		prepare_op(nandx->ops[nandx->ops_current], row, 0,
+			   chip->page_size, nandx->head_buf, ref_oob);
+		nandx->ops_current++;
+
+		data += split.head_len;
+		ref_oob = oob ? ref_oob + oob_upper_size() :
+			  nandx->head_buf + chip->page_size;
+	}
+
+	if (split.body_len) {
+		row = offset_to_row(split.body);
+		ops = div_down(split.body_len, chip->page_size);
+		for (i = 0; i < ops; i++) {
+			prepare_op(nandx->ops[nandx->ops_current],
+				   row + i, 0, chip->page_size, data, ref_oob);
+			nandx->ops_current++;
+			data += chip->page_size;
+			ref_oob = oob ? ref_oob + oob_upper_size() :
+				  nandx->head_buf + chip->page_size;
+		}
+	}
+
+	if (split.tail_len) {
+		row = offset_to_row(split.tail);
+		memset(nandx->tail_buf, 0xff, page_padded_size());
+		memcpy(nandx->tail_buf, data, split.tail_len);
+		prepare_op(nandx->ops[nandx->ops_current], row, 0,
+			   chip->page_size, nandx->tail_buf, ref_oob);
+		nandx->ops_current++;
+	}
+
+	/* not enough pages queued for a minimum program unit yet */
+	rem = reminder(nandx->ops_current, nandx->min_write_pages);
+	if (rem)
+		return nandx->min_write_pages - rem;
+
+	ret = chip->write_page(chip, nandx->ops, nandx->ops_current);
+
+	nandx->ops_current = 0;
+	nandx->mode = NANDX_IDLE;
+	return ret;
+}
+
+/*
+ * nandx_write - public entry: program data and/or oob to the chip
+ * @data: data source, or NULL for an oob-only write
+ * @oob: oob source, or NULL
+ * @offset/@len: chip byte range
+ *
+ * Returns 0 on success, a positive count of pages/slots still owed,
+ * or a negative error code.
+ */
+int nandx_write(u8 *data, u8 *oob, u64 offset, size_t len)
+{
+	struct nandx_desc *nandx = g_nandx;
+
+	/* reject zero/oversized lengths and batches beyond the ops table */
+	if (!len || len > nandx->info.total_size)
+		return -EINVAL;
+	if (div_up(len, nandx->chip->page_size) > nandx->ops_len)
+		return -EINVAL;
+	/* need at least one buffer; oob-only writes must be a whole
+	 * multiple of the caller-visible oob unit
+	 */
+	if (!data && (!oob || !is_upper_oob_align(len)))
+		return -EINVAL;
+
+	if (!nandx->multi_en)
+		return write_pages(data, oob, offset, len);
+
+	/* multi-plane path accepts whole pages only */
+	if (!is_page_align(offset))
+		return -EINVAL;
+	if (data && !is_page_align(len))
+		return -EINVAL;
+
+	return operation_multi(NANDX_WRITE, data, oob, offset, len);
+}
+
+/*
+ * nandx_erase - public entry: erase a block-aligned byte range
+ * @offset/@len: chip byte range; both must be block aligned
+ *
+ * Returns 0 on success, a positive free-slot count in multi mode, or
+ * a negative error code.
+ */
+int nandx_erase(u64 offset, size_t len)
+{
+	struct nandx_desc *nandx = g_nandx;
+
+	if (!len || len > nandx->info.total_size)
+		return -EINVAL;
+	if (div_down(len, nandx->chip->block_size) > nandx->ops_len)
+		return -EINVAL;
+	if (!is_block_align(offset) || !is_block_align(len))
+		return -EINVAL;
+
+	/* consistency fix: use the local descriptor (as nandx_write
+	 * does) instead of re-reading the global g_nandx
+	 */
+	if (nandx->multi_en)
+		return operation_multi(NANDX_ERASE, NULL, NULL, offset, len);
+
+	nandx->ops_current = 0;
+	nandx->mode = NANDX_IDLE;
+	return operation_sequent(NANDX_ERASE, NULL, NULL, offset, len);
+}
+
+/*
+ * nandx_sync - flush whatever operations are queued
+ *
+ * In multi mode the queue length must be an exact multiple of
+ * ops_multi_len. In sequential write mode a short final batch is
+ * padded with 0xff dummy pages up to min_write_pages. Always leaves
+ * the descriptor back in the NANDX_IDLE state.
+ */
+int nandx_sync(void)
+{
+	struct nandx_desc *nandx = g_nandx;
+	struct nand_chip *chip = nandx->chip;
+	func_chip_ops chip_ops;
+	int ret, i, rem;
+
+	if (!nandx->ops_current)
+		return 0;
+
+	rem = reminder(nandx->ops_current, nandx->ops_multi_len);
+	if (nandx->multi_en && rem) {
+		/* incomplete multi-plane batch cannot be issued */
+		ret = -EIO;
+		goto error;
+	}
+
+	switch (nandx->mode) {
+	case NANDX_IDLE:
+		return 0;
+	case NANDX_ERASE:
+		chip_ops = chip->erase_block;
+		break;
+	case NANDX_READ:
+		chip_ops = chip->read_page;
+		break;
+	case NANDX_WRITE:
+		chip_ops = chip->write_page;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	rem = reminder(nandx->ops_current, nandx->min_write_pages);
+	if (!nandx->multi_en && nandx->mode == NANDX_WRITE && rem) {
+		/* in one program pass, only 2 pages may be partially
+		 * written; the 1st buf is assumed in use, so pad with
+		 * the (unused) 2nd buf.
+		 */
+		memset(nandx->tail_buf, 0xff,
+		       chip->page_size + oob_upper_size());
+		for (i = 0; i < rem; i++) {
+			prepare_op(nandx->ops[nandx->ops_current],
+				   nandx->ops[nandx->ops_current - 1].row + 1,
+				   0, chip->page_size, nandx->tail_buf,
+				   nandx->tail_buf + chip->page_size);
+			nandx->ops_current++;
+		}
+	}
+
+	ret = chip_ops(nandx->chip, nandx->ops, nandx->ops_current);
+
+error:
+	nandx->mode = NANDX_IDLE;
+	nandx->ops_current = 0;
+
+	return ret;
+}
+
+/*
+ * nandx_ioctl - control interface for the nandx core
+ * @cmd: CORE_CTRL_NAND_INFO is answered locally; anything else is
+ *       forwarded to the chip layer
+ * @arg: command-specific in/out argument
+ *
+ * The cached multi_en/ecc_en flags mirror the chip state and are only
+ * updated when the chip accepted the command.
+ */
+int nandx_ioctl(int cmd, void *arg)
+{
+	struct nandx_desc *nandx = g_nandx;
+	struct nand_chip *chip = nandx->chip;
+	int ret;
+
+	if (cmd == CORE_CTRL_NAND_INFO) {
+		*(struct nandx_info *)arg = nandx->info;
+		return 0;
+	}
+
+	ret = chip->chip_ctrl(chip, cmd, arg);
+	if (ret)
+		return ret;
+
+	if (cmd == CHIP_CTRL_OPS_MULTI)
+		nandx->multi_en = *(bool *)arg;
+	else if (cmd == NFI_CTRL_ECC)
+		nandx->ecc_en = *(bool *)arg;
+
+	return 0;
+}
+
+/*
+ * nandx_is_bad_block - ask the chip layer whether the block holding
+ * @offset is marked bad; uses head_buf as scratch for the check read.
+ */
+bool nandx_is_bad_block(u64 offset)
+{
+	struct nandx_desc *nandx = g_nandx;
+
+	prepare_op(nandx->ops[0], offset_to_row(offset), 0,
+		   nandx->chip->page_size, nandx->head_buf,
+		   nandx->head_buf + nandx->chip->page_size);
+
+	return nandx->chip->is_bad_block(nandx->chip, nandx->ops, 1);
+}
+
+/* Forward a suspend request to the chip layer. */
+int nandx_suspend(void)
+{
+	return g_nandx->chip->suspend(g_nandx->chip);
+}
+
+/* Forward a resume request to the chip layer. */
+int nandx_resume(void)
+{
+	return g_nandx->chip->resume(g_nandx->chip);
+}
+
+/*
+ * nandx_init - bring up the nand chip and build the global descriptor
+ * @res: nfi hardware resource description
+ *
+ * Returns 0 on success or a negative error code. On failure nothing
+ * is leaked and g_nandx is left NULL.
+ */
+int nandx_init(struct nfi_resource *res)
+{
+	struct nand_chip *chip;
+	struct nandx_desc *nandx;
+	int ret = 0;
+
+	if (!res)
+		return -EINVAL;
+
+	chip = nand_chip_init(res);
+	if (!chip) {
+		pr_err("nand chip init fail.\n");
+		return -EFAULT;
+	}
+
+	nandx = (struct nandx_desc *)mem_alloc(1, sizeof(struct nandx_desc));
+	if (!nandx)
+		return -ENOMEM;
+
+	/* must be set early: the page/sector size helpers used below
+	 * read g_nandx
+	 */
+	g_nandx = nandx;
+
+	nandx->chip = chip;
+	nandx->min_write_pages = chip->min_program_pages;
+	nandx->ops_multi_len = nandx->min_write_pages * chip->plane_num;
+	nandx->ops_len = chip->block_pages * chip->plane_num;
+	nandx->ops = mem_alloc(1, sizeof(struct nand_ops) * nandx->ops_len);
+	if (!nandx->ops) {
+		ret = -ENOMEM;
+		goto ops_error;
+	}
+
+#if NANDX_BULK_IO_USE_DRAM
+	nandx->head_buf = NANDX_CORE_BUF_ADDR;
+#else
+	nandx->head_buf = mem_alloc(2, page_padded_size());
+#endif
+	if (!nandx->head_buf) {
+		ret = -ENOMEM;
+		goto buf_error;
+	}
+	nandx->tail_buf = nandx->head_buf + page_padded_size();
+	memset(nandx->head_buf, 0xff, 2 * page_padded_size());
+	nandx->multi_en = false;
+	nandx->ecc_en = false;
+	nandx->ops_current = 0;
+	nandx->mode = NANDX_IDLE;
+
+	nandx->info.max_io_count = nandx->ops_len;
+	nandx->info.min_write_pages = nandx->min_write_pages;
+	nandx->info.plane_num = chip->plane_num;
+	nandx->info.oob_size = chip->oob_size;
+	nandx->info.page_parity_size = chip->sector_spare_size * page_sectors();
+	nandx->info.page_size = chip->page_size;
+	nandx->info.block_size = chip->block_size;
+	nandx->info.total_size = chip->block_size * chip->block_num;
+	nandx->info.fdm_ecc_size = chip->fdm_ecc_size;
+	nandx->info.fdm_reg_size = chip->fdm_reg_size;
+	nandx->info.ecc_strength = chip->ecc_strength;
+	nandx->info.sector_size = chip->sector_size;
+
+	return 0;
+
+buf_error:
+	/* fix: the old code freed the (NULL) head_buf here and leaked
+	 * nandx->ops
+	 */
+	mem_free(nandx->ops);
+ops_error:
+	/* fix: don't leave g_nandx dangling on the freed descriptor */
+	g_nandx = NULL;
+	mem_free(nandx);
+
+	return ret;
+}
+
+/* Tear down the chip layer and release all descriptor memory. */
+void nandx_exit(void)
+{
+	nand_chip_exit(g_nandx->chip);
+#if !NANDX_BULK_IO_USE_DRAM
+	mem_free(g_nandx->head_buf);
+#endif
+	mem_free(g_nandx->ops);
+	mem_free(g_nandx);
+}
+
+/*
+ * dump_buf - hex-dump @len bytes of @buf to the log, 16 per line
+ *
+ * fix: print the buffer address with %p; passing a pointer for %X is
+ * undefined behavior and truncates the address on 64-bit builds.
+ */
+static void dump_buf(u8 *buf, u32 len)
+{
+	u32 i;
+
+	pr_info("dump buf@%p start", buf);
+	for (i = 0; i < len; i++) {
+		if (!reminder(i, 16))
+			pr_info("\n0x");
+		pr_info("%x ", buf[i]);
+	}
+	pr_info("\ndump buf done.\n");
+}
+
+/*
+ * nandx_unit_test - erase/write/read-back self test on one block
+ * @offset: start of the block under test (the whole block is erased)
+ * @len: bytes to program and verify; must not exceed one block
+ *
+ * Writes pseudo-random data, verifies read-back, erases again and
+ * verifies the block reads as 0xff. Returns 0 on success, negative
+ * error code otherwise. Destructive: the block's contents are lost.
+ */
+int nandx_unit_test(u64 offset, size_t len)
+{
+	u8 *src_buf, *dst_buf;
+	size_t i;
+	int ret;
+
+	if (!len || len > g_nandx->chip->block_size)
+		return -EINVAL;
+
+#if NANDX_BULK_IO_USE_DRAM
+	src_buf = NANDX_UT_SRC_ADDR;
+	dst_buf = NANDX_UT_DST_ADDR;
+
+#else
+	src_buf = mem_alloc(1, len);
+	if (!src_buf)
+		return -ENOMEM;
+	dst_buf = mem_alloc(1, len);
+	if (!dst_buf) {
+		mem_free(src_buf);
+		return -ENOMEM;
+	}
+#endif
+
+	pr_debug("%s: src_buf address 0x%x, dst_buf address 0x%x\n",
+		 __func__, (int)((unsigned long)src_buf),
+		 (int)((unsigned long)dst_buf));
+
+	/* fill random data in source buffer; not truly random — it is
+	 * the current time modulo 255
+	 */
+	for (i = 0; i < len; i++)
+		src_buf[i] = (u8)reminder(get_current_time_us(), 255);
+
+	ret = nandx_erase(offset, g_nandx->chip->block_size);
+	if (ret < 0) {
+		pr_err("erase fail with ret %d\n", ret);
+		goto error;
+	}
+
+	ret = nandx_write(src_buf, NULL, offset, len);
+	if (ret < 0) {
+		pr_err("write fail with ret %d\n", ret);
+		goto error;
+	}
+
+	ret = nandx_read(dst_buf, NULL, offset, len);
+	if (ret < 0) {
+		pr_err("read fail with ret %d\n", ret);
+		goto error;
+	}
+
+	for (i = 0; i < len; i++) {
+		if (dst_buf[i] != src_buf[i]) {
+			pr_err("read after write, check fail\n");
+			pr_err("dst_buf should be same as src_buf\n");
+			ret = -EIO;
+			dump_buf(src_buf, len);
+			dump_buf(dst_buf, len);
+			goto error;
+		}
+	}
+
+	ret = nandx_erase(offset, g_nandx->chip->block_size);
+	if (ret < 0) {
+		pr_err("erase fail with ret %d\n", ret);
+		goto error;
+	}
+
+	ret = nandx_read(dst_buf, NULL, offset, len);
+	if (ret < 0) {
+		pr_err("read fail with ret %d\n", ret);
+		goto error;
+	}
+
+	for (i = 0; i < len; i++) {
+		if (dst_buf[i] != 0xff) {
+			pr_err("read after erase, check fail\n");
+			pr_err("all data should be 0xff\n");
+			ret = -ENANDERASE;
+			dump_buf(dst_buf, len);
+			goto error;
+		}
+	}
+
+error:
+	/* success also exits through this label with ret >= 0 */
+#if !NANDX_BULK_IO_USE_DRAM
+	mem_free(src_buf);
+	mem_free(dst_buf);
+#endif
+	return ret;
+}
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/core/core_io.h b/src/bsp/lk/platform/mt8512/drivers/nandx/core/core_io.h
new file mode 100644
index 0000000..edcb609
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/core/core_io.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __CORE_IO_H__
+#define __CORE_IO_H__
+
+/* Chip-layer batch operation: (chip, ops array, op count) -> status. */
+typedef int (*func_chip_ops)(struct nand_chip *, struct nand_ops *,
+			     int);
+
+/* Current batch mode of the descriptor; IDLE means no ops queued. */
+enum nandx_op_mode {
+	NANDX_IDLE,
+	NANDX_WRITE,
+	NANDX_READ,
+	NANDX_ERASE
+};
+
+/*
+ * nandx_desc - core state shared by all nandx_* entry points
+ * @chip: chip-layer handle
+ * @info: cached geometry exported via CORE_CTRL_NAND_INFO
+ * @mode: mode of the currently queued batch
+ * @multi_en: multi-plane batching enabled
+ * @ecc_en: in-controller ecc enabled (affects visible oob size)
+ * @ops: queued operation slots, @ops_len entries
+ * @ops_multi_len: slots per full multi-plane batch
+ * @ops_current: number of slots currently queued
+ * @min_write_pages: minimum pages per program burst
+ * @head_buf/@tail_buf: bounce buffers for unaligned head/tail pages
+ */
+struct nandx_desc {
+	struct nand_chip *chip;
+	struct nandx_info info;
+	enum nandx_op_mode mode;
+
+	bool multi_en;
+	bool ecc_en;
+
+	struct nand_ops *ops;
+	int ops_len;
+	int ops_multi_len;
+	int ops_current;
+	int min_write_pages;
+
+	u8 *head_buf;
+	u8 *tail_buf;
+};
+
+#endif /* __CORE_IO_H__ */
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand/device_spi.c b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand/device_spi.c
new file mode 100644
index 0000000..f7e8d69
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand/device_spi.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#include "nandx_util.h"
+#include "../nand_device.h"
+#include "device_spi.h"
+
+/* spi nand basic commands; -1 marks commands this bus has no use for */
+static struct nand_cmds spi_cmds = {
+	.reset = 0xff,
+	.read_id = 0x9f,
+	.read_status = 0x0f,
+	.read_param_page = 0x03,
+	.set_feature = 0x1f,
+	.get_feature = 0x0f,
+	.read_1st = 0x13,
+	.read_2nd = -1,
+	.random_out_1st = 0x03,
+	.random_out_2nd = -1,
+	.program_1st = 0x02,
+	.program_2nd = 0x10,
+	.erase_1st = 0xd8,
+	.erase_2nd = -1,
+	.read_cache = 0x30,
+	.read_cache_last = 0x3f,
+	.program_cache = 0x02
+};
+
+/* spi nand extend commands (die select, write enable) */
+static struct spi_extend_cmds spi_extend_cmds = {
+	.die_select = 0xc2,
+	.write_enable = 0x06
+};
+
+/* start bit of each field within the row/column address words */
+static struct nand_addressing spi_addressing = {
+	.row_bit_start = 0,
+	.block_bit_start = 0,
+	.plane_bit_start = 12,
+	.lun_bit_start = 0,
+};
+
+/* spi nand endurance: P/E cycles, required ecc bits, bitflip budget */
+static struct nand_endurance spi_endurance = {
+	.pe_cycle = 100000,
+	.ecc_req = 1,
+	.max_bitflips = 1
+};
+
+/* status-register bit masks: array_busy, write_protect, erase_fail,
+ * program_fail
+ */
+static struct nand_status spi_status[] = {
+	{.array_busy = BIT(0),
+	 .write_protect = BIT(1),
+	 .erase_fail = BIT(2),
+	 .program_fail = BIT(3)}
+};
+
+/* array timings, all in microseconds */
+static struct nand_array_timing spi_array_timing = {
+	.tRST = 500,
+	.tWHR = 1,
+	.tR = 25,
+	.tRCBSY = 25,
+	.tFEAT = 1,
+	.tPROG = 600,
+	.tPCBSY = 600,
+	.tBERS = 10000,
+	.tDBSY = 1
+};
+
+/* spi nand device table
+ * NOTE(review): the trailing "NO-DEVICE" entry appears to act as the
+ * fallback when id matching fails — keep it last; confirm in the
+ * identification code in nand_base.
+ */
+static struct device_spi spi_nand[] = {
+	{
+		NAND_DEVICE("W25N01GV",
+			    NAND_PACK_ID(0xef, 0xaa, 0x21, 0, 0, 0, 0, 0),
+			    3, 0, 3, 3,
+			    1, 1, 1, 1024, KB(128), KB(2), 64, 1,
+			    &spi_cmds, &spi_addressing, &spi_status[0],
+			    &spi_endurance, &spi_array_timing),
+		{
+			NAND_SPI_PROTECT(0xa0, 1, 2, 6),
+			NAND_SPI_CONFIG(0xb0, 4, 6, 0),
+			NAND_SPI_STATUS(0xc0, 4, 5),
+			NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
+		},
+		&spi_extend_cmds, 0xff, 0xff
+	},
+	{
+		NAND_DEVICE("MX35LF1G",
+			    NAND_PACK_ID(0xc2, 0x12, 0x21, 0, 0, 0, 0, 0),
+			    2, 0, 3, 3,
+			    1, 1, 1, 1024, KB(128), KB(2), 64, 1,
+			    &spi_cmds, &spi_addressing, &spi_status[0],
+			    &spi_endurance, &spi_array_timing),
+		{
+			NAND_SPI_PROTECT(0xa0, 1, 2, 6),
+			NAND_SPI_CONFIG(0xb0, 4, 6, 1),
+			NAND_SPI_STATUS(0xc0, 4, 5),
+			NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
+		},
+		&spi_extend_cmds, 0xff, 0xff
+	},
+	{
+		NAND_DEVICE("MT29F4G01ABAFDWB",
+			    NAND_PACK_ID(0x2c, 0x34, 0, 0, 0, 0, 0, 0),
+			    2, 0, 3, 3,
+			    1, 1, 1, 2048, KB(256), KB(4), 256, 1,
+			    &spi_cmds, &spi_addressing, &spi_status[0],
+			    &spi_endurance, &spi_array_timing),
+		{
+			NAND_SPI_PROTECT(0xa0, 1, 2, 6),
+			NAND_SPI_CONFIG(0xb0, 4, 6, 1),
+			NAND_SPI_STATUS(0xc0, 4, 5),
+			NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
+		},
+		&spi_extend_cmds, 0xff, 0xff
+	},
+	{
+		NAND_DEVICE("GD5F4GQ4UB",
+			    NAND_PACK_ID(0xc8, 0xd4, 0, 0, 0, 0, 0, 0),
+			    2, 0, 3, 3,
+			    1, 1, 1, 2048, KB(256), KB(4), 256, 1,
+			    &spi_cmds, &spi_addressing, &spi_status[0],
+			    &spi_endurance, &spi_array_timing),
+		{
+			NAND_SPI_PROTECT(0xa0, 1, 2, 6),
+			NAND_SPI_CONFIG(0xb0, 4, 6, 1),
+			NAND_SPI_STATUS(0xc0, 4, 5),
+			NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
+		},
+		&spi_extend_cmds, 0xff, 0xff
+	},
+	{
+		NAND_DEVICE("TC58CVG2S0HRAIJ",
+			    NAND_PACK_ID(0x98, 0xED, 0x51, 0, 0, 0, 0, 0),
+			    3, 0, 3, 3,
+			    1, 1, 1, 2048, KB(256), KB(4), 256, 1,
+			    &spi_cmds, &spi_addressing, &spi_status[0],
+			    &spi_endurance, &spi_array_timing),
+		{
+			NAND_SPI_PROTECT(0xa0, 1, 2, 6),
+			NAND_SPI_CONFIG(0xb0, 4, 6, 1),
+			NAND_SPI_STATUS(0xc0, 4, 5),
+			NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
+		},
+		&spi_extend_cmds, 0xff, 0xff
+	},
+
+	{
+		NAND_DEVICE("NO-DEVICE",
+			    NAND_PACK_ID(0, 0, 0, 0, 0, 0, 0, 0), 0, 0, 0, 0,
+			    0, 0, 0, 0, 0, 0, 0, 1,
+			    &spi_cmds, &spi_addressing, &spi_status[0],
+			    &spi_endurance, &spi_array_timing),
+		{
+			NAND_SPI_PROTECT(0xa0, 1, 2, 6),
+			NAND_SPI_CONFIG(0xb0, 4, 6, 0),
+			NAND_SPI_STATUS(0xc0, 4, 5),
+			NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
+		},
+		&spi_extend_cmds, 0xff, 0xff
+	}
+};
+
+/*
+ * spi_replace_rx_cmds - map an rx bus-mode index to its read opcode
+ * @mode: rx mode index
+ *
+ * fix: table made static const (no per-call stack init) and the index
+ * bounded — out-of-range modes fall back to the plain read (0x03)
+ * instead of reading past the array (undefined behavior).
+ */
+u8 spi_replace_rx_cmds(u8 mode)
+{
+	static const u8 rx_replace_cmds[] = {0x03, 0x3b, 0x6b, 0xbb, 0xeb};
+
+	if (mode >= sizeof(rx_replace_cmds) / sizeof(rx_replace_cmds[0]))
+		return rx_replace_cmds[0];
+
+	return rx_replace_cmds[mode];
+}
+
+/*
+ * spi_replace_tx_cmds - map a tx bus-mode index to its program opcode
+ * @mode: tx mode index
+ *
+ * fix: static const table and bounded index; out-of-range modes fall
+ * back to the plain program load (0x02) instead of reading past the
+ * array.
+ */
+u8 spi_replace_tx_cmds(u8 mode)
+{
+	static const u8 tx_replace_cmds[] = {0x02, 0x32};
+
+	if (mode >= sizeof(tx_replace_cmds) / sizeof(tx_replace_cmds[0]))
+		return tx_replace_cmds[0];
+
+	return tx_replace_cmds[mode];
+}
+
+/*
+ * spi_replace_rx_col_cycle - column address cycles for an rx mode
+ * @mode: rx mode index
+ *
+ * fix: static const table and bounded index; out-of-range modes use
+ * the default 3 cycles instead of reading past the array.
+ */
+u8 spi_replace_rx_col_cycle(u8 mode)
+{
+	static const u8 rx_replace_col_cycle[] = {3, 3, 3, 3, 4};
+
+	if (mode >=
+	    sizeof(rx_replace_col_cycle) / sizeof(rx_replace_col_cycle[0]))
+		return rx_replace_col_cycle[0];
+
+	return rx_replace_col_cycle[mode];
+}
+
+/*
+ * spi_replace_tx_col_cycle - column address cycles for a tx mode
+ * @mode: tx mode index
+ *
+ * fix: static const table and bounded index; out-of-range modes use
+ * the default 2 cycles instead of reading past the array.
+ */
+u8 spi_replace_tx_col_cycle(u8 mode)
+{
+	static const u8 tx_replace_col_cycle[] = {2, 2};
+
+	if (mode >=
+	    sizeof(tx_replace_col_cycle) / sizeof(tx_replace_col_cycle[0]))
+		return tx_replace_col_cycle[0];
+
+	return tx_replace_col_cycle[mode];
+}
+
+/*
+ * nand_get_device - fetch an entry of the spi device table
+ * @index: table index from device identification
+ *
+ * fix: clamp out-of-range indexes to the trailing NO-DEVICE entry
+ * instead of indexing past the table (undefined behavior).
+ */
+struct nand_device *nand_get_device(int index)
+{
+	int count = (int)(sizeof(spi_nand) / sizeof(spi_nand[0]));
+
+	if (index < 0 || index >= count)
+		index = count - 1;
+
+	return &spi_nand[index].dev;
+}
+
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand/device_spi.h b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand/device_spi.h
new file mode 100644
index 0000000..1676b61
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand/device_spi.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __DEVICE_SPI_H__
+#define __DEVICE_SPI_H__
+
+/*
+ * extend commands beyond the generic nand command set
+ * @die_select: select-die opcode (multi-die parts)
+ * @write_enable: write-enable opcode, required before any program or
+ *                erase; the device clears it automatically when the
+ *                operation completes
+ * Fields are short so -1 can mark an unsupported command.
+ */
+struct spi_extend_cmds {
+	short die_select;
+	short write_enable;
+};
+
+/*
+ * protection feature register layout
+ * @addr: feature register address
+ * @wp_en_bit: write protection enable bit position
+ * @bp_start_bit: first bit of the block-protection field
+ * @bp_end_bit: last bit of the block-protection field
+ */
+struct feature_protect {
+	u8 addr;
+	u8 wp_en_bit;
+	u8 bp_start_bit;
+	u8 bp_end_bit;
+};
+
+/*
+ * configuration feature register layout
+ * @addr: feature register address
+ * @ecc_en_bit: on-die ecc enable bit position
+ * @otp_en_bit: otp access mode enable bit position
+ * @need_qe: nonzero when the part needs a quad-enable bit set for
+ *           quad io (per device table entries)
+ */
+struct feature_config {
+	u8 addr;
+	u8 ecc_en_bit;
+	u8 otp_en_bit;
+	u8 need_qe;
+};
+
+/*
+ * status feature register layout
+ * @addr: feature register address
+ * @ecc_start_bit: first bit of the ecc error-bit-count field
+ * @ecc_end_bit: last bit of the ecc error-bit-count field
+ * note:
+ * operation status bits (e.g. array busy) live in struct nand_status
+ */
+struct feature_status {
+	u8 addr;
+	u8 ecc_start_bit;
+	u8 ecc_end_bit;
+};
+
+/*
+ * character feature register layout
+ * @addr: feature register address (0xff in the table = not present)
+ * @die_sel_bit: die select bit position
+ * @drive_start_bit: first bit of the drive-strength field
+ * @drive_end_bit: last bit of the drive-strength field
+ */
+struct feature_character {
+	u8 addr;
+	u8 die_sel_bit;
+	u8 drive_start_bit;
+	u8 drive_end_bit;
+};
+
+/*
+ * spi feature register set of one device
+ * @protect: protection feature register
+ * @config: configuration feature register
+ * @status: status feature register
+ * @character: character feature register
+ */
+struct spi_features {
+	struct feature_protect protect;
+	struct feature_config config;
+	struct feature_status status;
+	struct feature_character character;
+};
+
+/*
+ * device_spi - one spi nand device table entry
+ * @dev: generic nand device description (must stay first: the
+ *       device_to_spi() container_of relies on it)
+ * @feature: feature register layout of this part
+ * @extend_cmds: spi-specific extra commands
+ * @tx_mode_mask: tx mode mask for chip write
+ * @rx_mode_mask: rx mode mask for chip read
+ */
+struct device_spi {
+	struct nand_device dev;
+	struct spi_features feature;
+	struct spi_extend_cmds *extend_cmds;
+
+	u8 tx_mode_mask;
+	u8 rx_mode_mask;
+};
+
+/* brace-initializer helpers for the feature_* structs above */
+#define NAND_SPI_PROTECT(addr, wp_en_bit, bp_start_bit, bp_end_bit) \
+	{addr, wp_en_bit, bp_start_bit, bp_end_bit}
+
+#define NAND_SPI_CONFIG(addr, ecc_en_bit, otp_en_bit, need_qe) \
+	{addr, ecc_en_bit, otp_en_bit, need_qe}
+
+#define NAND_SPI_STATUS(addr, ecc_start_bit, ecc_end_bit) \
+	{addr, ecc_start_bit, ecc_end_bit}
+
+#define NAND_SPI_CHARACTER(addr, die_sel_bit, drive_start_bit, drive_end_bit) \
+	{addr, die_sel_bit, drive_start_bit, drive_end_bit}
+
+/* Upcast a generic nand_device embedded in a device_spi entry. */
+static inline struct device_spi *device_to_spi(struct nand_device *dev)
+{
+	return container_of(dev, struct device_spi, dev);
+}
+
+u8 spi_replace_rx_cmds(u8 mode);
+u8 spi_replace_tx_cmds(u8 mode);
+u8 spi_replace_rx_col_cycle(u8 mode);
+u8 spi_replace_tx_col_cycle(u8 mode);
+
+#endif /* __DEVICE_SPI_H__ */
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand/nand_spi.c b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand/nand_spi.c
new file mode 100644
index 0000000..7122a71
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand/nand_spi.c
@@ -0,0 +1,529 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "../nand_chip.h"
+#include "../nand_device.h"
+#include "../nfi.h"
+#include "../nand_base.h"
+#include "device_spi.h"
+#include "nand_spi.h"
+
+#define READY_TIMEOUT 500000 /* us */
+
+/* Fetch the device status via GET_FEATURE on the status register address. */
+static int nand_spi_read_status(struct nand_base *nand)
+{
+ struct device_spi *dev = device_to_spi(nand->dev);
+ u8 sr;
+
+ nand->get_feature(nand, dev->feature.status.addr, &sr, 1);
+
+ return sr;
+}
+
+/*
+ * Poll the status register until the array-busy bit clears.
+ * Returns 0 when ready, -EBUSY if still busy after @timeout microseconds.
+ */
+static int nand_spi_wait_ready(struct nand_base *nand, u32 timeout)
+{
+ u64 deadline = get_current_time_us() + timeout;
+ int busy;
+
+ for (;;) {
+ busy = nand_spi_read_status(nand) &
+ nand->dev->status->array_busy;
+ if (!busy)
+ return 0;
+ if (get_current_time_us() > deadline)
+ return -EBUSY;
+ }
+}
+
+/* Switch the SNFI controller operation mode; no-op when already in @mode. */
+static int nand_spi_set_op_mode(struct nand_base *nand, u8 mode)
+{
+ struct nand_spi *spi = base_to_spi(nand);
+ int ret;
+
+ if (spi->op_mode == mode)
+ return 0;
+
+ ret = nand->nfi->nfi_ctrl(nand->nfi, SNFI_CTRL_OP_MODE, (void *)&mode);
+ spi->op_mode = mode;
+
+ return ret;
+}
+
+/*
+ * Set (@en true) or clear (@en false) @mask in the feature register @addr,
+ * then read back to verify. Returns 0 on success, -EFAULT on verify failure.
+ *
+ * Fix: @mask may cover bits above bit 0 (callers pass BIT(ecc_en_bit) and
+ * GENMASK ranges), so the masked value must be collapsed to a boolean
+ * before comparing with @en; comparing (configs & mask) == en directly
+ * would spuriously fail for any mask other than BIT(0).
+ */
+static int nand_spi_set_config(struct nand_base *nand, u8 addr, u8 mask,
+ bool en)
+{
+ u8 configs = 0;
+
+ nand->get_feature(nand, addr, &configs, 1);
+
+ if (!!(configs & mask) == en)
+ return 0;
+
+ if (en)
+ configs |= mask;
+ else
+ configs &= ~mask;
+
+ nand->set_feature(nand, addr, &configs, 1);
+
+ /* Read back to confirm the device accepted the update. */
+ configs = 0;
+ nand->get_feature(nand, addr, &configs, 1);
+
+ return !!(configs & mask) == en ? 0 : -EFAULT;
+}
+
+/*
+ * Select the die (LUN) containing page *@row and rebase *@row to a
+ * die-local page number. Single-die devices are a no-op.
+ *
+ * Fixes: restore "&param" where the source had been corrupted to a pilcrow
+ * character ("&para" HTML-entity mangling); and do not fail verification
+ * for LUN 0, where die_sel is 0 and (param & die_sel) can never be true.
+ */
+static int nand_spi_die_select(struct nand_base *nand, int *row)
+{
+ struct device_spi *dev = device_to_spi(nand->dev);
+ struct nfi *nfi = nand->nfi;
+ int lun_blocks, block_pages, lun, blocks;
+ int page = *row, ret = 0;
+ u8 param = 0, die_sel;
+
+ if (nand->dev->lun_num < 2)
+ return 0;
+
+ block_pages = nand_block_pages(nand->dev);
+ lun_blocks = nand_lun_blocks(nand->dev);
+ blocks = div_down(page, block_pages);
+ lun = div_down(blocks, lun_blocks);
+
+ if (dev->extend_cmds->die_select == -1) {
+ /* No dedicated command: program the die-select feature bit. */
+ die_sel = (u8)(lun << dev->feature.character.die_sel_bit);
+ nand->get_feature(nand, dev->feature.character.addr, &param, 1);
+ param |= die_sel;
+ nand->set_feature(nand, dev->feature.character.addr, &param, 1);
+ param = 0;
+ nand->get_feature(nand, dev->feature.character.addr, &param, 1);
+ /* die_sel == 0 selects LUN 0; only verify a non-zero pattern */
+ ret = (!die_sel || (param & die_sel)) ? 0 : -EFAULT;
+ } else {
+ /* Device provides an explicit DIE SELECT command. */
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, dev->extend_cmds->die_select);
+ nfi->send_addr(nfi, lun, 0, 1, 0);
+ nfi->trigger(nfi);
+ }
+
+ *row = page - (lun_blocks * block_pages) * lun;
+
+ return ret;
+}
+
+/* Enter MAC mode, then let the base implementation drive chip select. */
+static int nand_spi_select_device(struct nand_base *nand, int cs)
+{
+ struct nand_spi *spi = base_to_spi(nand);
+
+ nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
+
+ return spi->parent->select_device(nand, cs);
+}
+
+/* Reset the device in MAC mode, then poll until the array is idle again. */
+static int nand_spi_reset(struct nand_base *nand)
+{
+ struct nand_spi *spi = base_to_spi(nand);
+
+ nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
+ spi->parent->reset(nand);
+
+ return nand_spi_wait_ready(nand, READY_TIMEOUT);
+}
+
+/* Read @count ID bytes through the base implementation in MAC mode. */
+static int nand_spi_read_id(struct nand_base *nand, u8 *id, int count)
+{
+ struct nand_spi *spi = base_to_spi(nand);
+
+ nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
+
+ return spi->parent->read_id(nand, id, count);
+}
+
+/*
+ * Read the parameter page, which lives in the OTP area at row 0x01.
+ * Enters OTP access mode, reads raw (controller ECC off), then leaves
+ * OTP mode again. Always returns 0.
+ *
+ * Fix: restore "&param" where the source had been corrupted to a pilcrow
+ * character ("&para" HTML-entity mangling) in four get/set_feature calls.
+ */
+static int nand_spi_read_param_page(struct nand_base *nand, u8 *data,
+ int count)
+{
+ struct device_spi *dev = device_to_spi(nand->dev);
+ struct nand_spi *spi = base_to_spi(nand);
+ struct nfi *nfi = nand->nfi;
+ int sectors, value;
+ u8 param = 0;
+
+ sectors = div_round_up(count, nfi->sector_size);
+
+ /* Enter OTP access mode to expose the parameter page. */
+ nand->get_feature(nand, dev->feature.config.addr, &param, 1);
+ param |= BIT(dev->feature.config.otp_en_bit);
+ nand->set_feature(nand, dev->feature.config.addr, &param, 1);
+
+ param = 0;
+ nand->get_feature(nand, dev->feature.config.addr, &param, 1);
+ if (param & BIT(dev->feature.config.otp_en_bit)) {
+ /* Parameter page is read raw: controller ECC off. */
+ value = 0;
+ nfi->nfi_ctrl(nfi, NFI_CTRL_ECC, &value);
+ /* NOTE(review): controller ECC is left disabled on return --
+ * confirm callers re-enable it before normal page reads. */
+ nand->dev->col_cycle = spi_replace_rx_col_cycle(spi->rx_mode);
+ nand->read_page(nand, 0x01);
+ nand->read_data(nand, 0x01, 0, sectors, data, NULL);
+ }
+
+ /* Leave OTP access mode. */
+ param &= ~BIT(dev->feature.config.otp_en_bit);
+ nand->set_feature(nand, dev->feature.config.addr, &param, 1);
+
+ return 0;
+}
+
+/*
+ * Write @count bytes to the feature register at @addr.
+ * NOTE(review): WRITE ENABLE is issued first -- presumably some parts
+ * require WREN before SET_FEATURE; confirm against the datasheet.
+ */
+static int nand_spi_set_feature(struct nand_base *nand, u8 addr,
+ u8 *param,
+ int count)
+{
+ struct nand_spi *spi = base_to_spi(nand);
+
+ nand->write_enable(nand);
+ nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
+
+ return spi->parent->set_feature(nand, addr, param, count);
+}
+
+/* Read @count bytes from the feature register at @addr in MAC mode. */
+static int nand_spi_get_feature(struct nand_base *nand, u8 addr,
+ u8 *param,
+ int count)
+{
+ struct nand_spi *spi = base_to_spi(nand);
+
+ nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
+
+ return spi->parent->get_feature(nand, addr, param, count);
+}
+
+/*
+ * Translate a linear page/column address into device form: select the
+ * owning die (rebasing *@row), then fold the plane index into *@col.
+ */
+static int nand_spi_addressing(struct nand_base *nand, int *row,
+ int *col)
+{
+ struct nand_device *dev = nand->dev;
+ int ret, blk;
+
+ ret = nand_spi_die_select(nand, row);
+ if (ret)
+ return ret;
+
+ blk = div_down(*row, nand_block_pages(dev));
+ *col |= (blk % dev->plane_num) << dev->addressing->plane_bit_start;
+
+ return 0;
+}
+
+/* Load page @row into the device cache and wait for the array to finish. */
+static int nand_spi_read_page(struct nand_base *nand, int row)
+{
+ struct nand_spi *spi = base_to_spi(nand);
+ u8 mode = (spi->op_mode == SNFI_AUTO_MODE) ? SNFI_AUTO_MODE
+ : SNFI_MAC_MODE;
+
+ nand_spi_set_op_mode(nand, mode);
+ spi->parent->read_page(nand, row);
+
+ return nand_spi_wait_ready(nand, READY_TIMEOUT);
+}
+
+/*
+ * Stream @sectors sectors out of the device cache register.
+ * Returns 0, -ENANDREAD on read/uncorrectable-ECC failure, or
+ * -ENANDFLIPS when on-die ECC corrected more than max_bitflips bits.
+ */
+static int nand_spi_read_data(struct nand_base *nand, int row, int col,
+ int sectors, u8 *data, u8 *oob)
+{
+ struct device_spi *dev = device_to_spi(nand->dev);
+ struct nand_spi *spi = base_to_spi(nand);
+ struct nand_base *parent = spi->parent;
+ int ret;
+
+ /* Quad read paths need the QE bit set on parts that implement it. */
+ if ((spi->rx_mode == SNFI_RX_114 || spi->rx_mode == SNFI_RX_144) &&
+ dev->feature.config.need_qe)
+ nand_spi_set_config(nand, dev->feature.config.addr,
+ BIT(0), true);
+
+ /* Column-cycle count depends on the read opcode / bus width. */
+ nand->dev->col_cycle = spi_replace_rx_col_cycle(spi->rx_mode);
+
+ nand_spi_set_op_mode(nand, SNFI_CUSTOM_MODE);
+
+ ret = parent->read_data(nand, row, col, sectors, data, oob);
+ if (ret)
+ return -ENANDREAD;
+
+ if (spi->ondie_ecc) {
+ /* Extract the corrected-bits field from the status register. */
+ ret = nand_spi_read_status(nand);
+ ret &= GENMASK(dev->feature.status.ecc_end_bit,
+ dev->feature.status.ecc_start_bit);
+ ret >>= dev->feature.status.ecc_start_bit;
+ if (ret > nand->dev->endurance->ecc_req)
+ return -ENANDREAD;
+ else if (ret > nand->dev->endurance->max_bitflips)
+ return -ENANDFLIPS;
+ }
+
+ return 0;
+}
+
+/*
+ * Issue WRITE ENABLE and confirm via the status register.
+ * Returns 0 when the write-enable latch is set, non-zero otherwise.
+ */
+static int nand_spi_write_enable(struct nand_base *nand)
+{
+ struct device_spi *dev = device_to_spi(nand->dev);
+ struct nfi *nfi = nand->nfi;
+
+ nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, dev->extend_cmds->write_enable);
+ nfi->trigger(nfi);
+
+ return !(nand_spi_read_status(nand) &
+ nand->dev->status->write_protect);
+}
+
+/* Load page data into the device cache over the configured TX bus width. */
+static int nand_spi_program_data(struct nand_base *nand, int row,
+ int col,
+ u8 *data, u8 *oob)
+{
+ struct device_spi *dev = device_to_spi(nand->dev);
+ struct nand_spi *spi = base_to_spi(nand);
+
+ /* Quad program needs the QE bit on parts that implement it. */
+ if (dev->feature.config.need_qe && spi->tx_mode == SNFI_TX_114)
+ nand_spi_set_config(nand, dev->feature.config.addr,
+ BIT(0), true);
+
+ nand_spi_set_op_mode(nand, SNFI_CUSTOM_MODE);
+ nand->dev->col_cycle = spi_replace_tx_col_cycle(spi->tx_mode);
+
+ return spi->parent->program_data(nand, row, col, data, oob);
+}
+
+/*
+ * Commit previously-loaded cache data to the array (PROGRAM EXECUTE).
+ * NOTE(review): sends program_2nd followed by full column+row address
+ * cycles rather than using the base program_page -- presumably matching
+ * the SNFI command sequence; confirm against the controller spec.
+ */
+static int nand_spi_program_page(struct nand_base *nand, int row)
+{
+ struct nand_spi *spi = base_to_spi(nand);
+ struct nand_device *dev = nand->dev;
+ struct nfi *nfi = nand->nfi;
+
+ /* AUTO mode is kept if already active; otherwise fall back to MAC. */
+ if (spi->op_mode == SNFI_AUTO_MODE)
+ nand_spi_set_op_mode(nand, SNFI_AUTO_MODE);
+ else
+ nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, dev->cmds->program_2nd);
+ nfi->send_addr(nfi, 0, row, dev->col_cycle, dev->row_cycle);
+ nfi->trigger(nfi);
+
+ return nand_spi_wait_ready(nand, READY_TIMEOUT);
+}
+
+/* Erase the block holding @row in MAC mode, then wait for completion. */
+static int nand_spi_erase_block(struct nand_base *nand, int row)
+{
+ struct nand_spi *spi = base_to_spi(nand);
+
+ nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
+ spi->parent->erase_block(nand, row);
+
+ return nand_spi_wait_ready(nand, READY_TIMEOUT);
+}
+
+/*
+ * SPI-specific chip control dispatcher (on-die ECC, TX/RX bus widths);
+ * unknown commands fall through to the NFI controller.
+ *
+ * Fix: the TX path must patch program_1st (program uses the TX bus) and
+ * the RX path random_out_1st (random data-out uses the RX bus). The
+ * original assignments had the two command fields swapped relative to
+ * the initial wiring in nand_init().
+ */
+static int nand_chip_spi_ctrl(struct nand_chip *chip, int cmd,
+ void *args)
+{
+ struct nand_base *nand = chip->nand;
+ struct device_spi *dev = device_to_spi(nand->dev);
+ struct nand_spi *spi = base_to_spi(nand);
+ struct nfi *nfi = nand->nfi;
+ int ret = 0, value = *(int *)args;
+
+ switch (cmd) {
+ case CHIP_CTRL_ONDIE_ECC:
+ spi->ondie_ecc = (bool)value;
+ ret = nand_spi_set_config(nand, dev->feature.config.addr,
+ BIT(dev->feature.config.ecc_en_bit),
+ spi->ondie_ecc);
+ break;
+
+ case SNFI_CTRL_TX_MODE:
+ if (value < 0 || value > SNFI_TX_114)
+ return -EOPNOTSUPP;
+
+ if (dev->tx_mode_mask & BIT(value)) {
+ spi->tx_mode = value;
+ /* program (data in) runs on the TX bus width */
+ nand->dev->cmds->program_1st = spi_replace_tx_cmds(
+ spi->tx_mode);
+ ret = nfi->nfi_ctrl(nfi, cmd, args);
+ }
+
+ break;
+
+ case SNFI_CTRL_RX_MODE:
+ if (value < 0 || value > SNFI_RX_144)
+ return -EOPNOTSUPP;
+
+ if (dev->rx_mode_mask & BIT(value)) {
+ spi->rx_mode = value;
+ /* random data-out (read) runs on the RX bus width */
+ nand->dev->cmds->random_out_1st = spi_replace_rx_cmds(
+ spi->rx_mode);
+ ret = nfi->nfi_ctrl(nfi, cmd, args);
+ }
+
+ break;
+
+ case CHIP_CTRL_OPS_CACHE:
+ case CHIP_CTRL_OPS_MULTI:
+ case CHIP_CTRL_PSLC_MODE:
+ case CHIP_CTRL_DDR_MODE:
+ case CHIP_CTRL_DRIVE_STRENGTH:
+ case CHIP_CTRL_TIMING_MODE:
+ /* Not supported on SPI NAND. */
+ ret = -EOPNOTSUPP;
+ break;
+
+ default:
+ ret = nfi->nfi_ctrl(nfi, cmd, args);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Restore volatile device state after suspend: reset the device, clear
+ * the block-protection bits, reapply the on-die ECC setting and
+ * reprogram the NFI page format.
+ *
+ * Fix: the block-protection mask belongs to the protection register
+ * (feature.protect.addr); the original wrote it to the configuration
+ * register, unlike the identical sequence in nand_init().
+ */
+int nand_chip_spi_resume(struct nand_chip *chip)
+{
+ struct nand_base *nand = chip->nand;
+ struct nand_spi *spi = base_to_spi(nand);
+ struct device_spi *dev = device_to_spi(nand->dev);
+ struct nfi *nfi = nand->nfi;
+ struct nfi_format format;
+ u8 mask;
+
+ nand->reset(nand);
+
+ mask = GENMASK(dev->feature.protect.bp_end_bit,
+ dev->feature.protect.bp_start_bit);
+ nand_spi_set_config(nand, dev->feature.protect.addr, mask, false);
+ mask = BIT(dev->feature.config.ecc_en_bit);
+ nand_spi_set_config(nand, dev->feature.config.addr, mask,
+ spi->ondie_ecc);
+
+ format.page_size = nand->dev->page_size;
+ format.spare_size = nand->dev->spare_size;
+ format.ecc_req = nand->dev->endurance->ecc_req;
+
+ return nfi->set_format(nfi, &format);
+}
+
+/* Push the device geometry (page/spare size, ECC requirement) to the NFI. */
+static int nand_spi_set_format(struct nand_base *nand)
+{
+ struct nfi_format fmt = {
+ .page_size = nand->dev->page_size,
+ .spare_size = nand->dev->spare_size,
+ .ecc_req = nand->dev->endurance->ecc_req,
+ };
+
+ return nand->nfi->set_format(nand->nfi, &fmt);
+}
+
+/*
+ * Build the SPI-NAND layer on top of the already-created base nand
+ * (chip->nand): wraps the base ops, detects the device, programs the
+ * NFI format, and disables block protection / on-die ECC.
+ *
+ * Returns the new nand_base on success, NULL on failure. On failure
+ * only the spi wrapper is freed; the parent base and NFI remain owned
+ * by the caller (nand_chip_init cleans them up).
+ */
+struct nand_base *nand_init(struct nand_chip *chip)
+{
+ struct nand_base *nand;
+ struct nand_spi *spi;
+ struct device_spi *dev;
+ int ret;
+ u8 mask;
+
+ spi = mem_alloc(1, sizeof(struct nand_spi));
+ if (!spi) {
+ pr_err("alloc nand_spi fail\n");
+ return NULL;
+ }
+
+ /* Defaults: single-bit bus both directions, custom op mode, ECC off. */
+ spi->ondie_ecc = false;
+ spi->op_mode = SNFI_CUSTOM_MODE;
+ spi->rx_mode = SNFI_RX_114;
+ spi->tx_mode = SNFI_TX_114;
+
+ spi->parent = chip->nand;
+ nand = &spi->base;
+ nand->dev = spi->parent->dev;
+ nand->nfi = spi->parent->nfi;
+
+ /* Override base ops with the SPI-aware versions. */
+ nand->select_device = nand_spi_select_device;
+ nand->reset = nand_spi_reset;
+ nand->read_id = nand_spi_read_id;
+ nand->read_param_page = nand_spi_read_param_page;
+ nand->set_feature = nand_spi_set_feature;
+ nand->get_feature = nand_spi_get_feature;
+ nand->read_status = nand_spi_read_status;
+ nand->addressing = nand_spi_addressing;
+ nand->read_page = nand_spi_read_page;
+ nand->read_data = nand_spi_read_data;
+ nand->write_enable = nand_spi_write_enable;
+ nand->program_data = nand_spi_program_data;
+ nand->program_page = nand_spi_program_page;
+ nand->erase_block = nand_spi_erase_block;
+
+ chip->chip_ctrl = nand_chip_spi_ctrl;
+ chip->nand_type = NAND_SPI;
+ chip->resume = nand_chip_spi_resume;
+
+ /* Identify the part; fills in nand->dev from the device table. */
+ ret = nand_detect_device(nand);
+ if (ret)
+ goto err;
+
+ nand->select_device(nand, 0);
+
+ ret = nand_spi_set_format(nand);
+ if (ret)
+ goto err;
+
+ dev = (struct device_spi *)nand->dev;
+
+ /* Pick opcodes matching the default bus widths. */
+ nand->dev->cmds->random_out_1st =
+ spi_replace_rx_cmds(spi->rx_mode);
+ nand->dev->cmds->program_1st =
+ spi_replace_tx_cmds(spi->tx_mode);
+
+ /* Clear block protection so program/erase can proceed. */
+ mask = GENMASK(dev->feature.protect.bp_end_bit,
+ dev->feature.protect.bp_start_bit);
+ ret = nand_spi_set_config(nand, dev->feature.protect.addr, mask, false);
+ if (ret)
+ goto err;
+
+ mask = BIT(dev->feature.config.ecc_en_bit);
+ ret = nand_spi_set_config(nand, dev->feature.config.addr, mask,
+ spi->ondie_ecc);
+ if (ret)
+ goto err;
+
+ return nand;
+
+err:
+ mem_free(spi);
+ return NULL;
+}
+
+/* Tear down the parent base layer, then release the spi wrapper itself. */
+void nand_exit(struct nand_base *nand)
+{
+ struct nand_spi *spi = base_to_spi(nand);
+ struct nand_base *parent = spi->parent;
+
+ nand_base_exit(parent);
+ mem_free(spi);
+}
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand/nand_spi.h b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand/nand_spi.h
new file mode 100644
index 0000000..e55e4de
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand/nand_spi.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NAND_SPI_H__
+#define __NAND_SPI_H__
+
+/*
+ * spi nand handler
+ * @base: spi nand base functions
+ * @parent: common parent nand base functions
+ * @tx_mode: spi bus width of transfer to device
+ * @rx_mode: spi bus width of transfer from device
+ * @op_mode: spi nand controller (NFI) operation mode
+ * @ondie_ecc: spi nand on-die ecc flag
+ */
+
+struct nand_spi {
+ struct nand_base base;
+ struct nand_base *parent;
+ u8 tx_mode;
+ u8 rx_mode;
+ u8 op_mode;
+ bool ondie_ecc;
+};
+
+/* Recover the nand_spi wrapper from its embedded nand_base member. */
+static inline struct nand_spi *base_to_spi(struct nand_base *base)
+{
+ return container_of(base, struct nand_spi, base);
+}
+
+#endif /* __NAND_SPI_H__ */
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand_base.c b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand_base.c
new file mode 100644
index 0000000..65998e5
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand_base.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "nand_chip.h"
+#include "nand_device.h"
+#include "nfi.h"
+#include "nand_base.h"
+
+/* Reset the NFI state machine and route following traffic to chip-select @cs. */
+static int nand_base_select_device(struct nand_base *nand, int cs)
+{
+ struct nfi *nfi = nand->nfi;
+
+ nfi->reset(nfi);
+ return nfi->select_chip(nfi, cs);
+}
+
+/* Issue the RESET command and wait up to tRST for the array to recover. */
+static int nand_base_reset(struct nand_base *nand)
+{
+ struct nfi *nfi = nand->nfi;
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, nand->dev->cmds->reset);
+ nfi->trigger(nfi);
+
+ return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
+ nand->dev->array_timing->tRST);
+}
+
+/* READ ID: command, wait tWHR, one zero address cycle, then @count bytes. */
+static int nand_base_read_id(struct nand_base *nand, u8 *id, int count)
+{
+ struct nfi *nfi = nand->nfi;
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, nand->dev->cmds->read_id);
+ nfi->wait_ready(nfi, NAND_WAIT_POLLING, nand->dev->array_timing->tWHR);
+ nfi->send_addr(nfi, 0, 0, 1, 0);
+
+ return nfi->read_bytes(nfi, id, count);
+}
+
+/* READ PARAMETER PAGE: command, one zero address cycle, wait tR, read data. */
+static int nand_base_read_param_page(struct nand_base *nand, u8 *data,
+ int count)
+{
+ struct nfi *nfi = nand->nfi;
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, nand->dev->cmds->read_param_page);
+ nfi->send_addr(nfi, 0, 0, 1, 0);
+ nfi->wait_ready(nfi, NAND_WAIT_POLLING, nand->dev->array_timing->tR);
+
+ return nfi->read_bytes(nfi, data, count);
+}
+
+/* SET FEATURES: one address cycle (@addr), write @count bytes, wait tFEAT. */
+static int nand_base_set_feature(struct nand_base *nand, u8 addr,
+ u8 *param,
+ int count)
+{
+ struct nfi *nfi = nand->nfi;
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, nand->dev->cmds->set_feature);
+ nfi->send_addr(nfi, addr, 0, 1, 0);
+ nfi->write_bytes(nfi, param, count);
+
+ return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
+ nand->dev->array_timing->tFEAT);
+}
+
+/* GET FEATURES: one address cycle (@addr), wait tFEAT, read @count bytes. */
+static int nand_base_get_feature(struct nand_base *nand, u8 addr,
+ u8 *param,
+ int count)
+{
+ struct nfi *nfi = nand->nfi;
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, nand->dev->cmds->get_feature);
+ nfi->send_addr(nfi, addr, 0, 1, 0);
+ nfi->wait_ready(nfi, NAND_WAIT_POLLING, nand->dev->array_timing->tFEAT);
+
+ return nfi->read_bytes(nfi, param, count);
+}
+
+/* READ STATUS: wait tWHR, then fetch a single status byte. */
+static int nand_base_read_status(struct nand_base *nand)
+{
+ struct nfi *nfi = nand->nfi;
+ u8 sr = 0;
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, nand->dev->cmds->read_status);
+ nfi->wait_ready(nfi, NAND_WAIT_POLLING, nand->dev->array_timing->tWHR);
+ nfi->read_bytes(nfi, &sr, 1);
+
+ return sr;
+}
+
+/*
+ * Translate a linear page number into the device word-line encoding:
+ * pick the target (chip select), then pack page/block/plane/lun into
+ * *@row at the bit offsets from dev->addressing. *@col is preserved.
+ */
+static int nand_base_addressing(struct nand_base *nand, int *row,
+ int *col)
+{
+ struct nand_device *dev = nand->dev;
+ int lun, plane, block, page, cs = 0;
+ int block_pages, target_blocks, wl = 0;
+ int icol = *col;
+
+ /* Multi-target parts: find and strip the chip-select component. */
+ if (dev->target_num > 1) {
+ block_pages = nand_block_pages(dev);
+ target_blocks = nand_target_blocks(dev);
+ cs = div_down(*row, block_pages * target_blocks);
+ *row -= cs * block_pages * target_blocks;
+ }
+
+ nand->select_device(nand, cs);
+
+ block_pages = nand_block_pages(dev);
+ block = div_down(*row, block_pages);
+ page = *row - block * block_pages;
+ /* reminder() is a project helper -- presumably modulo; confirm. */
+ plane = reminder(block, dev->plane_num);
+ lun = div_down(block, nand_lun_blocks(dev));
+
+ /* Pack the word-line: field positions come from the device table. */
+ wl |= (page << dev->addressing->row_bit_start);
+ wl |= (block << dev->addressing->block_bit_start);
+ wl |= (plane << dev->addressing->plane_bit_start);
+ wl |= (lun << dev->addressing->lun_bit_start);
+
+ *row = wl;
+ *col = icol;
+
+ return 0;
+}
+
+/* Load page @row into the device cache: read_1st, addresses, read_2nd, wait tR. */
+static int nand_base_read_page(struct nand_base *nand, int row)
+{
+ struct nand_device *d = nand->dev;
+ struct nfi *nfi = nand->nfi;
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, d->cmds->read_1st);
+ nfi->send_addr(nfi, 0, row, d->col_cycle, d->row_cycle);
+ nfi->send_cmd(nfi, d->cmds->read_2nd);
+ nfi->trigger(nfi);
+
+ return nfi->wait_ready(nfi, NAND_WAIT_POLLING, d->array_timing->tR);
+}
+
+/* Change read column, wait tRCBSY, then stream @sectors sectors + oob out. */
+static int nand_base_read_data(struct nand_base *nand, int row, int col,
+ int sectors, u8 *data, u8 *oob)
+{
+ struct nand_device *d = nand->dev;
+ struct nfi *nfi = nand->nfi;
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, d->cmds->random_out_1st);
+ nfi->send_addr(nfi, col, row, d->col_cycle, d->row_cycle);
+ nfi->send_cmd(nfi, d->cmds->random_out_2nd);
+ nfi->wait_ready(nfi, NAND_WAIT_POLLING, d->array_timing->tRCBSY);
+
+ return nfi->read_sectors(nfi, data, oob, sectors);
+}
+
+/* Return 0 when the device reports write-enabled, -ENANDWP when protected. */
+static int nand_base_write_enable(struct nand_base *nand)
+{
+ int sr = nand_base_read_status(nand);
+
+ return (sr & nand->dev->status->write_protect) ? 0 : -ENANDWP;
+}
+
+/* Program load: command, full addresses, then push the page (data + oob). */
+static int nand_base_program_data(struct nand_base *nand, int row,
+ int col,
+ u8 *data, u8 *oob)
+{
+ struct nand_device *d = nand->dev;
+ struct nfi *nfi = nand->nfi;
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, d->cmds->program_1st);
+ nfi->send_addr(nfi, 0, row, d->col_cycle, d->row_cycle);
+
+ return nfi->write_page(nfi, data, oob);
+}
+
+/* Program commit: issue program_2nd and wait up to tPROG for the array. */
+static int nand_base_program_page(struct nand_base *nand, int row)
+{
+ struct nfi *nfi = nand->nfi;
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, nand->dev->cmds->program_2nd);
+ nfi->trigger(nfi);
+
+ return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
+ nand->dev->array_timing->tPROG);
+}
+
+/* Block erase: erase_1st, row address cycles only, erase_2nd, wait tBERS. */
+static int nand_base_erase_block(struct nand_base *nand, int row)
+{
+ struct nand_device *d = nand->dev;
+ struct nfi *nfi = nand->nfi;
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, d->cmds->erase_1st);
+ nfi->send_addr(nfi, 0, row, 0, d->row_cycle);
+ nfi->send_cmd(nfi, d->cmds->erase_2nd);
+ nfi->trigger(nfi);
+
+ return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
+ d->array_timing->tBERS);
+}
+
+/* Cache read sequence for page @row; waits tRCBSY for the cache transfer. */
+static int nand_base_read_cache(struct nand_base *nand, int row)
+{
+ struct nand_device *d = nand->dev;
+ struct nfi *nfi = nand->nfi;
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, d->cmds->read_1st);
+ nfi->send_addr(nfi, 0, row, d->col_cycle, d->row_cycle);
+ nfi->send_cmd(nfi, d->cmds->read_cache);
+ nfi->trigger(nfi);
+
+ return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
+ d->array_timing->tRCBSY);
+}
+
+/* End a cache-read burst: output the last page, waiting tRCBSY. */
+static int nand_base_read_last(struct nand_base *nand)
+{
+ struct nfi *nfi = nand->nfi;
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, nand->dev->cmds->read_cache_last);
+ nfi->trigger(nfi);
+
+ return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
+ nand->dev->array_timing->tRCBSY);
+}
+
+/* Cache program: commit the cache register while accepting the next page. */
+static int nand_base_program_cache(struct nand_base *nand)
+{
+ struct nfi *nfi = nand->nfi;
+
+ nfi->reset(nfi);
+ nfi->send_cmd(nfi, nand->dev->cmds->program_cache);
+ nfi->trigger(nfi);
+
+ return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
+ nand->dev->array_timing->tPCBSY);
+}
+
+/*
+ * Allocate a nand_base and wire it to the raw-command implementations
+ * above. @dev may be NULL at this point (nand_chip_init passes NULL and
+ * detection fills it in later). Returns NULL on allocation failure.
+ * Ownership: the returned object is released with nand_base_exit().
+ */
+struct nand_base *nand_base_init(struct nand_device *dev,
+ struct nfi *nfi)
+{
+ struct nand_base *nand;
+
+ nand = mem_alloc(1, sizeof(struct nand_base));
+ if (!nand)
+ return NULL;
+
+ nand->dev = dev;
+ nand->nfi = nfi;
+ nand->select_device = nand_base_select_device;
+ nand->reset = nand_base_reset;
+ nand->read_id = nand_base_read_id;
+ nand->read_param_page = nand_base_read_param_page;
+ nand->set_feature = nand_base_set_feature;
+ nand->get_feature = nand_base_get_feature;
+ nand->read_status = nand_base_read_status;
+ nand->addressing = nand_base_addressing;
+ nand->read_page = nand_base_read_page;
+ nand->read_data = nand_base_read_data;
+ nand->read_cache = nand_base_read_cache;
+ nand->read_last = nand_base_read_last;
+ nand->write_enable = nand_base_write_enable;
+ nand->program_data = nand_base_program_data;
+ nand->program_page = nand_base_program_page;
+ nand->program_cache = nand_base_program_cache;
+ nand->erase_block = nand_base_erase_block;
+
+ return nand;
+}
+
+/* Tear down the NFI controller, then release the base object itself. */
+void nand_base_exit(struct nand_base *base)
+{
+ struct nfi *nfi = base->nfi;
+
+ nfi_exit(nfi);
+ mem_free(base);
+}
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand_base.h b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand_base.h
new file mode 100644
index 0000000..02644c2
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand_base.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NAND_BASE_H__
+#define __NAND_BASE_H__
+
+/*
+ * nand base functions
+ * @dev: nand device information
+ * @nfi: nand host controller
+ * @select_device: select one nand device of multi nand on chip
+ * @reset: reset current nand device
+ * @read_id: read current nand id
+ * @read_param_page: read current nand parameters page
+ * @set_feature: configure the nand device feature
+ * @get_feature: get the nand device feature
+ * @read_status: read nand device status
+ * @addressing: addressing the address to nand device physical address
+ * @read_page: read page data to device cache register
+ * @read_data: read data from device cache register by bus protocol
+ * @read_cache: nand cache read operation for data output
+ * @read_last: nand cache read operation for last page output
+ * @write_enable: enable program/erase for nand, especially spi nand
+ * @program_data: program data to nand device cache register
+ * @program_page: program page data from nand device cache register to array
+ * @program_cache: nand cache program operation for data input
+ * @erase_block: erase nand block operation
+ */
+struct nand_base {
+ struct nand_device *dev;
+ struct nfi *nfi;
+ int (*select_device)(struct nand_base *nand, int cs);
+ int (*reset)(struct nand_base *nand);
+ int (*read_id)(struct nand_base *nand, u8 *id, int count);
+ int (*read_param_page)(struct nand_base *nand, u8 *data, int count);
+ int (*set_feature)(struct nand_base *nand, u8 addr, u8 *param,
+ int count);
+ int (*get_feature)(struct nand_base *nand, u8 addr, u8 *param,
+ int count);
+ int (*read_status)(struct nand_base *nand);
+ int (*addressing)(struct nand_base *nand, int *row, int *col);
+
+ int (*read_page)(struct nand_base *nand, int row);
+ int (*read_data)(struct nand_base *nand, int row, int col, int sectors,
+ u8 *data, u8 *oob);
+ int (*read_cache)(struct nand_base *nand, int row);
+ int (*read_last)(struct nand_base *nand);
+
+ int (*write_enable)(struct nand_base *nand);
+ int (*program_data)(struct nand_base *nand, int row, int col, u8 *data,
+ u8 *oob);
+ int (*program_page)(struct nand_base *nand, int row);
+ int (*program_cache)(struct nand_base *nand);
+
+ int (*erase_block)(struct nand_base *nand, int row);
+};
+
+/* Constructors/destructors for the base layer and the type-specific layer. */
+struct nand_base *nand_base_init(struct nand_device *device,
+ struct nfi *nfi);
+void nand_base_exit(struct nand_base *base);
+
+struct nand_base *nand_init(struct nand_chip *nand);
+void nand_exit(struct nand_base *nand);
+
+int nand_detect_device(struct nand_base *nand);
+
+/* Fallback 16-bit register accessors when the platform does not provide them. */
+#ifndef writew
+#define writew(v, a) (*REG16(a) = (v))
+#endif
+#ifndef readw
+#define readw(a) (*REG16(a))
+#endif
+
+#endif /* __NAND_BASE_H__ */
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand_chip.c b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand_chip.c
new file mode 100644
index 0000000..ec97d83
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand_chip.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "nand_chip.h"
+#include "nand_device.h"
+#include "nfi.h"
+#include "nand_base.h"
+
+/*
+ * Read @count page operations. Each op's result lands in ops[i].status;
+ * the return value is the most severe (most negative) status seen, so a
+ * single failing page does not abort the remaining ops.
+ */
+static int nand_chip_read_page(struct nand_chip *chip,
+ struct nand_ops *ops,
+ int count)
+{
+ struct nand_base *nand = chip->nand;
+ struct nand_device *dev = nand->dev;
+ int i, ret = 0;
+ int row, col, sectors;
+ u8 *data, *oob;
+
+ for (i = 0; i < count; i++) {
+ row = ops[i].row;
+ col = ops[i].col;
+
+ nand->addressing(nand, &row, &col);
+ ops[i].status = nand->read_page(nand, row);
+ if (ops[i].status < 0) {
+ ret = ops[i].status;
+ continue;
+ }
+
+ data = ops[i].data;
+ oob = ops[i].oob;
+ sectors = ops[i].len / chip->sector_size;
+ ops[i].status = nand->read_data(nand, row, col,
+ sectors, data, oob);
+ /* Positive status = corrected bitflip count: flag if excessive. */
+ if (ops[i].status > 0)
+ ops[i].status = ops[i].status >=
+ dev->endurance->max_bitflips ?
+ -ENANDFLIPS : 0;
+
+ ret = min_t(int, ret, ops[i].status);
+ }
+
+ return ret;
+}
+
+/*
+ * Program @count page operations. Per-op results land in ops[i].status;
+ * the return is the most severe status seen. A write-protect condition
+ * aborts the whole batch immediately with -ENANDWP, since later ops
+ * would fail the same way.
+ */
+static int nand_chip_write_page(struct nand_chip *chip,
+ struct nand_ops *ops,
+ int count)
+{
+ struct nand_base *nand = chip->nand;
+ struct nand_device *dev = nand->dev;
+ int i, ret = 0;
+ int row, col;
+ u8 *data, *oob;
+
+ for (i = 0; i < count; i++) {
+ row = ops[i].row;
+ col = ops[i].col;
+
+ nand->addressing(nand, &row, &col);
+
+ ops[i].status = nand->write_enable(nand);
+ if (ops[i].status) {
+ pr_debug("Write Protect at %x!\n", row);
+ ops[i].status = -ENANDWP;
+ return -ENANDWP;
+ }
+
+ data = ops[i].data;
+ oob = ops[i].oob;
+ ops[i].status = nand->program_data(nand, row, col, data, oob);
+ if (ops[i].status < 0) {
+ ret = ops[i].status;
+ continue;
+ }
+
+ ops[i].status = nand->program_page(nand, row);
+ if (ops[i].status < 0) {
+ ret = ops[i].status;
+ continue;
+ }
+
+ /* The device reports program failure via the status register. */
+ ops[i].status = nand->read_status(nand);
+ if (ops[i].status & dev->status->program_fail)
+ ops[i].status = -ENANDWRITE;
+
+ ret = min_t(int, ret, ops[i].status);
+ }
+
+ return ret;
+}
+
+/*
+ * Erase @count blocks (one op per block). Per-op results land in
+ * ops[i].status; the return is the most severe status seen. A
+ * write-protect condition aborts the whole batch with -ENANDWP.
+ */
+static int nand_chip_erase_block(struct nand_chip *chip,
+ struct nand_ops *ops,
+ int count)
+{
+ struct nand_base *nand = chip->nand;
+ struct nand_device *dev = nand->dev;
+ int i, ret = 0;
+ int row, col;
+
+ for (i = 0; i < count; i++) {
+ row = ops[i].row;
+ col = ops[i].col;
+
+ nand->addressing(nand, &row, &col);
+
+ ops[i].status = nand->write_enable(nand);
+ if (ops[i].status) {
+ pr_debug("Write Protect at %x!\n", row);
+ ops[i].status = -ENANDWP;
+ return -ENANDWP;
+ }
+
+ ops[i].status = nand->erase_block(nand, row);
+ if (ops[i].status < 0) {
+ ret = ops[i].status;
+ continue;
+ }
+
+ /* The device reports erase failure via the status register. */
+ ops[i].status = nand->read_status(nand);
+ if (ops[i].status & dev->status->erase_fail)
+ ops[i].status = -ENANDERASE;
+
+ ret = min_t(int, ret, ops[i].status);
+ }
+
+ return ret;
+}
+
+/* read first bad mark on spare */
+static int nand_chip_is_bad_block(struct nand_chip *chip,
+ struct nand_ops *ops,
+ int count)
+{
+ int i, ret, value;
+ int status = 0;
+ u8 *data;
+
+ /* Disable ECC */
+ value = 0;
+ ret = chip->chip_ctrl(chip, NFI_CTRL_ECC, &value);
+ if (ret)
+ return ret;
+
+ ret = chip->read_page(chip, ops, count);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ data = ops[i].data;
+
+ if (data[chip->page_size] != 0xff) {
+ ops[i].status = -ENANDBAD;
+ status = -ENANDBAD;
+ } else {
+ ops[i].status = 0;
+ }
+ }
+
+ /* Enable ECC */
+ value = 1;
+ ret = chip->chip_ctrl(chip, NFI_CTRL_ECC, &value);
+ if (ret)
+ return ret;
+
+ return status;
+}
+
+/* Default chip_ctrl: the plain base chip accepts no control commands. */
+static int nand_chip_ctrl(struct nand_chip *chip, int cmd, void *args)
+{
+ return -EOPNOTSUPP;
+}
+
+/* Default suspend hook: nothing to save for the plain base chip. */
+static int nand_chip_suspend(struct nand_chip *chip)
+{
+ return 0;
+}
+
+/* Default resume hook: overridden by type-specific code (e.g. SPI). */
+static int nand_chip_resume(struct nand_chip *chip)
+{
+ return 0;
+}
+
+/*
+ * Build the full stack: NFI controller -> base nand -> type-specific
+ * nand (nand_init), then publish the device geometry on the chip.
+ * Returns NULL on failure with all intermediate objects released.
+ *
+ * Fix: at nand_err the local 'nand' is NULL (nand_init just failed and
+ * freed only its own wrapper), so the original mem_free(nand) freed
+ * nothing and leaked the base object still held in chip->nand; free
+ * chip->nand instead.
+ */
+struct nand_chip *nand_chip_init(struct nfi_resource *res)
+{
+ struct nand_chip *chip;
+ struct nand_base *nand;
+ struct nfi *nfi;
+
+ chip = mem_alloc(1, sizeof(struct nand_chip));
+ if (!chip) {
+ pr_err("nand chip alloc fail!\n");
+ return NULL;
+ }
+
+ nfi = nfi_init(res);
+ if (!nfi) {
+ pr_err("nfi init fail!\n");
+ goto nfi_err;
+ }
+
+ /* Device table is unknown yet: detection happens inside nand_init. */
+ nand = nand_base_init(NULL, nfi);
+ if (!nand) {
+ pr_err("nand base init fail!\n");
+ goto base_err;
+ }
+
+ chip->nand = (void *)nand;
+ chip->read_page = nand_chip_read_page;
+ chip->write_page = nand_chip_write_page;
+ chip->erase_block = nand_chip_erase_block;
+ chip->is_bad_block = nand_chip_is_bad_block;
+ chip->chip_ctrl = nand_chip_ctrl;
+ chip->suspend = nand_chip_suspend;
+ chip->resume = nand_chip_resume;
+
+ nand = nand_init(chip);
+ if (!nand)
+ goto nand_err;
+
+ chip->nand = (void *)nand;
+ chip->plane_num = nand->dev->plane_num;
+ chip->block_num = nand_total_blocks(nand->dev);
+ chip->block_size = nand->dev->block_size;
+ chip->block_pages = nand_block_pages(nand->dev);
+ chip->page_size = nand->dev->page_size;
+ chip->oob_size = nfi->fdm_size * div_down(chip->page_size,
+ nfi->sector_size);
+ chip->sector_size = nfi->sector_size;
+ chip->sector_spare_size = nfi->sector_spare_size;
+ chip->min_program_pages = nand->dev->min_program_pages;
+ chip->ecc_strength = nfi->ecc_strength;
+ chip->ecc_parity_size = nfi->ecc_parity_size;
+ chip->fdm_ecc_size = nfi->fdm_ecc_size;
+ chip->fdm_reg_size = nfi->fdm_size;
+
+ return chip;
+
+nand_err:
+ /* nand_init failed: release the base object still held in chip->nand. */
+ mem_free(chip->nand);
+base_err:
+ nfi_exit(nfi);
+nfi_err:
+ mem_free(chip);
+ return NULL;
+}
+
+/* Release the chip: tear down the nand stack, then the chip wrapper. */
+void nand_chip_exit(struct nand_chip *chip)
+{
+ struct nand_base *nand = chip->nand;
+
+ nand_exit(nand);
+ mem_free(chip);
+}
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand_chip.h b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand_chip.h
new file mode 100644
index 0000000..3e9c8e6
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand_chip.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NAND_CHIP_H__
+#define __NAND_CHIP_H__
+
+/* nand cell/interface technology of the devices behind a chip */
+enum nand_type {
+ NAND_SPI,
+ NAND_SLC,
+ NAND_MLC,
+ NAND_TLC
+};
+
+/*
+ * nand chip operation unit
+ * one nand_ops indicates one row operation
+ * @row: nand chip row address, like as nand row
+ * @col: nand chip column address, like as nand column
+ * @len: operate data length, min is sector_size,
+ * max is page_size and sector_size aligned
+ * @status: one operation result status
+ * (presumably 0 on success, negative error code on failure --
+ * TODO confirm against the read_page/write_page implementations)
+ * @data: data buffer for operation
+ * @oob: oob buffer for operation, like as nand spare area
+ */
+struct nand_ops {
+ int row;
+ int col;
+ int len;
+ int status;
+ void *data;
+ void *oob;
+};
+
+/*
+ * nand chip descriptions
+ * nand chip includes nand controller and the several same nand devices
+ * @nand_type: the nand type on this chip,
+ * the chip maybe have several nand device and the type must be same
+ * @plane_num: the whole plane number on the chip
+ * @block_num: the whole block number on the chip
+ * @block_size: nand device block size
+ * @block_pages: nand device block has page number
+ * @page_size: nand device page size
+ * @oob_size: chip out of band size, like as nand spare size,
+ * but restricts this:
+ * the size is provided by nand controller(NFI),
+ * because NFI would use some nand spare size
+ * @min_program_pages: chip needs min pages per program operations
+ * one page as one nand_ops
+ * @sector_size: chip min read size
+ * @sector_spare_size: spare size for sector, is spare_size/page_sectors
+ * @ecc_strength: ecc strength per sector_size, it would be for calculated ecc
+ * @ecc_parity_size: ecc parity size for one sector_size data
+ * @fdm_ecc_size: ecc-protected FDM bytes per sector (from nfi->fdm_ecc_size)
+ * @fdm_reg_size: FDM bytes per sector (from nfi->fdm_size)
+ * @nand: pointer to inherited struct nand_base
+ * @read_page: read %count pages on chip
+ * @write_page: write %count pages on chip
+ * @erase_block: erase %count blocks on chip, one block is one nand_ops
+ * it is better to set nand_ops.row to block start row
+ * @is_bad_block: judge the %count blocks on chip if they are bad
+ * by vendor specification
+ * @chip_ctrl: control the chip features by nandx_ctrl_cmd
+ * @suspend: suspend nand chip
+ * @resume: resume nand chip
+ */
+struct nand_chip {
+ int nand_type;
+ int plane_num;
+ int block_num;
+ int block_size;
+ int block_pages;
+ int page_size;
+ int oob_size;
+
+ int min_program_pages;
+ int sector_size;
+ int sector_spare_size;
+ int ecc_strength;
+ int ecc_parity_size;
+ u32 fdm_ecc_size;
+ u32 fdm_reg_size;
+
+ void *nand;
+
+ int (*read_page)(struct nand_chip *chip, struct nand_ops *ops,
+ int count);
+ int (*write_page)(struct nand_chip *chip, struct nand_ops *ops,
+ int count);
+ int (*erase_block)(struct nand_chip *chip, struct nand_ops *ops,
+ int count);
+ int (*is_bad_block)(struct nand_chip *chip, struct nand_ops *ops,
+ int count);
+ int (*chip_ctrl)(struct nand_chip *chip, int cmd, void *args);
+ int (*suspend)(struct nand_chip *chip);
+ int (*resume)(struct nand_chip *chip);
+};
+
+struct nand_chip *nand_chip_init(struct nfi_resource *res);
+void nand_chip_exit(struct nand_chip *chip);
+#endif /* __NAND_CHIP_H__ */
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand_device.c b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand_device.c
new file mode 100644
index 0000000..5167ab2
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand_device.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "nand_chip.h"
+#include "nand_device.h"
+#include "nand_base.h"
+
+#define MAX_CHIP_DEVICE 4
+#define PARAM_PAGE_LEN 2048
+#define ONFI_CRC_BASE 0x4f4e
+
+/*
+ * nand_onfi_crc16 - CRC-16 (poly 0x8005, MSB-first) as mandated by ONFI
+ * @crc: seed value (ONFI_CRC_BASE for parameter pages)
+ * @p: data to checksum
+ * @len: number of bytes
+ */
+static u16 nand_onfi_crc16(u16 crc, u8 const *p, size_t len)
+{
+ size_t pos;
+ int bit;
+
+ for (pos = 0; pos < len; pos++) {
+ crc ^= p[pos] << 8;
+
+ for (bit = 0; bit < 8; bit++) {
+ if (crc & 0x8000)
+ crc = (crc << 1) ^ 0x8005;
+ else
+ crc = crc << 1;
+ }
+ }
+
+ return crc;
+}
+
+/* Split the ONFI/JEDEC address-cycle byte: low nibble is row cycles,
+ * high nibble is column cycles.
+ */
+static inline void decode_addr_cycle(u8 addr_cycle, u8 *row_cycle,
+ u8 *col_cycle)
+{
+ *col_cycle = (addr_cycle >> 4) & 0xf;
+ *row_cycle = addr_cycle & 0xf;
+}
+
+/*
+ * detect_onfi - parse an ONFI parameter page and fill the device info
+ * @dev: device description to fill
+ * @onfi: buffer holding the 3 redundant parameter-page copies
+ *
+ * Each copy is validated by signature and CRC (the CRC covers every
+ * byte except the trailing crc16 field itself); the first good copy
+ * wins. Returns 0 on success, -ENODEV if no copy is valid.
+ */
+static int detect_onfi(struct nand_device *dev,
+ struct nand_onfi_params *onfi)
+{
+ struct nand_endurance *endurance = dev->endurance;
+ u16 size, i, crc16;
+ u8 *id;
+
+ /* CRC runs over sizeof(page) - sizeof(crc16) = 254 bytes */
+ size = sizeof(struct nand_onfi_params) - sizeof(u16);
+
+ for (i = 0; i < 3; i++) {
+ crc16 = nand_onfi_crc16(ONFI_CRC_BASE, (u8 *)&onfi[i], size);
+
+ if (onfi[i].signature[0] == 'O' &&
+ onfi[i].signature[1] == 'N' &&
+ onfi[i].signature[2] == 'F' &&
+ onfi[i].signature[3] == 'I' &&
+ onfi[i].crc16 == crc16)
+ break;
+
+ /* in some spi nand, onfi signature maybe "NAND" */
+ if (onfi[i].signature[0] == 'N' &&
+ onfi[i].signature[1] == 'A' &&
+ onfi[i].signature[2] == 'N' &&
+ onfi[i].signature[3] == 'D' &&
+ onfi[i].crc16 == crc16)
+ break;
+ }
+
+ if (i == 3)
+ return -ENODEV;
+
+ /* NOTE(review): assumes dev->name points at a writable buffer of at
+ * least 20 bytes -- confirm against the device table entries.
+ */
+ memcpy(dev->name, onfi[i].model, 20);
+ id = onfi[i].manufacturer;
+ /* pack the first 8 ID bytes; manufacturer[] is 12 bytes long */
+ dev->id = NAND_PACK_ID(id[0], id[1], id[2], id[3], id[4], id[5], id[6],
+ id[7]);
+ dev->id_len = MAX_ID_NUM;
+ dev->io_width = (onfi[i].features & 1) ? NAND_IO16 : NAND_IO8;
+ decode_addr_cycle(onfi[i].addr_cycle, &dev->row_cycle,
+ &dev->col_cycle);
+ dev->target_num = 1;
+ dev->lun_num = onfi[i].lun_num;
+ dev->plane_num = BIT(onfi[i].plane_address_bits);
+ dev->block_num = onfi[i].lun_blocks / dev->plane_num;
+ dev->block_size = onfi[i].block_pages * onfi[i].page_size;
+ dev->page_size = onfi[i].page_size;
+ dev->spare_size = onfi[i].spare_size;
+
+ endurance->ecc_req = onfi[i].ecc_req;
+ endurance->pe_cycle = onfi[i].valid_block_endurance;
+ endurance->max_bitflips = endurance->ecc_req >> 1;
+
+ return 0;
+}
+
+/*
+ * detect_jedec - parse a JEDEC (JESD230) parameter page, fill device info
+ * @dev: device description to fill
+ * @jedec: buffer holding the 3 redundant parameter-page copies
+ *
+ * Mirrors detect_onfi(): validates the "JESD" signature plus CRC of
+ * each copy (CRC covers all bytes except the trailing crc16 field) and
+ * takes the first good one. Returns 0 on success, -ENODEV otherwise.
+ */
+static int detect_jedec(struct nand_device *dev,
+ struct nand_jedec_params *jedec)
+{
+ struct nand_endurance *endurance = dev->endurance;
+ u16 size, i, crc16;
+ u8 *id;
+
+ size = sizeof(struct nand_jedec_params) - sizeof(u16);
+
+ for (i = 0; i < 3; i++) {
+ crc16 = nand_onfi_crc16(ONFI_CRC_BASE, (u8 *)&jedec[i], size);
+
+ if (jedec[i].signature[0] == 'J' &&
+ jedec[i].signature[1] == 'E' &&
+ jedec[i].signature[2] == 'S' &&
+ jedec[i].signature[3] == 'D' &&
+ jedec[i].crc16 == crc16)
+ break;
+ }
+
+ if (i == 3)
+ return -ENODEV;
+
+ /* NOTE(review): assumes dev->name points at a writable buffer of at
+ * least 20 bytes -- confirm against the device table entries.
+ */
+ memcpy(dev->name, jedec[i].model, 20);
+ id = jedec[i].manufacturer;
+ dev->id = NAND_PACK_ID(id[0], id[1], id[2], id[3], id[4], id[5], id[6],
+ id[7]);
+ dev->id_len = MAX_ID_NUM;
+ dev->io_width = (jedec[i].features & 1) ? NAND_IO16 : NAND_IO8;
+ decode_addr_cycle(jedec[i].addr_cycle, &dev->row_cycle,
+ &dev->col_cycle);
+ dev->target_num = 1;
+ dev->lun_num = jedec[i].lun_num;
+ dev->plane_num = BIT(jedec[i].plane_address_bits);
+ dev->block_num = jedec[i].lun_blocks / dev->plane_num;
+ dev->block_size = jedec[i].block_pages * jedec[i].page_size;
+ dev->page_size = jedec[i].page_size;
+ dev->spare_size = jedec[i].spare_size;
+
+ /* endurance_block0[0] is "Number of bits ECC correctability" */
+ endurance->ecc_req = jedec[i].endurance_block0[0];
+ endurance->pe_cycle = jedec[i].valid_block_endurance;
+ endurance->max_bitflips = endurance->ecc_req >> 1;
+
+ return 0;
+}
+
+/*
+ * detect_parameters_page - identify the device via its parameter page
+ * @nand: base nand whose read_param_page() is used
+ *
+ * Reads the raw parameter page, then tries ONFI first and JEDEC as a
+ * fallback. Returns the filled nand->dev on success, NULL on failure.
+ */
+static struct nand_device *detect_parameters_page(struct nand_base
+ *nand)
+{
+ struct nand_device *dev = NULL;
+ void *params;
+
+ params = mem_alloc(1, PARAM_PAGE_LEN);
+ if (!params)
+ return NULL;
+
+ memset(params, 0, PARAM_PAGE_LEN);
+
+ if (nand->read_param_page(nand, params, PARAM_PAGE_LEN) < 0) {
+ pr_err("read parameters page fail!\n");
+ } else if (!detect_onfi(nand->dev, params)) {
+ dev = nand->dev;
+ } else {
+ pr_err("detect onfi device fail! try to detect jedec\n");
+ if (!detect_jedec(nand->dev, params))
+ dev = nand->dev;
+ else
+ pr_err("detect jedec device fail!\n");
+ }
+
+ mem_free(params);
+ return dev;
+}
+
+/*
+ * read_device_id - select target @cs, reset it, and read its ID bytes
+ * @id: output buffer, must hold at least MAX_ID_NUM bytes
+ *
+ * The select/reset/read_id ordering is the device command sequence and
+ * must not be changed. The bytes read are also logged. Always returns 0.
+ */
+static int read_device_id(struct nand_base *nand, int cs, u8 *id)
+{
+ int i;
+
+ nand->select_device(nand, cs);
+ nand->reset(nand);
+ nand->read_id(nand, id, MAX_ID_NUM);
+ pr_info("device %d ID: ", cs);
+
+ for (i = 0; i < MAX_ID_NUM; i++)
+ pr_info("%x ", id[i]);
+
+ pr_info("\n");
+
+ return 0;
+}
+
+/*
+ * detect_more_device - count additional targets with the same ID
+ * @id: ID bytes of target 0 to compare against
+ *
+ * Probes chip selects 1..MAX_CHIP_DEVICE-1 in order and counts how
+ * many return exactly the same ID; stops at the first mismatch.
+ * Returns the number of extra matching targets (0 if none).
+ */
+static int detect_more_device(struct nand_base *nand, u8 *id)
+{
+ u8 tmp_id[MAX_ID_NUM];
+ int cs, byte, num = 0;
+
+ for (cs = 1; cs < MAX_CHIP_DEVICE; cs++) {
+ memset(tmp_id, 0xff, MAX_ID_NUM);
+ read_device_id(nand, cs, tmp_id);
+
+ for (byte = 0; byte < MAX_ID_NUM; byte++) {
+ if (tmp_id[byte] != id[byte])
+ return num;
+ }
+
+ num += 1;
+ }
+
+ return num;
+}
+
+/*
+ * scan_device_table - look up a device-table entry by ID bytes
+ * @id: ID bytes read from the device, or NULL to request the default
+ * @id_len: valid bytes in @id; pass a negative value together with a
+ * NULL @id so every compare is skipped (see nand_detect_device)
+ *
+ * Walks the built-in table until a full id_len-byte match or until the
+ * "NO-DEVICE" sentinel entry, which both terminates the table and
+ * serves as the "not found" / default return value.
+ *
+ * NOTE(review): a NULL @id combined with a non-negative @id_len would
+ * be dereferenced below -- callers must keep the NULL/-1 pairing.
+ */
+static struct nand_device *scan_device_table(const u8 *id, int id_len)
+{
+ struct nand_device *dev;
+ int i = 0, j;
+ u8 ids[MAX_ID_NUM] = {0};
+
+ while (1) {
+ dev = nand_get_device(i);
+
+ if (!strcmp(dev->name, "NO-DEVICE"))
+ break;
+
+ if (id_len < dev->id_len) {
+ i += 1;
+ continue;
+ }
+
+ NAND_UNPACK_ID(dev->id, ids, MAX_ID_NUM);
+ for (j = 0; j < dev->id_len; j++) {
+ if (ids[j] != id[j])
+ break;
+ }
+
+ if (j == dev->id_len)
+ break;
+
+ i += 1;
+ }
+
+ return dev;
+}
+
+/*
+ * nand_detect_device - identify the attached device and bind nand->dev
+ *
+ * Installs the table's default ("NO-DEVICE") entry first so that
+ * reset/read_id have valid command descriptions, reads the ID of
+ * target 0 and matches it against the device table, then probes the
+ * remaining chip selects for identical devices when the table entry
+ * says more than one target may exist.
+ *
+ * Returns 0 on success, -ENODEV if the ID matches no table entry.
+ *
+ * The previous code checked the "NO-DEVICE" result twice; the first
+ * check returned -ENODEV, leaving the second block (the
+ * detect_parameters_page() fallback, marked "TobeFix" for a null
+ * pointer issue) unreachable. The dead fallback is removed until that
+ * issue is fixed.
+ */
+int nand_detect_device(struct nand_base *nand)
+{
+ struct nand_device *dev;
+ u8 id[MAX_ID_NUM] = { 0 };
+ int target_num = 0;
+
+ /* Get nand device default setting for reset/read_id */
+ nand->dev = scan_device_table(NULL, -1);
+
+ read_device_id(nand, 0, id);
+ dev = scan_device_table(id, MAX_ID_NUM);
+
+ if (!strcmp(dev->name, "NO-DEVICE")) {
+ pr_info("device scan fail\n");
+ return -ENODEV;
+ }
+
+ if (dev->target_num > 1)
+ target_num = detect_more_device(nand, id);
+
+ /* target 0 always exists; detect_more_device() counted the rest */
+ target_num += 1;
+ pr_info("chip has target device num: %d\n", target_num);
+
+ if (dev->target_num != target_num)
+ dev->target_num = target_num;
+
+ nand->dev = dev;
+
+ return 0;
+}
+
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand_device.h b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand_device.h
new file mode 100644
index 0000000..e142cf5
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nand_device.h
@@ -0,0 +1,608 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NAND_DEVICE_H__
+#define __NAND_DEVICE_H__
+
+/* onfi 3.2 */
+/*
+ * ONFI parameter page layout. The "block. N" comments give the byte
+ * offset of each section; the struct is __packed and ends with the
+ * page CRC at byte 254, so detect_onfi() can CRC
+ * sizeof(struct) - sizeof(u16) bytes directly over it.
+ */
+struct nand_onfi_params {
+ /* Revision information and features block. 0 */
+ /*
+ * Byte 0: 4Fh,
+ * Byte 1: 4Eh,
+ * Byte 2: 46h,
+ * Byte 3: 49h,
+ */
+ u8 signature[4];
+ /*
+ * 9-15 Reserved (0)
+ * 8 1 = supports ONFI version 3.2
+ * 7 1 = supports ONFI version 3.1
+ * 6 1 = supports ONFI version 3.0
+ * 5 1 = supports ONFI version 2.3
+ * 4 1 = supports ONFI version 2.2
+ * 3 1 = supports ONFI version 2.1
+ * 2 1 = supports ONFI version 2.0
+ * 1 1 = supports ONFI version 1.0
+ * 0 Reserved (0)
+ */
+ u16 revision;
+ /*
+ * 13-15 Reserved (0)
+ * 12 1 = supports external Vpp
+ * 11 1 = supports Volume addressing
+ * 10 1 = supports NV-DDR2
+ * 9 1 = supports EZ NAND
+ * 8 1 = supports program page register clear enhancement
+ * 7 1 = supports extended parameter page
+ * 6 1 = supports multi-plane read operations
+ * 5 1 = supports NV-DDR
+ * 4 1 = supports odd to even page Copyback
+ * 3 1 = supports multi-plane program and erase operations
+ * 2 1 = supports non-sequential page programming
+ * 1 1 = supports multiple LUN operations
+ * 0 1 = supports 16-bit data bus width
+ */
+ u16 features;
+ /*
+ * 13-15 Reserved (0)
+ * 12 1 = supports LUN Get and LUN Set Features
+ * 11 1 = supports ODT Configure
+ * 10 1 = supports Volume Select
+ * 9 1 = supports Reset LUN
+ * 8 1 = supports Small Data Move
+ * 7 1 = supports Change Row Address
+ * 6 1 = supports Change Read Column Enhanced
+ * 5 1 = supports Read Unique ID
+ * 4 1 = supports Copyback
+ * 3 1 = supports Read Status Enhanced
+ * 2 1 = supports Get Features and Set Features
+ * 1 1 = supports Read Cache commands
+ * 0 1 = supports Page Cache Program command
+ */
+ u16 opt_cmds;
+ /*
+ * 4-7 Reserved (0)
+ * 3 1 = supports Multi-plane Block Erase
+ * 2 1 = supports Multi-plane Copyback Program
+ * 1 1 = supports Multi-plane Page Program
+ * 0 1 = supports Random Data Out
+ */
+ u8 advance_cmds;
+ u8 reserved0[1];
+ u16 extend_param_len;
+ u8 param_page_num;
+ u8 reserved1[17];
+
+ /* Manufacturer information block. 32 */
+ u8 manufacturer[12];
+ u8 model[20];
+ u8 jedec_id;
+ u16 data_code;
+ u8 reserved2[13];
+
+ /* Memory organization block. 80 */
+ u32 page_size;
+ u16 spare_size;
+ u32 partial_page_size; /* obsolete */
+ u16 partial_spare_size; /* obsolete */
+ u32 block_pages;
+ u32 lun_blocks;
+ u8 lun_num;
+ /*
+ * 4-7 Column address cycles
+ * 0-3 Row address cycles
+ */
+ u8 addr_cycle;
+ u8 cell_bits;
+ u16 lun_max_bad_blocks;
+ u16 block_endurance;
+ u8 target_begin_valid_blocks;
+ u16 valid_block_endurance;
+ u8 page_program_num;
+ u8 partial_program_attr; /* obsolete */
+ u8 ecc_req;
+ /*
+ * 4-7 Reserved (0)
+ * 0-3 Number of plane address bits
+ */
+ u8 plane_address_bits;
+ /*
+ * 6-7 Reserved (0)
+ * 5 1 = lower bit XNOR block address restriction
+ * 4 1 = read cache supported
+ * 3 Address restrictions for cache operations
+ * 2 1 = program cache supported
+ * 1 1 = no block address restrictions
+ * 0 Overlapped / concurrent multi-plane support
+ */
+ u8 multi_plane_attr;
+ u8 ez_nand_support;
+ u8 reserved3[12];
+
+ /* Electrical parameters block. 128 */
+ u8 io_pin_max_capacitance;
+ /*
+ * 6-15 Reserved (0)
+ * 5 1 = supports timing mode 5
+ * 4 1 = supports timing mode 4
+ * 3 1 = supports timing mode 3
+ * 2 1 = supports timing mode 2
+ * 1 1 = supports timing mode 1
+ * 0 1 = supports timing mode 0, shall be 1
+ */
+ u16 sdr_timing_mode;
+ u16 sdr_program_cache_timing_mode; /* obsolete */
+ u16 tPROG;
+ u16 tBERS;
+ u16 tR;
+ u16 tCCS;
+ /*
+ * 7 Reserved (0)
+ * 6 1 = supports NV-DDR2 timing mode 8
+ * 5 1 = supports NV-DDR timing mode 5
+ * 4 1 = supports NV-DDR timing mode 4
+ * 3 1 = supports NV-DDR timing mode 3
+ * 2 1 = supports NV-DDR timing mode 2
+ * 1 1 = supports NV-DDR timing mode 1
+ * 0 1 = supports NV-DDR timing mode 0
+ */
+ u8 nvddr_timing_mode;
+ /*
+ * 7 1 = supports timing mode 7
+ * 6 1 = supports timing mode 6
+ * 5 1 = supports timing mode 5
+ * 4 1 = supports timing mode 4
+ * 3 1 = supports timing mode 3
+ * 2 1 = supports timing mode 2
+ * 1 1 = supports timing mode 1
+ * 0 1 = supports timing mode 0
+ */
+ u8 nvddr2_timing_mode;
+ /*
+ * 4-7 Reserved (0)
+ * 3 1 = device requires Vpp enablement sequence
+ * 2 1 = device supports CLK stopped for data input
+ * 1 1 = typical capacitance
+ * 0 tCAD value to use
+ */
+ u8 nvddr_fetures;
+ u16 clk_pin_capacitance;
+ u16 io_pin_capacitance;
+ u16 input_pin_capacitance;
+ u8 input_pin_max_capacitance;
+ /*
+ * 3-7 Reserved (0)
+ * 2 1 = supports 18 Ohm drive strength
+ * 1 1 = supports 25 Ohm drive strength
+ * 0 1 = supports driver strength settings
+ */
+ u8 drive_strength;
+ u16 tR_multi_plane;
+ u16 tADL;
+ u16 tR_ez_nand;
+ /*
+ * 6-7 Reserved (0)
+ * 5 1 = external VREFQ required for >= 200 MT/s
+ * 4 1 = supports differential signaling for DQS
+ * 3 1 = supports differential signaling for RE_n
+ * 2 1 = supports ODT value of 30 Ohms
+ * 1 1 = supports matrix termination ODT
+ * 0 1 = supports self-termination ODT
+ */
+ u8 nvddr2_features;
+ u8 nvddr2_warmup_cycles;
+ u8 reserved4[4];
+
+ /* vendor block. 164 */
+ u16 vendor_revision;
+ u8 vendor_spec[88];
+
+ /* CRC for Parameter Page. 254 */
+ u16 crc16;
+} __packed;
+
+/* JESD230-B */
+/*
+ * JEDEC parameter page layout. Section byte offsets are noted in the
+ * comments; the struct is __packed with the page CRC at byte 510, so
+ * detect_jedec() CRCs sizeof(struct) - sizeof(u16) bytes over it.
+ */
+struct nand_jedec_params {
+ /* Revision information and features block. 0 */
+ /*
+ * Byte 0:4Ah
+ * Byte 1:45h
+ * Byte 2:53h
+ * Byte 3:44h
+ */
+ u8 signature[4];
+ /*
+ * 3-15: Reserved (0)
+ * 2: 1 = supports parameter page revision 1.0 and standard revision 1.0
+ * 1: 1 = supports vendor specific parameter page
+ * 0: Reserved (0)
+ */
+ u16 revision;
+ /*
+ * 9-15 Reserved (0)
+ * 8: 1 = supports program page register clear enhancement
+ * 7: 1 = supports external Vpp
+ * 6: 1 = supports Toggle Mode DDR
+ * 5: 1 = supports Synchronous DDR
+ * 4: 1 = supports multi-plane read operations
+ * 3: 1 = supports multi-plane program and erase operations
+ * 2: 1 = supports non-sequential page programming
+ * 1: 1 = supports multiple LUN operations
+ * 0: 1 = supports 16-bit data bus width
+ */
+ u16 features;
+ /*
+ * 11-23: Reserved (0)
+ * 10: 1 = supports Synchronous Reset
+ * 9: 1 = supports Reset LUN (Primary)
+ * 8: 1 = supports Small Data Move
+ * 7: 1 = supports Multi-plane Copyback Program (Primary)
+ * 6: 1 = supports Random Data Out (Primary)
+ * 5: 1 = supports Read Unique ID
+ * 4: 1 = supports Copyback
+ * 3: 1 = supports Read Status Enhanced (Primary)
+ * 2: 1 = supports Get Features and Set Features
+ * 1: 1 = supports Read Cache commands
+ * 0: 1 = supports Page Cache Program command
+ */
+ u8 opt_cmds[3];
+ /*
+ * 8-15: Reserved (0)
+ * 7: 1 = supports secondary Read Status Enhanced
+ * 6: 1 = supports secondary Multi-plane Block Erase
+ * 5: 1 = supports secondary Multi-plane Copyback Program
+ * 4: 1 = supports secondary Multi-plane Program
+ * 3: 1 = supports secondary Random Data Out
+ * 2: 1 = supports secondary Multi-plane Copyback Read
+ * 1: 1 = supports secondary Multi-plane Read Cache Random
+ * 0: 1 = supports secondary Multi-plane Read
+ */
+ u16 secondary_cmds;
+ u8 param_page_num;
+ u8 reserved0[18];
+
+ /* Manufacturer information block. 32*/
+ u8 manufacturer[12];
+ u8 model[20];
+ u8 jedec_id[6];
+ u8 reserved1[10];
+
+ /* Memory organization block. 80 */
+ u32 page_size;
+ u16 spare_size;
+ u8 reserved2[6];
+ u32 block_pages;
+ u32 lun_blocks;
+ u8 lun_num;
+ /*
+ * 4-7 Column address cycles
+ * 0-3 Row address cycles
+ */
+ u8 addr_cycle;
+ u8 cell_bits;
+ u8 page_program_num;
+ /*
+ * 4-7 Reserved (0)
+ * 0-3 Number of plane address bits
+ */
+ u8 plane_address_bits;
+ /*
+ * 3-7: Reserved (0)
+ * 2: 1= read cache supported
+ * 1: 1 = program cache supported
+ * 0: 1= No multi-plane block address restrictions
+ */
+ u8 multi_plane_attr;
+ u8 reserved3[38];
+
+ /* Electrical parameters block. 144 */
+ /*
+ * 6-15: Reserved (0)
+ * 5: 1 = supports 20 ns speed grade (50 MHz)
+ * 4: 1 = supports 25 ns speed grade (40 MHz)
+ * 3: 1 = supports 30 ns speed grade (~33 MHz)
+ * 2: 1 = supports 35 ns speed grade (~28 MHz)
+ * 1: 1 = supports 50 ns speed grade (20 MHz)
+ * 0: 1 = supports 100 ns speed grade (10 MHz)
+ */
+ u16 sdr_speed;
+ /*
+ * 8-15: Reserved (0)
+ * 7: 1 = supports 5 ns speed grade (200 MHz)
+ * 6: 1 = supports 6 ns speed grade (~166 MHz)
+ * 5: 1 = supports 7.5 ns speed grade (~133 MHz)
+ * 4: 1 = supports 10 ns speed grade (100 MHz)
+ * 3: 1 = supports 12 ns speed grade (~83 MHz)
+ * 2: 1 = supports 15 ns speed grade (~66 MHz)
+ * 1: 1 = supports 25 ns speed grade (40 MHz)
+ * 0: 1 = supports 30 ns speed grade (~33 MHz)
+ */
+ u16 toggle_ddr_speed;
+ /*
+ * 6-15: Reserved (0)
+ * 5: 1 = supports 10 ns speed grade (100 MHz)
+ * 4: 1 = supports 12 ns speed grade (~83 MHz)
+ * 3: 1 = supports 15 ns speed grade (~66 MHz)
+ * 2: 1 = supports 20 ns speed grade (50 MHz)
+ * 1: 1 = supports 30 ns speed grade (~33 MHz)
+ * 0: 1 = supports 50 ns speed grade (20 MHz)
+ */
+ u16 sync_ddr_speed;
+ u8 sdr_features;
+ u8 toggle_ddr_features;
+ /*
+ * 2-7: Reserved (0)
+ * 1: Device supports CK stopped for data input
+ * 0: tCAD value to use
+ */
+ u8 sync_ddr_features;
+ u16 tPROG;
+ u16 tBERS;
+ u16 tR;
+ u16 tR_multi_plane;
+ u16 tCCS;
+ u16 io_pin_capacitance;
+ u16 input_pin_capacitance;
+ u16 ck_pin_capacitance;
+ /*
+ * 3-7: Reserved (0)
+ * 2: 1 = supports 18 ohm drive strength
+ * 1: 1 = supports 25 ohm drive strength
+ * 0: 1 = supports 35ohm/50ohm drive strength
+ */
+ u8 drive_strength;
+ u16 tADL;
+ u8 reserved4[36];
+
+ /* ECC and endurance block. 208 */
+ u8 target_begin_valid_blocks;
+ u16 valid_block_endurance;
+ /*
+ * Byte 0: Number of bits ECC correctability
+ * Byte 1: Codeword size
+ * Byte 2-3: Bad blocks maximum per LUN
+ * Byte 4-5: Block endurance
+ * Byte 6-7: Reserved (0)
+ */
+ u8 endurance_block0[8];
+ u8 endurance_block1[8];
+ u8 endurance_block2[8];
+ u8 endurance_block3[8];
+ u8 reserved5[29];
+
+ /* Reserved. 272 */
+ u8 reserved6[148];
+
+ /* Vendor specific block. 420 */
+ u16 vendor_revision;
+ u8 vendor_spec[88];
+
+ /* CRC for Parameter Page. 510 */
+ u16 crc16;
+} __packed;
+
+/* parallel nand io width (detect_onfi/detect_jedec select NAND_IO16
+ * when features bit 0 is set)
+ */
+enum nand_io_width {
+ NAND_IO8,
+ NAND_IO16
+};
+
+/* all supported nand timing (interface) types */
+enum nand_timing_type {
+ NAND_TIMING_SDR,
+ NAND_TIMING_SYNC_DDR,
+ NAND_TIMING_TOGGLE_DDR,
+ NAND_TIMING_NVDDR2
+};
+
+/* nand basic commands
+ * (stored as short rather than u8 -- presumably so an entry can hold a
+ * negative "not supported" marker; TODO confirm against the tables)
+ */
+struct nand_cmds {
+ short reset;
+ short read_id;
+ short read_status;
+ short read_param_page;
+ short set_feature;
+ short get_feature;
+ short read_1st;
+ short read_2nd;
+ short random_out_1st;
+ short random_out_2nd;
+ short program_1st;
+ short program_2nd;
+ short erase_1st;
+ short erase_2nd;
+ short read_cache;
+ short read_cache_last;
+ short program_cache;
+};
+
+/*
+ * addressing for nand physical address
+ * (each field is the starting bit index of that address component)
+ * @row_bit_start: row address start bit
+ * @block_bit_start: block address start bit
+ * @plane_bit_start: plane address start bit
+ * @lun_bit_start: lun address start bit
+ */
+struct nand_addressing {
+ u8 row_bit_start;
+ u8 block_bit_start;
+ u8 plane_bit_start;
+ u8 lun_bit_start;
+};
+
+/*
+ * nand operations status
+ * (fields presumably hold the bit masks to test in the status byte --
+ * TODO confirm against the device tables)
+ * @array_busy: indicates device array operation busy
+ * @write_protect: indicates the device cannot be wrote or erased
+ * @erase_fail: indicates erase operation fail
+ * @program_fail: indicates program operation fail
+ */
+struct nand_status {
+ u8 array_busy;
+ u8 write_protect;
+ u8 erase_fail;
+ u8 program_fail;
+};
+
+/*
+ * nand endurance information
+ * @pe_cycle: max program/erase cycle for nand stored data stability
+ * @ecc_req: ecc strength required for the nand, measured per 1KB
+ * @max_bitflips: bitflips is ecc corrected bits,
+ * max_bitflips is the threshold for nand stored data stability
+ * if corrected bits is over max_bitflips, stored data must be moved
+ * to another good block
+ * (detect_onfi/detect_jedec initialize it to ecc_req / 2)
+ */
+struct nand_endurance {
+ int pe_cycle;
+ int ecc_req;
+ int max_bitflips;
+};
+
+/* wait for nand busy type, passed to nfi->wait_ready() */
+enum nand_wait_type {
+ NAND_WAIT_IRQ,
+ NAND_WAIT_POLLING,
+ NAND_WAIT_TWHR2,
+};
+
+/* each nand array operations time
+ * (units are not specified here -- confirm in the device tables)
+ */
+struct nand_array_timing {
+ u16 tRST;
+ u16 tWHR;
+ u16 tR;
+ u16 tRCBSY;
+ u16 tFEAT;
+ u16 tPROG;
+ u16 tPCBSY;
+ u16 tBERS;
+ u16 tDBSY;
+};
+
+/* nand sdr interface timing required
+ * (field names follow the standard datasheet AC parameter names)
+ */
+struct nand_sdr_timing {
+ u16 tREA;
+ u16 tREH;
+ u16 tCR;
+ u16 tRP;
+ u16 tWP;
+ u16 tWH;
+ u16 tWHR;
+ u16 tCLS;
+ u16 tALS;
+ u16 tCLH;
+ u16 tALH;
+ u16 tWC;
+ u16 tRC;
+};
+
+/* nand onfi ddr (nvddr) interface timing required
+ * (field names follow the standard datasheet AC parameter names)
+ */
+struct nand_onfi_timing {
+ u16 tCAD;
+ u16 tWPRE;
+ u16 tWPST;
+ u16 tWRCK;
+ u16 tDQSCK;
+ u16 tWHR;
+};
+
+/* nand toggle ddr (toggle 1.0) interface timing required
+ * (field names follow the standard datasheet AC parameter names)
+ */
+struct nand_toggle_timing {
+ u16 tCS;
+ u16 tCH;
+ u16 tCAS;
+ u16 tCAH;
+ u16 tCALS;
+ u16 tCALH;
+ u16 tWP;
+ u16 tWPRE;
+ u16 tWPST;
+ u16 tWPSTH;
+ u16 tCR;
+ u16 tRPRE;
+ u16 tRPST;
+ u16 tRPSTH;
+ u16 tCDQSS;
+ u16 tWHR;
+};
+
+/* nand basic device information
+ * @name: model string; "NO-DEVICE" marks the table sentinel entry
+ * @id/@id_len: packed ID bytes (see NAND_PACK_ID) and valid count
+ * @target_num/@lun_num/@plane_num/@block_num: chip topology counts,
+ * combined by the nand_*_blocks() helpers below
+ * The trailing pointers reference shared description tables.
+ */
+struct nand_device {
+ u8 *name;
+ u64 id;
+ u8 id_len;
+ u8 io_width;
+ u8 row_cycle;
+ u8 col_cycle;
+ u8 target_num;
+ u8 lun_num;
+ u8 plane_num;
+ int block_num;
+ int block_size;
+ int page_size;
+ int spare_size;
+ int min_program_pages;
+ struct nand_cmds *cmds;
+ struct nand_addressing *addressing;
+ struct nand_status *status;
+ struct nand_endurance *endurance;
+ struct nand_array_timing *array_timing;
+};
+
+/* Positional initializer for struct nand_device; the argument order
+ * must stay in sync with the member order declared above.
+ */
+#define NAND_DEVICE(_name, _id, _id_len, _io_width, _row_cycle, \
+ _col_cycle, _target_num, _lun_num, _plane_num, \
+ _block_num, _block_size, _page_size, _spare_size, \
+ _min_program_pages, _cmds, _addressing, _status, \
+ _endurance, _array_timing) \
+{ \
+ _name, _id, _id_len, _io_width, _row_cycle, \
+ _col_cycle, _target_num, _lun_num, _plane_num, \
+ _block_num, _block_size, _page_size, _spare_size, \
+ _min_program_pages, _cmds, _addressing, _status, \
+ _endurance, _array_timing \
+}
+
+#define MAX_ID_NUM sizeof(u64)
+
+/*
+ * Pack 8 ID bytes into a u64, id0 in the lowest byte.
+ * Each byte is cast to u64 before shifting: the previous version
+ * shifted int-promoted u8 values, so "id3 << 24" could shift into the
+ * sign bit (undefined behavior) and then sign-extend into bits 32..63
+ * when combined with the u64 terms, corrupting the packed ID whenever
+ * id3 >= 0x80.
+ */
+#define NAND_PACK_ID(id0, id1, id2, id3, id4, id5, id6, id7) \
+ ( \
+ (u64)(id0) | (u64)(id1) << 8 | (u64)(id2) << 16 | \
+ (u64)(id3) << 24 | (u64)(id4) << 32 | (u64)(id5) << 40 | \
+ (u64)(id6) << 48 | (u64)(id7) << 56 \
+ )
+
+/* Unpack the low @len bytes of @id into the byte array @ids. */
+#define NAND_UNPACK_ID(id, ids, len) \
+ do { \
+ int _i; \
+ for (_i = 0; _i < (len); _i++) \
+ (ids)[_i] = ((id) >> (_i << 3)) & 0xff; \
+ } while (0)
+
+/* pages per block = block_size / page_size (rounded down) */
+static inline int nand_block_pages(struct nand_device *device)
+{
+ return div_down(device->block_size, device->page_size);
+}
+
+/* blocks per LUN = planes * blocks-per-plane */
+static inline int nand_lun_blocks(struct nand_device *device)
+{
+ return device->plane_num * device->block_num;
+}
+
+/* blocks per target = LUNs * planes * blocks-per-plane */
+static inline int nand_target_blocks(struct nand_device *device)
+{
+ return device->lun_num * device->plane_num * device->block_num;
+}
+
+/* blocks on the whole chip = targets * LUNs * planes * blocks-per-plane */
+static inline int nand_total_blocks(struct nand_device *device)
+{
+ return device->target_num * device->lun_num * device->plane_num *
+ device->block_num;
+}
+
+struct nand_device *nand_get_device(int index);
+#endif /* __NAND_DEVICE_H__ */
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi.h b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi.h
new file mode 100644
index 0000000..ba84e73
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NFI_H__
+#define __NFI_H__
+
+/* page layout requested of the controller by the upper layer */
+struct nfi_format {
+ int page_size; /* bytes per page */
+ int spare_size; /* spare bytes per page */
+ int ecc_req; /* required ecc strength (see nand_endurance.ecc_req) */
+};
+
+/*
+ * Abstract NFI (nand flash interface) controller.
+ * The geometry fields describe the controller's per-sector layout
+ * (nand_chip_init() copies them into struct nand_chip); the callbacks
+ * are implemented by the concrete controller driver (see nfi_base.c).
+ */
+struct nfi {
+ int sector_size;
+ int sector_spare_size;
+ int fdm_size; /* for sector */
+ int fdm_ecc_size;
+ int ecc_strength;
+ int ecc_parity_size; /* for sector */
+
+ int (*select_chip)(struct nfi *nfi, int cs);
+ int (*set_format)(struct nfi *nfi, struct nfi_format *format);
+ int (*set_timing)(struct nfi *nfi, void *timing, int type);
+ int (*nfi_ctrl)(struct nfi *nfi, int cmd, void *args);
+
+ int (*reset)(struct nfi *nfi);
+ int (*send_cmd)(struct nfi *nfi, short cmd);
+ int (*send_addr)(struct nfi *nfi, int col, int row,
+ int col_cycle, int row_cycle);
+ int (*trigger)(struct nfi *nfi);
+
+ int (*write_page)(struct nfi *nfi, u8 *data, u8 *fdm);
+ int (*write_bytes)(struct nfi *nfi, u8 *data, int count);
+ int (*read_sectors)(struct nfi *nfi, u8 *data, u8 *fdm,
+ int sectors);
+ int (*read_bytes)(struct nfi *nfi, u8 *data, int count);
+
+ int (*wait_ready)(struct nfi *nfi, int type, u32 timeout);
+
+ int (*enable_randomizer)(struct nfi *nfi, u32 row, bool encode);
+ int (*disable_randomizer)(struct nfi *nfi);
+};
+
+struct nfi *nfi_init(struct nfi_resource *res);
+void nfi_exit(struct nfi *nfi);
+
+#endif /* __NFI_H__ */
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi/nfi_base.c b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi/nfi_base.c
new file mode 100644
index 0000000..35d33b4
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi/nfi_base.c
@@ -0,0 +1,1340 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+/**
+ * nfi_base.c - the base logic for nfi to access nand flash
+ *
+ * slc/mlc/tlc could use same code to access nand
+ * of cause, there still some work need to do
+ * even for spi nand, there should be a chance to integrate code together
+ */
+
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "../nfi.h"
+#include "../nand_device.h"
+#include "nfi_regs.h"
+#include "nfiecc.h"
+#include "nfi_base.h"
+
+/* Spare-area sizes (bytes per 512B sector) selectable in NFI_PAGEFMT
+ * on MT8512; adjust_spare() picks an index into this table.
+ * NOTE(review): adjust_spare() assumes ascending order, but 62
+ * precedes 61 here -- confirm against the register manual.
+ */
+static const int spare_size_mt8512[] = {
+ 16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51,
+ 52, 62, 61, 63, 64, 67, 74
+};
+
+/* Select the NFI_RANDOM_CNFG seed shift / enable bit for the
+ * encode (program) or decode (read) direction.
+ */
+#define RAND_SEED_SHIFT(op) \
+ ((op) == RAND_ENCODE ? ENCODE_SEED_SHIFT : DECODE_SEED_SHIFT)
+#define RAND_EN(op) \
+ ((op) == RAND_ENCODE ? RAN_ENCODE_EN : RAN_DECODE_EN)
+
+#define SS_SEED_NUM 128
+/* Randomizer seed table; the seed for a page is chosen by
+ * row % SS_SEED_NUM in nfi_enable_randomizer().
+ */
+static u16 ss_randomizer_seed[SS_SEED_NUM] = {
+ 0x576A, 0x05E8, 0x629D, 0x45A3, 0x649C, 0x4BF0, 0x2342, 0x272E,
+ 0x7358, 0x4FF3, 0x73EC, 0x5F70, 0x7A60, 0x1AD8, 0x3472, 0x3612,
+ 0x224F, 0x0454, 0x030E, 0x70A5, 0x7809, 0x2521, 0x484F, 0x5A2D,
+ 0x492A, 0x043D, 0x7F61, 0x3969, 0x517A, 0x3B42, 0x769D, 0x0647,
+ 0x7E2A, 0x1383, 0x49D9, 0x07B8, 0x2578, 0x4EEC, 0x4423, 0x352F,
+ 0x5B22, 0x72B9, 0x367B, 0x24B6, 0x7E8E, 0x2318, 0x6BD0, 0x5519,
+ 0x1783, 0x18A7, 0x7B6E, 0x7602, 0x4B7F, 0x3648, 0x2C53, 0x6B99,
+ 0x0C23, 0x67CF, 0x7E0E, 0x4D8C, 0x5079, 0x209D, 0x244A, 0x747B,
+ 0x350B, 0x0E4D, 0x7004, 0x6AC3, 0x7F3E, 0x21F5, 0x7A15, 0x2379,
+ 0x1517, 0x1ABA, 0x4E77, 0x15A1, 0x04FA, 0x2D61, 0x253A, 0x1302,
+ 0x1F63, 0x5AB3, 0x049A, 0x5AE8, 0x1CD7, 0x4A00, 0x30C8, 0x3247,
+ 0x729C, 0x5034, 0x2B0E, 0x57F2, 0x00E4, 0x575B, 0x6192, 0x38F8,
+ 0x2F6A, 0x0C14, 0x45FC, 0x41DF, 0x38DA, 0x7AE1, 0x7322, 0x62DF,
+ 0x5E39, 0x0E64, 0x6D85, 0x5951, 0x5937, 0x6281, 0x33A1, 0x6A32,
+ 0x3A5A, 0x2BAC, 0x743A, 0x5E74, 0x3B2E, 0x7EC7, 0x4FD2, 0x5D28,
+ 0x751F, 0x3EF8, 0x39B1, 0x4E49, 0x746B, 0x6EF6, 0x44BE, 0x6DB7
+};
+
+/* Turn on the HW data scrambler for the given page (row).
+ * encode=true selects the program path, false the read path; the
+ * seed is picked from ss_randomizer_seed[] by page number so that
+ * reads reproduce the scrambling applied at program time.
+ */
+static int nfi_enable_randomizer(struct nfi *nfi, u32 row, bool encode)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ enum randomizer_op op = RAND_ENCODE;
+ void *regs = nb->res.nfi_regs;
+ u32 val;
+
+ if (!encode)
+ op = RAND_DECODE;
+
+ /* randomizer type and reseed type setup */
+ val = readl(regs + NFI_CNFG);
+ val |= CNFG_RAND_SEL | CNFG_RESEED_SEC_EN;
+ writel(val, regs + NFI_CNFG);
+
+ /* randomizer seed and type setup */
+ val = ss_randomizer_seed[row % SS_SEED_NUM] & RAN_SEED_MASK;
+ val <<= RAND_SEED_SHIFT(op);
+ val |= RAND_EN(op);
+ writel(val, regs + NFI_RANDOM_CNFG);
+
+ return 0;
+}
+
+/* Turn the HW data scrambler off by clearing NFI_RANDOM_CNFG
+ * (both encode and decode enable bits live in that register).
+ */
+static int nfi_disable_randomizer(struct nfi *nfi)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+
+ writel(0, nb->res.nfi_regs + NFI_RANDOM_CNFG);
+
+ return 0;
+}
+
+/* NFI interrupt handler (registered in nfi_init()).
+ * Masks the interrupt sources that fired and signals the waiter
+ * blocked in wait_ready_irq()/rw_wait_done().
+ */
+static int nfi_irq_handler(void *data)
+{
+ struct nfi_base *nb = (struct nfi_base *) data;
+ void *regs = nb->res.nfi_regs;
+ u16 status, en;
+
+ status = readw(regs + NFI_INTR_STA);
+ en = readw(regs + NFI_INTR_EN);
+
+ /* not ours: no enabled source is pending */
+ if (!(status & en))
+ return NAND_IRQ_NONE;
+
+ /* keep only the enabled sources that did not fire */
+ writew(~status & en, regs + NFI_INTR_EN);
+
+ nandx_event_complete(nb->done);
+
+ return NAND_IRQ_HANDLED;
+}
+
+/* Route subsequent NFI operations to chip-select line @cs. */
+static int nfi_select_chip(struct nfi *nfi, int cs)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+
+ writel(cs, nb->res.nfi_regs + NFI_CSEL);
+
+ return 0;
+}
+
+/* Replace the operation-mode field of NFI_CNFG with @mode
+ * (e.g. CNFG_CUSTOM_MODE), preserving the other bits.
+ */
+static inline void set_op_mode(void *regs, u32 mode)
+{
+ u32 val = readl(regs + NFI_CNFG);
+
+ val &= ~CNFG_OP_MODE_MASK;
+ val |= mode;
+
+ writel(val, regs + NFI_CNFG);
+}
+
+/* Soft-reset the NFI: flush the FIFO, abort the bus master, wait for
+ * the internal state machines to go idle, then reset once more and
+ * clear the strobe latch.  Returns the poll status (0 on success).
+ */
+static int nfi_reset(struct nfi *nfi)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ void *regs = nb->res.nfi_regs;
+ int ret, val;
+
+ /* The NFI reset to reset all registers and force the NFI
+ * master be early terminated
+ */
+ writel(CON_FIFO_FLUSH | CON_NFI_RST, regs + NFI_CON);
+
+ /* check state of NFI internal FSM and NAND interface FSM */
+ ret = readl_poll_timeout_atomic(regs + NFI_MASTER_STA, val,
+ !(val & MASTER_BUS_BUSY),
+ 10, NFI_TIMEOUT);
+ if (ret)
+ pr_warn("nfi reset timeout...\n");
+
+ writel(CON_FIFO_FLUSH | CON_NFI_RST, regs + NFI_CON);
+ writew(STAR_DE, regs + NFI_STRDATA);
+
+ return ret;
+}
+
+/* Swap the factory bad-block marker byte between the data area and
+ * the first FDM byte, so the marker survives ECC/auto-format layout.
+ * Called symmetrically before write and after read; no-op for raw
+ * (ECC-off) access or when the marker sector is outside the window
+ * [col/sector_size, col/sector_size + rw_sectors] of this transfer.
+ */
+static void bad_mark_swap(struct nfi *nfi, u8 *buf, u8 *fdm)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ u32 start_sector = div_down(nb->col, nfi->sector_size);
+ u32 data_mark_pos;
+ u8 temp;
+
+ /* raw access, no need to do swap. */
+ if (!nb->ecc_en)
+ return;
+
+ if (!buf || !fdm)
+ return;
+
+ /* NOTE(review): upper bound uses '>', which accepts
+  * sector == start_sector + rw_sectors, one past the last sector
+  * actually transferred -- confirm '>=' was not intended.
+  */
+ if (nb->bad_mark_ctrl.sector < start_sector ||
+ nb->bad_mark_ctrl.sector > start_sector + nb->rw_sectors)
+ return;
+
+ data_mark_pos = nb->bad_mark_ctrl.position +
+ (nb->bad_mark_ctrl.sector - start_sector) *
+ nfi->sector_size;
+
+ temp = *fdm;
+ *fdm = *(buf + data_mark_pos);
+ *(buf + data_mark_pos) = temp;
+}
+
+/* Return the position of @sector's FDM bytes inside the caller's oob
+ * buffer.  Returns NULL when no FDM buffer was supplied.
+ */
+static u8 *fdm_shift(struct nfi *nfi, u8 *fdm, int sector)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ u8 *pos;
+
+ if (!fdm)
+ return NULL;
+
+ /* map the sector's FDM data to free oob:
+ * the beginning of the oob area stores the FDM data of bad mark sectors
+ */
+ if (sector < nb->bad_mark_ctrl.sector)
+ pos = fdm + (sector + 1) * nfi->fdm_size;
+ else if (sector == nb->bad_mark_ctrl.sector)
+ pos = fdm;
+ else
+ pos = fdm + sector * nfi->fdm_size;
+
+ return pos;
+
+}
+
+/* Locate the factory bad-block marker inside the on-flash layout:
+ * with sector+spare interleaving, byte page_size of the raw page
+ * falls at offset (page_size % (sector+spare)) within sector
+ * (page_size / (sector+spare)).  Also installs the swap/shift hooks.
+ */
+static void set_bad_mark_ctrl(struct nfi_base *nb)
+{
+ int temp, page_size = nb->format.page_size;
+
+ nb->bad_mark_ctrl.bad_mark_swap = bad_mark_swap;
+ nb->bad_mark_ctrl.fdm_shift = fdm_shift;
+
+ temp = nb->nfi.sector_size + nb->nfi.sector_spare_size;
+ nb->bad_mark_ctrl.sector = div_down(page_size, temp);
+ nb->bad_mark_ctrl.position = reminder(page_size, temp);
+}
+
+/* NOTE: check if page_size valid future */
+/* Program NFI_PAGEFMT from the negotiated geometry: page-size class,
+ * sector-size select, spare-size table index, FDM sizes.  When the
+ * custom-sector mode is on, also program the raw per-sector length
+ * into NFI_SECCUS_SIZE.  Returns -EINVAL for unsupported page sizes.
+ */
+static int setup_format(struct nfi_base *nb, int spare_idx)
+{
+ struct nfi *nfi = &nb->nfi;
+ u32 page_size = nb->format.page_size;
+ u32 val;
+
+ switch (page_size) {
+ case 512:
+ val = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512;
+ break;
+
+ case KB(2):
+ if (nfi->sector_size == 512)
+ val = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512;
+ else
+ val = PAGEFMT_512_2K;
+
+ break;
+
+ case KB(4):
+ if (nfi->sector_size == 512)
+ val = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512;
+ else
+ val = PAGEFMT_2K_4K;
+
+ break;
+
+ case KB(8):
+ if (nfi->sector_size == 512)
+ val = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512;
+ else
+ val = PAGEFMT_4K_8K;
+
+ break;
+
+ case KB(16):
+ val = PAGEFMT_8K_16K;
+ break;
+
+ default:
+ pr_err("invalid page len: %d\n", page_size);
+ return -EINVAL;
+ }
+
+ val |= spare_idx << PAGEFMT_SPARE_SHIFT;
+ val |= nfi->fdm_size << PAGEFMT_FDM_SHIFT;
+ val |= nfi->fdm_ecc_size << PAGEFMT_FDM_ECC_SHIFT;
+ writel(val, nb->res.nfi_regs + NFI_PAGEFMT);
+
+ if (nb->custom_sector_en) {
+ val = nfi->sector_spare_size + nfi->sector_size;
+ val |= SECCUS_SIZE_EN;
+ writel(val, nb->res.nfi_regs + NFI_SECCUS_SIZE);
+ }
+
+ return 0;
+}
+
+/* Clamp *spare down to the nearest supported spare size and return
+ * its index in caps->spare_size[].  Table entries are per-512B
+ * sector, hence the x2 multiplier for 1KB sectors.  Returns -EINVAL
+ * when *spare is below the smallest supported size.
+ */
+static int adjust_spare(struct nfi_base *nb, int *spare)
+{
+ int multi = nb->nfi.sector_size == 512 ? 1 : 2;
+ int i, count = nb->caps->spare_size_num;
+
+ /* at or above the largest entry: clamp to the largest */
+ if (*spare >= nb->caps->spare_size[count - 1] * multi) {
+ *spare = nb->caps->spare_size[count - 1] * multi;
+ return count - 1;
+ }
+
+ if (*spare < nb->caps->spare_size[0] * multi)
+ return -EINVAL;
+
+ /* otherwise pick the largest entry not exceeding *spare */
+ for (i = 1; i < count; i++) {
+ if (*spare < nb->caps->spare_size[i] * multi) {
+ *spare = nb->caps->spare_size[i - 1] * multi;
+ return i - 1;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/* Negotiate the controller layout for the attached device.
+ * Derives per-sector spare size, ECC strength and FDM size from the
+ * requested page/spare geometry and ECC requirement, allocates the
+ * bounce buffer, then commits the result via setup_format().
+ * Returns 0 on success or a negative error.
+ */
+static int nfi_set_format(struct nfi *nfi, struct nfi_format *format)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ struct nfiecc *ecc = nb->ecc;
+ int ecc_strength = format->ecc_req;
+ int min_fdm, min_ecc, max_ecc;
+ u32 temp, page_sectors;
+ int spare_idx = 0;
+
+ /* bounce buffer for one full raw page (data + spare) */
+ if (!nb->buf) {
+#if NANDX_BULK_IO_USE_DRAM
+ nb->buf = NANDX_NFI_BUF_ADDR;
+#else
+ nb->buf = mem_alloc(1, format->page_size + format->spare_size);
+#endif
+ if (!nb->buf)
+ return -ENOMEM;
+ }
+
+ nb->format = *format;
+
+ /* ToBeFixed: for spi nand, now sector size is 512,
+ * it should be same with slc.
+ */
+ nfi->sector_size = 512;
+ /* format->ecc_req is the requirement per 1KB */
+ ecc_strength >>= 1;
+
+ page_sectors = div_down(format->page_size, nfi->sector_size);
+ nfi->sector_spare_size = div_down(format->spare_size, page_sectors);
+
+ if (!nb->custom_sector_en) {
+ spare_idx = adjust_spare(nb, &nfi->sector_spare_size);
+ if (spare_idx < 0)
+ return -EINVAL;
+ }
+
+ /* calculate ecc strength and fdm size */
+ /* min_ecc: strength when the FDM area is at its maximum */
+ temp = (nfi->sector_spare_size - nb->caps->max_fdm_size) * 8;
+ min_ecc = div_down(temp, nb->caps->ecc_parity_bits);
+ min_ecc = ecc->adjust_strength(ecc, min_ecc);
+ if (min_ecc < 0)
+ return -EINVAL;
+
+ /* max_ecc: strength when FDM shrinks to the min oob requirement */
+ temp = div_up(nb->res.min_oob_req, page_sectors);
+ temp = (nfi->sector_spare_size - temp) * 8;
+ max_ecc = div_down(temp, nb->caps->ecc_parity_bits);
+ max_ecc = ecc->adjust_strength(ecc, max_ecc);
+ if (max_ecc < 0)
+ return -EINVAL;
+
+ /* NOTE(review): 'temp' here is still the spare-bit count from the
+  * max_ecc computation, not the adjusted max_ecc strength -- confirm
+  * 'max_ecc * ecc_parity_bits' was not intended.
+  */
+ temp = div_up(temp * nb->caps->ecc_parity_bits, 8);
+ temp = nfi->sector_spare_size - temp;
+ min_fdm = min_t(u32, temp, (u32)nb->caps->max_fdm_size);
+
+ if (ecc_strength > max_ecc) {
+ pr_warn("required ecc strength %d, max supported %d\n",
+ ecc_strength, max_ecc);
+ nfi->ecc_strength = max_ecc;
+ nfi->fdm_size = min_fdm;
+ /* NOTE(review): this branch compares the unhalved ecc_req while
+  * the branch above compares the halved value -- confirm.
+  */
+ } else if (format->ecc_req < min_ecc) {
+ nfi->ecc_strength = min_ecc;
+ nfi->fdm_size = nb->caps->max_fdm_size;
+ } else {
+ ecc_strength = ecc->adjust_strength(ecc, ecc_strength);
+ if (ecc_strength < 0)
+ return -EINVAL;
+
+ nfi->ecc_strength = ecc_strength;
+ temp = div_up(ecc_strength * nb->caps->ecc_parity_bits, 8);
+ nfi->fdm_size = nfi->sector_spare_size - temp;
+ }
+
+ nb->page_sectors = div_down(format->page_size, nfi->sector_size);
+
+ /* some IC has fixed fdm_ecc_size, if not assigend, set to fdm_size */
+ nfi->fdm_ecc_size = nb->caps->fdm_ecc_size ? : nfi->fdm_size;
+
+ nfi->ecc_parity_size = div_up(nfi->ecc_strength *
+ nb->caps->ecc_parity_bits,
+ 8);
+ set_bad_mark_ctrl(nb);
+
+ pr_info("sector_size: %d\n", nfi->sector_size);
+ pr_info("sector_spare_size: %d\n", nfi->sector_spare_size);
+ pr_info("fdm_size: %d\n", nfi->fdm_size);
+ pr_info("fdm_ecc_size: %d\n", nfi->fdm_ecc_size);
+ pr_info("ecc_strength: %d\n", nfi->ecc_strength);
+ pr_info("ecc_parity_size: %d\n", nfi->ecc_parity_size);
+
+ return setup_format(nb, spare_idx);
+}
+
+/* Runtime configuration switchboard: toggles DMA/IRQ/ECC/auto-format
+ * flags on the nfi_base state.  @args points at a bool for the flag
+ * commands, or at the respective enum for the ECC mode commands.
+ * Returns -EOPNOTSUPP for unknown commands.
+ */
+static int nfi_ctrl(struct nfi *nfi, int cmd, void *args)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ int ret = 0;
+
+ switch (cmd) {
+ case NFI_CTRL_DMA:
+ nb->dma_en = *(bool *)args;
+ break;
+
+ case NFI_CTRL_AUTOFORMAT:
+ nb->auto_format = *(bool *)args;
+ break;
+
+ case NFI_CTRL_NFI_IRQ:
+ nb->nfi_irq_en = *(bool *)args;
+ break;
+
+ case NFI_CTRL_PAGE_IRQ:
+ nb->page_irq_en = *(bool *)args;
+ break;
+
+ case NFI_CTRL_BAD_MARK_SWAP:
+ nb->bad_mark_swap_en = *(bool *)args;
+ break;
+
+ case NFI_CTRL_ECC:
+ nb->ecc_en = *(bool *)args;
+ break;
+
+ case NFI_CTRL_ECC_MODE:
+ nb->ecc_mode = *(enum nfiecc_mode *)args;
+ break;
+
+ case NFI_CTRL_ECC_CLOCK:
+ /* NOTE: it seems that there's nothing need to do
+ * if new IC need, just add tht logic
+ */
+ nb->ecc_clk_en = *(bool *)args;
+ break;
+
+ case NFI_CTRL_ECC_IRQ:
+ nb->ecc_irq_en = *(bool *)args;
+ break;
+
+ case NFI_CTRL_ECC_DECODE_MODE:
+ nb->ecc_deccon = *(enum nfiecc_deccon *)args;
+ break;
+
+ default:
+ pr_err("invalid arguments.\n");
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ /* NOTE(review): reads 4 bytes through args even when the caller
+  * passed a bool -- harmless on most ABIs but strictly out of
+  * bounds; confirm acceptable for a debug print.
+  */
+ pr_debug("%s: set cmd(%d) to %d\n", __func__, cmd, *(int *)args);
+ return ret;
+}
+
+/* Issue one NAND command cycle and wait for the command state
+ * machine to drain.  Negative @cmd values are rejected (callers use
+ * them as "no command" sentinels).  Returns the poll status.
+ */
+static int nfi_send_cmd(struct nfi *nfi, short cmd)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ void *regs = nb->res.nfi_regs;
+ int ret;
+ u32 val;
+
+ pr_debug("%s: cmd 0x%x\n", __func__, cmd);
+
+ if (cmd < 0)
+ return -EINVAL;
+
+ set_op_mode(regs, nb->op_mode);
+
+ writel(cmd, regs + NFI_CMD);
+
+ ret = readl_poll_timeout_atomic(regs + NFI_STA,
+ val, !(val & STA_CMD),
+ 5, NFI_TIMEOUT);
+ if (ret)
+ pr_err("send cmd 0x%x timeout\n", cmd);
+
+ return ret;
+}
+
+/* Issue the address cycles for an operation and remember col/row for
+ * later use (bad-mark swap, debug).  The controller emits
+ * @col_cycle column cycles followed by @row_cycle row cycles.
+ * Returns the poll status for the address state machine.
+ */
+static int nfi_send_addr(struct nfi *nfi, int col, int row,
+ int col_cycle, int row_cycle)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ void *regs = nb->res.nfi_regs;
+ int ret;
+ u32 val;
+
+ pr_debug("%s: col 0x%x, row 0x%x, col_cycle 0x%x, row_cycle 0x%x\n",
+ __func__, col, row, col_cycle, row_cycle);
+
+ nb->col = col;
+ nb->row = row;
+
+ writel(col, regs + NFI_COLADDR);
+ writel(row, regs + NFI_ROWADDR);
+ writel(col_cycle | (row_cycle << ROW_SHIFT), regs + NFI_ADDRNOB);
+
+ ret = readl_poll_timeout_atomic(regs + NFI_STA,
+ val, !(val & STA_ADDR),
+ 5, NFI_TIMEOUT);
+ if (ret)
+ pr_err("send address timeout\n");
+
+ return ret;
+}
+
+/* Hook for controllers that need an explicit trigger after cmd/addr;
+ * the base NFI starts automatically, so this is a no-op.
+ */
+static int nfi_trigger(struct nfi *nfi)
+{
+ /* Nothing need to do. */
+ return 0;
+}
+
+/* Poll until the PIO data port has a byte ready (read) or space
+ * (write).  Must precede every NFI_DATAR/NFI_DATAW access.
+ */
+static inline int wait_io_ready(void *regs)
+{
+ u32 val;
+ int ret;
+
+ ret = readl_poll_timeout_atomic(regs + NFI_PIO_DIRDY,
+ val, val & PIO_DI_RDY,
+ 2, NFI_TIMEOUT);
+ if (ret)
+ pr_err("wait io ready timeout\n");
+
+ return ret;
+}
+
+/* Wait for the device's busy-to-ready transition using the NFI
+ * interrupt.  Arms the busy-return interrupt, then re-checks NFI_STA
+ * to close the race where the device became ready before the
+ * interrupt was enabled.
+ * NOTE(review): NFI_CNRNB is armed with writel but cleared with
+ * writew below -- confirm the register width.
+ */
+static int wait_ready_irq(struct nfi_base *nb, u32 timeout)
+{
+ void *regs = nb->res.nfi_regs;
+ int ret;
+ u32 val;
+
+ writel(0xf1, regs + NFI_CNRNB);
+ nandx_event_init(nb->done);
+
+ writel(INTR_BUSY_RETURN_EN, (void *)(regs + NFI_INTR_EN));
+
+ /**
+ * check if nand has already been ready,
+ * to avoid losing the ready event because the irq was missed.
+ */
+ val = readl(regs + NFI_STA);
+ if (val & STA_BUSY2READY) {
+ readl(regs + NFI_INTR_STA); /* ack the pending status */
+ writel(0, (void *)(regs + NFI_INTR_EN));
+ return 0;
+ }
+
+ ret = nandx_event_wait_complete(nb->done, timeout);
+
+ writew(0, regs + NFI_CNRNB);
+ return ret;
+}
+
+/* Placeholder for the tWHR2 timing wait used by TLC parts;
+ * intentionally empty on this base implementation.
+ */
+static void wait_ready_twhr2(struct nfi_base *nb, u32 timeout)
+{
+ /* NOTE: this for tlc */
+}
+
+/* Busy-wait for the device's busy-to-ready transition by polling
+ * NFI_STA; the CNRNB register enables busy/ready monitoring for the
+ * duration of the poll.  Returns the poll status.
+ */
+static int wait_ready_poll(struct nfi_base *nb, u32 timeout)
+{
+ void *regs = nb->res.nfi_regs;
+ int ret;
+ u32 val;
+
+ writel(0x21, regs + NFI_CNRNB);
+ ret = readl_poll_timeout_atomic(regs + NFI_STA, val,
+ val & STA_BUSY2READY,
+ 2, timeout);
+ writew(0, regs + NFI_CNRNB);
+
+ return ret;
+}
+
+/* Dispatch a ready-wait by strategy: interrupt-driven (only when the
+ * NFI irq is enabled), polling, or the TLC tWHR2 stub.  Returns 0 on
+ * ready, -EINVAL for a bad/unavailable type, or the wait status.
+ */
+static int nfi_wait_ready(struct nfi *nfi, int type, u32 timeout)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ int ret;
+
+ switch (type) {
+ case NAND_WAIT_IRQ:
+ if (nb->nfi_irq_en)
+ ret = wait_ready_irq(nb, timeout);
+ else
+ ret = -EINVAL;
+
+ break;
+
+ case NAND_WAIT_POLLING:
+ ret = wait_ready_poll(nb, timeout);
+ break;
+
+ case NAND_WAIT_TWHR2:
+ wait_ready_twhr2(nb, timeout);
+ ret = 0;
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ pr_err("%s: type 0x%x, timeout 0x%x\n",
+ __func__, type, timeout);
+
+ return ret;
+}
+
+/* Configure and start the ECC engine for decoding @sectors sectors;
+ * the codeword covers the sector data plus the ECC-protected FDM.
+ */
+static int enable_ecc_decode(struct nfi_base *nb, int sectors)
+{
+ struct nfi *nfi = &nb->nfi;
+ struct nfiecc *ecc = nb->ecc;
+
+ ecc->config.op = ECC_DECODE;
+ ecc->config.mode = nb->ecc_mode;
+ ecc->config.deccon = nb->ecc_deccon;
+ ecc->config.sectors = sectors;
+ ecc->config.len = nfi->sector_size + nfi->fdm_ecc_size;
+ ecc->config.strength = nfi->ecc_strength;
+
+ return ecc->enable(ecc);
+}
+
+/* Configure and start the ECC engine for encoding (program path);
+ * the sector count is taken from NFI_CON, so only the codeword
+ * length and strength are set here.
+ */
+static int enable_ecc_encode(struct nfi_base *nb)
+{
+ struct nfiecc *ecc = nb->ecc;
+ struct nfi *nfi = &nb->nfi;
+
+ ecc->config.op = ECC_ENCODE;
+ ecc->config.mode = nb->ecc_mode;
+ ecc->config.len = nfi->sector_size + nfi->fdm_ecc_size;
+ ecc->config.strength = nfi->ecc_strength;
+
+ return ecc->enable(ecc);
+}
+
+/* Copy each sector's FDM bytes out of the NFI_FDML/FDMM register
+ * pairs into @fdm (fdm_size bytes per sector).  FDML holds bytes
+ * 0-3, FDMM bytes 4-7.  With bad-mark swap enabled, fdm_shift()
+ * relocates each sector's bytes inside the caller's buffer.
+ */
+static void read_fdm(struct nfi_base *nb, u8 *fdm, int start_sector,
+ int sectors)
+{
+ void *regs = nb->res.nfi_regs;
+ int j, i = start_sector;
+ u32 vall, valm;
+ u8 *buf = fdm;
+
+ for (; i < start_sector + sectors; i++) {
+ if (nb->bad_mark_swap_en)
+ buf = nb->bad_mark_ctrl.fdm_shift(&nb->nfi, fdm, i);
+
+ vall = readl(regs + NFI_FDML(i));
+ valm = readl(regs + NFI_FDMM(i));
+
+ /* byte j comes from word (j/4), lane (j%4) */
+ for (j = 0; j < nb->nfi.fdm_size; j++)
+ *buf++ = (j >= 4 ? valm : vall) >> ((j & 3) << 3);
+ }
+}
+
+/* Load each sector's FDM (OOB user) bytes into the NFI_FDML/FDMM
+ * register pairs before a programmed write.  FDML holds bytes 0-3,
+ * FDMM bytes 4-7; positions beyond nfi->fdm_size are padded 0xff.
+ *
+ * Fix: the FDMM loop tested (j < fdm_size), same as the FDML loop,
+ * so for 4 < fdm_size < 8 it consumed 8 input bytes per sector
+ * instead of fdm_size, walking off the caller's per-sector stride
+ * (read_fdm() advances exactly fdm_size bytes per sector).  Test
+ * the absolute byte index (j + 4) instead so the consumed stride
+ * matches fdm_size and unused lanes are padded with 0xff.
+ */
+static void write_fdm(struct nfi_base *nb, u8 *fdm)
+{
+ struct nfi *nfi = &nb->nfi;
+ void *regs = nb->res.nfi_regs;
+ u32 vall, valm;
+ int i, j;
+ u8 *buf = fdm;
+
+ for (i = 0; i < nb->page_sectors; i++) {
+ if (nb->bad_mark_swap_en)
+ buf = nb->bad_mark_ctrl.fdm_shift(nfi, fdm, i);
+
+ vall = 0;
+ for (j = 0; j < 4; j++)
+ vall |= (j < nfi->fdm_size ? *buf++ : 0xff) << (j * 8);
+ writel(vall, regs + NFI_FDML(i));
+
+ valm = 0;
+ for (j = 0; j < 4; j++)
+ valm |= (j + 4 < nfi->fdm_size ? *buf++ : 0xff) << (j * 8);
+ writel(valm, regs + NFI_FDMM(i));
+ }
+}
+
+/* NOTE: pio not use auto format */
+/* PIO read path: pull raw (data + spare) bytes for @sectors sectors
+ * through the byte-wide data port into the bounce buffer, then, when
+ * ECC is on, correct each sector in software-driven fashion and
+ * split it into @data / @fdm.  Returns max bitflips on success (ECC
+ * path), 0 for raw reads, or a negative error.
+ */
+static int pio_rx_data(struct nfi_base *nb, u8 *data, u8 *fdm,
+ int sectors)
+{
+ struct nfiecc_status ecc_status;
+ struct nfi *nfi = &nb->nfi;
+ void *regs = nb->res.nfi_regs;
+ u32 val, bitflips = 0;
+ int len, ret, i;
+ u8 *buf;
+
+ val = readl(regs + NFI_CNFG) | CNFG_BYTE_RW;
+ writel(val, regs + NFI_CNFG);
+
+ len = nfi->sector_size + nfi->sector_spare_size;
+ len *= sectors;
+
+ for (i = 0; i < len; i++) {
+ ret = wait_io_ready(regs);
+ if (ret)
+ return ret;
+
+ nb->buf[i] = readb(regs + NFI_DATAR);
+ }
+
+ /* TODO: do error handle for autoformat setting of pio */
+ if (nb->ecc_en) {
+ for (i = 0; i < sectors; i++) {
+ buf = nb->buf + i * (nfi->sector_size +
+ nfi->sector_spare_size);
+ ret = nb->ecc->correct_data(nb->ecc, &ecc_status,
+ buf, i);
+ if (data)
+ memcpy(data + i * nfi->sector_size,
+ buf, nfi->sector_size);
+ if (fdm)
+ memcpy(fdm + i * nfi->fdm_size,
+ buf + nfi->sector_size, nfi->fdm_size);
+ if (ret) {
+ /* correction failed: query the engine for the
+  * sector's decode status / bitflip count
+  */
+ ret = nb->ecc->decode_status(nb->ecc, i, 1);
+ if (ret < 0)
+ return ret;
+
+ bitflips = max_t(int, (int)bitflips, ret);
+ }
+ }
+
+ return bitflips;
+ }
+
+ /* raw read, only data not null, and its length should be $len */
+ if (data)
+ memcpy(data, nb->buf, len);
+
+ return 0;
+}
+
+/* PIO write path: push @sectors sectors of data through the
+ * byte-wide data port.  With ECC on, only the data portion is sent
+ * here (FDM was loaded into the registers in rw_prepare()); raw
+ * writes send data + spare.
+ */
+static int pio_tx_data(struct nfi_base *nb, u8 *data, u8 *fdm,
+ int sectors)
+{
+ struct nfi *nfi = &nb->nfi;
+ void *regs = nb->res.nfi_regs;
+ u32 i, val;
+ int len, ret;
+
+ val = readw(regs + NFI_CNFG) | CNFG_BYTE_RW;
+ writew(val, regs + NFI_CNFG);
+
+ len = nb->ecc_en ? nfi->sector_size :
+ nfi->sector_size + nfi->sector_spare_size;
+ len *= sectors;
+
+ /* data shouldn't null,
+ * and if ecc enable ,fdm been written in prepare process
+ */
+ for (i = 0; i < len; i++) {
+ ret = wait_io_ready(regs);
+ if (ret)
+ return ret;
+ writeb(data[i], regs + NFI_DATAW);
+ }
+
+ return 0;
+}
+
+/* Report whether the HW flagged the last read page as erased (all
+ * 0xff); used to downgrade an uncorrectable-ECC result to success.
+ * data/fdm/sectors are unused here but kept for override hooks.
+ */
+static bool is_page_empty(struct nfi_base *nb, u8 *data, u8 *fdm,
+ int sectors)
+{
+ u32 empty = readl(nb->res.nfi_regs + NFI_STA) & STA_EMP_PAGE;
+
+ if (empty) {
+ pr_info("empty page!\n");
+ return true;
+ }
+
+ return false;
+}
+
+/* Common setup for a sector transfer in either direction: arm the
+ * AHB-done interrupt (DMA+irq mode), set the read/write and ECC bits
+ * in NFI_CNFG, apply the bad-mark swap for writes, map the bounce
+ * buffer for DMA, preload FDM registers for ECC writes, and program
+ * the sector count.  Paired with rw_complete() for teardown.
+ */
+static int rw_prepare(struct nfi_base *nb, int sectors, u8 *data,
+ u8 *fdm, bool read)
+{
+ void *regs = nb->res.nfi_regs;
+ u32 len = nb->nfi.sector_size * sectors;
+ bool irq_en = nb->dma_en && nb->nfi_irq_en;
+ void *dma_addr;
+ u32 val;
+ int ret;
+
+ nb->rw_sectors = sectors;
+
+ if (irq_en) {
+ nandx_event_init(nb->done);
+ writel(INTR_AHB_DONE_EN, regs + NFI_INTR_EN);
+ }
+
+ val = readw(regs + NFI_CNFG);
+ if (read)
+ val |= CNFG_READ_EN;
+ else
+ val &= ~CNFG_READ_EN;
+
+ /* as design, now, auto format enabled when ecc enabled */
+ if (nb->ecc_en) {
+ val |= CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
+
+ if (read)
+ ret = enable_ecc_decode(nb, sectors);
+ else
+ ret = enable_ecc_encode(nb);
+
+ if (ret) {
+ pr_err("%s: ecc enable %s fail!\n", __func__,
+ read ? "decode" : "encode");
+ return ret;
+ }
+ }
+
+ /* writes swap the bad mark into the data now; rw_complete()
+ * swaps it back to restore the caller's buffer
+ */
+ if (!read && nb->bad_mark_swap_en)
+ nb->bad_mark_ctrl.bad_mark_swap(&nb->nfi, data, fdm);
+
+ /* raw reads also transfer the spare area */
+ if (!nb->ecc_en && read)
+ len += sectors * nb->nfi.sector_spare_size;
+
+ if (nb->dma_en) {
+ val |= CNFG_DMA_BURST_EN | CNFG_AHB;
+
+ if (read) {
+ dma_addr = (void *)(unsigned long)nandx_dma_map(
+ nb->res.dev, nb->buf,
+ (u64)len, NDMA_FROM_DEV);
+ } else {
+ memcpy(nb->buf, data, len);
+ dma_addr = (void *)(unsigned long)nandx_dma_map(
+ nb->res.dev, nb->buf,
+ (u64)len, NDMA_TO_DEV);
+ }
+
+ writel((unsigned long)dma_addr, (void *)regs + NFI_STRADDR);
+
+ nb->access_len = len;
+ nb->dma_addr = dma_addr;
+ }
+
+ if (nb->ecc_en && !read && fdm)
+ write_fdm(nb, fdm);
+
+ writew(val, regs + NFI_CNFG);
+ /* setup R/W sector number */
+ writel(sectors << CON_SEC_SHIFT, regs + NFI_CON);
+
+ return 0;
+}
+
+/* Kick off the transfer prepared by rw_prepare(): set the burst
+ * read/write bit in NFI_CON and latch the start strobe.
+ */
+static void rw_trigger(struct nfi_base *nb, bool read)
+{
+ void *regs = nb->res.nfi_regs;
+ u32 val;
+
+ val = read ? CON_BRD : CON_BWR;
+ val |= readl(regs + NFI_CON);
+ writel(val, regs + NFI_CON);
+
+ writel(STAR_EN, regs + NFI_STRDATA);
+}
+
+/* Wait for the sector transfer to finish: via the AHB-done event in
+ * DMA+irq mode, otherwise by polling the sector counter (plus a bus
+ * idle poll on reads to work around a HW issue).  For ECC reads it
+ * then waits for the decoder and returns its status/bitflip result.
+ * NOTE(review): the early 'if (!ret) return ret;' assumes
+ * nandx_event_wait_complete() returns 0 on success; if it follows
+ * the wait_for_completion_timeout convention (0 == timeout) this
+ * would report success on a timeout -- confirm the API contract.
+ */
+static int rw_wait_done(struct nfi_base *nb, int sectors, bool read)
+{
+ void *regs = nb->res.nfi_regs;
+ bool irq_en = nb->dma_en && nb->nfi_irq_en;
+ int ret;
+ u32 val;
+
+ if (irq_en) {
+ ret = nandx_event_wait_complete(nb->done, NFI_TIMEOUT);
+ if (!ret) {
+ writew(0, regs + NFI_INTR_EN);
+ return ret;
+ }
+ }
+
+ if (read) {
+ ret = readl_poll_timeout_atomic(regs + NFI_BYTELEN, val,
+ ADDRCNTR_SEC(val) >=
+ (u32)sectors,
+ 2, NFI_TIMEOUT);
+ /* HW issue: if not wait ahb done, need polling bus busy */
+ if (!ret && !irq_en)
+ ret = readl_poll_timeout_atomic(regs + NFI_MASTER_STA,
+ val,
+ !(val &
+ MASTER_BUS_BUSY),
+ 2, NFI_TIMEOUT);
+ } else {
+ ret = readl_poll_timeout_atomic(regs + NFI_ADDRCNTR, val,
+ ADDRCNTR_SEC(val) >=
+ (u32)sectors,
+ 2, NFI_TIMEOUT);
+ }
+
+ if (ret) {
+ pr_warn("do page %s timeout\n", read ? "read" : "write");
+ return ret;
+ }
+
+ if (read && nb->ecc_en) {
+ ret = nb->ecc->wait_done(nb->ecc);
+ if (ret)
+ return ret;
+
+ return nb->ecc->decode_status(nb->ecc, 0, sectors);
+ }
+
+ return 0;
+}
+
+/* Move the payload for the in-flight transfer: in DMA mode reads
+ * only need the FDM registers copied out (data lands in the bounce
+ * buffer); in PIO mode delegate to the byte-port rx/tx helpers.
+ */
+static int rw_data(struct nfi_base *nb, u8 *data, u8 *fdm, int sectors,
+ bool read)
+{
+ if (read && nb->dma_en && nb->ecc_en && fdm)
+ read_fdm(nb, fdm, 0, sectors);
+
+ if (!nb->dma_en) {
+ if (read)
+ return pio_rx_data(nb, data, fdm, sectors);
+
+ return pio_tx_data(nb, data, fdm, sectors);
+ }
+
+ return 0;
+}
+
+/* Teardown for a sector transfer: unmap the DMA bounce buffer (and
+ * on reads copy it out to @data/@fdm and apply the empty-page check
+ * when decode failed), undo the bad-mark swap, disable the ECC
+ * engine, and clear NFI_CNFG/NFI_CON for the next operation.
+ */
+static void rw_complete(struct nfi_base *nb, u8 *data, u8 *fdm,
+ bool read)
+{
+ int data_len = 0;
+ bool is_empty;
+
+ if (nb->dma_en) {
+ if (read) {
+ nandx_dma_unmap(nb->res.dev, nb->buf, nb->dma_addr,
+ (u64)nb->access_len, NDMA_FROM_DEV);
+
+ if (data) {
+ data_len = nb->rw_sectors * nb->nfi.sector_size;
+ memcpy(data, nb->buf, data_len);
+ }
+
+ if (fdm)
+ memcpy(fdm, nb->buf + data_len,
+ nb->access_len - data_len);
+
+ /* an erased page reads as uncorrectable; treat it
+ * as a successful read of blank data
+ */
+ if (nb->read_status == -ENANDREAD) {
+ is_empty = nb->is_page_empty(nb, data, fdm,
+ nb->rw_sectors);
+ if (is_empty)
+ nb->read_status = 0;
+ }
+ } else {
+ nandx_dma_unmap(nb->res.dev, nb->buf, nb->dma_addr,
+ (u64)nb->access_len, NDMA_TO_DEV);
+ }
+ }
+
+ /* whether it's reading or writing, we all check if need swap
+ * for write, we need to restore data
+ */
+ if (nb->bad_mark_swap_en)
+ nb->bad_mark_ctrl.bad_mark_swap(&nb->nfi, data, fdm);
+
+ if (nb->ecc_en)
+ nb->ecc->disable(nb->ecc);
+
+ writel(0, nb->res.nfi_regs + NFI_CNFG);
+ writel(0, nb->res.nfi_regs + NFI_CON);
+}
+
+/* Read @sectors sectors of the current page (addressed earlier via
+ * send_addr()).  Returns max corrected bitflips (>= 0) on success,
+ * -ENANDREAD for an uncorrectable page, or another negative error.
+ * All exits route through rw_complete() for HW/buffer teardown.
+ */
+static int nfi_read_sectors(struct nfi *nfi, u8 *data, u8 *fdm,
+ int sectors)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ int bitflips = 0, ret;
+
+ pr_debug("%s: read page#%d\n", __func__, nb->row);
+ pr_debug("%s: data address 0x%x, fdm address 0x%x, sectors 0x%x\n",
+ __func__, (u32)((unsigned long)data),
+ (u32)((unsigned long)fdm), sectors);
+
+ nb->read_status = 0;
+
+ ret = nb->rw_prepare(nb, sectors, data, fdm, true);
+ if (ret)
+ return ret;
+
+ nb->rw_trigger(nb, true);
+
+ if (nb->dma_en) {
+ ret = nb->rw_wait_done(nb, sectors, true);
+ if (ret > 0)
+ bitflips = ret;
+ else if (ret == -ENANDREAD)
+ /* defer: rw_complete() may clear this for empty pages */
+ nb->read_status = -ENANDREAD;
+ else if (ret < 0)
+ goto complete;
+
+ }
+
+ ret = nb->rw_data(nb, data, fdm, sectors, true);
+ if (ret > 0)
+ ret = max_t(int, ret, bitflips);
+
+complete:
+ nb->rw_complete(nb, data, fdm, true);
+
+ if (nb->read_status == -ENANDREAD)
+ return -ENANDREAD;
+
+ return ret;
+}
+
+/* Program the full current page (addressed earlier via send_addr());
+ * @fdm optionally supplies the OOB user bytes.  Returns 0 on success
+ * or a negative error.
+ *
+ * Fix: a failure in rw_data() used to return immediately, skipping
+ * rw_complete().  That left NFI_CNFG/NFI_CON dirty, the ECC engine
+ * enabled, the DMA buffer mapped and -- with bad-mark swap on -- the
+ * caller's data buffer still swapped (rw_prepare() swaps it and only
+ * rw_complete() restores it).  Route every post-prepare exit through
+ * rw_complete(), mirroring nfi_read_sectors().
+ */
+int nfi_write_page(struct nfi *nfi, u8 *data, u8 *fdm)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ u32 sectors = div_down(nb->format.page_size, nfi->sector_size);
+ int ret;
+
+ pr_debug("%s: write page#%d\n", __func__, nb->row);
+ pr_debug("%s: data address 0x%x, fdm address 0x%x\n",
+ __func__, (int)((unsigned long)data),
+ (int)((unsigned long)fdm));
+
+ ret = nb->rw_prepare(nb, sectors, data, fdm, false);
+ if (ret)
+ return ret;
+
+ nb->rw_trigger(nb, false);
+
+ ret = nb->rw_data(nb, data, fdm, sectors, false);
+ if (!ret)
+ ret = nb->rw_wait_done(nb, sectors, false);
+
+ nb->rw_complete(nb, data, fdm, false);
+
+ return ret;
+}
+
+/* Byte-granular PIO transfer used for ID/status/parameter-page
+ * access.  If the NFI is not already in the custom-data state, it is
+ * (re)armed for a byte-wide burst before each byte; CNFG is cleared
+ * afterwards.  Returns 0 on success or a poll timeout error.
+ */
+static int nfi_rw_bytes(struct nfi *nfi, u8 *data, int count, bool read)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ void *regs = nb->res.nfi_regs;
+ int i, ret;
+ u32 val;
+
+ for (i = 0; i < count; i++) {
+ val = readl(regs + NFI_STA) & NFI_FSM_MASK;
+ if (val != NFI_FSM_CUSTDATA) {
+ val = readw(regs + NFI_CNFG) | CNFG_BYTE_RW;
+ if (read)
+ val |= CNFG_READ_EN;
+ writew(val, regs + NFI_CNFG);
+
+ val = div_up(count, nfi->sector_size);
+ val = (val << CON_SEC_SHIFT) | CON_BRD | CON_BWR;
+ writel(val, regs + NFI_CON);
+
+ writew(STAR_EN, regs + NFI_STRDATA);
+ }
+
+ ret = wait_io_ready(regs);
+ if (ret)
+ return ret;
+
+ if (read)
+ data[i] = readb(regs + NFI_DATAR);
+ else
+ writeb(data[i], regs + NFI_DATAW);
+ }
+
+ writel(0, nb->res.nfi_regs + NFI_CNFG);
+
+ return 0;
+}
+
+/* Byte-wise PIO read: thin wrapper over nfi_rw_bytes(). */
+static int nfi_read_bytes(struct nfi *nfi, u8 *data, int count)
+{
+ return nfi_rw_bytes(nfi, data, count, true);
+}
+
+/* Byte-wise PIO write: thin wrapper over nfi_rw_bytes(). */
+static int nfi_write_bytes(struct nfi *nfi, u8 *data, int count)
+{
+ return nfi_rw_bytes(nfi, data, count, false);
+}
+
+/* As register map says, only when flash macro is idle,
+ * sw reset or nand interface change can be issued
+ */
+static inline int wait_flash_macro_idle(void *regs)
+{
+ u32 val;
+
+ return readl_poll_timeout_atomic(regs + NFI_STA, val,
+ val & FLASH_MACRO_IDLE, 2,
+ NFI_TIMEOUT);
+}
+
+/* Pack the seven SDR timing fields into the NFI_ACCCON layout
+ * (bit positions documented at the register write below).
+ */
+#define ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt) \
+ ((tpoecs) << 28 | (tprecs) << 22 | (tc2r) << 16 | \
+ (tw2r) << 12 | (twh) << 8 | (twst) << 4 | (trlt))
+
+/* Convert the device's SDR timing spec (ns values) into NFI clock
+ * cycles and program NFI_ACCCON; also tunes the read strobe when
+ * tREA exceeds the read pulse, and selects the async NAND type.
+ * Must only run while the flash macro is idle.
+ */
+static int nfi_set_sdr_timing(struct nfi *nfi, void *timing, u8 type)
+{
+ struct nand_sdr_timing *sdr = (struct nand_sdr_timing *) timing;
+ struct nfi_base *nb = nfi_to_base(nfi);
+ void *regs = nb->res.nfi_regs;
+ u32 tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt, tstrobe;
+ u32 rate, val;
+ int ret;
+
+ ret = wait_flash_macro_idle(regs);
+ if (ret)
+ return ret;
+
+ /* turn clock rate into KHZ */
+ rate = nb->res.clock_1x / 1000;
+
+ tpoecs = max_t(u16, sdr->tALH, sdr->tCLH);
+ tpoecs = div_up(tpoecs * rate, 1000000);
+ tpoecs &= 0xf;
+
+ tprecs = max_t(u16, sdr->tCLS, sdr->tALS);
+ tprecs = div_up(tprecs * rate, 1000000);
+ tprecs &= 0x3f;
+
+ /* tc2r is in unit of 2T */
+ tc2r = div_up(sdr->tCR * rate, 1000000);
+ tc2r = div_down(tc2r, 2);
+ tc2r &= 0x3f;
+
+ tw2r = div_up(sdr->tWHR * rate, 1000000);
+ tw2r = div_down(tw2r, 2);
+ tw2r &= 0xf;
+
+ twh = max_t(u16, sdr->tREH, sdr->tWH);
+ twh = div_up(twh * rate, 1000000) - 1;
+ twh &= 0xf;
+
+ twst = div_up(sdr->tWP * rate, 1000000) - 1;
+ twst &= 0xf;
+
+ trlt = div_up(sdr->tRP * rate, 1000000) - 1;
+ trlt &= 0xf;
+
+ /* If tREA is bigger than tRP, setup strobe sel here */
+ if ((trlt + 1) * 1000000 / rate < sdr->tREA) {
+ tstrobe = sdr->tREA - (trlt + 1) * 1000000 / rate;
+ tstrobe = div_up(tstrobe * rate, 1000000);
+ val = readl(regs + NFI_DEBUG_CON1);
+ val &= ~STROBE_MASK;
+ val |= tstrobe << STROBE_SHIFT;
+ writel(val, regs + NFI_DEBUG_CON1);
+ }
+
+ /*
+ * ACCON: access timing control register
+ * -------------------------------------
+ * 31:28: tpoecs, minimum required time for CS post pulling down after
+ * accessing the device
+ * 27:22: tprecs, minimum required time for CS pre pulling down before
+ * accessing the device
+ * 21:16: tc2r, minimum required time from NCEB low to NREB low
+ * 15:12: tw2r, minimum required time from NWEB high to NREB low.
+ * 11:08: twh, write enable hold time
+ * 07:04: twst, write wait states
+ * 03:00: trlt, read wait states
+ */
+ val = ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt);
+ pr_info("acctiming: 0x%x\n", val);
+ writel(val, regs + NFI_ACCCON);
+
+ /* set NAND type */
+ writel(NAND_TYPE_ASYNC, regs + NFI_NAND_TYPE_CNFG);
+
+ return ret;
+}
+
+/* Apply a NAND interface timing of the given @type.
+ * Only the legacy SDR interface is implemented on this base
+ * controller; the DDR variants (sync / toggle / NVDDR2) are reserved
+ * for mlc/tlc parts and report -EINVAL.
+ */
+static int nfi_set_timing(struct nfi *nfi, void *timing, int type)
+{
+ if (type == NAND_TIMING_SDR)
+ return nfi_set_sdr_timing(nfi, timing, type);
+
+ /* NAND_TIMING_SYNC_DDR / TOGGLE_DDR / NVDDR2 and anything else */
+ return -EINVAL;
+}
+
+/* Populate the public struct nfi operation table with this base
+ * implementation's handlers.
+ */
+static void set_nfi_funcs(struct nfi *nfi)
+{
+ nfi->select_chip = nfi_select_chip;
+ nfi->set_format = nfi_set_format;
+ nfi->nfi_ctrl = nfi_ctrl;
+ nfi->set_timing = nfi_set_timing;
+
+ nfi->reset = nfi_reset;
+ nfi->send_cmd = nfi_send_cmd;
+ nfi->send_addr = nfi_send_addr;
+ nfi->trigger = nfi_trigger;
+
+ nfi->write_page = nfi_write_page;
+ nfi->write_bytes = nfi_write_bytes;
+ nfi->read_sectors = nfi_read_sectors;
+ nfi->read_bytes = nfi_read_bytes;
+
+ nfi->wait_ready = nfi_wait_ready;
+
+ nfi->enable_randomizer = nfi_enable_randomizer;
+ nfi->disable_randomizer = nfi_disable_randomizer;
+}
+
+/* MT8512 controller limits: up to 8 FDM bytes/sector (1 of them
+ * ECC-protected), 14 parity bits per ECC step, and the spare-size
+ * table above.
+ */
+static struct nfi_caps nfi_caps_mt8512 = {
+ .max_fdm_size = 8,
+ .fdm_ecc_size = 1,
+ .ecc_parity_bits = 14,
+ .spare_size = spare_size_mt8512,
+ .spare_size_num = 19,
+};
+
+/* Capability lookup by IC version; only MT8512 is known today. */
+static struct nfi_caps *nfi_get_match_data(enum mtk_ic_version ic)
+{
+ /* NOTE: add other IC's data */
+ return &nfi_caps_mt8512;
+}
+
+/* Initialize nfi_base defaults: all optional features off, custom
+ * op mode, correct-mode ECC decode, a fresh completion event, the
+ * per-IC capability table, and the overridable internal hooks.
+ */
+static void set_nfi_base_params(struct nfi_base *nb)
+{
+ nb->ecc_en = false;
+ nb->dma_en = false;
+ nb->nfi_irq_en = false;
+ nb->ecc_irq_en = false;
+ nb->page_irq_en = false;
+ nb->ecc_clk_en = false;
+ nb->randomize_en = false;
+ nb->custom_sector_en = false;
+ nb->bad_mark_swap_en = false;
+
+ nb->op_mode = CNFG_CUSTOM_MODE;
+ nb->ecc_deccon = ECC_DEC_CORRECT;
+ nb->ecc_mode = ECC_NFI_MODE;
+
+ nb->done = nandx_event_create();
+ nb->caps = nfi_get_match_data(nb->res.ic_ver);
+
+ nb->set_op_mode = set_op_mode;
+ nb->is_page_empty = is_page_empty;
+
+ nb->rw_prepare = rw_prepare;
+ nb->rw_trigger = rw_trigger;
+ nb->rw_wait_done = rw_wait_done;
+ nb->rw_data = rw_data;
+ nb->rw_complete = rw_complete;
+}
+
+/* Weak hook for derived controllers (e.g. spi-nand) to wrap the base
+ * and return their own struct nfi; the default just exposes the base.
+ */
+struct nfi *__weak nfi_extend_init(struct nfi_base *nb)
+{
+ return &nb->nfi;
+}
+
+/* Weak teardown counterpart of nfi_extend_init(); the default owner
+ * of the nfi_base allocation frees it here.
+ */
+void __weak nfi_extend_exit(struct nfi_base *nb)
+{
+ mem_free(nb);
+}
+
+/* Create an NFI instance from platform resources: allocate the base
+ * state, register the NFI interrupt, bring up the ECC engine, install
+ * defaults and the op table, and program a safe initial ACCCON.
+ * Returns the public struct nfi, or NULL on failure.
+ *
+ * Fix: the nfiecc_init() failure path returned NULL directly,
+ * leaking nb; route it through the common error exit like the irq
+ * failure path.
+ * NOTE(review): the irq registered above is not unregistered on the
+ * error path -- no unregister API is visible here; confirm whether
+ * one exists.
+ */
+struct nfi *nfi_init(struct nfi_resource *res)
+{
+ struct nfiecc_resource ecc_res;
+ struct nfi_base *nb;
+ struct nfiecc *ecc;
+ struct nfi *nfi;
+ int ret;
+
+ nb = mem_alloc(1, sizeof(struct nfi_base));
+ if (!nb) {
+ pr_err("nfi alloc memory fail @%s.\n", __func__);
+ return NULL;
+ }
+
+ nb->res = *res;
+
+ ret = nandx_irq_register(res->dev, res->nfi_irq_id, nfi_irq_handler,
+ "mtk_nand", nb);
+ if (ret) {
+ pr_err("nfi irq register failed!\n");
+ goto error;
+ }
+
+ /* fill ecc paras and init ecc */
+ ecc_res.ic_ver = nb->res.ic_ver;
+ ecc_res.dev = nb->res.dev;
+ ecc_res.irq_id = nb->res.ecc_irq_id;
+ ecc_res.regs = nb->res.ecc_regs;
+ ecc = nfiecc_init(&ecc_res);
+ if (!ecc) {
+ pr_err("nfiecc init fail.\n");
+ goto error;
+ }
+
+ nb->ecc = ecc;
+
+ set_nfi_base_params(nb);
+ set_nfi_funcs(&nb->nfi);
+
+ /* Assign a temp sector size for reading ID & para page.
+ * We may assign new value later.
+ */
+ nb->nfi.sector_size = 512;
+
+ /* give a default timing, and as discuss
+ * this is the only thing what we need do for nfi init
+ * if need do more, then we can add a function
+ */
+ writel(0x30C77FFF, nb->res.nfi_regs + NFI_ACCCON);
+
+ nfi = nfi_extend_init(nb);
+ if (nfi)
+ return nfi;
+
+error:
+ mem_free(nb);
+ return NULL;
+}
+
+/* Tear down an NFI instance created by nfi_init(): destroy the
+ * completion event, stop the ECC engine, free the bounce buffer
+ * (unless it lives in reserved DRAM), and release the base via the
+ * extend hook.
+ */
+void nfi_exit(struct nfi *nfi)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+
+ nandx_event_destroy(nb->done);
+ nfiecc_exit(nb->ecc);
+#if !NANDX_BULK_IO_USE_DRAM
+ mem_free(nb->buf);
+#endif
+ nfi_extend_exit(nb);
+}
+
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi/nfi_base.h b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi/nfi_base.h
new file mode 100644
index 0000000..1b96a91
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi/nfi_base.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NFI_BASE_H__
+#define __NFI_BASE_H__
+
+/* Poll budget handed to the readl/readx poll helpers in nfi code. */
+#define NFI_TIMEOUT 1000000
+
+/* 16-bit MMIO accessors; defined only if the platform lacks them. */
+#ifndef writew
+#define writew(v, a) (*REG16(a) = (v))
+#endif
+#ifndef readw
+#define readw(a) (*REG16(a))
+#endif
+
+/* Direction of the on-the-fly data randomizer. */
+enum randomizer_op {
+ RAND_ENCODE,
+ RAND_DECODE
+};
+
+/* Hooks and location data for swapping the bad-block marker with FDM
+ * bytes (positions presumably chip-layout dependent — see users).
+ */
+struct bad_mark_ctrl {
+ void (*bad_mark_swap)(struct nfi *nfi, u8 *buf, u8 *fdm);
+ u8 *(*fdm_shift)(struct nfi *nfi, u8 *fdm, int sector);
+ u32 sector;
+ u32 position;
+};
+
+/* Per-IC capability limits for FDM/ECC geometry. */
+struct nfi_caps {
+ u8 max_fdm_size;
+ u8 fdm_ecc_size;
+ u8 ecc_parity_bits;
+ const int *spare_size;
+ u32 spare_size_num;
+};
+
+/*
+ * Core controller state. The public struct nfi is embedded first and
+ * handed to callers; nfi_to_base() recovers this object from it.
+ */
+struct nfi_base {
+ struct nfi nfi;
+ struct nfi_resource res;
+ struct nfiecc *ecc;
+ struct nfi_format format;
+ struct nfi_caps *caps;
+ struct bad_mark_ctrl bad_mark_ctrl;
+
+ /* page_size + spare_size */
+ u8 *buf;
+
+ /* used for spi nand */
+ u8 cmd_mode;
+ u32 op_mode;
+
+ int page_sectors;
+
+ /* completion event signalled from the irq handler */
+ void *done;
+
+ /* for read/write */
+ int col;
+ int row;
+ int access_len;
+ int rw_sectors;
+ void *dma_addr;
+ int read_status;
+
+ bool dma_en;
+ bool nfi_irq_en;
+ bool page_irq_en;
+ bool auto_format;
+ bool ecc_en;
+ bool ecc_irq_en;
+ bool ecc_clk_en;
+ bool randomize_en;
+ bool custom_sector_en;
+ bool bad_mark_swap_en;
+
+ enum nfiecc_deccon ecc_deccon;
+ enum nfiecc_mode ecc_mode;
+
+ /* overridable hooks; nfi_spi.c replaces several of these */
+ void (*set_op_mode)(void *regs, u32 mode);
+ bool (*is_page_empty)(struct nfi_base *nb, u8 *data, u8 *fdm,
+ int sectors);
+
+ int (*rw_prepare)(struct nfi_base *nb, int sectors, u8 *data, u8 *fdm,
+ bool read);
+ void (*rw_trigger)(struct nfi_base *nb, bool read);
+ int (*rw_wait_done)(struct nfi_base *nb, int sectors, bool read);
+ int (*rw_data)(struct nfi_base *nb, u8 *data, u8 *fdm, int sectors,
+ bool read);
+ void (*rw_complete)(struct nfi_base *nb, u8 *data, u8 *fdm, bool read);
+};
+
+/* Recover the containing nfi_base from the public nfi interface. */
+static inline struct nfi_base *nfi_to_base(struct nfi *nfi)
+{
+ return container_of(nfi, struct nfi_base, nfi);
+}
+
+/* Extension layer entry points (weak defaults in nfi_base.c). */
+struct nfi *nfi_extend_init(struct nfi_base *nb);
+void nfi_extend_exit(struct nfi_base *nb);
+
+#endif /* __NFI_BASE_H__ */
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi/nfi_regs.h b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi/nfi_regs.h
new file mode 100644
index 0000000..4b0a3dd
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi/nfi_regs.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NFI_REGS_H__
+#define __NFI_REGS_H__
+
+/*
+ * MTK NFI (NAND Flash Interface) controller register offsets.
+ * Bit-field macros follow the register they belong to.
+ */
+#define NFI_CNFG 0x000
+#define CNFG_AHB BIT(0)
+#define CNFG_READ_EN BIT(1)
+#define CNFG_DMA_BURST_EN BIT(2)
+#define CNFG_RESEED_SEC_EN BIT(4)
+#define CNFG_RAND_SEL BIT(5)
+#define CNFG_BYTE_RW BIT(6)
+#define CNFG_HW_ECC_EN BIT(8)
+#define CNFG_AUTO_FMT_EN BIT(9)
+#define CNFG_RAND_MASK GENMASK(5, 4)
+#define CNFG_OP_MODE_MASK GENMASK(14, 12)
+#define CNFG_IDLE_MOD 0
+#define CNFG_READ_MODE (1 << 12)
+#define CNFG_SINGLE_READ_MODE (2 << 12)
+#define CNFG_PROGRAM_MODE (3 << 12)
+#define CNFG_ERASE_MODE (4 << 12)
+#define CNFG_RESET_MODE (5 << 12)
+#define CNFG_CUSTOM_MODE (6 << 12)
+#define NFI_PAGEFMT 0x004
+#define PAGEFMT_SPARE_SHIFT 16
+#define PAGEFMT_FDM_ECC_SHIFT 12
+#define PAGEFMT_FDM_SHIFT 8
+#define PAGEFMT_SEC_SEL_512 BIT(2)
+#define PAGEFMT_512_2K 0
+#define PAGEFMT_2K_4K 1
+#define PAGEFMT_4K_8K 2
+#define PAGEFMT_8K_16K 3
+#define NFI_CON 0x008
+#define CON_FIFO_FLUSH BIT(0)
+#define CON_NFI_RST BIT(1)
+#define CON_BRD BIT(8)
+#define CON_BWR BIT(9)
+#define CON_SEC_SHIFT 12
+#define NFI_ACCCON 0x00c
+#define NFI_INTR_EN 0x010
+#define INTR_BUSY_RETURN_EN BIT(4)
+#define INTR_AHB_DONE_EN BIT(6)
+#define NFI_INTR_STA 0x014
+#define NFI_CMD 0x020
+#define NFI_ADDRNOB 0x030
+#define ROW_SHIFT 4
+#define NFI_COLADDR 0x034
+#define NFI_ROWADDR 0x038
+#define NFI_STRDATA 0x040
+#define STAR_EN 1
+#define STAR_DE 0
+#define NFI_CNRNB 0x044
+#define NFI_DATAW 0x050
+#define NFI_DATAR 0x054
+#define NFI_PIO_DIRDY 0x058
+#define PIO_DI_RDY 1
+#define NFI_STA 0x060
+#define STA_CMD BIT(0)
+#define STA_ADDR BIT(1)
+#define FLASH_MACRO_IDLE BIT(5)
+#define STA_BUSY BIT(8)
+#define STA_BUSY2READY BIT(9)
+#define STA_EMP_PAGE BIT(12)
+#define NFI_FSM_CUSTDATA (0xe << 16)
+#define NFI_FSM_MASK GENMASK(19, 16)
+#define NAND_FSM_MASK GENMASK(29, 23)
+#define NFI_ADDRCNTR 0x070
+#define CNTR_VALID_MASK GENMASK(16, 0)
+#define CNTR_MASK GENMASK(16, 12)
+#define ADDRCNTR_SEC_SHIFT 12
+#define ADDRCNTR_SEC(val) \
+ (((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
+#define NFI_STRADDR 0x080
+#define NFI_BYTELEN 0x084
+#define NFI_CSEL 0x090
+/* Per-sector FDM (spare) data, two 32-bit words per sector. */
+#define NFI_FDML(x) (0x0a0 + (x) * 8)
+#define NFI_FDMM(x) (0x0a4 + (x) * 8)
+#define NFI_DEBUG_CON1 0x220
+#define STROBE_MASK GENMASK(4, 3)
+#define STROBE_SHIFT 3
+#define ECC_CLK_EN BIT(11)
+#define AUTOC_SRAM_MODE BIT(12)
+#define BYPASS_MASTER_EN BIT(15)
+#define NFI_MASTER_STA 0x224
+#define MASTER_BUS_BUSY 0x3
+#define NFI_SECCUS_SIZE 0x22c
+#define SECCUS_SIZE_EN BIT(17)
+#define NFI_RANDOM_CNFG 0x238
+#define RAN_ENCODE_EN BIT(0)
+#define ENCODE_SEED_SHIFT 1
+#define RAN_DECODE_EN BIT(16)
+#define DECODE_SEED_SHIFT 17
+#define RAN_SEED_MASK 0x7fff
+#define NFI_EMPTY_THRESH 0x23c
+#define NFI_NAND_TYPE_CNFG 0x240
+#define NAND_TYPE_ASYNC 0
+#define NAND_TYPE_TOGGLE 1
+#define NAND_TYPE_SYNC 2
+#define NFI_ACCCON1 0x244
+#define NFI_DELAY_CTRL 0x248
+#define NFI_TLC_RD_WHR2 0x300
+#define TLC_RD_WHR2_EN BIT(12)
+#define TLC_RD_WHR2_MASK GENMASK(11, 0)
+/* Also defined in nfi_spi_regs.h (SPI_MODE bit lives there). */
+#define SNF_SNF_CNFG 0x55c
+#define SPI_MODE_EN 1
+#define SPI_MODE_DIS 0
+
+#endif /* __NFI_REGS_H__ */
+
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi/nfi_spi.c b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi/nfi_spi.c
new file mode 100644
index 0000000..0293f69
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi/nfi_spi.c
@@ -0,0 +1,689 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "../nfi.h"
+#include "nfiecc.h"
+#include "nfi_regs.h"
+#include "nfi_base.h"
+#include "nfi_spi_regs.h"
+#include "nfi_spi.h"
+
+/* Dummy opcodes written to NFI_CMD to kick off a custom read/program
+ * sequence (see snfi_config_for_read/_write below).
+ */
+#define NFI_CMD_DUMMY_RD 0x00
+#define NFI_CMD_DUMMY_WR 0x80
+
+/* Pre-tuned pad delay profiles, indexed by the delay mode selected
+ * via SNFI_CTRL_DELAY_MODE (snfi_set_delay validates the index).
+ */
+static struct nfi_spi_delay spi_delay[SPI_NAND_MAX_DELAY] = {
+ /*
+ * tCLK_SAM_DLY, tCLK_OUT_DLY, tCS_DLY, tWR_EN_DLY,
+ * tIO_IN_DLY[4], tIO_OUT_DLY[4], tREAD_LATCH_LATENCY
+ */
+ {0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 0},
+ {21, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 0},
+ {63, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 0},
+ {0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 1},
+ {21, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 1},
+ {63, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 1}
+};
+
+/* Up-cast from the embedded nfi_base to its enclosing nfi_spi. */
+static inline struct nfi_spi *base_to_snfi(struct nfi_base *nb)
+{
+ return container_of(nb, struct nfi_spi, base);
+}
+
+/* Route the pads to the MAC engine (clear XIO select) and enable it. */
+static void snfi_mac_enable(struct nfi_base *nb)
+{
+	void *regs = nb->res.nfi_regs;
+	u32 ctl = readl(regs + SNF_MAC_CTL);
+
+	ctl = (ctl & ~MAC_XIO_SEL) | SF_MAC_EN;
+	writel(ctl, regs + SNF_MAC_CTL);
+}
+
+/* Drop both the trigger and the MAC enable after a transaction. */
+static void snfi_mac_disable(struct nfi_base *nb)
+{
+	void *regs = nb->res.nfi_regs;
+	u32 ctl = readl(regs + SNF_MAC_CTL);
+
+	ctl &= ~(SF_TRIG | SF_MAC_EN);
+	writel(ctl, regs + SNF_MAC_CTL);
+}
+
+/*
+ * Fire the staged MAC transaction: set SF_TRIG, wait for the engine
+ * to report WIP_READY, then wait for the WIP (in-progress) flag to
+ * clear. Returns 0 on success or the poll helper's timeout error.
+ */
+static int snfi_mac_trigger(struct nfi_base *nb)
+{
+ void *regs = nb->res.nfi_regs;
+ int ret;
+ u32 val;
+
+ val = readl(regs + SNF_MAC_CTL);
+ val |= SF_TRIG;
+ writel(val, regs + SNF_MAC_CTL);
+
+ ret = readl_poll_timeout_atomic(regs + SNF_MAC_CTL, val,
+ val & WIP_READY, 10,
+ NFI_TIMEOUT);
+ if (ret) {
+ pr_err("polling wip ready for read timeout\n");
+ return ret;
+ }
+
+ return readl_poll_timeout_atomic(regs + SNF_MAC_CTL, val,
+ !(val & WIP), 10,
+ NFI_TIMEOUT);
+}
+
+/* Run one complete MAC transaction: enable, trigger + wait, disable. */
+static int snfi_mac_op(struct nfi_base *nb)
+{
+	int err;
+
+	snfi_mac_enable(nb);
+	err = snfi_mac_trigger(nb);
+	snfi_mac_disable(nb);
+
+	return err;
+}
+
+/*
+ * Copy @count bytes into the controller's GPRAM at byte offset
+ * nfi_spi->tx_count. GPRAM is only word-addressable, so the span is
+ * split into an unaligned head (read-modify-write of the first word),
+ * whole-word body, and a partial tail word.
+ * NOTE(review): "val" is passed to nandx_split() before being
+ * initialized — presumably the macro only uses it to size the split;
+ * confirm against the nandx_split definition.
+ */
+static void snfi_write_mac(struct nfi_spi *nfi_spi, u8 *data, int count)
+{
+ struct nandx_split32 split = {0};
+ u32 reg_offset = round_down(nfi_spi->tx_count, 4);
+ void *regs = nfi_spi->base.res.nfi_regs;
+ u32 data_offset = 0, i, val;
+ u8 *p_val = (u8 *)(&val);
+
+ nandx_split(&split, nfi_spi->tx_count, count, val, 4);
+
+ if (split.head_len) {
+ /* merge head bytes into the existing partially-filled word */
+ val = readl(regs + SPI_GPRAM_ADDR + reg_offset);
+
+ for (i = 0; i < split.head_len; i++)
+ p_val[split.head + i] = data[i];
+
+ writel(val, regs + SPI_GPRAM_ADDR + reg_offset);
+ }
+
+ if (split.body_len) {
+ reg_offset = split.body;
+ data_offset = split.head_len;
+
+ /* full words: write out each time 4 bytes are packed */
+ for (i = 0; i < split.body_len; i++) {
+ p_val[i & 3] = data[data_offset + i];
+
+ if ((i & 3) == 3) {
+ writel(val, regs + SPI_GPRAM_ADDR + reg_offset);
+ reg_offset += 4;
+ }
+ }
+ }
+
+ if (split.tail_len) {
+ reg_offset = split.tail;
+ data_offset += split.body_len;
+
+ /* trailing partial word, flushed once fully packed */
+ for (i = 0; i < split.tail_len; i++) {
+ p_val[i] = data[data_offset + i];
+
+ if (i == split.tail_len - 1)
+ writel(val, regs + SPI_GPRAM_ADDR + reg_offset);
+ }
+ }
+}
+
+/*
+ * Mirror of snfi_write_mac(): copy @count response bytes out of GPRAM
+ * starting at byte offset nfi_spi->tx_count (the response follows the
+ * transmitted bytes). Same head/body/tail word-splitting scheme.
+ * NOTE(review): "val" is passed to nandx_split() uninitialized here
+ * too — see the note on snfi_write_mac().
+ */
+static void snfi_read_mac(struct nfi_spi *nfi_spi, u8 *data, int count)
+{
+ void *regs = nfi_spi->base.res.nfi_regs;
+ u32 reg_offset = round_down(nfi_spi->tx_count, 4);
+ struct nandx_split32 split = {0};
+ u32 data_offset = 0, i, val;
+ u8 *p_val = (u8 *)&val;
+
+ nandx_split(&split, nfi_spi->tx_count, count, val, 4);
+
+ if (split.head_len) {
+ /* unaligned leading bytes share a word with sent data */
+ val = readl(regs + SPI_GPRAM_ADDR + reg_offset);
+
+ for (i = 0; i < split.head_len; i++)
+ data[data_offset + i] = p_val[split.head + i];
+ }
+
+ if (split.body_len) {
+ reg_offset = split.body;
+ data_offset = split.head_len;
+
+ /* fetch a fresh word every 4 bytes */
+ for (i = 0; i < split.body_len; i++) {
+ if ((i & 3) == 0) {
+ val = readl(regs + SPI_GPRAM_ADDR + reg_offset);
+ reg_offset += 4;
+ }
+
+ data[data_offset + i] = p_val[i % 4];
+ }
+ }
+
+ if (split.tail_len) {
+ reg_offset = split.tail;
+ data_offset += split.body_len;
+ val = readl(regs + SPI_GPRAM_ADDR + reg_offset);
+
+ for (i = 0; i < split.tail_len; i++)
+ data[data_offset + i] = p_val[i];
+ }
+}
+
+/*
+ * Queue a one-byte opcode. -1 means "no command" and is ignored.
+ * MAC mode stages the byte straight into GPRAM; otherwise it is
+ * remembered for the auto read/program config helpers.
+ */
+static int snfi_send_command(struct nfi *nfi, short cmd)
+{
+	struct nfi_spi *snf = base_to_snfi(nfi_to_base(nfi));
+
+	if (cmd == -1)
+		return 0;
+
+	if (snf->snfi_mode != SNFI_MAC_MODE) {
+		snf->cmd[snf->cur_cmd_idx++] = cmd;
+		return 0;
+	}
+
+	snfi_write_mac(snf, (u8 *)&cmd, 1);
+	snf->tx_count++;
+
+	return 0;
+}
+
+/*
+ * Queue an address phase. In MAC mode the row (or, when row_cycle is
+ * 0, the column) address is byte-swapped to big-endian, trimmed to
+ * "cycle" bytes and staged into GPRAM. Otherwise the raw row/column
+ * values are remembered for the auto-mode config helpers.
+ * NOTE(review): the non-MAC branch bumps cur_addr_idx twice per call,
+ * so a second call stores at row_addr[2]/col_addr[3] while the config
+ * helpers read row_addr[0..1]/col_addr[1..2] — confirm the intended
+ * indexing.
+ */
+static int snfi_send_address(struct nfi *nfi, int col, int row,
+ int col_cycle,
+ int row_cycle)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ struct nfi_spi *nfi_spi = base_to_snfi(nb);
+ u32 addr, cycle, temp;
+
+ nb->col = col;
+ nb->row = row;
+
+ if (nfi_spi->snfi_mode == SNFI_MAC_MODE) {
+ addr = row;
+ cycle = row_cycle;
+
+ if (!row_cycle) {
+ addr = col;
+ cycle = col_cycle;
+ }
+
+ /* keep only the "cycle" most significant address bytes */
+ temp = nandx_cpu_to_be32(addr) >> ((4 - cycle) << 3);
+ snfi_write_mac(nfi_spi, (u8 *)&temp, cycle);
+ nfi_spi->tx_count += cycle;
+ } else {
+ nfi_spi->row_addr[nfi_spi->cur_addr_idx++] = row;
+ nfi_spi->col_addr[nfi_spi->cur_addr_idx++] = col;
+ }
+
+ return 0;
+}
+
+/*
+ * Flush everything queued so far: program the MAC output length,
+ * request no input bytes, reset the staging counters and run one
+ * MAC transaction.
+ */
+static int snfi_trigger(struct nfi *nfi)
+{
+	struct nfi_base *nb = nfi_to_base(nfi);
+	struct nfi_spi *snf = base_to_snfi(nb);
+	void *regs = nb->res.nfi_regs;
+
+	writel(snf->tx_count, regs + SNF_MAC_OUTL);
+	writel(0, regs + SNF_MAC_INL);
+
+	snf->tx_count = 0;
+	snf->cur_cmd_idx = 0;
+	snf->cur_addr_idx = 0;
+
+	return snfi_mac_op(nb);
+}
+
+/* Select chip-select 0 or 1; any other value is rejected with -EIO. */
+static int snfi_select_chip(struct nfi *nfi, int cs)
+{
+	struct nfi_base *nb = nfi_to_base(nfi);
+	void *regs = nb->res.nfi_regs;
+	u32 misc = readl(regs + SNF_MISC_CTL);
+
+	switch (cs) {
+	case 0:
+		misc &= ~(SF2CS_SEL | SF2CS_EN);
+		break;
+	case 1:
+		misc |= SF2CS_SEL | SF2CS_EN;
+		break;
+	default:
+		return -EIO;
+	}
+
+	writel(misc, regs + SNF_MISC_CTL);
+
+	return 0;
+}
+
+/*
+ * Program the SNFI pad delay lines from a pre-tuned profile.
+ *
+ * @delay_mode: index into spi_delay[], 0 .. SPI_NAND_MAX_DELAY - 1.
+ *
+ * Returns 0 on success, -EINVAL for an out-of-range mode.
+ */
+static int snfi_set_delay(struct nfi_base *nb, u8 delay_mode)
+{
+	void *regs = nb->res.nfi_regs;
+	struct nfi_spi_delay *delay;
+	u32 val;
+
+	/* BUGFIX: the bound must be exclusive — the old check
+	 * "> SPI_NAND_MAX_DELAY" let delay_mode == SPI_NAND_MAX_DELAY
+	 * index one entry past the end of spi_delay[]. The former
+	 * "< 0" test on this unsigned parameter was dead code.
+	 */
+	if (delay_mode >= SPI_NAND_MAX_DELAY)
+		return -EINVAL;
+
+	delay = &spi_delay[delay_mode];
+
+	/* per-line output delays, one byte per IO pad */
+	val = delay->tIO_OUT_DLY[0] | delay->tIO_OUT_DLY[1] << 8 |
+	      delay->tIO_OUT_DLY[2] << 16 |
+	      delay->tIO_OUT_DLY[3] << 24;
+	writel(val, regs + SNF_DLY_CTL1);
+
+	/* per-line input delays */
+	val = delay->tIO_IN_DLY[0] | (delay->tIO_IN_DLY[1] << 8) |
+	      delay->tIO_IN_DLY[2] << 16 |
+	      delay->tIO_IN_DLY[3] << 24;
+	writel(val, regs + SNF_DLY_CTL2);
+
+	/* clock sample/output, CS and WR-enable delays */
+	val = delay->tCLK_SAM_DLY | delay->tCLK_OUT_DLY << 8 |
+	      delay->tCS_DLY << 16 |
+	      delay->tWR_EN_DLY << 24;
+	writel(val, regs + SNF_DLY_CTL3);
+
+	writel(delay->tCS_DLY, regs + SNF_DLY_CTL4);
+
+	/* read-latch latency lives in SNF_MISC_CTL */
+	val = readl(regs + SNF_MISC_CTL);
+	val |= (delay->tREAD_LATCH_LATENCY) <<
+	       LATCH_LAT_SHIFT;
+	writel(val, regs + SNF_MISC_CTL);
+
+	return 0;
+}
+
+/* No-op for SPI NAND: timing is applied via SNFI_CTRL_DELAY_MODE
+ * (snfi_set_delay) rather than through this generic hook.
+ */
+static int snfi_set_timing(struct nfi *nfi, void *timing, int type)
+{
+ /* Nothing need to do. */
+ return 0;
+}
+
+/* No-op stub. NOTE(review): ready/busy for SPI NAND is presumably
+ * handled by the GF/status polling logic elsewhere — confirm callers
+ * do not rely on this hook blocking.
+ */
+static int snfi_wait_ready(struct nfi *nfi, int type, u32 timeout)
+{
+ /* Nothing need to do. */
+ return 0;
+}
+
+/*
+ * Runtime control knob dispatcher. NFI_CTRL_* commands toggle generic
+ * nfi_base flags (args points to a bool); SNFI_CTRL_* commands select
+ * SPI-specific modes (args points to a u8). Returns 0, -EINVAL for a
+ * NULL args, or -EOPNOTSUPP for an unknown command.
+ */
+static int snfi_ctrl(struct nfi *nfi, int cmd, void *args)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ struct nfi_spi *nfi_spi = base_to_snfi(nb);
+ int ret = 0;
+
+ if (!args)
+ return -EINVAL;
+
+ switch (cmd) {
+ case NFI_CTRL_DMA:
+ nb->dma_en = *(bool *)args;
+ break;
+
+ case NFI_CTRL_NFI_IRQ:
+ nb->nfi_irq_en = *(bool *)args;
+ break;
+
+ case NFI_CTRL_ECC_IRQ:
+ nb->ecc_irq_en = *(bool *)args;
+ break;
+
+ case NFI_CTRL_PAGE_IRQ:
+ nb->page_irq_en = *(bool *)args;
+ break;
+
+ case NFI_CTRL_ECC:
+ nb->ecc_en = *(bool *)args;
+ break;
+
+ case NFI_CTRL_BAD_MARK_SWAP:
+ nb->bad_mark_swap_en = *(bool *)args;
+ break;
+
+ case NFI_CTRL_ECC_CLOCK:
+ nb->ecc_clk_en = *(bool *)args;
+ break;
+
+ case SNFI_CTRL_OP_MODE:
+ nfi_spi->snfi_mode = *(u8 *)args;
+ break;
+
+ case SNFI_CTRL_RX_MODE:
+ nfi_spi->read_cache_mode = *(u8 *)args;
+ break;
+
+ case SNFI_CTRL_TX_MODE:
+ nfi_spi->write_cache_mode = *(u8 *)args;
+ break;
+
+ case SNFI_CTRL_DELAY_MODE:
+ ret = snfi_set_delay(nb, *(u8 *)args);
+ break;
+
+ default:
+ pr_err("operation not support.\n");
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Run the queued command/address bytes as one MAC op and read back
+ * @count response bytes from GPRAM (they follow the sent bytes).
+ */
+static int snfi_read_bytes(struct nfi *nfi, u8 *data, int count)
+{
+	struct nfi_base *nb = nfi_to_base(nfi);
+	struct nfi_spi *snf = base_to_snfi(nb);
+	void *regs = nb->res.nfi_regs;
+	int err;
+
+	writel(snf->tx_count, regs + SNF_MAC_OUTL);
+	writel(count, regs + SNF_MAC_INL);
+
+	err = snfi_mac_op(nb);
+	if (err)
+		return err;
+
+	snfi_read_mac(snf, data, count);
+	snf->tx_count = 0;
+
+	return 0;
+}
+
+/*
+ * Append @count payload bytes behind the queued opcode/address bytes
+ * in GPRAM and fire the whole sequence as one output-only MAC op.
+ */
+static int snfi_write_bytes(struct nfi *nfi, u8 *data, int count)
+{
+	struct nfi_base *nb = nfi_to_base(nfi);
+	struct nfi_spi *snf = base_to_snfi(nb);
+	void *regs = nb->res.nfi_regs;
+
+	snfi_write_mac(snf, data, count);
+	snf->tx_count += count;
+
+	writel(0, regs + SNF_MAC_INL);
+	writel(snf->tx_count, regs + SNF_MAC_OUTL);
+	snf->tx_count = 0;
+
+	return snfi_mac_op(nb);
+}
+
+/*
+ * Reset the SPI-NAND front end: run the parent (generic NFI) reset,
+ * pulse SW_RST in SNF_MISC_CTL, and wait for the SPI state machine
+ * (SNF_STA_CTL1) to go idle before releasing the reset bit.
+ */
+static int snfi_reset(struct nfi *nfi)
+{
+ struct nfi_base *nb = nfi_to_base(nfi);
+ struct nfi_spi *nfi_spi = base_to_snfi(nb);
+ void *regs = nb->res.nfi_regs;
+ u32 val;
+ int ret;
+
+ ret = nfi_spi->parent->nfi.reset(nfi);
+ if (ret)
+ return ret;
+
+ val = readl(regs + SNF_MISC_CTL);
+ val |= SW_RST;
+ writel(val, regs + SNF_MISC_CTL);
+
+ ret = readx_poll_timeout_atomic(readw, regs + SNF_STA_CTL1, val,
+ !(val & SPI_STATE), 50,
+ NFI_TIMEOUT);
+ if (ret) {
+ pr_warn("spi state active in reset [0x%x] = 0x%x\n",
+ SNF_STA_CTL1, val);
+ return ret;
+ }
+
+ val = readl(regs + SNF_MISC_CTL);
+ val &= ~SW_RST;
+ writel(val, regs + SNF_MISC_CTL);
+
+ return 0;
+}
+
+/*
+ * Program the auto page-load path before writing @count sectors:
+ * select custom op mode, pick x1/x4 load, set the transfer length,
+ * and install the queued opcodes/addresses into the PG/GF registers.
+ * The dummy write to NFI_CMD arms the sequence.
+ */
+static int snfi_config_for_write(struct nfi_base *nb, int count)
+{
+ struct nfi_spi *nfi_spi = base_to_snfi(nb);
+ void *regs = nb->res.nfi_regs;
+ u32 val;
+
+ nb->set_op_mode(regs, CNFG_CUSTOM_MODE);
+
+ val = readl(regs + SNF_MISC_CTL);
+
+ if (nfi_spi->write_cache_mode == SNFI_TX_114)
+ val |= PG_LOAD_X4_EN;
+
+ if (nfi_spi->snfi_mode == SNFI_CUSTOM_MODE)
+ val |= PG_LOAD_CUSTOM_EN;
+
+ writel(val, regs + SNF_MISC_CTL);
+
+ /* total bytes = sectors * (data + spare per sector) */
+ val = count * (nb->nfi.sector_size + nb->nfi.sector_spare_size);
+ writel(val << PG_LOAD_SHIFT, regs + SNF_MISC_CTL2);
+
+ val = readl(regs + SNF_PG_CTL1);
+
+ if (nfi_spi->snfi_mode == SNFI_CUSTOM_MODE)
+ val |= nfi_spi->cmd[0] << PG_LOAD_CMD_SHIFT;
+ else {
+ /* auto mode: load, execute and get-feature opcodes */
+ val |= nfi_spi->cmd[0] | nfi_spi->cmd[1] << PG_LOAD_CMD_SHIFT |
+ nfi_spi->cmd[2] << PG_EXE_CMD_SHIFT;
+
+ writel(nfi_spi->row_addr[1], regs + SNF_PG_CTL3);
+ writel(nfi_spi->cmd[3] << GF_CMD_SHIFT | nfi_spi->col_addr[2] <<
+ GF_ADDR_SHIFT, regs + SNF_GF_CTL1);
+ }
+
+ writel(val, regs + SNF_PG_CTL1);
+ writel(nfi_spi->col_addr[1], regs + SNF_PG_CTL2);
+
+ writel(NFI_CMD_DUMMY_WR, regs + NFI_CMD);
+
+ return 0;
+}
+
+/*
+ * Program the auto data-read path before reading @count sectors:
+ * select custom op mode, translate the SNFI_RX_* cache mode into the
+ * controller's read-mode field, set the transfer length and install
+ * the queued opcodes/addresses. The dummy write to NFI_CMD arms the
+ * sequence. Returns -EINVAL for an unknown cache mode (registers are
+ * still programmed in that case — matches existing behavior).
+ */
+static int snfi_config_for_read(struct nfi_base *nb, int count)
+{
+ struct nfi_spi *nfi_spi = base_to_snfi(nb);
+ void *regs = nb->res.nfi_regs;
+ u32 val;
+ int ret = 0;
+
+ nb->set_op_mode(regs, CNFG_CUSTOM_MODE);
+
+ val = readl(regs + SNF_MISC_CTL);
+ val &= ~DARA_READ_MODE_MASK;
+
+ switch (nfi_spi->read_cache_mode) {
+
+ case SNFI_RX_111:
+ break;
+
+ case SNFI_RX_112:
+ val |= X2_DATA_MODE << READ_MODE_SHIFT;
+ break;
+
+ case SNFI_RX_114:
+ val |= X4_DATA_MODE << READ_MODE_SHIFT;
+ break;
+
+ case SNFI_RX_122:
+ val |= DUAL_IO_MODE << READ_MODE_SHIFT;
+ break;
+
+ case SNFI_RX_144:
+ val |= QUAD_IO_MODE << READ_MODE_SHIFT;
+ break;
+
+ default:
+ pr_err("Not support this read operarion: %d!\n",
+ nfi_spi->read_cache_mode);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (nfi_spi->snfi_mode == SNFI_CUSTOM_MODE)
+ val |= DATARD_CUSTOM_EN;
+
+ writel(val, regs + SNF_MISC_CTL);
+
+ /* total bytes = sectors * (data + spare per sector) */
+ val = count * (nb->nfi.sector_size + nb->nfi.sector_spare_size);
+ writel(val, regs + SNF_MISC_CTL2);
+
+ val = readl(regs + SNF_RD_CTL2);
+
+ if (nfi_spi->snfi_mode == SNFI_CUSTOM_MODE) {
+ val |= nfi_spi->cmd[0];
+ writel(nfi_spi->col_addr[1], regs + SNF_RD_CTL3);
+ } else {
+ /* auto mode: page-read, get-feature and data-read opcodes */
+ val |= nfi_spi->cmd[2];
+ writel(nfi_spi->cmd[0] << PAGE_READ_CMD_SHIFT |
+ nfi_spi->row_addr[0], regs + SNF_RD_CTL1);
+ writel(nfi_spi->cmd[1] << GF_CMD_SHIFT |
+ nfi_spi->col_addr[1] << GF_ADDR_SHIFT,
+ regs + SNF_GF_CTL1);
+ writel(nfi_spi->col_addr[2], regs + SNF_RD_CTL3);
+ }
+
+ writel(val, regs + SNF_RD_CTL2);
+
+ writel(NFI_CMD_DUMMY_RD, regs + NFI_CMD);
+
+ return ret;
+}
+
+/*
+ * Decide whether a page read back as erased (all 0xff). An erased
+ * NAND page may show a few zero bitflips, so up to 10 zero bits over
+ * the data area plus FDM are tolerated before declaring the page
+ * non-empty. Returns true when the page should be treated as blank.
+ */
+static bool is_page_empty(struct nfi_base *nb, u8 *data, u8 *fdm,
+			  int sectors)
+{
+	u32 *data32 = (u32 *)data;
+	u32 *fdm32 = (u32 *)fdm;
+	u32 i, count = 0;
+
+	for (i = 0; i < nb->format.page_size >> 2; i++) {
+		/* BUGFIX: compare against the full 32-bit all-ones
+		 * pattern (was 0xffff); with the old mask, words equal
+		 * to 0x0000ffff (16 zero bits) escaped the count and a
+		 * dirty page could be reported as empty.
+		 */
+		if (data32[i] != 0xffffffff) {
+			count += zero_popcount(data32[i]);
+			if (count > 10) {
+				pr_debug("%s %d %d count:%d\n",
+					 __func__, __LINE__, i, count);
+				return false;
+			}
+		}
+	}
+
+	if (fdm) {
+		for (i = 0; i < (nb->nfi.fdm_size * sectors >> 2); i++)
+			if (fdm32[i] != 0xffffffff) {
+				count += zero_popcount(fdm32[i]);
+				if (count > 10) {
+					pr_debug("%s %d %d count:%d\n",
+						 __func__, __LINE__, i, count);
+					return false;
+				}
+			}
+	}
+
+	return true;
+}
+
+/*
+ * SPI-NAND rw_prepare hook: run the generic (parent) preparation
+ * first, then program the read or write auto path.
+ */
+static int rw_prepare(struct nfi_base *nb, int sectors, u8 *data,
+		      u8 *fdm,
+		      bool read)
+{
+	struct nfi_spi *snf = base_to_snfi(nb);
+	int err;
+
+	err = snf->parent->rw_prepare(nb, sectors, data, fdm, read);
+	if (err)
+		return err;
+
+	return read ? snfi_config_for_read(nb, sectors) :
+	       snfi_config_for_write(nb, sectors);
+}
+
+/*
+ * SPI-NAND rw_complete hook: finish the generic path, clear whichever
+ * custom-enable bit config_for_read/_write set, and reset the command
+ * staging counters for the next operation.
+ */
+static void rw_complete(struct nfi_base *nb, u8 *data, u8 *fdm,
+			bool read)
+{
+	struct nfi_spi *snf = base_to_snfi(nb);
+	void *regs = nb->res.nfi_regs;
+	u32 misc;
+
+	snf->parent->rw_complete(nb, data, fdm, read);
+
+	misc = readl(regs + SNF_MISC_CTL);
+	misc &= read ? ~DATARD_CUSTOM_EN : ~PG_LOAD_CUSTOM_EN;
+	writel(misc, regs + SNF_MISC_CTL);
+
+	snf->tx_count = 0;
+	snf->cur_cmd_idx = 0;
+	snf->cur_addr_idx = 0;
+}
+
+/* Override the generic nfi_base hooks with the SPI-NAND variants. */
+static void set_nfi_base_funcs(struct nfi_base *nb)
+{
+	struct nfi *nfi = &nb->nfi;
+
+	nfi->reset = snfi_reset;
+	nfi->set_timing = snfi_set_timing;
+	nfi->wait_ready = snfi_wait_ready;
+	nfi->send_cmd = snfi_send_command;
+	nfi->send_addr = snfi_send_address;
+	nfi->trigger = snfi_trigger;
+	nfi->nfi_ctrl = snfi_ctrl;
+	nfi->select_chip = snfi_select_chip;
+	nfi->read_bytes = snfi_read_bytes;
+	nfi->write_bytes = snfi_write_bytes;
+
+	nb->rw_prepare = rw_prepare;
+	nb->rw_complete = rw_complete;
+	nb->is_page_empty = is_page_empty;
+}
+
+/*
+ * SPI-NAND override of the weak nfi_extend_init(): wrap the generic
+ * nfi_base in an nfi_spi object (keeping the original as "parent" so
+ * its hooks can be chained), install the SPI hooks, default to x4
+ * cache transfers and switch the controller into SPI mode.
+ */
+struct nfi *nfi_extend_init(struct nfi_base *nb)
+{
+	struct nfi_spi *snf = mem_alloc(1, sizeof(struct nfi_spi));
+
+	if (!snf) {
+		pr_err("snfi alloc memory fail @%s.\n", __func__);
+		return NULL;
+	}
+
+	memcpy(&snf->base, nb, sizeof(struct nfi_base));
+	snf->parent = nb;
+	snf->read_cache_mode = SNFI_RX_114;
+	snf->write_cache_mode = SNFI_TX_114;
+
+	set_nfi_base_funcs(&snf->base);
+
+	/* Change nfi to spi mode */
+	writel(SPI_MODE, nb->res.nfi_regs + SNF_SNF_CNFG);
+
+	return &snf->base.nfi;
+}
+
+/*
+ * SPI-NAND override of the weak nfi_extend_exit(): free both the
+ * original nfi_base kept as parent and the wrapping nfi_spi object.
+ */
+void nfi_extend_exit(struct nfi_base *nb)
+{
+	struct nfi_spi *snf = base_to_snfi(nb);
+
+	mem_free(snf->parent);
+	mem_free(snf);
+}
+
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi/nfi_spi.h b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi/nfi_spi.h
new file mode 100644
index 0000000..a522556
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi/nfi_spi.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NFI_SPI_H__
+#define __NFI_SPI_H__
+
+/* number of pre-tuned entries in the spi_delay[] profile table */
+#define SPI_NAND_MAX_DELAY 6
+/* max opcodes / address cycles staged per operation */
+#define SPI_NAND_MAX_OP 4
+
+/*
+ * One pre-tuned set of SNFI pad delays; programmed into
+ * SNF_DLY_CTL1..4 plus the read-latch latency field of SNF_MISC_CTL
+ * by snfi_set_delay().
+ */
+struct nfi_spi_delay {
+ u8 tCLK_SAM_DLY;
+ u8 tCLK_OUT_DLY;
+ u8 tCS_DLY;
+ u8 tWR_EN_DLY;
+ u8 tIO_IN_DLY[4];
+ u8 tIO_OUT_DLY[4];
+ u8 tREAD_LATCH_LATENCY;
+};
+
+/* SPI Nand structure */
+struct nfi_spi {
+ struct nfi_base base; /* embedded generic controller state */
+ struct nfi_base *parent; /* original base from nfi_extend_init() */
+
+ u8 snfi_mode; /* SNFI_MAC_MODE / SNFI_CUSTOM_MODE / auto */
+ u8 tx_count; /* bytes currently staged in GPRAM */
+
+ u8 cmd[SPI_NAND_MAX_OP]; /* opcodes queued in non-MAC mode */
+ u8 cur_cmd_idx;
+
+ u32 row_addr[SPI_NAND_MAX_OP]; /* addresses queued in non-MAC mode */
+ u32 col_addr[SPI_NAND_MAX_OP];
+ u8 cur_addr_idx;
+
+ u8 read_cache_mode; /* SNFI_RX_* transfer width for reads */
+ u8 write_cache_mode; /* SNFI_TX_* transfer width for writes */
+};
+
+#endif /* __NFI_SPI_H__ */
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi/nfi_spi_regs.h b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi/nfi_spi_regs.h
new file mode 100644
index 0000000..77adf46
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi/nfi_spi_regs.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NFI_SPI_REGS_H__
+#define __NFI_SPI_REGS_H__
+
+/*
+ * SNFI (SPI-NAND front end of the NFI controller) register offsets.
+ * Bit-field macros follow the register they belong to.
+ */
+#define SNF_MAC_CTL 0x500
+#define WIP BIT(0)
+#define WIP_READY BIT(1)
+#define SF_TRIG BIT(2)
+#define SF_MAC_EN BIT(3)
+#define MAC_XIO_SEL BIT(4)
+#define SNF_MAC_OUTL 0x504
+#define SNF_MAC_INL 0x508
+#define SNF_RD_CTL1 0x50c
+#define PAGE_READ_CMD_SHIFT 24
+#define SNF_RD_CTL2 0x510
+#define SNF_RD_CTL3 0x514
+#define SNF_GF_CTL1 0x518
+#define GF_ADDR_SHIFT 16
+#define GF_CMD_SHIFT 24
+#define SNF_GF_CTL3 0x520
+#define SNF_PG_CTL1 0x524
+#define PG_EXE_CMD_SHIFT 16
+#define PG_LOAD_CMD_SHIFT 8
+#define SNF_PG_CTL2 0x528
+#define SNF_PG_CTL3 0x52c
+#define SNF_ER_CTL 0x530
+#define SNF_ER_CTL2 0x534
+#define SNF_MISC_CTL 0x538
+#define SW_RST BIT(28)
+#define PG_LOAD_X4_EN BIT(20)
+#define X2_DATA_MODE 1
+#define X4_DATA_MODE 2
+#define DUAL_IO_MODE 5
+#define QUAD_IO_MODE 6
+#define READ_MODE_SHIFT 16
+#define LATCH_LAT_SHIFT 8
+#define LATCH_LAT_MASK GENMASK(9, 8)
+#define DARA_READ_MODE_MASK GENMASK(18, 16)
+#define SF2CS_SEL BIT(13)
+#define SF2CS_EN BIT(12)
+#define PG_LOAD_CUSTOM_EN BIT(7)
+#define DATARD_CUSTOM_EN BIT(6)
+#define SNF_MISC_CTL2 0x53c
+#define PG_LOAD_SHIFT 16
+#define SNF_DLY_CTL1 0x540
+#define SNF_DLY_CTL2 0x544
+#define SNF_DLY_CTL3 0x548
+#define SNF_DLY_CTL4 0x54c
+#define SNF_STA_CTL1 0x550
+#define SPI_STATE GENMASK(3, 0)
+#define SNF_STA_CTL2 0x554
+#define SNF_STA_CTL3 0x558
+/* also visible in nfi_regs.h as SPI_MODE_EN/DIS */
+#define SNF_SNF_CNFG 0x55c
+#define SPI_MODE BIT(0)
+#define SNF_DEBUG_SEL 0x560
+/* word-addressable staging RAM for MAC transactions */
+#define SPI_GPRAM_ADDR 0x800
+
+#endif /* __NFI_SPI_REGS_H__ */
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi/nfiecc.c b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi/nfiecc.c
new file mode 100644
index 0000000..d41f667
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi/nfiecc.c
@@ -0,0 +1,507 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "nfiecc_regs.h"
+#include "nfiecc.h"
+
+/* Select the encoder or decoder instance of each register pair. */
+#define NFIECC_IDLE_REG(op) \
+ ((op) == ECC_ENCODE ? NFIECC_ENCIDLE : NFIECC_DECIDLE)
+#define IDLE_MASK 1
+#define NFIECC_CTL_REG(op) \
+ ((op) == ECC_ENCODE ? NFIECC_ENCCON : NFIECC_DECCON)
+#define NFIECC_IRQ_REG(op) \
+ ((op) == ECC_ENCODE ? NFIECC_ENCIRQEN : NFIECC_DECIRQEN)
+#define NFIECC_ADDR(op) \
+ ((op) == ECC_ENCODE ? NFIECC_ENCDIADDR : NFIECC_DECDIADDR)
+
+/* poll budget for ECC state transitions */
+#define ECC_TIMEOUT 500000
+
+/* ecc strength that each IP supports */
+static const int ecc_strength_mt8512[] = {
+ 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24
+};
+
+/*
+ * Shared ECC interrupt handler: decide whether the decoder or the
+ * encoder raised the irq and complete the ecc->done event. Returns
+ * NAND_IRQ_HANDLED when consumed, NAND_IRQ_NONE otherwise.
+ * NOTE(review): DECDONE is masked with the raw config.sectors count
+ * here, while the polling path uses BIT(sectors - 1) — confirm which
+ * masking is intended.
+ */
+static int nfiecc_irq_handler(void *data)
+{
+ struct nfiecc *ecc = data;
+ void *regs = ecc->res.regs;
+ u32 status;
+
+ status = readl(regs + NFIECC_DECIRQSTA) & DEC_IRQSTA_GEN;
+ if (status) {
+ status = readl(regs + NFIECC_DECDONE);
+ if (!(status & ecc->config.sectors))
+ return NAND_IRQ_NONE;
+
+ /*
+ * Clear decode IRQ status once again to ensure that
+ * there will be no extra IRQ.
+ */
+ readl(regs + NFIECC_DECIRQSTA);
+ ecc->config.sectors = 0;
+ nandx_event_complete(ecc->done);
+ } else {
+ status = readl(regs + NFIECC_ENCIRQSTA) & ENC_IRQSTA_GEN;
+ if (!status)
+ return NAND_IRQ_NONE;
+
+ nandx_event_complete(ecc->done);
+ }
+
+ return NAND_IRQ_HANDLED;
+}
+
+/* Poll the encoder/decoder idle bit; warn (but still return) on timeout. */
+static inline int nfiecc_wait_idle(struct nfiecc *ecc)
+{
+	int op = ecc->config.op;
+	int err, sta;
+
+	err = readl_poll_timeout_atomic(ecc->res.regs + NFIECC_IDLE_REG(op),
+					sta, sta & IDLE_MASK,
+					10, ECC_TIMEOUT);
+	if (err)
+		pr_warn("%s not idle\n",
+			op == ECC_ENCODE ? "encoder" : "decoder");
+
+	return err;
+}
+
+/*
+ * Wait for the encoder FSM to go idle. With interrupts enabled, do a
+ * single quick poll first (the irq may already have fired), then wait
+ * on the completion event; finish with a plain status poll either way.
+ */
+static int nfiecc_wait_encode_done(struct nfiecc *ecc)
+{
+	int err, sta;
+
+	if (ecc->ecc_irq_en) {
+		/* poll one time to avoid missing irq event */
+		err = readl_poll_timeout_atomic(ecc->res.regs + NFIECC_ENCSTA,
+						sta, sta & ENC_FSM_IDLE, 1, 1);
+		if (!err)
+			return 0;
+
+		/* irq done, if not, we can go on to poll status for a while */
+		err = nandx_event_wait_complete(ecc->done, ECC_TIMEOUT);
+		if (err)
+			return 0;
+	}
+
+	err = readl_poll_timeout_atomic(ecc->res.regs + NFIECC_ENCSTA,
+					sta, sta & ENC_FSM_IDLE,
+					10, ECC_TIMEOUT);
+	if (err)
+		pr_err("encode timeout\n");
+
+	return err;
+}
+
+/*
+ * Wait for the decoder to finish the last configured sector (bit
+ * sectors-1 of DECDONE), then — because "done" does not mean the
+ * whole pipeline drained — wait for the decode FSM to go idle.
+ */
+static int nfiecc_wait_decode_done(struct nfiecc *ecc)
+{
+ u32 secbit = BIT(ecc->config.sectors - 1);
+ void *regs = ecc->res.regs;
+ int ret, val;
+
+ if (ecc->ecc_irq_en) {
+ /* quick poll first so an already-fired irq is not missed */
+ ret = readl_poll_timeout_atomic(regs + NFIECC_DECDONE,
+ val, val & secbit, 1, 1);
+ if (!ret)
+ return 0;
+
+ ret = nandx_event_wait_complete(ecc->done, ECC_TIMEOUT);
+ if (ret)
+ return 0;
+ }
+
+ ret = readl_poll_timeout_atomic(regs + NFIECC_DECDONE,
+ val, val & secbit,
+ 10, ECC_TIMEOUT);
+ if (ret) {
+ pr_err("decode timeout\n");
+ return ret;
+ }
+
+ /* decode done does not stands for ecc all work done.
+ * we need check syn, bma, chien, autoc all idle.
+ * just check it when ECC_DECCNFG[13:12] is 3,
+ * which means auto correct.
+ */
+ ret = readl_poll_timeout_atomic(regs + NFIECC_DECFSM,
+ val, (val & FSM_MASK) == FSM_IDLE,
+ 10, ECC_TIMEOUT);
+ if (ret)
+ pr_err("decode fsm(0x%x) is not idle\n",
+ readl(regs + NFIECC_DECFSM));
+
+ return ret;
+}
+
+/* Dispatch to the encode or decode completion wait. */
+static int nfiecc_wait_done(struct nfiecc *ecc)
+{
+	return ecc->config.op == ECC_ENCODE ?
+	       nfiecc_wait_encode_done(ecc) :
+	       nfiecc_wait_decode_done(ecc);
+}
+
+/* Build and program ENCCNFG: strength index, transfer mode, message size. */
+static void nfiecc_encode_config(struct nfiecc *ecc, u32 ecc_idx)
+{
+	struct nfiecc_config *cfg = &ecc->config;
+	u32 reg = ecc_idx | (cfg->mode << ecc->caps->ecc_mode_shift);
+
+	if (cfg->mode == ECC_DMA_MODE)
+		reg |= ENC_BURST_EN;
+
+	/* message size is expressed in bits (len * 8) */
+	reg |= (cfg->len << 3) << ENCCNFG_MS_SHIFT;
+	writel(reg, ecc->res.regs + NFIECC_ENCCNFG);
+}
+
+/*
+ * Build and program DECCNFG: strength index, transfer mode, decode
+ * size (message bits plus parity bits), correction mode and the
+ * empty-page detector enable.
+ */
+static void nfiecc_decode_config(struct nfiecc *ecc, u32 ecc_idx)
+{
+ struct nfiecc_config *config = &ecc->config;
+ u32 dec_sz = (config->len << 3) +
+ config->strength * ecc->caps->parity_bits;
+ u32 val;
+
+ val = ecc_idx | (config->mode << ecc->caps->ecc_mode_shift);
+
+ if (config->mode == ECC_DMA_MODE)
+ val |= DEC_BURST_EN;
+
+ val |= (dec_sz << DECCNFG_MS_SHIFT) |
+ (config->deccon << DEC_CON_SHIFT);
+ val |= DEC_EMPTY_EN;
+ writel(val, ecc->res.regs + NFIECC_DECCNFG);
+}
+
+/*
+ * Translate the configured strength into the IP's strength index and
+ * program the encoder or decoder config register.
+ * NOTE(review): if the strength is not found in the table, idx ends
+ * up equal to ecc_strength_num and is programmed as-is — presumably
+ * callers always pass a value from nfiecc_adjust_strength(); confirm.
+ */
+static void nfiecc_config(struct nfiecc *ecc)
+{
+ u32 idx;
+
+ for (idx = 0; idx < ecc->caps->ecc_strength_num; idx++) {
+ if (ecc->config.strength == ecc->caps->ecc_strength[idx])
+ break;
+ }
+
+ if (ecc->config.op == ECC_ENCODE)
+ nfiecc_encode_config(ecc, idx);
+ else
+ nfiecc_decode_config(ecc, idx);
+}
+
+/*
+ * Configure and start the encoder/decoder, then arm its interrupt.
+ * When page-level irq is requested the irq-enable register is written
+ * a second time with ECC_PG_IRQ_SEL added (two MMIO writes, matching
+ * the hardware programming sequence used here). Always returns 0.
+ */
+static int nfiecc_enable(struct nfiecc *ecc)
+{
+ enum nfiecc_operation op = ecc->config.op;
+ void *regs = ecc->res.regs;
+
+ nfiecc_config(ecc);
+
+ writel(ECC_OP_EN, regs + NFIECC_CTL_REG(op));
+
+ if (ecc->ecc_irq_en) {
+ writel(ECC_IRQEN, regs + NFIECC_IRQ_REG(op));
+
+ if (ecc->page_irq_en)
+ writel(ECC_IRQEN | ECC_PG_IRQ_SEL,
+ regs + NFIECC_IRQ_REG(op));
+
+ nandx_event_init(ecc->done);
+ }
+
+ return 0;
+}
+
+/*
+ * Wait for the engine to drain, then mask its irq and stop it.
+ * NOTE(review): "~ECC_OP_EN" writes all bits except bit0 to the
+ * control register rather than plain 0 — presumably only bit0 is
+ * implemented; confirm against the register description.
+ */
+static int nfiecc_disable(struct nfiecc *ecc)
+{
+ enum nfiecc_operation op = ecc->config.op;
+ void *regs = ecc->res.regs;
+
+ nfiecc_wait_idle(ecc);
+
+ writel(0, regs + NFIECC_IRQ_REG(op));
+ writel(~ECC_OP_EN, regs + NFIECC_CTL_REG(op));
+
+ return 0;
+}
+
+/*
+ * Apply the decoder's error locations to one sector of data.
+ * DECENUM packs 4 per-sector error counts per word (8 bits each);
+ * an all-ones count marks the sector uncorrectable (-ENANDREAD).
+ * Otherwise each 16-bit entry of DECEL gives a bit position to flip.
+ * Updates status->corrected/failed/bitflips as a side effect.
+ */
+static int nfiecc_correct_data(struct nfiecc *ecc,
+ struct nfiecc_status *status,
+ u8 *data, u32 sector)
+{
+ u32 err, offset, i;
+ u32 loc, byteloc, bitloc;
+
+ status->corrected = 0;
+ status->failed = 0;
+
+ offset = (sector >> 2);
+ err = readl(ecc->res.regs + NFIECC_DECENUM(offset));
+ err >>= (sector % 4) * 8;
+ err &= ecc->caps->err_mask;
+
+ if (err == ecc->caps->err_mask) {
+ status->failed++;
+ return -ENANDREAD;
+ }
+
+ status->corrected += err;
+ status->bitflips = max_t(u32, status->bitflips, err);
+
+ for (i = 0; i < err; i++) {
+ /* two 16-bit error locations per DECEL word */
+ loc = readl(ecc->res.regs + NFIECC_DECEL(i >> 1));
+ loc >>= ((i & 0x1) << 4);
+ byteloc = loc >> 3;
+ bitloc = loc & 0x7;
+ data[byteloc] ^= (1 << bitloc);
+ }
+
+ return 0;
+}
+
+/*
+ * Feed input data to the engine. DMA mode just programs the source
+ * address register; PIO mode pushes the message word by word through
+ * the PIO port, waiting for the ready bit before each write. For
+ * decode the transfer also covers the parity bytes.
+ */
+static int nfiecc_fill_data(struct nfiecc *ecc, u8 *data)
+{
+ struct nfiecc_config *config = &ecc->config;
+ void *regs = ecc->res.regs;
+ int size, ret, i;
+ u32 val;
+
+ if (config->mode == ECC_DMA_MODE) {
+ if ((unsigned long)config->dma_addr & 0x3)
+ pr_info("encode address is not 4B aligned: 0x%x\n",
+ (u32)(unsigned long)config->dma_addr);
+
+ writel((unsigned long)config->dma_addr,
+ regs + NFIECC_ADDR(config->op));
+ } else if (config->mode == ECC_PIO_MODE) {
+ if (config->op == ECC_ENCODE) {
+ /* message only, rounded up to whole words */
+ size = (config->len + 3) >> 2;
+ } else {
+ /* message + parity, rounded to bytes then words */
+ size = config->strength * ecc->caps->parity_bits;
+ size = (size + 7) >> 3;
+ size += config->len;
+ size >>= 2;
+ }
+
+ for (i = 0; i < size; i++) {
+ ret = readl_poll_timeout_atomic(regs + NFIECC_PIO_DIRDY,
+ val, val & PIO_DI_RDY,
+ 10, ECC_TIMEOUT);
+ if (ret)
+ return ret;
+
+ writel(*((u32 *)data + i), regs + NFIECC_PIO_DI);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Run one encode pass (PIO/DMA only; NFI mode is driven by the NFI
+ * itself) and append the generated parity bytes to the message at
+ * data + config->len, reading them out of the ENCPAR registers.
+ */
+static int nfiecc_encode(struct nfiecc *ecc, u8 *data)
+{
+ struct nfiecc_config *config = &ecc->config;
+ u32 len, i, val = 0;
+ u8 *p;
+ int ret;
+
+ /* Under NFI mode, nothing need to do */
+ if (config->mode == ECC_NFI_MODE)
+ return 0;
+
+ ret = nfiecc_fill_data(ecc, data);
+ if (ret)
+ return ret;
+
+ ret = nfiecc_wait_encode_done(ecc);
+ if (ret)
+ return ret;
+
+ ret = nfiecc_wait_idle(ecc);
+ if (ret)
+ return ret;
+
+ /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
+ len = (config->strength * ecc->caps->parity_bits + 7) >> 3;
+ p = data + config->len;
+
+ /* Write the parity bytes generated by the ECC back to the OOB region */
+ for (i = 0; i < len; i++) {
+ if ((i % 4) == 0)
+ val = readl(ecc->res.regs + NFIECC_ENCPAR(i / 4));
+
+ p[i] = (val >> ((i % 4) * 8)) & 0xff;
+ }
+
+ return 0;
+}
+
+/* Run one ECC decode pass over @data and wait for completion.
+ * Bit correction is applied separately via nfiecc_correct_data().
+ * Returns 0 on success or a negative error code.
+ */
+static int nfiecc_decode(struct nfiecc *ecc, u8 *data)
+{
+ int ret;
+
+ /* Under NFI mode, nothing need to do */
+ if (ecc->config.mode == ECC_NFI_MODE)
+ return 0;
+
+ ret = nfiecc_fill_data(ecc, data);
+ if (ret)
+ return ret;
+
+ return nfiecc_wait_decode_done(ecc);
+}
+
+/* Summarize decode results for @sectors sectors from @start_sector.
+ * Returns the maximum per-sector error count seen, or -ENANDREAD when
+ * at least one sector was uncorrectable (count == err_mask).
+ */
+static int nfiecc_decode_status(struct nfiecc *ecc, u32 start_sector,
+ u32 sectors)
+{
+ void *regs = ecc->res.regs;
+ u32 i, val = 0, err;
+ u32 bitflips = 0;
+
+ for (i = start_sector; i < start_sector + sectors; i++) {
+ /* fix: also load DECENUM on the first iteration - when
+ * start_sector % 4 != 0 the original left val at 0 and
+ * under-reported errors for the leading sectors
+ */
+ if (i == start_sector || (i % 4) == 0)
+ val = readl(regs + NFIECC_DECENUM(i / 4));
+
+ err = val >> ((i % 4) * 8);
+ err &= ecc->caps->err_mask;
+
+ if (err == ecc->caps->err_mask)
+ pr_info("sector %d is uncorrect\n", i);
+
+ bitflips = max_t(u32, bitflips, err);
+ }
+
+ if (bitflips == ecc->caps->err_mask)
+ return -ENANDREAD;
+
+ return bitflips;
+}
+
+/* Map a requested strength onto the closest supported value that does
+ * not exceed it; requests above the table clamp to the maximum.
+ * Returns the chosen strength, or -EINVAL when the request is below the
+ * smallest supported value.
+ */
+static int nfiecc_adjust_strength(struct nfiecc *ecc, int strength)
+{
+ struct nfiecc_caps *caps = ecc->caps;
+ int idx;
+
+ /* the table is sorted ascending: walk down to the best fit */
+ for (idx = caps->ecc_strength_num - 1; idx >= 0; idx--) {
+ if (strength >= caps->ecc_strength[idx])
+ return caps->ecc_strength[idx];
+ }
+
+ return -EINVAL;
+}
+
+/* Runtime configuration hook: toggle the ECC/page interrupt flags.
+ * @args points at a bool for both supported commands.
+ * Returns 0 on success, -EINVAL for an unknown command.
+ */
+static int nfiecc_ctrl(struct nfiecc *ecc, int cmd, void *args)
+{
+ if (cmd == NFI_CTRL_ECC_IRQ) {
+ ecc->ecc_irq_en = *(bool *)args;
+ return 0;
+ }
+
+ if (cmd == NFI_CTRL_ECC_PAGE_IRQ) {
+ ecc->page_irq_en = *(bool *)args;
+ return 0;
+ }
+
+ pr_err("invalid arguments.\n");
+ return -EINVAL;
+}
+
+/* Bring both ECC engines into a known disabled state at probe time.
+ * NOTE(review): writes ~ECC_OP_EN (all bits but 0 set) rather than 0 to
+ * ENCCON/DECCON, matching the disable path - presumably only bit 0 is
+ * writable in these registers; confirm against the datasheet.
+ */
+static int nfiecc_hw_init(struct nfiecc *ecc)
+{
+ int ret;
+
+ ret = nfiecc_wait_idle(ecc);
+ if (ret)
+ return ret;
+
+ writel(~ECC_OP_EN, ecc->res.regs + NFIECC_ENCCON);
+
+ ret = nfiecc_wait_idle(ecc);
+ if (ret)
+ return ret;
+
+ writel(~ECC_OP_EN, ecc->res.regs + NFIECC_DECCON);
+
+ return 0;
+}
+
+/* MT8512 ECC capabilities: 5-bit per-sector error counter (err_mask),
+ * 14 parity bits per correction step, 11 supported strength levels.
+ */
+static struct nfiecc_caps nfiecc_caps_mt8512 = {
+ .err_mask = 0x1f,
+ .ecc_mode_shift = 5,
+ .parity_bits = 14,
+ .ecc_strength = ecc_strength_mt8512,
+ .ecc_strength_num = 11,
+};
+
+/* Select the capability table for the given IC; only MT8512 for now. */
+static struct nfiecc_caps *nfiecc_get_match_data(enum mtk_ic_version ic)
+{
+ /* NOTE: add other IC's data */
+ return &nfiecc_caps_mt8512;
+}
+
+/* Allocate and initialise an ECC controller instance.
+ * Registers the ECC interrupt, fills in the operation callbacks and
+ * resets the encode/decode engines.
+ * Returns the instance, or NULL on any failure.
+ */
+struct nfiecc *nfiecc_init(struct nfiecc_resource *res)
+{
+ struct nfiecc *ecc;
+ int ret;
+
+ ecc = mem_alloc(1, sizeof(struct nfiecc));
+ if (!ecc)
+ return NULL;
+
+ ecc->res = *res;
+
+ ret = nandx_irq_register(res->dev, res->irq_id, nfiecc_irq_handler,
+ "mtk-ecc", ecc);
+ if (ret) {
+ pr_err("ecc irq register failed!\n");
+ goto error;
+ }
+
+ ecc->ecc_irq_en = false;
+ ecc->page_irq_en = false;
+ /* NOTE(review): nandx_event_create() result is not checked - confirm
+ * it cannot fail on this platform
+ */
+ ecc->done = nandx_event_create();
+ ecc->caps = nfiecc_get_match_data(res->ic_ver);
+
+ ecc->adjust_strength = nfiecc_adjust_strength;
+ ecc->enable = nfiecc_enable;
+ ecc->disable = nfiecc_disable;
+ ecc->decode = nfiecc_decode;
+ ecc->encode = nfiecc_encode;
+ ecc->wait_done = nfiecc_wait_done;
+ ecc->decode_status = nfiecc_decode_status;
+ ecc->correct_data = nfiecc_correct_data;
+ ecc->nfiecc_ctrl = nfiecc_ctrl;
+
+ ret = nfiecc_hw_init(ecc);
+ if (ret) {
+ /* fix: the original returned NULL here, leaking both the
+ * event and the nfiecc instance
+ */
+ nandx_event_destroy(ecc->done);
+ goto error;
+ }
+
+ return ecc;
+
+error:
+ mem_free(ecc);
+
+ return NULL;
+}
+
+/* Release the event and the instance obtained in nfiecc_init(). */
+void nfiecc_exit(struct nfiecc *ecc)
+{
+ nandx_event_destroy(ecc->done);
+ mem_free(ecc);
+}
+
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi/nfiecc.h b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi/nfiecc.h
new file mode 100644
index 0000000..b02a5c3
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi/nfiecc.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NFIECC_H__
+#define __NFIECC_H__
+
+/* How data is moved through the ECC engine. */
+enum nfiecc_mode {
+ ECC_DMA_MODE,
+ ECC_NFI_MODE,
+ ECC_PIO_MODE
+};
+
+/* Direction of an ECC operation. */
+enum nfiecc_operation {
+ ECC_ENCODE,
+ ECC_DECODE
+};
+
+/* Decode depth: detect errors only, locate them, or also correct. */
+enum nfiecc_deccon {
+ ECC_DEC_FER = 1,
+ ECC_DEC_LOCATE = 2,
+ ECC_DEC_CORRECT = 3
+};
+
+/* Platform resources handed to nfiecc_init(): IC version, parent
+ * device, register base and interrupt line.
+ */
+struct nfiecc_resource {
+ int ic_ver;
+ void *dev;
+ void *regs;
+ int irq_id;
+
+};
+
+/* Per-sector correction result counters. */
+struct nfiecc_status {
+ u32 corrected;
+ u32 failed;
+ u32 bitflips;
+};
+
+/* Per-IC ECC engine capabilities (see nfiecc_caps_mt8512). */
+struct nfiecc_caps {
+ u32 err_mask;
+ u32 ecc_mode_shift;
+ u32 parity_bits;
+ const int *ecc_strength;
+ u32 ecc_strength_num;
+};
+
+/* Parameters of the current encode/decode operation. */
+struct nfiecc_config {
+ enum nfiecc_operation op;
+ enum nfiecc_mode mode;
+ enum nfiecc_deccon deccon;
+
+ void *dma_addr; /* DMA use only */
+ u32 strength;
+ u32 sectors;
+ u32 len;
+};
+
+/* ECC controller instance; callbacks are bound in nfiecc_init(). */
+struct nfiecc {
+ struct nfiecc_resource res;
+ struct nfiecc_config config;
+ struct nfiecc_caps *caps;
+
+ bool ecc_irq_en;
+ bool page_irq_en;
+
+ void *done;
+
+ int (*adjust_strength)(struct nfiecc *ecc, int strength);
+ int (*enable)(struct nfiecc *ecc);
+ int (*disable)(struct nfiecc *ecc);
+
+ int (*decode)(struct nfiecc *ecc, u8 *data);
+ int (*encode)(struct nfiecc *ecc, u8 *data);
+
+ int (*decode_status)(struct nfiecc *ecc, u32 start_sector, u32 sectors);
+ int (*correct_data)(struct nfiecc *ecc,
+ struct nfiecc_status *status,
+ u8 *data, u32 sector);
+ int (*wait_done)(struct nfiecc *ecc);
+
+ int (*nfiecc_ctrl)(struct nfiecc *ecc, int cmd, void *args);
+};
+
+struct nfiecc *nfiecc_init(struct nfiecc_resource *res);
+void nfiecc_exit(struct nfiecc *ecc);
+
+#endif /* __NFIECC_H__ */
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi/nfiecc_regs.h b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi/nfiecc_regs.h
new file mode 100644
index 0000000..f794030
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/core/nfi/nfiecc_regs.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NFIECC_REGS_H__
+#define __NFIECC_REGS_H__
+
+/* NFI ECC controller register map: encoder block first, decoder block
+ * from offset 0x100, parity/error-location arrays at 0x300/0x500.
+ */
+#define NFIECC_ENCCON 0x000
+/* NFIECC_DECCON has same bit define */
+#define ECC_OP_EN BIT(0)
+#define NFIECC_ENCCNFG 0x004
+#define ENCCNFG_MS_SHIFT 16
+#define ENC_BURST_EN BIT(8)
+#define NFIECC_ENCDIADDR 0x008
+#define NFIECC_ENCIDLE 0x00c
+#define NFIECC_ENCSTA 0x07c
+#define ENC_FSM_IDLE 1
+#define NFIECC_ENCIRQEN 0x080
+/* NFIECC_DECIRQEN has same bit define */
+#define ECC_IRQEN BIT(0)
+#define ECC_PG_IRQ_SEL BIT(1)
+#define NFIECC_ENCIRQSTA 0x084
+#define ENC_IRQSTA_GEN BIT(0)
+#define NFIECC_PIO_DIRDY 0x090
+#define PIO_DI_RDY BIT(0)
+#define NFIECC_PIO_DI 0x094
+/* decoder block */
+#define NFIECC_DECCON 0x100
+#define NFIECC_DECCNFG 0x104
+#define DEC_BURST_EN BIT(8)
+#define DEC_EMPTY_EN BIT(31)
+#define DEC_CON_SHIFT 12
+#define DECCNFG_MS_SHIFT 16
+#define NFIECC_DECDIADDR 0x108
+#define NFIECC_DECIDLE 0x10c
+#define NFIECC_DECENUM(x) (0x114 + (x) * 4)
+#define NFIECC_DECDONE 0x124
+#define NFIECC_DECIRQEN 0x200
+#define NFIECC_DECIRQSTA 0x204
+#define DEC_IRQSTA_GEN BIT(0)
+#define NFIECC_DECFSM 0x208
+#define FSM_MASK 0x3f3fff0f
+#define FSM_IDLE 0x01011101
+#define NFIECC_BYPASS 0x20c
+#define NFIECC_BYPASS_EN BIT(0)
+#define NFIECC_ENCPAR(x) (0x300 + (x) * 4)
+#define NFIECC_DECEL(x) (0x500 + (x) * 4)
+
+#endif /* __NFIECC_REGS_H__ */
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/driver/Nandx.mk b/src/bsp/lk/platform/mt8512/drivers/nandx/driver/Nandx.mk
new file mode 100644
index 0000000..bde78b9
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/driver/Nandx.mk
@@ -0,0 +1,17 @@
+#
+# Copyright (C) 2017 MediaTek Inc.
+# Licensed under either
+# BSD Licence, (see NOTICE for more details)
+# GNU General Public License, version 2.0, (see NOTICE for more details)
+#
+
+nandx-$(NANDX_SIMULATOR_SUPPORT) += simulator/driver.c
+
+nandx-$(NANDX_CTP_SUPPORT) += ctp/ts_nand.c
+nandx-$(NANDX_CTP_SUPPORT) += ctp/nand_test.c
+nandx-header-$(NANDX_CTP_SUPPORT) += ctp/nand_test.h
+
+nandx-$(NANDX_BBT_SUPPORT) += bbt/bbt.c
+nandx-$(NANDX_BROM_SUPPORT) += brom/driver.c
+nandx-$(NANDX_KERNEL_SUPPORT) += kernel/driver.c
+nandx-$(NANDX_LK_SUPPORT) += lk/driver-nftl.c
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/driver/bbt/bbt.c b/src/bsp/lk/platform/mt8512/drivers/nandx/driver/bbt/bbt.c
new file mode 100644
index 0000000..b7ab6e2
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/driver/bbt/bbt.c
@@ -0,0 +1,405 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#include "nandx_util.h"
+#include "nandx_core.h"
+#include "bbt.h"
+
+/* Not support: multi-chip */
+/* On-flash signatures of the main ("Bbt0") and mirror ("1tbB") tables. */
+static u8 main_bbt_pattern[] = {'B', 'b', 't', '0' };
+static u8 mirror_bbt_pattern[] = {'1', 't', 'b', 'B' };
+
+/* Single global BBT state: main + mirror descriptors, a scan window of
+ * NAND_BBT_SCAN_MAXBLOCKS blocks at the end of flash, and the packed
+ * table itself (lazily allocated in scan_bbt()).
+ */
+static struct bbt_manager g_bbt_manager = {
+ { {{main_bbt_pattern, 4}, 0, BBT_INVALID_ADDR},
+ {{mirror_bbt_pattern, 4}, 0, BBT_INVALID_ADDR}
+ },
+ NAND_BBT_SCAN_MAXBLOCKS, NULL
+};
+
+/* Store the 2-bit state @mark for @block in the packed table @bbt. */
+static inline void set_bbt_mark(u8 *bbt, int block, u8 mark)
+{
+ int index, offset;
+
+ index = GET_ENTRY(block);
+ offset = GET_POSITION(block);
+
+ bbt[index] &= ~(BBT_ENTRY_MASK << offset);
+ bbt[index] |= (mark & BBT_ENTRY_MASK) << offset;
+ /* NOTE(review): logs on every single mark - noisy on full scans */
+ pr_info("%s %d block:%d, bbt[%d]:0x%x, offset:%d, mark:%d\n",
+ __func__, __LINE__, block, index, bbt[index], offset, mark);
+}
+
+/* Fetch the 2-bit state stored for @block in the packed table @bbt. */
+static inline u8 get_bbt_mark(u8 *bbt, int block)
+{
+ return (bbt[GET_ENTRY(block)] >> GET_POSITION(block)) & BBT_ENTRY_MASK;
+}
+
+/* Stamp a bad mark into flash: erase the block, then program its first
+ * page with all-zero data and OOB.
+ * NOTE(review): erase/write results are ignored (best effort), and
+ * block * block_size is int arithmetic - confirm it cannot overflow for
+ * large devices.
+ */
+static void mark_nand_bad(struct nandx_info *nand, int block)
+{
+ u8 *buf;
+
+ buf = mem_alloc(1, nand->page_size + nand->oob_size);
+ if (buf == NULL) {
+ pr_err("%s, %d, memory alloc fail, pagesize:%d, oobsize:%d\n",
+ __func__, __LINE__, nand->page_size, nand->oob_size);
+ return;
+ }
+ memset(buf, 0, nand->page_size + nand->oob_size);
+ nandx_erase(block * nand->block_size, nand->block_size);
+ nandx_write(buf, buf + nand->page_size, block * nand->block_size,
+ nand->page_size);
+ mem_free(buf);
+}
+
+/* Return true when @buf starts with the given BBT signature. */
+static inline bool is_bbt_data(u8 *buf, struct bbt_pattern *pattern)
+{
+ int pos = 0;
+
+ while (pos < pattern->len && buf[pos] == pattern->data[pos])
+ pos++;
+
+ return pos == pattern->len;
+}
+
+/* Pick a block inside the BBT window (last @max_blocks blocks of the
+ * device) to store a table copy in, skipping blocks marked bad and the
+ * block at @mirror_addr where the other copy lives.
+ * Returns the block's byte address, or BBT_INVALID_ADDR if none fits.
+ */
+static u64 get_bbt_address(struct nandx_info *nand, u8 *bbt,
+ u64 mirror_addr,
+ int max_blocks)
+{
+ u64 addr, end_addr;
+ u8 mark;
+
+ addr = nand->total_size;
+ end_addr = nand->total_size - nand->block_size * max_blocks;
+
+ while (addr > end_addr) {
+ addr -= nand->block_size;
+ mark = get_bbt_mark(bbt, div_down(addr, nand->block_size));
+
+ if (mark == BBT_BLOCK_WORN || mark == BBT_BLOCK_FACTORY_BAD)
+ continue;
+ if (addr != mirror_addr)
+ return addr;
+ }
+
+ return BBT_INVALID_ADDR;
+}
+
+/* Read the packed table body from flash; it is stored right after the
+ * signature and the one-byte version at desc->bbt_addr.
+ */
+static int read_bbt(struct bbt_desc *desc, u8 *bbt, u32 len)
+{
+ int ret;
+
+ ret = nandx_read(bbt, NULL, desc->bbt_addr + desc->pattern.len + 1,
+ len);
+ if (ret < 0)
+ pr_err("nand_bbt: error reading BBT page, ret:-%x\n", ret);
+
+ return ret;
+}
+
+/* First-boot path: build the table by scanning every block's factory
+ * bad-block mark.
+ */
+static void create_bbt(struct nandx_info *nand, u8 *bbt)
+{
+ u32 offset = 0, block = 0;
+
+ do {
+ if (nandx_is_bad_block(offset)) {
+ pr_info("Create bbt at bad block:%d\n", block);
+ set_bbt_mark(bbt, block, BBT_BLOCK_FACTORY_BAD);
+ }
+ block++;
+ offset += nand->block_size;
+ } while (offset < nand->total_size);
+}
+
+/* Look for @desc's BBT signature in the last @max_blocks blocks.
+ * On success fills desc->bbt_addr / desc->version and returns 0;
+ * returns -EFAULT when no table is found, -ENOMEM on alloc failure.
+ */
+static int search_bbt(struct nandx_info *nand, struct bbt_desc *desc,
+ int max_blocks)
+{
+ u64 addr, end_addr;
+ u8 *buf;
+ int ret = -EFAULT; /* fix: was read uninitialized if the loop ran 0 times */
+
+ buf = mem_alloc(1, nand->page_size);
+ if (buf == NULL) {
+ pr_err("%s, %d, mem alloc fail!!! len:%d\n",
+ __func__, __LINE__, nand->page_size);
+ return -ENOMEM;
+ }
+
+ addr = nand->total_size;
+ end_addr = nand->total_size - max_blocks * nand->block_size;
+ while (addr > end_addr) {
+ addr -= nand->block_size;
+
+ /* NOTE(review): read result ignored - a failed read simply
+ * makes this block look like a non-BBT block
+ */
+ nandx_read(buf, NULL, addr, nand->page_size);
+
+ if (is_bbt_data(buf, &desc->pattern)) {
+ desc->bbt_addr = addr;
+ desc->version = buf[desc->pattern.len];
+ pr_info("BBT is found at addr 0x%llx, version %d\n",
+ desc->bbt_addr, desc->version);
+ ret = 0;
+ break;
+ }
+ ret = -EFAULT;
+ }
+
+ mem_free(buf);
+ return ret;
+}
+
+/* Erase the target block and program signature + version + table,
+ * padded with 0xFF up to a whole number of pages.
+ * NOTE(review): the rounding mask assumes page_size is a power of two -
+ * confirm for all supported parts.
+ */
+static int save_bbt(struct nandx_info *nand, struct bbt_desc *desc,
+ u8 *bbt)
+{
+ u32 page_size_mask, total_block;
+ int write_len;
+ u8 *buf;
+ int ret;
+
+ ret = nandx_erase(desc->bbt_addr, nand->block_size);
+ if (ret) {
+ pr_err("erase addr 0x%llx fail !!!, ret %d\n",
+ desc->bbt_addr, ret);
+ return ret;
+ }
+
+ total_block = div_down(nand->total_size, nand->block_size);
+ write_len = GET_BBT_LENGTH(total_block) + desc->pattern.len + 1;
+ page_size_mask = nand->page_size - 1;
+ write_len = (write_len + page_size_mask) & (~page_size_mask);
+
+ buf = (u8 *)mem_alloc(1, write_len);
+ if (buf == NULL) {
+ pr_err("%s, %d, mem alloc fail!!! len:%d\n",
+ __func__, __LINE__, write_len);
+ return -ENOMEM;
+ }
+ memset(buf, 0xFF, write_len);
+
+ /* layout: pattern | version byte | packed table */
+ memcpy(buf, desc->pattern.data, desc->pattern.len);
+ buf[desc->pattern.len] = desc->version;
+
+ memcpy(buf + desc->pattern.len + 1, bbt, GET_BBT_LENGTH(total_block));
+
+ ret = nandx_write(buf, NULL, desc->bbt_addr, write_len);
+
+ if (ret)
+ pr_err("nandx_write fail(%d), offset:0x%llx, len(%d)\n",
+ ret, desc->bbt_addr, write_len);
+ mem_free(buf);
+
+ return ret;
+}
+
+/* Persist @bbt as the @main copy, retrying on a fresh block whenever
+ * the current one fails to program: the failing block is marked worn,
+ * stamped bad in flash, and a replacement is chosen from the window.
+ * Returns 0 on success, -ENOSPC when no usable block remains.
+ */
+static int write_bbt(struct nandx_info *nand, struct bbt_desc *main,
+ struct bbt_desc *mirror, u8 *bbt, int max_blocks)
+{
+ int block;
+ int ret;
+
+ do {
+ if (main->bbt_addr == BBT_INVALID_ADDR) {
+ main->bbt_addr = get_bbt_address(nand, bbt,
+ mirror->bbt_addr, max_blocks);
+ if (main->bbt_addr == BBT_INVALID_ADDR)
+ return -ENOSPC;
+ }
+
+ ret = save_bbt(nand, main, bbt);
+ if (!ret)
+ break;
+
+ /* relocate: retire the failing block and try another one */
+ block = div_down(main->bbt_addr, nand->block_size);
+ set_bbt_mark(bbt, block, BBT_BLOCK_WORN);
+ main->version++;
+ mark_nand_bad(nand, block);
+ main->bbt_addr = BBT_INVALID_ADDR;
+ } while (1);
+
+ return 0;
+}
+
+/* Mark the last @bbt_blocks blocks RESERVED so normal erase/write
+ * traffic stays out of the BBT storage area (in-memory only).
+ */
+static void mark_bbt_region(struct nandx_info *nand, u8 *bbt, int bbt_blocks)
+{
+ int total_block;
+ int block;
+ u8 mark;
+
+ total_block = div_down(nand->total_size, nand->block_size);
+ block = total_block - bbt_blocks;
+
+ while (bbt_blocks) {
+ mark = get_bbt_mark(bbt, block);
+ if (mark == BBT_BLOCK_GOOD)
+ set_bbt_mark(bbt, block, BBT_BLOCK_RESERVED);
+ block++;
+ bbt_blocks--;
+ }
+}
+
+/* Strip the in-memory RESERVED marks from the BBT window, restoring
+ * GOOD, before the table is written back to flash.
+ */
+static void unmark_bbt_region(struct nandx_info *nand, u8 *bbt, int bbt_blocks)
+{
+ int blk, i;
+
+ blk = div_down(nand->total_size, nand->block_size) - bbt_blocks;
+
+ for (i = 0; i < bbt_blocks; i++, blk++) {
+ if (get_bbt_mark(bbt, blk) == BBT_BLOCK_RESERVED)
+ set_bbt_mark(bbt, blk, BBT_BLOCK_GOOD);
+ }
+}
+
+/* Bump the version and rewrite both table copies on flash.
+ * RESERVED marks are stripped first (they are an in-memory-only
+ * property, not stored in NAND) and restored afterwards.
+ */
+static int update_bbt(struct nandx_info *nand, struct bbt_desc *desc,
+ u8 *bbt,
+ int max_blocks)
+{
+ int ret = 0, i;
+
+ /* The reserved info is not stored in NAND*/
+ unmark_bbt_region(nand, bbt, max_blocks);
+
+ desc[0].version++;
+ for (i = 0; i < 2; i++) {
+ if (i > 0)
+ desc[i].version = desc[i - 1].version;
+
+ ret = write_bbt(nand, &desc[i], &desc[1 - i], bbt, max_blocks);
+ if (ret)
+ break;
+ }
+ mark_bbt_region(nand, bbt, max_blocks);
+
+ return ret;
+}
+
+/* Build the in-memory bad block table.
+ * Searches the window at the end of flash for the main/mirror copies,
+ * reads the newest valid one, or scans factory bad marks if none is
+ * found; any missing/outdated copy is then written back.
+ * Returns 0 on success or a negative error code.
+ */
+int scan_bbt(struct nandx_info *nand)
+{
+ struct bbt_manager *manager = &g_bbt_manager;
+ struct bbt_desc *pdesc;
+ int total_block, len, i;
+ int valid_desc = 0;
+ int ret = 0;
+ u8 *bbt;
+
+ total_block = div_down(nand->total_size, nand->block_size);
+ len = GET_BBT_LENGTH(total_block);
+
+ if (manager->bbt == NULL) {
+ manager->bbt = (u8 *)mem_alloc(1, len);
+ if (manager->bbt == NULL) {
+ pr_err("%s, %d, mem alloc fail!!! len:%d\n",
+ __func__, __LINE__, len);
+ return -ENOMEM;
+ }
+ }
+ bbt = manager->bbt;
+ memset(bbt, 0xFF, len);
+
+ /* scan bbt */
+ for (i = 0; i < 2; i++) {
+ pdesc = &manager->desc[i];
+ pdesc->bbt_addr = BBT_INVALID_ADDR;
+ pdesc->version = 0;
+ ret = search_bbt(nand, pdesc, manager->max_blocks);
+ if (!ret && (pdesc->bbt_addr != BBT_INVALID_ADDR))
+ valid_desc += 1 << i;
+ }
+
+ /* when both copies exist, prefer the newer version */
+ pdesc = &manager->desc[0];
+ if ((valid_desc == 0x3) && (pdesc[0].version != pdesc[1].version))
+ valid_desc = (pdesc[0].version > pdesc[1].version) ? 1 : 2;
+
+ /* read bbt */
+ for (i = 0; i < 2; i++) {
+ if (!(valid_desc & (1 << i)))
+ continue;
+ ret = read_bbt(&pdesc[i], bbt, len);
+ if (ret) {
+ /* fix: invalidate the copy that actually failed
+ * (pdesc[i]); the original always clobbered pdesc[0]
+ */
+ pdesc[i].bbt_addr = BBT_INVALID_ADDR;
+ pdesc[i].version = 0;
+ valid_desc &= ~(1 << i);
+ }
+ /* If two BBT version is same, only need to read the first bbt*/
+ if ((valid_desc == 0x3) &&
+ (pdesc[0].version == pdesc[1].version))
+ break;
+ }
+
+ if (!valid_desc) {
+ create_bbt(nand, bbt);
+ pdesc[0].version = 1;
+ pdesc[1].version = 1;
+ }
+
+ pdesc[0].version = max_t(u8, pdesc[0].version, pdesc[1].version);
+ pdesc[1].version = pdesc[0].version;
+
+ for (i = 0; i < 2; i++) {
+ if (valid_desc & (1 << i))
+ continue;
+
+ ret = write_bbt(nand, &pdesc[i], &pdesc[1 - i], bbt,
+ manager->max_blocks);
+ if (ret) {
+ pr_err("write bbt(%d) fail, ret:%d\n", i, ret);
+ /* fix: free the table before dropping the pointer */
+ mem_free(manager->bbt);
+ manager->bbt = NULL;
+ return ret;
+ }
+ }
+
+ /* Prevent the bbt regions from erasing / writing */
+ mark_bbt_region(nand, manager->bbt, manager->max_blocks);
+
+ for (i = 0; i < total_block; i++) {
+ if (get_bbt_mark(manager->bbt, i) == BBT_BLOCK_WORN)
+ pr_info("Checked WORN bad blk: %d\n", i);
+ else if (get_bbt_mark(manager->bbt, i) == BBT_BLOCK_FACTORY_BAD)
+ pr_info("Checked Factory bad blk: %d\n", i);
+ else if (get_bbt_mark(manager->bbt, i) == BBT_BLOCK_RESERVED)
+ pr_info("Checked Reserved blk: %d\n", i);
+ else if (get_bbt_mark(manager->bbt, i) != BBT_BLOCK_GOOD)
+ pr_info("Checked unknown blk: %d\n", i);
+ }
+
+ return 0;
+}
+
+/* Mark the block containing @offset worn, stamp a bad mark into flash
+ * and rewrite both BBT copies.
+ */
+int bbt_mark_bad(struct nandx_info *nand, off_t offset)
+{
+ struct bbt_manager *manager = &g_bbt_manager;
+ int block = div_down(offset, nand->block_size);
+ int ret;
+
+ set_bbt_mark(manager->bbt, block, BBT_BLOCK_WORN);
+ mark_nand_bad(nand, block);
+
+ /* Update flash-based bad block table */
+ ret = update_bbt(nand, manager->desc, manager->bbt,
+ manager->max_blocks);
+ pr_info("block %d, update result %d.\n", block, ret);
+
+ return ret;
+}
+
+/* Query the in-memory table: anything other than GOOD counts as bad
+ * (worn, factory bad, or reserved for BBT storage).
+ */
+int bbt_is_bad(struct nandx_info *nand, off_t offset)
+{
+ int blk = div_down(offset, nand->block_size);
+
+ return get_bbt_mark(g_bbt_manager.bbt, blk) != BBT_BLOCK_GOOD;
+}
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/driver/lk/driver-nftl.c b/src/bsp/lk/platform/mt8512/drivers/nandx/driver/lk/driver-nftl.c
new file mode 100644
index 0000000..a6f5030
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/driver/lk/driver-nftl.c
@@ -0,0 +1,193 @@
+#include <lib/bio.h>
+#include <lib/partition.h>
+#include <malloc.h>
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <err.h>
+#include <errno.h>
+#include <platform/nand.h>
+#include <platform/pll.h>
+#include <lib/nftl.h>
+
+#include "nandx_core.h"
+#include "nandx_util.h"
+#include "bbt.h"
+
+/* Common signature of nandx_read()/nandx_write() so one code path can
+ * service both directions.
+ */
+typedef int (*func_nandx_operation)(u8 *, u8 *, u64, size_t);
+/* Device geometry, filled via CORE_CTRL_NAND_INFO in nand_init_device(). */
+struct nandx_info nandxi;
+
+/* Erase @len bytes starting at @offset, one block at a time.
+ * Bad blocks are skipped unless @force_erase is set.
+ * Returns the number of bytes actually erased.
+ */
+static ssize_t erase_operation(off_t offset, size_t len, bool force_erase)
+{
+ u32 block_size = nandxi.block_size;
+ ssize_t bytes_erase = 0;
+ int ret;
+
+ /* fix: 'while (len)' underflowed size_t when len was not
+ * block-aligned, looping almost forever
+ */
+ while (len >= block_size) {
+ if (bbt_is_bad(&nandxi, offset) && (!force_erase)) {
+ /* fix: off_t printed with a 64-bit format, not %x */
+ pr_err("block(0x%llx) is bad, not erase\n", (u64)offset);
+ } else {
+ ret = nandx_erase(offset, block_size);
+ if (ret < 0)
+ pr_err("erase fail at blk 0x%llx, force:%d\n",
+ (u64)offset, force_erase);
+ else
+ bytes_erase += block_size;
+ }
+
+ offset += block_size;
+ len -= block_size;
+ }
+
+ pr_debug("%s end, bytes_erase:0x%x\n", __func__, (u32)bytes_erase);
+ return bytes_erase;
+}
+
+/* Erase every block on the device, including blocks marked bad.
+ * fix: declare with (void) - an empty C parameter list means
+ * "unspecified arguments", not "no arguments".
+ */
+static ssize_t nand_force_erase_all(void)
+{
+ return erase_operation(0, nandxi.total_size, true);
+}
+
+/* nftl erase hook: non-forced, so bad blocks are skipped. */
+static ssize_t nand_erase(struct nftl_info *nftl, off_t offset, size_t len)
+{
+ return erase_operation(offset, len, false);
+}
+
+/* Read or write @len bytes at @offset, splitting the request into a
+ * page-unaligned head, whole middle pages and a tail.
+ * NOTE(review): per-chunk nandx_read/nandx_write results are not
+ * propagated; the function always reports @len - confirm intended.
+ */
+static ssize_t rw_operation(void *buf, off_t offset, size_t len, bool read)
+{
+ struct nandx_split64 split = {0};
+ func_nandx_operation operation;
+ int i, pages;
+ u8 *lbuf = (u8 *)buf;
+ u64 val = 0; /* fix: was passed to nandx_split() uninitialized */
+
+ operation = read ? nandx_read : nandx_write;
+
+ nandx_split(&split, offset, len, val, nandxi.page_size);
+
+ if (split.head_len) {
+ operation(lbuf, NULL, split.head, split.head_len);
+ lbuf += split.head_len;
+ }
+
+ if (split.body_len) {
+ pages = div_down(split.body_len, nandxi.page_size);
+ for (i = 0; i < pages; i++) {
+ operation(lbuf + i * nandxi.page_size, NULL,
+ split.body + i * nandxi.page_size,
+ nandxi.page_size);
+ }
+
+ lbuf += split.body_len;
+ }
+
+ if (split.tail_len)
+ operation(lbuf, NULL, split.tail, split.tail_len);
+
+ return len;
+}
+
+/* nftl read hook; reports @len bytes (see rw_operation). */
+static ssize_t nand_read(struct nftl_info *nftl, void *buf,
+ off_t offset, size_t len)
+{
+ return rw_operation(buf, offset, len, true);
+}
+
+/* nftl write hook; reports @len bytes (see rw_operation). */
+static ssize_t nand_write(struct nftl_info *nftl, void *buf,
+ off_t offset, size_t len)
+{
+ return rw_operation(buf, offset, len, false);
+}
+
+/* nftl bad-block hook: @page is a page index, converted to a byte
+ * offset for the BBT lookup.
+ */
+static int nand_is_bad_block(struct nftl_info *nftl, u32 page)
+{
+ return bbt_is_bad(&nandxi, (off_t)page * nandxi.page_size);
+}
+
+/* Configure pinmux / driving for the NFI pins via raw GPIO registers.
+ * NOTE(review): the 0x250/0x260/0x740/0x750 offsets and field values
+ * are magic numbers - confirm against the MT8512 pinmux table.
+ */
+static void nand_gpio_init(void)
+{
+ nandx_set_bits32(GPIO_BASE + 0x250, 0xFFF << 18, 0x6DB << 18);
+ nandx_set_bits32(GPIO_BASE + 0x260, 0x7 << 6 | 0x7, 0x3 << 6 | 0x3);
+
+ nandx_set_bits32(GPIO_BASE + 0x740, 0xf << 16, 1 << 16);
+ nandx_set_bits32(GPIO_BASE + 0x750, 0xf | (0xF << 8), 1 | (1 << 8));
+}
+
+/* Report the NFI 1x clock rate in Hz.
+ * Uses the default 26 MHz crystal as a temporary solution; should
+ * eventually return the real configured clock.
+ */
+static u32 nand_clock_init(void)
+{
+ return 26 * 1000 * 1000;
+}
+
+/* Pulse the NFI module reset in INFRACFG_AO: set bit 15 via register
+ * 0x130, wait 5us, then set bit 15 via 0x134.
+ * NOTE(review): 0x130/0x134 look like a reset set/clear register pair -
+ * confirm against the MT8512 infracfg documentation.
+ */
+static void nand_hard_reset(void)
+{
+ u32 val;
+
+ val = readl(INFRACFG_AO_BASE + 0x130);
+ val |= BIT(15);
+ writel(val, INFRACFG_AO_BASE + 0x130);
+
+ nandx_udelay(5);
+
+ val = readl(INFRACFG_AO_BASE + 0x134);
+ val |= BIT(15);
+ writel(val, INFRACFG_AO_BASE + 0x134);
+}
+
+/* Probe and register the NAND device.
+ * Sets up pinmux and clocks, resets the controller, initialises the
+ * nandx core, scans the bad block table and mounts an nftl block
+ * device named "nand0".
+ * Returns 0 on success or a negative error code.
+ * fix: declared with (void) instead of an unspecified argument list.
+ */
+int nand_init_device(void)
+{
+ struct nfi_resource res = {
+ NANDX_MT8512, NULL,
+ (void *)NFIECC_BASE, 0,
+ (void *)NFI_BASE, 0,
+ 26000000, NULL, 0, 32
+ };
+ struct nftl_info *nftl;
+ int ret = 0, arg = 1;
+
+ nand_gpio_init();
+
+ res.clock_1x = nand_clock_init();
+
+ nand_hard_reset();
+
+ ret = nandx_init(&res);
+ if (ret) {
+ pr_err("nandx init error (%d)!\n", ret);
+ return ret;
+ }
+
+ /* enable DMA, ECC and bad-mark swap, then fetch device geometry */
+ nandx_ioctl(NFI_CTRL_DMA, &arg);
+ nandx_ioctl(NFI_CTRL_ECC, &arg);
+ nandx_ioctl(NFI_CTRL_BAD_MARK_SWAP, &arg);
+ nandx_ioctl(CORE_CTRL_NAND_INFO, &nandxi);
+
+ ret = scan_bbt(&nandxi);
+ if (ret) {
+ pr_err("bbt init error (%d)!\n", ret);
+ return ret;
+ }
+
+ nftl = nftl_add_master("nand0");
+ if (!nftl)
+ return -ENOMEM;
+
+ nftl->erase_size = nandxi.block_size;
+ nftl->write_size = nandxi.page_size;
+ nftl->total_size = nandxi.total_size;
+ nftl->block_isbad = nand_is_bad_block;
+ nftl->erase = nand_erase;
+ nftl->read = nand_read;
+ nftl->write = nand_write;
+
+ ret = nftl_mount_bdev(nftl);
+ if (ret)
+ pr_err("nftl mount bdev fail.\n");
+
+ return ret;
+}
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/include/Nandx.mk b/src/bsp/lk/platform/mt8512/drivers/nandx/include/Nandx.mk
new file mode 100644
index 0000000..27871cf
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/include/Nandx.mk
@@ -0,0 +1,15 @@
+#
+# Copyright (C) 2017 MediaTek Inc.
+# Licensed under either
+# BSD Licence, (see NOTICE for more details)
+# GNU General Public License, version 2.0, (see NOTICE for more details)
+#
+
+nandx-header-y += internal/nandx_core.h
+nandx-header-y += internal/nandx_errno.h
+nandx-header-y += internal/nandx_util.h
+nandx-header-$(NANDX_BBT_SUPPORT) += internal/bbt.h
+nandx-header-$(NANDX_SIMULATOR_SUPPORT) += simulator/nandx_os.h
+nandx-header-$(NANDX_CTP_SUPPORT) += ctp/nandx_os.h
+nandx-header-$(NANDX_LK_SUPPORT) += lk/nandx_os.h
+nandx-header-$(NANDX_KERNEL_SUPPORT) += kernel/nandx_os.h
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/include/internal/bbt.h b/src/bsp/lk/platform/mt8512/drivers/nandx/include/internal/bbt.h
new file mode 100644
index 0000000..4676def
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/include/internal/bbt.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __BBT_H__
+#define __BBT_H__
+
+/* 2-bit per-block states stored in the packed table */
+#define BBT_BLOCK_GOOD 0x03
+#define BBT_BLOCK_WORN 0x02
+#define BBT_BLOCK_RESERVED 0x01
+#define BBT_BLOCK_FACTORY_BAD 0x00
+
+#define BBT_INVALID_ADDR 0
+/* The maximum number of blocks to scan for a bbt */
+#define NAND_BBT_SCAN_MAXBLOCKS 4
+#define NAND_BBT_USE_FLASH 0x00020000
+#define NAND_BBT_NO_OOB 0x00040000
+
+/* Search good / bad pattern on the first and the second page */
+#define NAND_BBT_SCAN2NDPAGE 0x00008000
+/* Search good / bad pattern on the last page of the eraseblock */
+#define NAND_BBT_SCANLASTPAGE 0x00010000
+
+#define NAND_DRAM_BUF_DATABUF_ADDR (NAND_BUF_ADDR)
+
+/* On-flash signature identifying a BBT block */
+struct bbt_pattern {
+ u8 *data;
+ int len;
+};
+
+struct bbt_desc {
+ struct bbt_pattern pattern;
+ u8 version;
+ u64 bbt_addr;/*0: invalid value; otherwise, valid value*/
+};
+
+struct bbt_manager {
+ /* main bbt descriptor and mirror descriptor */
+ struct bbt_desc desc[2];/* 0: main bbt; 1: mirror bbt */
+ int max_blocks;
+ u8 *bbt;
+};
+
+#define BBT_ENTRY_MASK 0x03
+#define BBT_ENTRY_SHIFT 2
+
+/* fix: parenthesize the macro argument (4 blocks per table byte) */
+#define GET_BBT_LENGTH(blocks) ((blocks) >> 2)
+#define GET_ENTRY(block) ((block) >> BBT_ENTRY_SHIFT)
+#define GET_POSITION(block) (((block) & BBT_ENTRY_MASK) * 2)
+#define GET_MARK_VALUE(block, mark) \
+ (((mark) & BBT_ENTRY_MASK) << GET_POSITION(block))
+
+int scan_bbt(struct nandx_info *nand);
+
+int bbt_mark_bad(struct nandx_info *nand, off_t offset);
+
+int bbt_is_bad(struct nandx_info *nand, off_t offset);
+
+#endif /*__BBT_H__*/
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/include/internal/nandx_core.h b/src/bsp/lk/platform/mt8512/drivers/nandx/include/internal/nandx_core.h
new file mode 100644
index 0000000..dc10fdd
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/include/internal/nandx_core.h
@@ -0,0 +1,248 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NANDX_CORE_H__
+#define __NANDX_CORE_H__
+
+/**
+ * mtk_ic_version - indicates specifical IC, IP need this to load some info
+ */
+enum mtk_ic_version {
+ NANDX_MT8512,
+};
+
+/**
+ * nandx_ioctl_cmd - operations supported by nandx
+ *
+ * @NFI_CTRL_DMA dma enable or not
+ * @NFI_CTRL_NFI_MODE customer/read/program/erase...
+ * @NFI_CTRL_ECC ecc enable or not
+ * @NFI_CTRL_ECC_MODE nfi/dma/pio
+ * @CHIP_CTRL_DRIVE_STRENGTH enum chip_ctrl_drive_strength
+ */
+enum nandx_ctrl_cmd {
+ CORE_CTRL_NAND_INFO,
+
+ /* NFI controller */
+ NFI_CTRL_DMA,
+ NFI_CTRL_NFI_MODE,
+ NFI_CTRL_AUTOFORMAT,
+ NFI_CTRL_NFI_IRQ,
+ NFI_CTRL_PAGE_IRQ,
+ NFI_CTRL_RANDOMIZE,
+ NFI_CTRL_BAD_MARK_SWAP,
+
+ /* ECC engine */
+ NFI_CTRL_ECC,
+ NFI_CTRL_ECC_MODE,
+ NFI_CTRL_ECC_CLOCK,
+ NFI_CTRL_ECC_IRQ,
+ NFI_CTRL_ECC_PAGE_IRQ,
+ NFI_CTRL_ECC_DECODE_MODE,
+
+ /* SPI-NAND interface */
+ SNFI_CTRL_OP_MODE,
+ SNFI_CTRL_RX_MODE,
+ SNFI_CTRL_TX_MODE,
+ SNFI_CTRL_DELAY_MODE,
+
+ /* chip-level features */
+ CHIP_CTRL_OPS_CACHE,
+ CHIP_CTRL_OPS_MULTI,
+ CHIP_CTRL_PSLC_MODE,
+ CHIP_CTRL_DRIVE_STRENGTH,
+ CHIP_CTRL_DDR_MODE,
+ CHIP_CTRL_ONDIE_ECC,
+ CHIP_CTRL_TIMING_MODE
+};
+
+/* SPI-NAND command issue mode. */
+enum snfi_ctrl_op_mode {
+ SNFI_CUSTOM_MODE,
+ SNFI_AUTO_MODE,
+ SNFI_MAC_MODE
+};
+
+/* SPI-NAND receive lane configuration (cmd/addr/data widths). */
+enum snfi_ctrl_rx_mode {
+ SNFI_RX_111,
+ SNFI_RX_112,
+ SNFI_RX_114,
+ SNFI_RX_122,
+ SNFI_RX_144
+};
+
+/* SPI-NAND transmit lane configuration. */
+enum snfi_ctrl_tx_mode {
+ SNFI_TX_111,
+ SNFI_TX_114,
+};
+
+/* Pad drive strength selection for CHIP_CTRL_DRIVE_STRENGTH. */
+enum chip_ctrl_drive_strength {
+ CHIP_DRIVE_NORMAL,
+ CHIP_DRIVE_HIGH,
+ CHIP_DRIVE_MIDDLE,
+ CHIP_DRIVE_LOW
+};
+
+/* Interface timing mode for CHIP_CTRL_TIMING_MODE. */
+enum chip_ctrl_timing_mode {
+ CHIP_TIMING_MODE0,
+ CHIP_TIMING_MODE1,
+ CHIP_TIMING_MODE2,
+ CHIP_TIMING_MODE3,
+ CHIP_TIMING_MODE4,
+ CHIP_TIMING_MODE5,
+};
+
+/**
+ * nandx_info - basic information
+ * NOTE(review): sizes below appear to be in bytes - confirm before
+ * relying on units.
+ */
+struct nandx_info {
+ u32 max_io_count;
+ u32 min_write_pages;
+ u32 plane_num;
+ u32 oob_size;
+ u32 page_parity_size;
+ u32 page_size;
+ u32 block_size;
+ u64 total_size;
+ u32 fdm_reg_size;
+ u32 fdm_ecc_size;
+ u32 ecc_strength;
+ u32 sector_size;
+};
+
+/**
+ * nfi_resource - the resource needed by nfi & ecc to do initialization
+ */
+struct nfi_resource {
+ int ic_ver;
+ void *dev;
+
+ void *ecc_regs;
+ int ecc_irq_id;
+
+ void *nfi_regs;
+ int nfi_irq_id;
+
+ /* base clock in Hz, plus optional 2x clock table */
+ u32 clock_1x;
+ u32 *clock_2x;
+ int clock_2x_num;
+
+ int min_oob_req;
+};
+
+/**
+ * nandx_init - init all related modules below
+ *
+ * @res: basic resource of the project
+ *
+ * return 0 if init success, otherwise return negative error code
+ */
+int nandx_init(struct nfi_resource *res);
+
+/**
+ * nandx_exit - release resource those that obtained in init flow
+ */
+void nandx_exit(void);
+
+/**
+ * nandx_read - read data from nand this function can read data and related
+ * oob from specifical address
+ * if do multi_ops, set one operation per time, and call nandx_sync at last
+ * in multi mode, not support page partial read
+ * oob not support partial read
+ *
+ * @data: buf to receive data from nand
+ * @oob: buf to receive oob data from nand which related to data page
+ * length of @oob should oob size aligned, oob not support partial read
+ * @offset: offset address on the whole flash
+ * @len: the length of @data that need to read
+ *
+ * if read success return 0, otherwise return negative error code
+ */
+int nandx_read(u8 *data, u8 *oob, u64 offset, size_t len);
+
+/**
+ * nandx_write - write data to nand
+ * this function can write data and related oob to specifical address
+ * if do multi_ops, set one operation per time, and call nandx_sync at last
+ *
+ * @data: source data to be written to nand,
+ * for multi operation, the length of @data should be page size aliged
+ * @oob: source oob which related to data page to be written to nand,
+ * length of @oob should oob size aligned
+ * @offset: offset address on the whole flash, the value should be start address
+ * of a page
+ * @len: the length of @data that need to write,
+ * for multi operation, the len should be page size aliged
+ *
+ * if write success return 0, otherwise return negative error code
+ * if return value > 0, it indicates that how many pages still need to write,
+ * and data has not been written to nand
+ * please call nandx_sync after pages alligned $nandx_info.min_write_pages
+ */
+int nandx_write(u8 *data, u8 *oob, u64 offset, size_t len);
+
+/**
+ * nandx_erase - erase an area of nand
+ * if do multi_ops, set one operation per time, and call nandx_sync at last
+ *
+ * @offset: offset address on the flash
+ * @len: erase length which should be block size aligned
+ *
+ * if erase success return 0, otherwise return negative error code
+ */
+int nandx_erase(u64 offset, size_t len);
+
+/**
+ * nandx_sync - sync all operations to nand
+ * when do multi_ops, this function will be called at last operation
+ * when write data, if number of pages not alligned
+ * by $nandx_info.min_write_pages, this interface could be called to do
+ * force write, 0xff will be padded to blanked pages.
+ */
+int nandx_sync(void);
+
+/**
+ * nandx_is_bad_block - check if the block is bad
+ * only check the flag that marked by the flash vendor
+ *
+ * @offset: offset address on the whole flash
+ *
+ * return true if the block is bad, otherwise return false
+ */
+bool nandx_is_bad_block(u64 offset);
+
+/**
+ * nandx_ioctl - set/get property of nand chip
+ *
+ * @cmd: parameter that defined in enum nandx_ioctl_cmd
+ * @arg: operate parameter
+ *
+ * return 0 if operate success, otherwise return negative error code
+ */
+int nandx_ioctl(int cmd, void *arg);
+
+/**
+ * nandx_suspend - suspend nand, and store some data
+ *
+ * return 0 if suspend success, otherwise return negative error code
+ */
+int nandx_suspend(void);
+
+/**
+ * nandx_resume - resume nand, and replay some data
+ *
+ * return 0 if resume success, otherwise return negative error code
+ */
+int nandx_resume(void);
+
+/**
+ * nandx_unit_test - unit test
+ *
+ * @offset: offset address on the whole flash
+ * @len: should be not larger than a block size, we only test a block per time
+ *
+ * return 0 if test success, otherwise return negative error code
+ */
+int nandx_unit_test(u64 offset, size_t len);
+
+#endif /* __NANDX_CORE_H__ */
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/include/internal/nandx_errno.h b/src/bsp/lk/platform/mt8512/drivers/nandx/include/internal/nandx_errno.h
new file mode 100644
index 0000000..51fb299
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/include/internal/nandx_errno.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NANDX_ERRNO_H__
+#define __NANDX_ERRNO_H__
+
+/*
+ * Fallback errno values for build environments (preloader/LK) that do
+ * not provide <errno.h>; the numeric values match Linux.
+ */
+#ifndef EIO
+#define EIO 5 /* I/O error */
+#define ENOMEM 12 /* Out of memory */
+#define EFAULT 14 /* Bad address */
+#define EBUSY 16 /* Device or resource busy */
+#define ENODEV 19 /* No such device */
+#define EINVAL 22 /* Invalid argument */
+#define ENOSPC 28 /* No space left on device */
+/* Operation not supported on transport endpoint */
+#define EOPNOTSUPP 95
+#define ETIMEDOUT 110 /* Connection timed out */
+#endif
+
+/* NAND-specific error codes, deliberately above the standard errno range */
+#define ENANDFLIPS 1024 /* Too many bitflips, uncorrected */
+#define ENANDREAD 1025 /* Read fail, can't correct */
+#define ENANDWRITE 1026 /* Write fail */
+#define ENANDERASE 1027 /* Erase fail */
+#define ENANDBAD 1028 /* Bad block */
+#define ENANDWP 1029 /* Write protected */
+
+/* True if err is (the negative of) a NAND-specific code above.
+ * NOTE(review): the range only spans ENANDFLIPS..ENANDBAD, so -ENANDWP
+ * (1029) is NOT recognized -- confirm whether that is intentional. */
+#define IS_NAND_ERR(err) ((err) >= -ENANDBAD && (err) <= -ENANDFLIPS)
+
+/* Linux-style encoding of an error code inside a pointer value */
+#ifndef MAX_ERRNO
+#define MAX_ERRNO 4096
+#define ERR_PTR(errno) ((void *)((long)errno))
+#define PTR_ERR(ptr) ((long)(ptr))
+#define IS_ERR(ptr) ((unsigned long)(ptr) > (unsigned long)-MAX_ERRNO)
+#endif
+
+#endif /* __NANDX_ERRNO_H__ */
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/include/internal/nandx_util.h b/src/bsp/lk/platform/mt8512/drivers/nandx/include/internal/nandx_util.h
new file mode 100644
index 0000000..1990b00
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/include/internal/nandx_util.h
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NANDX_UTIL_H__
+#define __NANDX_UTIL_H__
+
+/* Kernel-style fixed-width integer shorthands */
+typedef unsigned char u8;
+typedef unsigned short u16;
+typedef unsigned int u32;
+typedef unsigned long long u64;
+
+/* Return values for NAND IRQ handlers (mirrors Linux irqreturn_t) */
+enum nand_irq_return {
+	NAND_IRQ_NONE,
+	NAND_IRQ_HANDLED,
+};
+
+/* DMA transfer direction, named relative to the NAND device */
+enum nand_dma_operation {
+	NDMA_FROM_DEV,
+	NDMA_TO_DEV,
+};
+
+
+/*
+ * Compatible function
+ * used for preloader/lk/kernel environment
+ */
+#include "nandx_os.h"
+#include "nandx_errno.h"
+
+/* Single-bit mask */
+#ifndef BIT
+#define BIT(a) (1 << (a))
+#endif
+
+/* Type-checked min/max (Linux style); each argument is evaluated once */
+#ifndef min_t
+#define min_t(type, x, y) ({ \
+		type __min1 = (x); \
+		type __min2 = (y); \
+		__min1 < __min2 ? __min1 : __min2; })
+
+#define max_t(type, x, y) ({ \
+		type __max1 = (x); \
+		type __max2 = (y); \
+		__max1 > __max2 ? __max1 : __max2; })
+#endif
+
+/* Contiguous bit mask with bits h..l (inclusive) set */
+#ifndef GENMASK
+#define GENMASK(h, l) \
+	(((~0UL) << (l)) & (~0UL >> ((sizeof(unsigned long) * 8) - 1 - (h))))
+#endif
+
+#ifndef __weak
+#define __weak __attribute__((__weak__))
+#endif
+
+#ifndef __packed
+#define __packed __attribute__((__packed__))
+#endif
+
+/* Byte-size helpers.
+ * NOTE(review): with an int argument GB(x) shifts by 30 bits and can
+ * overflow -- pass a 64-bit value for GB(). */
+#ifndef KB
+#define KB(x) ((x) << 10)
+#define MB(x) (KB(x) << 10)
+#define GB(x) (MB(x) << 10)
+#endif
+
+/* Classic null-pointer offsetof; compilers treat this idiom specially */
+#ifndef offsetof
+#define offsetof(type, member) ((size_t)&((type *)0)->member)
+#endif
+
+#ifndef NULL
+#define NULL (void *)0
+#endif
+/* Population count: number of set bits in x (branch-free pairwise sums). */
+static inline u32 nandx_popcount(u32 x)
+{
+	x = (x & 0x55555555) + ((x >> 1) & 0x55555555);
+	x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
+	x = (x & 0x0F0F0F0F) + ((x >> 4) & 0x0F0F0F0F);
+	x = (x & 0x00FF00FF) + ((x >> 8) & 0x00FF00FF);
+	x = (x & 0x0000FFFF) + ((x >> 16) & 0x0000FFFF);
+
+	return x;
+}
+
+/* Number of clear bits in a 32-bit word */
+#ifndef zero_popcount
+#define zero_popcount(x) (32 - nandx_popcount(x))
+#endif
+
+/*
+ * 64-bit division helper with Linux do_div() semantics: divides n in
+ * place by a 32-bit base and evaluates to the remainder.
+ */
+#ifndef do_div
+#define do_div(n, base) \
+	({ \
+		u32 __base = (base); \
+		u32 __rem; \
+		__rem = ((u64)(n)) % __base; \
+		(n) = ((u64)(n)) / __base; \
+		__rem; \
+	})
+#endif
+
+/* Ceiling division of x by y (64-bit safe via do_div) */
+#define div_up(x, y) \
+	({ \
+		u64 __temp = ((x) + (y) - 1); \
+		do_div(__temp, (y)); \
+		__temp; \
+	})
+
+/* Floor division of x by y */
+#define div_down(x, y) \
+	({ \
+		u64 __temp = (x); \
+		do_div(__temp, (y)); \
+		__temp; \
+	})
+
+/* Round x up/down to a multiple of y (y need not be a power of two) */
+#define div_round_up(x, y) (div_up(x, y) * (y))
+#define div_round_down(x, y) (div_down(x, y) * (y))
+
+/* 64-bit remainder of x / y (do_div evaluates to the remainder).
+ * NOTE(review): name is a typo of "remainder", kept for API compatibility. */
+#define reminder(x, y) \
+	({ \
+		u64 __temp = (x); \
+		do_div(__temp, (y)); \
+	})
+
+/* Round to a power-of-two boundary; y MUST be a power of two */
+#ifndef round_up
+#define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)
+#define round_down(x, y) ((x) & ~((y) - 1))
+#endif
+
+/*
+ * Busy-poll addr with op() until cond is true or timeout_us elapses;
+ * evaluates to 0 on success, -ETIMEDOUT otherwise. val is re-read once
+ * after the deadline so a last-moment success is not misreported.
+ * NOTE(review): delay_us is accepted but ignored (pure busy wait), and
+ * the timeout unit depends on get_current_time_us() actually returning
+ * microseconds -- see nandx_os.h.
+ */
+#ifndef readx_poll_timeout_atomic
+#define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \
+	({ \
+		u64 end = get_current_time_us() + timeout_us; \
+		for (;;) { \
+			u64 now = get_current_time_us(); \
+			(val) = op(addr); \
+			if (cond) \
+				break; \
+			if (now > end) { \
+				(val) = op(addr); \
+				break; \
+			} \
+		} \
+		(cond) ? 0 : -ETIMEDOUT; \
+	})
+
+#define readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+	readx_poll_timeout_atomic(readl, addr, val, cond, delay_us, timeout_us)
+#define readw_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+	readx_poll_timeout_atomic(readw, addr, val, cond, delay_us, timeout_us)
+#define readb_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+	readx_poll_timeout_atomic(readb, addr, val, cond, delay_us, timeout_us)
+#endif
+
+/*
+ * Head/body/tail decomposition of an (offset, len) range against an
+ * alignment boundary; filled in by the nandx_split() macro below.
+ * head: unaligned leading part, body: aligned middle, tail: remainder.
+ */
+struct nandx_split64 {
+	u64 head;
+	size_t head_len;
+	u64 body;
+	size_t body_len;
+	u64 tail;
+	size_t tail_len;
+};
+
+/* Same layout as nandx_split64 but with 32-bit fields throughout */
+struct nandx_split32 {
+	u32 head;
+	u32 head_len;
+	u32 body;
+	u32 body_len;
+	u32 tail;
+	u32 tail_len;
+};
+
+/*
+ * Split (offset, len) into head/body/tail against an align boundary and
+ * store the result in *split (a nandx_split32 or nandx_split64).
+ * val is a caller-provided scratch variable of the matching width;
+ * after the macro it holds the computed head length candidate.
+ */
+#define nandx_split(split, offset, len, val, align) \
+	do { \
+		(split)->head = (offset); \
+		(val) = div_round_down((offset), (align)); \
+		(val) = (align) - ((offset) - (val)); \
+		if ((val) == (align)) \
+			(split)->head_len = 0; \
+		else if ((val) > (len)) \
+			(split)->head_len = len; \
+		else \
+			(split)->head_len = val; \
+		(split)->body = (offset) + (split)->head_len; \
+		(split)->body_len = div_round_down((len) - \
+						   (split)->head_len,\
+						   (align)); \
+		(split)->tail = (split)->body + (split)->body_len; \
+		(split)->tail_len = (len) - (split)->head_len - \
+				    (split)->body_len; \
+	} while (0)
+
+/* Recover the enclosing struct pointer from a pointer to one member */
+#ifndef container_of
+#define container_of(ptr, type, member) \
+	({const __typeof__(((type *)0)->member) * __mptr = (ptr); \
+	  (type *)((char *)__mptr - offsetof(type, member)); })
+#endif
+
+/*
+ * Convert a CPU-order u32 to big-endian. Endianness is probed at
+ * runtime: if the first byte of the u32 constant 1 is non-zero the CPU
+ * is little-endian and the bytes are swapped, otherwise the value is
+ * already big-endian and returned unchanged.
+ */
+static inline u32 nandx_cpu_to_be32(u32 val)
+{
+	u32 temp = 1;
+	u8 *p_temp = (u8 *)&temp;
+
+	if (*p_temp)
+		return ((val & 0xff) << 24) | ((val & 0xff00) << 8) |
+		       ((val >> 8) & 0xff00) | ((val >> 24) & 0xff);
+
+	return val;
+}
+
+/*
+ * Read-modify-write a 32-bit MMIO register: clear the bits in mask,
+ * then OR in val.
+ * NOTE(review): val is not masked before the OR; callers must ensure
+ * val only carries bits inside mask.
+ */
+static inline void nandx_set_bits32(unsigned long addr, u32 mask,
+				    u32 val)
+{
+	u32 temp = readl((void *)addr);
+
+	temp &= ~(mask);
+	temp |= val;
+	writel(temp, (void *)addr);
+}
+
+#endif /* __NANDX_UTIL_H__ */
diff --git a/src/bsp/lk/platform/mt8512/drivers/nandx/include/lk/nandx_os.h b/src/bsp/lk/platform/mt8512/drivers/nandx/include/lk/nandx_os.h
new file mode 100644
index 0000000..e81f771
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/nandx/include/lk/nandx_os.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2017 MediaTek Inc.
+ * Licensed under either
+ * BSD Licence, (see NOTICE for more details)
+ * GNU General Public License, version 2.0, (see NOTICE for more details)
+ */
+
+#ifndef __NANDX_OS_H__
+#define __NANDX_OS_H__
+
+#include <arch/ops.h>
+#include <debug.h>
+#include <malloc.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <reg.h>
+#include <kernel/vm.h>
+#include <platform.h>
+#include <platform/mt_reg_base.h>
+#include <platform/timer.h>
+#include <platform/mtk_timer.h>
+
+/* Map Linux-style log levels onto LK's dprintf(); pr_debug compiles out */
+#define pr_err(fmt, ...) \
+	dprintf(CRITICAL, "[ERR]" fmt, ##__VA_ARGS__)
+#define pr_warn(fmt, ...) \
+	dprintf(ALWAYS, "[WARN]" fmt, ##__VA_ARGS__)
+#define pr_info(fmt, ...) \
+	dprintf(INFO, fmt, ##__VA_ARGS__)
+#define pr_debug(fmt, ...) \
+	do {} while (0)
+
+#define NANDX_LOG(fmt, ...) dprintf(CRITICAL, fmt, ##__VA_ARGS__)
+
+#define NANDX_ASSERT assert
+
+/*
+ * Static carve-out of DMA-capable work buffers, laid out back-to-back
+ * from NAND_BUF_ADDR: NFI buffer, core buffer, BBT buffers, then the
+ * unit-test source/destination areas. Sizes assume up to 4KB pages
+ * with 256B spare.
+ */
+#define NANDX_BULK_IO_USE_DRAM 1
+#define NANDX_NFI_BUF_ADDR  NAND_BUF_ADDR
+#define NANDX_NFI_BUF_ADDR_LEN  (4096 + 256)
+#define NANDX_CORE_BUF_ADDR (NANDX_NFI_BUF_ADDR + NANDX_NFI_BUF_ADDR_LEN)
+#define NANDX_CORE_BUF_LEN (2 * (4096 + 256))
+#define NANDX_BBT_BUF_ADDR (NANDX_CORE_BUF_ADDR + NANDX_CORE_BUF_LEN)
+#define NANDX_BBT_BUF_LEN (8192)
+#define NANDX_BBT_MAN_BUF_ADDR (NANDX_BBT_BUF_ADDR + NANDX_BBT_BUF_LEN)
+#define NANDX_BBT_MAN_BUF_LEN (8192)
+#define NANDX_UT_SRC_ADDR (NANDX_BBT_MAN_BUF_ADDR + NANDX_BBT_MAN_BUF_LEN)
+#define NANDX_UT_SRC_LEN 0x41000
+#define NANDX_UT_DST_ADDR (NANDX_UT_SRC_ADDR + NANDX_UT_SRC_LEN)
+#define NANDX_UT_DST_LEN 0x41000
+
+#define nandx_udelay(x) udelay(x)
+
+/* Allocate zero-initialized memory for count elements of size bytes
+ * (thin wrapper over calloc); returns NULL on failure. */
+static inline void *mem_alloc(u32 count, u32 size)
+{
+	return calloc(count, size);
+}
+
+/* Release memory obtained from mem_alloc(); NULL is tolerated.
+ * NOTE(review): the guard is redundant -- free(NULL) is a no-op. */
+static inline void mem_free(void *mem)
+{
+	if (mem)
+		free(mem);
+}
+
+/*
+ * The LK port runs the nandx core in polling mode, so the IRQ/event
+ * services collapse to no-ops: registration "succeeds", events are
+ * NULL handles, and every wait reports immediate completion.
+ */
+#define nandx_irq_register(dev, irq, irq_handler, name, data) (0)
+#define nandx_event_create()     NULL
+#define nandx_event_destroy(event)
+#define nandx_event_complete(event)
+#define nandx_event_init(event)
+#define nandx_event_wait_complete(event, timeout) true
+
+/*
+ * Timestamp source for the poll-timeout macros.
+ * NOTE(review): LK's current_time() conventionally returns milliseconds;
+ * confirm the unit really is microseconds as the name claims, otherwise
+ * readx_poll_timeout_atomic() deadlines are off by a factor of 1000.
+ */
+static inline u64 get_current_time_us(void)
+{
+	return current_time();
+}
+
+/*
+ * Prepare buf (len bytes) for DMA and return the address to program
+ * into the controller: the physical address when the kernel VM is
+ * enabled, the buffer address itself otherwise.
+ * NOTE(review): NDMA_FROM_DEV only cleans the cache while the other
+ * direction cleans+invalidates; a device-to-memory transfer normally
+ * requires an invalidate before the CPU reads the data -- confirm the
+ * direction/maintenance pairing against the nandx core callers.
+ */
+static inline u32 nandx_dma_map(void *dev, void *buf, u64 len,
+				enum nand_dma_operation op)
+{
+	u32 addr;
+
+#if WITH_KERNEL_VM
+	addr = (u32)kvaddr_to_paddr(buf);
+#else
+	addr = (u32)buf;
+#endif
+
+	if (op == NDMA_FROM_DEV)
+		arch_clean_cache_range((addr_t)buf, (size_t)len);
+	else
+		arch_clean_invalidate_cache_range((addr_t)buf, (size_t)len);
+
+	return addr;
+}
+
+/*
+ * Finish a DMA transfer started with nandx_dma_map(); performs the same
+ * cache maintenance again on the buffer. dev and addr are unused here.
+ * NOTE(review): same clean/invalidate direction concern as nandx_dma_map.
+ */
+static inline void nandx_dma_unmap(void *dev, void *buf, void *addr,
+				   u64 len,
+				   enum nand_dma_operation op)
+{
+	if (op == NDMA_FROM_DEV)
+		arch_clean_cache_range((addr_t)buf, len);
+	else
+		arch_clean_invalidate_cache_range((addr_t)buf, len);
+}
+
+#define container_of containerof
+
+#endif /* __NANDX_OS_H__ */
diff --git a/src/bsp/lk/platform/mt8512/drivers/pll/pll.c b/src/bsp/lk/platform/mt8512/drivers/pll/pll.c
new file mode 100644
index 0000000..01a017b
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/pll/pll.c
@@ -0,0 +1,580 @@
+/* Copyright Statement:
+ *
+ * This software/firmware and related documentation ("MediaTek Software") are
+ * protected under relevant copyright laws. The information contained herein is
+ * confidential and proprietary to MediaTek Inc. and/or its licensors. Without
+ * the prior written permission of MediaTek inc. and/or its licensors, any
+ * reproduction, modification, use or disclosure of MediaTek Software, and
+ * information contained herein, in whole or in part, shall be strictly
+ * prohibited.
+ *
+ * MediaTek Inc. (C) 2010. All rights reserved.
+ *
+ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
+ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
+ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER
+ * ON AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL
+ * WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR
+ * NONINFRINGEMENT. NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH
+ * RESPECT TO THE SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY,
+ * INCORPORATED IN, OR SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES
+ * TO LOOK ONLY TO SUCH THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO.
+ * RECEIVER EXPRESSLY ACKNOWLEDGES THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO
+ * OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES CONTAINED IN MEDIATEK
+ * SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK SOFTWARE
+ * RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
+ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S
+ * ENTIRE AND CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE
+ * RELEASED HEREUNDER WILL BE, AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE
+ * MEDIATEK SOFTWARE AT ISSUE, OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE
+ * CHARGE PAID BY RECEIVER TO MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
+ *
+ * The following software/firmware and/or related documentation ("MediaTek
+ * Software") have been modified by MediaTek Inc. All revisions are subject to
+ * any receiver's applicable license agreements with MediaTek Inc.
+ */
+
+#include <debug.h>
+#include <reg.h>
+#include <platform/mt8512.h>
+#include <platform/pll.h>
+#include <platform/spm_mtcmos.h>
+#include <platform/mtk_timer.h>
+
+#define ALL_CLK_ON      0 /* build switch: ungate all subsys clocks/MTCMOS */
+#define DEBUG_FQMTR     0 /* build switch: dump fmeter readings at post-init */
+
+#define RGU_KEY_CODE    (0x88 << 24) /* WDT_SWSYSRST write-enable key */
+#define CONNSYS_CPU_SW_RST      (0x1 << 12) /* CONNSYS MCU reset bit */
+
+/* Thin MMIO accessors (note reversed argument order vs writel) */
+#define DRV_WriteReg32(addr, data)  writel(data, addr)
+#define DRV_Reg32(addr)         readl(addr)
+
+/* Frequency-meter source IDs for the ABIST (analog) clock mux; gaps are
+ * reserved/unwired inputs. Used as the ID argument of mt_get_abist_freq(). */
+enum rg_fqmtr_abist_clk {
+	FM_ABIST_CLK_NULL = 0,
+
+	FM_AD_ARMPLL_CK = 1,
+	FM_AD_MAINPLL_CK = 3,
+	FM_AD_XO_LP_CLK_26M = 4,
+	FM_AD_MAINPLL_H546M_CK = 10,
+	FM_AD_MAINPLL_H364M_CK = 11,
+	FM_AD_MAINPLL_H218P4M_CK = 12,
+	FM_AD_MAINPLL_H156M_CK = 13,
+	FM_AD_UNIVPLL_1248M_CK = 14,
+	FM_AD_USB20_192M_CK = 15,
+	FM_AD_UNIVPLL_624M_CK = 16,
+	FM_AD_UNIVPLL_416M_CK = 17,
+	FM_AD_UNIVPLL_249P6M_CK = 18,
+	FM_AD_UNIVPLL_178P3M_CK = 19,
+	FM_AD_MDPLL1_FS26M_CK = 20,
+	FM_CLKRTC = 25,
+	FM_DA_PLLGP1_26M_CK = 26,
+	FM_DA_PLLGP2_26M_CK = 27,
+	FM_AD_MSDCPLL_CK = 28,
+	FM_AD_APLL1_CK = 32,
+	FM_AD_APLL2_CK = 33,
+	FM_DA_USB20_48M_DIV_CK = 41,
+	FM_DA_UNIV_48M_DIV_CK = 42,
+	FM_AD_TCONPLL_CK = 51,
+	FM_AD_DSPPLL_CK = 52,
+	FM_AD_XO_HP_CLK_26M = 53,
+	FM_AD_IPPLL_CK = 54,
+	FM_AD_SYS_26M_CK = 56,
+	FM_AD_CLKSQ_26M_CK = 57,
+	FM_AD_26M_CLKMON = 58,
+
+	FM_ABIST_CLK_END = 59,
+};
+
+/* Frequency-meter source IDs for the CKGEN (topckgen output) mux; used
+ * as the ID argument of mt_get_ckgen_freq(). */
+enum rg_fqmtr_ckgen_clk {
+	FM_CKGEN_CLK_NULL = 0,
+
+	FM_AXI_CK = 1,
+	FM_MEM_CK = 2,
+	FM_UART_CK = 3,
+	FM_SPI_CK = 4,
+	FM_SPIS_CK = 5,
+	FM_MSDC50_0_HCLK_CK = 6,
+	FM_MSDC2_2_HCLK_CK = 7,
+	FM_MSDC50_0_CK = 8,
+	FM_MSDC50_2_CK = 9,
+	FM_MSDC30_1_CK = 10,
+	FM_AUDIO_CK = 11,
+	FM_AUD_INTBUS_CK = 12,
+	FM_HAPLL1_CK = 13,
+	FM_HAPLL2_CK = 14,
+	FM_A2SYS_CK = 15,
+	FM_A1SYS_CK = 16,
+	FM_ASM_L_CK = 17,
+	FM_ASM_M_CK = 18,
+	FM_ASM_H_CK = 19,
+	FM_AUD_SPDIF_IN_CK = 20,
+	FM_AUD_1_CK = 21,
+	FM_AUD_2_CK = 22,
+	FM_SSUSB_SYS_CK = 23,
+	FM_SSUSB_XHCI_CK = 24,
+	FM_SPM_CK = 25,
+	FM_I2C_CK = 26,
+	FM_PWM_CK = 27,
+	FM_DSP_CK = 28,
+	FM_NFI2X_CK = 29,
+	FM_SPINFI_CK = 30,
+	FM_ECC_CK = 31,
+	FM_GCPU_CK = 32,
+	FM_GCPUM_CK = 33,
+	FM_MBIST_DIAG_CK = 34,
+	FM_IP0_NNA_CK = 35,
+	FM_IP1_NNA_CK = 36,
+	FM_IP2_WFST_CK = 37,
+	FM_SFLASH_CK = 38,
+	FM_SRAM_CK = 39,
+	FM_MM_CK = 40,
+	FM_DPI0_CK = 41,
+	FM_DBG_ATCLK_CK = 42,
+	FM_OCC_104M_CK = 43,
+	FM_OCC_68M_CK = 44,
+	FM_OCC_182M_CK = 45,
+	FM_F_UFS_MP_SAP_CFG_CK = 48,
+	FM_F_UFS_TICK1US_CK = 49,
+	FM_HD_FAXI_EAST_CK = 50,
+	FM_HD_FAXI_WEST_CK = 51,
+	FM_HD_FAXI_NORTH_CK = 52,
+	FM_HD_FAXI_SOUTH_CK = 53,
+	FM_HG_FMIPICFG_TX_CK = 54,
+
+	FM_CKGEN_CLK_END = 55,
+};
+
+/* Printable names for ABIST meter IDs, indexed by enum rg_fqmtr_abist_clk;
+ * unnamed (reserved) slots stay NULL and are skipped by dump_fqmtr(). */
+static const char* abist_clk[] = {
+	[FM_AD_ARMPLL_CK]		= "AD_ARMPLL_CK",
+	[FM_AD_MAINPLL_CK]		= "AD_MAINPLL_CK",
+	[FM_AD_XO_LP_CLK_26M]		= "AD_XO_LP_CLK_26M",
+	[FM_AD_MAINPLL_H546M_CK]	= "AD_MAINPLL_H546M_CK",
+	[FM_AD_MAINPLL_H364M_CK]	= "AD_MAINPLL_H364M_CK",
+	[FM_AD_MAINPLL_H218P4M_CK]	= "AD_MAINPLL_H218P4M_CK",
+	[FM_AD_MAINPLL_H156M_CK]	= "AD_MAINPLL_H156M_CK",
+	[FM_AD_UNIVPLL_1248M_CK]	= "AD_UNIVPLL_1248M_CK",
+	[FM_AD_USB20_192M_CK]		= "AD_USB20_192M_CK",
+	[FM_AD_UNIVPLL_624M_CK]		= "AD_UNIVPLL_624M_CK",
+	[FM_AD_UNIVPLL_416M_CK]		= "AD_UNIVPLL_416M_CK",
+	[FM_AD_UNIVPLL_249P6M_CK]	= "AD_UNIVPLL_249P6M_CK",
+	[FM_AD_UNIVPLL_178P3M_CK]	= "AD_UNIVPLL_178P3M_CK",
+	[FM_AD_MDPLL1_FS26M_CK]		= "AD_MDPLL1_FS26M_CK",
+	[FM_CLKRTC]			= "clkrtc",
+	[FM_DA_PLLGP1_26M_CK]		= "DA_PLLGP1_26M_CK",
+	[FM_DA_PLLGP2_26M_CK]		= "DA_PLLGP2_26M_CK",
+	[FM_AD_MSDCPLL_CK]		= "AD_MSDCPLL_CK",
+	[FM_AD_APLL1_CK]		= "AD_APLL1_CK",
+	[FM_AD_APLL2_CK]		= "AD_APLL2_CK",
+	[FM_DA_USB20_48M_DIV_CK]	= "DA_USB20_48M_DIV_CK",
+	[FM_DA_UNIV_48M_DIV_CK]		= "DA_UNIV_48M_DIV_CK",
+	[FM_AD_TCONPLL_CK]		= "AD_TCONPLL_CK",
+	[FM_AD_DSPPLL_CK]		= "AD_DSPPLL_CK",
+	[FM_AD_XO_HP_CLK_26M]		= "AD_XO_HP_CLK_26M",
+	[FM_AD_IPPLL_CK]		= "AD_IPPLL_CK",
+	[FM_AD_SYS_26M_CK]		= "AD_SYS_26M_CK",
+	[FM_AD_CLKSQ_26M_CK]		= "AD_CLKSQ_26M_CK",
+	[FM_AD_26M_CLKMON]		= "AD_26M_CLKMON",
+};
+
+/* Printable names for CKGEN meter IDs, indexed by enum rg_fqmtr_ckgen_clk;
+ * NULL entries are skipped by dump_fqmtr(). */
+static const char* ckgen_clk[] = {
+	[FM_AXI_CK]		= "axi_ck",
+	[FM_MEM_CK]		= "mem_ck",
+	[FM_UART_CK]		= "uart_ck",
+	[FM_SPI_CK]		= "spi_ck",
+	[FM_SPIS_CK]		= "spis_ck",
+	[FM_MSDC50_0_HCLK_CK]	= "msdc50_0_hclk_ck",
+	[FM_MSDC2_2_HCLK_CK]	= "msdc2_2_hclk_ck",
+	[FM_MSDC50_0_CK]	= "msdc50_0_ck",
+	[FM_MSDC50_2_CK]	= "msdc50_2_ck",
+	[FM_MSDC30_1_CK]	= "msdc30_1_ck",
+	[FM_AUDIO_CK]		= "audio_ck",
+	[FM_AUD_INTBUS_CK]	= "aud_intbus_ck",
+	[FM_HAPLL1_CK]		= "hapll1_ck",
+	[FM_HAPLL2_CK]		= "hapll2_ck",
+	[FM_A2SYS_CK]		= "a2sys_ck",
+	[FM_A1SYS_CK]		= "a1sys_ck",
+	[FM_ASM_L_CK]		= "asm_l_ck",
+	[FM_ASM_M_CK]		= "asm_m_ck",
+	[FM_ASM_H_CK]		= "asm_h_ck",
+	[FM_AUD_SPDIF_IN_CK]	= "aud_spdif_in_ck",
+	[FM_AUD_1_CK]		= "aud_1_ck",
+	[FM_AUD_2_CK]		= "aud_2_ck",
+	[FM_SSUSB_SYS_CK]	= "ssusb_sys_ck",
+	[FM_SSUSB_XHCI_CK]	= "ssusb_xhci_ck",
+	[FM_SPM_CK]		= "spm_ck",
+	[FM_I2C_CK]		= "i2c_ck",
+	[FM_PWM_CK]		= "pwm_ck",
+	[FM_DSP_CK]		= "dsp_ck",
+	[FM_NFI2X_CK]		= "nfi2x_ck",
+	[FM_SPINFI_CK]		= "spinfi_ck",
+	[FM_ECC_CK]		= "ecc_ck",
+	[FM_GCPU_CK]		= "gcpu_ck",
+	[FM_GCPUM_CK]		= "gcpum_ck",
+	[FM_MBIST_DIAG_CK]	= "mbist_diag_ck",
+	[FM_IP0_NNA_CK]		= "ip0_nna_ck",
+	[FM_IP1_NNA_CK]		= "ip1_nna_ck",
+	[FM_IP2_WFST_CK]	= "ip2_wfst_ck",
+	[FM_SFLASH_CK]		= "sflash_ck",
+	[FM_SRAM_CK]		= "sram_ck",
+	[FM_MM_CK]		= "mm_ck",
+	[FM_DPI0_CK]		= "dpi0_ck",
+	[FM_DBG_ATCLK_CK]	= "dbg_atclk_ck",
+	[FM_OCC_104M_CK]	= "occ_104m_ck",
+	[FM_OCC_68M_CK]		= "occ_68m_ck",
+	[FM_OCC_182M_CK]	= "occ_182m_ck",
+	[FM_F_UFS_MP_SAP_CFG_CK]	= "f_ufs_mp_sap_cfg_ck",
+	[FM_F_UFS_TICK1US_CK]	= "f_ufs_tick1us_ck",
+	[FM_HD_FAXI_EAST_CK]	= "hd_faxi_east_ck",
+	[FM_HD_FAXI_WEST_CK]	= "hd_faxi_west_ck",
+	[FM_HD_FAXI_NORTH_CK]	= "hd_faxi_north_ck",
+	[FM_HD_FAXI_SOUTH_CK]	= "hd_faxi_south_ck",
+	[FM_HG_FMIPICFG_TX_CK]	= "hg_fmipicfg_tx_ck",
+};
+
+/*
+ * Measure an ABIST clock's frequency in KHz with the on-chip frequency
+ * meter. ID selects the source per enum rg_fqmtr_abist_clk. The meter
+ * config registers are saved on entry and restored before returning.
+ * The wait for completion is bounded (~10 x 10ms); on timeout whatever
+ * is in the counter is returned.
+ */
+unsigned int mt_get_abist_freq(unsigned int ID)
+{
+	int output = 0, i = 0;
+	unsigned int temp, clk26cali_0, clk_dbg_cfg, clk_misc_cfg_0, clk26cali_1;
+
+	clk_dbg_cfg = DRV_Reg32(CLK_DBG_CFG);
+	DRV_WriteReg32(CLK_DBG_CFG, (clk_dbg_cfg & 0xFFC0FFFC)|(ID << 16)); // select abist_cksw and set meter source = ID
+
+	clk_misc_cfg_0 = DRV_Reg32(CLK_MISC_CFG_0);
+	DRV_WriteReg32(CLK_MISC_CFG_0, (clk_misc_cfg_0 & 0x00FFFFFF) | (0x3 << 24)); // divider = /4; compensated by the *4 below
+
+	clk26cali_0 = DRV_Reg32(CLK26CALI_0);
+	clk26cali_1 = DRV_Reg32(CLK26CALI_1);
+	DRV_WriteReg32(CLK26CALI_0, (DRV_Reg32(CLK26CALI_0) & ~0x1000) | 0x1000); // bit[12] = 1, enable fmeter
+	DRV_WriteReg32(CLK26CALI_0, (DRV_Reg32(CLK26CALI_0) & ~0x10) | 0x10); // bit[4] = 1, start fmeter
+
+	/* wait for the frequency meter to finish (bounded busy-wait) */
+	while (DRV_Reg32(CLK26CALI_0) & 0x10)
+	{
+		mdelay(10);
+		i++;
+		if(i > 10)
+			break;
+	}
+
+	temp = DRV_Reg32(CLK26CALI_1) & 0xFFFF;
+	output = ((temp * 26000) ) / 1024; // KHz: count * 26MHz ref / 1024-cycle window
+
+	/* restore the meter configuration we clobbered */
+	DRV_WriteReg32(CLK_DBG_CFG, clk_dbg_cfg);
+	DRV_WriteReg32(CLK_MISC_CFG_0, clk_misc_cfg_0);
+	DRV_WriteReg32(CLK26CALI_0, clk26cali_0);
+	DRV_WriteReg32(CLK26CALI_1, clk26cali_1);
+
+	return output * 4; // undo the /4 input divider selected above
+}
+/*
+ * Measure a CKGEN (topckgen output) clock's frequency in KHz; ID per
+ * enum rg_fqmtr_ckgen_clk. Same meter sequence as mt_get_abist_freq()
+ * but with no input divider, so the raw result is returned directly.
+ */
+static unsigned int mt_get_ckgen_freq(unsigned int ID)
+{
+	int output = 0, i = 0;
+	unsigned int temp, clk26cali_0, clk_dbg_cfg, clk_misc_cfg_0, clk26cali_1;
+
+	clk_dbg_cfg = DRV_Reg32(CLK_DBG_CFG);
+	DRV_WriteReg32(CLK_DBG_CFG, (clk_dbg_cfg & 0xFFFFC0FC)|(ID << 8)|(0x1)); // select ckgen_cksw, meter source = ID (01: hd_faxi_ck)
+
+	clk_misc_cfg_0 = DRV_Reg32(CLK_MISC_CFG_0);
+	DRV_WriteReg32(CLK_MISC_CFG_0, (clk_misc_cfg_0 & 0x00FFFFFF)); // divider cleared (DVT sets zero)
+
+	clk26cali_0 = DRV_Reg32(CLK26CALI_0);
+	clk26cali_1 = DRV_Reg32(CLK26CALI_1);
+	DRV_WriteReg32(CLK26CALI_0, (DRV_Reg32(CLK26CALI_0) & ~0x1000) | 0x1000); // bit[12] = 1, enable fmeter
+	DRV_WriteReg32(CLK26CALI_0, (DRV_Reg32(CLK26CALI_0) & ~0x10) | 0x10); // bit[4] = 1, start fmeter
+
+	/* wait for the frequency meter to finish (bounded busy-wait) */
+	while (DRV_Reg32(CLK26CALI_0) & 0x10)
+	{
+		mdelay(10);
+		i++;
+		if(i > 10)
+			break;
+	}
+
+	temp = DRV_Reg32(CLK26CALI_1) & 0xFFFF;
+	output = ((temp * 26000) ) / 1024; // KHz: count * 26MHz ref / 1024-cycle window
+
+	/* restore the meter configuration we clobbered */
+	DRV_WriteReg32(CLK_DBG_CFG, clk_dbg_cfg);
+	DRV_WriteReg32(CLK_MISC_CFG_0, clk_misc_cfg_0);
+	DRV_WriteReg32(CLK26CALI_0, clk26cali_0);
+	DRV_WriteReg32(CLK26CALI_1, clk26cali_1);
+
+	return output;
+}
+
+/* Log the measured frequency of every named ABIST and CKGEN clock. */
+void dump_fqmtr(void)
+{
+	unsigned int temp;
+
+	dprintf(CRITICAL, "abist:\n");
+	for (temp = 0; temp < FM_ABIST_CLK_END; temp++) {
+		if (!abist_clk[temp]) /* skip reserved/unnamed meter IDs */
+			continue;
+		dprintf(CRITICAL, "%d: %s: %d KHz\n", temp, abist_clk[temp],
+			mt_get_abist_freq(temp));
+	}
+
+	/* NOTE(review): "ckegen" is a typo in the log text; left untouched
+	 * since it is runtime output. */
+	dprintf(CRITICAL, "ckegen:\n");
+	for (temp = 0; temp < FM_CKGEN_CLK_END; temp++) {
+		if (!ckgen_clk[temp]) /* skip reserved/unnamed meter IDs */
+			continue;
+		dprintf(CRITICAL, "%d: %s: %d KHz\n", temp, ckgen_clk[temp],
+			mt_get_ckgen_freq(temp));
+	}
+}
+
+/* CPU (ARMPLL) frequency in KHz; 0 on FPGA where there is no PLL. */
+unsigned int mt_get_cpu_freq(void)
+{
+#if FPGA_PLATFORM
+	return 0;
+#else
+	return mt_get_abist_freq(FM_AD_ARMPLL_CK);
+#endif
+}
+
+/* AXI bus frequency in KHz; 0 on FPGA where there is no PLL. */
+unsigned int mt_get_bus_freq(void)
+{
+#if FPGA_PLATFORM
+	return 0;
+#else
+	return mt_get_ckgen_freq(FM_AXI_CK);
+#endif
+}
+
+/* mt_pll_post_init() should be invoked after pmic_init */
+/* Sanity log of the clock tree: optionally dumps every fmeter reading
+ * and key PLL registers (DEBUG_FQMTR), always prints CPU/bus freq. */
+void mt_pll_post_init(void)
+{
+#if DEBUG_FQMTR
+	dump_fqmtr();
+
+	dprintf(CRITICAL, "AP_PLL_CON1= 0x%x\n", DRV_Reg32(AP_PLL_CON1));
+	dprintf(CRITICAL, "AP_PLL_CON2= 0x%x\n", DRV_Reg32(AP_PLL_CON2));
+	dprintf(CRITICAL, "CLKSQ_STB_CON0= 0x%x\n", DRV_Reg32(CLKSQ_STB_CON0));
+	dprintf(CRITICAL, "PLL_ISO_CON0= 0x%x\n", DRV_Reg32(PLL_ISO_CON0));
+	dprintf(CRITICAL, "ARMPLL_CON0= 0x%x\n", DRV_Reg32(ARMPLL_CON0));
+	dprintf(CRITICAL, "ARMPLL_CON1= 0x%x\n", DRV_Reg32(ARMPLL_CON1));
+	dprintf(CRITICAL, "ARMPLL_CON3= 0x%x\n", DRV_Reg32(ARMPLL_CON3));
+	dprintf(CRITICAL, "MAINPLL_CON0 = 0x%x\n", DRV_Reg32(MAINPLL_CON0));
+	dprintf(CRITICAL, "MAINPLL_CON1 = 0x%x\n", DRV_Reg32(MAINPLL_CON1));
+	dprintf(CRITICAL, "MAINPLL_CON3 = 0x%x\n", DRV_Reg32(MAINPLL_CON3));
+	dprintf(CRITICAL, "UPLL_CON0= 0x%x\n", DRV_Reg32(UNIVPLL_CON0));
+	dprintf(CRITICAL, "UPLL_CON1= 0x%x\n", DRV_Reg32(UNIVPLL_CON1));
+	dprintf(CRITICAL, "UPLL_CON3= 0x%x", DRV_Reg32(UNIVPLL_CON3));
+	dprintf(CRITICAL, "MMSYS_CG_CON0= 0x%x, \n", DRV_Reg32(MMSYS_CG_CON0));
+#endif /* DEBUG_FQMTR */
+	dprintf(CRITICAL, "cpu_freq = %d KHz\n", mt_get_cpu_freq());
+	dprintf(CRITICAL, "bus_freq = %d KHz\n", mt_get_bus_freq());
+}
+
+/*
+ * One-time PLL/clock bring-up at boot: power on and configure every
+ * PLL, switch the CPU to ARMPLL, program the top clock muxes, and
+ * reset the CONNSYS MCU. The register write ORDER and the delays
+ * between power-on / ISO-release / enable steps are hardware
+ * requirements -- do not reorder.
+ */
+void mt_pll_init(void)
+{
+	dprintf(CRITICAL, "mt_pll_init +\n");
+	unsigned int temp;
+/*************
+ * CLKSQ (26MHz clock squarer) enable
+ * ***********/
+	DRV_WriteReg32(AP_PLL_CON0, (DRV_Reg32(AP_PLL_CON0) | 0x1)); // [0] CLKSQ_EN = 1
+	udelay(100); // wait 100us
+	DRV_WriteReg32(AP_PLL_CON0, (DRV_Reg32(AP_PLL_CON0) | 0x2)); // [1] CLKSQ_LPF_EN =1
+
+/*************
+ * xPLL PWR ON
+ **************/
+	DRV_WriteReg32(ARMPLL_CON3, (DRV_Reg32(ARMPLL_CON3) | 0x1)); // [0]ARMPLL_PWR_ON = 1
+	DRV_WriteReg32(MAINPLL_CON3, (DRV_Reg32(MAINPLL_CON3) | 0x1)); // [0]MAINPLL_PWR_ON = 1
+	DRV_WriteReg32(UNIVPLL_CON3, (DRV_Reg32(UNIVPLL_CON3) | 0x1)); // [0]UNIVPLL_PWR_ON = 1
+	DRV_WriteReg32(APLL1_CON4, (DRV_Reg32(APLL1_CON4) | 0x1)); // [0]APLL1_PWR_ON = 1
+	DRV_WriteReg32(APLL2_CON4, (DRV_Reg32(APLL2_CON4) | 0x1)); // [0]APLL2_PWR_ON = 1
+	DRV_WriteReg32(IPPLL_CON3, (DRV_Reg32(IPPLL_CON3) | 0x1)); // [0]IPPLL_PWR_ON = 1
+	DRV_WriteReg32(TCONPLL_CON3, (DRV_Reg32(TCONPLL_CON3) | 0x1)); // [0]TCONPLL_PWR_ON = 1
+	DRV_WriteReg32(DSPPLL_CON3, (DRV_Reg32(DSPPLL_CON3) | 0x1)); // [0]DSPPLL_PWR_ON = 1
+	DRV_WriteReg32(MSDCPLL_CON3, (DRV_Reg32(MSDCPLL_CON3) | 0x1)); // [0]MSDCPLL_PWR_ON = 1
+
+/*************
+ * Wait PWR ready(30ns)
+ **************/
+	udelay(30);
+
+/******************
+* xPLL ISO Disable (release isolation after power is stable)
+*******************/
+	DRV_WriteReg32(ARMPLL_CON3, (DRV_Reg32(ARMPLL_CON3) & 0xFFFFFFFD)); // [2]ARMPLL_ISO_EN = 0
+	DRV_WriteReg32(MAINPLL_CON3, (DRV_Reg32(MAINPLL_CON3) & 0xFFFFFFFD)); // [2]MAINPLL_ISO_EN = 0
+	DRV_WriteReg32(UNIVPLL_CON3, (DRV_Reg32(UNIVPLL_CON3) & 0xFFFFFFFD)); // [2]UNIVPLL_ISO_EN = 0
+	DRV_WriteReg32(APLL1_CON4, (DRV_Reg32(APLL1_CON4) & 0xFFFFFFFD)); // [2]APLL1_ISO_EN = 0
+	DRV_WriteReg32(APLL2_CON4, (DRV_Reg32(APLL2_CON4) & 0xFFFFFFFD)); // [2]APLL2_ISO_EN = 0
+	DRV_WriteReg32(IPPLL_CON3, (DRV_Reg32(IPPLL_CON3) & 0xFFFFFFFD)); // [2]IPPLL_ISO_EN = 0
+	DRV_WriteReg32(TCONPLL_CON3, (DRV_Reg32(TCONPLL_CON3) & 0xFFFFFFFD)); // [2]TCONPLL_ISO_EN = 0
+	DRV_WriteReg32(DSPPLL_CON3, (DRV_Reg32(DSPPLL_CON3) & 0xFFFFFFFD)); // [2]DSPPLL_ISO_EN = 0
+	DRV_WriteReg32(MSDCPLL_CON3, (DRV_Reg32(MSDCPLL_CON3) & 0xFFFFFFFD)); // [2]MSDCPLL_ISO_EN = 0
+
+/********************
+ * xPLL Frequency Set (PCW/postdiv values; target rates in comments)
+ *********************/
+	DRV_WriteReg32(ARMPLL_CON1, 0x811AEC4E); // 1400 MHz
+	DRV_WriteReg32(MAINPLL_CON1, 0x81150000); // 1092 MHz
+	DRV_WriteReg32(UNIVPLL_CON1, 0x80180000); // 1248 MHz
+
+	DRV_WriteReg32(APLL1_CON2, 0x6F28BD4C); // 180.6 MHz
+	DRV_WriteReg32(APLL1_CON1, 0x84000000);
+
+	DRV_WriteReg32(APLL2_CON2, 0x78FD5265); // 196.6 MHz
+	DRV_WriteReg32(APLL2_CON1, 0x84000000);
+
+	DRV_WriteReg32(IPPLL_CON1, 0x821713B1); // 600 MHz
+	DRV_WriteReg32(TCONPLL_CON1, 0x83189D89); // 320 MHz
+	DRV_WriteReg32(DSPPLL_CON1, 0x841E0000); // 195 MHz
+	DRV_WriteReg32(MSDCPLL_CON1, 0x831EC4EC); // 400 MHz
+
+/***********************
+ * xPLL Frequency Enable
+ ************************/
+	DRV_WriteReg32(ARMPLL_CON0, (DRV_Reg32(ARMPLL_CON0) | 0x1)); // [0]ARMPLL_EN = 1
+	DRV_WriteReg32(MAINPLL_CON0, (DRV_Reg32(MAINPLL_CON0) | 0x1)); // [0]MAINPLL_EN = 1
+	DRV_WriteReg32(UNIVPLL_CON0, (DRV_Reg32(UNIVPLL_CON0) | 0x1)); // [0]UNIVPLL_EN = 1
+	DRV_WriteReg32(APLL1_CON0, (DRV_Reg32(APLL1_CON0) | 0x1)); // [0]APLL1_EN = 1
+	DRV_WriteReg32(APLL2_CON0, (DRV_Reg32(APLL2_CON0) | 0x1)); // [0]APLL2_EN = 1
+	DRV_WriteReg32(IPPLL_CON0, (DRV_Reg32(IPPLL_CON0) | 0x1)); // [0]IPPLL_EN = 1
+	DRV_WriteReg32(TCONPLL_CON0, (DRV_Reg32(TCONPLL_CON0) | 0x1)); // [0]TCONPLL_EN = 1
+	DRV_WriteReg32(DSPPLL_CON0, (DRV_Reg32(DSPPLL_CON0) | 0x1)); // [0]DSPPLL_EN = 1
+	DRV_WriteReg32(MSDCPLL_CON0, (DRV_Reg32(MSDCPLL_CON0) | 0x1)); // [0]MSDCPLL_EN = 1
+
+/*************
+ * Wait for PLL lock.
+ * NOTE(review): comment in the original said "(20ns)" but the call
+ * below waits 20us, matching the stated 20us minimum -- the old
+ * heading was misleading, the code is correct.
+ **************/
+	udelay(20); // wait for PLL stable (min delay is 20us)
+
+/***************
+ * xPLL DIV RSTB (release post-divider reset after lock)
+ ****************/
+	DRV_WriteReg32(MAINPLL_CON0, (DRV_Reg32(MAINPLL_CON0) | 0x00800000)); // [23]MAINPLL_DIV_RSTB = 1
+	DRV_WriteReg32(UNIVPLL_CON0, (DRV_Reg32(UNIVPLL_CON0) | 0x00800000)); // [23]UNIVPLL_DIV_RSTB = 1
+
+	DRV_WriteReg32(PLLON_CON0, 0x1111F0F0); // armpll/mainpll/mpll sleep control
+
+	DRV_WriteReg32(ACLKEN_DIV, 0x12); // CPU BUS clock freq is divided by 2
+
+/*****************
+ * switch CPU clock to ARMPLL
+ ******************/
+	DRV_WriteReg32(CLK_MISC_CFG_0, DRV_Reg32(CLK_MISC_CFG_0) | 0x30);
+
+	temp = DRV_Reg32(MCU_BUS_MUX) & ~0x600;
+	DRV_WriteReg32(MCU_BUS_MUX, temp | 0x200); // mux sel = ARMPLL
+
+/*****************
+ * AXI BUS DCM Setting (none required on this platform)
+ ******************/
+
+/*****************
+ * 32k setting: external crystal vs internal 26M-derived 32k
+ ******************/
+#if WITH_EXT_32K
+	DRV_WriteReg32(CLK26CALI_2, (DRV_Reg32(CLK26CALI_2) & ~0x3000));
+#else
+	/* calibrate and enable the internal 32k divider; '&' binds tighter
+	 * than '|', so each expression is (reg & ~mask) | value */
+	DRV_WriteReg32(CLK26CALI_0, (DRV_Reg32(CLK26CALI_0) & ~0x7FFF0000 | 0x1D3F0000));
+	DRV_WriteReg32(CLK26CALI_2, (DRV_Reg32(CLK26CALI_2) & ~0x3000 | 0x3000));
+	DRV_WriteReg32(CLK26CALI_0, (DRV_Reg32(CLK26CALI_0) & ~0x7FFF0000 | 0x5D3F0000));
+#endif
+
+/************
+ * TOP CLKMUX: clear every mux field, then set the chosen parents and
+ * latch with the UPDATE registers
+ *************/
+	DRV_WriteReg32(CLK_CFG_0_CLR, 0x07010307);
+	DRV_WriteReg32(CLK_CFG_1_CLR, 0x07030307);
+	DRV_WriteReg32(CLK_CFG_2_CLR, 0x07030707);
+	DRV_WriteReg32(CLK_CFG_3_CLR, 0x07070707);
+	DRV_WriteReg32(CLK_CFG_4_CLR, 0x03030303);
+	DRV_WriteReg32(CLK_CFG_5_CLR, 0x03030101);
+	DRV_WriteReg32(CLK_CFG_6_CLR, 0x07070701);
+	DRV_WriteReg32(CLK_CFG_7_CLR, 0x07030707);
+	DRV_WriteReg32(CLK_CFG_8_CLR, 0x07070103);
+	DRV_WriteReg32(CLK_CFG_9_CLR, 0x07070707);
+	DRV_WriteReg32(CLK_CFG_10_CLR, 0x01010307);
+	DRV_WriteReg32(CLK_CFG_11_CLR, 0x00000003);
+
+	DRV_WriteReg32(CLK_CFG_0_SET, 0x01000001);
+	DRV_WriteReg32(CLK_CFG_1_SET, 0x01010301);
+	DRV_WriteReg32(CLK_CFG_2_SET, 0x01000202);
+	DRV_WriteReg32(CLK_CFG_3_SET, 0x04040101);
+	DRV_WriteReg32(CLK_CFG_4_SET, 0x01020202);
+	DRV_WriteReg32(CLK_CFG_5_SET, 0x03030101);
+	DRV_WriteReg32(CLK_CFG_6_SET, 0x01000301);
+	DRV_WriteReg32(CLK_CFG_7_SET, 0x02030000);
+	DRV_WriteReg32(CLK_CFG_8_SET, 0x04040001);
+	DRV_WriteReg32(CLK_CFG_9_SET, 0x07010701);
+	DRV_WriteReg32(CLK_CFG_10_SET, 0x00000102);
+	DRV_WriteReg32(CLK_CFG_11_SET, 0x00000000);
+
+	DRV_WriteReg32(CLK_CFG_UPDATE, 0xffffffff);
+	DRV_WriteReg32(CLK_CFG_UPDATE1, 0x00001fff);
+
+	/* CONNSYS MCU reset (write requires RGU key in the top byte) */
+	temp = DRV_Reg32(WDT_SWSYSRST);
+	dprintf(CRITICAL, "before: WDT_SWSYSRST = 0x%x\n", DRV_Reg32(WDT_SWSYSRST));
+	DRV_WriteReg32(WDT_SWSYSRST, (temp | CONNSYS_CPU_SW_RST | RGU_KEY_CODE));
+	dprintf(CRITICAL, "after: WDT_SWSYSRST = 0x%x\n", DRV_Reg32(WDT_SWSYSRST));
+
+#if ALL_CLK_ON
+/************
+ * TOP CG
+ *************/
+	DRV_WriteReg32(CLK_AUDDIV_4, (DRV_Reg32(CLK_AUDDIV_4) & ~0x00000007));
+	DRV_WriteReg32(CLK_MISC_CFG_0, (DRV_Reg32(CLK_MISC_CFG_0) & ~0x00c00300) | 0x00c00300);
+	DRV_WriteReg32(CLK_MODE, (DRV_Reg32(CLK_MODE) & ~0x00030c00));
+
+/************
+ * INFRA_AO CG (clear clock gates)
+ *************/
+	DRV_WriteReg32(MODULE_SW_CG_0_CLR, 0x9dff8740);
+	DRV_WriteReg32(MODULE_SW_CG_1_CLR, 0x23044796);
+	DRV_WriteReg32(MODULE_SW_CG_2_CLR, 0x0800005b);
+	DRV_WriteReg32(MODULE_SW_CG_3_CLR, 0x07c00780);
+	DRV_WriteReg32(MODULE_SW_CG_4_CLR, 0x00000a8e);
+	DRV_WriteReg32(INFRA_MFG_MASTER_M0_GALS_CTRL, (DRV_Reg32(INFRA_MFG_MASTER_M0_GALS_CTRL) & ~0x00000100) | 0x00000100);
+
+	DRV_WriteReg32(IPSYS_EMI_CK_CG, (DRV_Reg32(IPSYS_EMI_CK_CG) & ~0x1) | 0x1);
+	DRV_WriteReg32(IPSYS_SRAM_CK_CG, (DRV_Reg32(IPSYS_SRAM_CK_CG) & ~0x1) | 0x1);
+	DRV_WriteReg32(IPSYS_AXI_CK_CG, (DRV_Reg32(IPSYS_AXI_CK_CG) & ~0x1) | 0x1);
+	DRV_WriteReg32(IPSYS_NNA0_PWR_ON, (DRV_Reg32(IPSYS_NNA0_PWR_ON) & ~0x1) | 0x1);
+	DRV_WriteReg32(IPSYS_NNA1_PWR_ON, (DRV_Reg32(IPSYS_NNA1_PWR_ON) & ~0x1) | 0x1);
+	DRV_WriteReg32(IPSYS_WFST_PWR_ON, (DRV_Reg32(IPSYS_WFST_PWR_ON) & ~0x1) | 0x1);
+	DRV_WriteReg32(IPSYS_NNAO_CK_CG, (DRV_Reg32(IPSYS_NNAO_CK_CG) & ~0x1) | 0x1);
+	DRV_WriteReg32(IPSYS_NNA1_CK_CG, (DRV_Reg32(IPSYS_NNA1_CK_CG) & ~0x1) | 0x1);
+	DRV_WriteReg32(IPSYS_WFST_CK_CG, (DRV_Reg32(IPSYS_WFST_CK_CG) & ~0x1) | 0x1);
+	DRV_WriteReg32(IPSYS_26M_CK_CG, (DRV_Reg32(IPSYS_26M_CK_CG) & ~0x1) | 0x1);
+
+/*************
+ * for MTCMOS (power domains)
+ *************/
+	spm_mtcmos_ctrl_conn(STA_POWER_ON);
+	spm_mtcmos_ctrl_mm(STA_POWER_ON);
+	spm_mtcmos_ctrl_img(STA_POWER_ON);
+	spm_mtcmos_ctrl_ip0(STA_POWER_ON);
+	spm_mtcmos_ctrl_ip1(STA_POWER_ON);
+	spm_mtcmos_ctrl_ip2(STA_POWER_ON);
+	spm_mtcmos_ctrl_usb_mac_p1(STA_POWER_ON);
+	spm_mtcmos_ctrl_dsp(STA_POWER_ON);
+	/*spm_mtcmos_ctrl_audio(STA_POWER_ON);*/
+	spm_mtcmos_ctrl_asrc(STA_POWER_ON);
+
+/*************
+ * for Subsys CG
+ *************/
+	DRV_WriteReg32(MMSYS_CG_CLR0, 0x03f800bf);
+	DRV_WriteReg32(IMGSYS_CG_CLR0, 0x00a18935);
+	DRV_WriteReg32(IMGSYS_CG_CLR1, 0x0000000e);
+#endif /* ALL_CLK_ON */
+
+#if WITH_CLKSQ_OFF
+	DRV_WriteReg32(AP_PLL_CON3, (DRV_Reg32(AP_PLL_CON3) & ~0x6) | 0x6);
+	DRV_WriteReg32(AP_PLL_CON0, (DRV_Reg32(AP_PLL_CON0) & ~0x1));
+	DRV_WriteReg32(AP_PLL_CON0, (DRV_Reg32(AP_PLL_CON0) & ~0x80) | 0x80);
+#else
+	DRV_WriteReg32(AP_PLL_CON3, (DRV_Reg32(AP_PLL_CON3) & ~0x6)); // CLKSQ sleep control
+#endif
+
+	dprintf(CRITICAL, "mt_pll_init done\n");
+}
diff --git a/src/bsp/lk/platform/mt8512/drivers/pmic/bd71828.c b/src/bsp/lk/platform/mt8512/drivers/pmic/bd71828.c
new file mode 100644
index 0000000..8ee9706
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/pmic/bd71828.c
@@ -0,0 +1,166 @@
+/* Copyright Statement:
+ *
+ * This software/firmware and related documentation ("MediaTek Software") are
+ * protected under relevant copyright laws. The information contained herein
+ * is confidential and proprietary to MediaTek Inc. and/or its licensors.
+ * Without the prior written permission of MediaTek inc. and/or its licensors,
+ * any reproduction, modification, use or disclosure of MediaTek Software,
+ * and information contained herein, in whole or in part, shall be strictly prohibited.
+ */
+/* MediaTek Inc. (C) 2015. All rights reserved.
+ *
+ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
+ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
+ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
+ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
+ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
+ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
+ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
+ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
+ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
+ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
+ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
+ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
+ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
+ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
+ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
+ */
+
+#include <platform/mt_i2c.h>
+#include <platform/bd71828.h>
+
+/**********************************************************
+ * I2C Slave Setting
+ *********************************************************/
+#define BD71828_SLAVE_ADDR 0x4b
+#define BD71828_I2C_ID 1
+#define I2C_SPEED_400_KHZ 400 /* kHz */
+
+/**********************************************************
+ * Init Setting
+ *********************************************************/
+struct pmic_setting {
+ unsigned short addr;
+ unsigned short val;
+ unsigned short mask;
+ unsigned char shift;
+};
+
+static struct pmic_setting init_setting[] = {
+ /* [7:0]: DCIN_CLPS, input voltage limit 4640 mv */
+ {0x71, 0x3A, 0xFF, 0},
+ /* [7:7]: WDT_DIS, Battery Charger Watch Dog Timer disable */
+ {0x75, 0x0, 0x1, 7},
+ /* [1:1]: BTMP_EN, Thermal control enable for charge voltage */
+ {0x75, 0x0, 0x1, 1},
+ /* [7:0]: CHG_VBAT_1, CV 4200mv */
+ {0x7D, 0x30, 0xFF, 0},
+ /* [1:1]: BUCK1_IDLE_ON, Vproc 0V */
+ {0x8, 0x0, 0x1, 1},
+ /* [1:1]: BUCK6_IDLE_ON, Vsram_proc 0V */
+ {0x25, 0x0, 0x1, 1},
+ /* [1:1]: BUCK2_IDLE_ON, Vcore sw mode */
+ {0x12, 0x1, 0x1, 1},
+ /* [1:1]: BUCK2_IDLE_VID, Vcore 600 mv */
+ {0x15, 0x10, 0xFF, 0},
+ /* [1:1]: BUCK7_IDLE_ON, Vsram_core sw mode */
+ {0x2F, 0x1, 0x1, 1},
+ /* [1:1]: BUCK7_IDLE_VID, Vsram_core 800 mv */
+ {0x32, 0x30, 0xFF, 0},
+};
+
+/**********************************************************
+ *
+ * [I2C Function For Read/Write bd71828]
+ *
+ *********************************************************/
+static u32 bd71828_write_byte(u8 addr, u8 value)
+{
+ int ret_code = 0;
+ u8 write_data[2];
+ u16 len;
+
+ write_data[0]= addr;
+ write_data[1] = value;
+ len = 2;
+
+ ret_code = mtk_i2c_write(BD71828_I2C_ID, BD71828_SLAVE_ADDR, I2C_SPEED_400_KHZ, write_data, len);
+
+ if(ret_code == 0)
+ return 0; // ok
+ else
+ return -1; // fail
+}
+
+static u32 bd71828_read_byte(u8 addr, u8 *dataBuffer)
+{
+ int ret_code = 0;
+ u16 len;
+ *dataBuffer = addr;
+
+ len = 1;
+
+ ret_code = mtk_i2c_write_read(BD71828_I2C_ID, BD71828_SLAVE_ADDR, I2C_SPEED_400_KHZ,
+ dataBuffer, dataBuffer, len, len);
+
+ if(ret_code == 0)
+ return 0; // ok
+ else
+ return -1; // fail
+}
+
+/**********************************************************
+ *
+ * [Read / Write Function]
+ *
+ *********************************************************/
+u32 pmic_read_interface(u8 RegNum, u8 *val, u8 MASK, u8 SHIFT)
+{
+ u8 bd71828_reg = 0;
+ u32 ret = 0;
+
+ ret = bd71828_read_byte(RegNum, &bd71828_reg);
+
+ bd71828_reg &= (MASK << SHIFT);
+ *val = (bd71828_reg >> SHIFT);
+
+ return ret;
+}
+
+u32 pmic_config_interface(u8 RegNum, u8 val, u8 MASK, u8 SHIFT)
+{
+ u8 bd71828_reg = 0;
+ u32 ret = 0;
+
+ ret = bd71828_read_byte(RegNum, &bd71828_reg);
+
+ bd71828_reg &= ~(MASK << SHIFT);
+ bd71828_reg |= (val << SHIFT);
+
+ ret = bd71828_write_byte(RegNum, bd71828_reg);
+
+ return ret;
+}
+
+static void bd71828_init_setting(void)
+{
+ for (int i = 0; i < ARRAY_SIZE(init_setting); i++)
+ pmic_config_interface(
+ init_setting[i].addr, init_setting[i].val,
+ init_setting[i].mask, init_setting[i].shift);
+}
+
+//==============================================================================
+// PMIC6398 Init Code
+//==============================================================================
+void pmic_init_bd71828(void)
+{
+ /* initial setting */
+ bd71828_init_setting();
+
+ dprintf(CRITICAL, "[%s] BD71828 LK End\n", __func__);
+}
+
diff --git a/src/bsp/lk/platform/mt8512/drivers/pmic/pmic_6398.c b/src/bsp/lk/platform/mt8512/drivers/pmic/pmic_6398.c
new file mode 100644
index 0000000..6a4f9f1
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/pmic/pmic_6398.c
@@ -0,0 +1,165 @@
+/* Copyright Statement:
+ *
+ * This software/firmware and related documentation ("MediaTek Software") are
+ * protected under relevant copyright laws. The information contained herein
+ * is confidential and proprietary to MediaTek Inc. and/or its licensors.
+ * Without the prior written permission of MediaTek inc. and/or its licensors,
+ * any reproduction, modification, use or disclosure of MediaTek Software,
+ * and information contained herein, in whole or in part, shall be strictly prohibited.
+ */
+/* MediaTek Inc. (C) 2015. All rights reserved.
+ *
+ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
+ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
+ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
+ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
+ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
+ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
+ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
+ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
+ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
+ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
+ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
+ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
+ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
+ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
+ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
+ */
+
+#include <platform/mt_i2c.h>
+#include <platform/pmic_6398.h>
+
+/**********************************************************
+ * I2C Slave Setting
+ *********************************************************/
+#define mt6398_SLAVE_ADDR 0x62
+
+/**********************************************************
+ * Global Variable
+ *********************************************************/
+#define mt6398_I2C_ID 1
+
+/**********************************************************
+ *
+ * [I2C Function For Read/Write mt6398]
+ *
+ *********************************************************/
+u32 mt6398_write_byte(u8 addr, u8 value)
+{
+ int ret_code = 0;
+ u8 write_data[2];
+ u16 len;
+
+ write_data[0]= addr;
+ write_data[1] = value;
+ len = 2;
+
+ ret_code = mtk_i2c_write(mt6398_I2C_ID, mt6398_SLAVE_ADDR, 400, write_data, len);
+
+ if(ret_code == 0)
+ return 0; // ok
+ else
+ return -1; // fail
+}
+
+u32 mt6398_read_byte (u8 addr, u8 *dataBuffer)
+{
+ int ret_code = 0;
+ u16 len;
+ *dataBuffer = addr;
+
+ len = 1;
+
+ ret_code = mtk_i2c_write_read(mt6398_I2C_ID, mt6398_SLAVE_ADDR, 400,
+ dataBuffer, dataBuffer, len, len);
+
+ if(ret_code == 0)
+ return 0; // ok
+ else
+ return -1; // fail
+}
+
+/**********************************************************
+ *
+ * [Read / Write Function]
+ *
+ *********************************************************/
+u32 pmic_read_interface (u8 RegNum, u8 *val, u8 MASK, u8 SHIFT)
+{
+ u8 mt6398_reg = 0;
+ u32 ret = 0;
+
+ ret = mt6398_read_byte(RegNum, &mt6398_reg);
+
+ mt6398_reg &= (MASK << SHIFT);
+ *val = (mt6398_reg >> SHIFT);
+
+ return ret;
+}
+
+u32 pmic_config_interface (u8 RegNum, u8 val, u8 MASK, u8 SHIFT)
+{
+ u8 mt6398_reg = 0;
+ u32 ret = 0;
+
+ ret = mt6398_read_byte(RegNum, &mt6398_reg);
+
+ mt6398_reg &= ~(MASK << SHIFT);
+ mt6398_reg |= (val << SHIFT);
+
+ ret = mt6398_write_byte(RegNum, mt6398_reg);
+
+ return ret;
+}
+
+int pmic_detect_powerkey(void)
+{
+ u8 val=0;
+
+ pmic_read_interface(0x1E, &val, 0x01, 7);
+
+ if (val==1) {
+ printf("pl pmic powerkey Release\n");
+ return 0;
+ } else {
+ printf("pl pmic powerkey Press\n");
+ return 1;
+ }
+}
+
/*
 * Boot-time register setup for the MT6398: test-mode entry, watchdog
 * reset enable, slew-rate select and low-power (SUSPEND-pin) behavior.
 * Logs success only if every write succeeded.
 */
void PMIC_INIT_SETTING_V1(void)
{
    unsigned int ret = 0;

    /* BUGFIX: accumulate each status with |= — the original overwrote
     * `ret` on every call, so a failure in any write except the last one
     * was silently reported as success. */
    ret |= pmic_config_interface(0x40, 0x98, 0xFF, 0); //enter test mode
    ret |= pmic_config_interface(0x4C, 0x1, 0xFF, 0);  //disable reset i2c slave function

    ret |= pmic_config_interface(0x0B, 0x1, 0x01, 3);  //WDOG_RST_EN enable watchdog reset
    ret |= pmic_config_interface(0x45, 0x0, 0x03, 6);  //CH1_SR set slew rate to 20mV/us

    /*low power setting*/
    ret |= pmic_config_interface(0x0C, 0x1, 0x01, 7);  //SLEEP_SEL enter sleep mode control by pin SUSPEND
    ret |= pmic_config_interface(0x0C, 0x0, 0x01, 1);  //set vproc poweroff when enter suspend mode

    if (ret)
        printf("[pmic6398_init] PMIC MT6398 init setting fail\n");
    else
        printf("[pmic6398_init] PMIC MT6398 init setting success\n");
}
+
+//==============================================================================
+// PMIC6398 Init Code
+//==============================================================================
/* Entry point: program the MT6398 boot-time settings. */
void pmic_init_mt6398 (void)
{
    printf("[pmic6398_init] Preloader INIT Start..................\n");

    PMIC_INIT_SETTING_V1();

    printf("[pmic6398_init] Preloader INIT Done...................\n");
}
+
diff --git a/src/bsp/lk/platform/mt8512/drivers/pmic/pwm_buck.c b/src/bsp/lk/platform/mt8512/drivers/pmic/pwm_buck.c
new file mode 100644
index 0000000..f291072
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/pmic/pwm_buck.c
@@ -0,0 +1,245 @@
+/* Copyright Statement:
+ *
+ * This software/firmware and related documentation ("MediaTek Software") are
+ * protected under relevant copyright laws. The information contained herein
+ * is confidential and proprietary to MediaTek Inc. and/or its licensors.
+ * Without the prior written permission of MediaTek inc. and/or its licensors,
+ * any reproduction, modification, use or disclosure of MediaTek Software,
+ * and information contained herein, in whole or in part, shall be strictly prohibited.
+ */
+/* MediaTek Inc. (C) 2018. All rights reserved.
+ *
+ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
+ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
+ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
+ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
+ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
+ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
+ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
+ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
+ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
+ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
+ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
+ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
+ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
+ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
+ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
+ */
+#include <platform/pwm-buck.h>
+#include <platform/pwm_buck_property.h>
+#include <platform/pwm.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+#include <reg.h>
+
+struct pwm_buck pwm_buck_prop[2];
+
+/*actually when voltage is adjusted, pwm is enabled*/
/* There is no readable enable status for the PWM buck; by convention the
 * rail counts as enabled once a voltage has been programmed, so always
 * report "on". */
static int pwm_is_enabled(void)
{
    return 1;
}
+
+static int _regulator_get_voltage(unsigned int n, struct pwm_volt_map *table, unsigned int pwm_num)
+{
+ unsigned int pwm_period;
+ unsigned int duty_cycle_period;
+ unsigned int duty_cycle;
+ unsigned int i;
+
+ pwm_period = pwm_get_period(pwm_num); //pwm_period is the clk number in one period
+
+ duty_cycle_period = pwm_get_duty_cycle(pwm_num); //duty_cycle_period is the clk number in one period for the high level
+
+ duty_cycle = ((duty_cycle_period * 100) + pwm_period - 1) / pwm_period;
+
+ for (i = 0; i < n; i++) {
+ if (table[i].duty_cycle == duty_cycle)
+ return table[i].volt;
+ }
+
+ BUCKERR("Do not recognize the duty_cycle and voltage mapping relation!\n");
+
+ return -BUCK_EINVAL;
+}
+
+static int _regulator_set_voltage(unsigned int n, struct pwm_volt_map *table, unsigned int volt, unsigned int pwm_num)
+{
+ unsigned int pwm_period;
+ unsigned int duty_cycle_period;
+ unsigned int duty_cycle;
+ unsigned int i;
+
+ pwm_period = pwm_get_period(pwm_num);
+
+ if (table[0].volt >= volt) {
+ duty_cycle = table[0].duty_cycle;
+ goto set_volt;
+ }
+
+ if (table[n-1].volt <= volt) {
+ duty_cycle = table[n-1].duty_cycle;
+ goto set_volt;
+ }
+
+ for (i = 0; i < n-1; i++) {
+ if ((table[i].volt < volt) && (table[i+1].volt >= volt)) {
+ duty_cycle = table[i+1].duty_cycle;
+ goto set_volt;
+ }
+ }
+
+ return -BUCK_EINVAL;
+
+set_volt:
+ duty_cycle_period = pwm_period * duty_cycle / 100;
+
+ pwm_set_duty(pwm_num, duty_cycle_period);
+ pwm_enable(pwm_num);
+
+ return BUCK_OK;
+}
+
/* Thin wrapper so the public API never calls pwm_is_enabled() directly. */
static int _regulator_is_enabled(void)
{
    return pwm_is_enabled();
}
+
/* Switch on the PWM output backing the buck. Always returns 0.
 * NOTE(review): the 'enable' argument is ignored — the channel is only
 * ever switched on, never off; confirm whether disable support is
 * intended here. */
static int _regulator_enable(unsigned int enable, unsigned int pwm_num)
{
    pwm_enable(pwm_num);

    return 0;
}
+
+int regulator_get_voltage()
+{
+ int ret = 0;
+
+ ret = _regulator_get_voltage(pwm_buck_prop[0].n_table, pwm_buck_prop[0].table, PWM_NUM_VCORE);
+ if (ret < 0)
+ {
+ BUCKERR("[regulator_get_voltage] _regulator_get_voltage fail, ret = %d!\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+int regulator_set_voltage(unsigned int volt)
+{
+ int ret = 0;
+
+ ret = _regulator_set_voltage(pwm_buck_prop[0].n_table, pwm_buck_prop[0].table, volt, PWM_NUM_VCORE);
+ if (ret < 0)
+ {
+ BUCKERR("[regulator_set_voltage] _regulator_set_voltage fail, ret = %d!\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+
/*
 * Public enable-state query for the buck.
 * Returns 1/0 for on/off, negative (logged) on failure.
 */
int regulator_is_enabled()
{
    int state = _regulator_is_enabled();

    if (state < 0)
        BUCKERR("[regulator_is_enabled] _regulator_is_enabled fail, ret = %d!\n", state);

    return state;
}
+
+
+int regulator_enable(int enable)
+{
+ int is_enabled = 0;
+ int ret = 0;
+
+ is_enabled = regulator_is_enabled();
+
+ if (is_enabled < 0)
+ {
+ BUCKERR("[regulator_enable] regulator_is_enabled fail, ret = %d!\n", is_enabled);
+ return is_enabled;
+ }
+
+ if (is_enabled == enable)
+ {
+ BUCKERR("[regulator_enable] regulator is already %d!\n", enable);
+ return BUCK_OK;
+ }
+
+ ret = _regulator_enable(enable, PWM_NUM_VCORE);
+ if (ret < 0)
+ {
+ BUCKERR("[regulator_enable] _regulator_enable fail, ret = %d!\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+
/*
 * One-time Vcore PWM-buck setup:
 *  - attach the duty<->voltage mapping table,
 *  - program the PWM base frequency from PWM_PERIOD_INIT,
 *  - switch the Vcore pad's pinmux field to its PWM function,
 *  - start with duty 0 (per the comment below, the 0.8 V level).
 * Returns BUCK_OK on success, -BUCK_EINVAL if no period is configured.
 */
static int PWM_BUCK_VCORE_INIT_SETTING(void)
{
    uint32_t gpio_reg;

    pwm_buck_prop[0].table = vcore_map;
    pwm_buck_prop[0].n_table = sizeof(vcore_map) / sizeof(struct pwm_volt_map);

    /* set pwm period*/
    pwm_buck_prop[0].pwm_period = PWM_PERIOD_INIT;
    if (!pwm_buck_prop[0].pwm_period) {
        BUCKERR("There is no vcore pwm buck period!\n");
        goto err;
    }
    pwm_config_freq(PWM_NUM_VCORE, pwm_buck_prop[0].pwm_period);

    /* Read-modify-write of the pinmux register: clear the 3-bit mode
     * field at GPIO_VCORE_PWM_OFFSET, then select the PWM function. */
    gpio_reg = (readl(GPIO_VCORE_BASE) &
        (~(0x7 << GPIO_VCORE_PWM_OFFSET))) |
        (GPIO_VCORE_PWM_MODE << GPIO_VCORE_PWM_OFFSET);
    writel(gpio_reg, GPIO_VCORE_BASE);

    pwm_set_duty(PWM_NUM_VCORE, 0); //for 0.8V
    pwm_enable(PWM_NUM_VCORE);

    return BUCK_OK;
err:
    return -BUCK_EINVAL;
}
+
+
+//==============================================================================
+// BUCK RT5748 Init Code
+//==============================================================================
+int pwm_buck_init (void)
+{
+ int ret;
+
+ printf("[pwm_buck_init] Init Start..................\n");
+
+ /*Do the initial setting for struct pwm_buck*/
+ //memset(pwm_buck_prop, 0, sizeof(struct pwm_buck)*2);
+
+ ret = PWM_BUCK_VCORE_INIT_SETTING();
+ if (ret)
+ goto err1;
+
+ return BUCK_OK;
+
+err1:
+ return -BUCK_EINVAL;
+}
+
diff --git a/src/bsp/lk/platform/mt8512/drivers/pmic/rt5748.c b/src/bsp/lk/platform/mt8512/drivers/pmic/rt5748.c
new file mode 100644
index 0000000..0bb2f97
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/pmic/rt5748.c
@@ -0,0 +1,220 @@
+/* Copyright Statement:
+ *
+ * This software/firmware and related documentation ("MediaTek Software") are
+ * protected under relevant copyright laws. The information contained herein
+ * is confidential and proprietary to MediaTek Inc. and/or its licensors.
+ * Without the prior written permission of MediaTek inc. and/or its licensors,
+ * any reproduction, modification, use or disclosure of MediaTek Software,
+ * and information contained herein, in whole or in part, shall be strictly prohibited.
+ */
+/* MediaTek Inc. (C) 2015. All rights reserved.
+ *
+ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
+ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
+ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
+ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
+ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
+ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
+ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
+ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
+ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
+ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
+ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
+ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
+ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
+ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
+ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
+ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
+ */
+
+#include <platform/mt_i2c.h>
+#include <platform/rt5748.h>
+
+#define VOLT_TO_BUCK_VAL(volt) (((volt) - 300000 + 5000 - 1) / 5000)
+#define BUCK_VAL_TO_VOLT(val) (((val) * 5000) + 300000)
+
+/**********************************************************
+ *
+ * [I2C Function For Read/Write rt5749]
+ *
+ *********************************************************/
+u32 rt5749_write_byte(u8 addr, u8 value, u8 slave_addr, u8 i2c_bus)
+{
+ int ret_code = 0;
+ u8 write_data[2];
+ u8 len;
+
+ write_data[0]= addr;
+ write_data[1] = value;
+
+ len = 2;
+
+ ret_code = mtk_i2c_write(i2c_bus, slave_addr, 400, write_data, len);
+
+ if(ret_code == 0)
+ return 0; // ok
+ else
+ return -1; // fail
+}
+
+u32 rt5749_read_byte (u8 addr, u8 *dataBuffer, u8 slave_addr, u8 i2c_bus)
+{
+ int ret_code = 0;
+ u8 len;
+ *dataBuffer = addr;
+
+ len = 1;
+
+ ret_code = mtk_i2c_write_read(i2c_bus, slave_addr, 400,
+ dataBuffer, dataBuffer, len, len);
+
+ if(ret_code == 0)
+ return 0; // ok
+ else
+ return -1; // fail
+}
+
+/**********************************************************
+ *
+ * [Read / Write Function]
+ *
+ *********************************************************/
+u32 rt5749_read_interface (u8 RegNum, u8 *val, u8 MASK, u8 SHIFT, u8 slave_addr, u8 i2c_bus)
+{
+ u8 rt5749_reg = 0;
+ u32 ret = 0;
+
+ ret = rt5749_read_byte(RegNum, &rt5749_reg, slave_addr, i2c_bus);
+
+ rt5749_reg &= (MASK << SHIFT);
+ *val = (rt5749_reg >> SHIFT);
+
+ return ret;
+}
+
+u32 rt5749_config_interface (u8 RegNum, u8 val, u8 MASK, u8 SHIFT, u8 slave_addr, u8 i2c_bus)
+{
+ u8 rt5749_reg = 0;
+ u32 ret = 0;
+
+ ret = rt5749_read_byte(RegNum, &rt5749_reg, slave_addr, i2c_bus);
+
+ rt5749_reg &= ~(MASK << SHIFT);
+ rt5749_reg |= (val << SHIFT);
+
+ ret = rt5749_write_byte(RegNum, rt5749_reg, slave_addr, i2c_bus);
+
+ return ret;
+}
+
+int rt5749_regulator_get_voltage(BUCK_USER_TYPE type)
+{
+ int ret = 0;
+ u8 val;
+ int volt;
+
+ switch (type)
+ {
+ case VCORE:
+ ret = rt5749_read_interface(RT5749_REG_VSEL1, &val, rt5749_vselh_vol_mask, rt5749_vselh_vol_shift,
+ rt5749_VCORE_SLAVE_ADDR, rt5749_VCORE_I2C_ID);
+ if (ret < 0)
+ {
+ printf("[regulator_get_voltage] _regulator_get_voltage fail, ret = %d!\n", ret);
+ return ret;
+ }
+ volt = BUCK_VAL_TO_VOLT(val);
+ break;
+ case VCORE_SRAM:
+ ret = rt5749_read_interface(RT5749_REG_VSEL1, &val, rt5749_vselh_vol_mask, rt5749_vselh_vol_shift,
+ rt5749_VCORE_SRAM_SLAVE_ADDR, rt5749_VCORE_SRAM_I2C_ID);
+ if (ret < 0)
+ {
+ printf("[regulator_get_voltage] _regulator_get_voltage fail, ret = %d!\n", ret);
+ return ret;
+ }
+ volt = BUCK_VAL_TO_VOLT(val);
+ break;
+ case VPROC_SRAM:
+ ret = rt5749_read_interface(RT5749_REG_VSEL1, &val, rt5749_vselh_vol_mask, rt5749_vselh_vol_shift,
+ rt5749_VPROC_SRAM_SLAVE_ADDR, rt5749_VPROC_SRAM_I2C_ID);
+ if (ret < 0)
+ {
+ printf("[regulator_get_voltage] _regulator_get_voltage fail, ret = %d!\n", ret);
+ return ret;
+ }
+ volt = BUCK_VAL_TO_VOLT(val);
+ break;
+ default:
+ printf("BUCK_USER_TYPE don't support! Please use or VCORE or VCORE_SRAM or VPROC_SRAM\n");
+ return -1;
+ }
+
+ return volt;
+}
+
+int rt5749_regulator_set_voltage(BUCK_USER_TYPE type, unsigned int volt)
+{
+ int ret = 0;
+ u8 val;
+
+ val = VOLT_TO_BUCK_VAL(volt);
+
+ switch (type)
+ {
+ case VCORE:
+ ret = rt5749_config_interface(RT5749_REG_VSEL1, val, rt5749_vselh_vol_mask, rt5749_vselh_vol_shift,
+ rt5749_VCORE_SLAVE_ADDR, rt5749_VCORE_I2C_ID);
+ if (ret < 0)
+ {
+ printf("[regulator_set_voltage] _regulator_get_voltage fail, ret = %d!\n", ret);
+ return ret;
+ }
+ break;
+ case VCORE_SRAM:
+ ret = rt5749_config_interface(RT5749_REG_VSEL1, val, rt5749_vselh_vol_mask, rt5749_vselh_vol_shift,
+ rt5749_VCORE_SRAM_SLAVE_ADDR, rt5749_VCORE_SRAM_I2C_ID);
+ if (ret < 0)
+ {
+ printf("[regulator_set_voltage] _regulator_get_voltage fail, ret = %d!\n", ret);
+ return ret;
+ }
+ break;
+ case VPROC_SRAM:
+ ret = rt5749_config_interface(RT5749_REG_VSEL1, val, rt5749_vselh_vol_mask, rt5749_vselh_vol_shift,
+ rt5749_VPROC_SRAM_SLAVE_ADDR, rt5749_VPROC_SRAM_I2C_ID);
+ if (ret < 0)
+ {
+ printf("[regulator_set_voltage] _regulator_get_voltage fail, ret = %d!\n", ret);
+ return ret;
+ }
+ break;
+ default:
+ printf("BUCK_USER_TYPE don't support! Please use or VCORE or VCORE_SRAM or VPROC_SRAM\n");
+ return -1;
+ }
+
+ return ret;
+}
+
+
/* No boot-time register writes are required for the RT5749; kept as a
 * hook so rt5749_init() has a place for future init sequences. */
void RT5749_INIT_SETTING_V1(void)
{
}
+
+//==============================================================================
+// BUCK RT5749 Init Code
+//==============================================================================
/* Entry point: apply the (currently empty) RT5749 boot settings. */
void rt5749_init (void)
{
    printf("[buck5749_init] Preloader Start..................\n");

    RT5749_INIT_SETTING_V1();

    printf("[buck5749_init] Done...................\n");
}
+
diff --git a/src/bsp/lk/platform/mt8512/drivers/pwm/pwm.c b/src/bsp/lk/platform/mt8512/drivers/pwm/pwm.c
new file mode 100644
index 0000000..2db5579
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/pwm/pwm.c
@@ -0,0 +1,548 @@
+#include <platform/mt8512.h>
+#include <platform/pwm.h>
+#include <platform/mt_reg_base.h>
+#include <platform/pll.h>
+#include <reg.h>
+
+void pwm_dump(int pwm_no);
+
+#define u32 unsigned int
+
+#define PRINTF_I printf
+#define PRINTF_W printf
+#define PRINTF_E printf
+
+#ifndef BIT
+#define BIT(_bit_) (u32)(1U << (_bit_))
+#endif
+
+#if 0
+#define DRV_Reg32(addr) (*(volatile u32 *)(addr))
+#define DRV_WriteReg32(addr,data) ((*(volatile u32 *)(addr)) = (u32)data)
+#define DRV_SetReg32(REG,BS) ((*(volatile u32*)(REG)) |= (u32)(BS))
+#define DRV_ClrReg32(REG,BS) ((*(volatile u32*)(REG)) &= ~((u32)(BS)))
+#else
+#define DRV_Reg32(addr) readl(addr)
+#define DRV_WriteReg32(addr,data) writel(data, addr)
+#define DRV_SetReg32(addr,BS) writel((readl(addr)) | (BS), addr)
+#define DRV_ClrReg32(addr,BS) writel((readl(addr)) & ~(BS), addr)
+#endif
+
+#define PWM_EN_REG 0x0000
+#define PWMCON 0x00
+#define PWMGDUR 0x0c
+#define PWMWAVENUM 0x28
+#define PWMDWIDTH 0x2c
+#define PWMTHRES 0x30
+#define PWM_SEND_WAVENUM 0x34
+#define PWM_CK_SEL 0x210
+
+#define PWM_BASE_ADDR PWM_BASE
+#define CLK_BASE_ADDR CKSYS_BASE
+
+#define PWM_CLK_DIV_MAX 7
+#define PWM_NUM_MAX 7
+
+#define PWM_CLK_NAME_MAIN "main"
+
+#define ENOMEM 12 /* Out of memory */
+#define ENODEV 19 /* No such device */
+#define EINVAL 22 /* Invalid argument */
+
+#define CLK_API_IS_READY 0
+
+/*
+static const char * const pwm_clk_name[PWM_NUM_MAX] = {
+ "pwm0", "pwm1", "pwm2", "pwm3", "pwm4", "pwm5", "pwm6",
+};
+*/
+
+/*==========================================*/
+static const unsigned long pwm_com_register[] = {
+ 0x0010, 0x0050, 0x0090, 0x00d0, 0x0110, 0x0150, 0x0190
+};
+/*==========================================*/
+
+static int pwm_duties[PWM_NUM_MAX];
+static int pwm_periods[PWM_NUM_MAX];
+
+static int pwm_flag;
+
/* Source-clock rate of a PWM channel. Fixed 26 MHz for now; pwm_no is
 * accepted so per-channel clock sources can be added later. */
static int pwm_get_clk_rate(int pwm_no)
{
    (void)pwm_no;   /* single clock source today */

    return 26000000;
}
+
/*
 * Ungate the PWM main (BIT(21)) and top (BIT(15)) clocks.
 * NOTE(review): writes go to CLK base +0x84, which appears to be the
 * gate-clear register (the disable path uses +0x80) — confirm against
 * the MT8512 clock spec. Direct register pokes are used until the clock
 * API is available (CLK_API_IS_READY).
 */
static void pwm_enable_main_clock(void)
{
#if CLK_API_IS_READY
#else
    DRV_SetReg32(CLK_BASE_ADDR + 0x84, BIT(21));
    DRV_SetReg32(CLK_BASE_ADDR + 0x84, BIT(15));
#endif
}
+
/*
 * Gate the PWM top (BIT(15)) and main (BIT(21)) clocks again, in the
 * reverse order of pwm_enable_main_clock().
 * NOTE(review): writes go to CLK base +0x80, presumably the gate-set
 * register — confirm against the MT8512 clock spec.
 */
static void pwm_disable_main_clock(void)
{
#if CLK_API_IS_READY
#else
    DRV_SetReg32(CLK_BASE_ADDR + 0x80, BIT(15));
    DRV_SetReg32(CLK_BASE_ADDR + 0x80, BIT(21));
#endif
}
+
/*
 * Ungate the feedback clock of one PWM channel.
 * Channels 0-4 live in CLK base +0x84 (bits 16-20), channels 5-6 in
 * +0xA8 (bits 0-1); out-of-range channels are silently ignored.
 * NOTE(review): register offsets/bits taken as-is — verify against the
 * MT8512 clock spec before reuse.
 */
static void pwm_enable_fbclk(int pwm_no)
{
#if CLK_API_IS_READY
#else
    switch(pwm_no) {
    case 0:
        DRV_SetReg32(CLK_BASE_ADDR + 0x84, BIT(16));
        break;

    case 1:
        DRV_SetReg32(CLK_BASE_ADDR + 0x84, BIT(17));
        break;

    case 2:
        DRV_SetReg32(CLK_BASE_ADDR + 0x84, BIT(18));
        break;

    case 3:
        DRV_SetReg32(CLK_BASE_ADDR + 0x84, BIT(19));
        break;

    case 4:
        DRV_SetReg32(CLK_BASE_ADDR + 0x84, BIT(20));
        break;

    case 5:
        DRV_SetReg32(CLK_BASE_ADDR + 0xA8, BIT(0));
        break;

    case 6:
        DRV_SetReg32(CLK_BASE_ADDR + 0xA8, BIT(1));
        break;

    default:
        break;
    }
#endif
}
+
/*
 * Gate the feedback clock of one PWM channel again; mirror of
 * pwm_enable_fbclk() but writing the companion registers
 * (+0x80 for channels 0-4, +0xA4 for channels 5-6).
 * Out-of-range channels are silently ignored.
 */
static void pwm_disable_fbclk(int pwm_no)
{
#if CLK_API_IS_READY
#else
    switch(pwm_no) {
    case 0:
        DRV_SetReg32(CLK_BASE_ADDR + 0x80, BIT(16));
        break;

    case 1:
        DRV_SetReg32(CLK_BASE_ADDR + 0x80, BIT(17));
        break;

    case 2:
        DRV_SetReg32(CLK_BASE_ADDR + 0x80, BIT(18));
        break;

    case 3:
        DRV_SetReg32(CLK_BASE_ADDR + 0x80, BIT(19));
        break;

    case 4:
        DRV_SetReg32(CLK_BASE_ADDR + 0x80, BIT(20));
        break;

    case 5:
        DRV_SetReg32(CLK_BASE_ADDR + 0xA4, BIT(0));
        break;

    case 6:
        DRV_SetReg32(CLK_BASE_ADDR + 0xA4, BIT(1));
        break;

    default:
        break;
    }
#endif
}
+
/*
 * Enable clocks for one PWM channel: the shared main/top clocks plus the
 * channel's feedback clock. The channel is recorded in pwm_flag so
 * pwm_clk_disable() knows when the main clock may be gated again.
 * Always returns 0.
 */
static int pwm_clk_enable(int pwm_no)
{
    int ret = 0;

    pwm_enable_main_clock();
    pwm_enable_fbclk(pwm_no);
    pwm_flag |= BIT(pwm_no);

    /* Trace gate status read back from CLK base +0x90; only the main/top
     * bits and feedback clocks 0-3 are shown. */
    PRINTF_I("pwm_clk_enable:main:0x%x, top:0x%x, fbclk0:%d, fbclk1:%d, fbclk2:%d, fbclk3:%d\n",
        DRV_Reg32(CLK_BASE_ADDR + 0x90) & BIT(21),
        DRV_Reg32(CLK_BASE_ADDR + 0x90) & BIT(15),
        DRV_Reg32(CLK_BASE_ADDR + 0x90) & BIT(16),
        DRV_Reg32(CLK_BASE_ADDR + 0x90) & BIT(17),
        DRV_Reg32(CLK_BASE_ADDR + 0x90) & BIT(18),
        DRV_Reg32(CLK_BASE_ADDR + 0x90) & BIT(19));
    return ret;
}
+
+static void pwm_clk_disable(int pwm_no)
+{
+ pwm_disable_fbclk(pwm_no);
+ pwm_flag &= (~ BIT(pwm_no));
+
+ if(pwm_flag == 0)
+ pwm_disable_main_clock();
+
+ PRINTF_I("pwm_clk_enable:main:0x%x, top:0x%x, fbclk0:%d, fbclk1:%d, fbclk2:%d, fbclk3:%d\n",
+ DRV_Reg32(CLK_BASE_ADDR + 0x90) & BIT(21),
+ DRV_Reg32(CLK_BASE_ADDR + 0x90) & BIT(15),
+ DRV_Reg32(CLK_BASE_ADDR + 0x90) & BIT(16),
+ DRV_Reg32(CLK_BASE_ADDR + 0x90) & BIT(17),
+ DRV_Reg32(CLK_BASE_ADDR + 0x90) & BIT(18),
+ DRV_Reg32(CLK_BASE_ADDR + 0x90) & BIT(19));
+}
+
+static inline u32 pwm_readl(int pwm_no, unsigned long offset)
+{
+ void *reg = (void *)(PWM_BASE_ADDR + pwm_com_register[pwm_no] + offset);
+
+ return DRV_Reg32(reg);
+}
+
+static inline void pwm_writel(int pwm_no, unsigned long offset, unsigned int val)
+{
+ void *reg = (void *)(PWM_BASE_ADDR + pwm_com_register[pwm_no] + offset);
+
+ DRV_WriteReg32(reg, val);
+}
+
+#if 0
+static int pwm_config(int pwm_no, int duty_ns, int period_ns)
+{
+ u32 value;
+ int resolution;
+ u32 clkdiv = 0;
+ u32 clksrc_rate;
+
+ int data_width, thresh;
+
+ pwm_clk_enable(pwm_no);
+
+ /* this use pwm clock, not fixed 26M: so the period_ns and duty_ns is not as what you want from 26M clock...*/
+ clksrc_rate = pwm_get_clk_rate(pwm_no);
+ resolution = 1000000000 / clksrc_rate;
+
+ while (period_ns / resolution > 8191) {
+ clkdiv++;
+ resolution *= 2;
+ }
+
+ if (clkdiv > PWM_CLK_DIV_MAX) {
+ PRINTF_E("period %d not supported\n", period_ns);
+ return -EINVAL;
+ }
+
+ data_width = period_ns / resolution;
+ thresh = duty_ns / resolution;
+
+ if(data_width > 1)
+ --data_width;
+ if(thresh >= 1)
+ --thresh;
+
+ value = pwm_readl(pwm_no, PWMCON);
+ value = value | BIT(15) | clkdiv;
+ pwm_writel(pwm_no, PWMCON, value);
+
+ pwm_writel(pwm_no, PWMDWIDTH, data_width);
+ pwm_writel(pwm_no, PWMTHRES, thresh);
+
+ pwm_dump(pwm_no);
+
+ pwm_clk_disable(pwm_no);
+
+ return 0;
+}
+#endif
+
+/*
+ * Configure PWM channel `pwm_no` to output frequency `freq` (in Hz, up to
+ * the 26 MHz source clock) with a minimal duty (threshold 0).
+ * Records the derived period in pwm_periods[] for pwm_get_period().
+ * Returns 0 on success, -EINVAL for an unrepresentable frequency.
+ */
+int pwm_config_freq(int pwm_no, int freq)
+{
+	u32 value;
+	u32 resolution = 1;
+	u32 clkdiv = 0;
+	u32 clksrc_rate;
+
+	u32 data_width, thresh;
+
+	if (freq <= 0 || freq > 26000000) {
+		PRINTF_E("freq %d not supported\n", freq);
+		return -EINVAL;
+	}
+
+	pwm_clk_enable(pwm_no);
+
+	/* currently: fixed 26M */
+	clksrc_rate = pwm_get_clk_rate(pwm_no);
+	data_width = clksrc_rate / freq;
+
+	/*
+	 * The width register holds at most 8191 ticks.  BUGFIX: the original
+	 * loop tested the unchanging `data_width` against 8191 and never
+	 * terminated for low frequencies; compare the effective width after
+	 * the power-of-two divider instead.
+	 */
+	while (data_width / resolution > 8191) {
+		clkdiv++;
+		resolution *= 2;
+	}
+
+	if (clkdiv > PWM_CLK_DIV_MAX) {
+		PRINTF_E("clkdiv %d not supported\n", clkdiv);
+		pwm_clk_disable(pwm_no);	/* BUGFIX: balance pwm_clk_enable() above */
+		return -EINVAL;
+	}
+
+	data_width = data_width / resolution;
+	thresh = 0;
+
+	/* Remember the raw (1-based) period/duty for the pwm_get_* queries. */
+	pwm_periods[pwm_no] = data_width;
+	pwm_duties[pwm_no] = 1;
+
+	/* Hardware counts data_width+1 ticks, so program the value minus one. */
+	if (data_width > 1)
+		--data_width;
+
+	value = pwm_readl(pwm_no, PWMCON);
+	/* BIT(15) plus divider in the low bits -- presumably mode select; confirm against datasheet. */
+	value = value | BIT(15) | clkdiv;
+	pwm_writel(pwm_no, PWMCON, value);
+
+	pwm_writel(pwm_no, PWMDWIDTH, data_width);
+	pwm_writel(pwm_no, PWMTHRES, thresh);
+
+	pwm_dump(pwm_no);
+
+	pwm_clk_disable(pwm_no);
+
+	return 0;
+}
+
+/*
+ * Set the duty (threshold ticks) of channel `pwm_no`.  `duty` must fit the
+ * 13-bit threshold register and not exceed the configured period.
+ * Returns 0 on success, -EINVAL on out-of-range duty.
+ */
+int pwm_set_duty(int pwm_no, int duty)
+{
+	if (duty < 0 || duty > 8191 || (pwm_periods[pwm_no] > 0 && duty > pwm_periods[pwm_no])) {
+		PRINTF_E("duty %d not supported(period:%u)\n", duty, pwm_periods[pwm_no]);
+		return -EINVAL;
+	}
+
+	pwm_clk_enable(pwm_no);
+
+	pwm_duties[pwm_no] = duty;
+	/* Hardware counts duty+1 ticks, so program the value minus one. */
+	if (duty >= 1)
+		--duty;
+
+	PRINTF_E("pwm%d: set duty:%d\n", pwm_no, duty);
+	pwm_writel(pwm_no, PWMTHRES, duty);
+
+	pwm_dump(pwm_no);
+
+	/*
+	 * BUGFIX: the original called pwm_disable() here, which clears the
+	 * channel enable bit and therefore stopped a running PWM on every
+	 * duty update.  Only the clock gate taken above needs releasing.
+	 */
+	pwm_clk_disable(pwm_no);
+
+	return 0;
+}
+
+/* Return the configured period (in ticks) of `pwm_no`, or -1 if out of range. */
+int pwm_get_period(int pwm_no)
+{
+	if (pwm_no >= 0 && pwm_no < PWM_NUM_MAX)
+		return pwm_periods[pwm_no];
+
+	PRINTF_E("pwm_no: %d is too big!\n", pwm_no);
+	return -1;
+}
+
+/* Return the last duty (in ticks) set on `pwm_no`, or -1 if out of range. */
+int pwm_get_duty_cycle(int pwm_no)
+{
+	if (pwm_no >= 0 && pwm_no < PWM_NUM_MAX)
+		return pwm_duties[pwm_no];
+
+	PRINTF_E("pwm_no: %d is too big!\n", pwm_no);
+	return -1;
+}
+
+/* Ungate the channel clock and set the enable bit for `pwm_no`; 0 on success. */
+int pwm_enable(int pwm_no)
+{
+	u32 en;
+
+	if (pwm_no < 0 || pwm_no >= PWM_NUM_MAX) {
+		PRINTF_E("pwm_no: %d is too big!\n", pwm_no);
+		return -1;
+	}
+
+	pwm_clk_enable(pwm_no);
+
+	/* One enable bit per channel in the shared PWM_EN register. */
+	en = DRV_Reg32(PWM_BASE_ADDR + PWM_EN_REG) | BIT(pwm_no);
+	DRV_WriteReg32(PWM_BASE_ADDR + PWM_EN_REG, en);
+
+	return 0;
+}
+
+/* Clear the enable bit for `pwm_no` and gate its clock; 0 on success. */
+int pwm_disable(int pwm_no)
+{
+	u32 en;
+
+	if (pwm_no < 0 || pwm_no >= PWM_NUM_MAX) {
+		PRINTF_E("pwm_no: %d is too big!\n", pwm_no);
+		return -1;
+	}
+
+	en = DRV_Reg32(PWM_BASE_ADDR + PWM_EN_REG) & ~BIT(pwm_no);
+	DRV_WriteReg32(PWM_BASE_ADDR + PWM_EN_REG, en);
+
+	pwm_clk_disable(pwm_no);
+
+	return 0;
+}
+
+/* Read and log the sent-wave counter of `pwm_no`; returns it, or -1 if out of range. */
+int pwm_get_send_wavenums(int pwm_no)
+{
+	u32 wavenum;
+
+	if (pwm_no < 0 || pwm_no >= PWM_NUM_MAX) {
+		PRINTF_E("pwm_no: %d is too big!\n", pwm_no);
+		return -1;
+	}
+
+	wavenum = pwm_readl(pwm_no, PWM_SEND_WAVENUM);
+	PRINTF_I("pwm%d: send wavenum:%u\n", pwm_no, wavenum);
+
+	return (int)wavenum;
+}
+
+/*
+ * Log the live register state of channel `pwm_no`: sent-wave count, the
+ * duty/period ratio computed from the threshold and width registers, and
+ * the raw CON / EN / CLK_SEL values.  No-op (with an error log) for an
+ * out-of-range channel.
+ *
+ * NOTE(review): assumes the channel clock is already ungated so the
+ * register reads are valid -- confirm against callers.
+ */
+void pwm_dump(int pwm_no)
+{
+ u32 value;
+
+ if(pwm_no < 0 || pwm_no >= PWM_NUM_MAX) {
+ PRINTF_E("pwm_no: %d is too big!\n", pwm_no);
+ return;
+ }
+
+ value = pwm_readl(pwm_no, PWM_SEND_WAVENUM);
+
+ /* Registers hold (ticks - 1), hence the +1 on both terms of the ratio. */
+ PRINTF_I("pwm%d: send wavenum:%u, duty/period:%d%%\n", pwm_no, value,
+ (pwm_readl(pwm_no, PWMTHRES) + 1) * 100 / (pwm_readl(pwm_no, PWMDWIDTH) + 1));
+ PRINTF_I("\tDATA_WIDTH:%u, THRESH:%u, CON:0x%x, EN:0x%x, CLK_SEL:0x%x\n",
+ pwm_readl(pwm_no, PWMDWIDTH),
+ pwm_readl(pwm_no, PWMTHRES),
+ pwm_readl(pwm_no, PWMCON),
+ DRV_Reg32(PWM_BASE_ADDR + PWM_EN_REG),
+ DRV_Reg32(PWM_BASE_ADDR + PWM_CK_SEL));
+}
+
+/* Dump every channel that has sent at least one wave; warn about the rest. */
+void pwm_dump_all(void)
+{
+	int ch;
+
+	for (ch = 0; ch < PWM_NUM_MAX; ch++) {
+		if (pwm_readl(ch, PWM_SEND_WAVENUM) > 0)
+			pwm_dump(ch);
+		else
+			PRINTF_W("pwm %d: no waves!\n", ch);
+	}
+}
+
+// set gpio_no to mode
+// return value: if return 0, means set susscessful, if return is not 0, means failed
+static int pwm_set_gpio_mode(int gpio_no, int mode)
+{
+ u32 uval;
+ PRINTF_I("pwm_set_gpio_mode +: sizeof(uintptr_t):%zu\n", sizeof(uintptr_t));
+ if(gpio_no == 88 && mode == 5) {
+ PRINTF_I("pwm_set_gpio_mode 88 5\n");
+ // switch GPIO88 to pwm mode 5: bit[11~9]: value: 5
+ uval = DRV_Reg32(GPIO_BASE + 0x410);
+ PRINTF_I("0x10005410: 0x%x\n", uval);
+
+ DRV_WriteReg32(GPIO_BASE + 0x410, (uval & 0xFFFFF1FF) | 0xA00);
+
+ uval = DRV_Reg32(GPIO_BASE + 0x410);
+ PRINTF_I("after set to gpio%d to pwm mode %d: 0x10005410: 0x%x\n", gpio_no, mode, uval);
+
+ // verify gpio88 is mode 5
+ if((uval & 0xA00) != 0xA00) {
+ PRINTF_E("gpio 88 is not mode 5!\n");
+ return -1;
+ }
+ return 0;
+ }
+ return -2;
+}
+
+// set the specified pwm to specified freq: eg: 0, 32000 means: pwm0, 32K
+// return value: if return 0, means set susscessful, if return is not 0, means failed
+static int pwm_set_pwm_freq(int pwm_no, int freq)
+{
+ int ret;
+ int val;
+
+ PRINTF_I("pwm_set_pwm_freq: pwm_no:%d, freq:%d\n", pwm_no, freq);
+
+ ret = pwm_config_freq(pwm_no, freq);
+ if(ret == 0) {
+ val = pwm_get_period(pwm_no);
+ ret = pwm_set_duty(pwm_no, val / 2); // duty is set to period / 2
+ if(ret == 0)
+ ret = pwm_enable(0);
+ else {
+ PRINTF_E("set duty failed!\n");
+ goto error;
+ }
+
+ if(ret == 0) {
+ PRINTF_I("enable pwm %d successfully!\n", 0);
+ } else {
+ PRINTF_E("enable pwm %d failed!\n", 0);
+ goto error;
+ }
+ } else {
+ PRINTF_E("config freq failed!\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ return -1;
+}
+
+// #define PWM_TEST
+void pwm_init (void)
+{
+#ifdef PWM_TEST
+ int ret;
+#endif
+
+ PRINTF_I("[pwm_init] Init Start..................\n");
+
+ #if CLK_API_IS_READY
+ // get clock here
+ #endif
+
+#ifdef PWM_TEST
+ ret = pwm_set_gpio_mode(88, 5);
+ if (ret != 0) {
+ PRINTF_E("set gpio 88 to mode 5 failed!\n");
+ return;
+ }
+ ret = pwm_set_pwm_freq(0, 1000000);
+ if (ret != 0) {
+ PRINTF_E("pwm_set_pwm_freq: set pwm_no:%d to freq:%d failed!\n", 0, 1000000);
+ return;
+ }
+
+ PRINTF_I("Now, we stop here to let you check: the pwm wave should be working!\n");
+
+ #if 0
+ while(1)
+ ;
+ #endif
+#endif
+}
+
diff --git a/src/bsp/lk/platform/mt8512/drivers/rules.mk b/src/bsp/lk/platform/mt8512/drivers/rules.mk
new file mode 100644
index 0000000..812c020
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/rules.mk
@@ -0,0 +1,62 @@
+# Build rules for the mt8512 platform drivers module (LK build system).
+LOCAL_DIR := $(GET_LOCAL_DIR)
+MODULE := $(LOCAL_DIR)
+
+
+# Always-built driver sources.
+MODULE_SRCS += \
+ $(LOCAL_DIR)/uart/uart.c \
+ $(LOCAL_DIR)/pll/pll.c \
+ $(LOCAL_DIR)/spm/spm_mtcmos.c \
+ $(LOCAL_DIR)/wdt/mtk_wdt.c \
+ $(LOCAL_DIR)/i2c/mt_i2c.c \
+ $(LOCAL_DIR)/usb/mtu3.c \
+ $(LOCAL_DIR)/usb/mtu3_qmu.c \
+ $(LOCAL_DIR)/gic/mt_gic_v3.c \
+ $(LOCAL_DIR)/gic/mt_gic.S \
+ $(LOCAL_DIR)/key/mtk_key.c \
+ $(LOCAL_DIR)/trng/mtk_trng.c \
+ $(LOCAL_DIR)/pwm/pwm.c \
+
+# SPI-NAND boot projects need the block/partition/NFTL stack and Nandx.
+ifeq ($(SPI_NAND_PROJECT), y)
+ MODULE_DEPS += \
+ lib/bio \
+ lib/partition \
+ lib/nftl
+
+ include $(LOCAL_DIR)/nandx/Nandx.mk
+endif
+
+# PMIC selection (board-dependent).
+ifeq ($(WITH_PMIC_MT6398), 1)
+ MODULE_SRCS += $(LOCAL_DIR)/pmic/pmic_6398.c
+endif
+
+ifeq ($(WITH_PMIC_BD71828), 1)
+ MODULE_SRCS += $(LOCAL_DIR)/pmic/bd71828.c
+endif
+
+ifeq ($(WITH_CLKSQ_OFF),1)
+ GLOBAL_DEFINES += WITH_CLKSQ_OFF=$(WITH_CLKSQ_OFF)
+endif
+
+# Vcore buck regulator control path (I2C- or PWM-driven).
+ifeq ($(WITH_VCORE_I2C_BUCK), 1)
+ MODULE_SRCS += $(LOCAL_DIR)/pmic/rt5748.c
+endif
+
+ifeq ($(WITH_VCORE_PWM_BUCK), 1)
+ MODULE_SRCS += $(LOCAL_DIR)/pmic/pwm_buck.c
+endif
+
+# USB PHY: external MD1122 over I2C, or the built-in PHY otherwise.
+ifeq ($(WITH_USB_MD1122), 1)
+ MODULE_SRCS += $(LOCAL_DIR)/usb/u3phy-i2c.c
+ MODULE_SRCS += $(LOCAL_DIR)/usb/md1122.c
+else
+ MODULE_SRCS += $(LOCAL_DIR)/usb/usbphy.c
+endif
+
+
+# Prebuilt device-info object linked into this module.
+MODULE_EXTRA_OBJS += \
+ $(LOCAL_DIR)/../lib/libdevinfo.o
+
+MODULE_DEPS += \
+ $(LOCAL_DIR)/../../../../dramk_8512/dram
+
+include make/module.mk
diff --git a/src/bsp/lk/platform/mt8512/drivers/scp/mt_scp.c b/src/bsp/lk/platform/mt8512/drivers/scp/mt_scp.c
new file mode 100644
index 0000000..97dba64
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/scp/mt_scp.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <app.h>
+#include <reg.h>
+#include <errno.h>
+#include <string.h>
+#include <lib/bio.h>
+#include <lib/mempool.h>
+#include <platform/mt8512.h>
+#include <platform/mt_reg_base.h>
+#include <platform/mt_scp.h>
+
+/* Start the SCP: set the CPUCK_EN clock-gate bit, then set CPU_RST_SW in the reset control register. */
+void start_scpsys(void)
+{
+	u32 val;
+
+	val = readl(SCP_BASE_CFG + CMSYS_CLKGAT_CTL) | CPUCK_EN;
+	writel(val, SCP_BASE_CFG + CMSYS_CLKGAT_CTL);
+
+	val = readl(SCP_BASE_CFG + CMSYS_RESET_CTL) | CPU_RST_SW;
+	writel(val, SCP_BASE_CFG + CMSYS_RESET_CTL);
+}
+
+/* Stop the SCP: clear CPU_RST_SW first, then gate the CPU clock (reverse of start_scpsys). */
+void stop_scpsys(void)
+{
+	u32 val;
+
+	val = readl(SCP_BASE_CFG + CMSYS_RESET_CTL) & ~CPU_RST_SW;
+	writel(val, SCP_BASE_CFG + CMSYS_RESET_CTL);
+
+	val = readl(SCP_BASE_CFG + CMSYS_CLKGAT_CTL) & ~CPUCK_EN;
+	writel(val, SCP_BASE_CFG + CMSYS_CLKGAT_CTL);
+}
+
+/*
+ * Read the SCP firmware header and payload from partition `name`.
+ * `fm_hdr` receives the header; `buf` (at least MAX_SCPSYS_SIZE bytes)
+ * receives the payload.  Returns 0 on success, a negative errno on failure.
+ *
+ * NOTE(review): the opened bdev is never bio_close()'d on any path --
+ * confirm whether LK requires releasing it.
+ */
+static int get_scpsys(const char *name, union fm_hdr_t *fm_hdr, void *buf)
+{
+	bdev_t *bdev;
+	/*
+	 * BUGFIX: bio_read() reports errors as negative values; the original
+	 * stored them in a size_t, so the `<= 0` checks could never catch a
+	 * read error.  Use a signed count.
+	 */
+	ssize_t nread;
+
+	bdev = bio_open_by_label(name);
+	if (!bdev) {
+		dprintf(CRITICAL, "Partition [%s] is not exist.\n", name);
+		return -ENODEV;
+	}
+
+	nread = bio_read(bdev, fm_hdr, 0, sizeof(union fm_hdr_t));
+	if (nread <= 0) {
+		dprintf(CRITICAL, "error reading scp header\n");
+		return (int)nread;
+	}
+
+	/*
+	 * This bound also guarantees the payload fits in `buf`; the original
+	 * repeated the dsize check a second time -- that branch was unreachable
+	 * and has been dropped.
+	 */
+	if (fm_hdr->info.magic != PART_MAGIC || fm_hdr->info.dsize > MAX_SCPSYS_SIZE) {
+		dprintf(CRITICAL, "scp: firmware information incorrect!\n");
+		return -EINVAL;
+	}
+
+	nread = bio_read(bdev, buf, sizeof(union fm_hdr_t),
+			 fm_hdr->info.dsize);
+	if (nread <= 0) {
+		dprintf(CRITICAL, "error reading scp data\n");
+		return (int)nread;
+	}
+
+	dprintf(CRITICAL, "scp: load scp image success!\n");
+	return 0;
+}
+
+/*
+ * Load the SCP firmware from flash and copy its payload into SCP SRAM.
+ * Returns 0 on success, -1 on allocation/read/size failure.
+ */
+int load_scpsys(void)
+{
+	int err = 0;
+	void *buf = mempool_alloc(MAX_SCPSYS_SIZE, MEMPOOL_ANY);
+	union fm_hdr_t *fm_hdr = mempool_alloc(sizeof(union fm_hdr_t), MEMPOOL_ANY);
+
+	if (!buf || !fm_hdr) {
+		dprintf(CRITICAL, "scp: fail to alloc memory!\n");
+		err = -1;
+		goto done;
+	}
+
+	/* Load the SCP image from flash into buf. */
+	err = get_scpsys(SCPSYS_PART_NAME, fm_hdr, buf);
+	if (err) {
+		dprintf(CRITICAL, "scp: fail to load scp image!\n");
+		err = -1;
+		goto done;
+	}
+	/* The image must fit in the SRAM window ending at SCP_BASE_CFG. */
+	if (fm_hdr->info.dsize > (SCP_BASE_CFG - SCP_BASE_SRAM)) {
+		dprintf(CRITICAL, "scp: fail fm_hdr->info.dsize is overflow!\n");
+		err = -1;
+		goto done;
+	}
+	/* SCP_BASE_SRAM is a raw address constant; cast explicitly for memcpy. */
+	memcpy((void *)SCP_BASE_SRAM, buf, fm_hdr->info.dsize);
+
+done:
+	/*
+	 * BUGFIX: only free pointers that were actually allocated; the original
+	 * passed possibly-NULL pointers to mempool_free(), whose NULL handling
+	 * is not guaranteed.
+	 */
+	if (buf)
+		mempool_free(buf);
+	if (fm_hdr)
+		mempool_free(fm_hdr);
+	return err;
+}
+
diff --git a/src/bsp/lk/platform/mt8512/drivers/scp/rules.mk b/src/bsp/lk/platform/mt8512/drivers/scp/rules.mk
new file mode 100644
index 0000000..88d5a5d
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/scp/rules.mk
@@ -0,0 +1,20 @@
+# Build rules for the SCP loader module (LK build system).
+LOCAL_DIR := $(GET_LOCAL_DIR)
+MODULE := $(LOCAL_DIR)
+
+# NOTE(review): stripping -Werror (and the specific -Werror= variants) hides
+# real warnings in this module only -- consider fixing the warnings and
+# removing these filters instead.
+CFLAGS := $(filter-out -Werror, $(CFLAGS))
+GLOBAL_CFLAGS := $(filter-out -Werror, $(GLOBAL_CFLAGS))
+
+CFLAGS := $(filter-out -Werror=return-type, $(CFLAGS))
+GLOBAL_CFLAGS := $(filter-out -Werror=return-type, $(GLOBAL_CFLAGS))
+
+CFLAGS := $(filter-out -Werror=implicit-function-declaration, $(CFLAGS))
+GLOBAL_CFLAGS := $(filter-out -Werror=implicit-function-declaration, $(GLOBAL_CFLAGS))
+
+# Needs the block-device and partition libraries to read the SCP partition.
+MODULE_DEPS += \
+ lib/bio \
+ lib/partition \
+
+MODULE_SRCS += \
+ $(LOCAL_DIR)/mt_scp.c
+
+include make/module.mk
diff --git a/src/bsp/lk/platform/mt8512/drivers/spm/spm_mtcmos.c b/src/bsp/lk/platform/mt8512/drivers/spm/spm_mtcmos.c
new file mode 100644
index 0000000..3f17123
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/spm/spm_mtcmos.c
@@ -0,0 +1,1175 @@
+#include <reg.h>
+#include <platform/mt8512.h>
+#include <platform/pll.h>
+#include <platform/spm_mtcmos.h>
+#include <platform/mtk_timer.h>
+
+#define INFRACFG_AO_BASE (IO_PHYS + 0x00001000)
+#define SPM_BASE (IO_PHYS + 0x00006000)
+
+#define INFRA_TOPAXI_PROTECTEN (INFRACFG_AO_BASE + 0x220)
+#define INFRA_TOPAXI_PROTECTEN_SET (INFRACFG_AO_BASE + 0x2A0)
+#define INFRA_TOPAXI_PROTECTEN_CLR (INFRACFG_AO_BASE + 0x2A4)
+#define INFRA_TOPAXI_PROTECTEN_STA0 (INFRACFG_AO_BASE + 0x224)
+#define INFRA_TOPAXI_PROTECTEN_STA1 (INFRACFG_AO_BASE + 0x228)
+#define INFRA_TOPAXI_PROTECTEN_1 (INFRACFG_AO_BASE + 0x250)
+#define INFRA_TOPAXI_PROTECTEN_1_SET (INFRACFG_AO_BASE + 0x2A8)
+#define INFRA_TOPAXI_PROTECTEN_1_CLR (INFRACFG_AO_BASE + 0x2AC)
+#define INFRA_TOPAXI_PROTECTEN_STA0_1 (INFRACFG_AO_BASE + 0x254)
+#define INFRA_TOPAXI_PROTECTEN_STA1_1 (INFRACFG_AO_BASE + 0x258)
+#define INFRA_TOPAXI_PROTECTEN_2 (INFRACFG_AO_BASE + 0x420)
+#define INFRA_TOPAXI_PROTECTEN_2_SET (INFRACFG_AO_BASE + 0x42C)
+#define INFRA_TOPAXI_PROTECTEN_2_CLR (INFRACFG_AO_BASE + 0x430)
+#define INFRA_TOPAXI_PROTECTEN_STA0_2 (INFRACFG_AO_BASE + 0x424)
+#define INFRA_TOPAXI_PROTECTEN_STA1_2 (INFRACFG_AO_BASE + 0x428)
+#define PERI_BUS_DCM_CTRL (INFRACFG_AO_BASE + 0x74)
+#define AUDIO_BUS_AUD_SI0 (INFRACFG_AO_BASE + 0x800)
+#define AUDIO_BUS_INFRA_SI0 (INFRACFG_AO_BASE + 0x808)
+
+#define POWERON_CONFIG_EN (SPM_BASE + 0x000)
+#define CONN_PWR_CON (SPM_BASE + 0x32C)
+#define MM_PWR_CON (SPM_BASE + 0x374)
+#define IMG_PWR_CON (SPM_BASE + 0x38C)
+#define IP0_PWR_CON (SPM_BASE + 0x39C)
+#define IP1_PWR_CON (SPM_BASE + 0x384)
+#define IP2_PWR_CON (SPM_BASE + 0x388)
+#define USB_MAC_P1_PWR_CON (SPM_BASE + 0x3A4)
+#define AUDIO_PWR_CON (SPM_BASE + 0x314)
+#define ASRC_PWR_CON (SPM_BASE + 0x328)
+#define DSP_PWR_CON (SPM_BASE + 0x37C)
+#define PWR_STATUS (SPM_BASE + 0x180)
+#define PWR_STATUS_2ND (SPM_BASE + 0x184)
+
+#define SPM_PROJECT_CODE 0xb16
+
+/* Define MTCMOS power control */
+#define PWR_RST_B (0x1 << 0)
+#define PWR_ISO (0x1 << 1)
+#define PWR_ON (0x1 << 2)
+#define PWR_ON_2ND (0x1 << 3)
+#define PWR_CLK_DIS (0x1 << 4)
+
+/* Define MTCMOS Bus Protect Mask */
+#define DIS_PROT_STEP1_0_MASK ((0x1 << 16) |(0x1 << 17))
+#define DIS_PROT_STEP1_0_ACK_MASK ((0x1 << 16) |(0x1 << 17))
+#define DIS_PROT_STEP2_0_MASK ((0x1 << 1) |(0x1 << 2) |(0x1 << 10) |(0x1 << 11))
+#define DIS_PROT_STEP2_0_ACK_MASK ((0x1 << 1) |(0x1 << 2) |(0x1 << 10) |(0x1 << 11))
+
+//STEP0 -> SLV port, STEP1 -> MST port
+//AFE
+#define AFE_PROT_STEP0_0_MASK ((0x1 << 28))
+#define AFE_PROT_STEP0_0_ACK_MASK ((0x1 << 28))
+
+#define AFE_PROT_STEP1_0_MASK ((0x1 << 22))
+#define AFE_PROT_STEP1_0_ACK_MASK ((0x1 << 22))
+
+#define AFE_CLK_DCM_EN ((0x1 << 29))
+
+//SRC
+#define SRC_PROT_STEP1_0_MASK ((0x1 << 21))
+#define SRC_PROT_STEP1_0_ACK_MASK ((0x1 << 21))
+
+//MM
+#define MM_PROT_STEP0_0_MASK ((0x1 << 15))
+#define MM_PROT_STEP0_0_ACK_MASK ((0x1 << 15))
+
+#define MM_PROT_STEP1_00_MASK ((0x1 << 16) |(0x1 << 17))
+#define MM_PROT_STEP1_00_ACK_MASK ((0x1 << 16) |(0x1 << 17))
+
+#define MM_PROT_STEP1_10_MASK ((0x1 << 8) |(0x1 << 9) |(0x1 << 10) |(0x1 << 11))
+
+#define MM_PROT_STEP1_10_ACK_MASK ((0x1 << 8) |(0x1 << 9) |(0x1 << 10) |(0x1 << 11))
+//IP0(NNA1)
+#define NNA1_PROT_STEP0_0_MASK ((0x1 << 4))
+#define NNA1_PROT_STEP0_0_ACK_MASK ((0x1 << 4))
+
+#define NNA1_PROT_STEP1_0_MASK ((0x1 << 2))
+#define NNA1_PROT_STEP1_0_ACK_MASK ((0x1 << 2))
+
+//IP1(WFST)
+#define WFST_PROT_STEP0_0_MASK ((0x1 << 5))
+#define WFST_PROT_STEP0_0_ACK_MASK ((0x1 << 5))
+
+#define WFST_PROT_STEP1_0_MASK ((0x1 << 3))
+#define WFST_PROT_STEP1_0_ACK_MASK ((0x1 << 3))
+
+//IP2(NNA0)
+#define NNA0_PROT_STEP0_0_MASK ((0x1 << 12))
+#define NNA0_PROT_STEP0_0_ACK_MASK ((0x1 << 12))
+
+#define NNA0_PROT_STEP1_00_MASK ((0x1 << 5) |(0x1 << 6) |(0x1 << 7))
+#define NNA0_PROT_STEP1_00_ACK_MASK ((0x1 << 5) |(0x1 << 6) |(0x1 << 7))
+#define NNA0_PROT_STEP1_10_MASK ((0x1 << 14) |(0x1 << 18) |(0x1 << 16))
+#define NNA0_PROT_STEP1_10_ACK_MASK ((0x1 << 14) |(0x1 << 18) |(0x1 << 16))
+
+//DSP
+#define DSP_PROT_STEP1_00_MASK ((0x1 << 1) |(0x1 << 24))
+#define DSP_PROT_STEP1_00_ACK_MASK ((0x1 << 1) |(0x1 << 24))
+
+#define DSP_PROT_STEP1_10_MASK ((0x1 << 7) |(0x1 << 10) |(0x1 << 11))
+#define DSP_PROT_STEP1_10_ACK_MASK ((0x1 << 7) |(0x1 << 10) |(0x1 << 11))
+
+//CONN
+#define CONN_PROT_STEP0_0_MASK ((0x1 << 13))
+#define CONN_PROT_STEP0_0_ACK_MASK ((0x1 << 13))
+
+#define CONN_PROT_STEP1_00_MASK ((0x1 << 18))
+#define CONN_PROT_STEP1_00_ACK_MASK ((0x1 << 18))
+#define CONN_PROT_STEP1_10_MASK ((0x1 << 14))
+#define CONN_PROT_STEP1_10_ACK_MASK ((0x1 << 14))
+
+//USB
+#define USB_MAC_PROT_STEP0_0_MASK ((0x1 << 0) |(0x1 << 1))
+#define USB_MAC_PROT_STEP0_0_ACK_MASK ((0x1 << 0) |(0x1 << 1))
+
+/* Define MTCMOS Power Status Mask */
+
+#define CONN_PWR_STA_MASK (0x1 << 1)
+#define MM_PWR_STA_MASK (0x1 << 15)
+#define IMG_PWR_STA_MASK (0x1 << 16)
+#define DSP_PWR_STA_MASK (0x1 << 17)
+#define USB_MAC_P1_PWR_STA_MASK (0x1 << 20)
+#define ASRC_PWR_STA_MASK (0x1 << 23)
+#define AUDIO_PWR_STA_MASK (0x1 << 24)
+#define IP0_PWR_STA_MASK (0x1 << 25)
+#define IP1_PWR_STA_MASK (0x1 << 26)
+#define IP2_PWR_STA_MASK (0x1 << 27)
+
+/* Define Non-CPU SRAM Mask */
+#define MM_SRAM_PDN (0x1 << 8)
+#define MM_SRAM_PDN_ACK (0x1 << 12)
+#define MM_SRAM_PDN_ACK_BIT0 (0x1 << 12)
+#define IMG_SRAM_PDN (0x1 << 8)
+#define IMG_SRAM_PDN_ACK (0x1 << 12)
+#define IMG_SRAM_PDN_ACK_BIT0 (0x1 << 12)
+#define DSP_SRAM_PDN (0xF << 8)
+#define DSP_SRAM_PDN_ACK (0xF << 12)
+#define DSP_SRAM_PDN_ACK_BIT0 (0x1 << 12)
+#define DSP_SRAM_PDN_ACK_BIT1 (0x1 << 13)
+#define DSP_SRAM_PDN_ACK_BIT2 (0x1 << 14)
+#define DSP_SRAM_PDN_ACK_BIT3 (0x1 << 15)
+#define IP0_SRAM_PDN (0x1 << 8)
+#define IP0_SRAM_PDN_ACK (0x1 << 12)
+#define IP0_SRAM_PDN_ACK_BIT0 (0x1 << 12)
+#define IP1_SRAM_PDN (0x1 << 8)
+#define IP1_SRAM_PDN_ACK (0x1 << 12)
+#define IP1_SRAM_PDN_ACK_BIT0 (0x1 << 12)
+#define IP2_SRAM_PDN (0x1 << 8)
+#define IP2_SRAM_PDN_ACK (0x1 << 12)
+#define IP2_SRAM_PDN_ACK_BIT0 (0x1 << 12)
+#define USB_SRAM_PDN (0x1 << 8)
+#define USB_SRAM_PDN_ACK (0x1 << 12)
+#define AUDIO_SRAM_PDN (0xF << 8)
+#define AUDIO_SRAM_PDN_ACK (0xF << 13)
+#define AUDIO_SRAM_PDN_ACK_BIT0 (0x1 << 13)
+#define AUDIO_SRAM_PDN_ACK_BIT1 (0x1 << 14)
+#define AUDIO_SRAM_PDN_ACK_BIT2 (0x1 << 15)
+#define AUDIO_SRAM_PDN_ACK_BIT3 (0x1 << 16)
+#define ASRC_SRAM_PDN (0x1 << 8)
+#define ASRC_SRAM_PDN_ACK (0x1 << 12)
+#define ASRC_SRAM_PDN_ACK_BIT0 (0x1 << 12)
+
+#define spm_read(addr) readl(addr)
+#define spm_write(addr, val) writel(val, addr)
+
+/*
+ * Power the CONN (connectivity) MTCMOS domain on or off via the SPM.
+ *
+ * state == STA_POWER_DOWN gates the domain: engage bus protection (slave
+ * then master side), then ISO -> clock gate -> assert reset -> drop both
+ * power switches.  Any other state powers up in the reverse order and
+ * releases the bus protection.  Always returns 0.
+ *
+ * NOTE(review): every PROTECTEN/PWR_STATUS poll below is an unbounded
+ * busy-wait with no timeout -- a stuck ack hangs the boot.  Confirm this
+ * is acceptable for bring-up code.  The statement order is hardware-
+ * mandated; do not reorder.
+ */
+int spm_mtcmos_ctrl_conn(int state)
+{
+ int err = 0;
+
+ /* TINFO="enable SPM register control" */
+ spm_write(POWERON_CONFIG_EN, (SPM_PROJECT_CODE << 16) | (0x1 << 0));
+
+ if (state == STA_POWER_DOWN) {
+ /* TINFO="Start to turn off CONN" */
+ /* TINFO="Set bus protect - step0 : 0" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_SET, CONN_PROT_STEP0_0_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1) & CONN_PROT_STEP0_0_ACK_MASK) != CONN_PROT_STEP0_0_ACK_MASK) {
+ }
+#endif
+ /* TINFO="Set bus protect - step1 : 00" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_1_SET, CONN_PROT_STEP1_00_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1_1) & CONN_PROT_STEP1_00_ACK_MASK) != CONN_PROT_STEP1_00_ACK_MASK) {
+ }
+#endif
+
+ spm_write(INFRA_TOPAXI_PROTECTEN_1_SET, ((0x1 << 21)));
+#ifndef IGNORE_MTCMOS_CHECK
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1_1) & (0x1 << 21)) != (0x1 << 21)) {
+ }
+#endif
+
+ /* TINFO="Set bus protect - step1 : 10" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_SET, CONN_PROT_STEP1_10_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1) & CONN_PROT_STEP1_10_ACK_MASK) != CONN_PROT_STEP1_10_ACK_MASK) {
+ }
+#endif
+#ifndef IGNORE_MTCMOS_CHECK
+#endif
+ /* TINFO="Set PWR_ISO = 1" */
+ spm_write(CONN_PWR_CON, spm_read(CONN_PWR_CON) | PWR_ISO);
+ /* TINFO="Set PWR_CLK_DIS = 1" */
+ spm_write(CONN_PWR_CON, spm_read(CONN_PWR_CON) | PWR_CLK_DIS);
+ /* TINFO="Set PWR_RST_B = 0" */
+ spm_write(CONN_PWR_CON, spm_read(CONN_PWR_CON) & ~PWR_RST_B);
+ /* TINFO="Set PWR_ON = 0" */
+ spm_write(CONN_PWR_CON, spm_read(CONN_PWR_CON) & ~PWR_ON);
+ /* TINFO="Set PWR_ON_2ND = 0" */
+ spm_write(CONN_PWR_CON, spm_read(CONN_PWR_CON) & ~PWR_ON_2ND);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until PWR_STATUS = 0 and PWR_STATUS_2ND = 0" */
+ while ((spm_read(PWR_STATUS) & CONN_PWR_STA_MASK)
+ || (spm_read(PWR_STATUS_2ND) & CONN_PWR_STA_MASK)) {
+ /* No logic between pwr_on and pwr_ack. Print SRAM / MTCMOS control and PWR_ACK for debug. */
+ }
+#endif
+ /* TINFO="Finish to turn off CONN" */
+ } else { /* STA_POWER_ON */
+ /* TINFO="Start to turn on CONN" */
+ /* TINFO="Set PWR_ON = 1" */
+ spm_write(CONN_PWR_CON, spm_read(CONN_PWR_CON) | PWR_ON);
+ /* TINFO="Set PWR_ON_2ND = 1" */
+ spm_write(CONN_PWR_CON, spm_read(CONN_PWR_CON) | PWR_ON_2ND);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until PWR_STATUS = 1 and PWR_STATUS_2ND = 1" */
+ while (((spm_read(PWR_STATUS) & CONN_PWR_STA_MASK) != CONN_PWR_STA_MASK)
+ || ((spm_read(PWR_STATUS_2ND) & CONN_PWR_STA_MASK) != CONN_PWR_STA_MASK)) {
+ /* No logic between pwr_on and pwr_ack. Print SRAM / MTCMOS control and PWR_ACK for debug. */
+ }
+#endif
+ /* TINFO="Set PWR_CLK_DIS = 0" */
+ spm_write(CONN_PWR_CON, spm_read(CONN_PWR_CON) & ~PWR_CLK_DIS);
+ /* TINFO="Set PWR_ISO = 0" */
+ spm_write(CONN_PWR_CON, spm_read(CONN_PWR_CON) & ~PWR_ISO);
+ /* TINFO="Set PWR_RST_B = 1" */
+ spm_write(CONN_PWR_CON, spm_read(CONN_PWR_CON) | PWR_RST_B);
+#ifndef IGNORE_MTCMOS_CHECK
+#endif
+ /* TINFO="Release bus protect - step1 : 10" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_CLR, CONN_PROT_STEP1_10_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* Note that this protect ack check after releasing protect has been ignored */
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1) & CONN_PROT_STEP1_10_MASK)) {
+ }
+#endif
+
+ spm_write(INFRA_TOPAXI_PROTECTEN_1_CLR, ((0x1 << 21)));
+#ifndef IGNORE_MTCMOS_CHECK
+ /* Note that this protect ack check after releasing protect has been ignored */
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1_1) & (0x1 << 21))) {
+ }
+#endif
+
+ /* TINFO="Release bus protect - step1 : 00" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_1_CLR, CONN_PROT_STEP1_00_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* Note that this protect ack check after releasing protect has been ignored */
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1_1) & CONN_PROT_STEP1_00_MASK)) {
+ }
+#endif
+ /* TINFO="Release bus protect - step0 : 0" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_CLR, CONN_PROT_STEP0_0_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* Note that this protect ack check after releasing protect has been ignored */
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1) & CONN_PROT_STEP0_0_MASK)) {
+ }
+#endif
+ /* TINFO="Finish to turn on CONN" */
+ }
+ return err;
+}
+
+/*
+ * Power the MM (multimedia) MTCMOS domain on or off via the SPM.
+ *
+ * Power-down: ungate the MM GALS clocks, engage bus protection, power down
+ * the domain SRAM, then ISO -> clock gate -> reset -> power off.  Power-up
+ * reverses the sequence and releases the protection.  Always returns 0.
+ *
+ * NOTE(review): all ack polls are unbounded busy-waits with no timeout.
+ * The statement order is hardware-mandated; do not reorder.
+ */
+int spm_mtcmos_ctrl_mm(int state)
+{
+ int err = 0;
+
+ /* TINFO="enable SPM register control" */
+ spm_write(POWERON_CONFIG_EN, (SPM_PROJECT_CODE << 16) | (0x1 << 0));
+
+ if (state == STA_POWER_DOWN) {
+ /* TINFO="Start to turn off MM" */
+ /* TINFO="enable mm gals in_ck bit25 & bit24" */
+ spm_write(MMSYS_CG_CLR0, 0x03000000); // & 0xFCFFFFFF;
+ /* TINFO="Set bus protect - step0 : 0" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_2_SET, MM_PROT_STEP0_0_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1_2) & MM_PROT_STEP0_0_ACK_MASK) != MM_PROT_STEP0_0_ACK_MASK) {
+ }
+#endif
+ /* TINFO="Set bus protect - step1 : 00" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_1_SET, MM_PROT_STEP1_00_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1_1) & MM_PROT_STEP1_00_ACK_MASK) != MM_PROT_STEP1_00_ACK_MASK) {
+ }
+#endif
+ /* TINFO="Set bus protect - step1 : 10" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_2_SET, MM_PROT_STEP1_10_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1_2) & MM_PROT_STEP1_10_ACK_MASK) != MM_PROT_STEP1_10_ACK_MASK) {
+ }
+#endif
+
+ /* TINFO="Set SRAM_PDN = 1" */
+ spm_write(MM_PWR_CON, spm_read(MM_PWR_CON) | MM_SRAM_PDN);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until MM_SRAM_PDN_ACK = 1" */
+ while ((spm_read(MM_PWR_CON) & MM_SRAM_PDN_ACK) != MM_SRAM_PDN_ACK) {
+ /* */
+ }
+#endif
+ /* TINFO="Set PWR_ISO = 1" */
+ spm_write(MM_PWR_CON, spm_read(MM_PWR_CON) | PWR_ISO);
+ /* TINFO="Set PWR_CLK_DIS = 1" */
+ spm_write(MM_PWR_CON, spm_read(MM_PWR_CON) | PWR_CLK_DIS);
+ /* TINFO="Set PWR_RST_B = 0" */
+ spm_write(MM_PWR_CON, spm_read(MM_PWR_CON) & ~PWR_RST_B);
+ /* TINFO="Set PWR_ON = 0" */
+ spm_write(MM_PWR_CON, spm_read(MM_PWR_CON) & ~PWR_ON);
+ /* TINFO="Set PWR_ON_2ND = 0" */
+ spm_write(MM_PWR_CON, spm_read(MM_PWR_CON) & ~PWR_ON_2ND);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until PWR_STATUS = 0 and PWR_STATUS_2ND = 0" */
+ while ((spm_read(PWR_STATUS) & MM_PWR_STA_MASK)
+ || (spm_read(PWR_STATUS_2ND) & MM_PWR_STA_MASK)) {
+ /* No logic between pwr_on and pwr_ack. Print SRAM / MTCMOS control and PWR_ACK for debug. */
+ }
+#endif
+ /* TINFO="Finish to turn off MM" */
+ } else { /* STA_POWER_ON */
+ /* TINFO="Start to turn on MM" */
+ /* TINFO="Set PWR_ON = 1" */
+ spm_write(MM_PWR_CON, spm_read(MM_PWR_CON) | PWR_ON);
+ /* TINFO="Set PWR_ON_2ND = 1" */
+ spm_write(MM_PWR_CON, spm_read(MM_PWR_CON) | PWR_ON_2ND);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until PWR_STATUS = 1 and PWR_STATUS_2ND = 1" */
+ while (((spm_read(PWR_STATUS) & MM_PWR_STA_MASK) != MM_PWR_STA_MASK)
+ || ((spm_read(PWR_STATUS_2ND) & MM_PWR_STA_MASK) != MM_PWR_STA_MASK)) {
+ /* No logic between pwr_on and pwr_ack. Print SRAM / MTCMOS control and PWR_ACK for debug. */
+ }
+#endif
+ /* TINFO="Set PWR_CLK_DIS = 0" */
+ spm_write(MM_PWR_CON, spm_read(MM_PWR_CON) & ~PWR_CLK_DIS);
+ /* TINFO="Set PWR_ISO = 0" */
+ spm_write(MM_PWR_CON, spm_read(MM_PWR_CON) & ~PWR_ISO);
+ /* TINFO="Set PWR_RST_B = 1" */
+ spm_write(MM_PWR_CON, spm_read(MM_PWR_CON) | PWR_RST_B);
+ /* TINFO="Set SRAM_PDN = 0" */
+ spm_write(MM_PWR_CON, spm_read(MM_PWR_CON) & ~(0x1 << 8));
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until MM_SRAM_PDN_ACK_BIT0 = 0" */
+ while (spm_read(MM_PWR_CON) & MM_SRAM_PDN_ACK_BIT0) {
+ /* */
+ }
+#endif
+ /* TINFO="Release bus protect - step1 : 10" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_2_CLR, MM_PROT_STEP1_10_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* Note that this protect ack check after releasing protect has been ignored */
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1_2) & MM_PROT_STEP1_10_ACK_MASK)) {
+ }
+#endif
+ /* TINFO="Release bus protect - step1 : 00" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_1_CLR, MM_PROT_STEP1_00_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* Note that this protect ack check after releasing protect has been ignored */
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1_1) & MM_PROT_STEP1_00_ACK_MASK)) {
+ }
+#endif
+ /* TINFO="Release bus protect - step0 : 0" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_2_CLR, MM_PROT_STEP0_0_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* Note that this protect ack check after releasing protect has been ignored */
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1_2) & MM_PROT_STEP0_0_ACK_MASK)) {
+ }
+#endif
+
+
+ /* TINFO="Finish to turn on MM" */
+ }
+ return err;
+}
+
+/*
+ * Power the IMG (imaging) MTCMOS domain on or off via the SPM.
+ *
+ * Unlike CONN/MM, this domain uses no bus-protect step: power-down is
+ * SRAM power-down -> ISO -> clock gate -> reset -> power off, and power-up
+ * reverses it.  Always returns 0.
+ *
+ * NOTE(review): all ack polls are unbounded busy-waits with no timeout.
+ * The statement order is hardware-mandated; do not reorder.
+ */
+int spm_mtcmos_ctrl_img(int state)
+{
+ int err = 0;
+
+ /* TINFO="enable SPM register control" */
+ spm_write(POWERON_CONFIG_EN, (SPM_PROJECT_CODE << 16) | (0x1 << 0));
+
+ if (state == STA_POWER_DOWN) {
+ /* TINFO="Start to turn off IMG" */
+ /* TINFO="Set SRAM_PDN = 1" */
+ spm_write(IMG_PWR_CON, spm_read(IMG_PWR_CON) | IMG_SRAM_PDN);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until IMG_SRAM_PDN_ACK = 1" */
+ while ((spm_read(IMG_PWR_CON) & IMG_SRAM_PDN_ACK) != IMG_SRAM_PDN_ACK) {
+ /* */
+ }
+#endif
+ /* TINFO="Set PWR_ISO = 1" */
+ spm_write(IMG_PWR_CON, spm_read(IMG_PWR_CON) | PWR_ISO);
+ /* TINFO="Set PWR_CLK_DIS = 1" */
+ spm_write(IMG_PWR_CON, spm_read(IMG_PWR_CON) | PWR_CLK_DIS);
+ /* TINFO="Set PWR_RST_B = 0" */
+ spm_write(IMG_PWR_CON, spm_read(IMG_PWR_CON) & ~PWR_RST_B);
+ /* TINFO="Set PWR_ON = 0" */
+ spm_write(IMG_PWR_CON, spm_read(IMG_PWR_CON) & ~PWR_ON);
+ /* TINFO="Set PWR_ON_2ND = 0" */
+ spm_write(IMG_PWR_CON, spm_read(IMG_PWR_CON) & ~PWR_ON_2ND);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until PWR_STATUS = 0 and PWR_STATUS_2ND = 0" */
+ while ((spm_read(PWR_STATUS) & IMG_PWR_STA_MASK)
+ || (spm_read(PWR_STATUS_2ND) & IMG_PWR_STA_MASK)) {
+ /* No logic between pwr_on and pwr_ack. Print SRAM / MTCMOS control and PWR_ACK for debug. */
+ }
+#endif
+ /* TINFO="Finish to turn off IMG" */
+ } else { /* STA_POWER_ON */
+ /* TINFO="Start to turn on IMG" */
+ /* TINFO="Set PWR_ON = 1" */
+ spm_write(IMG_PWR_CON, spm_read(IMG_PWR_CON) | PWR_ON);
+ /* TINFO="Set PWR_ON_2ND = 1" */
+ spm_write(IMG_PWR_CON, spm_read(IMG_PWR_CON) | PWR_ON_2ND);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until PWR_STATUS = 1 and PWR_STATUS_2ND = 1" */
+ while (((spm_read(PWR_STATUS) & IMG_PWR_STA_MASK) != IMG_PWR_STA_MASK)
+ || ((spm_read(PWR_STATUS_2ND) & IMG_PWR_STA_MASK) != IMG_PWR_STA_MASK)) {
+ /* No logic between pwr_on and pwr_ack. Print SRAM / MTCMOS control and PWR_ACK for debug. */
+ }
+#endif
+ /* TINFO="Set PWR_CLK_DIS = 0" */
+ spm_write(IMG_PWR_CON, spm_read(IMG_PWR_CON) & ~PWR_CLK_DIS);
+ /* TINFO="Set PWR_ISO = 0" */
+ spm_write(IMG_PWR_CON, spm_read(IMG_PWR_CON) & ~PWR_ISO);
+ /* TINFO="Set PWR_RST_B = 1" */
+ spm_write(IMG_PWR_CON, spm_read(IMG_PWR_CON) | PWR_RST_B);
+ /* TINFO="Set SRAM_PDN = 0" */
+ spm_write(IMG_PWR_CON, spm_read(IMG_PWR_CON) & ~(0x1 << 8));
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until IMG_SRAM_PDN_ACK_BIT0 = 0" */
+ while (spm_read(IMG_PWR_CON) & IMG_SRAM_PDN_ACK_BIT0) {
+ /* */
+ }
+#endif
+ /* TINFO="Finish to turn on IMG" */
+ }
+ return err;
+}
+
+/*
+ * Power the IP0 (NNA0) MTCMOS domain on or off via the SPM.
+ *
+ * Power-down: engage bus protection (NNA0 masks, slave then master side),
+ * power down the domain SRAM, then ISO -> clock gate -> reset -> power off.
+ * Power-up reverses the sequence and releases the protection.
+ * Always returns 0.
+ *
+ * NOTE(review): all ack polls are unbounded busy-waits with no timeout.
+ * The statement order is hardware-mandated; do not reorder.
+ */
+int spm_mtcmos_ctrl_ip0(int state)
+{
+ int err = 0;
+
+ /* TINFO="enable SPM register control" */
+ spm_write(POWERON_CONFIG_EN, (SPM_PROJECT_CODE << 16) | (0x1 << 0));
+
+ if (state == STA_POWER_DOWN) {
+ /* TINFO="Start to turn off IP0" */
+
+ /* TINFO="Set bus protect - step0 : 0" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_2_SET, NNA0_PROT_STEP0_0_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1_2) & NNA0_PROT_STEP0_0_ACK_MASK) != NNA0_PROT_STEP0_0_ACK_MASK) {
+ }
+#endif
+ /* TINFO="Set bus protect - step1 : 00" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_2_SET, NNA0_PROT_STEP1_00_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1_2) & NNA0_PROT_STEP1_00_ACK_MASK) != NNA0_PROT_STEP1_00_ACK_MASK) {
+ }
+#endif
+ /* TINFO="Set bus protect - step1 : 10" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_2_SET, NNA0_PROT_STEP1_10_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1_2) & NNA0_PROT_STEP1_10_ACK_MASK) != NNA0_PROT_STEP1_10_ACK_MASK) {
+ }
+#endif
+
+ /* TINFO="Set SRAM_PDN = 1" */
+ spm_write(IP0_PWR_CON, spm_read(IP0_PWR_CON) | IP0_SRAM_PDN);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until IP0_SRAM_PDN_ACK = 1" */
+ while ((spm_read(IP0_PWR_CON) & IP0_SRAM_PDN_ACK) != IP0_SRAM_PDN_ACK) {
+ /* */
+ }
+#endif
+ /* TINFO="Set PWR_ISO = 1" */
+ spm_write(IP0_PWR_CON, spm_read(IP0_PWR_CON) | PWR_ISO);
+ /* TINFO="Set PWR_CLK_DIS = 1" */
+ spm_write(IP0_PWR_CON, spm_read(IP0_PWR_CON) | PWR_CLK_DIS);
+ /* TINFO="Set PWR_RST_B = 0" */
+ spm_write(IP0_PWR_CON, spm_read(IP0_PWR_CON) & ~PWR_RST_B);
+ /* TINFO="Set PWR_ON = 0" */
+ spm_write(IP0_PWR_CON, spm_read(IP0_PWR_CON) & ~PWR_ON);
+ /* TINFO="Set PWR_ON_2ND = 0" */
+ spm_write(IP0_PWR_CON, spm_read(IP0_PWR_CON) & ~PWR_ON_2ND);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until PWR_STATUS = 0 and PWR_STATUS_2ND = 0" */
+ while ((spm_read(PWR_STATUS) & IP0_PWR_STA_MASK)
+ || (spm_read(PWR_STATUS_2ND) & IP0_PWR_STA_MASK)) {
+ /* No logic between pwr_on and pwr_ack. Print SRAM / MTCMOS control and PWR_ACK for debug. */
+ }
+#endif
+ /* TINFO="Finish to turn off IP0" */
+ } else { /* STA_POWER_ON */
+ /* TINFO="Start to turn on IP0" */
+ /* TINFO="Set PWR_ON = 1" */
+ spm_write(IP0_PWR_CON, spm_read(IP0_PWR_CON) | PWR_ON);
+ /* TINFO="Set PWR_ON_2ND = 1" */
+ spm_write(IP0_PWR_CON, spm_read(IP0_PWR_CON) | PWR_ON_2ND);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until PWR_STATUS = 1 and PWR_STATUS_2ND = 1" */
+ while (((spm_read(PWR_STATUS) & IP0_PWR_STA_MASK) != IP0_PWR_STA_MASK)
+ || ((spm_read(PWR_STATUS_2ND) & IP0_PWR_STA_MASK) != IP0_PWR_STA_MASK)) {
+ /* No logic between pwr_on and pwr_ack. Print SRAM / MTCMOS control and PWR_ACK for debug. */
+ }
+#endif
+ /* TINFO="Set PWR_CLK_DIS = 0" */
+ spm_write(IP0_PWR_CON, spm_read(IP0_PWR_CON) & ~PWR_CLK_DIS);
+ /* TINFO="Set PWR_ISO = 0" */
+ spm_write(IP0_PWR_CON, spm_read(IP0_PWR_CON) & ~PWR_ISO);
+ /* TINFO="Set PWR_RST_B = 1" */
+ spm_write(IP0_PWR_CON, spm_read(IP0_PWR_CON) | PWR_RST_B);
+ /* TINFO="Set SRAM_PDN = 0" */
+ spm_write(IP0_PWR_CON, spm_read(IP0_PWR_CON) & ~(0x1 << 8));
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until IP0_SRAM_PDN_ACK_BIT0 = 0" */
+ while (spm_read(IP0_PWR_CON) & IP0_SRAM_PDN_ACK_BIT0) {
+ /* */
+ }
+#endif
+ /* TINFO="Release bus protect - step1 : 10" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_2_CLR, NNA0_PROT_STEP1_10_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* Note that this protect ack check after releasing protect has been ignored */
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1_2) & NNA0_PROT_STEP1_10_ACK_MASK)) {
+ }
+#endif
+ /* TINFO="Release bus protect - step1 : 00" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_2_CLR, NNA0_PROT_STEP1_00_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* Note that this protect ack check after releasing protect has been ignored */
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1_2) & NNA0_PROT_STEP1_00_ACK_MASK)) {
+ }
+
+#endif
+ /* TINFO="Release bus protect - step0 : 0" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_2_CLR, NNA0_PROT_STEP0_0_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* Note that this protect ack check after releasing protect has been ignored */
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1_2) & NNA0_PROT_STEP0_0_ACK_MASK)) {
+ }
+#endif
+
+
+ /* TINFO="Finish to turn on IP0" */
+ }
+ return err;
+}
+
+/*
+ * spm_mtcmos_ctrl_ip1() - power the IP1 (NNA1) MTCMOS domain on or off.
+ *
+ * @state: STA_POWER_DOWN powers the domain off; any other value powers it on.
+ * Returns 0 (err is never assigned a failure code in this sequence).
+ *
+ * The register writes follow the hardware power-sequencing protocol:
+ * power-down = bus protect -> SRAM PDN -> ISO -> CLK_DIS -> !RST_B -> !PWR_ON;
+ * power-up is the exact reverse. Do not reorder the steps.
+ *
+ * NOTE(review): unless IGNORE_MTCMOS_CHECK is defined, the ack polling
+ * loops below spin forever if the hardware never acknowledges.
+ */
+int spm_mtcmos_ctrl_ip1(int state)
+{
+ int err = 0;
+
+ /* TINFO="enable SPM register control" */
+ /* Project code in the upper half-word unlocks SPM register writes. */
+ spm_write(POWERON_CONFIG_EN, (SPM_PROJECT_CODE << 16) | (0x1 << 0));
+
+ if (state == STA_POWER_DOWN) {
+ /* TINFO="Start to turn off IP1" */
+ /* TINFO="Set bus protect - step0 : 0" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_SET, NNA1_PROT_STEP0_0_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1) & NNA1_PROT_STEP0_0_ACK_MASK) != NNA1_PROT_STEP0_0_ACK_MASK) {
+ }
+#endif
+ /* TINFO="Set bus protect - step1 : 0" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_SET, NNA1_PROT_STEP1_0_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1) & NNA1_PROT_STEP1_0_ACK_MASK) != NNA1_PROT_STEP1_0_ACK_MASK) {
+ }
+#endif
+
+ /* TINFO="Set SRAM_PDN = 1" */
+ spm_write(IP1_PWR_CON, spm_read(IP1_PWR_CON) | IP1_SRAM_PDN);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until IP1_SRAM_PDN_ACK = 1" */
+ while ((spm_read(IP1_PWR_CON) & IP1_SRAM_PDN_ACK) != IP1_SRAM_PDN_ACK) {
+ /* */
+ }
+#endif
+ /* TINFO="Set PWR_ISO = 1" */
+ spm_write(IP1_PWR_CON, spm_read(IP1_PWR_CON) | PWR_ISO);
+ /* TINFO="Set PWR_CLK_DIS = 1" */
+ spm_write(IP1_PWR_CON, spm_read(IP1_PWR_CON) | PWR_CLK_DIS);
+ /* TINFO="Set PWR_RST_B = 0" */
+ spm_write(IP1_PWR_CON, spm_read(IP1_PWR_CON) & ~PWR_RST_B);
+ /* TINFO="Set PWR_ON = 0" */
+ spm_write(IP1_PWR_CON, spm_read(IP1_PWR_CON) & ~PWR_ON);
+ /* TINFO="Set PWR_ON_2ND = 0" */
+ spm_write(IP1_PWR_CON, spm_read(IP1_PWR_CON) & ~PWR_ON_2ND);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until PWR_STATUS = 0 and PWR_STATUS_2ND = 0" */
+ while ((spm_read(PWR_STATUS) & IP1_PWR_STA_MASK)
+ || (spm_read(PWR_STATUS_2ND) & IP1_PWR_STA_MASK)) {
+ /* No logic between pwr_on and pwr_ack. Print SRAM / MTCMOS control and PWR_ACK for debug. */
+ }
+#endif
+ /* TINFO="Finish to turn off IP1" */
+ } else { /* STA_POWER_ON */
+ /* TINFO="Start to turn on IP1" */
+ /* TINFO="Set PWR_ON = 1" */
+ spm_write(IP1_PWR_CON, spm_read(IP1_PWR_CON) | PWR_ON);
+ /* TINFO="Set PWR_ON_2ND = 1" */
+ spm_write(IP1_PWR_CON, spm_read(IP1_PWR_CON) | PWR_ON_2ND);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until PWR_STATUS = 1 and PWR_STATUS_2ND = 1" */
+ while (((spm_read(PWR_STATUS) & IP1_PWR_STA_MASK) != IP1_PWR_STA_MASK)
+ || ((spm_read(PWR_STATUS_2ND) & IP1_PWR_STA_MASK) != IP1_PWR_STA_MASK)) {
+ /* No logic between pwr_on and pwr_ack. Print SRAM / MTCMOS control and PWR_ACK for debug. */
+ }
+#endif
+ /* TINFO="Set PWR_CLK_DIS = 0" */
+ spm_write(IP1_PWR_CON, spm_read(IP1_PWR_CON) & ~PWR_CLK_DIS);
+ /* TINFO="Set PWR_ISO = 0" */
+ spm_write(IP1_PWR_CON, spm_read(IP1_PWR_CON) & ~PWR_ISO);
+ /* TINFO="Set PWR_RST_B = 1" */
+ spm_write(IP1_PWR_CON, spm_read(IP1_PWR_CON) | PWR_RST_B);
+ /* TINFO="Set SRAM_PDN = 0" */
+ /* Only SRAM PDN bit 0 (bit 8 of the PWR_CON register) is cleared here. */
+ spm_write(IP1_PWR_CON, spm_read(IP1_PWR_CON) & ~(0x1 << 8));
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until IP1_SRAM_PDN_ACK_BIT0 = 0" */
+ while (spm_read(IP1_PWR_CON) & IP1_SRAM_PDN_ACK_BIT0) {
+ /* */
+ }
+#endif
+
+ /* TINFO="Release bus protect - step1 : 0" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_CLR, NNA1_PROT_STEP1_0_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* Note that this protect ack check after releasing protect has been ignored */
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1) & NNA1_PROT_STEP1_0_ACK_MASK)) {
+ }
+#endif
+ /* TINFO="Release bus protect - step0 : 0" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_CLR, NNA1_PROT_STEP0_0_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* Note that this protect ack check after releasing protect has been ignored */
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1) & NNA1_PROT_STEP0_0_ACK_MASK)) {
+ }
+#endif
+
+ /* TINFO="Finish to turn on IP1" */
+ }
+ return err;
+}
+
+/*
+ * spm_mtcmos_ctrl_ip2() - power the IP2 (WFST) MTCMOS domain on or off.
+ *
+ * @state: STA_POWER_DOWN powers the domain off; any other value powers it on.
+ * Returns 0 (err is never assigned a failure code in this sequence).
+ *
+ * Mirrors spm_mtcmos_ctrl_ip1() but uses the WFST bus-protect masks and
+ * IP2_PWR_CON. The statement order is the hardware power-sequencing
+ * protocol and must not be changed.
+ *
+ * NOTE(review): unless IGNORE_MTCMOS_CHECK is defined, the ack polling
+ * loops below spin forever if the hardware never acknowledges.
+ */
+int spm_mtcmos_ctrl_ip2(int state)
+{
+ int err = 0;
+
+ /* TINFO="enable SPM register control" */
+ /* Project code in the upper half-word unlocks SPM register writes. */
+ spm_write(POWERON_CONFIG_EN, (SPM_PROJECT_CODE << 16) | (0x1 << 0));
+
+ if (state == STA_POWER_DOWN) {
+ /* TINFO="Start to turn off IP2" */
+ /* TINFO="Set bus protect - step0 : 0" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_SET, WFST_PROT_STEP0_0_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1) & WFST_PROT_STEP0_0_ACK_MASK) != WFST_PROT_STEP0_0_ACK_MASK) {
+ }
+#endif
+ /* TINFO="Set bus protect - step1 : 0" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_SET, WFST_PROT_STEP1_0_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1) & WFST_PROT_STEP1_0_ACK_MASK) != WFST_PROT_STEP1_0_ACK_MASK) {
+ }
+#endif
+
+ /* TINFO="Set SRAM_PDN = 1" */
+ spm_write(IP2_PWR_CON, spm_read(IP2_PWR_CON) | IP2_SRAM_PDN);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until IP2_SRAM_PDN_ACK = 1" */
+ while ((spm_read(IP2_PWR_CON) & IP2_SRAM_PDN_ACK) != IP2_SRAM_PDN_ACK) {
+ /* */
+ }
+#endif
+ /* TINFO="Set PWR_ISO = 1" */
+ spm_write(IP2_PWR_CON, spm_read(IP2_PWR_CON) | PWR_ISO);
+ /* TINFO="Set PWR_CLK_DIS = 1" */
+ spm_write(IP2_PWR_CON, spm_read(IP2_PWR_CON) | PWR_CLK_DIS);
+ /* TINFO="Set PWR_RST_B = 0" */
+ spm_write(IP2_PWR_CON, spm_read(IP2_PWR_CON) & ~PWR_RST_B);
+ /* TINFO="Set PWR_ON = 0" */
+ spm_write(IP2_PWR_CON, spm_read(IP2_PWR_CON) & ~PWR_ON);
+ /* TINFO="Set PWR_ON_2ND = 0" */
+ spm_write(IP2_PWR_CON, spm_read(IP2_PWR_CON) & ~PWR_ON_2ND);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until PWR_STATUS = 0 and PWR_STATUS_2ND = 0" */
+ while ((spm_read(PWR_STATUS) & IP2_PWR_STA_MASK)
+ || (spm_read(PWR_STATUS_2ND) & IP2_PWR_STA_MASK)) {
+ /* No logic between pwr_on and pwr_ack. Print SRAM / MTCMOS control and PWR_ACK for debug. */
+ }
+#endif
+ /* TINFO="Finish to turn off IP2" */
+ } else { /* STA_POWER_ON */
+ /* TINFO="Start to turn on IP2" */
+ /* TINFO="Set PWR_ON = 1" */
+ spm_write(IP2_PWR_CON, spm_read(IP2_PWR_CON) | PWR_ON);
+ /* TINFO="Set PWR_ON_2ND = 1" */
+ spm_write(IP2_PWR_CON, spm_read(IP2_PWR_CON) | PWR_ON_2ND);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until PWR_STATUS = 1 and PWR_STATUS_2ND = 1" */
+ while (((spm_read(PWR_STATUS) & IP2_PWR_STA_MASK) != IP2_PWR_STA_MASK)
+ || ((spm_read(PWR_STATUS_2ND) & IP2_PWR_STA_MASK) != IP2_PWR_STA_MASK)) {
+ /* No logic between pwr_on and pwr_ack. Print SRAM / MTCMOS control and PWR_ACK for debug. */
+ }
+#endif
+ /* TINFO="Set PWR_CLK_DIS = 0" */
+ spm_write(IP2_PWR_CON, spm_read(IP2_PWR_CON) & ~PWR_CLK_DIS);
+ /* TINFO="Set PWR_ISO = 0" */
+ spm_write(IP2_PWR_CON, spm_read(IP2_PWR_CON) & ~PWR_ISO);
+ /* TINFO="Set PWR_RST_B = 1" */
+ spm_write(IP2_PWR_CON, spm_read(IP2_PWR_CON) | PWR_RST_B);
+ /* TINFO="Set SRAM_PDN = 0" */
+ /* Only SRAM PDN bit 0 (bit 8 of the PWR_CON register) is cleared here. */
+ spm_write(IP2_PWR_CON, spm_read(IP2_PWR_CON) & ~(0x1 << 8));
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until IP2_SRAM_PDN_ACK_BIT0 = 0" */
+ while (spm_read(IP2_PWR_CON) & IP2_SRAM_PDN_ACK_BIT0) {
+ /* */
+ }
+#endif
+ /* TINFO="Release bus protect - step1 : 0" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_CLR, WFST_PROT_STEP1_0_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* Note that this protect ack check after releasing protect has been ignored */
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1) & WFST_PROT_STEP1_0_ACK_MASK)) {
+ }
+#endif
+ /* TINFO="Release bus protect - step0 : 0" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_CLR, WFST_PROT_STEP0_0_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* Note that this protect ack check after releasing protect has been ignored */
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1) & WFST_PROT_STEP0_0_ACK_MASK)) {
+ }
+#endif
+
+
+ /* TINFO="Finish to turn on IP2" */
+ }
+ return err;
+}
+
+/*
+ * spm_mtcmos_ctrl_usb_mac_p1() - power the USB MAC port-1 MTCMOS domain
+ * on or off.
+ *
+ * @state: STA_POWER_DOWN powers the domain off; any other value powers it on.
+ * Returns 0 (err is never assigned a failure code in this sequence).
+ *
+ * Uses the second bus-protect register bank (INFRA_TOPAXI_PROTECTEN_2_*)
+ * and only a single protect step, unlike the two-step NNA/WFST domains.
+ * The statement order is the hardware power-sequencing protocol and must
+ * not be changed.
+ *
+ * NOTE(review): unless IGNORE_MTCMOS_CHECK is defined, the ack polling
+ * loops below spin forever if the hardware never acknowledges.
+ */
+int spm_mtcmos_ctrl_usb_mac_p1(int state)
+{
+ int err = 0;
+
+ /* TINFO="enable SPM register control" */
+ /* Project code in the upper half-word unlocks SPM register writes. */
+ spm_write(POWERON_CONFIG_EN, (SPM_PROJECT_CODE << 16) | (0x1 << 0));
+
+ if (state == STA_POWER_DOWN) {
+ /* TINFO="Set bus protect - step0 : 0" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_2_SET, USB_MAC_PROT_STEP0_0_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1_2) & USB_MAC_PROT_STEP0_0_ACK_MASK) != USB_MAC_PROT_STEP0_0_ACK_MASK) {
+ }
+#endif
+
+ /* TINFO="Set USB_SRAM_PDN = 1" */
+ spm_write(USB_MAC_P1_PWR_CON, spm_read(USB_MAC_P1_PWR_CON) | USB_SRAM_PDN);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until USB_SRAM_PDN_ACK = 1" */
+ while ((spm_read(USB_MAC_P1_PWR_CON) & USB_SRAM_PDN_ACK) != USB_SRAM_PDN_ACK) {
+ /* */
+ }
+#endif
+ /* TINFO="Start to turn off USB_MAC_P1" */
+ /* TINFO="Set PWR_ISO = 1" */
+ spm_write(USB_MAC_P1_PWR_CON, spm_read(USB_MAC_P1_PWR_CON) | PWR_ISO);
+ /* TINFO="Set PWR_CLK_DIS = 1" */
+ spm_write(USB_MAC_P1_PWR_CON, spm_read(USB_MAC_P1_PWR_CON) | PWR_CLK_DIS);
+ /* TINFO="Set PWR_RST_B = 0" */
+ spm_write(USB_MAC_P1_PWR_CON, spm_read(USB_MAC_P1_PWR_CON) & ~PWR_RST_B);
+ /* TINFO="Set PWR_ON = 0" */
+ spm_write(USB_MAC_P1_PWR_CON, spm_read(USB_MAC_P1_PWR_CON) & ~PWR_ON);
+ /* TINFO="Set PWR_ON_2ND = 0" */
+ spm_write(USB_MAC_P1_PWR_CON, spm_read(USB_MAC_P1_PWR_CON) & ~PWR_ON_2ND);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until PWR_STATUS = 0 and PWR_STATUS_2ND = 0" */
+ while ((spm_read(PWR_STATUS) & USB_MAC_P1_PWR_STA_MASK)
+ || (spm_read(PWR_STATUS_2ND) & USB_MAC_P1_PWR_STA_MASK)) {
+ /* No logic between pwr_on and pwr_ack. Print SRAM / MTCMOS control and PWR_ACK for debug. */
+ }
+#endif
+ /* TINFO="Finish to turn off USB_MAC_P1" */
+ } else { /* STA_POWER_ON */
+ /* TINFO="Start to turn on USB_MAC_P1" */
+ /* TINFO="Set PWR_ON = 1" */
+ spm_write(USB_MAC_P1_PWR_CON, spm_read(USB_MAC_P1_PWR_CON) | PWR_ON);
+ /* TINFO="Set PWR_ON_2ND = 1" */
+ spm_write(USB_MAC_P1_PWR_CON, spm_read(USB_MAC_P1_PWR_CON) | PWR_ON_2ND);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until PWR_STATUS = 1 and PWR_STATUS_2ND = 1" */
+ while (((spm_read(PWR_STATUS) & USB_MAC_P1_PWR_STA_MASK) != USB_MAC_P1_PWR_STA_MASK)
+ || ((spm_read(PWR_STATUS_2ND) & USB_MAC_P1_PWR_STA_MASK) != USB_MAC_P1_PWR_STA_MASK)) {
+ /* No logic between pwr_on and pwr_ack. Print SRAM / MTCMOS control and PWR_ACK for debug. */
+ }
+#endif
+ /* TINFO="Set PWR_CLK_DIS = 0" */
+ spm_write(USB_MAC_P1_PWR_CON, spm_read(USB_MAC_P1_PWR_CON) & ~PWR_CLK_DIS);
+ /* TINFO="Set PWR_ISO = 0" */
+ spm_write(USB_MAC_P1_PWR_CON, spm_read(USB_MAC_P1_PWR_CON) & ~PWR_ISO);
+ /* TINFO="Set PWR_RST_B = 1" */
+ spm_write(USB_MAC_P1_PWR_CON, spm_read(USB_MAC_P1_PWR_CON) | PWR_RST_B);
+
+ /* TINFO="Set SRAM_PDN = 0" */
+ spm_write(USB_MAC_P1_PWR_CON, spm_read(USB_MAC_P1_PWR_CON) & ~USB_SRAM_PDN);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until USB_SRAM_PDN_ACK = 0" */
+ while (spm_read(USB_MAC_P1_PWR_CON) & USB_SRAM_PDN_ACK) {
+ /* */
+ }
+#endif
+
+ /* TINFO="Release bus protect - step0 : 0" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_2_CLR, USB_MAC_PROT_STEP0_0_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* Note that this protect ack check after releasing protect has been ignored */
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1_2) & USB_MAC_PROT_STEP0_0_ACK_MASK)) {
+ }
+#endif
+ /* TINFO="Finish to turn on USB_MAC_P1" */
+ }
+ return err;
+}
+
+/*
+ * spm_mtcmos_ctrl_dsp() - power the DSP MTCMOS domain on or off.
+ *
+ * @state: STA_POWER_DOWN powers the domain off; any other value powers it on.
+ * Returns 0 (err is never assigned a failure code in this sequence).
+ *
+ * In addition to the standard MTCMOS sequence, this domain gates bit 0 of
+ * AUDIO_BUS_AUD_SI0 / AUDIO_BUS_INFRA_SI0 before power-down and re-enables
+ * it after power-up, and clears the four DSP SRAM PDN bits (8..11) one at
+ * a time on power-up, waiting for each ack. The statement order is the
+ * hardware power-sequencing protocol and must not be changed.
+ *
+ * NOTE(review): unless IGNORE_MTCMOS_CHECK is defined, the ack polling
+ * loops below spin forever if the hardware never acknowledges.
+ */
+int spm_mtcmos_ctrl_dsp(int state)
+{
+ int err = 0;
+
+ /* TINFO="enable SPM register control" */
+ /* Project code in the upper half-word unlocks SPM register writes. */
+ spm_write(POWERON_CONFIG_EN, (SPM_PROJECT_CODE << 16) | (0x1 << 0));
+
+ if (state == STA_POWER_DOWN) {
+ /* TINFO="Start to turn off DSP" */
+ /* TINFO="Set bus protect - step1 : 10" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_SET, DSP_PROT_STEP1_10_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1) & DSP_PROT_STEP1_10_ACK_MASK) != DSP_PROT_STEP1_10_ACK_MASK) {
+ }
+#endif
+ /* TINFO="Set bus protect - step1 : 00" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_SET, DSP_PROT_STEP1_00_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1) & DSP_PROT_STEP1_00_ACK_MASK) != DSP_PROT_STEP1_00_ACK_MASK) {
+ }
+#endif
+
+ /* TINFO="AUDIO_BUS_AUD_SI0[0]=0"*/
+ spm_write(AUDIO_BUS_AUD_SI0, spm_read(AUDIO_BUS_AUD_SI0) & ~(0x1 << 0));
+ /* TINFO="AUDIO_BUS_INFRA_SI0[0]=0"*/
+ spm_write(AUDIO_BUS_INFRA_SI0, spm_read(AUDIO_BUS_INFRA_SI0) & ~(0x1 << 0));
+ /* TINFO="Set SRAM_PDN = 1" */
+ spm_write(DSP_PWR_CON, spm_read(DSP_PWR_CON) | DSP_SRAM_PDN);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until DSP_SRAM_PDN_ACK = 1" */
+ while ((spm_read(DSP_PWR_CON) & DSP_SRAM_PDN_ACK) != DSP_SRAM_PDN_ACK) {
+ /* */
+ }
+#endif
+ /* TINFO="Set PWR_ISO = 1" */
+ spm_write(DSP_PWR_CON, spm_read(DSP_PWR_CON) | PWR_ISO);
+ /* TINFO="Set PWR_CLK_DIS = 1" */
+ spm_write(DSP_PWR_CON, spm_read(DSP_PWR_CON) | PWR_CLK_DIS);
+ /* TINFO="Set PWR_RST_B = 0" */
+ spm_write(DSP_PWR_CON, spm_read(DSP_PWR_CON) & ~PWR_RST_B);
+ /* TINFO="Set PWR_ON = 0" */
+ spm_write(DSP_PWR_CON, spm_read(DSP_PWR_CON) & ~PWR_ON);
+ /* TINFO="Set PWR_ON_2ND = 0" */
+ spm_write(DSP_PWR_CON, spm_read(DSP_PWR_CON) & ~PWR_ON_2ND);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until PWR_STATUS = 0 and PWR_STATUS_2ND = 0" */
+ while ((spm_read(PWR_STATUS) & DSP_PWR_STA_MASK)
+ || (spm_read(PWR_STATUS_2ND) & DSP_PWR_STA_MASK)) {
+ /* No logic between pwr_on and pwr_ack. Print SRAM / MTCMOS control and PWR_ACK for debug. */
+ }
+#endif
+ /* TINFO="Finish to turn off DSP" */
+ } else { /* STA_POWER_ON */
+ /* TINFO="Start to turn on DSP" */
+ /* TINFO="Set PWR_ON = 1" */
+ spm_write(DSP_PWR_CON, spm_read(DSP_PWR_CON) | PWR_ON);
+ /* TINFO="Set PWR_ON_2ND = 1" */
+ spm_write(DSP_PWR_CON, spm_read(DSP_PWR_CON) | PWR_ON_2ND);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until PWR_STATUS = 1 and PWR_STATUS_2ND = 1" */
+ while (((spm_read(PWR_STATUS) & DSP_PWR_STA_MASK) != DSP_PWR_STA_MASK)
+ || ((spm_read(PWR_STATUS_2ND) & DSP_PWR_STA_MASK) != DSP_PWR_STA_MASK)) {
+ /* No logic between pwr_on and pwr_ack. Print SRAM / MTCMOS control and PWR_ACK for debug. */
+ }
+#endif
+ /* TINFO="Set PWR_CLK_DIS = 0" */
+ spm_write(DSP_PWR_CON, spm_read(DSP_PWR_CON) & ~PWR_CLK_DIS);
+ /* TINFO="Set PWR_ISO = 0" */
+ spm_write(DSP_PWR_CON, spm_read(DSP_PWR_CON) & ~PWR_ISO);
+ /* TINFO="Set PWR_RST_B = 1" */
+ spm_write(DSP_PWR_CON, spm_read(DSP_PWR_CON) | PWR_RST_B);
+ /* TINFO="Set SRAM_PDN = 0" */
+ /* SRAM PDN bits 8..11 are released one by one, each with its own ack wait. */
+ spm_write(DSP_PWR_CON, spm_read(DSP_PWR_CON) & ~(0x1 << 8));
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until DSP_SRAM_PDN_ACK_BIT0 = 0" */
+ while (spm_read(DSP_PWR_CON) & DSP_SRAM_PDN_ACK_BIT0) {
+ /* */
+ }
+#endif
+ spm_write(DSP_PWR_CON, spm_read(DSP_PWR_CON) & ~(0x1 << 9));
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until DSP_SRAM_PDN_ACK_BIT1 = 0" */
+ while (spm_read(DSP_PWR_CON) & DSP_SRAM_PDN_ACK_BIT1) {
+ /* */
+ }
+#endif
+ spm_write(DSP_PWR_CON, spm_read(DSP_PWR_CON) & ~(0x1 << 10));
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until DSP_SRAM_PDN_ACK_BIT2 = 0" */
+ while (spm_read(DSP_PWR_CON) & DSP_SRAM_PDN_ACK_BIT2) {
+ /* */
+ }
+#endif
+ spm_write(DSP_PWR_CON, spm_read(DSP_PWR_CON) & ~(0x1 << 11));
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until DSP_SRAM_PDN_ACK_BIT3 = 0" */
+ while (spm_read(DSP_PWR_CON) & DSP_SRAM_PDN_ACK_BIT3) {
+ /* */
+ }
+#endif
+ /* TINFO="AUDIO_BUS_AUD_SI0[0]=1"*/
+ spm_write(AUDIO_BUS_AUD_SI0, spm_read(AUDIO_BUS_AUD_SI0) | (0x1 << 0));
+ /* TINFO="AUDIO_BUS_INFRA_SI0[0]=1"*/
+ spm_write(AUDIO_BUS_INFRA_SI0, spm_read(AUDIO_BUS_INFRA_SI0) | (0x1 << 0));
+
+ /* TINFO="Release bus protect - step1 : 00" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_CLR, DSP_PROT_STEP1_00_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* Note that this protect ack check after releasing protect has been ignored */
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1) & DSP_PROT_STEP1_00_ACK_MASK)) {
+ }
+#endif
+ /* TINFO="Release bus protect - step1 : 10" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_CLR, DSP_PROT_STEP1_10_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* Note that this protect ack check after releasing protect has been ignored */
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1) & DSP_PROT_STEP1_10_ACK_MASK)) {
+ }
+#endif
+
+
+
+ /* TINFO="Finish to turn on DSP" */
+ }
+ return err;
+}
+
+/*
+ * spm_mtcmos_ctrl_audio() - power the AUDIO AFE MTCMOS domain on or off.
+ *
+ * @state: STA_POWER_DOWN powers the domain off; any other value powers it on.
+ * Returns 0 (err is never assigned a failure code in this sequence).
+ *
+ * Before power-down, bit 29 of PERI_BUS_DCM_CTRL (audio DCM enable) is
+ * cleared. On power-up the four AUDIO SRAM PDN bits (8..11) are released
+ * one by one, waiting for each ack. The statement order is the hardware
+ * power-sequencing protocol and must not be changed.
+ *
+ * NOTE(review): unless IGNORE_MTCMOS_CHECK is defined, the ack polling
+ * loops below spin forever if the hardware never acknowledges. Also note
+ * the audio DCM bit cleared on power-down is not re-set on power-up here
+ * — presumably restored elsewhere; verify against callers.
+ */
+int spm_mtcmos_ctrl_audio(int state)
+{
+ int err = 0;
+
+ /* TINFO="enable SPM register control" */
+ /* Project code in the upper half-word unlocks SPM register writes. */
+ spm_write(POWERON_CONFIG_EN, (SPM_PROJECT_CODE << 16) | (0x1 << 0));
+
+ if (state == STA_POWER_DOWN) {
+ /* TINFO="Start to turn off AUDIO AFE" */
+ /* TINFO="disable audio dcm en bit29" */
+ spm_write(PERI_BUS_DCM_CTRL, spm_read(PERI_BUS_DCM_CTRL) & 0xDFFFFFFF);
+ /* TINFO="Set bus protect - step0 : 0" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_SET, AFE_PROT_STEP0_0_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1) & AFE_PROT_STEP0_0_ACK_MASK) != AFE_PROT_STEP0_0_ACK_MASK) {
+ }
+#endif
+ /* TINFO="Set bus protect - step1 : 0" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_SET, AFE_PROT_STEP1_0_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1) & AFE_PROT_STEP1_0_ACK_MASK) != AFE_PROT_STEP1_0_ACK_MASK) {
+ }
+#endif
+
+
+ /* TINFO="Set SRAM_PDN = 1" */
+ spm_write(AUDIO_PWR_CON, spm_read(AUDIO_PWR_CON) | AUDIO_SRAM_PDN);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until AUDIO_SRAM_PDN_ACK = 1" */
+ while ((spm_read(AUDIO_PWR_CON) & AUDIO_SRAM_PDN_ACK) != AUDIO_SRAM_PDN_ACK) {
+ /* */
+ }
+#endif
+ /* TINFO="Set PWR_ISO = 1" */
+ spm_write(AUDIO_PWR_CON, spm_read(AUDIO_PWR_CON) | PWR_ISO);
+ /* TINFO="Set PWR_CLK_DIS = 1" */
+ spm_write(AUDIO_PWR_CON, spm_read(AUDIO_PWR_CON) | PWR_CLK_DIS);
+ /* TINFO="Set PWR_RST_B = 0" */
+ spm_write(AUDIO_PWR_CON, spm_read(AUDIO_PWR_CON) & ~PWR_RST_B);
+ /* TINFO="Set PWR_ON = 0" */
+ spm_write(AUDIO_PWR_CON, spm_read(AUDIO_PWR_CON) & ~PWR_ON);
+ /* TINFO="Set PWR_ON_2ND = 0" */
+ spm_write(AUDIO_PWR_CON, spm_read(AUDIO_PWR_CON) & ~PWR_ON_2ND);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until PWR_STATUS = 0 and PWR_STATUS_2ND = 0" */
+ while ((spm_read(PWR_STATUS) & AUDIO_PWR_STA_MASK)
+ || (spm_read(PWR_STATUS_2ND) & AUDIO_PWR_STA_MASK)) {
+ /* No logic between pwr_on and pwr_ack. Print SRAM / MTCMOS control and PWR_ACK for debug. */
+ }
+#endif
+ /* TINFO="Finish to turn off AUDIO AFE" */
+ } else { /* STA_POWER_ON */
+ /* TINFO="Start to turn on AUDIO AFE" */
+ /* TINFO="Set PWR_ON = 1" */
+ spm_write(AUDIO_PWR_CON, spm_read(AUDIO_PWR_CON) | PWR_ON);
+ /* TINFO="Set PWR_ON_2ND = 1" */
+ spm_write(AUDIO_PWR_CON, spm_read(AUDIO_PWR_CON) | PWR_ON_2ND);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until PWR_STATUS = 1 and PWR_STATUS_2ND = 1" */
+ while (((spm_read(PWR_STATUS) & AUDIO_PWR_STA_MASK) != AUDIO_PWR_STA_MASK)
+ || ((spm_read(PWR_STATUS_2ND) & AUDIO_PWR_STA_MASK) != AUDIO_PWR_STA_MASK)) {
+ /* No logic between pwr_on and pwr_ack. Print SRAM / MTCMOS control and PWR_ACK for debug. */
+ }
+#endif
+ /* TINFO="Set PWR_CLK_DIS = 0" */
+ spm_write(AUDIO_PWR_CON, spm_read(AUDIO_PWR_CON) & ~PWR_CLK_DIS);
+ /* TINFO="Set PWR_ISO = 0" */
+ spm_write(AUDIO_PWR_CON, spm_read(AUDIO_PWR_CON) & ~PWR_ISO);
+ /* TINFO="Set PWR_RST_B = 1" */
+ spm_write(AUDIO_PWR_CON, spm_read(AUDIO_PWR_CON) | PWR_RST_B);
+ /* TINFO="Set SRAM_PDN = 0" */
+ /* SRAM PDN bits 8..11 are released one by one, each with its own ack wait. */
+ spm_write(AUDIO_PWR_CON, spm_read(AUDIO_PWR_CON) & ~(0x1 << 8));
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until AUDIO_SRAM_PDN_ACK_BIT0 = 0" */
+ while (spm_read(AUDIO_PWR_CON) & AUDIO_SRAM_PDN_ACK_BIT0) {
+ /* */
+ }
+#endif
+ spm_write(AUDIO_PWR_CON, spm_read(AUDIO_PWR_CON) & ~(0x1 << 9));
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until AUDIO_SRAM_PDN_ACK_BIT1 = 0" */
+ while (spm_read(AUDIO_PWR_CON) & AUDIO_SRAM_PDN_ACK_BIT1) {
+ /* */
+ }
+#endif
+ spm_write(AUDIO_PWR_CON, spm_read(AUDIO_PWR_CON) & ~(0x1 << 10));
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until AUDIO_SRAM_PDN_ACK_BIT2 = 0" */
+ while (spm_read(AUDIO_PWR_CON) & AUDIO_SRAM_PDN_ACK_BIT2) {
+ /* */
+ }
+#endif
+ spm_write(AUDIO_PWR_CON, spm_read(AUDIO_PWR_CON) & ~(0x1 << 11));
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until AUDIO_SRAM_PDN_ACK_BIT3 = 0" */
+ while (spm_read(AUDIO_PWR_CON) & AUDIO_SRAM_PDN_ACK_BIT3) {
+ /* */
+ }
+#endif
+ /* TINFO="Release bus protect - step1 : 0" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_CLR, AFE_PROT_STEP1_0_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* Note that this protect ack check after releasing protect has been ignored */
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1) & AFE_PROT_STEP1_0_ACK_MASK)) {
+ }
+#endif
+ /* TINFO="Release bus protect - step0 : 0" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_CLR, AFE_PROT_STEP0_0_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* Note that this protect ack check after releasing protect has been ignored */
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1) & AFE_PROT_STEP0_0_ACK_MASK)) {
+ }
+#endif
+
+ /* TINFO="Finish to turn on AUDIO AFE" */
+ }
+ return err;
+}
+
+/*
+ * spm_mtcmos_ctrl_asrc() - power the AUDIO ASRC MTCMOS domain on or off.
+ *
+ * @state: STA_POWER_DOWN powers the domain off; any other value powers it on.
+ * Returns 0 (err is never assigned a failure code in this sequence).
+ *
+ * Uses a single bus-protect step (SRC_PROT_STEP1_0). The statement order
+ * is the hardware power-sequencing protocol and must not be changed.
+ *
+ * NOTE(review): unless IGNORE_MTCMOS_CHECK is defined, the ack polling
+ * loops below spin forever if the hardware never acknowledges.
+ */
+int spm_mtcmos_ctrl_asrc(int state)
+{
+ int err = 0;
+
+ /* TINFO="enable SPM register control" */
+ /* Project code in the upper half-word unlocks SPM register writes. */
+ spm_write(POWERON_CONFIG_EN, (SPM_PROJECT_CODE << 16) | (0x1 << 0));
+
+ if (state == STA_POWER_DOWN) {
+ /* TINFO="Start to turn off AUDIO ASRC" */
+
+ /* TINFO="Set bus protect - step1 : 0" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_SET, SRC_PROT_STEP1_0_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1) & SRC_PROT_STEP1_0_ACK_MASK) != SRC_PROT_STEP1_0_ACK_MASK) {
+ }
+#endif
+
+ /* TINFO="Set SRAM_PDN = 1" */
+ spm_write(ASRC_PWR_CON, spm_read(ASRC_PWR_CON) | ASRC_SRAM_PDN);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until ASRC_SRAM_PDN_ACK = 1" */
+ while ((spm_read(ASRC_PWR_CON) & ASRC_SRAM_PDN_ACK) != ASRC_SRAM_PDN_ACK) {
+ /* */
+ }
+#endif
+ /* TINFO="Set PWR_ISO = 1" */
+ spm_write(ASRC_PWR_CON, spm_read(ASRC_PWR_CON) | PWR_ISO);
+ /* TINFO="Set PWR_CLK_DIS = 1" */
+ spm_write(ASRC_PWR_CON, spm_read(ASRC_PWR_CON) | PWR_CLK_DIS);
+ /* TINFO="Set PWR_RST_B = 0" */
+ spm_write(ASRC_PWR_CON, spm_read(ASRC_PWR_CON) & ~PWR_RST_B);
+ /* TINFO="Set PWR_ON = 0" */
+ spm_write(ASRC_PWR_CON, spm_read(ASRC_PWR_CON) & ~PWR_ON);
+ /* TINFO="Set PWR_ON_2ND = 0" */
+ spm_write(ASRC_PWR_CON, spm_read(ASRC_PWR_CON) & ~PWR_ON_2ND);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until PWR_STATUS = 0 and PWR_STATUS_2ND = 0" */
+ while ((spm_read(PWR_STATUS) & ASRC_PWR_STA_MASK)
+ || (spm_read(PWR_STATUS_2ND) & ASRC_PWR_STA_MASK)) {
+ /* No logic between pwr_on and pwr_ack. Print SRAM / MTCMOS control and PWR_ACK for debug. */
+ }
+#endif
+ /* TINFO="Finish to turn off AUDIO ASRC" */
+ } else { /* STA_POWER_ON */
+ /* TINFO="Start to turn on AUDIO ASRC" */
+ /* TINFO="Set PWR_ON = 1" */
+ spm_write(ASRC_PWR_CON, spm_read(ASRC_PWR_CON) | PWR_ON);
+ /* TINFO="Set PWR_ON_2ND = 1" */
+ spm_write(ASRC_PWR_CON, spm_read(ASRC_PWR_CON) | PWR_ON_2ND);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until PWR_STATUS = 1 and PWR_STATUS_2ND = 1" */
+ while (((spm_read(PWR_STATUS) & ASRC_PWR_STA_MASK) != ASRC_PWR_STA_MASK)
+ || ((spm_read(PWR_STATUS_2ND) & ASRC_PWR_STA_MASK) != ASRC_PWR_STA_MASK)) {
+ /* No logic between pwr_on and pwr_ack. Print SRAM / MTCMOS control and PWR_ACK for debug. */
+ }
+#endif
+ /* TINFO="Set PWR_CLK_DIS = 0" */
+ spm_write(ASRC_PWR_CON, spm_read(ASRC_PWR_CON) & ~PWR_CLK_DIS);
+ /* TINFO="Set PWR_ISO = 0" */
+ spm_write(ASRC_PWR_CON, spm_read(ASRC_PWR_CON) & ~PWR_ISO);
+ /* TINFO="Set PWR_RST_B = 1" */
+ spm_write(ASRC_PWR_CON, spm_read(ASRC_PWR_CON) | PWR_RST_B);
+ /* TINFO="Set SRAM_PDN = 0" */
+ /* Only SRAM PDN bit 0 (bit 8 of the PWR_CON register) is cleared here. */
+ spm_write(ASRC_PWR_CON, spm_read(ASRC_PWR_CON) & ~(0x1 << 8));
+#ifndef IGNORE_MTCMOS_CHECK
+ /* TINFO="Wait until ASRC_SRAM_PDN_ACK_BIT0 = 0" */
+ while (spm_read(ASRC_PWR_CON) & ASRC_SRAM_PDN_ACK_BIT0) {
+ /* */
+ }
+#endif
+ /* TINFO="Release bus protect - step1 : 0" */
+ spm_write(INFRA_TOPAXI_PROTECTEN_CLR, SRC_PROT_STEP1_0_MASK);
+#ifndef IGNORE_MTCMOS_CHECK
+ /* Note that this protect ack check after releasing protect has been ignored */
+ while ((spm_read(INFRA_TOPAXI_PROTECTEN_STA1) & SRC_PROT_STEP1_0_ACK_MASK)) {
+ }
+#endif
+
+
+ /* TINFO="Finish to turn on AUDIO ASRC" */
+ }
+ return err;
+}
diff --git a/src/bsp/lk/platform/mt8512/drivers/trng/mtk_trng.c b/src/bsp/lk/platform/mt8512/drivers/trng/mtk_trng.c
new file mode 100644
index 0000000..24aaed4
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/trng/mtk_trng.c
@@ -0,0 +1,61 @@
+#include <debug.h>
+#include <reg.h>
+#include <platform/mt_reg_base.h>
+//#include <platform/pll.h>
+#include <string.h>
+
+/* TRNG register map, offsets from TRNG_BASE. */
+#define TRNG_CTRL_REG (TRNG_BASE+0x00)
+#define TRNG_DATA_REG (TRNG_BASE+0x08)
+#define TRNG_CONF_REG (TRNG_BASE+0x0C)
+
+/* #define TRNG_PDN_VALUE 0x1 */
+
+/* TRNG_CTRL_REG */
+#define TRNG_RDY (0x80000000) /* bit 31: a new random word is available */
+#define TRNG_START (0x00000001) /* bit 0: enable/start the generator */
+
+/* Assume clock setting for trng is on */
+/*
+ * trng_drv_get_random_data() - fill buf with len bytes of hardware
+ * random data from the TRNG.
+ *
+ * @buf: output buffer, must be non-NULL when len > 0
+ * @len: number of random bytes requested
+ *
+ * Returns the number of bytes written (== len on success), -1 when buf
+ * is NULL, or -2 when the TRNG cannot be started (clock disabled).
+ *
+ * Fix vs. previous revision: when len was not a multiple of 4 the tail
+ * bytes were left as memset() zeros and the return count fell short;
+ * the tail is now filled from one extra TRNG word via memcpy.
+ *
+ * NOTE(review): the ready poll has no timeout — if TRNG_RDY never
+ * asserts this spins forever; confirm against platform expectations.
+ */
+s32 trng_drv_get_random_data(u8 *buf, u32 len)
+{
+ s32 retval = 0;
+
+ if (0 == len)
+ return 0;
+
+ if (NULL == buf) {
+ dprintf(CRITICAL, "[TRNG] Error: input buffer is NULL\n");
+ return -1;
+ }
+
+ /*if (readl(TRNG_PDN_STATUS) & TRNG_PDN_VALUE) //TRNG clock is off
+ writel(TRNG_PDN_VALUE, TRNG_PDN_CLR); //ungate TRNG clock*/
+
+ /* Start the generator if it is not already running; the start bit
+ * reads back as 0 when the TRNG clock is gated. */
+ if (TRNG_START != (readl(TRNG_CTRL_REG) & TRNG_START)) {
+ writel(TRNG_START, TRNG_CTRL_REG); //start TRNG
+ if (TRNG_START != (readl(TRNG_CTRL_REG) & TRNG_START)) {
+ dprintf(CRITICAL, "[TRNG] Error: fail to start TRNG because clock is disabled\n");
+ return -2;
+ }
+ }
+
+ /* clear output buffer */
+ memset(buf, 0, len);
+
+ /* Generate random data with default rings, one 32-bit word per read;
+ * a final partial word covers any 1-3 byte tail. */
+ while (len > 0) {
+ u32 data;
+ u32 chunk = (len >= sizeof(u32)) ? sizeof(u32) : len;
+
+ if (TRNG_RDY != (readl(TRNG_CTRL_REG) & TRNG_RDY)) {
+ spin(1);
+ continue;
+ }
+
+ data = readl(TRNG_DATA_REG);
+ memcpy(buf, &data, chunk); /* avoids unaligned/overrun store */
+ retval += chunk;
+ buf += chunk;
+ len -= chunk;
+ }
+
+ writel(0x0, TRNG_CTRL_REG); //stop TRNG
+ /*writel(TRNG_PDN_VALUE, TRNG_PDN_SET); //gate TRNG clock*/
+
+ return retval;
+}
diff --git a/src/bsp/lk/platform/mt8512/drivers/uart/uart.c b/src/bsp/lk/platform/mt8512/drivers/uart/uart.c
new file mode 100644
index 0000000..d15513c
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/uart/uart.c
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <debug.h>
+#include <reg.h>
+#include <dev/uart.h>
+#include <platform/mt_reg_base.h>
+#include <platform/mt_uart.h>
+#include <string.h>
+
+typedef enum {
+ UART1 = UART1_BASE,
+ UART2 = UART2_BASE,
+ UART3 = UART3_BASE,
+ UART4 = UART4_BASE
+} MTK_UART;
+
+/* FCR */
+#define UART_FCR_FIFOE (1 << 0)
+#define UART_FCR_CLRR (1 << 1)
+#define UART_FCR_CLRT (1 << 2)
+#define UART_FCR_DMA1 (1 << 3)
+#define UART_FCR_RXFIFO_1B_TRI (0 << 6)
+#define UART_FCR_RXFIFO_6B_TRI (1 << 6)
+#define UART_FCR_RXFIFO_12B_TRI (2 << 6)
+#define UART_FCR_RXFIFO_RX_TRI (3 << 6)
+#define UART_FCR_TXFIFO_1B_TRI (0 << 4)
+#define UART_FCR_TXFIFO_4B_TRI (1 << 4)
+#define UART_FCR_TXFIFO_8B_TRI (2 << 4)
+#define UART_FCR_TXFIFO_14B_TRI (3 << 4)
+
+#define UART_FCR_FIFO_INIT (UART_FCR_FIFOE|UART_FCR_CLRR|UART_FCR_CLRT)
+#define UART_FCR_NORMAL (UART_FCR_FIFO_INIT | \
+ UART_FCR_TXFIFO_4B_TRI| \
+ UART_FCR_RXFIFO_12B_TRI)
+
+/* LCR */
+#define UART_LCR_BREAK (1 << 6)
+#define UART_LCR_DLAB (1 << 7)
+
+#define UART_WLS_5 (0 << 0)
+#define UART_WLS_6 (1 << 0)
+#define UART_WLS_7 (2 << 0)
+#define UART_WLS_8 (3 << 0)
+#define UART_WLS_MASK (3 << 0)
+
+#define UART_1_STOP (0 << 2)
+#define UART_2_STOP (1 << 2)
+#define UART_1_5_STOP (1 << 2) /* Only when WLS=5 */
+#define UART_STOP_MASK (1 << 2)
+
+#define UART_NONE_PARITY (0 << 3)
+#define UART_ODD_PARITY (0x1 << 3)
+#define UART_EVEN_PARITY (0x3 << 3)
+#define UART_MARK_PARITY (0x5 << 3)
+#define UART_SPACE_PARITY (0x7 << 3)
+#define UART_PARITY_MASK (0x7 << 3)
+
+/* MCR */
+#define UART_MCR_DTR (1 << 0)
+#define UART_MCR_RTS (1 << 1)
+#define UART_MCR_OUT1 (1 << 2)
+#define UART_MCR_OUT2 (1 << 3)
+#define UART_MCR_LOOP (1 << 4)
+#define UART_MCR_XOFF (1 << 7) /* read only */
+#define UART_MCR_NORMAL (UART_MCR_DTR|UART_MCR_RTS)
+
+/* LSR */
+#define UART_LSR_DR (1 << 0)
+#define UART_LSR_OE (1 << 1)
+#define UART_LSR_PE (1 << 2)
+#define UART_LSR_FE (1 << 3)
+#define UART_LSR_BI (1 << 4)
+#define UART_LSR_THRE (1 << 5)
+#define UART_LSR_TEMT (1 << 6)
+#define UART_LSR_FIFOERR (1 << 7)
+
+/* MSR */
+#define UART_MSR_DCTS (1 << 0)
+#define UART_MSR_DDSR (1 << 1)
+#define UART_MSR_TERI (1 << 2)
+#define UART_MSR_DDCD (1 << 3)
+#define UART_MSR_CTS (1 << 4)
+#define UART_MSR_DSR (1 << 5)
+#define UART_MSR_RI (1 << 6)
+#define UART_MSR_DCD (1 << 7)
+
+#define CONFIG_BAUDRATE 921600
+
+#define UART_BASE(uart) (uart)
+
+#define UART_RBR(uart) (UART_BASE(uart)+0x0) /* Read only */
+#define UART_THR(uart) (UART_BASE(uart)+0x0) /* Write only */
+#define UART_IER(uart) (UART_BASE(uart)+0x4)
+#define UART_IIR(uart) (UART_BASE(uart)+0x8) /* Read only */
+#define UART_FCR(uart) (UART_BASE(uart)+0x8) /* Write only */
+#define UART_LCR(uart) (UART_BASE(uart)+0xc)
+#define UART_MCR(uart) (UART_BASE(uart)+0x10)
+#define UART_LSR(uart) (UART_BASE(uart)+0x14)
+#define UART_MSR(uart) (UART_BASE(uart)+0x18)
+#define UART_SCR(uart) (UART_BASE(uart)+0x1c)
+#define UART_DLL(uart) (UART_BASE(uart)+0x0) /* Only when LCR.DLAB = 1 */
+#define UART_DLH(uart) (UART_BASE(uart)+0x4) /* Only when LCR.DLAB = 1 */
+#define UART_EFR(uart) (UART_BASE(uart)+0x8) /* Only when LCR = 0xbf */
+#define UART_XON1(uart) (UART_BASE(uart)+0x10) /* Only when LCR = 0xbf */
+#define UART_XON2(uart) (UART_BASE(uart)+0x14) /* Only when LCR = 0xbf */
+#define UART_XOFF1(uart) (UART_BASE(uart)+0x18) /* Only when LCR = 0xbf */
+#define UART_XOFF2(uart) (UART_BASE(uart)+0x1c) /* Only when LCR = 0xbf */
+#define UART_AUTOBAUD_EN(uart) (UART_BASE(uart)+0x20)
+#define UART_HIGHSPEED(uart) (UART_BASE(uart)+0x24)
+#define UART_SAMPLE_COUNT(uart) (UART_BASE(uart)+0x28)
+#define UART_SAMPLE_POINT(uart) (UART_BASE(uart)+0x2c)
+#define UART_AUTOBAUD_REG(uart) (UART_BASE(uart)+0x30)
+#define UART_RATE_FIX_AD(uart) (UART_BASE(uart)+0x34)
+#define UART_AUTOBAUD_SAMPLE(uart) (UART_BASE(uart)+0x38)
+#define UART_GUARD(uart) (UART_BASE(uart)+0x3c)
+#define UART_ESCAPE_DAT(uart) (UART_BASE(uart)+0x40)
+#define UART_ESCAPE_EN(uart) (UART_BASE(uart)+0x44)
+#define UART_SLEEP_EN(uart) (UART_BASE(uart)+0x48)
+#define UART_VFIFO_EN(uart) (UART_BASE(uart)+0x4c)
+#define UART_RXTRI_AD(uart) (UART_BASE(uart)+0x50)
+
+#define INVAL_UART_BASE 0xFFFFFFFF
+
+// output uart port
+volatile addr_t g_uart = INVAL_UART_BASE;
+
+//extern unsigned int mtk_get_bus_freq(void);
+#if FPGA_PLATFORM
+#define UART_SRC_CLK 12000000
+#else
+#define UART_SRC_CLK 26000000
+#endif
+
+/*
+ * Program the baud-rate registers of the current UART (g_uart) for
+ * CONFIG_BAUDRATE from the UART_SRC_CLK source clock.
+ *
+ * <= 115200 baud: classic 16x oversampling, rounded DLL/DLH divisor.
+ * >  115200 baud: MTK HIGHSPEED=3 mode -- coarse DLL/DLH divider plus
+ * SAMPLE_COUNT/SAMPLE_POINT to place the RX sampling point.
+ *
+ * Fix vs. original: removed the dead store to `divisor` that was
+ * unconditionally overwritten by the following if/else.
+ */
+static void uart_setbrg(void)
+{
+    unsigned int byte, speed;
+    unsigned int highspeed;
+    unsigned int quot, divisor, remainder;
+    unsigned int uartclk;
+    unsigned short data, high_speed_div, sample_count, sample_point;
+    unsigned int tmp_div;
+
+    speed = CONFIG_BAUDRATE;
+    uartclk = UART_SRC_CLK;
+
+    if (speed <= 115200) {
+        highspeed = 0;
+        quot = 16;
+    } else {
+        highspeed = 3;
+        quot = 1;
+    }
+
+    if (highspeed < 3) { /* 0~2 */
+        /* divisor = round(uartclk / (quot * speed)) */
+        divisor = uartclk / (quot * speed);
+        remainder = uartclk % (quot * speed);
+        if (remainder >= (quot / 2) * speed)
+            divisor += 1;
+
+        writel(highspeed, UART_HIGHSPEED(g_uart));
+        byte = readl(UART_LCR(g_uart)); /* DLAB start */
+        writel((byte | UART_LCR_DLAB), UART_LCR(g_uart));
+        writel((divisor & 0x00ff), UART_DLL(g_uart));
+        writel(((divisor >> 8) & 0x00ff), UART_DLH(g_uart));
+        writel(byte, UART_LCR(g_uart)); /* DLAB end */
+    } else {
+        /* coarse divider so that uartclk / high_speed_div fits in
+         * fewer than 256 bit-periods */
+        data = (unsigned short)(uartclk / speed);
+        high_speed_div = (data >> 8) + 1; /* divided by 256 */
+
+        /* sample_count + 1 = round(uartclk / (speed * high_speed_div)) */
+        tmp_div = uartclk / (speed * high_speed_div);
+        remainder = uartclk % (high_speed_div * speed);
+        if (remainder >= ((speed) * (high_speed_div)) >> 1)
+            divisor = (unsigned short)(tmp_div + 1);
+        else
+            divisor = (unsigned short)tmp_div;
+
+        sample_count = divisor - 1;
+        /* sample roughly in the middle of each bit period */
+        sample_point = (sample_count - 1) >> 1;
+
+        writel(highspeed, UART_HIGHSPEED(g_uart));
+        byte = readl(UART_LCR(g_uart)); /* DLAB start */
+        writel((byte | UART_LCR_DLAB), UART_LCR(g_uart));
+        writel((high_speed_div & 0x00ff), UART_DLL(g_uart));
+        writel(((high_speed_div >> 8) & 0x00ff), UART_DLH(g_uart));
+        writel(sample_count, UART_SAMPLE_COUNT(g_uart));
+        writel(sample_point, UART_SAMPLE_POINT(g_uart));
+        writel(byte, UART_LCR(g_uart)); /* DLAB end */
+    }
+}
+
+/* Select which UART block the console helpers in this file operate on. */
+static void mtk_set_current_uart(MTK_UART uart_base)
+{
+    g_uart = (addr_t)uart_base;
+}
+
+/*
+ * Early console init: select UART1, reset/enable the FIFOs, set 8N1
+ * framing and program the baud-rate divisors.
+ */
+void uart_init_early(void)
+{
+    mtk_set_current_uart(UART1);
+    /* enable FIFO + flush RX/TX. Fix: use bitwise OR instead of '+' --
+     * addition would carry into unrelated bits if any of these bits
+     * read back as already set */
+    writel(readl(UART_FCR(g_uart)) | UART_FCR_FIFO_INIT, UART_FCR(g_uart));
+    /* 8 data bits, no parity, 1 stop bit */
+    writel(UART_NONE_PARITY | UART_WLS_8 | UART_1_STOP, UART_LCR(g_uart));
+    uart_setbrg();
+}
+
+#define UART_LSR_TX_READY (UART_LSR_THRE | UART_LSR_TEMT)
+/*
+ * Blocking console output of one character on the current UART.
+ * '\n' is expanded to "\r\n". Returns 0, or -1 if no UART selected.
+ *
+ * Fix vs. original: the transmitter is now polled before *each* byte;
+ * previously the CR and LF of a newline were written back to back after
+ * a single LSR check, which could overrun the TX FIFO.
+ */
+int uart_putc(int port, char ch)
+{
+    if (g_uart == INVAL_UART_BASE)
+        return -1;
+
+    if (ch == '\n') {
+        while ((readl(UART_LSR(g_uart)) & UART_LSR_TX_READY) != UART_LSR_TX_READY)
+            ;
+        writel((unsigned int)'\r', UART_THR(g_uart));
+    }
+
+    while ((readl(UART_LSR(g_uart)) & UART_LSR_TX_READY) != UART_LSR_TX_READY)
+        ;
+    writel((unsigned int)ch, UART_THR(g_uart));
+
+    return 0;
+}
+
+/*
+ * Read one character from the current UART.
+ * If @wait is true, poll until a byte arrives; otherwise return -1
+ * immediately when the RX FIFO is empty.
+ *
+ * BUG FIX: the original condition was inverted -- it read RBR when the
+ * LSR Data Ready bit was *clear* (i.e. returned garbage when no data
+ * was available and never returned real data). Read only when DR is set.
+ */
+int uart_getc(int port, bool wait) /* returns -1 if no data available */
+{
+    do {
+        if (readl(UART_LSR(g_uart)) & UART_LSR_DR)
+            return (int)readl(UART_RBR(g_uart));
+    } while (wait);
+    return -1;
+}
+
+/* Panic-time putc: same polled path as the normal console output. */
+int uart_pputc(int port, char c)
+{
+    return uart_putc(port, c);
+}
+
+/* Panic-time getc: single non-blocking poll of the RX FIFO. */
+int uart_pgetc(int port)
+{
+    return uart_getc(port, false);
+}
+
+/*
+ * True when the byte currently in the receive buffer is a carriage
+ * return (the Enter key). Note: reads RBR unconditionally, matching
+ * the original behavior.
+ */
+bool check_uart_enter(void)
+{
+    return (int)readl(UART_RBR(g_uart)) == '\r';
+}
diff --git a/src/bsp/lk/platform/mt8512/drivers/usb/md1122.c b/src/bsp/lk/platform/mt8512/drivers/usb/md1122.c
new file mode 100644
index 0000000..d8b28d3
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/usb/md1122.c
@@ -0,0 +1,170 @@
+/*
+ * MD1122 usb2.0 phy board for FPGA
+ *
+ * Copyright 2016 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+#include <debug.h>
+
+#include "mtu3_hw_regs.h"
+#include "u3phy-i2c.h"
+
+#define MD1122_I2C_ADDR 0x60
+#define PHY_VERSION_BANK 0x20
+#define PHY_VERSION_ADDR 0xe4
+
+static void *g_ippc_port_addr;
+
+/*
+ * Write one byte to a PHY register via the FPGA i2c bridge.
+ * Note the (data, addr) argument order. Always returns 0.
+ */
+int USB_PHY_Write_Register8(unsigned char data, unsigned char addr)
+{
+    u3phy_write_reg(g_ippc_port_addr, MD1122_I2C_ADDR, addr, data);
+    return 0;
+}
+
+/* Read one byte from a PHY register via the FPGA i2c bridge. */
+unsigned char USB_PHY_Read_Register8(unsigned char addr)
+{
+    return u3phy_read_reg(g_ippc_port_addr, MD1122_I2C_ADDR, addr);
+}
+
+/*
+ * Read the 32-bit PHY version word (bank PHY_VERSION_BANK, offset
+ * PHY_VERSION_ADDR) over the i2c bridge.
+ * NOTE(review): "verison" typo in the name is kept intentionally --
+ * renaming would break existing callers.
+ */
+unsigned int get_phy_verison(void)
+{
+ unsigned int version = 0;
+
+ /* select the version bank; register 0xff appears to be the
+ * bank-select register (see the banked writes in md1122_u3phy_init) */
+ u3phy_write_reg8(g_ippc_port_addr, MD1122_I2C_ADDR, 0xff, PHY_VERSION_BANK);
+
+ version = u3phy_read_reg32(g_ippc_port_addr, MD1122_I2C_ADDR, PHY_VERSION_ADDR);
+ dprintf(ALWAYS, "ssusb phy version: %x %p\n", version, g_ippc_port_addr);
+
+ return version;
+}
+
+
+/*
+ * Initialize the MD1122 USB2.0 PHY daughterboard (FPGA only).
+ *
+ * Verifies the PHY version, then runs the vendor-provided register
+ * sequence. Convention in this sequence: USB_PHY_Write_Register8(data,
+ * addr), and writing to register 0xFF switches the active register
+ * bank (0x00 / 0x10 / 0x40 / 0x60). The surrounding dprintf(INFO)
+ * dumps are debug aids and have no functional effect.
+ *
+ * @i2c_port_base: i2c bridge port base, cached in g_ippc_port_addr.
+ * Returns 0 on success, -1 when the PHY version check fails.
+ */
+int md1122_u3phy_init(void *i2c_port_base)
+{
+ g_ippc_port_addr = i2c_port_base;
+
+ /* sanity check: known-good PHY version before touching anything */
+ if (get_phy_verison() != 0xa60810a) {
+ dprintf(ALWAYS,"get phy version failed\n");
+ return -1;
+ }
+
+ /* usb phy initial sequence */
+ /* --- bank 0x00 --- */
+ USB_PHY_Write_Register8(0x00, 0xFF);
+ dprintf(INFO,"****************before bank 0x00*************************\n");
+ dprintf(INFO,"0x00~0x0F 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x \n",
+ USB_PHY_Read_Register8(0x00),USB_PHY_Read_Register8(0x01),USB_PHY_Read_Register8(0x02),USB_PHY_Read_Register8(0x03),
+ USB_PHY_Read_Register8(0x04),USB_PHY_Read_Register8(0x05),USB_PHY_Read_Register8(0x06),USB_PHY_Read_Register8(0x07),
+ USB_PHY_Read_Register8(0x08),USB_PHY_Read_Register8(0x09),USB_PHY_Read_Register8(0x0A),USB_PHY_Read_Register8(0x0B),
+ USB_PHY_Read_Register8(0x0C),USB_PHY_Read_Register8(0x0D),USB_PHY_Read_Register8(0x0E),USB_PHY_Read_Register8(0x0F));
+ dprintf(INFO,"[U2P]addr: 0xFF, value: %x\n", USB_PHY_Read_Register8(0xFF));
+ dprintf(INFO,"[U2P]addr: 0x05, value: %x\n", USB_PHY_Read_Register8(0x05));
+ dprintf(INFO,"[U2P]addr: 0x18, value: %x\n", USB_PHY_Read_Register8(0x18));
+ dprintf(INFO,"*****************after **********************************\n");
+ USB_PHY_Write_Register8(0x55, 0x05);
+ USB_PHY_Write_Register8(0x84, 0x18);
+
+
+ dprintf(INFO,"[U2P]addr: 0xFF, value: %x\n", USB_PHY_Read_Register8(0xFF));
+ dprintf(INFO,"[U2P]addr: 0x05, value: %x\n", USB_PHY_Read_Register8(0x05));
+ dprintf(INFO,"[U2P]addr: 0x18, value: %x\n", USB_PHY_Read_Register8(0x18));
+ dprintf(INFO,"****************before bank 0x10*************************\n");
+ /* --- bank 0x10 --- */
+ USB_PHY_Write_Register8(0x10, 0xFF);
+ dprintf(INFO,"0x00~0x0F 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x \n",
+ USB_PHY_Read_Register8(0x00),USB_PHY_Read_Register8(0x01),USB_PHY_Read_Register8(0x02),USB_PHY_Read_Register8(0x03),
+ USB_PHY_Read_Register8(0x04),USB_PHY_Read_Register8(0x05),USB_PHY_Read_Register8(0x06),USB_PHY_Read_Register8(0x07),
+ USB_PHY_Read_Register8(0x08),USB_PHY_Read_Register8(0x09),USB_PHY_Read_Register8(0x0A),USB_PHY_Read_Register8(0x0B),
+ USB_PHY_Read_Register8(0x0C),USB_PHY_Read_Register8(0x0D),USB_PHY_Read_Register8(0x0E),USB_PHY_Read_Register8(0x0F));
+ dprintf(INFO,"[U2P]addr: 0xFF, value: %x\n", USB_PHY_Read_Register8(0xFF));
+ dprintf(INFO,"[U2P]addr: 0x0A, value: %x\n", USB_PHY_Read_Register8(0x0A));
+ dprintf(INFO,"*****************after **********************************\n");
+
+ USB_PHY_Write_Register8(0x84, 0x0A);
+
+ dprintf(INFO,"[U2P]addr: 0xFF, value: %x\n", USB_PHY_Read_Register8(0xFF));
+ dprintf(INFO,"[U2P]addr: 0x0A, value: %x\n", USB_PHY_Read_Register8(0x0A));
+ dprintf(INFO,"****************before bank 0x40*************************\n");
+ /* --- bank 0x40 --- */
+ USB_PHY_Write_Register8(0x40, 0xFF);
+ dprintf(INFO,"0x00~0x0F 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x \n",
+ USB_PHY_Read_Register8(0x00),USB_PHY_Read_Register8(0x01),USB_PHY_Read_Register8(0x02),USB_PHY_Read_Register8(0x03),
+ USB_PHY_Read_Register8(0x04),USB_PHY_Read_Register8(0x05),USB_PHY_Read_Register8(0x06),USB_PHY_Read_Register8(0x07),
+ USB_PHY_Read_Register8(0x08),USB_PHY_Read_Register8(0x09),USB_PHY_Read_Register8(0x0A),USB_PHY_Read_Register8(0x0B),
+ USB_PHY_Read_Register8(0x0C),USB_PHY_Read_Register8(0x0D),USB_PHY_Read_Register8(0x0E),USB_PHY_Read_Register8(0x0F));
+ dprintf(INFO,"[U2P]addr: 0xFF, value: %x\n", USB_PHY_Read_Register8(0xFF));
+ dprintf(INFO,"[U2P]addr: 0x38, value: %x\n", USB_PHY_Read_Register8(0x38));
+ dprintf(INFO,"[U2P]addr: 0x42, value: %x\n", USB_PHY_Read_Register8(0x42));
+ dprintf(INFO,"[U2P]addr: 0x08, value: %x\n", USB_PHY_Read_Register8(0x08));
+ dprintf(INFO,"[U2P]addr: 0x09, value: %x\n", USB_PHY_Read_Register8(0x09));
+ dprintf(INFO,"[U2P]addr: 0x0C, value: %x\n", USB_PHY_Read_Register8(0x0C));
+ dprintf(INFO,"[U2P]addr: 0x0E, value: %x\n", USB_PHY_Read_Register8(0x0E));
+ dprintf(INFO,"[U2P]addr: 0x10, value: %x\n", USB_PHY_Read_Register8(0x10));
+ dprintf(INFO,"[U2P]addr: 0x14, value: %x\n", USB_PHY_Read_Register8(0x14));
+ dprintf(INFO,"*****************after **********************************\n");
+
+ USB_PHY_Write_Register8(0x46, 0x38);
+ USB_PHY_Write_Register8(0x40, 0x42);
+ USB_PHY_Write_Register8(0xAB, 0x08);
+ USB_PHY_Write_Register8(0x0C, 0x09);
+ USB_PHY_Write_Register8(0x71, 0x0C);
+ USB_PHY_Write_Register8(0x4F, 0x0E);
+ USB_PHY_Write_Register8(0xE1, 0x10);
+ USB_PHY_Write_Register8(0x5F, 0x14);
+ dprintf(INFO,"[U2P]addr: 0xFF, value: %x\n", USB_PHY_Read_Register8(0xFF));
+ dprintf(INFO,"[U2P]addr: 0x38, value: %x\n", USB_PHY_Read_Register8(0x38));
+ dprintf(INFO,"[U2P]addr: 0x42, value: %x\n", USB_PHY_Read_Register8(0x42));
+ dprintf(INFO,"[U2P]addr: 0x08, value: %x\n", USB_PHY_Read_Register8(0x08));
+ dprintf(INFO,"[U2P]addr: 0x09, value: %x\n", USB_PHY_Read_Register8(0x09));
+ dprintf(INFO,"[U2P]addr: 0x0C, value: %x\n", USB_PHY_Read_Register8(0x0C));
+ dprintf(INFO,"[U2P]addr: 0x0E, value: %x\n", USB_PHY_Read_Register8(0x0E));
+ dprintf(INFO,"[U2P]addr: 0x10, value: %x\n", USB_PHY_Read_Register8(0x10));
+ dprintf(INFO,"[U2P]addr: 0x14, value: %x\n", USB_PHY_Read_Register8(0x14));
+ dprintf(INFO,"****************before bank 0x60*************************\n");
+ /* --- bank 0x60 --- */
+ USB_PHY_Write_Register8(0x60, 0xFF);
+ dprintf(INFO,"0x00~0x0F 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x \n",
+ USB_PHY_Read_Register8(0x00),USB_PHY_Read_Register8(0x01),USB_PHY_Read_Register8(0x02),USB_PHY_Read_Register8(0x03),
+ USB_PHY_Read_Register8(0x04),USB_PHY_Read_Register8(0x05),USB_PHY_Read_Register8(0x06),USB_PHY_Read_Register8(0x07),
+ USB_PHY_Read_Register8(0x08),USB_PHY_Read_Register8(0x09),USB_PHY_Read_Register8(0x0A),USB_PHY_Read_Register8(0x0B),
+ USB_PHY_Read_Register8(0x0C),USB_PHY_Read_Register8(0x0D),USB_PHY_Read_Register8(0x0E),USB_PHY_Read_Register8(0x0F));
+ dprintf(INFO,"[U2P]addr: 0xFF, value: %x\n", USB_PHY_Read_Register8(0xFF));
+ dprintf(INFO,"[U2P]addr: 0x10, value: %x\n", USB_PHY_Read_Register8(0x14));
+ dprintf(INFO,"*****************after **********************************\n");
+
+ USB_PHY_Write_Register8(0x03, 0x14);
+ dprintf(INFO,"[U2P]addr: 0xFF, value: %x\n", USB_PHY_Read_Register8(0xFF));
+ dprintf(INFO,"[U2P]addr: 0x10, value: %x\n", USB_PHY_Read_Register8(0x14));
+ dprintf(INFO,"****************before bank 0x00*************************\n");
+ /* --- back to bank 0x00 --- */
+ USB_PHY_Write_Register8(0x00, 0xFF);
+ dprintf(INFO,"[U2P]addr: 0xFF, value: %x\n", USB_PHY_Read_Register8(0xFF));
+ dprintf(INFO,"[U2P]addr: 0x6A, value: %x\n", USB_PHY_Read_Register8(0x6A));
+ dprintf(INFO,"[U2P]addr: 0x68, value: %x\n", USB_PHY_Read_Register8(0x68));
+ dprintf(INFO,"[U2P]addr: 0x6C, value: %x\n", USB_PHY_Read_Register8(0x6C));
+ dprintf(INFO,"[U2P]addr: 0x6D, value: %x\n", USB_PHY_Read_Register8(0x6D));
+ USB_PHY_Write_Register8(0x04, 0x6A);
+ USB_PHY_Write_Register8(0x08, 0x68);
+ USB_PHY_Write_Register8(0x26, 0x6C);
+ USB_PHY_Write_Register8(0x36, 0x6D);
+ dprintf(INFO,"*****************after **********************************\n");
+ dprintf(INFO,"[U2P]addr: 0xFF, value: %x\n", USB_PHY_Read_Register8(0xFF));
+ dprintf(INFO,"[U2P]addr: 0x6A, value: %x\n", USB_PHY_Read_Register8(0x6A));
+ dprintf(INFO,"[U2P]addr: 0x68, value: %x\n", USB_PHY_Read_Register8(0x68));
+ dprintf(INFO,"[U2P]addr: 0x6C, value: %x\n", USB_PHY_Read_Register8(0x6C));
+ dprintf(INFO,"[U2P]addr: 0x6D, value: %x\n", USB_PHY_Read_Register8(0x6D));
+
+ dprintf(INFO,"[U2P]%s, end\n", __func__);
+ return 0;
+}
+
+void mt_usb_phy_poweron(void)
+{
+    /* FPGA only: bring up the MD1122 daughterboard PHY via i2c port 0 */
+    md1122_u3phy_init((void *)U3D_SSUSB_FPGA_I2C_OUT_0P);
+}
+
+void mt_usb_phy_poweroff(void)
+{
+    /* nothing to power down on the FPGA PHY board */
+}
diff --git a/src/bsp/lk/platform/mt8512/drivers/usb/mtu3.c b/src/bsp/lk/platform/mt8512/drivers/usb/mtu3.c
new file mode 100644
index 0000000..ee204ea
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/usb/mtu3.c
@@ -0,0 +1,1793 @@
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <arch/ops.h>
+#include <debug.h>
+#include <errno.h>
+#include <lib/mempool.h>
+#include <platform.h>
+#include <platform/interrupts.h>
+#include <platform/mt_irq.h>
+#include <platform/mt_usbphy.h>
+#include <platform/reg_utils.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+
+#include "mtu3.h"
+#include "mtu3_qmu.h"
+
+#pragma GCC push_options
+#pragma GCC optimize("O1")
+#undef INFO
+#define INFO 2
+#define SPEW 2
+
+#define DBG_C(x...) dprintf(CRITICAL, "[USB] " x)
+/* use compile option DEBUG=1 to enable following logs */
+#define DBG_I(x...) dprintf(INFO, "[USB] " x)
+#define DBG_S(x...) dprintf(SPEW, "[USB] " x)
+
+/* bits used in ep interrupts etc */
+#define EPT_RX(n) (1 << ((n) + 16))
+#define EPT_TX(n) (1 << (n))
+
+#define EP0 0
+
+/* Request types */
+#define USB_TYPE_STANDARD (0x00 << 5)
+#define USB_TYPE_CLASS (0x01 << 5)
+#define USB_TYPE_VENDOR (0x02 << 5)
+#define USB_TYPE_RESERVED (0x03 << 5)
+
+/* values used in GET_STATUS requests */
+#define USB_STAT_SELFPOWERED 0x01
+
+/* USB recipients */
+#define USB_RECIP_DEVICE 0x00
+#define USB_RECIP_INTERFACE 0x01
+#define USB_RECIP_ENDPOINT 0x02
+#define USB_RECIP_OTHER 0x03
+
+/* Endpoints */
+#define USB_EP_NUM_MASK 0x0f /* in bEndpointAddress */
+#define USB_EP_DIR_MASK 0x80
+
+#define USB_TYPE_MASK 0x60
+#define USB_RECIP_MASK 0x1f
+
+static struct mu3d g_u3d;
+
+
+/* Debug helper: print the fields of a ch9 SETUP packet, one per line. */
+static void dump_setup_packet(const char *str, struct usb_setup *sp)
+{
+ DBG_I("\n");
+ DBG_I("%s", str);
+ DBG_I(" bmRequestType = %x\n", sp->request_type);
+ DBG_I(" bRequest = %x\n", sp->request);
+ DBG_I(" wValue = %x\n", sp->value);
+ DBG_I(" wIndex = %x\n", sp->index);
+ DBG_I(" wLength = %x\n", sp->length);
+}
+
+/* Zeroed allocation from the boot mempool; NULL when exhausted. */
+static void *udc_zalloc(size_t size)
+{
+    void *p = mempool_alloc(size, MEMPOOL_ANY);
+
+    if (p != NULL)
+        memset(p, 0, size);
+    return p;
+}
+
+/* descriptors APIs */
+
+/*
+ * Patch the wMaxPacketSize of each bulk endpoint descriptor inside the
+ * HS configuration descriptor down to 64 bytes when the link came up
+ * at full-speed (default maxp in the stored descriptor is 512).
+ * Layout assumption: config(9B) + interface(9B), then 7-byte endpoint
+ * descriptors back to back -- matches udc_ifc_desc_fill(DESC_TYPE_U2).
+ */
+static void change_eps_maxp(struct udc_descriptor *desc, u8 *buf, int len)
+{
+ struct udc_gadget *g = g_u3d.gadget;
+ int n;
+
+ /* only supports HS/FS bulk, default maxp of all eps is 512, change it to 64 when FS; */
+ if ((g_u3d.speed == SSUSB_SPEED_FULL) &&
+ (desc->tag == ((CONFIGURATION << 8) | EP0))) {
+
+ buf += (9 + 9); /* size of (config + interface) */
+ for (n = 0; n < g->ifc_endpoints; n++) {
+ /* bounds check: 5 = offset of maxp-high within this ep desc */
+ if ((9 + 9 + 5 + (7 * n)) >= len)
+ break;
+
+ buf[4] = 0x40; /* wMaxPacketSize low: 64 bytes */
+ buf[5] = 0x00; /* wMaxPacketSize high */
+ buf += 7;
+ }
+ }
+}
+
+/* Stage a descriptor into the request buffer for the EP0 data phase. */
+static void copy_desc(struct udc_request *req, void *data, int length)
+{
+    req->length = length;
+    memcpy(req->buffer, data, length);
+}
+
+/*
+ * Allocate a descriptor node with @len bytes of payload; the first two
+ * payload bytes are pre-filled with bLength/bDescriptorType.
+ * Returns NULL on bad arguments or allocation failure.
+ */
+static struct udc_descriptor *udc_descriptor_alloc(unsigned int type,
+        unsigned int num, unsigned int len)
+{
+    struct udc_descriptor *d;
+
+    /* type/num/len must each fit in one byte; len covers len+type too */
+    if (len < 2 || len > 255 || num > 255 || type > 255) {
+        DBG_C("%s invalid argument\n", __func__);
+        return NULL;
+    }
+
+    d = udc_zalloc(sizeof(struct udc_descriptor) + len);
+    if (d == NULL) {
+        DBG_C("alloc desc failed (type:%d)\n", type);
+        return NULL;
+    }
+
+    d->next = 0;
+    d->tag = (type << 8) | num;
+    d->len = len;
+    d->data[0] = len;  /* bLength */
+    d->data[1] = type; /* bDescriptorType */
+
+    return d;
+}
+
+/* Push @desc onto the head of the HS/FS or SS descriptor list. */
+static void udc_descriptor_register(struct udc_descriptor *desc, int dtype)
+{
+    struct udc_descriptor **head =
+        (dtype == DESC_TYPE_U2) ? &g_u3d.desc_list : &g_u3d.desc_list_u3;
+
+    desc->next = *head;
+    *head = desc;
+}
+
+/*
+ * Allocate and register a UTF-16LE string descriptor for @str.
+ * Returns the assigned string index, or 0 on failure / NULL input /
+ * index space exhausted (index 0 is reserved for the LANGID table).
+ */
+static unsigned int udc_string_desc_alloc(const char *str, int dtype)
+{
+    struct udc_descriptor *desc;
+    unsigned int *next_id;
+    unsigned char *utf;
+    unsigned int n;
+
+    next_id = (dtype == DESC_TYPE_U2) ? &g_u3d.next_string_id
+                                      : &g_u3d.next_string_id_u3;
+
+    if (*next_id > 255 || str == NULL)
+        return 0;
+
+    n = strlen(str);
+    desc = udc_descriptor_alloc(STRING, *next_id, n * 2 + 2);
+    if (desc == NULL)
+        return 0;
+
+    *next_id += 1;
+
+    /* widen ASCII to UTF-16LE: each char followed by a 0x00 byte */
+    utf = desc->data + 2;
+    while (n-- > 0) {
+        *utf++ = *str++;
+        *utf++ = 0;
+    }
+
+    udc_descriptor_register(desc, dtype);
+    return desc->tag & 0xff;
+}
+
+/* Fill a 7-byte bulk endpoint descriptor (maxp 512 for U2, 1024 for U3). */
+static void udc_ept_desc_fill(struct udc_endpoint *ept, unsigned char *data, int dtype)
+{
+    data[0] = 7;                                      /* bLength */
+    data[1] = ENDPOINT;                               /* bDescriptorType */
+    data[2] = ept->num | (ept->in ? USB_DIR_IN : USB_DIR_OUT);
+    data[3] = 0x02;                                   /* bulk -- the only kind we support */
+    data[4] = 0x00;                                   /* wMaxPacketSize low */
+    data[5] = (dtype == DESC_TYPE_U2) ? 0x02 : 0x04;  /* wMaxPacketSize high */
+    data[6] = ept->in ? 0x01 : 0x00;                  /* bInterval */
+}
+
+/* Fill a 6-byte SuperSpeed endpoint companion descriptor. */
+static void udc_companion_desc_fill(uint8_t *data)
+{
+    memset(data, 0, 6);      /* bmAttributes / wBytesPerInterval zeroed */
+    data[0] = 6;             /* bLength */
+    data[1] = SS_EP_COMP;    /* bDescriptorType */
+    data[2] = 0x0f;          /* bMaxBurst: 0x0~0xf */
+}
+
+/*
+ * Bytes needed for the interface descriptor block: 9-byte interface
+ * header plus, per endpoint, a 7-byte ep descriptor (U3 adds a 6-byte
+ * companion descriptor).
+ */
+static unsigned int udc_ifc_desc_size(struct udc_gadget *g, int dtype)
+{
+    unsigned int per_ep = (dtype == DESC_TYPE_U2) ? 7 : 7 + 6;
+
+    return 9 + g->ifc_endpoints * per_ep;
+}
+
+/*
+ * Fill the interface descriptor followed by the per-endpoint
+ * descriptor(s); layout must match udc_ifc_desc_size().
+ */
+static void udc_ifc_desc_fill(struct udc_gadget *g, unsigned char *data, int dtype)
+{
+    unsigned int i;
+
+    data[0] = 0x09;
+    data[1] = INTERFACE;
+    data[2] = 0x00;             /* bInterfaceNumber */
+    data[3] = 0x00;             /* bAlternateSetting */
+    data[4] = g->ifc_endpoints; /* 0x02 */
+    data[5] = g->ifc_class;     /* 0xff */
+    data[6] = g->ifc_subclass;  /* 0x42 */
+    data[7] = g->ifc_protocol;  /* 0x03 */
+    data[8] = udc_string_desc_alloc(g->ifc_string, dtype);
+
+    data += 9;
+    for (i = 0; i < g->ifc_endpoints; i++) {
+        udc_ept_desc_fill(g->ept[i], data, dtype);
+        data += 7;
+        if (dtype == DESC_TYPE_U3) {
+            udc_companion_desc_fill(data);
+            data += 6;
+        }
+    }
+}
+
+/* create our device descriptor */
+/*
+ * Build and register the 18-byte standard device descriptor for the
+ * HS/FS (DESC_TYPE_U2) or SS (DESC_TYPE_U3) set.
+ * Returns 0, or -ENOMEM on allocation failure.
+ */
+static int udc_create_dev_desc(struct udc_device *udev, int dtype)
+{
+    struct udc_descriptor *desc = udc_descriptor_alloc(DEVICE, EP0, 18);
+    unsigned char *d;
+
+    if (desc == NULL)
+        return -ENOMEM;
+
+    d = desc->data;
+    d[2] = 0x00;                                  /* bcdUSB minor */
+    d[3] = (dtype == DESC_TYPE_U2) ? 0x02 : 0x03; /* bcdUSB major */
+    d[4] = 0x00;                                  /* bDeviceClass */
+    d[5] = 0x00;                                  /* bDeviceSubClass */
+    d[6] = 0x00;                                  /* bDeviceProtocol */
+    d[7] = (dtype == DESC_TYPE_U2) ? 0x40 : 0x09; /* bMaxPacketSize0 */
+    /* IDs stored as-is -- assumes little-endian CPU; TODO confirm */
+    memcpy(d + 8, &udev->vendor_id, sizeof(short));
+    memcpy(d + 10, &udev->product_id, sizeof(short));
+    memcpy(d + 12, &udev->version_id, sizeof(short));
+    d[14] = udc_string_desc_alloc(udev->manufacturer, dtype);
+    d[15] = udc_string_desc_alloc(udev->product, dtype);
+    d[16] = udc_string_desc_alloc(udev->serialno, dtype);
+    d[17] = 1;                                    /* bNumConfigurations */
+
+    udc_descriptor_register(desc, dtype);
+    return 0;
+}
+
+/*
+ * Create and register the BOS (Binary Device Object Store) descriptor
+ * for the SS descriptor set: 5-byte BOS header + USB 2.0 Extension
+ * capability (7B) + SuperSpeed capability (10B) = 22 bytes total.
+ * Returns 0, or -ENOMEM on allocation failure.
+ */
+static int udc_create_u3_bos_desc(void)
+{
+ struct udc_descriptor *desc;
+ unsigned char *data;
+
+ desc = udc_descriptor_alloc(BOS, EP0, 22);
+ if (!desc)
+ return -ENOMEM;
+
+ data = desc->data;
+ data[0] = 0x05; /* bLength of BOS Header */
+ data[2] = 0x16; /* wTotalLength[0]: 22 bytes */
+ data[3] = 0x00; /* wTotalLength[1] */
+ data[4] = 0x02; /* bNumDeviceCaps: number of separate device*/
+ /* capability descriptors in BOS */
+
+ /* capability 1: USB 2.0 Extension */
+ data[5] = 0x07; /* bLength: 7 */
+ data[6] = 0x10; /* bDescriptorType: DEVICE CAPABILITY */
+ data[7] = 0x02; /* bDevCapabilityType: USB 2.0 Ext Descriptor */
+ data[8] = 0x02; /* bmAttributes bit1: LPM capable */
+ data[9] = 0x00;
+ data[10] = 0x00;
+ data[11] = 0x00;
+
+ /* capability 2: SuperSpeed USB */
+ data[12] = 0x0A; /* bLength: 10 */
+ data[13] = 0x10; /* bDescriptorType: DEVICE CAPABILITY */
+ data[14] = 0x03; /* bDevCapabilityType: SuperSpeed */
+ data[15] = 0x00; /* bmAttributes: Don't support LTM */
+ data[16] = 0x0E; /* wSpeedsSupported[0]: b'1110 = FS/HS/SS */
+ data[17] = 0x00; /* wSpeedsSupported[1] */
+ data[18] = 0x01; /* bFunctionalitySupport */
+ data[19] = 0x0A; /* bU1DevExitLat: Less than 10us */
+ data[20] = 0x20; /* wU2DevExitLat[0]: 32us */
+ data[21] = 0x00; /* wU2DevExitLat[1] */
+
+ udc_descriptor_register(desc, DESC_TYPE_U3);
+ return 0;
+}
+
+/*
+ * Create and register the configuration descriptor (config header +
+ * interface block) for the given descriptor set.
+ * Returns 0, or -ENOMEM on allocation failure.
+ */
+static int udc_create_config_desc(struct udc_gadget *gadget, int dtype)
+{
+ struct udc_descriptor *desc;
+ unsigned char *data;
+ unsigned int size;
+
+ /* create our configuration descriptor */
+ size = 9 + udc_ifc_desc_size(gadget, dtype);
+ desc = udc_descriptor_alloc(CONFIGURATION, EP0, size);
+ if (!desc)
+ return -ENOMEM;
+
+ data = desc->data;
+ data[0] = 0x09;
+ data[2] = size; /* wTotalLength low */
+ data[3] = size >> 8; /* wTotalLength high */
+ data[4] = 0x01; /* number of interfaces */
+ data[5] = 0x01; /* configuration value */
+ data[6] = 0x00; /* configuration string */
+ data[7] = 0x80; /* attributes: bus-powered */
+ /* bMaxPower: u2 0x80 in 2mA units (256mA), u3 0x32 in 8mA units (400mA) */
+ data[8] = (dtype == DESC_TYPE_U2) ? 0x80 : 0x32;
+
+ udc_ifc_desc_fill(gadget, data + 9, dtype);
+ udc_descriptor_register(desc, dtype);
+ return 0;
+}
+
+/*
+ * String descriptor 0: the table of supported LANGIDs.
+ * Only 0x0409 (US English) is advertised.
+ */
+static int udc_create_language_table_desc(int dtype)
+{
+    struct udc_descriptor *desc = udc_descriptor_alloc(STRING, EP0, 4);
+
+    if (desc == NULL)
+        return -ENOMEM;
+
+    desc->data[2] = 0x09; /* LANGID 0x0409, little-endian */
+    desc->data[3] = 0x04;
+    udc_descriptor_register(desc, dtype);
+    return 0;
+}
+
+/*
+ * Build the full descriptor sets: HS/FS always, plus the SS set (device,
+ * BOS, config, strings) when the controller is a U3-capable IP.
+ *
+ * Fix vs. original: the return values of the udc_create_* helpers were
+ * ignored, so an -ENOMEM was silently dropped; errors now propagate.
+ * The '#if 0' descriptor-dump dead code was removed.
+ */
+static int udc_create_descriptors(struct udc_device *udev, struct udc_gadget *gadget)
+{
+    int ret;
+
+    ret = udc_create_dev_desc(udev, DESC_TYPE_U2);
+    if (ret)
+        return ret;
+    ret = udc_create_config_desc(gadget, DESC_TYPE_U2);
+    if (ret)
+        return ret;
+    ret = udc_create_language_table_desc(DESC_TYPE_U2);
+    if (ret)
+        return ret;
+
+    if (g_u3d.is_u3_ip) {
+        ret = udc_create_dev_desc(udev, DESC_TYPE_U3);
+        if (ret)
+            return ret;
+        ret = udc_create_u3_bos_desc();
+        if (ret)
+            return ret;
+        ret = udc_create_config_desc(gadget, DESC_TYPE_U3);
+        if (ret)
+            return ret;
+        ret = udc_create_language_table_desc(DESC_TYPE_U3);
+        if (ret)
+            return ret;
+    }
+
+    return 0;
+}
+
+/* hardware access APIs */
+
+/*
+ * Poll @addr until (value & msk) == @value, sleeping @us_intvl between
+ * polls, for at most @count iterations.
+ * Returns 0 on match, -ETIMEDOUT otherwise.
+ */
+int wait_for_value(paddr_t addr, u32 msk, u32 value, int us_intvl, int count)
+{
+    while (count-- > 0) {
+        if ((readl(addr) & msk) == value)
+            return 0;
+        spin(us_intvl);
+    }
+
+    return -ETIMEDOUT;
+}
+
+/* Write @count 32-bit words from @buffer to the same (FIFO) address. */
+static inline void writel_rep(volatile void *addr, const void *buffer,
+                              unsigned int count)
+{
+    const u32 *src = buffer;
+
+    while (count--)
+        writel(*src++, addr);
+}
+
+/* Read @count 32-bit words from the same (FIFO) address into @buffer. */
+static inline void readl_rep(const volatile void *addr, void *buffer,
+                             unsigned int count)
+{
+    u32 *dst = buffer;
+
+    while (count--)
+        *dst++ = readl(addr);
+}
+
+/*
+ * PIO-drain @len bytes from the endpoint FIFO into @dst using 32-bit
+ * reads; a final partial word is copied out byte-wise via memcpy.
+ * Returns @len.
+ */
+static int pio_read_fifo(int ep_num, u8 *dst, u16 len)
+{
+    void *fifo = (void *)(paddr_t)(USB_FIFO(ep_num));
+    u32 done = 0;
+    u32 last;
+
+    if (len >= 4) {
+        readl_rep(fifo, dst, len >> 2);
+        done = len & ~0x03;
+    }
+    if (len & 0x3) {
+        last = readl(fifo);
+        memcpy(&dst[done], &last, len & 0x3);
+    }
+
+    DBG_I("%s - ep_num: %d, len: %d, dst: %p\n",
+          __func__, ep_num, len, dst);
+
+    return len;
+}
+
+/*
+ * PIO-push @len bytes from @src into the endpoint FIFO: bulk of the
+ * data as 32-bit words, then a 16-bit and/or 8-bit tail.
+ *
+ * Fix vs. original: the 16-bit tail used *(u16 *)&src[index], which is
+ * an unaligned (and strict-aliasing-violating) load when src+index is
+ * odd; the halfword is now assembled with memcpy.
+ */
+static void pio_write_fifo(int ep_num, u8 *src, u16 len)
+{
+    void *fifo = (void *)(paddr_t)(USB_FIFO(ep_num));
+    u32 index = 0;
+    u16 tail16;
+
+    DBG_I("%s - ep_num: %d, len: %d, src: %p\n",
+          __func__, ep_num, len, src);
+
+    if (len >= 4) {
+        writel_rep(fifo, src, len >> 2);
+        index = len & ~0x03;
+    }
+    if (len & 0x02) {
+        memcpy(&tail16, &src[index], sizeof(tail16));
+        writew(tail16, fifo);
+        index += 2;
+    }
+    if (len & 0x01)
+        writeb(src[index], fifo);
+}
+
+/*
+ * Poll the IP power status registers until the sys125/ref clocks (and,
+ * on U3-capable IPs, the U3 MAC) plus the U2 MAC report out of reset.
+ * Returns 0 when stable, or the -ETIMEDOUT from wait_for_value().
+ */
+static int mu3d_check_clk_sts(void)
+{
+ u32 check_val;
+ int ret = 0;
+
+ check_val = SSUSB_SYS125_RST_B_STS | SSUSB_SYSPLL_STABLE |
+ SSUSB_REF_RST_B_STS;
+ if (g_u3d.is_u3_ip)
+ check_val |= SSUSB_U3_MAC_RST_B_STS;
+
+ /* up to 100 polls x 100us each */
+ ret = wait_for_value(U3D_SSUSB_IP_PW_STS1, check_val, check_val, 100, 100);
+ if (ret) {
+ DBG_C("SSUSB_SYS125_RST_B_STS NG\n");
+ goto err;
+ } else {
+ DBG_I("clk sys125:OK\n");
+ }
+
+ ret = wait_for_value(U3D_SSUSB_IP_PW_STS2, SSUSB_U2_MAC_SYS_RST_B_STS,
+ SSUSB_U2_MAC_SYS_RST_B_STS, 100, 100);
+ if (ret) {
+ DBG_C("SSUSB_U2_MAC_SYS_RST_B_STS NG\n");
+ goto err;
+ } else {
+ DBG_I("clk mac2:OK\n");
+ }
+
+ return 0;
+
+err:
+ DBG_C("Refer clocks stability check failed!\n");
+ return ret;
+}
+
+/*
+ * Power up the SSUSB IP: release IP-level reset/power-down and hand
+ * the U2 (and, if present, U3) port to device mode, then wait for the
+ * clocks to stabilize.
+ */
+static void mu3d_ssusb_enable(void)
+{
+    clrbits32_r(SSUSB_IP_SW_RST, U3D_SSUSB_IP_PW_CTRL0);
+    clrbits32_r(SSUSB_IP_DEV_PDN, U3D_SSUSB_IP_PW_CTRL2);
+    clrbits32_r(SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN |
+                SSUSB_U2_PORT_HOST_SEL, U3D_SSUSB_U2_CTRL_0P);
+    if (g_u3d.is_u3_ip)
+        clrbits32_r(SSUSB_U3_PORT_DIS | SSUSB_U3_PORT_PDN |
+                    SSUSB_U3_PORT_HOST_SEL, U3D_SSUSB_U3_CTRL_0P);
+
+    /* return value intentionally ignored (as before); failures are
+     * already logged inside mu3d_check_clk_sts() */
+    mu3d_check_clk_sts();
+}
+
+/* Enable/disable the U3D SuperSpeed function. */
+static void mu3d_ss_func_set(bool enable)
+{
+    /* with USB3_EN cleared, the LTSSM goes to the SS.Disable state */
+    if (enable)
+        setbits32_r(USB3_EN, U3D_USB3_CONFIG);
+    else
+        clrbits32_r(USB3_EN, U3D_USB3_CONFIG);
+
+    DBG_C("U3 pullup D%s\n", enable ? "+" : "-");
+}
+
+/* set/clear U3D HS device soft connect */
+static void mu3d_hs_softconn_set(bool enable)
+{
+ if (enable)
+ setbits32_r(SOFT_CONN | SUSPENDM_ENABLE, U3D_POWER_MANAGEMENT);
+ else
+ clrbits32_r(SOFT_CONN | SUSPENDM_ENABLE, U3D_POWER_MANAGEMENT);
+
+ DBG_C("U2 pullup D%s\n", enable ? "+" : "-");
+}
+
+static void mu3d_soft_connect(void)
+{
+ if (g_u3d.is_u3_ip && g_u3d.speed > SSUSB_SPEED_HIGH)
+ mu3d_ss_func_set(true);
+ else
+ mu3d_hs_softconn_set(true);
+}
+
+static void mu3d_soft_disconnect(void)
+{
+ if (g_u3d.is_u3_ip && g_u3d.speed > SSUSB_SPEED_HIGH)
+ mu3d_ss_func_set(false);
+ else
+ mu3d_hs_softconn_set(false);
+}
+
/* Pulse the device-level software reset (assert then release). */
static void mu3d_dev_reset(void)
{
	setbits32_r(SSUSB_DEV_SW_RST, U3D_SSUSB_DEV_RST_CTRL);
	clrbits32_r(SSUSB_DEV_SW_RST, U3D_SSUSB_DEV_RST_CTRL);
}
+
/*
 * Enable the interrupt sources this driver services: level-1 routing,
 * U2 common USB events, U3 LTSSM events (U3 IP only), QMU error
 * events, and the device speed-change event. Also forces VBUS valid
 * so the MAC proceeds without a real VBUS comparator.
 */
static void mu3d_intr_enable(void)
{
	u32 value;

	/* enable LV1 ISR */
	value = BMU_INTR | QMU_INTR | MAC3_INTR | MAC2_INTR | EP_CTRL_INTR;
	writel(value, U3D_LV1IESR);
	/* enable U2 common interrupts */
	value = SUSPEND_INTR | RESUME_INTR | RESET_INTR;
	writel(value, U3D_COMMON_USB_INTR_ENABLE);

	/* drop session control, then force VBUS on (no VBUS sensing here) */
	clrbits32_r(DC_SESSION, U3D_DEVICE_CONTROL);
	setbits32_r(VBUS_FRC_EN | VBUS_ON, U3D_MISC_CTRL);

	/* Enable U3 LTSSM interrupts */
	if (g_u3d.is_u3_ip) {
		value = HOT_RST_INTR | WARM_RST_INTR | VBUS_RISE_INTR |
			VBUS_FALL_INTR | ENTER_U3_INTR | EXIT_U3_INTR;
		writel(value, U3D_LTSSM_INTR_ENABLE);
	}

	/* Enable QMU error interrupts (checksum/length/ZLP). */
	value = TXQ_CSERR_INT | TXQ_LENERR_INT | RXQ_CSERR_INT |
		RXQ_LENERR_INT | RXQ_ZLPERR_INT;
	writel(value, U3D_QIESR1);
	/* Enable speed change interrupt */
	writel(SSUSB_DEV_SPEED_CHG_INTR, U3D_DEV_LINK_INTR_ENABLE);
}
+
/*
 * Quiesce the controller: acknowledge any latched interrupt status
 * (the *ISR/*IR registers appear to be write-1-to-clear) and clear
 * the corresponding enable bits via the *IECR/*ECR clear registers.
 */
static void mu3d_intr_disable(void)
{
	/* ack latched status */
	writel(0xffffffff, U3D_EPISR);
	writel(0xffffffff, U3D_QISAR0);
	writel(0xffffffff, U3D_QISAR1);
	writel(0xffffffff, U3D_TQERRIR0);
	writel(0xffffffff, U3D_RQERRIR0);
	writel(0xffffffff, U3D_RQERRIR1);
	writel(0xffffffff, U3D_LV1IECR);
	writel(0xffffffff, U3D_EPIECR);

	/* clear registers */
	writel(0xffffffff, U3D_QIECR0);
	writel(0xffffffff, U3D_QIECR1);
	writel(0xffffffff, U3D_TQERRIECR0);
	writel(0xffffffff, U3D_RQERRIECR0);
	writel(0xffffffff, U3D_RQERRIECR1);
	writel(0xffffffff, U3D_COMMON_USB_INTR);
}
+
/*
 * Program controller defaults before connecting: disable LPM link
 * power states, let HW handle U3-exit and speed detection, enable the
 * QMU 16-byte checksum, and return VBUS sensing to hardware control.
 */
static void mu3d_reg_init(void)
{
	if (g_u3d.is_u3_ip) {
		/* disable LGO_U1/U2 by default */
		clrbits32_r(SW_U1_ACCEPT_ENABLE | SW_U2_ACCEPT_ENABLE |
			SW_U1_REQUEST_ENABLE | SW_U2_REQUEST_ENABLE,
			U3D_LINK_POWER_CONTROL);
		/* device responses to u3_exit from host automatically */
		clrbits32_r(SOFT_U3_EXIT_EN, U3D_LTSSM_CTRL);
		/* automatically build U2 link when U3 detect fail */
		setbits32_r(U2U3_AUTO_SWITCH, U3D_USB2_TEST_MODE);
	}
	/* delay about 0.1us from detecting reset to send chirp-K */
	clrbits32_r(WTCHRP_MSK, U3D_LINK_RESET_INFO);
	/* U2/U3 detected by HW */
	writel(0, U3D_DEVICE_CONF);
	/* enable QMU 16B checksum */
	setbits32_r(QMU_CS16B_EN, U3D_QCR0);
	/* vbus detected by HW */
	clrbits32_r(VBUS_FRC_EN | VBUS_ON, U3D_MISC_CTRL);
}
+
+static USB_SPEED mu3d_get_speed(void)
+{
+ const char *spd_str[] = {"UNKNOW", "FS", "HS", "SS", "SSP"};
+ USB_SPEED spd;
+
+ switch (SSUSB_DEV_SPEED(readl(U3D_DEVICE_CONF))) {
+ case 1:
+ spd = SSUSB_SPEED_FULL;
+ break;
+ case 3:
+ spd = SSUSB_SPEED_HIGH;
+ break;
+ case 4:
+ spd = SSUSB_SPEED_SUPER;
+ break;
+ case 5:
+ spd = SSUSB_SPEED_SUPER_PLUS;
+ break;
+ default:
+ spd = SSUSB_SPEED_UNKNOWN;
+ break;
+ }
+
+ DBG_C("%s (%d) is detected\n", spd_str[spd % ARRAY_SIZE(spd_str)], spd);
+ return spd;
+}
+
/*
 * Cap the device's maximum speed (SSP is not supported yet; it is
 * treated like SS via the default branch).
 */
static void mu3d_set_speed(USB_SPEED spd)
{
	const char *spd_str[] = {"UNKNOW", "FS", "HS", "SS", "SSP"};

	switch (spd) {
	case SSUSB_SPEED_FULL:
		/* disable both USB3 and HS chirp -> FS only */
		clrbits32_r(USB3_EN, U3D_USB3_CONFIG);
		clrbits32_r(HS_ENABLE, U3D_POWER_MANAGEMENT);
		break;
	case SSUSB_SPEED_HIGH:
		/* disable USB3, keep HS negotiation enabled */
		clrbits32_r(USB3_EN, U3D_USB3_CONFIG);
		setbits32_r(HS_ENABLE, U3D_POWER_MANAGEMENT);
		break;
	case SSUSB_SPEED_SUPER:
		/* fall through */
	default:
		/* limit the U3 port to Gen1: clear the SSP speed select */
		clrbits32_r(SSUSB_U3_PORT_SSP_SPEED, U3D_SSUSB_U3_CTRL_0P);
		break;
	}
	DBG_I("%s %s (%d)\n", __func__, spd_str[spd % ARRAY_SIZE(spd_str)], spd);
}
+
/* Program the SET_ADDRESS-assigned device address into the MAC. */
static inline void mu3d_set_address(int addr)
{
	writel(DEV_ADDR(addr), U3D_DEVICE_CONF);
}
+
+struct udc_endpoint *mtu3_find_ep(int ep_num, u8 dir)
+{
+ struct udc_endpoint *ep_list = g_u3d.eps;
+ int i;
+ u8 in;
+
+ /* convert dir to in */
+ if (dir == USB_DIR_IN)
+ in = 1;
+ else
+ in = 0;
+
+ for (i = 1; i < MT_EP_NUM; i++) {
+ if ((ep_list[i].num == ep_num) && (ep_list[i].in == in))
+ return &ep_list[i];
+ }
+ return NULL;
+}
+
/*
 * Flush one endpoint's FIFO by pulsing its reset bit in U3D_EP_RST;
 * EP0 has a dedicated reset bit, other endpoints are selected by
 * direction + number.
 */
static void mu3d_flush_fifo(u8 ep_num, u8 dir)
{
	if (ep_num == 0) {
		setbits32_r(EP0_RST, U3D_EP_RST);
		clrbits32_r(EP0_RST, U3D_EP_RST);
	} else {
		setbits32_r(EP_RST((dir == USB_DIR_IN), ep_num), U3D_EP_RST);
		clrbits32_r(EP_RST((dir == USB_DIR_IN), ep_num), U3D_EP_RST);
	}
}
+
/*
 * Set or clear the EP0 STALL handshake.
 * @set:    true to arm SENDSTALL (optionally acking @pktrdy in the
 *          same write), false to clear a completed stall.
 * @pktrdy: EP0_SETUPPKTRDY or 0 — packet-ready bit to acknowledge
 *          together with the stall.
 * Either way the EP0 software state machine returns to EP0_IDLE.
 */
static void ep0_stall_set(bool set, u32 pktrdy)
{
	u32 csr;

	/* EP0_SENTSTALL is W1C; mask the CSR per EP0_W1C_BITS first */
	csr = readl(U3D_EP0CSR) & EP0_W1C_BITS;
	if (set)
		csr |= EP0_SENDSTALL | pktrdy;
	else
		csr = (csr & ~EP0_SENDSTALL) | EP0_SENTSTALL;
	writel(csr, U3D_EP0CSR);

	g_u3d.ep0_state = EP0_IDLE;
}
+
+/*
+ * Return value indicates the TxFIFO size of 2^n bytes, (ex: value 10 means 2^10 =
+ * 1024 bytes.) TXFIFOSEGSIZE should be equal or bigger than 4. The TxFIFO size of
+ * 2^n bytes also should be equal or bigger than TXMAXPKTSZ. This EndPoint occupy
+ * total memory size (TX_SLOT + 1 )*2^TXFIFOSEGSIZE bytes.
+ */
+static u8 get_seg_size(u32 maxp)
+{
+ /* Set fifo size(double buffering is currently not enabled) */
+ switch (maxp) {
+ case 8:
+ case 16:
+ return USB_FIFOSZ_SIZE_16;
+ case 32:
+ return USB_FIFOSZ_SIZE_32;
+ case 64:
+ return USB_FIFOSZ_SIZE_64;
+ case 128:
+ return USB_FIFOSZ_SIZE_128;
+ case 256:
+ return USB_FIFOSZ_SIZE_256;
+ case 512:
+ return USB_FIFOSZ_SIZE_512;
+ case 1023:
+ case 1024:
+ case 2048:
+ case 3072:
+ case 4096:
+ return USB_FIFOSZ_SIZE_1024;
+ default:
+ DBG_I("The maxp %d is not supported\n", maxp);
+ return USB_FIFOSZ_SIZE_512;
+ }
+}
+
/*
 * Program one non-EP0 endpoint into the MAC: max packet size, transfer
 * type (BULK only in this fastboot driver), FIFO address/segment size,
 * and either the QMU DMA enables or the PIO interrupt enables.
 * Direction is taken from ept->in (TX = IN, RX = OUT).
 */
static void mu3d_setup_ep(unsigned int ep_num, struct udc_endpoint *ept)
{
	u32 csr0, csr1, csr2;
	u32 fifo_addr;
	u8 seg_size;

	/* Nothing needs to be done for ep0 */
	if (ep_num == 0)
		return;

	/* In LK (FASTBOOT) will use BULK transfer only */
	if (ept->type != USB_EP_XFER_BULK)
		DBG_C("ep type is wrong, should be bulk,\n");

	/* Set fifo address, fifo size, and fifo max packet size */
	DBG_I("%s: %s, maxpkt: %d\n", __func__, ept->name, ept->maxpkt);

	/* Set fifo size(only supports single buffering) */
	seg_size = get_seg_size(ept->maxpkt);

	if (ept->in) { /* TX case */
		mu3d_flush_fifo(ep_num, USB_DIR_IN);

		csr0 = TX_TXMAXPKTSZ(ept->maxpkt);

		/* only support BULK, set 0 for burst, slot, mult, num_pkts */
		csr1 = TX_TYPE(TYPE_BULK);

		/* fixed 1KB SRAM slot per endpoint number (see header note) */
		fifo_addr = g_u3d.tx_fifo_addr + (U3D_FIFO_SIZE_UNIT * ep_num);
		csr2 = TX_FIFOADDR(fifo_addr >> 4);
		csr2 |= TX_FIFOSEGSIZE(seg_size);

#ifdef SUPPORT_QMU
		csr0 |= TX_DMAREQEN;
		/* Enable QMU Done interrupt */
		setbits32_r(QMU_TX_DONE_INT(ep_num), U3D_QIESR0);
#else
		setbits32_r(EP_TXISR(ep_num), U3D_EPIECR); /* W1C */
		setbits32_r(EP_TXISR(ep_num), U3D_EPIESR); /* W1S */
#endif

		writel(csr0, MU3D_EP_TXCR0(ep_num));
		writel(csr1, MU3D_EP_TXCR1(ep_num));
		writel(csr2, MU3D_EP_TXCR2(ep_num));

	} else { /* RX case */
		mu3d_flush_fifo(ep_num, USB_DIR_OUT);

		csr0 = RX_RXMAXPKTSZ(ept->maxpkt);

		/* only support BULK, set 0 for burst, slot, mult, num_pkts */
		csr1 = RX_TYPE(TYPE_BULK);

		fifo_addr = g_u3d.rx_fifo_addr + (U3D_FIFO_SIZE_UNIT * ep_num);
		csr2 = RX_FIFOADDR(fifo_addr >> 4);
		csr2 |= RX_FIFOSEGSIZE(seg_size);

#ifdef SUPPORT_QMU
		csr0 |= RX_DMAREQEN;
		/* Enable QMU Done interrupt */
		setbits32_r(QMU_RX_DONE_INT(ep_num), U3D_QIESR0);
#else
		setbits32_r(EP_RXISR(ep_num), U3D_EPIECR); /* W1C */
		/* enable it when queue RX request */
		/* setbits32_r(EP_RXISR(ep_num), U3D_EPIESR);*/ /* W1S */
#endif
		writel(csr0, MU3D_EP_RXCR0(ep_num));
		writel(csr1, MU3D_EP_RXCR1(ep_num));
		writel(csr2, MU3D_EP_RXCR2(ep_num));
	}

#ifdef SUPPORT_QMU
	mtu3_qmu_start(ept);
#endif
}
+
/*
 * (Re)enable EP0: pick the max packet size for the current speed
 * (512B for SuperSpeed, 64B otherwise), program it into the EP0 CSR
 * without touching W1C status bits, and unmask the EP0 interrupt.
 * Called at init and again after every speed change.
 */
static void mu3d_ep0en(void)
{
	u32 temp = 0;
	struct udc_endpoint *ep0 = g_u3d.ep0;

	sprintf(ep0->name, "ep0");
	ep0->type = USB_EP_XFER_CTRL;
	ep0->num = EP0;
	if (g_u3d.speed == SSUSB_SPEED_SUPER)
		ep0->maxpkt = EP0_MAX_PACKET_SIZE_U3;
	else
		ep0->maxpkt = EP0_MAX_PACKET_SIZE;

	/* rewrite maxpkt; disable auto-clear/set and DMA for PIO EP0 */
	temp = readl(U3D_EP0CSR);
	temp &= ~(EP0_MAXPKTSZ_MSK | EP0_AUTOCLEAR | EP0_AUTOSET | EP0_DMAREQEN);
	temp |= EP0_MAXPKTSZ(ep0->maxpkt);
	temp &= EP0_W1C_BITS; /* avoid acking W1C status bits by accident */
	writel(temp, U3D_EP0CSR);

	/* enable EP0 interrupts */
	setbits32_r(EP_EP0ISR, U3D_EPIESR);
}
+
+static void mu3d_get_ip_vers(void)
+{
+ u32 val;
+
+ val = readl(U3D_SSUSB_IP_DEV_CAP);
+ g_u3d.is_u3_ip = !!SSUSB_IP_DEV_U3_PORT_NUM(val);
+ DBG_C("IP version 0x%x(%s IP)\n", readl(U3D_SSUSB_HW_ID),
+ g_u3d.is_u3_ip ? "U3" : "U2");
+}
+
/*
 * Bring the controller to a ready-to-connect state. Order matters:
 * reset the device, power/clock-enable the IP, quiesce all interrupts,
 * program defaults, fix the speed cap, re-enable interrupts, then
 * enable EP0.
 */
static void mu3d_hw_init(void)
{
	mu3d_dev_reset();
	mu3d_ssusb_enable();
	mu3d_intr_disable();
	mu3d_reg_init();
	mu3d_set_speed(g_u3d.speed);
	mu3d_intr_enable();
	mu3d_ep0en();
}
+
+static void mtu3_setup_eps(void)
+{
+ struct udc_endpoint *ep_list = g_u3d.eps;
+ USB_SPEED speed = g_u3d.speed;
+ int maxp = 512;
+ int i;
+
+ if (speed == SSUSB_SPEED_FULL)
+ maxp = 64;
+ else if (speed == SSUSB_SPEED_HIGH)
+ maxp = 512;
+ else if (speed == SSUSB_SPEED_SUPER)
+ maxp = 1024;
+
+ for (i = 1; i < MT_EP_NUM; i++) {
+ if (ep_list[i].num != 0) { /* allocated */
+ ep_list[i].maxpkt = maxp;
+ mu3d_setup_ep(ep_list[i].num, &ep_list[i]);
+ }
+ }
+}
+
+
+/* usb generic functions */
+
+void handle_ept_complete(struct udc_endpoint *ept, int status)
+{
+ struct udc_request *req;
+ struct mu3d_req *mreq;
+ unsigned int actual;
+
+ req = ept->req;
+ mreq = to_mu3d_req(req);
+ if (req) {
+ ept->req = NULL;
+
+ if (status)
+ DBG_C("%s: %s FAIL status: %d\n", __func__, ept->name, status);
+
+ actual = status ? 0 : mreq->actual;
+
+ DBG_I("%s: %s, req: %p: complete: %d/%d: status: %d\n",
+ __func__, ept->name, req, actual, req->length, status);
+
+ if (req->complete)
+ req->complete(req, actual, status);
+ }
+}
+
/*
 * PIO-drain the endpoint FIFO into the current request's buffer,
 * clamped to the space left in the request. Returns the number of
 * bytes consumed (0 when no request is pending).
 */
static int mtu3_read_fifo(struct udc_endpoint *ept)
{
	struct udc_request *req = ept->req;
	struct mu3d_req *mreq = to_mu3d_req(req);
	int ep_num = ept->num;
	u32 count = 0;

	if (mreq) {
		/* FIFO byte count lives in different regs for EP0 vs EPx */
		if (ep_num == 0)
			count = readl(U3D_RXCOUNT0);
		else
			count = EP_RX_COUNT(readl(MU3D_EP_RXCR3(ep_num)));

		/* never overrun the request buffer */
		count = MIN(req->length - mreq->actual, count);
		pio_read_fifo(ep_num, req->buffer + mreq->actual, count);
#if 0
		if (ep_num != 0) {
			DBG_I("%s: &req->buffer: %p\n", __func__, req->buffer);
			DBG_I("dump data:\n");
			hexdump8(req->buffer, len);
		}
#endif
		mreq->actual += count;

		DBG_I("%s: ep%dout, mreq: %p, buf: %p, length: %d, actual: %d\n",
			__func__, ep_num, mreq, req->buffer, req->length, mreq->actual);
	}

	return count;
}
+
+static int mtu3_write_fifo(struct udc_endpoint *ept)
+{
+ struct udc_request *req = ept->req;
+ struct mu3d_req *mreq = to_mu3d_req(req);
+ unsigned char *buf;
+ int ep_num = ept->num;
+ int count = 0;
+
+ if (mreq) {
+ DBG_I("%s: ep%din mreq: %p, length: %d, actual: %d, maxp: %d\n",
+ __func__, ep_num, mreq, req->length, mreq->actual, ept->maxpkt);
+
+ count = MIN(req->length - mreq->actual, ept->maxpkt);
+ buf = req->buffer + mreq->actual;
+ pio_write_fifo(ep_num, buf, count);
+ mreq->actual += count;
+ }
+
+ return count;
+}
+
/*
 * Push the next EP0 IN packet. When a short packet or the full request
 * length has been sent, resets the progress counter and advances the
 * state machine to EP0_TX_END so the ISR can set DATAEND afterwards.
 */
static void mtu3_ep0_write(void)
{
	struct udc_endpoint *ep0 = g_u3d.ep0;
	struct udc_request *req = ep0->req;
	struct mu3d_req *mreq = to_mu3d_req(req);
	unsigned int count = 0;
	u32 csr0;

	csr0 = readl(U3D_EP0CSR);
	if (csr0 & EP0_TXPKTRDY) {
		/* previous packet not yet consumed by the host */
		DBG_I("%s: ep0 is not ready to be written\n", __func__);
		return;
	}

	count = mtu3_write_fifo(ep0);

	/* hardware limitation: can't set (EP0_TXPKTRDY | EP0_DATAEND) at same time */
	csr0 |= (EP0_TXPKTRDY);
	writel(csr0, U3D_EP0CSR);

	DBG_I("%s: length=%d, actual=%d\n", __func__, req->length, mreq->actual);
	if ((count < ep0->maxpkt) || (req->length == mreq->actual)) {
		/* last packet */
		mreq->actual = 0;
		g_u3d.ep0_state = EP0_TX_END;
	}
}
+
/*
 * Drain one EP0 OUT packet. On the last packet, sets DATAEND and
 * returns the state machine to EP0_IDLE.
 * NOTE(review): RXPKTRDY is written twice (once at the first writel,
 * once via the final writel in both branches) — looks redundant;
 * confirm against the EP0CSR W1C semantics before simplifying.
 */
static void mtu3_ep0_read(void)
{
	struct udc_endpoint *ep0 = g_u3d.ep0;
	struct udc_request *req = ep0->req;
	struct mu3d_req *mreq = to_mu3d_req(req);
	unsigned int count = 0;
	u32 csr0 = 0;

	csr0 = readl(U3D_EP0CSR);

	/* erroneous ep0 interrupt */
	if (!(csr0 & EP0_RXPKTRDY))
		return;

	count = mtu3_read_fifo(ep0);

	/* work around: cannot set (EP0_RXPKTRDY | EP0_DATAEND) at same time */
	csr0 |= (EP0_RXPKTRDY);
	writel(csr0, U3D_EP0CSR);

	if ((count < ep0->maxpkt) || (mreq->actual == req->length)) {
		/* last packet */
		csr0 |= EP0_DATAEND;
		g_u3d.ep0_state = EP0_IDLE;
	} else {
		/* more packets are waiting to be transferred */
		csr0 |= EP0_RXPKTRDY;
	}

	writel(csr0, U3D_EP0CSR);
}
+
+static int std_get_descs(struct udc_request *req, struct usb_setup *setup)
+{
+ struct udc_descriptor *desc = NULL;
+
+ if (g_u3d.speed <= SSUSB_SPEED_HIGH)
+ desc = g_u3d.desc_list;
+ else
+ desc = g_u3d.desc_list_u3;
+
+ for (; desc; desc = desc->next) {
+ if (desc->tag == setup->value) {
+ unsigned int len = desc->len;
+
+ if (len > setup->length)
+ len = setup->length;
+
+ copy_desc(req, desc->data, len);
+ change_eps_maxp(desc, req->buffer, len);
+ return 0;
+ }
+ }
+ /* descriptor lookup failed */
+ return -EINVAL;
+}
+
/*
 * Decode and service a standard (USB_TYPE_STANDARD) SETUP request.
 * Requests with an IN data stage switch the EP0 state machine to
 * EP0_TX and fill the shared EP0 request buffer; no-data requests are
 * executed directly. Returns 0 when handled, -EINVAL for anything
 * unsupported (the caller stalls EP0 on non-zero).
 */
static int ep0_standard_setup(struct usb_setup *setup)
{
	struct udc_gadget *gadget = g_u3d.gadget;
	struct udc_request *req = g_u3d.ep0->req;
	u8 *cp = req->buffer;

	dump_setup_packet("Device Request\n", setup);

	if ((setup->request_type & USB_TYPE_MASK) != 0)
		return -EINVAL; /* Class-specific requests are handled elsewhere */

	/* handle all requests that return data (direction bit set on bm RequestType) */
	if ((setup->request_type & USB_EP_DIR_MASK)) {
		/* send the descriptor */
		g_u3d.ep0_state = EP0_TX;

		switch (setup->request) {
		/* data stage: from device to host */
		case GET_STATUS:
			DBG_I("GET_STATUS\n");
			req->length = 2;
			cp[0] = cp[1] = 0;

			switch (setup->request_type & USB_RECIP_MASK) {
			case USB_RECIP_DEVICE:
				cp[0] = USB_STAT_SELFPOWERED;
				break;
			case USB_RECIP_OTHER:
				req->length = 0;
				break;
			default:
				break;
			}
			return 0;
		case GET_DESCRIPTOR:
			DBG_I("GET_DESCRIPTOR\n");
			return std_get_descs(req, setup);
		case GET_CONFIGURATION:
			/* NOTE(review): falls out and returns -EINVAL below */
			DBG_I("GET_CONFIGURATION\n");
			break;
		case GET_INTERFACE:
			DBG_I("GET_INTERFACE\n");
			break;
		default:
			DBG_C("Unsupported command with TX data stage\n");
			break;
		}
	} else {
		switch (setup->request) {
		case SET_ADDRESS:
			DBG_I("SET_ADDRESS\n");
			g_u3d.address = (setup->value);
			mu3d_set_address(g_u3d.address);
			return 0;
		case SET_CONFIGURATION:
			DBG_I("SET_CONFIGURATION\n");
			g_u3d.usb_online = setup->value ? 1 : 0;
			if (setup->value == 1) {
				/* configure endpoints, then tell the gadget */
				mtu3_setup_eps();
				spin(50);
				gadget->notify(gadget, UDC_EVENT_ONLINE);
			} else {
				gadget->notify(gadget, UDC_EVENT_OFFLINE);
			}
			DBG_C("usb_online: %d\n", g_u3d.usb_online);
			return 0;
		default:
			DBG_I("setup->request: %x, setup->value: %x\n",
				setup->request, setup->value);
			DBG_C("Unsupported command with RX data stage\n");
			break;
		} /* switch request */
	}

	return -EINVAL;
}
+
/*
 * Handle a pending SETUP packet: unload it from the EP0 FIFO, dispatch
 * standard requests, stall EP0 for anything unsupported, then ack
 * SETUPPKTRDY with the flags required by the resulting data stage
 * (DPHTX for IN, DATAEND when there is no data stage).
 */
static void mtu3_ep0_setup(void)
{
	struct usb_setup setup;
	int stall = -ENOTSUP;
	u32 csr0;
	u32 len;

	csr0 = readl(U3D_EP0CSR);
	if (!(csr0 & EP0_SETUPPKTRDY))
		return;

	/* a SETUP packet is always exactly 8 bytes */
	len = readl(U3D_RXCOUNT0);
	if (len != 8) {
		DBG_C("SETUP packet len %d != 8?\n", len);
		return;
	}

	/* unload fifo */
	pio_read_fifo(EP0, (u8 *)&setup, len);

	/* decode command */
	if (((setup.request_type) & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		DBG_I("Standard Request\n");
		stall = ep0_standard_setup(&setup);
	}

	/* command is not supported, include USB_TYPE_CLASS & USB_TYPE_VENDOR */
	if (stall) {
		dump_setup_packet("REQUEST NOT SUPPORTED\n", &setup);
		ep0_stall_set(true, EP0_SETUPPKTRDY);
		return;
	}

	/* handle EP0 state */
	switch (g_u3d.ep0_state) {
	case EP0_TX:
		DBG_I("%s: EP0_TX\n", __func__);
		csr0 = readl(U3D_EP0CSR);
		csr0 |= (EP0_SETUPPKTRDY | EP0_DPHTX);
		writel(csr0, U3D_EP0CSR);

		mtu3_ep0_write();
		break;
	case EP0_RX:
		DBG_I("%s: EP0_RX\n", __func__);
		csr0 = readl(U3D_EP0CSR);
		csr0 |= (EP0_SETUPPKTRDY);
		writel(csr0, U3D_EP0CSR);
		break;
	case EP0_IDLE:
		/* no data stage */
		DBG_I("%s: EP0_IDLE\n", __func__);
		csr0 = readl(U3D_EP0CSR);
		csr0 |= (EP0_SETUPPKTRDY | EP0_DATAEND);
		writel(csr0, U3D_EP0CSR);
		break;
	default:
		break;
	}
}
+
/*
 * EP0 interrupt service: clear a sent STALL if one completed, then
 * advance the EP0 state machine — setup decode in IDLE, continue the
 * IN stream in TX, finish with DATAEND in TX_END, drain OUT in RX.
 */
static void mtu3_ep0_isr(void)
{
	u32 csr0;

	csr0 = readl(U3D_EP0CSR);

	if (csr0 & EP0_SENTSTALL) {
		DBG_I("USB: [EP0] SENTSTALL\n");
		ep0_stall_set(false, 0);
		/* re-read: the stall clear rewrote the CSR */
		csr0 = readl(U3D_EP0CSR);
	}

	switch (g_u3d.ep0_state) {
	case EP0_IDLE:
		DBG_I("%s: EP0_IDLE\n", __func__);
		mtu3_ep0_setup();
		break;
	case EP0_TX:
		DBG_I("%s: EP0_TX\n", __func__);
		mtu3_ep0_write();
		break;
	case EP0_TX_END:
		DBG_I("%s: EP0_TX_END\n", __func__);
		/* DATAEND could not be set together with TXPKTRDY earlier */
		csr0 |= EP0_DATAEND;
		writel(csr0, U3D_EP0CSR);
		g_u3d.ep0_state = EP0_IDLE;
		break;
	case EP0_RX:
		DBG_I("%s: EP0_RX\n", __func__);
		mtu3_ep0_read();
		g_u3d.ep0_state = EP0_IDLE;
		break;
	default:
		DBG_I("[ERR]: Unrecognized ep0 state %d", g_u3d.ep0_state);
		break;
	}
}
+
+#ifndef SUPPORT_QMU
+/* PIO: TX packet */
/*
 * PIO kick for a non-EP0 IN endpoint: load one packet into the FIFO
 * and set TXPKTRDY. Returns bytes written, or a negative error when
 * the endpoint is EP0 (-EACCES), not an IN endpoint (-EINVAL), or
 * still busy with the previous packet (-EBUSY).
 */
static int mtu3_epx_write(struct udc_endpoint *ept)
{
	int ep_num = ept->num;
	int count;
	u32 csr;

	/* only for non-ep0 */
	if (ep_num == 0)
		return -EACCES;

	if (!ept->in)
		return -EINVAL;

	csr = readl(MU3D_EP_TXCR0(ep_num));
	if (csr & TX_TXPKTRDY) {
		DBG_I("%s: ep%d is busy!\n", __func__, ep_num);
		return -EBUSY;
	}
	count = mtu3_write_fifo(ept);

	csr |= TX_TXPKTRDY;
	writel(csr, MU3D_EP_TXCR0(ep_num));

	return count;
}
+
/*
 * PIO interrupt service for a non-EP0 endpoint.
 * IN: complete the request when fully sent, otherwise push the next
 * packet. OUT: drain the FIFO, ack RXPKTRDY, and complete on a short
 * packet or when the request is filled (also masking further RX
 * interrupts until the next queued request re-enables them).
 */
static void mtu3_epx_isr(u8 ep_num, u8 dir)
{
	struct udc_endpoint *ept;
	struct mu3d_req *mreq;
	struct udc_request *req;
	u32 csr;
	u32 count;

	ept = mtu3_find_ep(ep_num, dir);
	if (!ept || !ept->req)
		return;

	DBG_I("%s Interrupt\n", ept->name);
	req = ept->req;
	mreq = to_mu3d_req(req);

	if (dir == USB_DIR_IN) {
		csr = readl(MU3D_EP_TXCR0(ep_num));
		if (csr & TX_SENTSTALL) {
			DBG_C("EP%dIN: STALL\n", ep_num);
			handle_ept_complete(ept, -EPIPE);
			/* exception handling: implement this!! */
			return;
		}

		if (csr & TX_TXPKTRDY) {
			/* spurious: previous packet still in flight */
			DBG_C("%s: EP%dIN is busy\n", __func__, ep_num);
			return;
		}

		if (req->length == mreq->actual) {
			/* everything sent — finish the request */
			handle_ept_complete(ept, 0);
			return;
		}

		count = mtu3_write_fifo(ept);
		if (count) {
			csr |= TX_TXPKTRDY;
			writel(csr, MU3D_EP_TXCR0(ep_num));
		}

		DBG_I("EP%dIN, count=%d, %d/%d\n",
			ep_num, count, mreq->actual, req->length);

	} else {
		csr = readl(MU3D_EP_RXCR0(ep_num));
		if (csr & RX_SENTSTALL) {
			DBG_C("EP%dOUT: STALL\n", ep_num);
			/* exception handling: implement this!! */
			return;
		}

		if (!(csr & RX_RXPKTRDY)) {
			DBG_I("EP%dOUT: ERRONEOUS INTERRUPT\n", ep_num);
			return;
		}

		count = mtu3_read_fifo(ept);

		DBG_I("EP%dOUT, count = %d\n", ep_num, count);

		/* write 1 to clear RXPKTRDY */
		csr |= RX_RXPKTRDY;
		writel(csr, MU3D_EP_RXCR0(ep_num));

		if (readl(MU3D_EP_RXCR0(ep_num)) & RX_RXPKTRDY)
			DBG_I("%s: rxpktrdy clear failed\n", __func__);

		if ((req->length == mreq->actual) || (count < ept->maxpkt)) {
			/* disable EP RX intr */
			setbits32_r(EP_RXISR(ep_num), U3D_EPIECR); /* W1C */
			handle_ept_complete(ept, 0);
		}
	}
}
+#endif
+
/*
 * Handle abnormal termination (bus suspend, cable unplug, U3 entry):
 * mark the device offline, notify the gadget, then flush the QMU
 * queues and fail every pending request with -ESHUTDOWN.
 * NOTE(review): mtu3_qmu_flush() is called even in PIO builds —
 * presumably harmless, but confirm for #ifndef SUPPORT_QMU.
 */
static void mtu3_suspend(void)
{
	struct udc_gadget *gadget = g_u3d.gadget;
	struct udc_endpoint *ep_list = g_u3d.eps;
	struct udc_endpoint *ept;
	int i;

	g_u3d.usb_online = 0;
	gadget->notify(gadget, UDC_EVENT_OFFLINE);

	/* error out any pending reqs, except ep0 */
	for (i = 1; i < MT_EP_NUM; i++) {
		ept = &ep_list[i];
		/* End operation when encounter uninitialized ept */
		if (ept->num == 0)
			break;

		DBG_I("%s: %s, req: %p\n", __func__, ept->name, ept->req);

		mtu3_qmu_flush(ept);

		if (ept->req)
			handle_ept_complete(ept, -ESHUTDOWN);
	}
}
+
/* Return the control state machine to idle and forget the bus address
 * (used on bus reset / hot reset / warm reset / VBUS fall). */
static void mtu3_status_reset(void)
{
	g_u3d.ep0_state = EP0_IDLE;
	g_u3d.address = 0;
}
+
+static void mtu3_link_isr(void)
+{
+ u32 linkint;
+
+ linkint = readl(U3D_DEV_LINK_INTR) & readl(U3D_DEV_LINK_INTR_ENABLE);
+ writel(linkint, U3D_DEV_LINK_INTR);
+
+ if (linkint & SSUSB_DEV_SPEED_CHG_INTR) {
+ DBG_I("[INTR] Speed Change\n");
+ g_u3d.speed = mu3d_get_speed();
+ if (g_u3d.speed == SSUSB_SPEED_UNKNOWN)
+ mtu3_suspend();
+ else
+ mu3d_ep0en();
+ }
+}
+
+static void mtu3_u2_common_isr(void)
+{
+ u32 intrusb = 0;
+
+ intrusb = readl(U3D_COMMON_USB_INTR) & readl(U3D_COMMON_USB_INTR_ENABLE);
+ writel(intrusb, U3D_COMMON_USB_INTR);
+
+ if (intrusb & RESET_INTR) {
+ DBG_I("[INTR] Reset\n");
+ mtu3_status_reset();
+ }
+
+ if (intrusb & SUSPEND_INTR) {
+ DBG_I("[INTR] Suspend\n");
+ mtu3_suspend();
+ }
+
+ if (intrusb & RESUME_INTR)
+ DBG_I("[INTR] Resume\n");
+
+}
+
/*
 * U3 LTSSM interrupt: hot/warm reset restarts the control state,
 * VBUS fall drops the SS function, VBUS rise re-enables it, U3 entry
 * is treated as suspend, U3 exit is only logged.
 */
static void mtu3_u3_ltssm_isr(void)
{
	u32 ltssm;

	ltssm = readl(U3D_LTSSM_INTR) & readl(U3D_LTSSM_INTR_ENABLE);
	writel(ltssm, U3D_LTSSM_INTR); /* W1C */
	DBG_I("=== LTSSM[%x] ===\n", ltssm);

	if (ltssm & (HOT_RST_INTR | WARM_RST_INTR))
		mtu3_status_reset();

	if (ltssm & VBUS_FALL_INTR) {
		mu3d_ss_func_set(false);
		mtu3_status_reset();
	}

	if (ltssm & VBUS_RISE_INTR)
		mu3d_ss_func_set(true);

	if (ltssm & ENTER_U3_INTR)
		mtu3_suspend();

	if (ltssm & EXIT_U3_INTR)
		DBG_I("[INTR] Resume\n");

}
+
/*
 * BMU (FIFO) interrupt: bit 0 is EP0; the low halfword carries TX
 * endpoints and the high halfword RX endpoints. Non-EP0 bits are only
 * serviced in PIO builds — in QMU mode data completion arrives via
 * the QMU interrupt instead.
 */
static void mtu3_bmu_isr(void)
{
	u32 intrep;

	intrep = readl(U3D_EPISR) & readl(U3D_EPIER);
	writel(intrep, U3D_EPISR); /* ack (W1C) */
	DBG_I("[INTR] BMU[tx:%x, rx:%x] IER: %x\n",
		intrep & 0xffff, intrep >> 16, readl(U3D_EPIER));

	/* For EP0 */
	if (intrep & 0x1) {
		mtu3_ep0_isr();
		intrep &= ~0x1;
	}

#ifndef SUPPORT_QMU
	if (intrep) {
		u32 ep_num;

		for (ep_num = 1; ep_num <= (MT_EP_NUM / 2); ep_num++) {
			if (intrep & EPT_RX(ep_num))
				mtu3_epx_isr(ep_num, USB_DIR_OUT);

			if (intrep & EPT_TX(ep_num))
				mtu3_epx_isr(ep_num, USB_DIR_IN);
		}
	}
#endif
}
+
+static enum handler_return mtu3_isr(void *arg)
+{
+ u32 lv1_isr;
+
+ lv1_isr = readl(U3D_LV1ISR); /* LV1ISR is RU */
+ lv1_isr &= readl(U3D_LV1IER);
+ DBG_I("[INTR] lv1_isr:0x%x\n", lv1_isr);
+
+ if (lv1_isr & EP_CTRL_INTR)
+ mtu3_link_isr();
+
+ if (lv1_isr & MAC2_INTR)
+ mtu3_u2_common_isr();
+
+ if (lv1_isr & MAC3_INTR)
+ mtu3_u3_ltssm_isr();
+
+ if (lv1_isr & BMU_INTR)
+ mtu3_bmu_isr();
+
+ if (lv1_isr & QMU_INTR)
+ mtu3_qmu_isr();
+
+ return INT_RESCHEDULE;
+}
+
/* Hook mtu3_isr() onto the SSUSB device IRQ (active-low, level). */
static void mu3d_isr_init(void)
{
	mt_irq_set_sens(SSUSB_DEV_INT_ID, LEVEL_SENSITIVE);
	mt_irq_set_polarity(SSUSB_DEV_INT_ID, MT65xx_POLARITY_LOW);
	register_int_handler(SSUSB_DEV_INT_ID, mtu3_isr, NULL);
	unmask_interrupt(SSUSB_DEV_INT_ID);
}
+
+/* gadget common APIs */
+
/*
 * One-time driver-state setup: detect the IP flavor, pick the default
 * speed and FIFO SRAM layout accordingly, allocate the shared EP0
 * request buffer, and initialize the QMU rings.
 * Returns 0 on success, -ENOMEM when the EP0 buffer allocation fails,
 * or the mtu3_qmu_init() result.
 */
static int g_u3d_init(void)
{
	struct mu3d_req *mreq = &g_u3d.ep0_mreq;
	struct udc_request *req = &mreq->req;

	mu3d_get_ip_vers();
	g_u3d.ep0_state = EP0_IDLE;
	g_u3d.address = 0;
	if (g_u3d.is_u3_ip) {
		g_u3d.speed = U3D_U3IP_DFT_SPEED;
		g_u3d.tx_fifo_addr = U3IP_TX_FIFO_START_ADDR;
		g_u3d.rx_fifo_addr = U3IP_RX_FIFO_START_ADDR;
	} else {
		g_u3d.speed = U3D_U2IP_DFT_SPEED;
		g_u3d.tx_fifo_addr = U2IP_TX_FIFO_START_ADDR;
		g_u3d.rx_fifo_addr = U2IP_RX_FIFO_START_ADDR;
	}

	g_u3d.ep0 = &g_u3d.eps[EP0];

	/* shared buffer for all EP0 control transfers */
	req->buffer = udc_zalloc(512);
	if (!req->buffer)
		return -ENOMEM;

	g_u3d.ep0->req = req;
	/* EP0 IN/OUT are pre-allocated */
	g_u3d.ept_alloc_table = EPT_TX(0) | EPT_RX(0);
	g_u3d.desc_list = NULL;
	g_u3d.next_string_id = 1;
	g_u3d.next_string_id_u3 = 1;

	return mtu3_qmu_init();
}
+
+int udc_init(struct udc_device *dev)
+{
+ g_u3d_init();
+ g_u3d.udev = dev;
+ return 0;
+}
+
+static struct udc_endpoint *_udc_endpoint_alloc(unsigned char num,
+ unsigned char in, unsigned short max_pkt)
+{
+ struct udc_endpoint *ep_list = g_u3d.eps;
+ struct udc_endpoint *ept;
+ int ret;
+ int i;
+
+ /* allocated and enabled by default */
+ if (num == EP0)
+ return NULL;
+
+ /*
+ * find an unused slot in ep_list from EP1 to MAX_EP
+ * for example, EP1 will use 2 eps, one for IN and the other for OUT
+ */
+ for (i = 1; i < MT_EP_NUM; i++) {
+ if (ep_list[i].num == 0) /* usable */
+ break;
+ }
+ if (i == MT_EP_NUM) /* ep has been exhausted. */
+ return NULL;
+
+ ept = &ep_list[i];
+ sprintf(ept->name, "ep%d%s", num, in ? "in" : "out");
+
+ ret = mtu3_gpd_ring_alloc(ept);
+ if (ret) {
+ DBG_C("%s gpd alloc failed\n", ept->name);
+ return NULL;
+ }
+
+ /* only supports BULK */
+ ept->type = USB_EP_XFER_BULK;
+ ept->maxpkt = max_pkt;
+ ept->num = num;
+ ept->in = in;
+ ept->req = NULL;
+
+ /* store EPT_TX/RX info */
+ if (ept->in)
+ ept->bit = EPT_TX(num);
+ else
+ ept->bit = EPT_RX(num);
+
+ /* write parameters to this ep (write to hardware) when SET_CONFIG */
+
+ DBG_I("%s @%p/%p max=%d bit=%x\n", ept->name,
+ ept, &ep_list, max_pkt, ept->bit);
+
+ return &ep_list[i];
+}
+
+struct udc_endpoint *udc_endpoint_alloc(unsigned int type, unsigned int maxpkt)
+{
+ struct udc_endpoint *ept;
+ unsigned int n;
+ unsigned int in;
+
+ DBG_I("%s\n", __func__);
+
+ if (type == UDC_BULK_IN)
+ in = 1;
+ else if (type == UDC_BULK_OUT)
+ in = 0;
+ else
+ return NULL;
+
+ /* udc_endpoint_alloc is used for EPx except EP0 */
+ for (n = 1; n < MT_EP_NUM; n++) {
+ unsigned int bit = in ? EPT_TX(n) : EPT_RX(n);
+
+ if (g_u3d.ept_alloc_table & bit)
+ continue;
+
+ ept = _udc_endpoint_alloc(n, in, maxpkt);
+ if (ept) {
+ g_u3d.ept_alloc_table |= bit;
+ return ept;
+ }
+ }
+
+ return NULL;
+}
+
/* Release an endpoint's GPD ring; EP0 (num == 0) owns no ring.
 * NOTE(review): the slot's num/alloc-table bit are not cleared, so
 * the endpoint cannot be re-allocated afterwards — confirm intended. */
void udc_endpoint_free(struct udc_endpoint *ept)
{
	if (ept->num)
		mtu3_gpd_ring_free(ept);
}
+
+struct udc_request *udc_request_alloc(void)
+{
+ struct mu3d_req *mreq;
+
+ mreq = udc_zalloc(sizeof(*mreq));
+
+ return mreq ? &mreq->req : NULL;
+}
+
/* Free a request from udc_request_alloc(); valid because 'req' is the
 * first member of struct mu3d_req, so both pointers coincide. */
void udc_request_free(struct udc_request *req)
{
	free(req);
}
+
/*
 * Queue a transfer on a non-EP0 endpoint.
 * QMU mode: insert a GPD (after cleaning the buffer from cache) and
 * resume the queue. PIO mode: kick the TX FIFO directly, or arm the
 * RX interrupt for OUT transfers.
 * Returns 0 on success, -ENXIO when not configured, -EOPNOTSUPP when
 * the length exceeds one GPD, -EAGAIN when the ring is full.
 * The device IRQ is masked across the queue manipulation.
 */
int udc_request_queue(struct udc_endpoint *ept, struct udc_request *req)
{
	struct mu3d_req *mreq = to_mu3d_req(req);
	int ret = 0;

	DBG_I("%s: %s, req=%p, buf: %p, length=%d\n", __func__,
		ept->name, req, req->buffer, req->length);

	if (!g_u3d.usb_online)
		return -ENXIO;

	mask_interrupt(SSUSB_DEV_INT_ID);
	ept->req = req;
	mreq->ept = ept;
	mreq->actual = 0;

#ifdef SUPPORT_QMU
	if (req->length > GPD_BUF_SIZE) {
		DBG_C("req length > supported MAX:%d requested:%d\n",
			GPD_BUF_SIZE, req->length);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (mtu3_prepare_transfer(ept)) {
		ret = -EAGAIN;
		goto out;
	}

	/* make the buffer visible to the DMA engine */
	arch_clean_invalidate_cache_range((addr_t)req->buffer, req->length);
	mtu3_insert_gpd(ept, mreq);
	mtu3_qmu_resume(ept);
#else

	/*
	 * PIO mode:
	 * when class driver shares a buffer to TX and RX data,
	 * mtu3 sends a data to host, then host sends a data back immediately,
	 * cause EP TX and RX interrupts arise at the same time,
	 * but the buffer is using by the TX, so no buffer for RX to receive data.
	 * To fix the issue:
	 * disable EP RX interrupt by default, enable it when queue RX
	 * request and disable it again when complete the request.
	 */
	if (ept->in)
		mtu3_epx_write(ept);
	else
		setbits32_r(EP_RXISR(ept->num), U3D_EPIESR); /* W1S */
#endif

/* NOTE(review): in PIO builds this label is unreferenced (unused-label warning) */
out:
	unmask_interrupt(SSUSB_DEV_INT_ID);

	return ret;
}
+
+int udc_register_gadget(struct udc_gadget *gadget)
+{
+ if (g_u3d.gadget) {
+ DBG_C("only one gadget supported\n");
+ return -EBUSY;
+ }
+ g_u3d.gadget = gadget;
+
+ return 0;
+}
+
/*
 * Start the device controller: build descriptors for the registered
 * gadget, power the PHY, initialize the MAC, hook the IRQ, then pull
 * up to present the device to the host.
 * Returns 0, -ENODEV before udc_init(), or -ENXIO with no gadget.
 */
int udc_start(void)
{
	struct udc_device *udev = g_u3d.udev;
	struct udc_gadget *gadget = g_u3d.gadget;

	if (!udev) {
		DBG_C("udc cannot start before init\n");
		return -ENODEV;
	}
	if (!gadget) {
		DBG_C("udc has no gadget registered\n");
		return -ENXIO;
	}
	DBG_I("%s\n", __func__);

	udc_create_descriptors(udev, gadget);
	mt_usb_phy_poweron();
	mu3d_hw_init();
	mu3d_isr_init();
	mu3d_soft_connect();

	return 0;
}
+
/* Disconnect from the host and power the PHY down. Always returns 0. */
int udc_stop(void)
{
	mu3d_soft_disconnect();
	mt_usb_phy_poweroff();

	return 0;
}
+
+#pragma GCC pop_options
diff --git a/src/bsp/lk/platform/mt8512/drivers/usb/mtu3.h b/src/bsp/lk/platform/mt8512/drivers/usb/mtu3.h
new file mode 100644
index 0000000..5f05d55
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/usb/mtu3.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <dev/udc.h>
+#include <hw/usb.h>
+#include <reg.h>
+
+struct udc_endpoint;
+struct mu3d_req;
+
+#include "mtu3_hw_regs.h"
+#include "mtu3_qmu.h"
+
+/* if want to use PIO mode, comment out the following macro */
+#define SUPPORT_QMU
+
+/* two bulk and ep0 */
+#define MT_EP_NUM 3
+#define MAX_EP_NUM 8
+
+#define DESC_TYPE_U2 0
+#define DESC_TYPE_U3 1
+
+/* U3 IP: EP0, TX, RX has separate SRAMs */
+#define U3IP_TX_FIFO_START_ADDR 0
+#define U3IP_RX_FIFO_START_ADDR 0
+
+/* U2 IP: EP0, TX, RX share one SRAM. 0-63 bytes are reserved for EP0 */
+#define U2IP_TX_FIFO_START_ADDR (64)
+#define U2IP_RX_FIFO_START_ADDR (64 + 512 * (MAX_EP_NUM))
+
+#define U3D_U3IP_DFT_SPEED SSUSB_SPEED_SUPER
+#define U3D_U2IP_DFT_SPEED SSUSB_SPEED_HIGH
+
+/*
+ * fastboot only supports BULK, alloc 1024B for each ep and offset are
+ * also fixed, such as, offset-1024 for ep1, offset-2048 for ep2;
+ * so MT_EP_NUM should not greater than 9(ep0 + 4 bulk in + 4 bulk out)
+ */
+#define U3D_FIFO_SIZE_UNIT 1024
+
+#define EP0_MAX_PACKET_SIZE 64
+#define EP0_MAX_PACKET_SIZE_U3 512
+
+#define USB_FIFOSZ_SIZE_8 (0x03)
+#define USB_FIFOSZ_SIZE_16 (0x04)
+#define USB_FIFOSZ_SIZE_32 (0x05)
+#define USB_FIFOSZ_SIZE_64 (0x06)
+#define USB_FIFOSZ_SIZE_128 (0x07)
+#define USB_FIFOSZ_SIZE_256 (0x08)
+#define USB_FIFOSZ_SIZE_512 (0x09)
+#define USB_FIFOSZ_SIZE_1024 (0x0A)
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
/* EP0 control-transfer software state machine. */
typedef enum {
	EP0_IDLE = 0,	/* waiting for a SETUP packet */
	EP0_RX,		/* OUT data stage in progress */
	EP0_TX,		/* IN data stage in progress */
	EP0_TX_END,	/* IN data sent; DATAEND still to be written */
} EP0_STATE;
+
/* MTK USB3 ssusb defines */
/* Connection speed, as detected by or programmed into the MAC. */
typedef enum {
	SSUSB_SPEED_UNKNOWN = 0,	/* not connected / link lost */
	SSUSB_SPEED_FULL = 1,
	SSUSB_SPEED_HIGH = 2,
	SSUSB_SPEED_SUPER = 3,
	SSUSB_SPEED_SUPER_PLUS = 4,	/* SSP not fully supported yet */
} USB_SPEED;
+
+#define USB_EP_XFER_CTRL 0
+#define USB_EP_XFER_ISO 1
+#define USB_EP_XFER_BULK 2
+#define USB_EP_XFER_INT 3
+
+/* USB transfer directions */
+#define USB_DIR_IN 0x80
+#define USB_DIR_OUT 0x00
+
/*
 * A raw USB descriptor blob kept on a singly-linked list.
 * @next: next descriptor (NULL terminates the list).
 * @tag:  ((TYPE << 8) | NUM), matched against GET_DESCRIPTOR wValue.
 * @len:  total length of @data in bytes.
 * @data: payload; C99 flexible array member (allocate with
 *        malloc(sizeof(struct udc_descriptor) + len)). Replaces the
 *        non-standard GNU zero-length array data[0].
 */
struct udc_descriptor {
	struct udc_descriptor *next;
	unsigned short tag; /* ((TYPE << 8) | NUM) */
	unsigned short len; /* total length */
	unsigned char data[];
};
+
/* Driver-private request state; 'req' MUST stay the first member so
 * to_mu3d_req() can cast between the two types. */
struct mu3d_req {
	struct udc_request req; /* should be first */
	struct udc_endpoint *ept; /* endpoint the request is queued on */
	struct qmu_gpd *gpd; /* GPD used for this transfer (QMU mode) */
	unsigned int actual; /* data already sent/rcv */
};
+
/* endpoint data */
struct udc_endpoint {
	struct udc_request *req; /* currently queued request, NULL if idle */
	struct mtu3_gpd_ring gpd_ring; /* QMU GPD ring for this endpoint */
	char name[12]; /* e.g. "ep1in", for logging */
	unsigned int maxpkt; /* max packet size at the current speed */
	unsigned char num; /* endpoint number; 0 marks an unused slot */
	unsigned char in; /* 1 = IN (TX), 0 = OUT (RX) */
	unsigned char type; /* Transfer type */
	unsigned int bit; /* EPT_TX/EPT_RX */
};
+
/* Global driver state (single controller instance). */
struct mu3d {
	struct udc_device *udev; /* board device info from udc_init() */
	struct udc_gadget *gadget; /* the one registered gadget */
	EP0_STATE ep0_state; /* EP0 software state machine */
	USB_SPEED speed; /* negotiated (or default) speed */
	u32 tx_fifo_addr; /* base of the TX FIFO SRAM region */
	u32 rx_fifo_addr; /* base of the RX FIFO SRAM region */

	struct udc_endpoint eps[MT_EP_NUM]; /* index 0 is fixed as EP0 */
	struct udc_endpoint *ep0; /* shortcut to eps[0] */
	struct mu3d_req ep0_mreq; /* statically owned EP0 request */
	u32 ept_alloc_table; /* EPT_TX/EPT_RX allocation bitmap */

	struct udc_descriptor *desc_list; /* U2 (FS/HS) descriptors */
	unsigned int next_string_id;
	struct udc_descriptor *desc_list_u3; /* U3 (SS) descriptors */
	unsigned int next_string_id_u3;

	u8 address; /* bus address from SET_ADDRESS */
	unsigned usb_online:1; /* configured (SET_CONFIGURATION != 0) */
	unsigned is_u3_ip:1; /* controller has a U3 port */
};
+
/* 16-bit MMIO accessors, defined only when the platform headers do
 * not already provide them. */
#ifndef writew
#define writew(v, a) (*REG16(a) = (v))
#endif
#ifndef readw
#define readw(a) (*REG16(a))
#endif
+
/* Poll @addr until the bits in @msk read back as @value; presumably
 * waits @us_intvl between reads for at most @count polls and returns
 * 0 on match, non-zero on timeout — TODO confirm against definition. */
int wait_for_value(paddr_t addr, u32 msk, u32 value, int us_intvl, int count);
/* Look up an allocated non-EP0 endpoint by number and direction. */
struct udc_endpoint *mtu3_find_ep(int ep_num, u8 dir);
/* Detach and complete an endpoint's pending request. */
void handle_ept_complete(struct udc_endpoint *ept, int status);
/* NOTE(review): a 'static' function declaration in a shared header
 * produces "declared static but never defined" warnings in every
 * other translation unit including this file — consider moving it
 * into the .c that defines it. */
static int pio_read_fifo(int ep_num, u8 *dst, u16 len) __attribute__((noinline));
+
/* Downcast a generic udc_request to its containing mu3d_req; valid
 * because 'req' is the first member of struct mu3d_req, so both
 * pointers have the same value. */
static inline struct mu3d_req *to_mu3d_req(struct udc_request *req)
{
	return (struct mu3d_req *)req;
}
diff --git a/src/bsp/lk/platform/mt8512/drivers/usb/mtu3_hw_regs.h b/src/bsp/lk/platform/mt8512/drivers/usb/mtu3_hw_regs.h
new file mode 100644
index 0000000..cbf265c
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/usb/mtu3_hw_regs.h
@@ -0,0 +1,495 @@
+/*
+ * mtu3_hw_regs.h - MediaTek USB3 DRD register and field definitions
+ *
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#pragma once
+
+#include <platform/mt_reg_base.h>
+
+/* segment offset of MAC register */
+#define SSUSB_DEV_BASE (USB3_BASE + 0x1000)
+#define SSUSB_EPCTL_CSR_BASE (USB3_BASE + 0x1800)
+#define SSUSB_USB3_MAC_CSR_BASE (USB3_BASE + 0x2400)
+#define SSUSB_USB3_SYS_CSR_BASE (USB3_BASE + 0x2400)
+#define SSUSB_USB2_CSR_BASE (USB3_BASE + 0x3400)
+
+/* IPPC register in Infra */
+#define SSUSB_SIFSLV_IPPC_BASE (USB3_IPPC_BASE)
+
+#define BITS_PER_LONG 32
+#ifndef BIT
+#define BIT(bit) (1UL << (bit))
+#endif
+#ifndef GENMASK
+/*
+ * Contiguous u32 mask covering bits h..l inclusive (h >= l, h <= 31).
+ * Uses ~0U (always exactly 32 bits) instead of ~0UL: with the
+ * hard-coded BITS_PER_LONG of 32, ~0UL on an LP64 build is 64 bits
+ * wide, so ~0UL >> (31 - (h)) leaves bits above h set in the low
+ * word and the u32 cast then yields a too-wide mask for h < 31
+ * (e.g. GENMASK(30, 24) would wrongly include bit 31).
+ */
+#define GENMASK(h, l) \
+ ((u32)(((~0U) << (l)) & (~0U >> (BITS_PER_LONG - 1 - (h)))))
+#endif
+
+/* SSUSB_DEV REGISTER DEFINITION */
+#define U3D_LV1ISR (SSUSB_DEV_BASE + 0x0000)
+#define U3D_LV1IER (SSUSB_DEV_BASE + 0x0004)
+#define U3D_LV1IESR (SSUSB_DEV_BASE + 0x0008)
+#define U3D_LV1IECR (SSUSB_DEV_BASE + 0x000C)
+
+#define U3D_EPISR (SSUSB_DEV_BASE + 0x0080)
+#define U3D_EPIER (SSUSB_DEV_BASE + 0x0084)
+#define U3D_EPIESR (SSUSB_DEV_BASE + 0x0088)
+#define U3D_EPIECR (SSUSB_DEV_BASE + 0x008C)
+
+#define U3D_EP0CSR (SSUSB_DEV_BASE + 0x0100)
+#define U3D_RXCOUNT0 (SSUSB_DEV_BASE + 0x0108)
+#define U3D_RESERVED (SSUSB_DEV_BASE + 0x010C)
+#define U3D_TX1CSR0 (SSUSB_DEV_BASE + 0x0110)
+#define U3D_TX1CSR1 (SSUSB_DEV_BASE + 0x0114)
+#define U3D_TX1CSR2 (SSUSB_DEV_BASE + 0x0118)
+
+#define U3D_RX1CSR0 (SSUSB_DEV_BASE + 0x0210)
+#define U3D_RX1CSR1 (SSUSB_DEV_BASE + 0x0214)
+#define U3D_RX1CSR2 (SSUSB_DEV_BASE + 0x0218)
+#define U3D_RX1CSR3 (SSUSB_DEV_BASE + 0x021C)
+
+#define U3D_FIFO0 (SSUSB_DEV_BASE + 0x0300)
+
+#define U3D_QCR0 (SSUSB_DEV_BASE + 0x0400)
+#define U3D_QCR1 (SSUSB_DEV_BASE + 0x0404)
+#define U3D_QCR2 (SSUSB_DEV_BASE + 0x0408)
+#define U3D_QCR3 (SSUSB_DEV_BASE + 0x040C)
+
+#define U3D_TXQCSR1 (SSUSB_DEV_BASE + 0x0510)
+#define U3D_TXQSAR1 (SSUSB_DEV_BASE + 0x0514)
+#define U3D_TXQCPR1 (SSUSB_DEV_BASE + 0x0518)
+
+#define U3D_RXQCSR1 (SSUSB_DEV_BASE + 0x0610)
+#define U3D_RXQSAR1 (SSUSB_DEV_BASE + 0x0614)
+#define U3D_RXQCPR1 (SSUSB_DEV_BASE + 0x0618)
+#define U3D_RXQLDPR1 (SSUSB_DEV_BASE + 0x061C)
+
+#define U3D_QISAR0 (SSUSB_DEV_BASE + 0x0700)
+#define U3D_QIER0 (SSUSB_DEV_BASE + 0x0704)
+#define U3D_QIESR0 (SSUSB_DEV_BASE + 0x0708)
+#define U3D_QIECR0 (SSUSB_DEV_BASE + 0x070C)
+#define U3D_QISAR1 (SSUSB_DEV_BASE + 0x0710)
+#define U3D_QIER1 (SSUSB_DEV_BASE + 0x0714)
+#define U3D_QIESR1 (SSUSB_DEV_BASE + 0x0718)
+#define U3D_QIECR1 (SSUSB_DEV_BASE + 0x071C)
+
+#define U3D_TQERRIR0 (SSUSB_DEV_BASE + 0x0780)
+#define U3D_TQERRIER0 (SSUSB_DEV_BASE + 0x0784)
+#define U3D_TQERRIESR0 (SSUSB_DEV_BASE + 0x0788)
+#define U3D_TQERRIECR0 (SSUSB_DEV_BASE + 0x078C)
+#define U3D_RQERRIR0 (SSUSB_DEV_BASE + 0x07C0)
+#define U3D_RQERRIER0 (SSUSB_DEV_BASE + 0x07C4)
+#define U3D_RQERRIESR0 (SSUSB_DEV_BASE + 0x07C8)
+#define U3D_RQERRIECR0 (SSUSB_DEV_BASE + 0x07CC)
+#define U3D_RQERRIR1 (SSUSB_DEV_BASE + 0x07D0)
+#define U3D_RQERRIER1 (SSUSB_DEV_BASE + 0x07D4)
+#define U3D_RQERRIESR1 (SSUSB_DEV_BASE + 0x07D8)
+#define U3D_RQERRIECR1 (SSUSB_DEV_BASE + 0x07DC)
+
+#define U3D_CAP_EP0FFSZ (SSUSB_DEV_BASE + 0x0C04)
+#define U3D_CAP_EPNTXFFSZ (SSUSB_DEV_BASE + 0x0C08)
+#define U3D_CAP_EPNRXFFSZ (SSUSB_DEV_BASE + 0x0C0C)
+#define U3D_CAP_EPINFO (SSUSB_DEV_BASE + 0x0C10)
+#define U3D_MISC_CTRL (SSUSB_DEV_BASE + 0x0C84)
+
+/* SSUSB_DEV FIELD DEFINITION */
+/* U3D_LV1ISR */
+#define EP_CTRL_INTR BIT(5)
+#define MAC2_INTR BIT(4)
+#define DMA_INTR BIT(3)
+#define MAC3_INTR BIT(2)
+#define QMU_INTR BIT(1)
+#define BMU_INTR BIT(0)
+
+/* U3D_LV1IECR */
+#define LV1IECR_MSK GENMASK(31, 0)
+
+/* U3D_EPISR */
+#define EP_RXISR(x) (BIT(16) << (x))
+#define EP_TXISR(x) (BIT(0) << (x))
+#define EP_EP0ISR BIT(0)
+
+/* U3D_EP0CSR */
+#define EP0_AUTOCLEAR BIT(30)
+#define EP0_AUTOSET BIT(29)
+#define EP0_DMAREQEN BIT(28)
+#define EP0_SENDSTALL BIT(25)
+#define EP0_FIFOFULL BIT(23)
+#define EP0_SENTSTALL BIT(22)
+#define EP0_DPHTX BIT(20)
+#define EP0_DATAEND BIT(19)
+#define EP0_TXPKTRDY BIT(18)
+#define EP0_SETUPPKTRDY BIT(17)
+#define EP0_RXPKTRDY BIT(16)
+#define EP0_MAXPKTSZ_MSK GENMASK(9, 0)
+#define EP0_MAXPKTSZ(x) ((x) & EP0_MAXPKTSZ_MSK)
+#define EP0_W1C_BITS (~(EP0_RXPKTRDY | EP0_SETUPPKTRDY | EP0_SENTSTALL))
+
+/* U3D_TX1CSR0 */
+#define TX_AUTOSET BIT(30)
+#define TX_DMAREQEN BIT(29)
+#define TX_FIFOFULL BIT(25)
+#define TX_FIFOEMPTY BIT(24)
+#define TX_SENTSTALL BIT(22)
+#define TX_SENDSTALL BIT(21)
+#define TX_TXPKTRDY BIT(16)
+#define TX_TXMAXPKTSZ_MSK GENMASK(10, 0)
+#define TX_TXMAXPKTSZ(x) ((x) & TX_TXMAXPKTSZ_MSK)
+#define TX_W1C_BITS (~(TX_SENTSTALL))
+
+/* U3D_TX1CSR1 */
+#define TX_MULT(x) (((x) & 0x3) << 22)
+#define TX_MAX_PKT(x) (((x) & 0x3f) << 16)
+#define TX_SLOT(x) (((x) & 0x3f) << 8)
+#define TX_TYPE(x) (((x) & 0x3) << 4)
+#define TX_SS_BURST(x) (((x) & 0xf) << 0)
+
+/* for TX_TYPE & RX_TYPE */
+#define TYPE_BULK (0x0)
+#define TYPE_INT (0x1)
+#define TYPE_ISO (0x2)
+#define TYPE_MSK (0x3)
+
+/* U3D_TX1CSR2 */
+#define TX_BINTERVAL(x) (((x) & 0xff) << 24)
+#define TX_FIFOSEGSIZE(x) (((x) & 0xf) << 16)
+#define TX_FIFOADDR(x) (((x) & 0x1fff) << 0)
+
+/* U3D_RX1CSR0 */
+#define RX_AUTOCLEAR BIT(30)
+#define RX_DMAREQEN BIT(29)
+#define RX_SENTSTALL BIT(22)
+#define RX_SENDSTALL BIT(21)
+#define RX_RXPKTRDY BIT(16)
+#define RX_RXMAXPKTSZ_MSK GENMASK(10, 0)
+#define RX_RXMAXPKTSZ(x) ((x) & RX_RXMAXPKTSZ_MSK)
+#define RX_W1C_BITS (~(RX_SENTSTALL | RX_RXPKTRDY))
+
+/* U3D_RX1CSR1 */
+#define RX_MULT(x) (((x) & 0x3) << 22)
+#define RX_MAX_PKT(x) (((x) & 0x3f) << 16)
+#define RX_SLOT(x) (((x) & 0x3f) << 8)
+#define RX_TYPE(x) (((x) & 0x3) << 4)
+#define RX_SS_BURST(x) (((x) & 0xf) << 0)
+
+/* U3D_RX1CSR2 */
+#define RX_BINTERVAL(x) (((x) & 0xff) << 24)
+#define RX_FIFOSEGSIZE(x) (((x) & 0xf) << 16)
+#define RX_FIFOADDR(x) (((x) & 0x1fff) << 0)
+
+/* U3D_RX1CSR3 */
+#define EP_RX_COUNT(x) (((x) >> 16) & 0x7ff)
+
+/* U3D_FIFO: ep(0-15)*/
+#define U3D_FIFO(x) (U3D_FIFO0 + ((x) * 0x10))
+#define USB_FIFO(x) (U3D_FIFO(x))
+
+/* U3D_QCR0 */
+#define QMU_RX_CS_EN(x) (BIT(16) << (x))
+#define QMU_TX_CS_EN(x) (BIT(0) << (x))
+#define QMU_CS16B_EN BIT(0)
+
+/* U3D_QCR1 */
+#define QMU_TX_ZLP(x) (BIT(0) << (x))
+
+/* U3D_QCR3 */
+#define QMU_RX_COZ(x) (BIT(16) << (x))
+#define QMU_RX_ZLP(x) (BIT(0) << (x))
+
+/* U3D_TXQCSR1 */
+/* U3D_RXQCSR1 */
+#define QMU_Q_ACTIVE BIT(15)
+#define QMU_Q_STOP BIT(2)
+#define QMU_Q_RESUME BIT(1)
+#define QMU_Q_START BIT(0)
+
+/* U3D_QISAR0, U3D_QIER0, U3D_QIESR0, U3D_QIECR0 */
+#define QMU_RX_DONE_INT(x) (BIT(16) << (x))
+#define QMU_TX_DONE_INT(x) (BIT(0) << (x))
+
+/* U3D_QISAR1, U3D_QIER1, U3D_QIESR1, U3D_QIECR1 */
+#define RXQ_ZLPERR_INT BIT(20)
+#define RXQ_LENERR_INT BIT(18)
+#define RXQ_CSERR_INT BIT(17)
+#define RXQ_EMPTY_INT BIT(16)
+#define TXQ_LENERR_INT BIT(2)
+#define TXQ_CSERR_INT BIT(1)
+#define TXQ_EMPTY_INT BIT(0)
+
+/* U3D_TQERRIR0, U3D_TQERRIER0, U3D_TQERRIESR0, U3D_TQERRIECR0 */
+#define QMU_TX_LEN_ERR(x) (BIT(16) << (x))
+#define QMU_TX_CS_ERR(x) (BIT(0) << (x))
+
+/* U3D_RQERRIR0, U3D_RQERRIER0, U3D_RQERRIESR0, U3D_RQERRIECR0 */
+#define QMU_RX_LEN_ERR(x) (BIT(16) << (x))
+#define QMU_RX_CS_ERR(x) (BIT(0) << (x))
+
+/* U3D_RQERRIR1, U3D_RQERRIER1, U3D_RQERRIESR1, U3D_RQERRIECR1 */
+#define QMU_RX_ZLP_ERR(n) (BIT(16) << (n))
+
+/* U3D_CAP_EPINFO */
+#define CAP_RX_EP_NUM(x) (((x) >> 8) & 0x1f)
+#define CAP_TX_EP_NUM(x) ((x) & 0x1f)
+
+/* U3D_MISC_CTRL */
+#define VBUS_ON BIT(1)
+#define VBUS_FRC_EN BIT(0)
+
+
+/* SSUSB_EPCTL_CSR REGISTER DEFINITION */
+#define U3D_DEVICE_CONF (SSUSB_EPCTL_CSR_BASE + 0x0000)
+#define U3D_EP_RST (SSUSB_EPCTL_CSR_BASE + 0x0004)
+
+#define U3D_DEV_LINK_INTR_ENABLE (SSUSB_EPCTL_CSR_BASE + 0x0050)
+#define U3D_DEV_LINK_INTR (SSUSB_EPCTL_CSR_BASE + 0x0054)
+
+/* SSUSB_EPCTL_CSR FIELD DEFINITION */
+/* U3D_DEVICE_CONF */
+#define DEV_ADDR_MSK GENMASK(30, 24)
+#define DEV_ADDR(x) ((0x7f & (x)) << 24)
+#define HW_USB2_3_SEL BIT(18)
+#define SW_USB2_3_SEL_EN BIT(17)
+#define SW_USB2_3_SEL BIT(16)
+#define SSUSB_DEV_SPEED(x) ((x) & 0x7)
+
+/* U3D_EP_RST */
+#define EP1_IN_RST BIT(17)
+#define EP1_OUT_RST BIT(1)
+#define EP_RST(is_in, epnum) (((is_in) ? BIT(16) : BIT(0)) << (epnum))
+#define EP0_RST BIT(0)
+
+/* U3D_DEV_LINK_INTR_ENABLE */
+/* U3D_DEV_LINK_INTR */
+#define SSUSB_DEV_SPEED_CHG_INTR BIT(0)
+
+
+/* SSUSB_USB3_MAC_CSR REGISTER DEFINITION */
+#define U3D_LTSSM_CTRL (SSUSB_USB3_MAC_CSR_BASE + 0x0010)
+#define U3D_USB3_CONFIG (SSUSB_USB3_MAC_CSR_BASE + 0x001C)
+
+#define U3D_LTSSM_INTR_ENABLE (SSUSB_USB3_MAC_CSR_BASE + 0x013C)
+#define U3D_LTSSM_INTR (SSUSB_USB3_MAC_CSR_BASE + 0x0140)
+
+/* SSUSB_USB3_MAC_CSR FIELD DEFINITION */
+/* U3D_LTSSM_CTRL */
+#define FORCE_POLLING_FAIL BIT(4)
+#define FORCE_RXDETECT_FAIL BIT(3)
+#define SOFT_U3_EXIT_EN BIT(2)
+#define COMPLIANCE_EN BIT(1)
+#define U1_GO_U2_EN BIT(0)
+
+/* U3D_USB3_CONFIG */
+#define USB3_EN BIT(0)
+
+/* U3D_LTSSM_INTR_ENABLE */
+/* U3D_LTSSM_INTR */
+#define U3_RESUME_INTR BIT(18)
+#define U3_LFPS_TMOUT_INTR BIT(17)
+#define VBUS_FALL_INTR BIT(16)
+#define VBUS_RISE_INTR BIT(15)
+#define RXDET_SUCCESS_INTR BIT(14)
+#define EXIT_U3_INTR BIT(13)
+#define EXIT_U2_INTR BIT(12)
+#define EXIT_U1_INTR BIT(11)
+#define ENTER_U3_INTR BIT(10)
+#define ENTER_U2_INTR BIT(9)
+#define ENTER_U1_INTR BIT(8)
+#define ENTER_U0_INTR BIT(7)
+#define RECOVERY_INTR BIT(6)
+#define WARM_RST_INTR BIT(5)
+#define HOT_RST_INTR BIT(4)
+#define LOOPBACK_INTR BIT(3)
+#define COMPLIANCE_INTR BIT(2)
+#define SS_DISABLE_INTR BIT(1)
+#define SS_INACTIVE_INTR BIT(0)
+
+/* SSUSB_USB3_SYS_CSR REGISTER DEFINITION */
+#define U3D_LINK_UX_INACT_TIMER (SSUSB_USB3_SYS_CSR_BASE + 0x020C)
+#define U3D_LINK_POWER_CONTROL (SSUSB_USB3_SYS_CSR_BASE + 0x0210)
+#define U3D_LINK_ERR_COUNT (SSUSB_USB3_SYS_CSR_BASE + 0x0214)
+
+/* SSUSB_USB3_SYS_CSR FIELD DEFINITION */
+/* U3D_LINK_UX_INACT_TIMER */
+#define DEV_U2_INACT_TIMEOUT_MSK GENMASK(23, 16)
+#define DEV_U2_INACT_TIMEOUT_VALUE(x) (((x) & 0xff) << 16)
+#define U2_INACT_TIMEOUT_MSK GENMASK(15, 8)
+#define U1_INACT_TIMEOUT_MSK GENMASK(7, 0)
+#define U1_INACT_TIMEOUT_VALUE(x) ((x) & 0xff)
+
+/* U3D_LINK_POWER_CONTROL */
+#define SW_U2_ACCEPT_ENABLE BIT(9)
+#define SW_U1_ACCEPT_ENABLE BIT(8)
+#define UX_EXIT BIT(5)
+#define LGO_U3 BIT(4)
+#define LGO_U2 BIT(3)
+#define LGO_U1 BIT(2)
+#define SW_U2_REQUEST_ENABLE BIT(1)
+#define SW_U1_REQUEST_ENABLE BIT(0)
+
+/* U3D_LINK_ERR_COUNT */
+#define CLR_LINK_ERR_CNT BIT(16)
+#define LINK_ERROR_COUNT GENMASK(15, 0)
+
+/* SSUSB_USB2_CSR REGISTER DEFINITION */
+#define U3D_POWER_MANAGEMENT (SSUSB_USB2_CSR_BASE + 0x0004)
+#define U3D_DEVICE_CONTROL (SSUSB_USB2_CSR_BASE + 0x000C)
+#define U3D_USB2_TEST_MODE (SSUSB_USB2_CSR_BASE + 0x0014)
+#define U3D_COMMON_USB_INTR_ENABLE (SSUSB_USB2_CSR_BASE + 0x0018)
+#define U3D_COMMON_USB_INTR (SSUSB_USB2_CSR_BASE + 0x001C)
+#define U3D_LINK_RESET_INFO (SSUSB_USB2_CSR_BASE + 0x0024)
+#define U3D_USB20_FRAME_NUM (SSUSB_USB2_CSR_BASE + 0x003C)
+#define U3D_USB20_LPM_PARAMETER (SSUSB_USB2_CSR_BASE + 0x0044)
+#define U3D_USB20_MISC_CONTROL (SSUSB_USB2_CSR_BASE + 0x004C)
+
+/* SSUSB_USB2_CSR FIELD DEFINITION */
+/* U3D_POWER_MANAGEMENT */
+#define LPM_BESL_STALL BIT(14)
+#define LPM_BESLD_STALL BIT(13)
+#define LPM_RWP BIT(11)
+#define LPM_HRWE BIT(10)
+#define LPM_MODE(x) (((x) & 0x3) << 8)
+#define ISO_UPDATE BIT(7)
+#define SOFT_CONN BIT(6)
+#define HS_ENABLE BIT(5)
+#define RESUME BIT(2)
+#define SUSPENDM_ENABLE BIT(0)
+
+/* U3D_DEVICE_CONTROL */
+#define DC_HOSTREQ BIT(1)
+#define DC_SESSION BIT(0)
+
+/* U3D_USB2_TEST_MODE */
+#define U2U3_AUTO_SWITCH BIT(10)
+#define LPM_FORCE_STALL BIT(8)
+#define FIFO_ACCESS BIT(6)
+#define FORCE_FS BIT(5)
+#define FORCE_HS BIT(4)
+#define TEST_PACKET_MODE BIT(3)
+#define TEST_K_MODE BIT(2)
+#define TEST_J_MODE BIT(1)
+#define TEST_SE0_NAK_MODE BIT(0)
+
+/* U3D_COMMON_USB_INTR_ENABLE */
+/* U3D_COMMON_USB_INTR */
+#define LPM_RESUME_INTR BIT(9)
+#define LPM_INTR BIT(8)
+#define DISCONN_INTR BIT(5)
+#define CONN_INTR BIT(4)
+#define SOF_INTR BIT(3)
+#define RESET_INTR BIT(2)
+#define RESUME_INTR BIT(1)
+#define SUSPEND_INTR BIT(0)
+
+/* U3D_LINK_RESET_INFO */
+#define WTCHRP_MSK GENMASK(19, 16)
+
+/* U3D_USB20_LPM_PARAMETER */
+#define LPM_BESLCK_U3(x) (((x) & 0xf) << 12)
+#define LPM_BESLCK(x) (((x) & 0xf) << 8)
+#define LPM_BESLDCK(x) (((x) & 0xf) << 4)
+#define LPM_BESL GENMASK(3, 0)
+
+/* U3D_USB20_MISC_CONTROL */
+#define LPM_U3_ACK_EN BIT(0)
+
+/* SSUSB_SIFSLV_IPPC REGISTER DEFINITION */
+#define U3D_SSUSB_IP_PW_CTRL0 (SSUSB_SIFSLV_IPPC_BASE + 0x0000)
+#define U3D_SSUSB_IP_PW_CTRL1 (SSUSB_SIFSLV_IPPC_BASE + 0x0004)
+#define U3D_SSUSB_IP_PW_CTRL2 (SSUSB_SIFSLV_IPPC_BASE + 0x0008)
+#define U3D_SSUSB_IP_PW_CTRL3 (SSUSB_SIFSLV_IPPC_BASE + 0x000C)
+#define U3D_SSUSB_IP_PW_STS1 (SSUSB_SIFSLV_IPPC_BASE + 0x0010)
+#define U3D_SSUSB_IP_PW_STS2 (SSUSB_SIFSLV_IPPC_BASE + 0x0014)
+#define U3D_SSUSB_OTG_STS (SSUSB_SIFSLV_IPPC_BASE + 0x0018)
+#define U3D_SSUSB_OTG_STS_CLR (SSUSB_SIFSLV_IPPC_BASE + 0x001C)
+#define U3D_SSUSB_IP_XHCI_CAP (SSUSB_SIFSLV_IPPC_BASE + 0x0024)
+#define U3D_SSUSB_IP_DEV_CAP (SSUSB_SIFSLV_IPPC_BASE + 0x0028)
+#define U3D_SSUSB_OTG_INT_EN (SSUSB_SIFSLV_IPPC_BASE + 0x002C)
+#define U3D_SSUSB_U3_CTRL_0P (SSUSB_SIFSLV_IPPC_BASE + 0x0030)
+#define U3D_SSUSB_U2_CTRL_0P (SSUSB_SIFSLV_IPPC_BASE + 0x0050)
+#define U3D_SSUSB_REF_CK_CTRL (SSUSB_SIFSLV_IPPC_BASE + 0x008C)
+#define U3D_SSUSB_DEV_RST_CTRL (SSUSB_SIFSLV_IPPC_BASE + 0x0098)
+#define U3D_SSUSB_HW_ID (SSUSB_SIFSLV_IPPC_BASE + 0x00A0)
+#define U3D_SSUSB_HW_SUB_ID (SSUSB_SIFSLV_IPPC_BASE + 0x00A4)
+#define U3D_SSUSB_IP_SPARE0 (SSUSB_SIFSLV_IPPC_BASE + 0x00C8)
+#define U3D_SSUSB_FPGA_I2C_OUT_0P (SSUSB_SIFSLV_IPPC_BASE+0x00A8)
+#define U3D_SSUSB_FPGA_I2C_IN_0P (SSUSB_SIFSLV_IPPC_BASE+0x00AC)
+
+/* SSUSB_SIFSLV_IPPC FIELD DEFINITION */
+/* U3D_SSUSB_IP_PW_CTRL0 */
+#define SSUSB_IP_SW_RST BIT(0)
+
+/* U3D_SSUSB_IP_PW_CTRL1 */
+#define SSUSB_IP_HOST_PDN BIT(0)
+
+/* U3D_SSUSB_IP_PW_CTRL2 */
+#define SSUSB_IP_DEV_PDN BIT(0)
+
+/* U3D_SSUSB_IP_PW_CTRL3 */
+#define SSUSB_IP_PCIE_PDN BIT(0)
+
+/* U3D_SSUSB_IP_PW_STS1 */
+#define SSUSB_IP_SLEEP_STS BIT(30)
+#define SSUSB_U3_MAC_RST_B_STS BIT(16)
+#define SSUSB_XHCI_RST_B_STS BIT(11)
+#define SSUSB_SYS125_RST_B_STS BIT(10)
+#define SSUSB_REF_RST_B_STS BIT(8)
+#define SSUSB_SYSPLL_STABLE BIT(0)
+
+/* U3D_SSUSB_IP_PW_STS2 */
+#define SSUSB_U2_MAC_SYS_RST_B_STS BIT(0)
+
+/* U3D_SSUSB_OTG_STS */
+#define SSUSB_VBUS_VALID BIT(9)
+
+/* U3D_SSUSB_OTG_STS_CLR */
+#define SSUSB_VBUS_INTR_CLR BIT(6)
+
+/* U3D_SSUSB_IP_XHCI_CAP */
+#define SSUSB_IP_XHCI_U2_PORT_NUM(x) (((x) >> 8) & 0xff)
+#define SSUSB_IP_XHCI_U3_PORT_NUM(x) ((x) & 0xff)
+
+/* U3D_SSUSB_IP_DEV_CAP */
+#define SSUSB_IP_DEV_U3_PORT_NUM(x) ((x) & 0xff)
+
+/* U3D_SSUSB_OTG_INT_EN */
+#define SSUSB_VBUS_CHG_INT_A_EN BIT(7)
+#define SSUSB_VBUS_CHG_INT_B_EN BIT(6)
+
+/* U3D_SSUSB_U3_CTRL_0P */
+#define SSUSB_U3_PORT_SSP_SPEED BIT(9)
+#define SSUSB_U3_PORT_HOST_SEL BIT(2)
+#define SSUSB_U3_PORT_PDN BIT(1)
+#define SSUSB_U3_PORT_DIS BIT(0)
+
+/* U3D_SSUSB_U2_CTRL_0P */
+#define SSUSB_U2_PORT_OTG_SEL BIT(7)
+#define SSUSB_U2_PORT_HOST_SEL BIT(2)
+#define SSUSB_U2_PORT_PDN BIT(1)
+#define SSUSB_U2_PORT_DIS BIT(0)
+
+/* U3D_SSUSB_DEV_RST_CTRL */
+#define SSUSB_DEV_SW_RST BIT(0)
diff --git a/src/bsp/lk/platform/mt8512/drivers/usb/mtu3_qmu.c b/src/bsp/lk/platform/mt8512/drivers/usb/mtu3_qmu.c
new file mode 100644
index 0000000..7dba054
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/usb/mtu3_qmu.c
@@ -0,0 +1,559 @@
+/*
+ * mtu3_qmu.c - Queue Management Unit driver for device controller
+ *
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Queue Management Unit (QMU) is designed to unload SW effort
+ * to serve DMA interrupts.
+ * By preparing General Purpose Descriptor (GPD) and Buffer Descriptor (BD),
+ * SW links data buffers and triggers QMU to send / receive data to
+ * host / from device at a time.
+ * And now only GPD is supported.
+ *
+ * For more detailed information, please refer to QMU Programming Guide
+ */
+
+#include <arch/ops.h>
+#include <debug.h>
+#include <errno.h>
+#include <kernel/vm.h>
+#include <lib/mempool.h>
+#include <platform/reg_utils.h>
+#include <string.h>
+#include <stdlib.h>
+#include <sys/types.h>
+
+#include "mtu3.h"
+#include "mtu3_hw_regs.h"
+#include "mtu3_qmu.h"
+
+#pragma GCC push_options
+#pragma GCC optimize("O1")
+
+#define QMU_CHECKSUM_LEN 16
+
+#define GPD_FLAGS_HWO BIT(0)
+#define GPD_FLAGS_BDP BIT(1)
+#define GPD_FLAGS_BPS BIT(2)
+#define GPD_FLAGS_IOC BIT(7)
+
+#define GPD_EXT_FLAG_ZLP BIT(5)
+#undef INFO
+#define INFO 2
+#define SPEW 2
+
+#define DBG_C(x...) dprintf(CRITICAL, "[USB][QMU] " x)
+#define DBG_I(x...) dprintf(INFO, "[USB][QMU] " x)
+#define DBG_S(x...) dprintf(SPEW, "[USB][QMU] " x)
+
+#ifdef SUPPORT_QMU
+
+/* Translate a kernel virtual address into the physical address that
+ * the QMU DMA engine must be programmed with. */
+static paddr_t va_to_pa(void *vaddr)
+{
+#if WITH_KERNEL_VM
+ return kvaddr_to_paddr(vaddr);
+#else
+ /* no VM: addresses are identity-mapped */
+ return (paddr_t)vaddr;
+#endif
+}
+
+/* Inverse of va_to_pa(): map a physical address back to a kernel
+ * virtual address. */
+static void *pa_to_va(paddr_t paddr)
+{
+#if WITH_KERNEL_VM
+ return paddr_to_kvaddr(paddr);
+#else
+ /* no VM: addresses are identity-mapped */
+ return (void *)paddr;
+#endif
+}
+
+/*
+ * Map a GPD bus address (as read back from a QMU current-pointer
+ * register) onto the corresponding GPD inside the ring.
+ * Returns NULL when the address does not land in the ring.
+ */
+static struct qmu_gpd *gpd_dma_to_virt(struct mtu3_gpd_ring *ring,
+ paddr_t dma_addr)
+{
+ u32 idx = (dma_addr - ring->dma) / sizeof(struct qmu_gpd);
+
+ return (idx < MAX_GPD_NUM) ? (ring->start + idx) : NULL;
+}
+
+/*
+ * Map a GPD pointer inside the ring back to the bus address the QMU
+ * hardware uses. Returns 0 when gpd is not a member of the ring.
+ */
+static paddr_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring,
+ struct qmu_gpd *gpd)
+{
+ u32 idx = gpd - ring->start;
+
+ if (idx >= MAX_GPD_NUM)
+ return 0;
+
+ return ring->dma + idx * sizeof(*gpd);
+}
+
+/* Point every ring cursor at the first GPD and mark the last slot as
+ * the wrap boundary. */
+static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd)
+{
+ ring->end = gpd + (MAX_GPD_NUM - 1);
+ ring->start = gpd;
+ ring->dequeue = gpd;
+ ring->enqueue = gpd;
+}
+
+/* Take software ownership of the head GPD (clear HWO) and rewind the
+ * ring cursors; no-op when the ring was never allocated. */
+static void reset_gpd_list(struct udc_endpoint *mep)
+{
+ struct qmu_gpd *first = mep->gpd_ring.start;
+
+ if (first == NULL)
+ return;
+
+ first->flag &= ~GPD_FLAGS_HWO;
+ gpd_ring_init(&mep->gpd_ring, first);
+}
+
+/*
+ * Allocate an endpoint's GPD ring from uncached memory (the QMU reads
+ * it by DMA), zero it so every GPD starts software-owned, and set up
+ * the ring cursors. Returns 0 on success, -ENOMEM on allocation failure.
+ */
+int mtu3_gpd_ring_alloc(struct udc_endpoint *mep)
+{
+ struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+ const u32 bytes = MAX_GPD_NUM * sizeof(struct qmu_gpd);
+ struct qmu_gpd *gpd;
+
+ gpd = (struct qmu_gpd *)mempool_alloc(bytes, MEMPOOL_UNCACHE);
+ if (!gpd)
+ return -ENOMEM;
+
+ /* zeroed GPDs => HWO clear => software owns all of them */
+ memset(gpd, 0, bytes);
+ ring->dma = va_to_pa(gpd);
+ gpd_ring_init(ring, gpd);
+ return 0;
+}
+
+/* Release an endpoint's GPD ring and wipe its bookkeeping so stale
+ * cursors cannot be reused. */
+void mtu3_gpd_ring_free(struct udc_endpoint *mep)
+{
+ mempool_free(mep->gpd_ring.start);
+ memset(&mep->gpd_ring, 0, sizeof(mep->gpd_ring));
+}
+
+/*
+ * Compute the 8-bit checksum the QMU verifies over the first
+ * QMU_CHECKSUM_LEN (16) bytes of a GPD/BD.
+ * data[1] (the checksum field itself) is zeroed before summing, and
+ * the sum is biased by 1 because the caller sets HWO (flag bit 0)
+ * only after this runs, while the hardware checks the sum with HWO
+ * already set. The "mb" keeps the preceding descriptor stores from
+ * being reordered past the byte-wise read-back.
+ */
+static u8 qmu_calc_checksum(u8 *data)
+{
+ u8 chksum = 0;
+ int i;
+
+ data[1] = 0x0; /* set checksum to 0 */
+
+ mb(); /* ensure the gpd/bd is really up-to-date */
+ for (i = 0; i < QMU_CHECKSUM_LEN; i++)
+ chksum += data[i];
+
+ /* Default: HWO=1, @flag[bit0] */
+ chksum += 1;
+
+ return 0xFF - chksum;
+}
+
+/*
+ * Kick the endpoint's TX or RX queue out of the stopped state by
+ * writing RESUME to its queue CSR. The write is issued a second time
+ * if the queue did not report ACTIVE after the first one
+ * (NOTE(review): presumably a HW quirk where a single RESUME can be
+ * dropped — mirrors the upstream mtu3 driver; confirm against the
+ * QMU programming guide).
+ */
+void mtu3_qmu_resume(struct udc_endpoint *mep)
+{
+ int epnum = mep->num;
+ paddr_t qcsr;
+
+ qcsr = mep->in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);
+
+ writel(QMU_Q_RESUME, qcsr);
+ if (!(readl(qcsr) & QMU_Q_ACTIVE))
+ writel(QMU_Q_RESUME, qcsr);
+}
+
+/* Step the enqueue cursor forward one GPD, wrapping to the ring start
+ * after the last slot; returns the new cursor. */
+static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
+{
+ ring->enqueue = (ring->enqueue < ring->end) ?
+ ring->enqueue + 1 : ring->start;
+
+ return ring->enqueue;
+}
+
+/* Step the dequeue cursor forward one GPD, wrapping to the ring start
+ * after the last slot; returns the new cursor. */
+static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
+{
+ ring->dequeue = (ring->dequeue < ring->end) ?
+ ring->dequeue + 1 : ring->start;
+
+ return ring->dequeue;
+}
+
+/*
+ * Room check for the GPD ring: returns nonzero when advancing the
+ * enqueue cursor would land on the dequeue cursor. One GPD is kept
+ * reserved to simplify GPD preparation, so that collision means no
+ * free slot remains.
+ * NOTE(review): despite the name this detects "ring full", not
+ * "ring empty" — same naming as the upstream mtu3 driver.
+ */
+static int gpd_ring_empty(struct mtu3_gpd_ring *ring)
+{
+ struct qmu_gpd *next = (ring->enqueue < ring->end) ?
+ ring->enqueue + 1 : ring->start;
+
+ /* one gpd is reserved to simplify gpd preparation */
+ return next == ring->dequeue;
+}
+
+/* Returns nonzero when the endpoint's GPD ring has no free slot for
+ * another transfer (see gpd_ring_empty), i.e. the caller must not
+ * queue a new request yet. */
+int mtu3_prepare_transfer(struct udc_endpoint *mep)
+{
+ return gpd_ring_empty(&mep->gpd_ring);
+}
+
+/*
+ * Fill the current enqueue GPD for an IN (TX) transfer and hand it to
+ * hardware. Statement order matters: the next GPD's HWO is cleared
+ * and linked first, the checksum is computed over the finished
+ * descriptor, and only then is HWO set to give it to the QMU.
+ * Always returns 0.
+ */
+static int mtu3_prepare_tx_gpd(struct udc_endpoint *mep, struct mu3d_req *mreq)
+{
+ struct qmu_gpd *enq;
+ struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+ struct qmu_gpd *gpd = ring->enqueue;
+ struct udc_request *req = &mreq->req;
+
+ /* set all fields to zero as default value */
+ memset(gpd, 0, sizeof(*gpd));
+
+ /* QMU takes bus addresses, and only 32 bits of them */
+ gpd->buffer = (u32)va_to_pa(req->buffer);
+ gpd->buf_len = (req->length);
+ gpd->flag |= GPD_FLAGS_IOC;
+
+ /* get the next GPD */
+ enq = advance_enq_gpd(ring);
+ DBG_I("TX %s queue gpd=%p, enq=%p\n", mep->name, gpd, enq);
+
+ /* next GPD stays software-owned; link it before publishing this one */
+ enq->flag &= ~GPD_FLAGS_HWO;
+ gpd->next_gpd = (u32)gpd_virt_to_dma(ring, enq);
+
+ /* non-ISO endpoints terminate short transfers with a ZLP */
+ if (mep->type != USB_EP_XFER_ISO)
+ gpd->ext_flag |= GPD_EXT_FLAG_ZLP;
+
+ /* checksum must be computed before HWO is set (see qmu_calc_checksum) */
+ gpd->chksum = qmu_calc_checksum((u8 *)gpd);
+ gpd->flag |= GPD_FLAGS_HWO;
+
+ mreq->gpd = gpd;
+
+ return 0;
+}
+
+/*
+ * Fill the current enqueue GPD for an OUT (RX) transfer and hand it
+ * to hardware. data_buf_len carries the buffer capacity; the QMU
+ * writes the received byte count back into buf_len (read in
+ * qmu_done_rx). Same ordering rules as mtu3_prepare_tx_gpd:
+ * link/clear the next GPD, checksum, then set HWO last.
+ * Always returns 0.
+ */
+static int mtu3_prepare_rx_gpd(struct udc_endpoint *mep, struct mu3d_req *mreq)
+{
+ struct qmu_gpd *enq;
+ struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+ struct qmu_gpd *gpd = ring->enqueue;
+ struct udc_request *req = &mreq->req;
+
+ /* set all fields to zero as default value */
+ memset(gpd, 0, sizeof(*gpd));
+
+ /* QMU takes bus addresses, and only 32 bits of them */
+ gpd->buffer = (u32)va_to_pa(req->buffer);
+ gpd->data_buf_len = req->length;
+ gpd->flag |= GPD_FLAGS_IOC;
+
+ /* get the next GPD */
+ enq = advance_enq_gpd(ring);
+ DBG_I("RX %s queue gpd=%p, enq=%p\n", mep->name, gpd, enq);
+
+ /* next GPD stays software-owned; link it before publishing this one */
+ enq->flag &= ~GPD_FLAGS_HWO;
+ gpd->next_gpd = (u32)gpd_virt_to_dma(ring, enq);
+ /* checksum must be computed before HWO is set (see qmu_calc_checksum) */
+ gpd->chksum = qmu_calc_checksum((u8 *)gpd);
+ gpd->flag |= GPD_FLAGS_HWO;
+
+ mreq->gpd = gpd;
+
+ return 0;
+}
+
+/* Queue a request on its endpoint by preparing a TX or RX GPD,
+ * depending on the endpoint direction. */
+void mtu3_insert_gpd(struct udc_endpoint *mep, struct mu3d_req *mreq)
+{
+ if (!mep->in)
+ mtu3_prepare_rx_gpd(mep, mreq);
+ else
+ mtu3_prepare_tx_gpd(mep, mreq);
+}
+
+/*
+ * Program the QMU for one endpoint and start its queue:
+ * set the ring start address, enable DMA requests and per-EP
+ * checksumming, unmask the error interrupts, then write Q_START —
+ * unless the queue is already ACTIVE, in which case it is left alone.
+ * Always returns 0.
+ */
+int mtu3_qmu_start(struct udc_endpoint *mep)
+{
+ struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+ u8 epnum = mep->num;
+
+ if (mep->in) {
+ /* set QMU start address */
+ writel(ring->dma, USB_QMU_TQSAR(mep->num))
+ setbits32_r(TX_DMAREQEN, MU3D_EP_TXCR0(mep->num));
+ setbits32_r(QMU_TX_CS_EN(epnum), U3D_QCR0);
+ /* send zero length packet according to ZLP flag in GPD */
+ setbits32_r(QMU_TX_ZLP(epnum), U3D_QCR1);
+ writel(QMU_TX_LEN_ERR(epnum) | QMU_TX_CS_ERR(epnum), U3D_TQERRIESR0);
+
+ if (readl(USB_QMU_TQCSR(epnum)) & QMU_Q_ACTIVE) {
+ DBG_C("%s Active Now!\n", mep->name);
+ return 0;
+ }
+ writel(QMU_Q_START, USB_QMU_TQCSR(epnum));
+
+ } else {
+ writel(ring->dma, USB_QMU_RQSAR(mep->num));
+ setbits32_r(RX_DMAREQEN, MU3D_EP_RXCR0(mep->num));
+ setbits32_r(QMU_RX_CS_EN(epnum), U3D_QCR0);
+ /* don't expect ZLP */
+ clrbits32_r(QMU_RX_ZLP(epnum), U3D_QCR3);
+ /* move to next GPD when receive ZLP */
+ setbits32_r(QMU_RX_COZ(epnum), U3D_QCR3);
+ writel(QMU_RX_LEN_ERR(epnum) | QMU_RX_CS_ERR(epnum), U3D_RQERRIESR0);
+ writel(QMU_RX_ZLP_ERR(epnum), U3D_RQERRIESR1);
+
+ if (readl(USB_QMU_RQCSR(epnum)) & QMU_Q_ACTIVE) {
+ DBG_C("%s Active Now!\n", mep->name);
+ return 0;
+ }
+ writel(QMU_Q_START, USB_QMU_RQCSR(epnum));
+ }
+ DBG_I("%s's qmu start now!\n", mep->name);
+
+ return 0;
+}
+
+/* may be called in atomic context */
+/*
+ * Stop an endpoint's QMU queue: no-op if it is already inactive,
+ * otherwise write Q_STOP and poll (10us x 100) until ACTIVE clears.
+ * Logs and gives up if the queue refuses to stop.
+ */
+static void mtu3_qmu_stop(struct udc_endpoint *mep)
+{
+ int epnum = mep->num;
+ paddr_t qcsr;
+ int ret;
+
+ qcsr = mep->in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);
+
+ if (!(readl(qcsr) & QMU_Q_ACTIVE)) {
+ DBG_C("%s's qmu is inactive now!\n", mep->name);
+ return;
+ }
+ writel(QMU_Q_STOP, qcsr);
+
+ /* wait for the hardware to acknowledge the stop */
+ ret = wait_for_value(qcsr, QMU_Q_ACTIVE, 0, 10, 100);
+ if (ret) {
+ DBG_C("stop %s's qmu failed\n", mep->name);
+ return;
+ }
+
+ DBG_I("%s's qmu stop now!\n", mep->name);
+}
+
+/* Abort an endpoint's queued work: stop its QMU queue, then reclaim
+ * the GPD ring (clear HWO on the head GPD and rewind the cursors). */
+void mtu3_qmu_flush(struct udc_endpoint *mep)
+{
+ DBG_I("%s flush QMU %s\n", __func__, mep->name);
+
+ /* Stop QMU */
+ mtu3_qmu_stop(mep);
+ reset_gpd_list(mep);
+}
+
+/*
+ * TX-done handler for one IN endpoint: walk the GPD ring from the
+ * dequeue cursor up to the hardware's current GPD, completing the
+ * endpoint's request for every GPD the hardware has given back
+ * (HWO clear). Bails out if the endpoint's current request does not
+ * match the dequeued GPD.
+ */
+static void qmu_done_tx(u8 epnum)
+{
+ struct udc_endpoint *mep = mtu3_find_ep(epnum, USB_DIR_IN);
+ struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+ struct qmu_gpd *gpd = ring->dequeue;
+ struct qmu_gpd *gpd_current = NULL;
+ struct udc_request *request = NULL;
+ struct mu3d_req *mreq;
+ paddr_t gpd_dma;
+
+ gpd_dma = readl(USB_QMU_TQCPR(epnum));
+ /* translate the bus address from the QMU register back to a GPD pointer */
+ gpd_current = gpd_dma_to_virt(ring, gpd_dma);
+
+ DBG_I("%s %s, last=%p, current=%p, enq=%p\n",
+ __func__, mep->name, gpd, gpd_current, ring->enqueue);
+
+ /* HWO still set means the hardware has not finished this GPD yet */
+ while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {
+
+ request = mep->req;
+ mreq = to_mu3d_req(request);
+ if (mreq == NULL || mreq->gpd != gpd) {
+ DBG_C("no correct TX req is found\n");
+ break;
+ }
+
+ mreq->actual = gpd->buf_len;
+ handle_ept_complete(mep, 0);
+ gpd = advance_deq_gpd(ring);
+ }
+
+ DBG_I("%s EP%dIN, deq=%p, enq=%p, complete\n",
+ __func__, epnum, ring->dequeue, ring->enqueue);
+}
+
+/*
+ * RX-done handler for one OUT endpoint; mirrors qmu_done_tx.
+ * gpd->buf_len here is the byte count the hardware wrote back
+ * (the capacity was programmed into data_buf_len by
+ * mtu3_prepare_rx_gpd).
+ */
+static void qmu_done_rx(u8 epnum)
+{
+ struct udc_endpoint *mep = mtu3_find_ep(epnum, USB_DIR_OUT);
+ struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+ struct qmu_gpd *gpd = ring->dequeue;
+ struct qmu_gpd *gpd_current = NULL;
+ struct udc_request *request = NULL;
+ struct mu3d_req *mreq;
+ paddr_t gpd_dma;
+
+ gpd_dma = readl(USB_QMU_RQCPR(epnum));
+ /* translate the bus address from the QMU register back to a GPD pointer */
+ gpd_current = gpd_dma_to_virt(ring, gpd_dma);
+
+ DBG_I("%s %s, last=%p, current=%p, enq=%p\n",
+ __func__, mep->name, gpd, gpd_current, ring->enqueue);
+
+ /* HWO still set means the hardware has not finished this GPD yet */
+ while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {
+
+ request = mep->req;
+ mreq = to_mu3d_req(request);
+ if (mreq == NULL || mreq->gpd != gpd) {
+ DBG_C("no correct RX req is found\n");
+ break;
+ }
+ mreq->actual = gpd->buf_len;
+ handle_ept_complete(mep, 0);
+ gpd = advance_deq_gpd(ring);
+ }
+
+ DBG_I("%s EP%dOUT, deq=%p, enq=%p, complete\n",
+ __func__, epnum, ring->dequeue, ring->enqueue);
+}
+
+/* Fan the QMU done-interrupt status out to the per-endpoint RX/TX
+ * completion handlers (EP0 is not QMU-driven, so numbering starts
+ * at 1; TX and RX each own half of MT_EP_NUM). */
+static void qmu_done_isr(u32 done_status)
+{
+ int ep;
+
+ for (ep = 1; ep <= MT_EP_NUM / 2; ep++) {
+ if (done_status & QMU_RX_DONE_INT(ep))
+ qmu_done_rx(ep);
+ if (done_status & QMU_TX_DONE_INT(ep))
+ qmu_done_tx(ep);
+ }
+}
+
+/*
+ * Handle QMU error interrupts: for each asserted error class, read
+ * the per-endpoint error status register, log every offending
+ * endpoint, and write the value back to clear it (W1C). Errors are
+ * only logged — no recovery is attempted here.
+ */
+static void qmu_exception_isr(u32 qmu_status)
+{
+ u32 errval;
+ int i;
+
+ if ((qmu_status & RXQ_CSERR_INT) || (qmu_status & RXQ_LENERR_INT)) {
+ errval = readl(U3D_RQERRIR0);
+ for (i = 1; i <= (MT_EP_NUM / 2); i++) {
+ if (errval & QMU_RX_CS_ERR(i))
+ DBG_C("Rx EP%d CS error!\n", i);
+
+ if (errval & QMU_RX_LEN_ERR(i))
+ DBG_C("RX EP%d Length error\n", i);
+ }
+ writel(errval, U3D_RQERRIR0); /* W1C */
+ }
+
+ if (qmu_status & RXQ_ZLPERR_INT) {
+ errval = readl(U3D_RQERRIR1);
+ for (i = 1; i <= (MT_EP_NUM / 2); i++) {
+ if (errval & QMU_RX_ZLP_ERR(i))
+ DBG_I("RX EP%d Recv ZLP\n", i);
+ }
+ writel(errval, U3D_RQERRIR1); /* W1C */
+ }
+
+ if ((qmu_status & TXQ_CSERR_INT) || (qmu_status & TXQ_LENERR_INT)) {
+ errval = readl(U3D_TQERRIR0);
+ for (i = 1; i <= (MT_EP_NUM / 2); i++) {
+ if (errval & QMU_TX_CS_ERR(i))
+ DBG_C("Tx EP%d checksum error!\n", i);
+
+ if (errval & QMU_TX_LEN_ERR(i))
+ DBG_I("Tx EP%d send ZLP failed\n", i);
+ }
+ writel(errval, U3D_TQERRIR0); /* W1C */
+ }
+}
+
+/*
+ * Top-level QMU interrupt handler: latch the masked exception and
+ * done statuses, acknowledge the done bits (W1C), then dispatch to
+ * the completion and error handlers. Always requests a reschedule
+ * since completions may wake waiters.
+ */
+enum handler_return mtu3_qmu_isr(void)
+{
+ u32 qmu_status;
+ u32 qmu_done_status;
+
+ /* U3D_QISAR1 is read update */
+ qmu_status = readl(U3D_QISAR1);
+ qmu_status &= readl(U3D_QIER1);
+
+ qmu_done_status = readl(U3D_QISAR0);
+ qmu_done_status &= readl(U3D_QIER0);
+ writel(qmu_done_status, U3D_QISAR0); /* W1C */
+ DBG_I("[INTR] QMUdone[TX=%x, RX=%x] QMUexp[%x]\n",
+ (qmu_done_status & 0xFFFF), qmu_done_status >> 16,
+ qmu_status);
+
+ if (qmu_done_status)
+ qmu_done_isr(qmu_done_status);
+
+ if (qmu_status)
+ qmu_exception_isr(qmu_status);
+
+ return INT_RESCHEDULE;
+}
+
+/*
+ * Sanity-check the GPD layout before the QMU is used: the hardware
+ * (and qmu_calc_checksum's QMU_CHECKSUM_LEN) require exactly 16-byte
+ * GPDs. Returns 0 on success, -EFAULT if struct qmu_gpd is mis-sized.
+ */
+int mtu3_qmu_init(void)
+{
+ if (QMU_GPD_SIZE != 16) {
+ /* add the missing newline so the log line is terminated like
+  * every other DBG_C message in this file */
+ DBG_C("QMU_GPD size SHOULD be 16 Bytes\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+#else /* PIO mode */
+
+/* PIO mode: the QMU is compiled out, so these stubs keep the public
+ * interface intact while doing nothing. */
+void mtu3_qmu_flush(struct udc_endpoint *mep)
+{}
+
+/* No ring to allocate in PIO mode; always succeeds. */
+int mtu3_gpd_ring_alloc(struct udc_endpoint *mep)
+{
+ return 0;
+}
+
+/* No ring to free in PIO mode. */
+void mtu3_gpd_ring_free(struct udc_endpoint *mep)
+{}
+
+/* No QMU interrupts can occur in PIO mode. */
+enum handler_return mtu3_qmu_isr(void)
+{
+ return INT_NO_RESCHEDULE;
+}
+
+/* Nothing to check in PIO mode; always succeeds. */
+int mtu3_qmu_init(void)
+{
+ return 0;
+}
+
+#endif /* SUPPORT_QMU */
+
+#pragma GCC pop_options
diff --git a/src/bsp/lk/platform/mt8512/drivers/usb/mtu3_qmu.h b/src/bsp/lk/platform/mt8512/drivers/usb/mtu3_qmu.h
new file mode 100644
index 0000000..9dfaa89
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/usb/mtu3_qmu.h
@@ -0,0 +1,113 @@
+/*
+ * mtu3_qmu.h - Queue Management Unit driver header
+ *
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#pragma once
+
+#include "mtu3.h"
+
+struct qmu_gpd;
+
+#define MAX_GPD_NUM 4
+#define QMU_GPD_SIZE (sizeof(struct qmu_gpd))
+#define QMU_GPD_RING_SIZE ((MAX_GPD_NUM) * (QMU_GPD_SIZE))
+
+#define GPD_BUF_SIZE 65532
+
+#define MU3D_EP_TXCR0(epnum) (U3D_TX1CSR0 + (((epnum) - 1) * 0x10))
+#define MU3D_EP_TXCR1(epnum) (U3D_TX1CSR1 + (((epnum) - 1) * 0x10))
+#define MU3D_EP_TXCR2(epnum) (U3D_TX1CSR2 + (((epnum) - 1) * 0x10))
+
+#define MU3D_EP_RXCR0(epnum) (U3D_RX1CSR0 + (((epnum) - 1) * 0x10))
+#define MU3D_EP_RXCR1(epnum) (U3D_RX1CSR1 + (((epnum) - 1) * 0x10))
+#define MU3D_EP_RXCR2(epnum) (U3D_RX1CSR2 + (((epnum) - 1) * 0x10))
+#define MU3D_EP_RXCR3(epnum) (U3D_RX1CSR3 + (((epnum) - 1) * 0x10))
+
+#define USB_QMU_RQCSR(epnum) (U3D_RXQCSR1 + (((epnum) - 1) * 0x10))
+#define USB_QMU_RQSAR(epnum) (U3D_RXQSAR1 + (((epnum) - 1) * 0x10))
+#define USB_QMU_RQCPR(epnum) (U3D_RXQCPR1 + (((epnum) - 1) * 0x10))
+
+#define USB_QMU_TQCSR(epnum) (U3D_TXQCSR1 + (((epnum) - 1) * 0x10))
+#define USB_QMU_TQSAR(epnum) (U3D_TXQSAR1 + (((epnum) - 1) * 0x10))
+#define USB_QMU_TQCPR(epnum) (U3D_TXQCPR1 + (((epnum) - 1) * 0x10))
+
+
+/* U3D_QCR0 */
+#define QMU_RX_CS_EN(x) (BIT(16) << (x))
+#define QMU_TX_CS_EN(x) (BIT(0) << (x))
+#define QMU_CS16B_EN BIT(0)
+
+/* U3D_QCR1 */
+#define QMU_TX_ZLP(x) (BIT(0) << (x))
+
+/* U3D_QCR3 */
+#define QMU_RX_COZ(x) (BIT(16) << (x))
+#define QMU_RX_ZLP(x) (BIT(0) << (x))
+
+/* U3D_TXQCSR1 */
+/* U3D_RXQCSR1 */
+#define QMU_Q_ACTIVE BIT(15)
+#define QMU_Q_STOP BIT(2)
+#define QMU_Q_RESUME BIT(1)
+#define QMU_Q_START BIT(0)
+
+/* U3D_QISAR0, U3D_QIER0, U3D_QIESR0, U3D_QIECR0 */
+#define QMU_RX_DONE_INT(x) (BIT(16) << (x))
+#define QMU_TX_DONE_INT(x) (BIT(0) << (x))
+
+
+/*
+ * General Purpose Descriptor (GPD) as consumed by the QMU DMA engine.
+ * Must be exactly 16 bytes (enforced in mtu3_qmu_init()), hence packed.
+ * Field semantics follow the MTU3 QMU layout; 32-bit DMA addresses only.
+ */
+struct qmu_gpd {
+ u8 flag; /* control/status flags (ownership bit etc.) -- see mtu3.h; TODO confirm */
+ u8 chksum; /* descriptor checksum (used when QMU_CS16B_EN is set) */
+ u16 data_buf_len; /* RX: allocated buffer length */
+ u32 next_gpd; /* DMA address of the next GPD in the ring */
+ u32 buffer; /* DMA address of the data buffer */
+ u16 buf_len; /* TX: bytes to send; RX: bytes received -- TODO confirm */
+ u8 ext_len;
+ u8 ext_flag;
+} __attribute__((packed));
+
+/*
+ * Per-endpoint GPD ring bookkeeping: physical base plus virtual
+ * start/end bounds and enqueue/dequeue cursors.
+ */
+struct mtu3_gpd_ring {
+ paddr_t dma;
+ struct qmu_gpd *start;
+ struct qmu_gpd *end;
+ struct qmu_gpd *enqueue;
+ struct qmu_gpd *dequeue;
+};
+
+int mtu3_qmu_start(struct udc_endpoint *mep);
+void mtu3_qmu_resume(struct udc_endpoint *mep);
+void mtu3_qmu_flush(struct udc_endpoint *mep);
+
+void mtu3_insert_gpd(struct udc_endpoint *mep, struct mu3d_req *mreq);
+int mtu3_prepare_transfer(struct udc_endpoint *mep);
+
+int mtu3_gpd_ring_alloc(struct udc_endpoint *mep);
+void mtu3_gpd_ring_free(struct udc_endpoint *mep);
+
+enum handler_return mtu3_qmu_isr(void);
+int mtu3_qmu_init(void);
diff --git a/src/bsp/lk/platform/mt8512/drivers/usb/u3phy-i2c.c b/src/bsp/lk/platform/mt8512/drivers/usb/u3phy-i2c.c
new file mode 100644
index 0000000..4f0378a
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/usb/u3phy-i2c.c
@@ -0,0 +1,352 @@
+#include <debug.h>
+#include <reg.h>
+
+#define PHY_TRUE 1
+#define PHY_FALSE 0
+
+#define SDA 0
+#define SCL 1
+
+#define INPUT 0
+#define OUTPUT 1
+
+#define SSUSB_FPGA_I2C_OUT_OFFSET 0
+#define SSUSB_FPGA_I2C_IN_OFFSET 0x04
+
+#define SSUSB_FPGA_I2C_SDA_OUT (1<<0)
+#define SSUSB_FPGA_I2C_SDA_OEN (1<<1)
+#define SSUSB_FPGA_I2C_SCL_OUT (1<<2)
+#define SSUSB_FPGA_I2C_SCL_OEN (1<<3)
+
+#define SSUSB_FPGA_I2C_SDA_IN_OFFSET 0
+#define SSUSB_FPGA_I2C_SCL_IN_OFFSET 1
+
+#define I2C_DELAY 10
+
+typedef unsigned char u8;
+typedef unsigned int u32;
+
+/*
+ * i2c_dummy_delay() - crude CPU busy-wait used to pace the bit-banged
+ * I2C lines; "volatile" keeps the compiler from deleting the loop.
+ *
+ * Rewritten as a pre-test loop: the old do/while decremented before
+ * testing, so a count of 0 wrapped to UINT_MAX and stalled for ~4G
+ * iterations. For count >= 1 the iteration count is unchanged.
+ */
+static void i2c_dummy_delay(volatile unsigned int count)
+{
+ while (count > 0)
+ count--;
+}
+
+/*
+ * gpio_set_direction() - set the SDA or SCL pin direction on the FPGA
+ * bit-bang I2C port.
+ * @i2c_port_base: base of the FPGA I2C GPIO register block
+ * @gpio_dir: OUTPUT or INPUT
+ * @gpio_pin: SDA or SCL
+ *
+ * Read-modify-write of the corresponding OEN (output-enable) bit in the
+ * OUT register. Releasing a line to INPUT lets the external pull-up
+ * drive it high, as required by open-drain I2C signalling.
+ */
+void gpio_set_direction(void *i2c_port_base, unsigned char gpio_dir, unsigned char gpio_pin)
+{
+ unsigned int temp;
+ void *addr;
+
+ addr = i2c_port_base + SSUSB_FPGA_I2C_OUT_OFFSET;
+
+ temp = readl(addr);
+
+ if (gpio_pin == SDA) {
+ if (gpio_dir==OUTPUT) {
+ temp |= SSUSB_FPGA_I2C_SDA_OEN;
+ writel(temp, addr);
+ } else {
+ temp &= ~SSUSB_FPGA_I2C_SDA_OEN;
+ writel(temp, addr);
+ }
+ } else {
+ if (gpio_dir==OUTPUT) {
+ temp |= SSUSB_FPGA_I2C_SCL_OEN;
+ writel(temp, addr);
+ } else {
+ temp &= ~SSUSB_FPGA_I2C_SCL_OEN;
+ writel(temp, addr);
+ }
+ }
+}
+
+/*
+ * gpio_set_value() - drive the SDA or SCL output level on the FPGA
+ * bit-bang I2C port.
+ * @i2c_port_base: base of the FPGA I2C GPIO register block
+ * @value: 1 to drive high, anything else drives low
+ * @gpio_pin: SDA or SCL
+ *
+ * Read-modify-write of the OUT bit; only takes effect while the pin's
+ * OEN bit is set (see gpio_set_direction()).
+ */
+void gpio_set_value(void *i2c_port_base, unsigned char value, unsigned char gpio_pin)
+{
+ unsigned int temp;
+ void *addr;
+
+ addr = i2c_port_base + SSUSB_FPGA_I2C_OUT_OFFSET;
+
+ temp = readl(addr);
+
+ if (gpio_pin == SDA) {
+ if (value == 1) {
+ temp |= SSUSB_FPGA_I2C_SDA_OUT;
+ writel(temp, addr);
+ } else {
+ temp &= ~SSUSB_FPGA_I2C_SDA_OUT;
+ writel(temp, addr);
+ }
+ } else {
+ if (value == 1) {
+ temp |= SSUSB_FPGA_I2C_SCL_OUT;
+ writel(temp, addr);
+ } else {
+ temp &= ~SSUSB_FPGA_I2C_SCL_OUT;
+ writel(temp, addr);
+ }
+ }
+}
+
+/*
+ * gpio_get_value() - sample the current level of SDA or SCL.
+ * @i2c_port_base: base of the FPGA I2C GPIO register block
+ * @gpio_pin: SDA or SCL
+ *
+ * Reads the IN register and returns the selected pin's bit (0 or 1).
+ */
+unsigned char gpio_get_value(void *i2c_port_base, unsigned char gpio_pin)
+{
+ unsigned char temp;
+ void *addr;
+
+ addr = i2c_port_base + SSUSB_FPGA_I2C_IN_OFFSET;
+
+ temp = readl(addr);
+
+ if (gpio_pin == SDA)
+ temp = (temp >> SSUSB_FPGA_I2C_SDA_IN_OFFSET) & 0x01;
+ else
+ temp = (temp >> SSUSB_FPGA_I2C_SCL_IN_OFFSET) & 0x01;
+
+ return temp;
+}
+
+/*
+ * i2c_stop() - generate an I2C STOP condition.
+ *
+ * STOP is defined as SDA rising while SCL is high: drive SDA low, raise
+ * SCL, then raise SDA. Finally both lines are released to inputs so the
+ * pull-ups hold the bus idle.
+ */
+void i2c_stop(void *i2c_port_base)
+{
+ gpio_set_direction(i2c_port_base, OUTPUT, SDA);
+ gpio_set_value(i2c_port_base, 0, SCL);
+ i2c_dummy_delay(I2C_DELAY);
+ gpio_set_value(i2c_port_base, 0, SDA);
+ i2c_dummy_delay(I2C_DELAY);
+ gpio_set_value(i2c_port_base, 1, SCL);
+ i2c_dummy_delay(I2C_DELAY);
+ gpio_set_value(i2c_port_base, 1, SDA);
+ i2c_dummy_delay(I2C_DELAY);
+ gpio_set_direction(i2c_port_base, INPUT, SCL);
+ gpio_set_direction(i2c_port_base, INPUT, SDA);
+}
+
+/*
+ * i2c_start() - generate an I2C START (or repeated-START) condition.
+ *
+ * START is defined as SDA falling while SCL is high: take both lines as
+ * outputs, raise SDA and SCL, drop SDA, then drop SCL ready for the
+ * first data bit.
+ */
+void i2c_start(void *i2c_port_base) /* Prepare the SDA and SCL for sending/receiving */
+{
+ gpio_set_direction(i2c_port_base, OUTPUT, SCL);
+ gpio_set_direction(i2c_port_base, OUTPUT, SDA);
+ gpio_set_value(i2c_port_base, 1, SDA);
+ i2c_dummy_delay(I2C_DELAY);
+ gpio_set_value(i2c_port_base, 1, SCL);
+ i2c_dummy_delay(I2C_DELAY);
+ gpio_set_value(i2c_port_base, 0, SDA);
+ i2c_dummy_delay(I2C_DELAY);
+ gpio_set_value(i2c_port_base, 0, SCL);
+ i2c_dummy_delay(I2C_DELAY);
+}
+
+/*
+ * i2c_send_byte() - clock out one byte, MSB first, then sample the
+ * slave's ACK bit on the 9th clock.
+ *
+ * Returns PHY_TRUE (1) when the slave ACKed (SDA sampled low) and
+ * PHY_FALSE (0) on NACK.
+ */
+u32 i2c_send_byte(void *i2c_port_base, u8 data) /* returns PHY_TRUE on ACK, PHY_FALSE on NACK */
+{
+ int i, ack;
+
+ gpio_set_direction(i2c_port_base, OUTPUT, SDA);
+
+ /* shift out bits 7..1; bit 0 is sent after the loop (i == 0 here) */
+ for (i=8; --i>0;) {
+ gpio_set_value(i2c_port_base, (data>>i)&0x01, SDA);
+ i2c_dummy_delay(I2C_DELAY);
+ gpio_set_value(i2c_port_base, 1, SCL); /* high */
+ i2c_dummy_delay(I2C_DELAY);
+ gpio_set_value(i2c_port_base, 0, SCL); /* low */
+ i2c_dummy_delay(I2C_DELAY);
+ }
+ gpio_set_value(i2c_port_base, (data>>i)&0x01, SDA);
+ i2c_dummy_delay(I2C_DELAY);
+ gpio_set_value(i2c_port_base, 1, SCL); /* high */
+ i2c_dummy_delay(I2C_DELAY);
+ gpio_set_value(i2c_port_base, 0, SCL); /* low */
+ i2c_dummy_delay(I2C_DELAY);
+
+ /* release SDA and clock once more to read the slave's ACK bit */
+ gpio_set_value(i2c_port_base, 0, SDA);
+ i2c_dummy_delay(I2C_DELAY);
+ gpio_set_direction(i2c_port_base, INPUT, SDA);
+ i2c_dummy_delay(I2C_DELAY);
+ gpio_set_value(i2c_port_base, 1, SCL);
+ i2c_dummy_delay(I2C_DELAY);
+ ack = gpio_get_value(i2c_port_base,SDA); /* ack 1: error , 0:ok */
+ gpio_set_value(i2c_port_base, 0, SCL);
+ i2c_dummy_delay(I2C_DELAY);
+
+ if (ack==1)
+ return PHY_FALSE;
+ else
+ return PHY_TRUE;
+}
+
+/*
+ * i2c_receive_byte() - clock in one byte, MSB first, then drive the
+ * master's ACK/NACK bit on the 9th clock.
+ * @data: out parameter, receives the byte read
+ * @ack: level driven on SDA for the acknowledge slot
+ *       (0 = ACK / more bytes wanted, 1 = NACK / last byte)
+ */
+void i2c_receive_byte(void *i2c_port_base, u8 *data, u8 ack)
+{
+ int i;
+ u32 dataCache;
+
+ dataCache = 0;
+ gpio_set_direction(i2c_port_base, INPUT, SDA);
+
+ /* sample 8 bits while SCL is high, MSB first */
+ for (i=8; --i>=0;) {
+ dataCache <<= 1;
+ i2c_dummy_delay(I2C_DELAY);
+ gpio_set_value(i2c_port_base, 1, SCL);
+ i2c_dummy_delay(I2C_DELAY);
+ dataCache |= gpio_get_value(i2c_port_base,SDA);
+ gpio_set_value(i2c_port_base, 0, SCL);
+ i2c_dummy_delay(I2C_DELAY);
+ }
+
+ /* reclaim SDA and clock out the ACK/NACK bit */
+ gpio_set_direction(i2c_port_base, OUTPUT, SDA);
+ gpio_set_value(i2c_port_base, ack, SDA);
+ i2c_dummy_delay(I2C_DELAY);
+ gpio_set_value(i2c_port_base, 1, SCL);
+ i2c_dummy_delay(I2C_DELAY);
+ gpio_set_value(i2c_port_base, 0, SCL);
+ i2c_dummy_delay(I2C_DELAY);
+ *data = (u8)dataCache;
+}
+
+
+/*
+ * i2c_write_reg() - single-byte register write:
+ * START, <dev_id|W>, <addr>, <data>, STOP.
+ *
+ * Returns PHY_TRUE on success, PHY_FALSE if any byte was NACKed.
+ *
+ * NOTE(review): the failure paths return without issuing i2c_stop(),
+ * leaving the bus mid-transfer; the next i2c_start() acts as a
+ * repeated-START, which slaves may tolerate -- confirm intended.
+ */
+int i2c_write_reg(void *i2c_port_base, u8 dev_id, u8 addr, u8 data)
+{
+ int acknowledge=0;
+
+ i2c_start(i2c_port_base);
+
+ /* 7-bit address shifted left, R/W bit = 0 (write) */
+ acknowledge=i2c_send_byte(i2c_port_base,(dev_id<<1) & 0xff);
+ if (acknowledge)
+ acknowledge=i2c_send_byte(i2c_port_base,addr);
+ else
+ return PHY_FALSE;
+
+ acknowledge=i2c_send_byte(i2c_port_base,data);
+ if (acknowledge) {
+ i2c_stop(i2c_port_base);
+ return PHY_TRUE;
+ } else {
+ return PHY_FALSE;
+ }
+}
+
+/*
+ * i2c_read_reg() - single-byte register read using a repeated-START:
+ * START, <dev_id|W>, <addr>, START, <dev_id|R>, <data+NACK>, STOP.
+ *
+ * Returns PHY_TRUE on success (value stored in *data), PHY_FALSE if the
+ * address phase or the read address byte was NACKed.
+ *
+ * NOTE(review): as with i2c_write_reg(), failure paths skip i2c_stop().
+ */
+int i2c_read_reg(void *i2c_port_base, u8 dev_id, u8 addr, u8 *data)
+{
+ int acknowledge=0;
+
+ i2c_start(i2c_port_base);
+
+ acknowledge=i2c_send_byte(i2c_port_base,(dev_id<<1) & 0xff);
+ if (acknowledge)
+ acknowledge=i2c_send_byte(i2c_port_base,addr);
+ else
+ return PHY_FALSE;
+
+ /* repeated START switches the bus to the read phase */
+ i2c_start(i2c_port_base);
+
+ acknowledge=i2c_send_byte(i2c_port_base,((dev_id<<1) & 0xff) | 0x01);
+ if (acknowledge)
+ i2c_receive_byte(i2c_port_base,data, 1); /* NACK the single byte read */
+ else
+ return PHY_FALSE;
+
+ i2c_stop(i2c_port_base);
+
+ return acknowledge;
+}
+
+/*
+ * u3phy_write_reg() - write one PHY register byte over the bit-bang I2C
+ * link, logging on failure.
+ *
+ * Returns PHY_TRUE on success, PHY_FALSE on NACK.
+ */
+int u3phy_write_reg(void *i2c_port_base, u8 dev_id, u8 address, int value)
+{
+ int ret;
+
+ ret = i2c_write_reg(i2c_port_base, dev_id, address, value);
+ if (ret == PHY_FALSE) {
+ dprintf(ALWAYS, "Write failed(dev_id: %x, addr: 0x%x, val: 0x%x)\n", dev_id, address, value);
+ return PHY_FALSE;
+ }
+
+ return PHY_TRUE;
+}
+
+/*
+ * u3phy_read_reg() - read one PHY register byte over the bit-bang I2C
+ * link, logging on failure.
+ *
+ * NOTE(review): returns PHY_FALSE (0) on error, which is
+ * indistinguishable from a register that legitimately reads 0 --
+ * callers cannot detect failures here.
+ */
+unsigned char u3phy_read_reg(void *i2c_port_base, u8 dev_id, u8 address)
+{
+ u8 buf;
+ int ret;
+
+ /* buf = (char *)kmalloc(1, GFP_NOIO); */
+ ret = i2c_read_reg(i2c_port_base, dev_id, address, &buf);
+ if (ret == PHY_FALSE) {
+ dprintf(ALWAYS, "Read failed(dev_id: %x, addr: 0x%x)\n", dev_id, address);
+ return PHY_FALSE;
+ }
+ ret = buf;
+
+ return ret;
+
+}
+
+/*
+ * u3phy_write_reg32() - write a 32-bit value as four consecutive byte
+ * registers, least-significant byte first at addr..addr+3.
+ * Only the low 8 bits of addr are used. Always returns 0; individual
+ * byte-write failures are logged by u3phy_write_reg() but not reported.
+ */
+int u3phy_write_reg32(void *i2c_port_base, u8 dev_id, u32 addr, u32 data)
+{
+ u8 addr8;
+ u8 data_0, data_1, data_2, data_3;
+
+ addr8 = addr & 0xff;
+ data_0 = data & 0xff;
+ data_1 = (data>>8) & 0xff;
+ data_2 = (data>>16) & 0xff;
+ data_3 = (data>>24) & 0xff;
+
+ u3phy_write_reg(i2c_port_base, dev_id, addr8, data_0);
+ u3phy_write_reg(i2c_port_base, dev_id, addr8+1, data_1);
+ u3phy_write_reg(i2c_port_base, dev_id, addr8+2, data_2);
+ u3phy_write_reg(i2c_port_base, dev_id, addr8+3, data_3);
+
+ return 0;
+}
+
+/*
+ * u3phy_read_reg32() - assemble a 32-bit value from four consecutive
+ * byte registers at addr..addr+3, least-significant byte first.
+ * Only the low 8 bits of addr are used; per-byte read errors are not
+ * propagated (see u3phy_read_reg()).
+ */
+unsigned int u3phy_read_reg32(void *i2c_port_base, u8 dev_id, u32 addr)
+{
+ u8 addr8;
+ u32 data;
+
+ addr8 = addr & 0xff;
+
+ data = u3phy_read_reg(i2c_port_base, dev_id, addr8);
+ data |= (u3phy_read_reg(i2c_port_base, dev_id, addr8+1) << 8);
+ data |= (u3phy_read_reg(i2c_port_base, dev_id, addr8+2) << 16);
+ data |= (u3phy_read_reg(i2c_port_base, dev_id, addr8+3) << 24);
+
+ return data;
+}
+
+
+/*
+ * u3phy_write_reg8() - write a single byte register; only the low 8
+ * bits of addr are used. Always returns PHY_TRUE (the underlying
+ * write's result is discarded).
+ */
+int u3phy_write_reg8(void *i2c_port_base, u8 dev_id, u32 addr, u8 data)
+{
+ u8 addr8;
+
+ addr8 = addr & 0xff;
+ u3phy_write_reg(i2c_port_base, dev_id, addr8, data);
+
+ return PHY_TRUE;
+}
+
+/*
+ * u3phy_read_reg8() - read a single byte register; only the low 8 bits
+ * of addr are used. Errors are not distinguishable from a value of 0
+ * (see u3phy_read_reg()).
+ */
+unsigned char u3phy_read_reg8(void *i2c_port_base, u8 dev_id, u32 addr)
+{
+ u8 addr8;
+ u32 data;
+
+ addr8 = addr & 0xff;
+ data = u3phy_read_reg(i2c_port_base, dev_id, addr8);
+
+ return data;
+}
+
+
+/*
+ * u3phy_readlmsk() - read a 32-bit register, mask it, and shift the
+ * field down by @offset so the caller gets the bare field value.
+ */
+unsigned int u3phy_readlmsk(void *i2c_port_base, unsigned char i2c_addr, unsigned int reg_addr32, unsigned int offset, unsigned int mask)
+{
+ return ((u3phy_read_reg32(i2c_port_base, i2c_addr, reg_addr32) & mask) >> offset);
+}
+
+/*
+ * u3phy_writelmsk() - read-modify-write a bit field of a 32-bit
+ * register: bits in @mask are replaced by (@data << @offset), all other
+ * bits are preserved. Always returns 0.
+ */
+int u3phy_writelmsk(void *i2c_port_base, unsigned char i2c_addr, unsigned int reg_addr32, unsigned int offset, unsigned int mask, unsigned int data)
+{
+ unsigned int cur_value;
+ unsigned int new_value;
+
+ cur_value = u3phy_read_reg32(i2c_port_base, i2c_addr, reg_addr32);
+ new_value = (cur_value & (~mask)) | ((data << offset) & mask);
+ u3phy_write_reg32(i2c_port_base, i2c_addr, reg_addr32, new_value);
+
+ return 0;
+}
diff --git a/src/bsp/lk/platform/mt8512/drivers/usb/u3phy-i2c.h b/src/bsp/lk/platform/mt8512/drivers/usb/u3phy-i2c.h
new file mode 100644
index 0000000..fc347e3
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/usb/u3phy-i2c.h
@@ -0,0 +1,9 @@
+int u3phy_write_reg(void *i2c_port_base, unsigned char dev_id, unsigned char address, int value);
+unsigned char u3phy_read_reg(void *i2c_port_base, unsigned char dev_id, unsigned char address);
+int u3phy_write_reg32(void *i2c_port_base, unsigned char dev_id, unsigned int addr, unsigned int data);
+unsigned int u3phy_read_reg32(void *i2c_port_base, unsigned char dev_id, unsigned int addr);
+/* (duplicate u3phy_read_reg32() prototype removed -- it was declared twice) */
+int u3phy_write_reg8(void *i2c_port_base, unsigned char dev_id, unsigned int addr, unsigned char data);
+unsigned char u3phy_read_reg8(void *i2c_port_base, unsigned char dev_id, unsigned int addr);
+unsigned int u3phy_readlmsk(void *i2c_port_base, unsigned char i2c_addr, unsigned int reg_addr32, unsigned int offset, unsigned int mask);
+int u3phy_writelmsk(void *i2c_port_base, unsigned char i2c_addr, unsigned int reg_addr32, unsigned int offset, unsigned int mask, unsigned int data);
\ No newline at end of file
diff --git a/src/bsp/lk/platform/mt8512/drivers/usb/usbphy.c b/src/bsp/lk/platform/mt8512/drivers/usb/usbphy.c
new file mode 100644
index 0000000..a5ba14b
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/usb/usbphy.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <debug.h>
+#include <platform/mt_reg_base.h>
+#include <platform/reg_utils.h>
+
+#include "usbphy.h"
+
+#pragma GCC push_options
+#pragma GCC optimize("O1")
+
+#ifdef DBG_USB_PHY
+#define PHY_LOG(x...) dprintf(INFO, "[USB][PHY] " x)
+#else
+#define PHY_LOG(x...) do{} while(0)
+#endif
+
+/* 2712E1 can't set RG_AVALID */
+#define U3D_U2PHYDEV_MASK (E60802_RG_IDDIG | /*E60802_RG_AVALID |*/ \
+ E60802_RG_BVALID | E60802_RG_VBUSVALID)
+
+#define U3D_U2PHYFRCDEV_MASK (E60802_FORCE_IDDIG | /*E60802_FORCE_AVALID |*/ \
+ E60802_FORCE_BVALID | E60802_FORCE_SESSEND | E60802_FORCE_VBUSVALID)
+
+/*
+ * mt_usb_phy_poweron() - bring the USB2 PHY up in device mode.
+ *
+ * Sequence: route the pins to USB (not UART/GPIO), disable the BC1.1
+ * charger-detect path, enable the internal bias, un-suspend the PHY so
+ * the 480MHz PLL starts, wait for it to settle, then force the PHY's
+ * UTMI device-mode status signals (BVALID/VBUSVALID, IDDIG=device).
+ */
+void mt_usb_phy_poweron(void)
+{
+ PHY_LOG("%s\n", __func__);
+
+ /* switch to USB function */
+ clrbits32_r(E60802_FORCE_UART_EN, U3D_U2PHYDTM0);
+ clrbits32_r(E60802_RG_UART_EN, U3D_U2PHYDTM1);
+ clrbits32_r(E60802_RG_USB20_GPIO_CTL, U3D_U2PHYACR4);
+ clrbits32_r(E60802_USB20_GPIO_MODE, U3D_U2PHYACR4);
+ /* DP/DM BC1.1 path Disable */
+ clrbits32_r(E60802_RG_USB20_BC11_SW_EN, U3D_USBPHYACR6);
+ /* Internal R bias enable */
+ setbits32_r(E60802_RG_USB20_INTR_EN, U3D_USBPHYACR0);
+ /* 100U from u2 */
+ clrbits32_r(E60802_RG_USB20_HS_100U_U3_EN, U3D_USBPHYACR5);
+ /* let suspendm=1, enable usb 480MHz pll */
+ setbits32_r(E60802_RG_SUSPENDM, U3D_U2PHYDTM0);
+ /* force_suspendm=1 */
+ setbits32_r(E60802_FORCE_SUSPENDM, U3D_U2PHYDTM0);
+ /* wait 2 ms for USBPLL stable */
+ spin(2000);
+ /* power on device mode */
+ clrbits32_r(E60802_RG_SESSEND, U3D_U2PHYDTM1);
+ /* NOTE: mt2712E1 can't set RG_AVALID */
+ setbits32_r(U3D_U2PHYDEV_MASK, U3D_U2PHYDTM1);
+ /* enable force into device mode */
+ setbits32_r(U3D_U2PHYFRCDEV_MASK, U3D_U2PHYDTM1);
+ /* wait mac ready */
+ spin(2000);
+ /* apply MAC clock related setting after phy init */
+}
+
+/*
+ * mt_usb_phy_poweroff() - reverse of mt_usb_phy_poweron(): drop the
+ * device-mode status signals, release the force overrides, and suspend
+ * the PHY (suspendm=0 with force_suspendm set).
+ */
+void mt_usb_phy_poweroff(void)
+{
+ /* power down device mode */
+ clrbits32_r(E60802_RG_VBUSVALID | E60802_RG_BVALID | E60802_RG_AVALID, U3D_U2PHYDTM1);
+ setbits32_r(E60802_RG_IDDIG | E60802_RG_SESSEND, U3D_U2PHYDTM1);
+
+ /* clear device force mode */
+ clrbits32_r(U3D_U2PHYFRCDEV_MASK, U3D_U2PHYDTM1);
+
+ clrbits32_r(E60802_RG_SUSPENDM, U3D_U2PHYDTM0);
+ setbits32_r(E60802_FORCE_SUSPENDM, U3D_U2PHYDTM0);
+ spin(2000);
+ PHY_LOG("%s\n", __func__);
+}
+
+#pragma GCC pop_options
diff --git a/src/bsp/lk/platform/mt8512/drivers/usb/usbphy.h b/src/bsp/lk/platform/mt8512/drivers/usb/usbphy.h
new file mode 100644
index 0000000..21c2b24
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/usb/usbphy.h
@@ -0,0 +1,304 @@
+/*
+ * Copyright(c) 2013 MediaTek Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ *(the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+/* APB Module ssusb_top_sif - USB3_SIF2_BASE */
+#define SSUSB_SIFSLV_U2PHY_COM_BASE (USB3_SIF_BASE + 0x300)
+#define SSUSB_SIFSLV_SPLLC_BASE (USB3_SIF_BASE + 0x700)
+#define SSUSB_SIFSLV_U3PHYD_BASE (USB3_SIF_BASE + 0x900)
+#define SSUSB_SIFSLV_U3PHYA_BASE (USB3_SIF_BASE + 0xB00)
+#define SSUSB_SIFSLV_U3PHYA_DA_BASE (USB3_SIF_BASE + 0xC00)
+
+/* referenced from ssusb_USB20_PHY_regmap_com_T28.xls */
+#define U3D_USBPHYACR0 (SSUSB_SIFSLV_U2PHY_COM_BASE+0x0000) /* bit 2~bit 30 */
+#define U3D_USBPHYACR1 (SSUSB_SIFSLV_U2PHY_COM_BASE+0x0004)
+#define U3D_USBPHYACR2 (SSUSB_SIFSLV_U2PHY_COM_BASE+0x0008) /* bit 0~ bit15 */
+#define U3D_USBPHYACR4 (SSUSB_SIFSLV_U2PHY_COM_BASE+0x0010)
+#define U3D_USBPHYACR5 (SSUSB_SIFSLV_U2PHY_COM_BASE+0x0014)
+#define U3D_USBPHYACR6 (SSUSB_SIFSLV_U2PHY_COM_BASE+0x0018)
+#define U3D_U2PHYACR3 (SSUSB_SIFSLV_U2PHY_COM_BASE+0x001c)
+#define U3D_U2PHYACR4 (SSUSB_SIFSLV_U2PHY_COM_BASE+0x0020) /* bit8~ bit18 */
+#define U3D_U2PHYAMON0 (SSUSB_SIFSLV_U2PHY_COM_BASE+0x0024)
+#define U3D_U2PHYDCR0 (SSUSB_SIFSLV_U2PHY_COM_BASE+0x0060)
+#define U3D_U2PHYDCR1 (SSUSB_SIFSLV_U2PHY_COM_BASE+0x0064)
+#define U3D_U2PHYDTM0 (SSUSB_SIFSLV_U2PHY_COM_BASE+0x0068)
+#define U3D_U2PHYDTM1 (SSUSB_SIFSLV_U2PHY_COM_BASE+0x006C)
+#define U3D_U2PHYDMON0 (SSUSB_SIFSLV_U2PHY_COM_BASE+0x0070)
+#define U3D_U2PHYDMON1 (SSUSB_SIFSLV_U2PHY_COM_BASE+0x0074)
+#define U3D_U2PHYDMON2 (SSUSB_SIFSLV_U2PHY_COM_BASE+0x0078)
+#define U3D_U2PHYDMON3 (SSUSB_SIFSLV_U2PHY_COM_BASE+0x007C)
+#define U3D_U2PHYBC12C (SSUSB_SIFSLV_U2PHY_COM_BASE+0x0080)
+#define U3D_U2PHYBC12C1 (SSUSB_SIFSLV_U2PHY_COM_BASE+0x0084)
+#define U3D_U2PHYREGFPPC (SSUSB_SIFSLV_U2PHY_COM_BASE+0x00e0)
+#define U3D_U2PHYVERSIONC (SSUSB_SIFSLV_U2PHY_COM_BASE+0x00f0)
+#define U3D_U2PHYREGFCOM (SSUSB_SIFSLV_U2PHY_COM_BASE+0x00fc)
+
+/* U3D_USBPHYACR0 */
+#define E60802_RG_USB20_MPX_OUT_SEL (0x7<<28) /* 30:28 */
+#define E60802_RG_USB20_TX_PH_ROT_SEL (0x7<<24) /* 26:24 */
+#define E60802_RG_USB20_PLL_DIVEN (0x7<<20) /* 22:20 */
+#define E60802_RG_USB20_PLL_BR (0x1<<18) /* 18:18 */
+#define E60802_RG_USB20_PLL_BP (0x1<<17) /* 17:17 */
+#define E60802_RG_USB20_PLL_BLP (0x1<<16) /* 16:16 */
+#define E60802_RG_USB20_USBPLL_FORCE_ON (0x1<<15) /* 15:15 */
+#define E60802_RG_USB20_PLL_FBDIV (0x7f<<8) /* 14:8 */
+#define E60802_RG_USB20_PLL_PREDIV (0x3<<6) /* 7:6 */
+#define E60802_RG_USB20_INTR_EN (0x1<<5) /* 5:5 */
+#define E60802_RG_USB20_REF_EN (0x1<<4) /* 4:4 */
+#define E60802_RG_USB20_BGR_DIV (0x3<<2) /* 3:2 */
+#define E60802_RG_SIFSLV_CHP_EN (0x1<<1) /* 1:1 */
+#define E60802_RG_SIFSLV_BGR_EN (0x1<<0) /* 0:0 */
+
+/* U3D_USBPHYACR1 */
+#define E60802_RG_USB20_INTR_CAL (0x1f<<19) /* 23:19 */
+#define E60802_RG_USB20_OTG_VBUSTH (0x7<<16) /* 18:16 */
+#define E60802_RG_USB20_VRT_VREF_SEL (0x7<<12) /* 14:12 */
+#define E60802_RG_USB20_TERM_VREF_SEL (0x7<<8) /* 10:8 */
+#define E60802_RG_USB20_MPX_SEL (0xff<<0) /* 7:0 */
+
+/* U3D_USBPHYACR2 */
+#define E60802_RG_SIFSLV_MAC_BANDGAP_EN (0x1<<17) /* 17:17 */
+#define E60802_RG_SIFSLV_MAC_CHOPPER_EN (0x1<<16) /* 16:16 */
+#define E60802_RG_USB20_CLKREF_REV (0xffff<<0) /* 15:0 */
+
+/* U3D_USBPHYACR4 */
+#define E60802_RG_USB20_DP_ABIST_SOURCE_EN (0x1<<31) /* 31:31 */
+#define E60802_RG_USB20_DP_ABIST_SELE (0xf<<24) /* 27:24 */
+#define E60802_RG_USB20_ICUSB_EN (0x1<<16) /* 16:16 */
+#define E60802_RG_USB20_LS_CR (0x7<<12) /* 14:12 */
+#define E60802_RG_USB20_FS_CR (0x7<<8) /* 10:8 */
+#define E60802_RG_USB20_LS_SR (0x7<<4) /* 6:4 */
+#define E60802_RG_USB20_FS_SR (0x7<<0) /* 2:0 */
+
+/* U3D_USBPHYACR5 */
+#define E60802_RG_USB20_DISC_FIT_EN (0x1<<28) /* 28:28 */
+#define E60802_RG_USB20_INIT_SQ_EN_DG (0x3<<26) /* 27:26 */
+#define E60802_RG_USB20_HSTX_TMODE_SEL (0x3<<24) /* 25:24 */
+#define E60802_RG_USB20_SQD (0x3<<22) /* 23:22 */
+#define E60802_RG_USB20_DISCD (0x3<<20) /* 21:20 */
+#define E60802_RG_USB20_HSTX_TMODE_EN (0x1<<19) /* 19:19 */
+#define E60802_RG_USB20_PHYD_MONEN (0x1<<18) /* 18:18 */
+#define E60802_RG_USB20_INLPBK_EN (0x1<<17) /* 17:17 */
+#define E60802_RG_USB20_CHIRP_EN (0x1<<16) /* 16:16 */
+#define E60802_RG_USB20_HSTX_SRCAL_EN (0x1<<15) /* 15:15 */
+#define E60802_RG_USB20_HSTX_SRCTRL (0x7<<12) /* 14:12 */
+#define E60802_RG_USB20_HS_100U_U3_EN (0x1<<11) /* 11:11 */
+#define E60802_RG_USB20_GBIAS_ENB (0x1<<10) /* 10:10 */
+#define E60802_RG_USB20_DM_ABIST_SOURCE_EN (0x1<<7) /* 7:7 */
+#define E60802_RG_USB20_DM_ABIST_SELE (0xf<<0) /* 3:0 */
+
+/* U3D_USBPHYACR6 */
+#define E60802_RG_USB20_ISO_EN (0x1<<31) /* 31:31 */
+#define E60802_RG_USB20_PHY_REV (0xef<<24) /* 31:24 */
+#define E60802_RG_USB20_BC11_SW_EN (0x1<<23) /* 23:23 */
+#define E60802_RG_USB20_SR_CLK_SEL (0x1<<22) /* 22:22 */
+#define E60802_RG_USB20_OTG_VBUSCMP_EN (0x1<<20) /* 20:20 */
+#define E60802_RG_USB20_OTG_ABIST_EN (0x1<<19) /* 19:19 */
+#define E60802_RG_USB20_OTG_ABIST_SELE (0x7<<16) /* 18:16 */
+#define E60802_RG_USB20_HSRX_MMODE_SELE (0x3<<12) /* 13:12 */
+#define E60802_RG_USB20_HSRX_BIAS_EN_SEL (0x3<<9) /* 10:9 */
+#define E60802_RG_USB20_HSRX_TMODE_EN (0x1<<8) /* 8:8 */
+#define E60802_RG_USB20_DISCTH (0xf<<4) /* 7:4 */
+#define E60802_RG_USB20_SQTH (0xf<<0) /* 3:0 */
+
+/* U3D_U2PHYACR3 */
+#define E60802_RG_USB20_HSTX_DBIST (0xf<<28) /* 31:28 */
+#define E60802_RG_USB20_HSTX_BIST_EN (0x1<<26) /* 26:26 */
+#define E60802_RG_USB20_HSTX_I_EN_MODE (0x3<<24) /* 25:24 */
+#define E60802_RG_USB20_USB11_TMODE_EN (0x1<<19) /* 19:19 */
+#define E60802_RG_USB20_TMODE_FS_LS_TX_EN (0x1<<18) /* 18:18 */
+#define E60802_RG_USB20_TMODE_FS_LS_RCV_EN (0x1<<17) /* 17:17 */
+#define E60802_RG_USB20_TMODE_FS_LS_MODE (0x1<<16) /* 16:16 */
+#define E60802_RG_USB20_HS_TERM_EN_MODE (0x3<<13) /* 14:13 */
+#define E60802_RG_USB20_PUPD_BIST_EN (0x1<<12) /* 12:12 */
+#define E60802_RG_USB20_EN_PU_DM (0x1<<11) /* 11:11 */
+#define E60802_RG_USB20_EN_PD_DM (0x1<<10) /* 10:10 */
+#define E60802_RG_USB20_EN_PU_DP (0x1<<9) /* 9:9 */
+#define E60802_RG_USB20_EN_PD_DP (0x1<<8) /* 8:8 */
+
+/* U3D_U2PHYACR4 */
+#define E60802_RG_USB20_DP_100K_MODE (0x1<<18) /* 18:18 */
+#define E60802_RG_USB20_DM_100K_EN (0x1<<17) /* 17:17 */
+#define E60802_USB20_DP_100K_EN (0x1<<16) /* 16:16 */
+#define E60802_USB20_GPIO_DM_I (0x1<<15) /* 15:15 */
+#define E60802_USB20_GPIO_DP_I (0x1<<14) /* 14:14 */
+#define E60802_USB20_GPIO_DM_OE (0x1<<13) /* 13:13 */
+#define E60802_USB20_GPIO_DP_OE (0x1<<12) /* 12:12 */
+#define E60802_RG_USB20_GPIO_CTL (0x1<<9) /* 9:9 */
+#define E60802_USB20_GPIO_MODE (0x1<<8) /* 8:8 */
+#define E60802_RG_USB20_TX_BIAS_EN (0x1<<5) /* 5:5 */
+#define E60802_RG_USB20_TX_VCMPDN_EN (0x1<<4) /* 4:4 */
+#define E60802_RG_USB20_HS_SQ_EN_MODE (0x3<<2) /* 3:2 */
+#define E60802_RG_USB20_HS_RCV_EN_MODE (0x3<<0) /* 1:0 */
+
+/* U3D_U2PHYAMON0 */
+#define E60802_RGO_USB20_GPIO_DM_O (0x1<<1) /* 1:1 */
+#define E60802_RGO_USB20_GPIO_DP_O (0x1<<0) /* 0:0 */
+
+/* U3D_U2PHYDCR0 */
+#define E60802_RG_USB20_CDR_TST (0x3<<30) /* 31:30 */
+#define E60802_RG_USB20_GATED_ENB (0x1<<29) /* 29:29 */
+#define E60802_RG_USB20_TESTMODE (0x3<<26) /* 27:26 */
+#define E60802_RG_SIFSLV_USB20_PLL_STABLE (0x1<<25) /* 25:25 */
+#define E60802_RG_SIFSLV_USB20_PLL_FORCE_ON (0x1<<24) /* 24:24 */
+#define E60802_RG_USB20_PHYD_RESERVE (0xffff<<8) /* 23:8 */
+#define E60802_RG_USB20_EBTHRLD (0x1<<7) /* 7:7 */
+#define E60802_RG_USB20_EARLY_HSTX_I (0x1<<6) /* 6:6 */
+#define E60802_RG_USB20_TX_TST (0x1<<5) /* 5:5 */
+#define E60802_RG_USB20_NEGEDGE_ENB (0x1<<4) /* 4:4 */
+#define E60802_RG_USB20_CDR_FILT (0xf<<0) /* 3:0 */
+
+/* U3D_U2PHYDCR1 */
+#define E60802_RG_USB20_PROBE_SEL (0xff<<24) /* 31:24 */
+#define E60802_RG_USB20_DRVVBUS (0x1<<23) /* 23:23 */
+#define E60802_RG_DEBUG_EN (0x1<<22) /* 22:22 */
+#define E60802_RG_USB20_OTG_PROBE (0x3<<20) /* 21:20 */
+#define E60802_RG_USB20_SW_PLLMODE (0x3<<18) /* 19:18 */
+#define E60802_RG_USB20_BERTH (0x3<<16) /* 17:16 */
+#define E60802_RG_USB20_LBMODE (0x3<<13) /* 14:13 */
+#define E60802_RG_USB20_FORCE_TAP (0x1<<12) /* 12:12 */
+#define E60802_RG_USB20_TAPSEL (0xfff<<0) /* 11:0 */
+
+/* U3D_U2PHYDTM0 */
+#define E60802_RG_UART_MODE (0x3<<30) /* 31:30 */
+#define E60802_FORCE_UART_I (0x1<<29) /* 29:29 */
+#define E60802_FORCE_UART_BIAS_EN (0x1<<28) /* 28:28 */
+#define E60802_FORCE_UART_TX_OE (0x1<<27) /* 27:27 */
+#define E60802_FORCE_UART_EN (0x1<<26) /* 26:26 */
+#define E60802_FORCE_USB_CLKEN (0x1<<25) /* 25:25 */
+#define E60802_FORCE_DRVVBUS (0x1<<24) /* 24:24 */
+#define E60802_FORCE_DATAIN (0x1<<23) /* 23:23 */
+#define E60802_FORCE_TXVALID (0x1<<22) /* 22:22 */
+#define E60802_FORCE_DM_PULLDOWN (0x1<<21) /* 21:21 */
+#define E60802_FORCE_DP_PULLDOWN (0x1<<20) /* 20:20 */
+#define E60802_FORCE_XCVRSEL (0x1<<19) /* 19:19 */
+#define E60802_FORCE_SUSPENDM (0x1<<18) /* 18:18 */
+#define E60802_FORCE_TERMSEL (0x1<<17) /* 17:17 */
+#define E60802_FORCE_OPMODE (0x1<<16) /* 16:16 */
+#define E60802_UTMI_MUXSEL (0x1<<15) /* 15:15 */
+#define E60802_RG_RESET (0x1<<14) /* 14:14 */
+#define E60802_RG_DATAIN (0xf<<10) /* 13:10 */
+#define E60802_RG_TXVALIDH (0x1<<9) /* 9:9 */
+#define E60802_RG_TXVALID (0x1<<8) /* 8:8 */
+#define E60802_RG_DMPULLDOWN (0x1<<7) /* 7:7 */
+#define E60802_RG_DPPULLDOWN (0x1<<6) /* 6:6 */
+#define E60802_RG_XCVRSEL (0x3<<4) /* 5:4 */
+#define E60802_RG_SUSPENDM (0x1<<3) /* 3:3 */
+#define E60802_RG_TERMSEL (0x1<<2) /* 2:2 */
+#define E60802_RG_OPMODE (0x3<<0) /* 1:0 */
+
+/* U3D_U2PHYDTM1 */
+#define E60802_RG_USB20_PRBS7_EN (0x1<<31) /* 31:31 */
+#define E60802_RG_USB20_PRBS7_BITCNT (0x3f<<24) /* 29:24 */
+#define E60802_RG_USB20_CLK48M_EN (0x1<<23) /* 23:23 */
+#define E60802_RG_USB20_CLK60M_EN (0x1<<22) /* 22:22 */
+#define E60802_RG_UART_I (0x1<<19) /* 19:19 */
+#define E60802_RG_UART_BIAS_EN (0x1<<18) /* 18:18 */
+#define E60802_RG_UART_TX_OE (0x1<<17) /* 17:17 */
+#define E60802_RG_UART_EN (0x1<<16) /* 16:16 */
+#define E60802_RG_IP_U2_PORT_POWER (0x1<<15) /* 15:15 */
+#define E60802_FORCE_IP_U2_PORT_POWER (0x1<<14) /* 14:14 */
+#define E60802_FORCE_VBUSVALID (0x1<<13) /* 13:13 */
+#define E60802_FORCE_SESSEND (0x1<<12) /* 12:12 */
+#define E60802_FORCE_BVALID (0x1<<11) /* 11:11 */
+#define E60802_FORCE_AVALID (0x1<<10) /* 10:10 */
+#define E60802_FORCE_IDDIG (0x1<<9) /* 9:9 */
+#define E60802_FORCE_IDPULLUP (0x1<<8) /* 8:8 */
+#define E60802_RG_VBUSVALID (0x1<<5) /* 5:5 */
+#define E60802_RG_SESSEND (0x1<<4) /* 4:4 */
+#define E60802_RG_BVALID (0x1<<3) /* 3:3 */
+#define E60802_RG_AVALID (0x1<<2) /* 2:2 */
+#define E60802_RG_IDDIG (0x1<<1) /* 1:1 */
+#define E60802_RG_IDPULLUP (0x1<<0) /* 0:0 */
+
+/* U3D_U2PHYDMON0 */
+#define E60802_RG_USB20_PRBS7_BERTH (0xff<<0) /* 7:0 */
+
+/* U3D_U2PHYDMON1 */
+#define E60802_USB20_UART_O (0x1<<31) /* 31:31 */
+#define E60802_RGO_USB20_LB_PASS (0x1<<30) /* 30:30 */
+#define E60802_RGO_USB20_LB_DONE (0x1<<29) /* 29:29 */
+#define E60802_AD_USB20_BVALID (0x1<<28) /* 28:28 */
+#define E60802_USB20_IDDIG (0x1<<27) /* 27:27 */
+#define E60802_AD_USB20_VBUSVALID (0x1<<26) /* 26:26 */
+#define E60802_AD_USB20_SESSEND (0x1<<25) /* 25:25 */
+#define E60802_AD_USB20_AVALID (0x1<<24) /* 24:24 */
+#define E60802_USB20_LINE_STATE (0x3<<22) /* 23:22 */
+#define E60802_USB20_HST_DISCON (0x1<<21) /* 21:21 */
+#define E60802_USB20_TX_READY (0x1<<20) /* 20:20 */
+#define E60802_USB20_RX_ERROR (0x1<<19) /* 19:19 */
+#define E60802_USB20_RX_ACTIVE (0x1<<18) /* 18:18 */
+#define E60802_USB20_RX_VALIDH (0x1<<17) /* 17:17 */
+#define E60802_USB20_RX_VALID (0x1<<16) /* 16:16 */
+#define E60802_USB20_DATA_OUT (0xffff<<0) /* 15:0 */
+
+/* U3D_U2PHYDMON2 */
+#define E60802_RGO_TXVALID_CNT (0xff<<24) /* 31:24 */
+#define E60802_RGO_RXACTIVE_CNT (0xff<<16) /* 23:16 */
+#define E60802_RGO_USB20_LB_BERCNT (0xff<<8) /* 15:8 */
+#define E60802_USB20_PROBE_OUT (0xff<<0) /* 7:0 */
+
+/* U3D_U2PHYDMON3 */
+#define E60802_RGO_USB20_PRBS7_ERRCNT (0xffff<<16) /* 31:16 */
+#define E60802_RGO_USB20_PRBS7_DONE (0x1<<3) /* 3:3 */
+#define E60802_RGO_USB20_PRBS7_LOCK (0x1<<2) /* 2:2 */
+#define E60802_RGO_USB20_PRBS7_PASS (0x1<<1) /* 1:1 */
+#define E60802_RGO_USB20_PRBS7_PASSTH (0x1<<0) /* 0:0 */
+
+/* U3D_U2PHYBC12C */
+#define E60802_RG_SIFSLV_CHGDT_DEGLCH_CNT (0xf<<28) /* 31:28 */
+#define E60802_RG_SIFSLV_CHGDT_CTRL_CNT (0xf<<24) /* 27:24 */
+#define E60802_RG_SIFSLV_CHGDT_FORCE_MODE (0x1<<16) /* 16:16 */
+#define E60802_RG_CHGDT_ISRC_LEV (0x3<<14) /* 15:14 */
+#define E60802_RG_CHGDT_VDATSRC (0x1<<13) /* 13:13 */
+#define E60802_RG_CHGDT_BGVREF_SEL (0x7<<10) /* 12:10 */
+#define E60802_RG_CHGDT_RDVREF_SEL (0x3<<8) /* 9:8 */
+#define E60802_RG_CHGDT_ISRC_DP (0x1<<7) /* 7:7 */
+#define E60802_RG_SIFSLV_CHGDT_OPOUT_DM (0x1<<6) /* 6:6 */
+#define E60802_RG_CHGDT_VDAT_DM (0x1<<5) /* 5:5 */
+#define E60802_RG_CHGDT_OPOUT_DP (0x1<<4) /* 4:4 */
+#define E60802_RG_SIFSLV_CHGDT_VDAT_DP (0x1<<3) /* 3:3 */
+#define E60802_RG_SIFSLV_CHGDT_COMP_EN (0x1<<2) /* 2:2 */
+#define E60802_RG_SIFSLV_CHGDT_OPDRV_EN (0x1<<1) /* 1:1 */
+#define E60802_RG_CHGDT_EN (0x1<<0) /* 0:0 */
+
+/* U3D_U2PHYBC12C1 */
+#define E60802_RG_CHGDT_REV (0xff<<0) /* 7:0 */
+
+/* U3D_REGFPPC */
+#define E60802_USB11_OTG_REG (0x1<<4) /* 4:4 */
+#define E60802_USB20_OTG_REG (0x1<<3) /* 3:3 */
+#define E60802_CHGDT_REG (0x1<<2) /* 2:2 */
+#define E60802_USB11_REG (0x1<<1) /* 1:1 */
+#define E60802_USB20_REG (0x1<<0) /* 0:0 */
+
+/* U3D_VERSIONC */
+#define E60802_VERSION_CODE_REGFILE (0xff<<24) /* 31:24 */
+#define E60802_USB11_VERSION_CODE (0xff<<16) /* 23:16 */
+#define E60802_VERSION_CODE_ANA (0xff<<8) /* 15:8 */
+#define E60802_VERSION_CODE_DIG (0xff<<0) /* 7:0 */
+
+/* U3D_REGFCOM */
+#define E60802_RG_PAGE (0xff<<24) /* 31:24 */
+#define E60802_I2C_MODE (0x1<<16) /* 16:16 */
diff --git a/src/bsp/lk/platform/mt8512/drivers/wdt/mtk_wdt.c b/src/bsp/lk/platform/mt8512/drivers/wdt/mtk_wdt.c
new file mode 100644
index 0000000..4466a4a
--- /dev/null
+++ b/src/bsp/lk/platform/mt8512/drivers/wdt/mtk_wdt.c
@@ -0,0 +1,411 @@
+#include <debug.h>
+#include <platform/mtk_wdt.h>
+#include <reg.h>
+
+#if ENABLE_WDT_MODULE
+
+/* Return true when bit <offset> of the WDT non-reset register 2 is set. */
+static bool mtk_wd_CheckNonResetReg2(unsigned int offset)
+{
+ return (readl(MTK_WDT_NONRST_REG2) & (1U << offset)) != 0;
+}
+
+/* Set (value=true) or clear (value=false) bit <offset> of NONRST_REG2. */
+static void mtk_wd_SetNonResetReg2(unsigned int offset, bool value)
+{
+ u32 mask = 1U << offset;
+ u32 reg = readl(MTK_WDT_NONRST_REG2);
+
+ reg = value ? (reg | mask) : (reg & ~mask);
+ writel(reg, MTK_WDT_NONRST_REG2);
+}
+
+/* Record (true) or clear (false) the fastboot flag, bit 2 of NONRST_REG2. */
+void set_clr_fastboot_mode(bool flag)
+{
+ /* bool is 0/1, so it maps directly onto the set/clear argument */
+ mtk_wd_SetNonResetReg2(0x2, flag);
+
+ dprintf(INFO, "set_clr_fastboot_mode\n");
+}
+
+/* Record (true) or clear (false) the recovery flag, bit 1 of NONRST_REG2. */
+void set_clr_recovery_mode(bool flag)
+{
+ /* bool is 0/1, so it maps directly onto the set/clear argument */
+ mtk_wd_SetNonResetReg2(0x1, flag);
+
+ dprintf(INFO, "set_clr_recovery_mode\n");
+}
+
+/*
+ * True when the fastboot flag (bit 2 of NONRST_REG2) survived the last
+ * reset. Always false on FPGA builds, where the RGU register is absent.
+ */
+bool check_fastboot_mode(void)
+{
+#if !(CFG_FPGA_PLATFORM)
+ return mtk_wd_CheckNonResetReg2(0x2);
+#else
+ return false;
+#endif
+}
+
+/* True when the recovery flag (bit 1 of NONRST_REG2) is set. */
+bool check_recovery_mode(void)
+{
+ return mtk_wd_CheckNonResetReg2(0x1);
+}
+
+/* Stop the watchdog: clear the enable bit (writes require the mode key). */
+void mtk_wdt_disable(void)
+{
+ u32 mode = readl(MTK_WDT_MODE);
+
+ mode &= ~MTK_WDT_MODE_ENABLE;
+ mode |= MTK_WDT_MODE_KEY; /* key must accompany every MODE write */
+ writel(mode, MTK_WDT_MODE);
+}
+
+/*
+ * Trigger a watchdog-driven chip reset.
+ * @mode: non-zero = bypass power key on reboot (flagged via the
+ *        AUTO_RESTART bit); zero = normal reboot.
+ * Configures hw-reboot mode (no IRQ/dual mode) then fires SWRST.
+ */
+static void mtk_wdt_reset(char mode)
+{
+ /* Watchdog reset: kick the counter first so it cannot expire mid-config */
+ unsigned int wdt_mode_val;
+ writel(MTK_WDT_RESTART_KEY, MTK_WDT_RESTART);
+
+ wdt_mode_val = readl(MTK_WDT_MODE);
+ /* clear autorestart bit: autorestart: 1, bypass power key, 0: not bypass power key */
+ wdt_mode_val &=(~MTK_WDT_MODE_AUTO_RESTART);
+ /* make sure WDT mode is hw reboot mode, can not config isr mode */
+ wdt_mode_val &= ~(MTK_WDT_MODE_IRQ | MTK_WDT_MODE_ENABLE | MTK_WDT_MODE_DUAL_MODE);
+
+ wdt_mode_val |= (MTK_WDT_MODE_KEY | MTK_WDT_MODE_EXTEN);
+
+ if (mode) /* mode != 0 means by pass power key reboot, We using auto_restart bit as by pass power key flag */
+  wdt_mode_val |= MTK_WDT_MODE_AUTO_RESTART;
+
+ writel(wdt_mode_val, MTK_WDT_MODE);
+
+ /* short delay to let the MODE write land before the software reset */
+ spin(100);
+ writel(MTK_WDT_SWRST_KEY, MTK_WDT_SWRST);
+}
+
+/*
+ * Return the cached WDT_STA value (reset reason: timeout / SW trigger).
+ * The hardware register is read only once; see comment below for why.
+ */
+static unsigned int mtk_wdt_check_status(void)
+{
+ static unsigned int status = 0;
+
+ /*
+  * Because WDT_STA register will be cleared after writing WDT_MODE,
+  * we use a static variable to store WDT_STA.
+  * After reset, the static variable will always be cleared to 0,
+  * so reading WDT_STA only when the static variable is 0 is OK.
+  */
+ if (0 == status)
+  status = readl(MTK_WDT_STATUS);
+
+ return status;
+}
+
+/*
+ * Configure the watchdog MODE register bit by bit.
+ * @dual_mode_en: bit 6, debug-module (dual mode) reset enable
+ * @irq:          bit 3, raise an interrupt instead of a reset signal
+ * @ext_en:       bit 2, drive the external reset signal
+ * @ext_pol:      bit 1, external reset signal polarity
+ * @wdt_en:       bit 0, watchdog counter enable
+ * AUTO_RESTART (bit 4) is always set: it is repurposed as the
+ * "bypass power key" flag and HW reboot must always reach the kernel.
+ */
+static void mtk_wdt_mode_config(bool dual_mode_en,
+                                bool irq,
+                                bool ext_en,
+                                bool ext_pol,
+                                bool wdt_en)
+{
+ unsigned int tmp;
+
+ tmp = readl(MTK_WDT_MODE);
+ tmp |= MTK_WDT_MODE_KEY; /* key must accompany every MODE write */
+
+ /* Bit 0: watchdog enable */
+ if (wdt_en)
+  tmp |= MTK_WDT_MODE_ENABLE;
+ else
+  tmp &= ~MTK_WDT_MODE_ENABLE;
+
+ /* Bit 1: external reset signal polarity */
+ if (ext_pol)
+  tmp |= MTK_WDT_MODE_EXT_POL;
+ else
+  tmp &= ~MTK_WDT_MODE_EXT_POL;
+
+ /* Bit 2: external reset signal enable */
+ if (ext_en)
+  tmp |= MTK_WDT_MODE_EXTEN;
+ else
+  tmp &= ~MTK_WDT_MODE_EXTEN;
+
+ /* Bit 3: interrupt instead of reset signal */
+ if (irq)
+  tmp |= MTK_WDT_MODE_IRQ;
+ else
+  tmp &= ~MTK_WDT_MODE_IRQ;
+
+ /* Bit 6: debug module (dual mode) reset enable */
+ if (dual_mode_en)
+  tmp |= MTK_WDT_MODE_DUAL_MODE;
+ else
+  tmp &= ~MTK_WDT_MODE_DUAL_MODE;
+
+ /* Bit 4: always set — bypass-powerkey flag, HW reboot goes to kernel */
+ tmp |= MTK_WDT_MODE_AUTO_RESTART;
+
+ writel(tmp, MTK_WDT_MODE);
+ /* fix: original log line was missing its trailing newline */
+ dprintf(INFO,"mtk_wdt_mode_config LK mode value=%x\n", readl(MTK_WDT_MODE));
+}
+
+/*
+ * Program the watchdog timeout.
+ * @value: timeout in seconds.
+ * WDT_LENGTH layout: bits 15:5 = timeout ticks, bits 4:0 = key (0x08).
+ * Tick rate: 32768 Hz / 512 = 64 ticks per second.
+ * Fix: 'timeout' was needlessly declared static — it is a pure local.
+ */
+static void mtk_wdt_set_time_out_value(uint32_t value)
+{
+ unsigned int timeout;
+
+ /* sec * 32768 / 512 = sec * 64 = sec * (1 << 6) ticks */
+ timeout = (unsigned int)(value * (1 << 6));
+ timeout = timeout << 5; /* shift into bit-field 15:5 */
+ writel((timeout | MTK_WDT_LENGTH_KEY), MTK_WDT_LENGTH);
+}
+
+/*
+ * Kick the watchdog: reload the counter with the WDT_LENGTH timeout
+ * (i.e. keepalive()).
+ */
+void mtk_wdt_restart(void)
+{
+ // Reset WatchDogTimer's counting value to time out value
+ // ie., keepalive()
+ writel(MTK_WDT_RESTART_KEY, MTK_WDT_RESTART);
+}
+
+/* Software-triggered full-chip reset; does not return. */
+static void mtk_wdt_sw_reset(void)
+{
+ printf("UB WDT SW RESET\n");
+ mtk_wdt_reset(1); /* mode 1: bypass power key on the reboot */
+
+ /* mtk_wdt_reset() should have reset the SoC; spin if it did not */
+ while (1)
+  printf("UB SW reset fail ... \n");
+}
+
+/* Reset the chip by letting a 1-second watchdog timeout expire. */
+static void mtk_wdt_hw_reset(void)
+{
+ dprintf(INFO,"UB WDT_HW_Reset\n");
+
+ // 1. set WDT timeout 1 sec: 1*64*512/32768 = 1 sec
+ mtk_wdt_set_time_out_value(1);
+
+ // 2. enable WDT dual (debug) mode; irq, ext reset, ext polarity off;
+ //    watchdog enabled
+ mtk_wdt_mode_config(true, false, false, false, true);
+
+ // 3. reload the watchdog counter from the WDT_LENGTH register
+ mtk_wdt_restart();
+
+ // 4. wait for the timeout to reset the system
+ while (1);
+}
+
+/*
+ * Boot-time watchdog setup: latch the reset reason, then either leave the
+ * watchdog disabled (LK_WDT_DISABLE) or arm it with a 10 s dual-mode
+ * IRQ+ext-reset configuration.
+ */
+void mtk_wdt_init(void)
+{
+ /* This read also caches the reset reason: timeout / SW trigger */
+ dprintf(ALWAYS, "RGU STA: %x\n", mtk_wdt_check_status());
+
+ /* start from a fully-disabled configuration */
+ mtk_wdt_mode_config(false, false, false, false, false);
+
+#if (!LK_WDT_DISABLE)
+ mtk_wdt_set_time_out_value(10);
+ /* dual mode + irq + ext reset, active-low polarity, enabled */
+ mtk_wdt_mode_config(true, true, true, false, true);
+ mtk_wdt_restart();
+#endif
+}
+
+/* True when the cached WDT status says the RGU caused the last reboot. */
+static bool mtk_is_rgu_trigger_reset(void)
+{
+ return mtk_wdt_check_status() != 0;
+}
+
+/*
+ * Architecture reset entry point: reboot via the watchdog.
+ * @mode: non-zero = bypass power key (see mtk_wdt_reset). Never returns.
+ */
+void mtk_arch_reset(char mode)
+{
+ dprintf(INFO,"UB mtk_arch_reset\n");
+
+ mtk_wdt_reset(mode);
+
+ /* wait for the watchdog reset to take effect */
+ while (1);
+}
+
+/*
+ * Reset a subsystem through the RGU SWSYSRST register.
+ * Only WD_MD_RST (modem) is handled: pulse bit 7 high for 1000 us, then
+ * low. Every SWSYSRST write must carry MTK_WDT_SWSYS_RST_KEY.
+ */
+static void rgu_swsys_reset(WD_SYS_RST_TYPE reset_type)
+{
+ if (WD_MD_RST == reset_type) {
+  unsigned int wdt_dbg_ctrl;
+  /* assert the modem reset bit */
+  wdt_dbg_ctrl = readl(MTK_WDT_SWSYSRST);
+  wdt_dbg_ctrl |= MTK_WDT_SWSYS_RST_KEY;
+  wdt_dbg_ctrl |= 0x80;// 1<<7
+  writel(wdt_dbg_ctrl, MTK_WDT_SWSYSRST);
+  spin(1000);
+  /* release the modem reset bit */
+  wdt_dbg_ctrl = readl(MTK_WDT_SWSYSRST);
+  wdt_dbg_ctrl |= MTK_WDT_SWSYS_RST_KEY;
+  wdt_dbg_ctrl &= (~0x80);// ~(1<<7)
+  writel(wdt_dbg_ctrl, MTK_WDT_SWSYSRST);
+  dprintf(INFO,"rgu pl md reset\n");
+ }
+}
+
+/*
+ * Enable (1) or disable (0) DDR reserved mode in WDT_MODE.
+ * Returns 0 on success, -1 on an invalid @enable value.
+ * Fixes: 'tmp' was read uninitialized by the final dprintf when @enable
+ * was neither 0 nor 1 (undefined behavior); the pointless 'volatile'
+ * qualifier on the locals is dropped.
+ */
+int rgu_dram_reserved(int enable)
+{
+ unsigned int tmp = readl(MTK_WDT_MODE);
+ int ret = 0;
+
+ if (1 == enable) {
+  /* enable ddr reserved mode */
+  tmp |= (MTK_WDT_MODE_DDR_RESERVE|MTK_WDT_MODE_KEY);
+  writel(tmp, MTK_WDT_MODE);
+ } else if (0 == enable) {
+  /* disable ddr reserved mode, set reset mode,
+     disable watchdog output reset signal */
+  tmp &= (~MTK_WDT_MODE_DDR_RESERVE);
+  tmp |= MTK_WDT_MODE_KEY;
+  writel(tmp, MTK_WDT_MODE);
+ } else {
+  dprintf(CRITICAL,"Wrong input %d, should be 1(enable) or 0(disable) in %s\n", enable, __func__);
+  ret = -1;
+ }
+ dprintf(CRITICAL,"RGU %s:MTK_WDT_MODE(%x)\n", __func__,tmp);
+ return ret;
+}
+
+/* Returns 1 when DDR reserve mode is enabled in WDT_MODE, else 0. */
+int rgu_is_reserve_ddr_enabled(void)
+{
+ return (readl(MTK_WDT_MODE) & MTK_WDT_MODE_DDR_RESERVE) ? 1 : 0;
+}
+
+/*
+ * Returns 1 when DRAM is in self-refresh (MTK_DDR_SREF_STA set in
+ * WDT_DRAMC_CTL), else 0.
+ * Fix: the original printed "DDR is in self-refresh" unconditionally,
+ * even when it was not; the log now matches the actual state.
+ */
+int rgu_is_dram_slf(void)
+{
+ unsigned int wdt_dbg_ctrl;
+
+ wdt_dbg_ctrl = readl(MTK_WDT_DRAMC_CTL);
+ if (wdt_dbg_ctrl & MTK_DDR_SREF_STA) {
+  dprintf(CRITICAL,"DDR is in self-refresh. %x\n", wdt_dbg_ctrl);
+  return 1;
+ }
+ dprintf(CRITICAL,"DDR is not in self-refresh. %x\n", wdt_dbg_ctrl);
+ return 0;
+}
+
+/* Clear MTK_RG_CONF_ISO in WDT_DRAMC_CTL (write gated by the debug key). */
+void rgu_release_rg_dramc_conf_iso(void)
+{
+ unsigned int ctl = readl(MTK_WDT_DRAMC_CTL);
+
+ ctl &= ~MTK_RG_CONF_ISO;
+ ctl |= MTK_DEBUG_CTL_KEY;
+ writel(ctl, MTK_WDT_DRAMC_CTL);
+ dprintf(CRITICAL,"RGU %s:MTK_WDT_DRAMC_CTL(%x)\n", __func__,ctl);
+}
+
+/* Clear MTK_RG_DRAMC_ISO in WDT_DRAMC_CTL (write gated by the debug key). */
+void rgu_release_rg_dramc_iso(void)
+{
+ unsigned int ctl = readl(MTK_WDT_DRAMC_CTL);
+
+ ctl &= ~MTK_RG_DRAMC_ISO;
+ ctl |= MTK_DEBUG_CTL_KEY;
+ writel(ctl, MTK_WDT_DRAMC_CTL);
+ dprintf(CRITICAL,"RGU %s:MTK_WDT_DRAMC_CTL(%x)\n", __func__,ctl);
+}
+
+/* Clear MTK_RG_DRAMC_SREF in WDT_DRAMC_CTL (write gated by the debug key). */
+void rgu_release_rg_dramc_sref(void)
+{
+ unsigned int ctl = readl(MTK_WDT_DRAMC_CTL);
+
+ ctl &= ~MTK_RG_DRAMC_SREF;
+ ctl |= MTK_DEBUG_CTL_KEY;
+ writel(ctl, MTK_WDT_DRAMC_CTL);
+ dprintf(CRITICAL,"RGU %s:MTK_WDT_DRAMC_CTL(%x)\n", __func__,ctl);
+}
+/* Returns 1 when the DDR reserve handshake completed (RTA bit), else 0. */
+int rgu_is_reserve_ddr_mode_success(void)
+{
+ unsigned int wdt_dbg_ctrl = readl(MTK_WDT_DRAMC_CTL);
+
+ if (wdt_dbg_ctrl & MTK_DDR_RESERVE_RTA) {
+  dprintf(CRITICAL,"WDT DDR reserve mode success! %x\n",wdt_dbg_ctrl);
+  return 1;
+ }
+ dprintf(CRITICAL,"WDT DDR reserve mode FAIL! %x\n",wdt_dbg_ctrl);
+ return 0;
+}
+
+#else
+
+/* No-op stub used when ENABLE_WDT_MODULE is off. */
+void mtk_wdt_init(void)
+{
+ dprintf(INFO,"UB WDT Dummy init called\n");
+}
+
+/*
+ * No-op stub used when ENABLE_WDT_MODULE is off; always reports "not an
+ * RGU-triggered reset".
+ * Fixes: 'FALSE' is not a standard C identifier (the rest of the file
+ * uses lowercase bool literals), and the empty parameter list is made an
+ * explicit (void) prototype.
+ */
+static bool mtk_is_rgu_trigger_reset(void)
+{
+ dprintf(INFO,"UB Dummy mtk_is_rgu_trigger_reset called\n");
+ return false;
+}
+
+/* No-op stub used when ENABLE_WDT_MODULE is off; does not reset. */
+void mtk_arch_reset(char mode)
+{
+ dprintf(INFO,"UB WDT Dummy arch reset called\n");
+}
+
+/* Stub used when ENABLE_WDT_MODULE is off; always "not a WDT reboot". */
+int mtk_wdt_boot_check(void)
+{
+ dprintf(INFO,"UB WDT Dummy mtk_wdt_boot_check called\n");
+ return WDT_NOT_WDT_REBOOT;
+}
+
+/* No-op stub used when ENABLE_WDT_MODULE is off. */
+void mtk_wdt_disable(void)
+{
+ dprintf(INFO,"UB WDT Dummy mtk_wdt_disable called\n");
+}
+
+/* No-op stub used when ENABLE_WDT_MODULE is off. */
+static void mtk_wdt_restart(void)
+{
+ dprintf(INFO,"UB WDT Dummy mtk_wdt_restart called\n");
+}
+/* No-op stub used when ENABLE_WDT_MODULE is off; does not reset. */
+static void mtk_wdt_sw_reset(void)
+{
+ dprintf(INFO,"UB WDT Dummy mtk_wdt_sw_reset called\n");
+}
+
+/* No-op stub used when ENABLE_WDT_MODULE is off; does not reset. */
+static void mtk_wdt_hw_reset(void)
+{
+ dprintf(INFO,"UB WDT Dummy mtk_wdt_hw_reset called\n");
+}
+
+/* No-op stub used when ENABLE_WDT_MODULE is off. */
+static void rgu_swsys_reset(WD_SYS_RST_TYPE reset_type)
+{
+ dprintf(INFO,"UB WDT Dummy rgu_swsys_reset called\n");
+}
+#endif