[Feature][T106_eSDK]T106-V2.01.01.02P56U06.AP.15.05_CAP.15.05(SDK4.4)diff_15.11

Only Configure: No
Affected branch: master
Affected module: unknown
Is it affected on both ZXIC and MTK: only ZXIC
Self-test: Yes
Doc Update: No

Change-Id: Ief12bb341bd859dd73c7a8c5fa3d25e5ba7e1c6d
diff --git a/upstream/linux-5.10/drivers/spi/spi-zx29.c b/upstream/linux-5.10/drivers/spi/spi-zx29.c
new file mode 100755
index 0000000..d570db1
--- /dev/null
+++ b/upstream/linux-5.10/drivers/spi/spi-zx29.c
@@ -0,0 +1,3681 @@
+/******************************************************************************* 
+* Copyright (C) 2016-2021, ZTE Corporation.
+*
+* File Name:spi-zx29.c
+* File Mark:
+* Description:
+* Others:
+* Version:       1.0
+* Author:        ZTE
+* Date:
+* modify
+********************************************************************************/
+
+/****************************************************************************
+* 	                                           Include files
+****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/pm_runtime.h>
+#include <linux/semaphore.h>
+//#include <linux/wakelock.h> //qhf
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/dma/zx-dma.h>
+#include <linux/dma-direct.h>
+#include <asm/memory.h>
+#include <linux/debugfs.h>
+#include <linux/spi/spi.h>
+//#include <linux/soc/zte/pm/drv_idle.h>
+
+#include "spi-zx29.h"
+#include "pub_debug_info.h"
+/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck start */
+#include <linux/wait.h>
+#include <linux/suspend.h>
+/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck end */
+
+/* Per-compatible match data: selects whether this controller instance
+ * runs in master or slave role (enum zx29_ssp_device_mode). */
+struct zx29_ssp_device_of_data {
+	enum zx29_ssp_device_mode	mode;
+};
+/* Forward declaration; the table itself is defined near driver registration. */
+static const struct of_device_id zx29_spi_of_match[];
+
+/****************************************************************************
+* 	                                           Local Macros
+****************************************************************************/
+
+#define CONFIG_SPI_DMA_ENGINE
+#define SPI_PSM_CONTROL        (0) //(1)//qhf
+
+/*
+ * This macro is used to define some register default values.
+ * reg is masked with mask, the OR:ed with an (again masked)
+ * val shifted sb steps to the left.
+ */
+#define SPI_WRITE_BITS(reg, val, mask, sb) \
+ ((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask))))
+
+/*
+ * This macro is also used to define some default values.
+ * It will just shift val by sb steps to the left and mask
+ * the result with mask.
+ */
+#define GEN_MASK_BITS(val, mask, sb) \
+ (((val)<<(sb)) & (mask))
+
+
+#define SPI_GPIO_HIGH 		1
+#define SPI_GPIO_LOW 		0
+
+#define	ZX29_CS_ACTIVE		1	/* normally nCS, active low */
+#define	ZX29_CS_INACTIVE	0
+
+#define DRIVE_TX			0
+#define DO_NOT_DRIVE_TX		1
+
+#define DO_NOT_QUEUE_DMA	0
+#define QUEUE_DMA			1
+
+#define RX_TRANSFER			BIT(0)
+#define TX_TRANSFER			BIT(1)
+
+/* registers */
+#define SPI_VER_REG_OFFSET		(0x00)
+#define SPI_COM_CTRL_OFFSET		(0x04)
+#define SPI_FMT_CTRL_OFFSET		(0x08)
+#define SPI_DR_OFFSET			(0x0C)
+#define SPI_FIFO_CTRL_OFFSET	(0x10)
+#define SPI_FIFO_SR_OFFSET		(0x14)
+#define SPI_INTR_EN_OFFSET		(0x18)
+#define SPI_INTR_SR_OFFSET		(0x1C)
+#define SPI_TIMING_OFFSET		(0x20)
+
+/*
+ * SPI Version Register - SPI_VER_REG
+ */
+#define SPI_VER_REG_MASK_Y		(0xFFUL << 16)
+#define SPI_VER_REG_MASK_X		(0xFFUL << 24)
+
+/*
+ * SPI Common Control Register - SPI_COM_CTRL
+ */
+#define SPI_COM_CTRL_MASK_LBM	    	(0x1UL << 0)
+#define SPI_COM_CTRL_MASK_SSPE 			(0x1UL << 1)
+#define SPI_COM_CTRL_MASK_MS	    	(0x1UL << 2)
+#define SPI_COM_CTRL_MASK_SOD	    	(0x1UL << 3)
+#define SPI_COM_CTRL_MASK_SSPE_BACK		(0x1UL << 4)
+
+/*
+ * SPI Format Control Register - SPI_FMT_CTRL
+ */
+#define SPI_FMT_CTRL_MASK_FRF		(0x3UL << 0)
+#define SPI_FMT_CTRL_MASK_POL		(0x1UL << 2)
+#define SPI_FMT_CTRL_MASK_PHA		(0x1UL << 3)
+#define SPI_FMT_CTRL_MASK_DSS		(0x1FUL << 4)
+
+/*
+ * SPI FIFO Control Register - SPI_FIFO_CTRL
+ */
+#define SPI_FIFO_CTRL_MASK_RX_DMA_EN		(0x1UL << 2)
+#define SPI_FIFO_CTRL_MASK_TX_DMA_EN		(0x1UL << 3)
+#define SPI_FIFO_CTRL_MASK_RX_FIFO_THRES   	(0xFUL << 4)
+#define SPI_FIFO_CTRL_MASK_TX_FIFO_THRES   	(0xFUL << 8)
+/*
+ * SPI FIFO Status Register - SPI_FIFO_SR
+ */
+
+#define SPI_FIFO_SR_MASK_RX_BEYOND_THRES	(0x1UL << 0)
+#define SPI_FIFO_SR_MASK_TX_BEYOND_THRES	(0x1UL << 1)
+#define SPI_FIFO_SR_MASK_RX_FIFO_FULL		(0x1UL << 2)
+#define SPI_FIFO_SR_MASK_TX_FIFO_EMPTY		(0x1UL << 3)
+#define SPI_FIFO_SR_MASK_BUSY				(0x1UL << 4)
+#define SPI_FIFO_SR_SHIFT_RX_CNT			5
+
+#define SPI_FIFO_SR_MASK_RX_FIFO_CNTR		(0x1fUL << SPI_FIFO_SR_SHIFT_RX_CNT)
+#define SPI_FIFO_SR_SHIFT_TX_CNT			10
+#define SPI_FIFO_SR_MASK_TX_FIFO_CNTR		(0x1fUL << SPI_FIFO_SR_SHIFT_TX_CNT)
+
+/*
+ * SPI Interrupt Enable Register - SPI_INTR_EN
+ */
+#define SPI_INTR_EN_MASK_RX_OVERRUN_IE		(0x1UL << 0)
+#define SPI_INTR_EN_MASK_TX_UNDERRUN_IE		(0x1UL << 1)
+#define SPI_INTR_EN_MASK_RX_FULL_IE     	(0x1UL << 2)
+#define SPI_INTR_EN_MASK_TX_EMPTY_IE  		(0x1UL << 3)
+#define SPI_INTR_EN_MASK_RX_THRES_IE  		(0x1UL << 4)
+#define SPI_INTR_EN_MASK_TX_THRES_IE  		(0x1UL << 5)
+//yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme.
+#define SPI_INTR_EN_MASK_MST_EOT_IE  		(0x1UL << 6)
+
+/*
+ * SPI Interrupt Status Register OR Interrupt Clear Register - SPI_INTR_SR_SCLR
+ */
+
+#define SPI_INTR_SR_SCLR_MASK_RX_OVERRUN_INTR	(0x1UL << 0)
+#define SPI_INTR_SR_SCLR_MASK_TX_UNDERRUN_INTR	(0x1UL << 1)
+#define SPI_INTR_SR_SCLR_MASK_RX_FULL_INTR		(0x1UL << 2)
+#define SPI_INTR_SR_SCLR_MASK_TX_EMPTY_INTR		(0x1UL << 3)
+#define SPI_INTR_SR_SCLR_MASK_RX_THRES_INTR		(0x1UL << 4)
+#define SPI_INTR_SR_SCLR_MASK_TX_THRES_INTR		(0x1UL << 5)
+
+/*
+ * SPI TIMING Register
+ */
+#define SPI_TIMING_MASK_T_CS_DESEL         0xFUL
+
+/* SPI WCLK Freqency */
+#define SPI_SPICLK_FREQ_26M		(26*1000*1000)
+#define SPI_SPICLK_FREQ_104M	(104*1000*1000)
+#define SPI_SPICLK_FREQ_156M	(156*1000*1000)
+
+#define CLEAR_ALL_INTERRUPTS	0x3FUL
+#define ENABLE_ALL_INTERRUPTS	0x3FUL
+#define ENABLE_INTERRUPTS		0x03UL
+#define DISABLE_ALL_INTERRUPTS	0x0UL
+/*
+ * Message State
+ * we use the spi_message.state (void *) pointer to
+ * hold a single state value, that's why all this
+ * (void *) casting is done here.
+ */
+
+enum zx29_spi_state {
+	STATE_START,
+	STATE_RUNNING,
+	STATE_DONE,
+	STATE_ERROR
+};
+
+/*
+ * SPI State - Whether Enabled or Disabled
+ */
+#define SPI_DISABLED		(0)
+#define SPI_ENABLED			(1)
+
+/*
+ * SPI DMA State - Whether DMA Enabled or Disabled
+ */
+#define SPI_DMA_DISABLED	(0)
+#define SPI_DMA_ENABLED		(1)
+
+/*
+ * SPI SOD State - Whether SOD Enabled or Disabled
+ */
+#define SPI_SOD_DISABLED	(1)
+#define SPI_SOD_ENABLED		(0)
+#define SPI_SLAVE_MODE  	(1)
+#define SPI_MASTER_MODE		(0)
+
+
+/*
+ * SPI TRANSFER DELAY CFG
+ * DELAY TIME ~= (1 / Baud_Rate) * 8 + (SPI_TIMING_T_CS_DESEL + 1) / Baud_Rate;
+ */
+
+
+/* FIFO threshold levels programmed into SPI_FIFO_CTRL.
+ * NOTE(review): SPI_FIFO_THRES_N presumably triggers at N entries (the
+ * register field holds N-1) — confirm against the SSP datasheet. */
+enum spi_fifo_threshold_level {
+	SPI_FIFO_THRES_1,
+	SPI_FIFO_THRES_2,
+	SPI_FIFO_THRES_3,
+	SPI_FIFO_THRES_4,
+	SPI_FIFO_THRES_5,
+	SPI_FIFO_THRES_6,
+	SPI_FIFO_THRES_7,
+	SPI_FIFO_THRES_8,
+	SPI_FIFO_THRES_9,
+	SPI_FIFO_THRES_10,
+	SPI_FIFO_THRES_11,
+	SPI_FIFO_THRES_12,
+	SPI_FIFO_THRES_13,
+	SPI_FIFO_THRES_14,
+	SPI_FIFO_THRES_15,
+	SPI_FIFO_THRES_16
+
+};
+
+/*
+ * SPI Clock Parameter ranges
+ */
+#define DIV_MIN     0x00
+#define DIV_MAX     0x0F
+
+#define SPI_POLLING_TIMEOUT 1000
+
+/*
+ * The type of reading going on on this chip: the access width used when
+ * draining the RX FIFO.
+ */
+enum spi_reading {
+	READING_NULL,	/* discard incoming data (write-only transfer) */
+	READING_U8,
+	READING_U16,
+	READING_U32
+};
+
+/*
+ * The type of writing going on on this chip: the access width used when
+ * filling the TX FIFO.
+ */
+enum spi_writing {
+	WRITING_NULL,	/* clock out zeros (read-only transfer) */
+	WRITING_U8,
+	WRITING_U16,
+	WRITING_U32
+};
+
+/**
+ * struct vendor_data - vendor-specific config parameters for the IP block
+ * @fifodepth: depth of the FIFOs (both TX and RX)
+ * @max_bpw: maximum number of bits per word
+ * @loopback: whether the block supports loopback mode
+ */
+struct vendor_data {
+	int fifodepth;
+	int max_bpw;
+	bool loopback;
+};
+
+/* Pinctrl/GPIO bookkeeping for one SSP port: the pin-mux states used to
+ * switch lines between SSP function and GPIO, plus the GPIO numbers used
+ * when driving lines manually (e.g. software chip select). */
+struct zx29_ssp_pins
+{
+	struct device	*dev;
+	
+	struct pinctrl		*pctrl;
+	struct pinctrl_state	*pfunc;
+	struct pinctrl_state	*pgpio;
+	struct pinctrl_state	*pcs_gpio_active;
+	struct pinctrl_state	*pcs_func;
+	struct pinctrl_state	*pcs_gpio_sleep;
+	int	gpio_cs;
+	int	gpio_clk;	
+	int	gpio_tx;	
+	int	gpio_rx;
+};
+/* One entry per possible SSP controller instance. */
+struct zx29_ssp_pins ssp_pins[4];
+
+/**
+ * struct zx29_spi - private driver data for one SSP controller
+ * @name: human-readable instance name
+ * @pdev: platform device backing this controller
+ * @vendor: vendor data for the IP block
+ * @phybase: physical address of the register bank
+ * @virtbase: ioremapped register base
+ * @irq: controller interrupt line
+ * @pclk: SPI controller work clock
+ * @spi_clk: clock feeding the SPI CLK line
+ * @clkfreq: cached SPI clock rate in Hz
+ * @master: SPI framework hookup
+ * @master_info: controller-specific data from machine setup
+ * @pump_transfers: tasklet used in interrupt transfer mode
+ * @cur_msg: pointer to the spi_message currently being processed
+ * @cur_transfer: pointer to the current spi_transfer
+ * @cur_chip: per-client runtime state (assigned from controller_state)
+ * @next_msg_cs_active: the next queued message uses the same chip select
+ *  as the previous one, so CS was left asserted between them
+ * @tx: current position in TX buffer to be read
+ * @tx_end: end position in TX buffer to be read
+ * @rx: current position in RX buffer to be written
+ * @rx_end: end position in RX buffer to be written
+ * @read: access width used when draining the RX FIFO
+ * @write: access width used when filling the TX FIFO
+ * @exp_fifo_level: expected FIFO fill level
+ * @rx_lev_trig: RX FIFO trigger level (maps to DMA burst length)
+ * @tx_lev_trig: TX FIFO trigger level (maps to DMA burst length)
+ * @dma_rx_channel: optional channel for RX DMA
+ * @dma_tx_channel: optional channel for TX DMA
+ * @sgt_rx: scattertable for the RX transfer
+ * @sgt_tx: scattertable for the TX transfer
+ * @dummypage: a dummy page used for driving data on the bus with DMA
+ * @dma_running: flags DMA activity (RX_TRANSFER/TX_TRANSFER bits —
+ *  NOTE(review): confirm against the setup code outside this view)
+ * @sema_dma: semaphore completed from dma_callback() in master mode
+ * @wait: waitqueue used instead of @sema_dma in slave mode, so a blocked
+ *  slave read can be woken (T106BUG-616)
+ * @trans_done: condition flag for @wait
+ * @iface_mode: frame format (Motorola/TI/ISI, see defines below)
+ * @mode: master or slave role taken from the OF match data
+ * @zx29_flush_rxfifo: hook to drain the RX FIFO into a caller buffer
+ */
+struct zx29_spi {
+	char name[16];
+	struct platform_device	*pdev;
+	struct vendor_data		*vendor;
+	resource_size_t			phybase;
+	void __iomem			*virtbase;
+	unsigned int	irq;
+	struct clk		*pclk;/* spi controller work clock */
+	struct clk		*spi_clk;/* spi clk line clock */
+	u32				clkfreq;
+	struct spi_master			*master;
+	struct zx29_spi_controller	*master_info;
+	/* Message per-transfer pump */
+	struct tasklet_struct		pump_transfers;
+	struct spi_message			*cur_msg;
+	struct spi_transfer			*cur_transfer;
+	struct chip_data			*cur_chip;
+	bool	next_msg_cs_active;
+	void	*tx;
+	void	*tx_end;
+	void	*rx;
+	void	*rx_end;
+	enum spi_reading	read;
+	enum spi_writing	write;
+	u32					exp_fifo_level;
+	enum spi_rx_level_trig		rx_lev_trig;
+	enum spi_tx_level_trig		tx_lev_trig;
+	/* DMA settings */
+#ifdef CONFIG_SPI_DMA_ENGINE
+	struct dma_chan			*dma_rx_channel;
+	struct dma_chan			*dma_tx_channel;
+	struct sg_table			sgt_rx;
+	struct sg_table			sgt_tx;
+	char					*dummypage;
+	unsigned int			dma_running;
+//	struct mutex		spi_lock;
+#endif
+#if defined(CONFIG_DEBUG_FS)
+	struct dentry *			spi_root;
+	struct debugfs_regset32 	spi_regset;
+	u32 spi_poll_cnt;
+	u32 spi_dma_cnt;
+#endif
+#if SPI_PSM_CONTROL
+    struct wake_lock        psm_lock;
+#endif
+	struct semaphore 	sema_dma;
+/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck start */
+	wait_queue_head_t	wait;
+	int 			trans_done;
+/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck end */
+
+	u8 iface_mode;
+#define	SPI_MOTO_FORMAT	0x00
+#define	SPI_TI_FORMAT	0x01
+#define	SPI_ISI_FORMAT	0x02
+	enum zx29_ssp_device_mode	mode;
+	int (*zx29_flush_rxfifo) (struct zx29_spi *zx29spi,void *buf);
+
+};
+
+/**
+ * struct chip_data - runtime state of the SSP maintained per client chip
+ * @ver_reg: cached value of the version register (SPI_VER_REG)
+ * @com_ctrl: value for the common control register (SPI_COM_CTRL)
+ * @fmt_ctrl: value for the format control register (SPI_FMT_CTRL)
+ * @fifo_ctrl: value for the FIFO control register (SPI_FIFO_CTRL)
+ * @timing: value for the timing register (SPI_TIMING)
+ * @n_bytes: how many bytes (power of 2) reqd for a given data width
+ * @clk_div: SPI clock divider
+ * @enable_dma: whether to enable DMA or not
+ * @enable_trans_gap: wait for bus idle after each word (see readwriter())
+ * @read: RX access width used when doing xfer for this chip
+ * @write: TX access width used when doing xfer for this chip
+ * @cs_control: chip select callback provided by chip
+ * @xfer_type: polling/interrupt/DMA
+ *
+ * This would be set according to the current message that would be served.
+ */
+struct chip_data {
+	u32 ver_reg;
+	u32 com_ctrl;
+	u32 fmt_ctrl;
+	u32 fifo_ctrl;
+	u32 timing;
+//	u32 intr_en;
+	u8 n_bytes;
+	u8 clk_div;/* spi clk divider */
+	bool enable_dma;
+	bool enable_trans_gap;
+	enum spi_reading read;
+	enum spi_writing write;
+	void (*cs_control) (int dev_id,u32 command);
+	int xfer_type;
+};
+//struct semaphore g_SpiTransferSemaphore;
+
+/* One driver-data pointer per possible SSP controller instance. */
+struct zx29_spi *g_zx29_spi[4];
+
+#if SPI_PSM_CONTROL
+/* Number of SPI users currently blocking CPU idle (updated with IRQs off). */
+static volatile unsigned int spi_active_count = 0;
+
+/**
+ * zx29_spi_set_active - block CPU idle and take a wakelock for a transfer
+ * @lock: wakelock associated with this controller
+ */
+static void zx29_spi_set_active(struct wake_lock *lock)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+    if(spi_active_count == 0)
+    {
+        zx_cpuidle_set_busy(IDLE_FLAG_SPI);
+    }
+    spi_active_count++;
+
+	local_irq_restore(flags);
+
+    wake_lock(lock);
+}
+
+/**
+ * zx29_spi_set_idle - release the wakelock after a transfer
+ * @lock: wakelock associated with this controller
+ *
+ * NOTE(review): the matching spi_active_count decrement is compiled out
+ * (#if 0), so once zx29_spi_set_active() has run the idle flag is never
+ * released here — confirm this is intentional.
+ */
+static void zx29_spi_set_idle(struct wake_lock *lock)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+#if 0 //qhf
+	spi_active_count--;
+    if(spi_active_count == 0)
+    {
+        zx_cpuidle_set_free(IDLE_FLAG_SPI);
+    }
+#endif
+	local_irq_restore(flags);
+
+    wake_unlock(lock);
+}
+#endif
+
+static int zx29_do_interrupt_dma_transfer(struct zx29_spi *zx29spi);
+/**
+ * default_cs_control - Dummy chip select function
+ * @dev_id: index into ssp_pins[] selecting the CS GPIO to toggle
+ * @command: ZX29_CS_ACTIVE or ZX29_CS_INACTIVE
+ *
+ * If no chip select function is provided by the client this is used as
+ * the fallback. The CS line is active low, hence @command is inverted
+ * before being written to the GPIO.
+ */
+static void default_cs_control(int dev_id,u32 command)
+{
+	gpio_set_value(ssp_pins[dev_id].gpio_cs, !command);
+}
+
+/**
+ * flush - drain the RX FIFO and wait for the controller to go idle
+ * @zx29spi: SSP driver private data structure
+ *
+ * Reads the data register until the RX FIFO counter reports empty, then
+ * repeats while the BUSY flag stays set, bounded by a loop budget. The
+ * "140a000.ssp" instance reads its RX counter through the named mask;
+ * other instances use a wider 7-bit counter field at the same shift.
+ *
+ * Returns the remaining loop budget; 0 means the wait timed out with
+ * the controller still busy.
+ */
+static int flush(struct zx29_spi *zx29spi)
+{
+	unsigned long limit = loops_per_jiffy << 1;
+	uint32_t rx_fifo_cnt_msk = SPI_FIFO_SR_MASK_RX_FIFO_CNTR;
+
+	dev_dbg(&zx29spi->pdev->dev, "flush\n");
+	/* Flushing FIFO by software cannot clear RX DMA Request. */
+	do {
+		if(0 == strcmp(zx29spi->pdev->name,"140a000.ssp")) {
+			while (readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & rx_fifo_cnt_msk)
+				readl((SPI_DR_OFFSET+zx29spi->virtbase));
+		}else {
+			while ((readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase))>>SPI_FIFO_SR_SHIFT_RX_CNT)&0x7F)
+				readl((SPI_DR_OFFSET+zx29spi->virtbase));
+		}
+	} while ((readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY) && limit--);
+
+	zx29spi->exp_fifo_level = 0;
+
+	return limit;
+}
+
+/**
+ * restore_state - program the controller from the current chip's config
+ * @zx29spi: SSP driver private data structure (uses @cur_chip)
+ */
+static void restore_state(struct zx29_spi *zx29spi)
+{
+	struct chip_data *chip = zx29spi->cur_chip;
+
+	/* Enable only the error interrupts (ENABLE_INTERRUPTS = 0x03:
+	 * RX overrun / TX underrun) and clear any stale status bits. */
+	writel(ENABLE_INTERRUPTS, (SPI_INTR_EN_OFFSET+zx29spi->virtbase));
+	writel(CLEAR_ALL_INTERRUPTS, (SPI_INTR_SR_OFFSET+zx29spi->virtbase));
+
+	writel(chip->fmt_ctrl, (SPI_FMT_CTRL_OFFSET+zx29spi->virtbase));
+	writel(chip->fifo_ctrl, (SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
+//	writel(chip->intr_en, SPI_INTR_EN(zx297520v2spi->virtbase));
+	/* In slave role, force MS=slave and keep the port enabled. */
+	if(zx29spi->mode == ZX29_SSP_SLAVE_TYPE)
+		chip->com_ctrl |= GEN_MASK_BITS(SPI_SLAVE, SPI_COM_CTRL_MASK_MS, 2)|GEN_MASK_BITS(SPI_ENABLED, SPI_COM_CTRL_MASK_SSPE, 1);
+	writel(chip->com_ctrl, (SPI_COM_CTRL_OFFSET + zx29spi->virtbase));
+	writel(chip->timing, (SPI_TIMING_OFFSET + zx29spi->virtbase));
+}
+
+/*
+ * Default spi Register Values
+ */
+#define DEFAULT_SPI_COM_CTRL ( \
+	GEN_MASK_BITS(LOOPBACK_DISABLED, SPI_COM_CTRL_MASK_LBM, 0) | \
+	GEN_MASK_BITS(SPI_DISABLED, SPI_COM_CTRL_MASK_SSPE, 1) | \
+	GEN_MASK_BITS(SPI_MASTER, SPI_COM_CTRL_MASK_MS, 2) \
+)
+
+/*
+ * Default SPI slave-mode register values
+ */
+#define DEFAULT_SPI_SLAVE_COM_CTRL ( \
+	GEN_MASK_BITS(LOOPBACK_DISABLED, SPI_COM_CTRL_MASK_LBM, 0) | \
+	GEN_MASK_BITS(SPI_DISABLED, SPI_COM_CTRL_MASK_SSPE, 1) | \
+	GEN_MASK_BITS(SPI_SLAVE, SPI_COM_CTRL_MASK_MS, 2) \
+)
+
+
+#define DEFAULT_SPI_FMT_CTRL ( \
+	GEN_MASK_BITS(SPI_INTERFACE_MOTOROLA_SPI, SPI_FMT_CTRL_MASK_FRF, 0) | \
+	GEN_MASK_BITS(SPI_CLK_POL_IDLE_LOW, SPI_FMT_CTRL_MASK_POL, 2) | \
+	GEN_MASK_BITS(SPI_CLK_FIRST_EDGE, SPI_FMT_CTRL_MASK_PHA, 3) | \
+	GEN_MASK_BITS(SPI_DATA_BITS_8, SPI_FMT_CTRL_MASK_DSS, 4) \
+)
+
+#define DEFAULT_SPI_FIFO_CTRL ( \
+	GEN_MASK_BITS(SPI_DMA_DISABLED, SPI_FIFO_CTRL_MASK_RX_DMA_EN, 2) | \
+	GEN_MASK_BITS(SPI_DMA_DISABLED, SPI_FIFO_CTRL_MASK_TX_DMA_EN, 3) | \
+	GEN_MASK_BITS(SPI_FIFO_THRES_8, SPI_FIFO_CTRL_MASK_RX_FIFO_THRES, 4) | \
+	GEN_MASK_BITS(SPI_FIFO_THRES_8, SPI_FIFO_CTRL_MASK_TX_FIFO_THRES, 8) \
+)
+
+
+/**
+ * load_spi_default_config - program safe power-on defaults
+ * @zx29spi: SSP driver private data structure
+ *
+ * Clears pending interrupt status, enables only the error interrupts,
+ * and writes default format/FIFO/common-control values. The common
+ * control default depends on whether this instance is master or slave.
+ */
+static void load_spi_default_config(struct zx29_spi *zx29spi)
+{
+	writel(CLEAR_ALL_INTERRUPTS, (SPI_INTR_SR_OFFSET+zx29spi->virtbase));
+	writel(ENABLE_INTERRUPTS, (SPI_INTR_EN_OFFSET+zx29spi->virtbase));
+
+	writel(DEFAULT_SPI_FMT_CTRL, (SPI_FMT_CTRL_OFFSET+zx29spi->virtbase));
+	writel(DEFAULT_SPI_FIFO_CTRL, (SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
+
+	if(zx29spi->mode == ZX29_SSP_MASTER_TYPE) {
+		writel(DEFAULT_SPI_COM_CTRL, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
+	}
+	else {	
+		writel(DEFAULT_SPI_SLAVE_COM_CTRL, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
+	}	
+}
+
+
+/**
+ * reader - drain available RX FIFO entries into zx29spi->rx
+ * @zx29spi: SSP driver private data structure
+ *
+ * Reads at most the number of entries currently reported by the FIFO
+ * status register (counter read via the named mask on "140a000.ssp",
+ * via a 7-bit field elsewhere), using the access width selected in
+ * zx29spi->read. READING_NULL discards the data.
+ *
+ * Returns the number of bytes consumed from the FIFO.
+ */
+static unsigned reader(struct zx29_spi *zx29spi)
+{
+	uint32_t fifo_sr = 0,rd_max = 0;
+	unsigned len = 0;
+	uint32_t rx_fifo_cnt_msk = SPI_FIFO_SR_MASK_RX_FIFO_CNTR;
+
+	fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase));
+	if(!strcmp(zx29spi->pdev->name,"140a000.ssp")) {
+		rd_max = (fifo_sr & rx_fifo_cnt_msk) >> SPI_FIFO_SR_SHIFT_RX_CNT;
+	}else {
+		rd_max = (fifo_sr>>SPI_FIFO_SR_SHIFT_RX_CNT)&0x7f;
+	}
+	//read rx fifo to empty first
+	while ((zx29spi->rx < zx29spi->rx_end) && rd_max--) {
+		switch (zx29spi->read) {
+		case READING_NULL:
+			readw((SPI_DR_OFFSET+zx29spi->virtbase));
+			break;
+		case READING_U8:
+			*(u8 *) (zx29spi->rx) =
+				readw((SPI_DR_OFFSET+zx29spi->virtbase)) & 0xFFU;
+			break;
+		case READING_U16:
+			*(u16 *) (zx29spi->rx) =
+				(u16) readw((SPI_DR_OFFSET+zx29spi->virtbase));
+			break;
+		case READING_U32:
+			*(u32 *) (zx29spi->rx) =
+				readl((SPI_DR_OFFSET+zx29spi->virtbase));
+			break;
+		}
+		len += zx29spi->cur_chip->n_bytes;
+		zx29spi->rx += (zx29spi->cur_chip->n_bytes);		
+		zx29spi->exp_fifo_level--;
+	}
+	return len;
+
+}
+
+/**
+ * writer - push TX data into the FIFO as far as it currently allows
+ * @zx29spi: SSP driver private data structure
+ *
+ * wr_max is taken from the TX FIFO counter field of the status register
+ * (named mask on "140a000.ssp", bits [16:12] elsewhere) and bounds how
+ * many words are written this call; one slot is held back while the
+ * BUSY flag is set. The access width comes from zx29spi->write;
+ * WRITING_NULL clocks out zeros.
+ *
+ * Returns the number of bytes pushed into the FIFO.
+ */
+static unsigned writer(struct zx29_spi *zx29spi)
+{
+	uint32_t fifo_sr;
+	uint32_t wr_max;
+	uint32_t tx_fifo_cnt_msk = SPI_FIFO_SR_MASK_TX_FIFO_CNTR;
+	uint32_t tx_fifo_cnt_pos = SPI_FIFO_SR_SHIFT_TX_CNT;
+	unsigned len = 0;
+
+	fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase));
+	if(!strcmp(zx29spi->pdev->name,"140a000.ssp")) {
+		wr_max = (fifo_sr & tx_fifo_cnt_msk) >> tx_fifo_cnt_pos;
+	}else {
+		wr_max = (fifo_sr>>12)&0x1f;
+	}
+	
+	if ((fifo_sr & SPI_FIFO_SR_MASK_BUSY) && wr_max) {
+		wr_max--;
+	}
+	
+	while ((zx29spi->tx < zx29spi->tx_end) && wr_max--) {
+		switch (zx29spi->write) {
+		case WRITING_NULL:
+			writew(0x0, (SPI_DR_OFFSET+zx29spi->virtbase));
+			break;
+		case WRITING_U8:
+			writew(*(u8 *) (zx29spi->tx), (SPI_DR_OFFSET+zx29spi->virtbase));
+			break;
+		case WRITING_U16:
+			writew((*(u16 *) (zx29spi->tx)), (SPI_DR_OFFSET+zx29spi->virtbase));
+			break;
+		case WRITING_U32:
+			writel(*(u32 *) (zx29spi->tx), (SPI_DR_OFFSET+zx29spi->virtbase));
+			break;
+		}
+		len += zx29spi->cur_chip->n_bytes;
+		zx29spi->tx += (zx29spi->cur_chip->n_bytes);
+		zx29spi->exp_fifo_level++;
+	}
+	return len;
+}
+
+
+/**
+ * readwriter - polled pump: drain RX then fill TX for the current transfer
+ * @zx29spi: SSP driver private data structure
+ *
+ * First empties the RX FIFO (bounded by the RX counter field), then
+ * writes TX words (bounded by the TX counter field, minus one slot while
+ * BUSY). With cur_chip->enable_trans_gap set, the code busy-waits for
+ * the bus to go idle after EVERY word (up to 10 ms each); otherwise it
+ * waits once after the batch (up to 100 ms). Timeouts are logged, not
+ * treated as errors.
+ */
+static void readwriter(struct zx29_spi *zx29spi)
+{
+	uint32_t fifo_sr;
+	uint32_t rd_max, wr_max;
+	uint32_t rx_fifo_cnt_msk;
+	uint32_t tx_fifo_cnt_msk;
+	uint32_t tx_fifo_cnt_pos;
+	ktime_t k_time_start = 0;
+	ktime_t diff_ns = 0;
+
+	rx_fifo_cnt_msk = SPI_FIFO_SR_MASK_RX_FIFO_CNTR;
+	tx_fifo_cnt_msk = SPI_FIFO_SR_MASK_TX_FIFO_CNTR;
+	tx_fifo_cnt_pos = SPI_FIFO_SR_SHIFT_TX_CNT;
+
+	/* Counter field layout differs between instances: "140a000.ssp"
+	 * uses the named masks, other instances use wider/shifted fields. */
+	fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase));
+	if(!strcmp(zx29spi->pdev->name,"140a000.ssp")) {
+		rd_max = (fifo_sr & rx_fifo_cnt_msk) >> SPI_FIFO_SR_SHIFT_RX_CNT;
+		wr_max = (fifo_sr & tx_fifo_cnt_msk) >> tx_fifo_cnt_pos;
+	}else {
+		rd_max = (fifo_sr>>SPI_FIFO_SR_SHIFT_RX_CNT)&0x7f;
+		wr_max = (fifo_sr>>12)&0x1f;
+	}
+	
+	/* Hold one TX slot back while the shifter is busy. */
+	if ((fifo_sr & SPI_FIFO_SR_MASK_BUSY) && wr_max) {
+		wr_max--;
+	}
+	//read rx fifo to empty first
+	while ((zx29spi->rx < zx29spi->rx_end) && rd_max--) {
+		switch (zx29spi->read) {
+		case READING_NULL:
+			readw((SPI_DR_OFFSET+zx29spi->virtbase));
+			break;
+		case READING_U8:
+			*(u8 *) (zx29spi->rx) =
+				readw((SPI_DR_OFFSET+zx29spi->virtbase)) & 0xFFU;
+			break;
+		case READING_U16:
+			*(u16 *) (zx29spi->rx) =
+				(u16) readw((SPI_DR_OFFSET+zx29spi->virtbase));
+			break;
+		case READING_U32:
+			*(u32 *) (zx29spi->rx) =
+				readl((SPI_DR_OFFSET+zx29spi->virtbase));
+			break;
+		}
+		zx29spi->rx += (zx29spi->cur_chip->n_bytes);		
+		zx29spi->exp_fifo_level--;
+	}
+
+	//write
+	while ((zx29spi->tx < zx29spi->tx_end) && wr_max--) {
+		switch (zx29spi->write) {
+		case WRITING_NULL:
+			writew(0x0, (SPI_DR_OFFSET+zx29spi->virtbase));
+			break;
+		case WRITING_U8:
+			writew(*(u8 *) (zx29spi->tx), (SPI_DR_OFFSET+zx29spi->virtbase));
+			break;
+		case WRITING_U16:
+			writew((*(u16 *) (zx29spi->tx)), (SPI_DR_OFFSET+zx29spi->virtbase));
+			break;
+		case WRITING_U32:
+			writel(*(u32 *) (zx29spi->tx), (SPI_DR_OFFSET+zx29spi->virtbase));
+			break;
+		}
+		zx29spi->tx += (zx29spi->cur_chip->n_bytes);
+		zx29spi->exp_fifo_level++;
+		
+		/* Per-word inter-transfer gap: wait for bus idle, 10 ms cap. */
+		if(zx29spi->cur_chip->enable_trans_gap) {
+			
+			k_time_start = ktime_get();
+			do {
+				diff_ns = ktime_sub(ktime_get(),k_time_start);
+				fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;
+				cpu_relax();
+			}
+			while (fifo_sr && diff_ns < 10000000); //10ms
+			if(diff_ns >= 10000000) {
+				fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;
+				if(fifo_sr)
+					dev_info(&zx29spi->pdev->dev, "bus busy time_start=%lld diff_ns=%lld \n",k_time_start,diff_ns);
+			}
+		}
+	}
+	
+	/* No per-word gap: wait once for the whole batch, 100 ms cap. */
+	if(!zx29spi->cur_chip->enable_trans_gap) {
+		
+		k_time_start = ktime_get();
+		do {
+			diff_ns = ktime_sub(ktime_get(),k_time_start);
+			fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;	
+			cpu_relax();
+		}while (fifo_sr && diff_ns < 100000000); //100ms
+		if(diff_ns >= 100000000) {
+			fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;
+			if(fifo_sr)
+				dev_info(&zx29spi->pdev->dev, "bus busy.. time_start=%lld diff_ns=%lld \n",k_time_start,diff_ns);
+		}
+	} 
+	/*
+	 * When we exit here the TX FIFO should be full and the RX FIFO
+	 * should be empty
+	 */
+}
+
+/*
+ * This DMA functionality is only compiled in if we have
+ * access to the generic DMA devices/DMA engine.
+ */
+#ifdef CONFIG_SPI_DMA_ENGINE
+
+/**
+ * zx29_fill_txfifo - clock out dummy zero words for the current transfer
+ * @zx29spi: SSP driver private data structure
+ *
+ * Writes 0x0 words until zx29spi->tx reaches tx_end or cur_transfer->len
+ * bytes have been pushed. Each round bounds the burst by the free TX
+ * count minus the pending RX count (and minus one slot while BUSY), so
+ * the RX FIFO is never overrun while data is being clocked in.
+ * NOTE(review): cur_transfer_len is unsigned and decremented by n_bytes
+ * per word — confirm len is always a multiple of n_bytes, otherwise the
+ * loop condition can underflow.
+ */
+static void zx29_fill_txfifo(struct zx29_spi *zx29spi)
+{
+	uint32_t fifo_sr;
+	int32_t rd_max, wr_max;
+	uint32_t rx_fifo_cnt_msk;
+	uint32_t tx_fifo_cnt_msk;
+	uint32_t tx_fifo_cnt_pos;
+	unsigned cur_transfer_len;
+	rx_fifo_cnt_msk = SPI_FIFO_SR_MASK_RX_FIFO_CNTR;
+	tx_fifo_cnt_msk = SPI_FIFO_SR_MASK_TX_FIFO_CNTR;
+	tx_fifo_cnt_pos = SPI_FIFO_SR_SHIFT_TX_CNT;
+
+	if(!zx29spi) {
+		printk("zx29spi err! \r\n");
+		return;
+	}
+	cur_transfer_len = zx29spi->cur_transfer->len;
+	while (zx29spi->tx < zx29spi->tx_end && cur_transfer_len) {
+		fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase));
+#if 0
+		rd_max = (fifo_sr & rx_fifo_cnt_msk) >> SPI_FIFO_SR_SHIFT_RX_CNT;
+		wr_max = (fifo_sr & tx_fifo_cnt_msk) >> tx_fifo_cnt_pos;
+#else
+		if(!strcmp(zx29spi->pdev->name,"140a000.ssp")) {
+			rd_max = (fifo_sr & rx_fifo_cnt_msk) >> SPI_FIFO_SR_SHIFT_RX_CNT;
+			wr_max = (fifo_sr & tx_fifo_cnt_msk) >> tx_fifo_cnt_pos;
+		}else {
+			rd_max = (fifo_sr>>SPI_FIFO_SR_SHIFT_RX_CNT)&0x7f;
+			wr_max = (fifo_sr>>12)&0x1f;
+		}
+#endif
+		if (fifo_sr & SPI_FIFO_SR_MASK_BUSY) {
+			wr_max--;
+		}
+		/* Never push more than the RX side can absorb. */
+		wr_max -= rd_max;
+		wr_max = (wr_max > 0) ? wr_max : 0;
+
+		//write
+		while ((zx29spi->tx < zx29spi->tx_end) && wr_max--) {
+			writew(0x0, (SPI_DR_OFFSET+zx29spi->virtbase));
+			zx29spi->tx += (zx29spi->cur_chip->n_bytes);
+			cur_transfer_len -= zx29spi->cur_chip->n_bytes;
+		}
+
+		cpu_relax();
+	}
+}
+
+/**
+ * dma_callback - DMA-engine completion callback for a SPI transfer
+ * @data: struct zx29_spi * for the controller that completed
+ *
+ * In master mode the waiter blocks on sema_dma; in slave mode it blocks
+ * on the waitqueue so a stuck read can be interrupted (T106BUG-616).
+ */
+static void dma_callback(void *data)
+{
+	struct zx29_spi *zx29spi = (struct zx29_spi *)data;
+	//printk(KERN_INFO "spi:dma transfer complete. %X-%X-%x\n", zx29spi->dma_running, readl((SPI_INTR_SR_OFFSET+zx29spi->virtbase)),readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)));
+        /* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck start */
+	//up(&zx29spi->sema_dma);
+	if(zx29spi->master->slave == true){
+		/* Publish completion BEFORE waking the sleeper: wait_event()
+		 * re-checks trans_done on wakeup, so setting the flag after
+		 * wake_up() can lose the wakeup (waiter sees false, sleeps
+		 * again, and no further wake_up() arrives). */
+		zx29spi->trans_done = true;
+		wake_up(&zx29spi->wait);
+	}else{
+		up(&zx29spi->sema_dma);
+	}
+        /* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck end */
+}
+
+/*
+static void dma_callback_tx(void *data)
+{
+	struct zx29_spi *zx29spi = (struct zx29_spi *)data;
+   // printk(KERN_INFO "spi:dma transfer complete tx\n");
+    printk("%s",__func__);
+   printk("COM=0x%x,FMT=0x%x,FIFO_CTL=0x%x,FIFO_SR=0x%x\n",readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)),readl((SPI_FMT_CTRL_OFFSET+zx29spi->virtbase)),readl((SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase)),readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)));
+
+    //up(&g_SpiTransferSemaphore);
+}
+*/
+
+/**
+ * configure_dma - configures the channels for the next transfer
+ *  SSP driver's private data structure
+ */
+
+static int configure_dma(struct zx29_spi *zx29spi)
+{
+//	unsigned int pages;
+//	int ret;
+//	int rx_sglen, tx_sglen;
+	dma_channel_def rx_conf;
+	dma_channel_def tx_conf;
+	struct dma_chan *rxchan = zx29spi->dma_rx_channel;
+	struct dma_chan *txchan = zx29spi->dma_tx_channel;
+	struct dma_async_tx_descriptor *rxdesc;
+	struct dma_async_tx_descriptor *txdesc;
+	struct spi_transfer *transfer = zx29spi->cur_transfer;
+
+	rx_conf.src_addr  = (SPI_DR_OFFSET+zx29spi->phybase);
+	rx_conf.dest_addr = (unsigned int)zx29spi->rx;
+	rx_conf.dma_control.tran_mode = TRAN_PERI_TO_MEM;
+	rx_conf.dma_control.irq_mode  = DMA_ALL_IRQ_ENABLE;
+	rx_conf.link_addr = 0;
+
+	tx_conf.src_addr  = (unsigned int)zx29spi->tx;
+	tx_conf.dest_addr =  (SPI_DR_OFFSET+zx29spi->phybase);
+	tx_conf.dma_control.tran_mode  = TRAN_MEM_TO_PERI;
+	tx_conf.dma_control.irq_mode  = DMA_ALL_IRQ_ENABLE;
+	tx_conf.link_addr = 0;
+
+
+	/* Check that the channels are available */
+	if (!rxchan || !txchan)
+		return -ENODEV;
+
+	/*
+	 * If supplied, the DMA burstsize should equal the FIFO trigger level.
+	 * Notice that the DMA engine uses one-to-one mapping. Since we can
+	 * not trigger on 2 elements this needs explicit mapping rather than
+	 * calculation.
+	 */
+	 
+	switch (zx29spi->rx_lev_trig) {
+	case SPI_RX_1_OR_MORE_ELEM:
+		rx_conf.dma_control.src_burst_len = DMA_BURST_LEN_1;
+		rx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_1;
+		break;
+	case SPI_RX_4_OR_MORE_ELEM:
+		rx_conf.dma_control.src_burst_len = DMA_BURST_LEN_4;
+		rx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_4;
+		break;
+	case SPI_RX_8_OR_MORE_ELEM:
+		rx_conf.dma_control.src_burst_len = DMA_BURST_LEN_8;
+		rx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_8;
+		break;
+	case SPI_RX_16_OR_MORE_ELEM:
+		rx_conf.dma_control.src_burst_len = DMA_BURST_LEN_16;
+		rx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_16;
+		break;
+	case SPI_RX_32_OR_MORE_ELEM:
+		rx_conf.dma_control.src_burst_len = DMA_BURST_LEN_ALL;
+		rx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_ALL;
+		break;
+	default:
+		rx_conf.dma_control.src_burst_len = zx29spi->vendor->fifodepth >> 1;
+		rx_conf.dma_control.dest_burst_len = zx29spi->vendor->fifodepth >> 1;
+		break;
+	}
+
+	switch (zx29spi->tx_lev_trig) {
+	case SPI_TX_1_OR_MORE_EMPTY_LOC:
+		tx_conf.dma_control.src_burst_len = DMA_BURST_LEN_1;
+		tx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_1;
+		break;
+	case SPI_TX_4_OR_MORE_EMPTY_LOC:
+		tx_conf.dma_control.src_burst_len = DMA_BURST_LEN_4;
+		tx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_4;
+		break;
+	case SPI_TX_8_OR_MORE_EMPTY_LOC:
+		tx_conf.dma_control.src_burst_len = DMA_BURST_LEN_8;
+		tx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_8;
+		break;
+	case SPI_TX_16_OR_MORE_EMPTY_LOC:
+		tx_conf.dma_control.src_burst_len = DMA_BURST_LEN_16;
+		tx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_16;
+		break;
+	case SPI_TX_32_OR_MORE_EMPTY_LOC:
+		tx_conf.dma_control.src_burst_len = DMA_BURST_LEN_ALL;
+		tx_conf.dma_control.dest_burst_len = DMA_BURST_LEN_ALL;
+		break;
+	default:
+		tx_conf.dma_control.src_burst_len = zx29spi->vendor->fifodepth >> 1;
+		tx_conf.dma_control.dest_burst_len = zx29spi->vendor->fifodepth >> 1;
+		break;
+	}
+
+	switch (zx29spi->read) {
+	case READING_NULL:
+		/* Use the same as for writing */
+		rx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
+		rx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
+		rx_conf.count	  = zx29spi->cur_transfer->len;
+		break;
+	case READING_U8:
+		rx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
+		rx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
+		rx_conf.count	  = zx29spi->cur_transfer->len;
+		break;
+	case READING_U16:
+		rx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_16BIT;
+		rx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_16BIT;
+		rx_conf.count	  = zx29spi->cur_transfer->len;
+		break;
+	case READING_U32:
+		rx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_32BIT;
+		rx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_32BIT;
+		rx_conf.count	  = zx29spi->cur_transfer->len;
+		break;
+	}
+
+	switch (zx29spi->write) {
+	case WRITING_NULL:
+		/* Use the same as for reading */
+		tx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
+		tx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
+		tx_conf.count	  = zx29spi->cur_transfer->len;
+		break;
+	case WRITING_U8:
+		tx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
+		tx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
+		tx_conf.count	  = zx29spi->cur_transfer->len;
+		break;
+	case WRITING_U16:
+		tx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_16BIT;
+		tx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_16BIT;
+		tx_conf.count	  = zx29spi->cur_transfer->len;
+		break;
+	case WRITING_U32:
+		tx_conf.dma_control.src_burst_size = DMA_BURST_SIZE_32BIT;
+		tx_conf.dma_control.dest_burst_size = DMA_BURST_SIZE_32BIT;
+		tx_conf.count	  = zx29spi->cur_transfer->len;
+	break;
+	}
+
+	dmaengine_slave_config(rxchan,(struct dma_slave_config*)&rx_conf);
+	dmaengine_slave_config(txchan,(struct dma_slave_config*)&tx_conf);
+
+	/* Submit and fire RX and TX with TX last so we're ready to read! */
+	if (zx29spi->rx) {
+	//printk("%s,tx=%p,rx=%p,len=%d\n",__func__,zx29spi->tx,zx29spi->rx,zx29spi->cur_transfer->len);
+	//printk("tx_conf:sb_len=%d,db_len=%d, sb_size=%d,db_size=%d\n",tx_conf.dma_control.src_burst_len, tx_conf.dma_control.dest_burst_len, tx_conf.dma_control.src_burst_size,  tx_conf.dma_control.dest_burst_size);
+	//printk("rx_conf:sb_len=%d,db_len=%d, sb_size=%d,db_size=%d\n",rx_conf.dma_control.src_burst_len, rx_conf.dma_control.dest_burst_len, rx_conf.dma_control.src_burst_size, rx_conf.dma_control.dest_burst_size);
+
+		rxdesc= rxchan->device->device_prep_interleaved_dma(rxchan,NULL,0);
+		txdesc= txchan->device->device_prep_interleaved_dma(txchan,NULL,0);
+		/* Put the callback on the RX transfer only, that should finish last */
+		rxdesc->callback = dma_callback;
+		rxdesc->callback_param = zx29spi;
+	// txdesc->callback = dma_callback_tx;
+	// txdesc->callback_param = zx29spi;
+
+		dmaengine_submit(rxdesc);
+		dma_async_issue_pending(rxchan);
+		if (transfer->tx_dma) {
+			/* SPI RX buffer may overflow in DMA busy situation. */
+			dmaengine_submit(txdesc);
+			dma_async_issue_pending(txchan);
+			zx29spi->dma_running = TX_TRANSFER | RX_TRANSFER;
+			enable_irq(zx29spi->irq);	/* detect overflow through interrupt */
+		} else {
+			if(zx29spi->mode == ZX29_SSP_MASTER_TYPE)
+				zx29_fill_txfifo(zx29spi);
+			zx29spi->dma_running = RX_TRANSFER;
+		}
+	}
+	else if (zx29spi->tx){
+		txdesc = txchan->device->device_prep_interleaved_dma(txchan,NULL,0);
+		txdesc->callback = dma_callback;
+		txdesc->callback_param = zx29spi;
+		dmaengine_submit(txdesc);
+		dma_async_issue_pending(txchan);
+		zx29spi->dma_running = TX_TRANSFER;
+	}
+
+	return 0;
+}
+
#if 0
/*
 * Dead code: a local DMA-channel filter, superseded by the extern
 * zx29_dma_filter_fn() provided by the platform DMA driver (declared
 * just below).  Kept under #if 0 for reference only; never compiled.
 */
static bool zx29_dma_filter_fn(struct dma_chan *chan, void *param)
{
    dma_peripheral_id peri_id =  (dma_peripheral_id) param;
#if 0
	if ((chan->chan_id == (unsigned int)peri_id) && \
		(strcmp(dev_name(chan->device->dev), "a1200000.dma") == 0))
		return true;

	chan->private = param;

    return false;
#endif
	/* Match purely on channel id; stash the requested id for later use. */
	if (chan->chan_id == (unsigned int)peri_id)
		return true;

	chan->private = param;

    return false;

}
#endif
+extern bool zx29_dma_filter_fn(struct dma_chan *chan, void *param);
+static int  zx29_dma_probe(struct zx29_spi *zx29spi)
+{
+	dma_cap_mask_t mask;
+
+	/* Try to acquire a generic DMA engine slave channel */
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+	/*
+	 * We need both RX and TX channels to do DMA, else do none
+	 * of them.
+	 */
+	zx29spi->dma_rx_channel = dma_request_channel(mask,
+                        zx29_dma_filter_fn,
+					    zx29spi->master_info->dma_rx_param);
+	if (!zx29spi->dma_rx_channel) {
+		dev_dbg(&zx29spi->pdev->dev, "no RX DMA channel!\n");
+		dev_err(&zx29spi->pdev->dev, "no RX DMA channel!,dma_rx_param=:%d\n",zx29spi->master_info->dma_rx_param);
+		goto err_no_rxchan;
+	}
+
+	zx29spi->dma_tx_channel = dma_request_channel(mask,
+                        zx29_dma_filter_fn,
+					    zx29spi->master_info->dma_tx_param);
+	if (!zx29spi->dma_tx_channel) {
+		dev_dbg(&zx29spi->pdev->dev, "no TX DMA channel!\n");
+		dev_err(&zx29spi->pdev->dev, "no TX DMA channel!\n");
+		goto err_no_txchan;
+	}
+
+	zx29spi->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!zx29spi->dummypage) {
+		dev_dbg(&zx29spi->pdev->dev, "no DMA dummypage!\n");
+		dev_err(&zx29spi->pdev->dev, "no DMA dummypage!\n");
+		goto err_no_dummypage;
+	}
+
+	dev_info(&zx29spi->pdev->dev, "setup for DMA on RX %s, TX %s\n",
+		 dma_chan_name(zx29spi->dma_rx_channel),
+		 dma_chan_name(zx29spi->dma_tx_channel));
+
+	return 0;
+
+err_no_dummypage:
+	dma_release_channel(zx29spi->dma_tx_channel);
+err_no_txchan:
+	dma_release_channel(zx29spi->dma_rx_channel);
+	zx29spi->dma_rx_channel = NULL;
+err_no_rxchan:
+	dev_err(&zx29spi->pdev->dev,
+			"Failed to work in dma mode, work without dma!\n");
+	dev_dbg(&zx29spi->pdev->dev,
+			"Failed to work in dma mode, work without dma!\n");
+	return -ENODEV;
+}
+
+static void terminate_dma(struct zx29_spi *zx29spi)
+{
+	struct dma_chan *rxchan = zx29spi->dma_rx_channel;
+	struct dma_chan *txchan = zx29spi->dma_tx_channel;
+
+	dmaengine_terminate_all(rxchan);
+	dmaengine_terminate_all(txchan);
+//	unmap_free_dma_scatter(zx29spi);
+	zx29spi->dma_running = 0;
+}
+
+static void zx29_dma_remove(struct zx29_spi *zx29spi)
+{
+	if (zx29spi->dma_running)
+		terminate_dma(zx29spi);
+	if (zx29spi->dma_tx_channel)
+		dma_release_channel(zx29spi->dma_tx_channel);
+	if (zx29spi->dma_rx_channel)
+		dma_release_channel(zx29spi->dma_rx_channel);
+	kfree(zx29spi->dummypage);
+}
+
+#endif
+
+static irqreturn_t zx29_spi_irq(int irqno, void *dev_id)
+{
+	struct zx29_spi *zx29spi = dev_id;
+
+	disable_irq_nosync(zx29spi->irq);
+	up(&zx29spi->sema_dma);
+
+	//pr_info("spi_irq %X-%X\n", zx29spi->dma_running, readl((SPI_INTR_SR_OFFSET+zx29spi->virtbase)));
+	return IRQ_HANDLED;
+}
+
/*
 * print_info_data - hex-dump a buffer to the kernel log, 8 bytes per line.
 * @data: buffer to dump (NULL is tolerated: nothing is printed)
 * @len:  number of bytes to dump
 *
 * Fix: the original loop only printed whole 8-byte groups, silently
 * dropping the trailing len % 8 bytes (and printing nothing at all for
 * len < 8).  The remainder is now emitted on a final short line.
 */
static void print_info_data(void * data, int len) {
	int i;
	unsigned char *p = data;

	if (!p || len <= 0)
		return;

	for (i = 0; i + 8 <= len; i += 8) {
		printk("%02x %02x %02x %02x %02x %02x %02x %02x \r\n",
		       p[i], p[i+1], p[i+2], p[i+3], p[i+4], p[i+5], p[i+6], p[i+7]);
	}
	/* trailing partial group, if any */
	for (; i < len; i++)
		printk("%02x ", p[i]);
	printk("\n");
}
+
+static int zx29_flush_rxfifo(struct zx29_spi *zx29spi,void *buf)
+{
+	int ret = 0;
+	struct spi_transfer transfer;
+	unsigned char data[64] = {0};
+	uint32_t fifo_sr = 0;
+	uint32_t rd_max = 0;
+	uint32_t rx_fifo_cnt_msk= SPI_FIFO_SR_MASK_RX_FIFO_CNTR;
+	
+	transfer.tx_buf = 0;
+	transfer.rx_buf = data;
+	transfer.len = 0;
+	
+	fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase));
+	if(!strcmp(zx29spi->pdev->name,"140a000.ssp")) {
+		rd_max = (fifo_sr & rx_fifo_cnt_msk) >> SPI_FIFO_SR_SHIFT_RX_CNT;
+	}else {
+		rd_max = (fifo_sr>>SPI_FIFO_SR_SHIFT_RX_CNT)&0x7f;
+	}
+	while(rd_max--) {
+		*(u8 *) transfer.rx_buf =
+				readw((SPI_DR_OFFSET+zx29spi->virtbase)) & 0xFFU;
+		transfer.rx_buf++;
+		transfer.len++;
+	}
+	memcpy(buf,data,transfer.len);
+	//dev_info(&zx29spi->pdev->dev,"spi_fifo_sr = %d transfer.len=%d \n",fifo_sr,transfer.len);
+	//print_info_data(data,transfer.len);
+	return transfer.len;
+	
+}
+
+int get_spi_rx_fifo(struct spi_device	*spi,unsigned char *buf)
+{
+	
+	struct zx29_spi *zx29spi = spi_master_get_devdata(spi->master);
+	int ret = 0;
+
+	if(!spi || !buf || !zx29spi)
+		return ret;
+	if(!zx29spi->zx29_flush_rxfifo)
+		return ret;
+	return zx29spi->zx29_flush_rxfifo(zx29spi,buf);
+}
+
+
+void set_spi_timing(struct spi_device	*spi,unsigned int param)
+{
+	struct zx29_spi *zx29spi = spi_master_get_devdata(spi->master);
+
+	printk("val set before: 0x%x \n",readl((SPI_TIMING_OFFSET+zx29spi->virtbase)));
+	writel(param, (SPI_TIMING_OFFSET+zx29spi->virtbase));
+	printk("val set after: 0x%x \n",readl((SPI_TIMING_OFFSET+zx29spi->virtbase)));
+}
+
+
+void slave_mode_set(struct spi_device	*spi,unsigned int mode)
+{
+	unsigned int regval = 0;
+	ktime_t k_time_start = 0;
+	ktime_t diff_ns = 0;
+	struct zx29_spi *zx29spi = spi_master_get_devdata(spi->master);
+
+	regval = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & (~SPI_COM_CTRL_MASK_SSPE);
+	writel(regval, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
+
+	regval = readl((SPI_FMT_CTRL_OFFSET+zx29spi->virtbase)) & (~(SPI_FMT_CTRL_MASK_POL|SPI_FMT_CTRL_MASK_PHA));
+	printk("val set before: 0x%x \n",regval);
+	switch(mode){
+		case 0:
+			break;
+		case 1:
+			regval |= SPI_FMT_CTRL_MASK_PHA;
+			break;
+		case 2:
+			regval |= SPI_FMT_CTRL_MASK_POL;
+			break;
+		case 3:
+			regval |= (SPI_FMT_CTRL_MASK_POL|SPI_FMT_CTRL_MASK_PHA);
+			break;
+		default:
+			break;
+	}	
+	writel(regval, (SPI_FMT_CTRL_OFFSET+zx29spi->virtbase));
+
+	writel(readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) | SPI_COM_CTRL_MASK_SSPE,  (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
+	//while(((readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase))>> 4)&0x1) == 0);
+	k_time_start = ktime_get();
+	do {
+		diff_ns = ktime_sub(ktime_get(),k_time_start);
+		regval = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase))>> 4 & 0x1;
+		cpu_relax();
+	}
+	while (!regval && diff_ns < 100000000); //100ms
+	if(diff_ns >= 100000000)
+		dev_info(&zx29spi->pdev->dev, "wait sspe timeout, slave_mode_set failed! diff_ns= 0x%x \n",diff_ns);
+	else
+		printk("val set after: 0x%x \n",readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)));
+	return;
+}
+
+/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck start */
+#define SSP0_PARA_BASE_ADDR 0x1400030
+#define SSP1_PARA_BASE_ADDR 0x1400048
+#define SSP_MASK_SW_WRST	(0x1L << 9)
+#define SSP_MASK_SW_PRST	(0x1L << 8)
/*
 * zx29_slave_ctrl_reset - hard-reset the SSP controller via its platform
 * reset bits (software write-reset / peripheral-reset in the SSPx_PARA
 * register block), used to recover a stuck slave-mode controller.
 *
 * Sequence: map the instance's PARA register, assert reset (clear the
 * WRST/PRST bits), poll (up to 100 ms) for the controller busy bit
 * (COM_CTRL bit 1) to clear, then deassert reset and wait 500 us for the
 * block to come back.  Always returns 0; failure is only logged.
 */
static int zx29_slave_ctrl_reset(struct zx29_spi *zx29spi)
{
    void __iomem *addr = NULL;
    ktime_t k_time_start = 0;
    ktime_t diff_ns = 0;
    volatile unsigned int val = 0;

	/* Pick the PARA register base matching this controller instance. */
	if(!strcmp(zx29spi->pdev->name,"1410000.ssp")) {
    addr = ioremap(SSP1_PARA_BASE_ADDR, 0x1000);
	}else{
		addr = ioremap(SSP0_PARA_BASE_ADDR, 0x1000);
	}

	if(addr){
        val = *(volatile unsigned int *)addr;
		//dev_info(&zx29spi->pdev->dev, "val = 0x%x 0x%x\n",val,(~(SSP_MASK_SW_WRST|SSP_MASK_SW_PRST)));
		/* Assert reset: active-low, so clear both reset bits. */
		*(volatile unsigned int *)addr =  val & (~(SSP_MASK_SW_WRST|SSP_MASK_SW_PRST));

        k_time_start = ktime_get();
		/* Wait for the controller busy flag (COM_CTRL bit 1) to drop. */
        do {
			diff_ns = ktime_sub(ktime_get(),k_time_start);
			val = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase))>> 1 & 0x1;
            cpu_relax();
		}while(val && diff_ns < 100000000); //100ms

		if(diff_ns >= 100000000)
            dev_info(&zx29spi->pdev->dev, "zx29_slave_assert_ctrl failed!!! \n");
        else
            dev_info(&zx29spi->pdev->dev, "zx29_slave_assert_ctrl success! \n");


		/* Deassert reset and give the block time to re-initialize. */
        val = *(volatile unsigned int *)addr;
		*(volatile unsigned int *)addr = val|(SSP_MASK_SW_WRST|SSP_MASK_SW_PRST);
        udelay(500);

		iounmap(addr);
    }

    return 0;
}
+
+static int zx29_slave_ctrl_reinit(struct zx29_spi *zx29spi)
+{
+    volatile unsigned int regval;
+		ktime_t k_time_start = 0;
+		ktime_t diff_ns = 0;
+
+    zx29_slave_ctrl_reset(zx29spi);
+
+    /* Disable SPI */
+		regval = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & (~SPI_COM_CTRL_MASK_SSPE);
+		writel(regval, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
+
+    load_spi_default_config(zx29spi);
+    writel(0, (SPI_TIMING_OFFSET + zx29spi->virtbase));
+
+		if(!strcmp(zx29spi->pdev->name,"1410000.ssp")) {
+			regval = readl((SPI_FMT_CTRL_OFFSET+zx29spi->virtbase))&(~(0x1<<12));
+			writel(regval, (SPI_FMT_CTRL_OFFSET+zx29spi->virtbase));
+			dev_info(&zx29spi->pdev->dev," %s set non-camera mode regval:0x%x \n",zx29spi->pdev->name,regval);
+    }
+
+		writel(readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) | SPI_COM_CTRL_MASK_SSPE,  (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
+		k_time_start = ktime_get();
+		do{
+			diff_ns = ktime_sub(ktime_get(),k_time_start);
+			regval = ((readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase))>> 4)&0x1);
+			cpu_relax();
+		}while((regval == 0) && diff_ns < 100000000);
+
+		if(diff_ns >= 100000000)
+			dev_info(&zx29spi->pdev->dev, "wait sspen timeout!!! \n");
+		else
+			dev_info(&zx29spi->pdev->dev,"ssp enabled \n",regval);
+
+    return 0;
+}
+/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck end */
+
/*
 * zx29_slave_do_interrupt_dma_transfer - run the current transfer via DMA
 * in slave mode.
 *
 * Sets up the tx/rx cursors from the already-mapped DMA addresses (the
 * RX buffer doubles as a dummy TX source when no TX buffer is given),
 * configures the DMA engine, then sleeps until the DMA completion
 * callback sets trans_done (interruptible via freezer, see T106BUG-616).
 * On interruption the DMA is torn down and the slave controller is fully
 * reinitialized.  After completion, polls (up to 100 ms) for the FIFO
 * busy flag to clear and checks for RX FIFO overrun when both directions
 * were running.
 *
 * Returns 0 on success, negative errno on interruption/overrun.
 */
static int zx29_slave_do_interrupt_dma_transfer(struct zx29_spi *zx29spi)
{
	struct spi_transfer *transfer = zx29spi->cur_transfer;
	int ret = 0;
	ktime_t k_time_start = 0;
	ktime_t diff_ns = 0;
	unsigned int fifo_sr = 0;
	/* Cursors track the DMA-mapped addresses, not the CPU buffers. */
	if((void *)transfer->tx_dma != NULL){
		zx29spi->tx = (void *)transfer->tx_dma;
		zx29spi->tx_end = zx29spi->tx + zx29spi->cur_transfer->len;
	}
	if((void *)transfer->rx_dma != NULL){
		zx29spi->rx = (void *)transfer->rx_dma;
		zx29spi->rx_end = zx29spi->rx + zx29spi->cur_transfer->len;

		/*if tx is null, use rx buffer as a dummy tx buffer.*/
		if((void *)transfer->tx_dma == NULL){
			zx29spi->tx = (void *)transfer->rx_dma;
			zx29spi->tx_end = zx29spi->tx + zx29spi->cur_transfer->len;
		}
	}

	zx29spi->write = zx29spi->tx ? zx29spi->cur_chip->write : WRITING_NULL;
	zx29spi->read = zx29spi->rx ? zx29spi->cur_chip->read : READING_NULL;

	/* If we're using DMA, set up DMA here */
	if (zx29spi->cur_chip->enable_dma) {
		/* Configure DMA transfer */
		zx29spi->trans_done = false; //yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck
		ret = configure_dma(zx29spi);
		if (ret) {
			dev_err(&zx29spi->pdev->dev, "configuration of DMA failed, fall back to interrupt mode\n");
			goto err_config_dma;
		}
	}

	if (zx29spi->cur_chip->enable_dma)
	{
		extern void spi_dev_send_dma_cfg_down(struct spi_device *spi);
		struct spi_device *spi = zx29spi->cur_msg->spi;
		/* Notify the remote side that our DMA is armed and ready. */
		spi_dev_send_dma_cfg_down(spi);
		/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck start */
		//down(&zx29spi->sema_dma);
		/* Freezable wait: a signal/freeze aborts the transfer and forces
		 * a full controller reinit so the slave cannot hang forever. */
		ret = wait_event_freezable(zx29spi->wait, zx29spi->trans_done);
		if(ret){
			terminate_dma(zx29spi);
			disable_irq_nosync(zx29spi->irq);
			zx29spi->dma_running = 0;
			zx29_slave_ctrl_reinit(zx29spi);
			goto err_config_dma;

		}
		/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck end */
		//printk("COM=0x%x,FMT=0x%x,FIFO_CTL=0x%x,FIFO_SR=0x%x\n",readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)),readl((SPI_FMT_CTRL_OFFSET+zx29spi->virtbase)),readl((SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase)),readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)));

		
		/* Wait (up to 100 ms) for the FIFO busy flag to drop. */
		k_time_start = ktime_get();
		do {
			diff_ns = ktime_sub(ktime_get(),k_time_start);
			fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;
			cpu_relax();
		}
		while (fifo_sr && diff_ns < 100000000); //100ms
		
		if(diff_ns >= 100000000) {
			fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;
			if(fifo_sr)
				dev_info(&zx29spi->pdev->dev, "bus busy... time_start=%lld diff_ns=%lld \n",k_time_start,diff_ns);
		}
		/* Bidirectional DMA armed the overflow IRQ: check for RX overrun. */
		if (zx29spi->dma_running == (TX_TRANSFER | RX_TRANSFER)) {
			u32 intr_status;
			intr_status = readl((SPI_INTR_SR_OFFSET+zx29spi->virtbase));
			if (intr_status & SPI_INTR_SR_SCLR_MASK_RX_OVERRUN_INTR) {
				terminate_dma(zx29spi);
				dev_err(&zx29spi->cur_msg->spi->dev, "spi rx fifo overflow status = %X!!\n", intr_status);
				ret = -EIO;
			} else
				disable_irq_nosync(zx29spi->irq);
		}
		zx29spi->dma_running = 0;
	}

err_config_dma:
	if(ret)
	{
		dev_err(&zx29spi->pdev->dev, "down_interruptible, ret=%d\n",ret);
	}
	return ret;
}
+
+
+static int zx29_do_interrupt_dma_transfer(struct zx29_spi *zx29spi)
+{
+	u32 irqflags = ENABLE_ALL_INTERRUPTS;
+	struct spi_transfer *transfer = zx29spi->cur_transfer;
+	int ret = 0;
+	static int sc_debug_info_record_cnt[4] ={0};
+	ktime_t k_time_start = 0;
+	ktime_t diff_ns = 0;
+	unsigned int fifo_sr = 0;
+	
+	if((void *)transfer->tx_dma != NULL){
+		zx29spi->tx = (void *)transfer->tx_dma;
+		zx29spi->tx_end = zx29spi->tx + zx29spi->cur_transfer->len;
+	}
+	if((void *)transfer->rx_dma != NULL){
+		zx29spi->rx = (void *)transfer->rx_dma;
+		zx29spi->rx_end = zx29spi->rx + zx29spi->cur_transfer->len;
+
+		/*if tx is null, use rx buffer as a dummy tx buffer.*/
+		if((void *)transfer->tx_dma == NULL){
+			zx29spi->tx = (void *)transfer->rx_dma;
+			zx29spi->tx_end = zx29spi->tx + zx29spi->cur_transfer->len;
+		}
+	}
+
+	zx29spi->write = zx29spi->tx ? zx29spi->cur_chip->write : WRITING_NULL;
+	zx29spi->read = zx29spi->rx ? zx29spi->cur_chip->read : READING_NULL;
+	//printk("zx29spi->cur_chip->enable_dma= 0x%x transfer->tx_dma=0x%x transfer->rx_dma=0x%x\n",zx29spi->cur_chip->enable_dma,transfer->tx_dma,transfer->rx_dma);
+	/* If we're using DMA, set up DMA here */
+	if (zx29spi->cur_chip->enable_dma) {
+		/* Configure DMA transfer */
+		ret = configure_dma(zx29spi);
+		if (ret) {
+			dev_err(&zx29spi->pdev->dev, "configuration of DMA failed, fall back to interrupt mode\n");
+			goto err_config_dma;
+		}
+		/* Disable interrupts in DMA mode, IRQ from DMA controller */
+		irqflags = DISABLE_ALL_INTERRUPTS;
+	}
+
+	/* config interrupts */
+	/* writel(irqflags, (SPI_INTR_EN_OFFSET+zx29spi->virtbase));	//spi interrupt mode is not supported. */
+
+	/* Enable SSP, turn on interrupts */
+//	writel(readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) | SPI_COM_CTRL_MASK_SSPE,  (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
+
+	if (zx29spi->cur_chip->enable_dma)
+	{
+		ret = down_timeout(&zx29spi->sema_dma, msecs_to_jiffies(1500));
+		//printk("COM=0x%x,FMT=0x%x,FIFO_CTL=0x%x,FIFO_SR=0x%x\n",readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)),readl((SPI_FMT_CTRL_OFFSET+zx29spi->virtbase)),readl((SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase)),readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)));
+		if (ret < 0) {
+			panic("spi transfer timeout,times(%d)\n",sc_debug_info_record_cnt[zx29spi->pdev->id]);
+			if(sc_debug_info_record_cnt[zx29spi->pdev->id] < 5) {
+				sc_debug_info_record(MODULE_ID_CAP_SPI, "%s transfer timeout:0x%x 0x%x 0x%x \n",zx29spi->pdev->name,readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)),
+									readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)),readl((SPI_INTR_SR_OFFSET+zx29spi->virtbase)));
+			}
+			sc_debug_info_record_cnt[zx29spi->pdev->id]++;
+		}
+		
+		k_time_start = ktime_get();
+		do {
+			diff_ns = ktime_sub(ktime_get(),k_time_start);
+			fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;
+			cpu_relax();
+		}
+		while (fifo_sr && diff_ns < 100000000); //100ms
+		
+		if(diff_ns >= 100000000) {
+			fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;
+			if(fifo_sr)
+				dev_info(&zx29spi->pdev->dev, "bus busy.... time_start=%lld diff_ns=%lld \n",k_time_start,diff_ns);
+		}
+
+		if (zx29spi->dma_running == (TX_TRANSFER | RX_TRANSFER)) {
+			u32 intr_status;
+			intr_status = readl((SPI_INTR_SR_OFFSET+zx29spi->virtbase));
+			if (intr_status & SPI_INTR_SR_SCLR_MASK_RX_OVERRUN_INTR) {
+				terminate_dma(zx29spi);
+				dev_err(&zx29spi->cur_msg->spi->dev, "spi rx fifo overflow status = %X!!\n", intr_status);
+				ret = -EIO;
+			} else
+				disable_irq_nosync(zx29spi->irq);
+		}
+		zx29spi->dma_running = 0;
+	}
+
+err_config_dma:
+	if(ret)
+	{
+		dev_err(&zx29spi->pdev->dev, "down_interruptible, ret=%d\n",ret);
+	}
+//	writel(readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & ~ SPI_COM_CTRL_MASK_SSPE, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
+	return ret;
+}
+
+
+static int zx29_do_polling_transfer(struct zx29_spi *zx29spi)
+{
+	struct spi_transfer *transfer = zx29spi->cur_transfer;
+	int ret = 0;
+	unsigned int fifo_sr = 0;	
+	ktime_t k_time_start = 0;
+	ktime_t diff_ns = 0;
+
+	dev_dbg(&zx29spi->pdev->dev, "polling transfer ongoing ...\n");
+
+	if (!zx29spi->tx && !zx29spi->rx) {
+		return ret;
+	}
+
+	k_time_start = ktime_get();
+	/*read and write*/
+	while ((zx29spi->tx < zx29spi->tx_end) || (zx29spi->rx < zx29spi->rx_end)) {
+		readwriter(zx29spi);
+		diff_ns = ktime_sub(ktime_get(),k_time_start);
+		if(diff_ns >= 1000000000) /*1s*/{
+			dev_info(&zx29spi->pdev->dev, "do_polling time out,diff_ns=%lld len=0x%x tx=0x%x tx_end=0x%x rx=0x%x rx_end=0x%x \n",
+													diff_ns,zx29spi->cur_transfer->len,zx29spi->tx,zx29spi->tx_end,zx29spi->rx,zx29spi->rx_end);
+			ret = -EIO;
+			break;
+		}
+	}
+	
+	k_time_start = ktime_get();
+	do {
+		diff_ns = ktime_sub(ktime_get(),k_time_start);
+		fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;
+		cpu_relax();
+		
+	}while (fifo_sr && diff_ns < 100000000); //100ms
+	
+	if(diff_ns >= 100000000) {
+		fifo_sr = readl((SPI_FIFO_SR_OFFSET+zx29spi->virtbase)) & SPI_FIFO_SR_MASK_BUSY;
+		if(fifo_sr) {
+			dev_info(&zx29spi->pdev->dev, "bus busy.. time_start=%lld diff_ns=%lld \n",k_time_start,diff_ns);
+			ret = -EIO;
+		}
+	}
+	return ret;
+}
+
/*
 * zx29_spi_map_mssg - DMA-map the buffers of every transfer in @msg.
 *
 * Skipped entirely when the message is already mapped, the device does
 * not use DMA, or the controller has DMA disabled.  Transfers that
 * already carry DMA addresses are left alone.  Mapping failures are
 * accumulated into @ret (callers only test for non-zero) and counted;
 * the first few failures per controller are persisted via
 * sc_debug_info_record.  If the RX map fails after a successful TX map,
 * the TX mapping is rolled back so the transfer falls to polling mode.
 *
 * Returns 0 on success, negative (ORed -ENOMEM) on any mapping failure.
 */
static int zx29_spi_map_mssg(struct zx29_spi *zx29spi,
						struct spi_message *msg)
{
	struct device *dev;
	struct spi_transfer *transfer;
	int ret = 0;
	/* per-controller failure counters, indexed by pdev->id (max 4) */
	static int sc_debug_info_record_tx_cnt[4] ={0};
	static int sc_debug_info_record_rx_cnt[4] ={0};
	
	if(!zx29spi || !msg)
		return -EFAULT;
	
	if (msg->is_dma_mapped || !msg->spi->dma_used || !zx29spi->master_info->enable_dma) {
		return 0;
	}
	dev = &zx29spi->pdev->dev;
	/* Map until end or first fail */
	list_for_each_entry(transfer, &msg->transfers, transfer_list) {
	
		if (/*transfer->len <= zx29spi->vendor->fifodepth ||*/ transfer->tx_dma || transfer->rx_dma )
			continue;

		if (transfer->tx_buf != NULL) {
			transfer->tx_dma = dma_map_single(dev,(void *)transfer->tx_buf, transfer->len, DMA_TO_DEVICE);
			if (dma_mapping_error(dev, transfer->tx_dma)) {
				dev_err(dev, "dma_map_single spi Tx failed,times(%d)\n",sc_debug_info_record_tx_cnt[zx29spi->pdev->id]);
				if(sc_debug_info_record_tx_cnt[zx29spi->pdev->id] < 5)
					sc_debug_info_record(MODULE_ID_CAP_SPI, "%s tx_dma_map failed \n",zx29spi->pdev->name);
				transfer->tx_dma = 0;
				ret |= -ENOMEM;
				sc_debug_info_record_tx_cnt[zx29spi->pdev->id]++;
			}
		}

		if (transfer->rx_buf != NULL) {
			transfer->rx_dma = dma_map_single(dev, transfer->rx_buf, transfer->len, DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, transfer->rx_dma)) {
				dev_err(dev, "dma_map_single spi Rx failed,times(%d)\n",sc_debug_info_record_rx_cnt[zx29spi->pdev->id]);
				if(sc_debug_info_record_rx_cnt[zx29spi->pdev->id] < 5)
					sc_debug_info_record(MODULE_ID_CAP_SPI, "%s rx_dma_map failed \n",zx29spi->pdev->name);
				transfer->rx_dma = 0;
				ret |= -ENOMEM;
				sc_debug_info_record_rx_cnt[zx29spi->pdev->id]++;
			}

			/* RX map failed: undo the TX mapping so neither side is DMA. */
			if (!transfer->rx_dma && transfer->tx_dma && transfer->tx_buf) {
				dma_unmap_single(dev, transfer->tx_dma,	transfer->len, DMA_TO_DEVICE);
				transfer->tx_dma = 0;
			}
		}
	}
	
			
	return ret;
}
+
/*
 * zx29_spi_unmap_mssg - undo the DMA mappings created by
 * zx29_spi_map_mssg() for every transfer in @msg.
 *
 * Mirrors the map-side guards: no-op when the message was pre-mapped,
 * the device does not use DMA, or the controller has DMA disabled.
 */
static void zx29_spi_unmap_mssg(struct zx29_spi *zx29spi,
						struct spi_message *msg)
{
	struct device *dev = &zx29spi->pdev->dev;
	struct spi_transfer *transfer;

	if (msg->is_dma_mapped || !msg->spi->dma_used || !zx29spi->master_info->enable_dma)
		return;

	list_for_each_entry(transfer, &msg->transfers, transfer_list) {

		/* NOTE(review): this skips the WHOLE transfer when either side has
		 * a DMA address without a CPU buffer (i.e. not mapped by us) —
		 * presumably to leave caller-supplied mappings alone; confirm a
		 * mixed transfer (our tx map + caller rx map) cannot occur, else
		 * the tx mapping would leak here. */
		if ( (!transfer->tx_buf && transfer->tx_dma) || (! transfer->rx_buf &&  transfer->rx_dma) )
			continue;

		if (transfer->rx_buf != NULL && transfer->rx_dma)
			dma_unmap_single(dev, transfer->rx_dma,	transfer->len, DMA_FROM_DEVICE);

		if (transfer->tx_buf != NULL && transfer->tx_dma)
			dma_unmap_single(dev, transfer->tx_dma,	transfer->len, DMA_TO_DEVICE);
	}
}
+
/*
 * zx29_slave_transfer_one_message - spi_master transfer_one_message hook
 * for slave mode.
 *
 * DMA-maps the message, then walks its transfers: transfers without DMA
 * addresses (or chips configured for polling) go through writer()/reader()
 * FIFO polling; otherwise the TX/RX DMA-enable bits are set in the FIFO
 * control register and the transfer runs through
 * zx29_slave_do_interrupt_dma_transfer().  No chip-select or controller
 * enable toggling here — the remote master drives those lines.
 *
 * Returns 0 on success, negative errno on the first failed transfer;
 * the same status is also stored in msg->status before finalizing.
 */
static int zx29_slave_transfer_one_message(struct spi_master *master,
					  struct spi_message *msg)
{
	struct zx29_spi *zx29spi = spi_master_get_devdata(master);
	struct spi_device *spi = msg->spi;
	struct spi_transfer *transfer;
	unsigned	cs_change = 1;	/* unused in slave mode (no CS control) */
	const int	nsecs = 100;	/* unused in slave mode */
	int ret = 0;

	zx29spi->cur_msg = msg;

	/* Setup the SPI using the per chip configuration */
	zx29spi->cur_chip = spi_get_ctldata(msg->spi);
	ret = zx29_spi_map_mssg(zx29spi, msg);
	/* continue with polling mode */
	if(ret){
		dev_info(&zx29spi->pdev->dev, "ret = %d\n",ret);
		goto out;
	}
	//restore_state(zx29spi);
	list_for_each_entry(transfer, &msg->transfers, transfer_list) {

		/* Point the FIFO cursors at the CPU-visible buffers. */
		zx29spi->cur_transfer = transfer;
		if((void *)transfer->tx_buf != NULL){
			zx29spi->tx = (void *)transfer->tx_buf;
			zx29spi->tx_end = zx29spi->tx + zx29spi->cur_transfer->len;
		}
		else
			zx29spi->tx = zx29spi->tx_end =  NULL;

		if((void *)transfer->rx_buf != NULL){
			zx29spi->rx = (void *)transfer->rx_buf;
			zx29spi->rx_end = zx29spi->rx + zx29spi->cur_transfer->len;
#if 0
			/*if tx is null, use rx buffer as a dummy tx buffer.*/
			if((void *)transfer->tx_buf == NULL){
				zx29spi->tx = (void *)transfer->rx_buf;
				zx29spi->tx_end = zx29spi->tx + zx29spi->cur_transfer->len;
			}
#endif
		}
		else
			zx29spi->rx = zx29spi->rx_end =  NULL;

		zx29spi->write = zx29spi->tx ? zx29spi->cur_chip->write : WRITING_NULL;
		zx29spi->read = zx29spi->rx ? zx29spi->cur_chip->read : READING_NULL;

		/* Drop stale RX FIFO contents before receiving. */
		if (/*transfer->rx_buf || */transfer->rx_dma)
			flush(zx29spi);
		
		writel(CLEAR_ALL_INTERRUPTS, (SPI_INTR_SR_OFFSET+zx29spi->virtbase));
		
		/* Unmapped transfers (or polling chips) use FIFO polling. */
		if (zx29spi->cur_chip->xfer_type == POLLING_TRANSFER || (!transfer->tx_dma && !transfer->rx_dma)) {
			if (zx29spi->tx < zx29spi->tx_end) 
				zx29spi->cur_transfer->len = writer(zx29spi);
			if(zx29spi->rx < zx29spi->rx_end)
				zx29spi->cur_transfer->len = reader(zx29spi);
#if defined(CONFIG_DEBUG_FS)
				zx29spi->spi_poll_cnt ++;
#endif
		} else {
			struct chip_data *chip = zx29spi->cur_chip;
			/* Enable the FIFO DMA request lines needed for this direction. */
			if (transfer->rx_buf || transfer->rx_dma) {
				writel((chip->fifo_ctrl | (SPI_FIFO_CTRL_MASK_RX_DMA_EN | SPI_FIFO_CTRL_MASK_TX_DMA_EN)),
						(SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
			} else {
				writel((chip->fifo_ctrl | SPI_FIFO_CTRL_MASK_TX_DMA_EN), (SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
			}
			ret = zx29_slave_do_interrupt_dma_transfer(zx29spi);
#if defined(CONFIG_DEBUG_FS)
			zx29spi->spi_dma_cnt ++;
#endif

			/* clear TX/RX DMA Enable */
			writel(chip->fifo_ctrl, (SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
		}

		if (ret) {
			pr_info("ssp:transfer error,transfer=%p\n", transfer);
			break;
		}

		/* Update total byte transferred */
		msg->actual_length += zx29spi->cur_transfer->len;

		if (transfer->delay_usecs)
			udelay(transfer->delay_usecs);
	}
out:
	zx29_spi_unmap_mssg(zx29spi, msg);
	msg->status = ret;
	spi_finalize_current_message(master);

#if SPI_PSM_CONTROL
	zx29_spi_set_idle(&zx29spi->psm_lock);
#endif

	return ret;
}
+
/*
 * zx29_transfer_one_message - spi_master transfer_one_message hook for
 * master mode.
 *
 * Holds a wakeup reference for the whole message.  Sets the controller
 * clock to 2x the device's max_speed_hz, restores per-chip register
 * state, DMA-maps the message, enables the controller (waiting up to
 * 100 ms for the enable bit to propagate), then walks the transfers:
 * each goes through either FIFO polling or DMA, with chip-select
 * asserted/deasserted around it per cs_change.  Finally the controller
 * is disabled again (with the same 100 ms readback wait) and the message
 * is finalized with its status.
 *
 * Returns 0 on success, negative errno on the first failed transfer.
 */
static int zx29_transfer_one_message(struct spi_master *master,
				      struct spi_message *msg)
{
	struct zx29_spi *zx29spi = spi_master_get_devdata(master);
	struct spi_device *spi = msg->spi;
	struct spi_transfer *transfer;
	unsigned	cs_change = 1;	/* assert CS before the first transfer */
	const int	nsecs = 100;	/* CS deassert settle time */
	int	ret = 0;
	ktime_t k_time_start = 0;
	ktime_t diff_ns = 0;
	unsigned int reg_val = 0;
	
	/* Keep the device awake for the duration of the message. */
	pm_stay_awake(&zx29spi->pdev->dev);
	//printk(KERN_INFO "ssp:in function  %s \n", __FUNCTION__);
#if SPI_PSM_CONTROL
    zx29_spi_set_active(&zx29spi->psm_lock);
#endif
	//mutex_lock(&zx29spi->spi_lock);
	//printk(KERN_INFO "ssp:lock \n");
	/* Initial message state */
	zx29spi->cur_msg = msg;
	/* Setup the SPI using the per chip configuration */
	zx29spi->cur_chip = spi_get_ctldata(msg->spi);

	/* Controller clock runs at 2x the bus speed; retune if needed. */
	if ((clk_get_rate(zx29spi->spi_clk) / 2) != spi->max_speed_hz) {
		clk_set_rate(zx29spi->spi_clk, spi->max_speed_hz * 2);
	}

	restore_state(zx29spi);

	ret = zx29_spi_map_mssg(zx29spi, msg);
	/* continue with polling mode */
	if(ret){
		dev_info(&zx29spi->pdev->dev, "ret = %d\n",ret);
		goto out;
	}
	

	/* Wait (up to 100 ms) for the enable-readback bit to clear before
	 * turning the controller on. */
	//while (readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & SPI_COM_CTRL_MASK_SSPE_BACK);
	k_time_start = ktime_get();
	do {
		diff_ns = ktime_sub(ktime_get(),k_time_start);
		reg_val = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & SPI_COM_CTRL_MASK_SSPE_BACK;
		cpu_relax();
	}
	while (reg_val && diff_ns < 100000000); //100ms
	
	if(diff_ns >= 100000000) {
		reg_val = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & SPI_COM_CTRL_MASK_SSPE_BACK;
		if(reg_val) {
			dev_info(&zx29spi->pdev->dev, "wait sspe back time_out diff_ns=%lld \n",diff_ns);
			goto out;
		}
	}

	/* Enable the controller for the whole message. */
	writel(readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) | SPI_COM_CTRL_MASK_SSPE,  (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));

	list_for_each_entry(transfer, &msg->transfers, transfer_list) {

		zx29spi->cur_transfer = transfer;
		//if (transfer->bits_per_word || transfer->speed_hz)
		//	dev_warn(&msg->spi->dev, "ignore bits & speed setting in transfer.");

		/* Point the FIFO cursors at the CPU-visible buffers. */
		if((void *)transfer->tx_buf != NULL){
			zx29spi->tx = (void *)transfer->tx_buf;
			zx29spi->tx_end = zx29spi->tx + zx29spi->cur_transfer->len;
		}
		else
			zx29spi->tx = zx29spi->tx_end =  NULL;

		if((void *)transfer->rx_buf != NULL){
			zx29spi->rx = (void *)transfer->rx_buf;
			zx29spi->rx_end = zx29spi->rx + zx29spi->cur_transfer->len;

			/*if tx is null, use rx buffer as a dummy tx buffer.*/
			if((void *)transfer->tx_buf == NULL){
				zx29spi->tx = (void *)transfer->rx_buf;
				zx29spi->tx_end = zx29spi->tx + zx29spi->cur_transfer->len;
			}
		}
		else
			zx29spi->rx = zx29spi->rx_end =  NULL;

		zx29spi->write = zx29spi->tx ? zx29spi->cur_chip->write : WRITING_NULL;
		zx29spi->read = zx29spi->rx ? zx29spi->cur_chip->read : READING_NULL;

		/* Drop stale RX FIFO contents before receiving. */
		if (transfer->rx_buf || transfer->rx_dma)
			flush(zx29spi);
		
		writel(CLEAR_ALL_INTERRUPTS, (SPI_INTR_SR_OFFSET+zx29spi->virtbase));
		if (cs_change) {
			zx29spi->cur_chip->cs_control(zx29spi->pdev->id,ZX29_CS_ACTIVE);			
		} 
		
		cs_change = transfer->cs_change;

		/* Unmapped transfers (or polling chips) use FIFO polling. */
		if (zx29spi->cur_chip->xfer_type == POLLING_TRANSFER || (!transfer->tx_dma && !transfer->rx_dma)) {

			ret = zx29_do_polling_transfer(zx29spi);
			#if defined(CONFIG_DEBUG_FS)
				zx29spi->spi_poll_cnt ++;
			#endif
		} else {
			struct chip_data *chip = zx29spi->cur_chip;

			/* Enable the FIFO DMA request lines needed for this direction. */
			if (transfer->rx_buf || transfer->rx_dma) {
				writel((chip->fifo_ctrl | (SPI_FIFO_CTRL_MASK_RX_DMA_EN | SPI_FIFO_CTRL_MASK_TX_DMA_EN)),
						(SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
			} else {
				writel((chip->fifo_ctrl | SPI_FIFO_CTRL_MASK_TX_DMA_EN), (SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
			}

			ret = zx29_do_interrupt_dma_transfer(zx29spi);
			#if defined(CONFIG_DEBUG_FS)
				zx29spi->spi_dma_cnt ++;
			#endif

			/* clear TX/RX DMA Enable */
			writel(chip->fifo_ctrl, (SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
		}

		if (ret) {
			pr_info("ssp:transfer error,transfer=%p\n", transfer);
			break;
		}

		/* Update total byte transferred */
		msg->actual_length += zx29spi->cur_transfer->len;

		if (transfer->delay_usecs)
			udelay(transfer->delay_usecs);

		if (cs_change) {
			zx29spi->cur_chip->cs_control(zx29spi->pdev->id,ZX29_CS_INACTIVE);
			ndelay(nsecs);
		}
	}
	/* Make sure CS ends up deasserted on error or when the last transfer
	 * did not request cs_change. */
	if (ret || !cs_change) {
		zx29spi->cur_chip->cs_control(zx29spi->pdev->id,ZX29_CS_INACTIVE);
	}
	/* Wait (up to 100 ms) for the enable-readback bit before disabling. */
	//while (~ readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & SPI_COM_CTRL_MASK_SSPE_BACK);	
	k_time_start = ktime_get();
	do {
		diff_ns = ktime_sub(ktime_get(),k_time_start);
		reg_val = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & SPI_COM_CTRL_MASK_SSPE_BACK;
		cpu_relax();
	}
	while (!reg_val && diff_ns < 100000000); //100ms
	
	if(diff_ns >= 100000000) {
		reg_val = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & SPI_COM_CTRL_MASK_SSPE_BACK;
		if(!reg_val) {
			dev_info(&zx29spi->pdev->dev, "wait sspe back time_out diff_ns=%lld \n",diff_ns);
			goto out;
		}
	}
	writel(readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & ~ SPI_COM_CTRL_MASK_SSPE, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));

out:
	zx29_spi_unmap_mssg(zx29spi, msg);
	//mutex_unlock(&zx29spi->spi_lock);
	//printk(KERN_INFO "ssp:unlock \n");

	msg->status = ret;
	spi_finalize_current_message(master);

#if SPI_PSM_CONTROL
    zx29_spi_set_idle(&zx29spi->psm_lock);
#endif
	pm_relax(&zx29spi->pdev->dev);
	return ret;
}
+
+/* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme start */
+#define SPI_SLVAE_RX_BUFF_SIZE        4096
+#define SPI_SLVAE_RX_MAX_PACK_NUM     15
+#define SPI_SLVAE_RX_PACK_LEN         146
+#define SPI_SLVAE_RX_LIST_BUFF_LEN    (SPI_SLVAE_RX_MAX_PACK_NUM*SPI_SLVAE_RX_PACK_LEN)
+static dma_channel_def slave_rx_conf[SPI_SLVAE_RX_MAX_PACK_NUM] = {0};
+//yu.dong@20240617 [T106BUG-641] SPI packet loss issue, increase kernel buffer and read all cached data away, no data loss.
+#define SPI_MAGIC 0x55555555
+static bool rxbuf_is_free_space(struct spi_device *spi)
+{
+	if (spi->recv_pos < spi->rd_pos) {
+        if ((spi->rd_pos - spi->recv_pos) > SPI_SLVAE_RX_PACK_LEN)
+			return 1;
+		else
+			return 0;
+	}
+	else {
+		if ((SPI_SLVAE_RX_BUFF_SIZE - spi->recv_pos + spi->rd_pos ) > SPI_SLVAE_RX_PACK_LEN)
+			return 1;
+		else
+			return 0;
+	}
+}
+
+/* yu.dong@20240617 [T106BUG-641] SPI packet loss issue, increase kernel buffer and read all cached data away, no data loss start */
/*
 * DMA cyclic-transfer completion callback for the slave RX path.
 *
 * The DMA engine fills cyc_buf as a ring of SPI_SLVAE_RX_MAX_PACK_NUM
 * fixed-size packets. The last 4 bytes of each packet slot are pre-seeded
 * with SPI_MAGIC (see zx29_slave_config_dma()); when the DMA overwrites a
 * slot, that marker is destroyed, which is how this callback detects a
 * freshly received packet. Each consumed slot is copied into the larger
 * rx_buf circular buffer and its marker is restored.
 *
 * NOTE(review): spi_poll_cnt / spi_dma_cnt appear to be reused here as
 * callback/packet counters for debugging — confirm against the debugfs code.
 */
static void dma_cyclic_callback(void *data)
{
	struct spi_device *spi = (struct spi_device *)data;
	struct zx29_spi *zx29spi = NULL;
	int index = 0;
	unsigned int end = 0;

	zx29spi = spi_master_get_devdata(spi->master);
	zx29spi->spi_poll_cnt++;
	/* Read the marker word of the slot the consumer cursor points at. */
	end = *(volatile unsigned int *)(spi->cyc_buf +spi->cyc_index * SPI_SLVAE_RX_PACK_LEN + SPI_SLVAE_RX_PACK_LEN - 4);
	/* Drain every slot whose marker was clobbered, at most one full ring. */
	while((end != SPI_MAGIC) && index < SPI_SLVAE_RX_MAX_PACK_NUM) {
		if(!rxbuf_is_free_space(spi)) {
			/* rx_buf full: drop the remaining packets this round. */
			printk("rx_buff not enough space!!!!!");
			zx29spi->spi_dma_cnt++;
			break;
		}else {
			/* Copy one packet into rx_buf, wrapping at the buffer end. */
			if((spi->recv_pos + SPI_SLVAE_RX_PACK_LEN) <= SPI_SLVAE_RX_BUFF_SIZE) {
				memcpy(spi->rx_buf + spi->recv_pos,spi->cyc_buf + spi->cyc_index * SPI_SLVAE_RX_PACK_LEN,SPI_SLVAE_RX_PACK_LEN);
			}else {
				memcpy(spi->rx_buf + spi->recv_pos,spi->cyc_buf + spi->cyc_index * SPI_SLVAE_RX_PACK_LEN,SPI_SLVAE_RX_BUFF_SIZE - spi->recv_pos);
				memcpy(spi->rx_buf,spi->cyc_buf + spi->cyc_index * SPI_SLVAE_RX_PACK_LEN + (SPI_SLVAE_RX_BUFF_SIZE - spi->recv_pos),SPI_SLVAE_RX_PACK_LEN-(SPI_SLVAE_RX_BUFF_SIZE-spi->recv_pos));
			}
			/* Re-arm the slot marker so the next DMA pass is detectable. */
			*(volatile unsigned int *)(spi->cyc_buf +spi->cyc_index * SPI_SLVAE_RX_PACK_LEN + SPI_SLVAE_RX_PACK_LEN - 4) = SPI_MAGIC;
			spi->recv_pos = (spi->recv_pos + SPI_SLVAE_RX_PACK_LEN)%SPI_SLVAE_RX_BUFF_SIZE;
			spi->cyc_index = (spi->cyc_index + 1)%SPI_SLVAE_RX_MAX_PACK_NUM;

			zx29spi->spi_dma_cnt++;
			index++;
			end = *(volatile unsigned int *)(spi->cyc_buf +spi->cyc_index * SPI_SLVAE_RX_PACK_LEN + SPI_SLVAE_RX_PACK_LEN - 4);
		}

		/* Wake a blocked reader exactly once per drain cycle. */
		if(spi->is_rd_waiting == true && spi->recv_done == 0) {
			wake_up(&spi->rd_wait);
			spi->recv_done = 1;
		}
	}
	/* Every slot marker gone: DMA lapped us and data was overwritten. */
	if((end != SPI_MAGIC) && index == SPI_SLVAE_RX_MAX_PACK_NUM)
		printk("cyc_buf be covered!!!!!");
	return;
}
+/* yu.dong@20240617 [T106BUG-641] SPI packet loss issue, increase kernel buffer and read all cached data away, no data loss end */
+
/*
 * Configure and start the cyclic RX DMA used in SPI slave mode.
 *
 * Builds one dma_channel_def descriptor per ring slot (burst length from
 * the RX FIFO trigger level, burst size from the word width), seeds each
 * slot's trailing SPI_MAGIC marker, then submits a cyclic transfer whose
 * completion callback is dma_cyclic_callback().
 *
 * Returns 0 on success, -ENODEV if no RX channel exists, -EBUSY if the
 * cyclic descriptor cannot be prepared.
 */
static int zx29_slave_config_dma(struct zx29_spi *zx29spi,struct spi_device *spi)
{
	struct chip_data *chip = NULL;
	struct dma_chan *rxchan = NULL;
	struct dma_async_tx_descriptor *rxdesc;
	unsigned short transfer_len = SPI_SLVAE_RX_PACK_LEN;
	int i;

	chip = zx29spi->cur_chip = spi->controller_state;

	/* Drain any stale data, clear interrupts, enable RX DMA requests. */
	if (spi->rx_dma)
		flush(zx29spi);
	writel(CLEAR_ALL_INTERRUPTS, (SPI_INTR_SR_OFFSET+zx29spi->virtbase));
	writel((chip->fifo_ctrl | SPI_FIFO_CTRL_MASK_RX_DMA_EN), (SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));

	zx29spi->write = zx29spi->tx ? zx29spi->cur_chip->write : WRITING_NULL;
	zx29spi->read = zx29spi->rx ? zx29spi->cur_chip->read : READING_NULL;
	rxchan = zx29spi->dma_rx_channel;
	/* Check that the channels are available */
	if (!rxchan)
		return -ENODEV;

	/*
	 * If supplied, the DMA burstsize should equal the FIFO trigger level.
	 * Notice that the DMA engine uses one-to-one mapping. Since we can
	 * not trigger on 2 elements this needs explicit mapping rather than
	 * calculation.
	 */
	for(i = 0;i < SPI_SLVAE_RX_MAX_PACK_NUM;i++) {
		/* Burst length follows the configured RX FIFO trigger level. */
		switch (zx29spi->rx_lev_trig) {
		case SPI_RX_1_OR_MORE_ELEM:
			slave_rx_conf[i].dma_control.src_burst_len = DMA_BURST_LEN_1;
			slave_rx_conf[i].dma_control.dest_burst_len = DMA_BURST_LEN_1;
			break;
		case SPI_RX_4_OR_MORE_ELEM:
			slave_rx_conf[i].dma_control.src_burst_len = DMA_BURST_LEN_4;
			slave_rx_conf[i].dma_control.dest_burst_len = DMA_BURST_LEN_4;
			break;
		case SPI_RX_8_OR_MORE_ELEM:
			slave_rx_conf[i].dma_control.src_burst_len = DMA_BURST_LEN_8;
			slave_rx_conf[i].dma_control.dest_burst_len = DMA_BURST_LEN_8;
			break;
		case SPI_RX_16_OR_MORE_ELEM:
			slave_rx_conf[i].dma_control.src_burst_len = DMA_BURST_LEN_16;
			slave_rx_conf[i].dma_control.dest_burst_len = DMA_BURST_LEN_16;
			break;
		case SPI_RX_32_OR_MORE_ELEM:
			slave_rx_conf[i].dma_control.src_burst_len = DMA_BURST_LEN_ALL;
			slave_rx_conf[i].dma_control.dest_burst_len = DMA_BURST_LEN_ALL;
			break;
		default:
			/* Unknown trigger level: fall back to half the FIFO depth. */
			slave_rx_conf[i].dma_control.src_burst_len = zx29spi->vendor->fifodepth >> 1;
			slave_rx_conf[i].dma_control.dest_burst_len = zx29spi->vendor->fifodepth >> 1;
			break;
		}

		/* Burst element size follows the read word width. */
		switch (zx29spi->read) {
		case READING_NULL:
			/* Use the same as for writing */
			slave_rx_conf[i].dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
			slave_rx_conf[i].dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
			slave_rx_conf[i].count	  = transfer_len;
			break;
		case READING_U8:
			slave_rx_conf[i].dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
			slave_rx_conf[i].dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
			slave_rx_conf[i].count	  = transfer_len;
			break;
		case READING_U16:
			slave_rx_conf[i].dma_control.src_burst_size = DMA_BURST_SIZE_16BIT;
			slave_rx_conf[i].dma_control.dest_burst_size = DMA_BURST_SIZE_16BIT;
			slave_rx_conf[i].count	  = transfer_len;
			break;
		case READING_U32:
			slave_rx_conf[i].dma_control.src_burst_size = DMA_BURST_SIZE_32BIT;
			slave_rx_conf[i].dma_control.dest_burst_size = DMA_BURST_SIZE_32BIT;
			slave_rx_conf[i].count	  = transfer_len;
			break;
		}

		/* Peripheral-to-memory: SPI data register -> slot i of the ring. */
		slave_rx_conf[i].src_addr  = (SPI_DR_OFFSET+zx29spi->phybase);
		slave_rx_conf[i].dma_control.tran_mode = TRAN_PERI_TO_MEM;
		slave_rx_conf[i].dma_control.irq_mode  = DMA_ALL_IRQ_ENABLE;
		slave_rx_conf[i].dest_addr = (unsigned int)spi->rx_dma + transfer_len*i;
		slave_rx_conf[i].link_addr = 1;
		/* Seed the slot marker; the DMA clobbers it when data arrives. */
		*(volatile unsigned int *)(spi->cyc_buf + transfer_len*i + transfer_len -4) = SPI_MAGIC;
	}

	dmaengine_slave_config(rxchan,(struct dma_slave_config*)&slave_rx_conf[0]);

	/* Submit and fire RX and TX with TX last so we're ready to read! */
	if (spi->rx_dma) {
		rxdesc = rxchan->device->device_prep_dma_cyclic(rxchan,NULL,SPI_SLVAE_RX_MAX_PACK_NUM * SPI_SLVAE_RX_PACK_LEN, SPI_SLVAE_RX_PACK_LEN,0,0);
		if (!rxdesc) {
			printk(KERN_INFO "!!ERROR DESC !!![%s][%d]\n",__func__,__LINE__);
			dmaengine_terminate_all(rxchan);
			return -EBUSY;
		}
		/* Put the callback on the RX transfer only, that should finish last */
		rxdesc->callback = dma_cyclic_callback;
		rxdesc->callback_param = spi;
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(rxchan);
		zx29spi->dma_running = RX_TRANSFER;
	}
	return 0;
}
+
+static int zx29_slave_rd_start(struct spi_device *spi)
+{
+	struct zx29_spi *zx29spi = NULL;
+	struct device *dev;
+	int status = 0;
+	static int wd_wait_queue_init = 0;
+
+	printk("zx29_slave_rd_start...\r\n");
+
+	zx29spi = spi_master_get_devdata(spi->master);
+	dev = &zx29spi->pdev->dev;
+	if (!zx29spi)
+		return -EINVAL;
+
+	spi->cyc_index = 0;
+	spi->rd_pos = spi->recv_pos = 0;
+
+	spi->cyc_buf = dma_alloc_coherent(dev, SPI_SLVAE_RX_BUFF_SIZE, &spi->rx_dma, GFP_KERNEL);
+	if (dma_mapping_error(dev, spi->rx_dma)) {
+		dev_err(dev, "dma_map_single spi rx failed\n");
+		return -ENOMEM;
+	}
+
+	if(wd_wait_queue_init == 0) {
+		init_waitqueue_head(&spi->rd_wait);
+		spi->recv_done = false;
+		spi->is_rd_waiting = false;
+		wd_wait_queue_init = 1;
+	}
+	status = zx29_slave_config_dma(zx29spi,spi);
+
+	return status;
+}
+
+static int zx29_slave_rd_stop(struct spi_device *spi)
+{
+	struct zx29_spi *zx29spi = NULL;
+	struct device *dev;
+	int status = 0;
+	struct chip_data *chip = NULL;
+	struct dma_chan *rxchan = NULL;
+
+	zx29spi = spi_master_get_devdata(spi->master);
+	dev = &zx29spi->pdev->dev;
+	if (!zx29spi)
+		return -EINVAL;
+
+	chip = zx29spi->cur_chip= spi->controller_state;
+	writel(chip->fifo_ctrl, (SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
+	rxchan = zx29spi->dma_rx_channel;
+	/* Submit and fire RX and TX with TX last so we're ready to read! */
+	if(spi->rx_dma) {
+		dmaengine_terminate_all(rxchan);
+		zx29spi->dma_running = 0;
+	}
+
+	if(spi->cyc_buf != NULL && spi->rx_dma) {
+		dma_free_coherent(dev, SPI_SLVAE_RX_BUFF_SIZE, spi->cyc_buf, spi->rx_dma);
+		spi->cyc_buf = NULL;
+	}
+
+	spi->cyc_index = 0;
+	spi->rd_pos = spi->recv_pos = 0;
+	spi->recv_done = false;
+	spi->is_rd_waiting = false;
+	printk("zx29_slave_rd_stop...\r\n");
+
+	return status;
+}
+/* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme end */
+
/* SPI core hook: nothing to power up before a transfer batch. */
static int zx29_prepare_transfer_hardware(struct spi_master *master)
{
	return 0;
}
+
/*
 * SPI core hook: nothing to power down after a transfer batch — the
 * controller is disabled at the end of the message pump instead.
 */
static int zx29_unprepare_transfer_hardware(struct spi_master *master)
{
	return 0;
}
+
+static int verify_controller_parameters(struct zx29_spi *zx29spi,
+				struct spi_config_chip const *chip_info)
+{
+	if ((chip_info->iface < SPI_INTERFACE_MOTOROLA_SPI)
+	    || (chip_info->iface > SPI_INTERFACE_ISI_SPI)) {
+		dev_err(&zx29spi->pdev->dev,
+			"interface is configured incorrectly\n");
+		return -EINVAL;
+	}
+
+	if ((chip_info->hierarchy != SPI_MASTER)
+	    && (chip_info->hierarchy != SPI_SLAVE)) {
+		dev_err(&zx29spi->pdev->dev,
+			"hierarchy is configured incorrectly\n");
+		return -EINVAL;
+	}
+	if ((chip_info->com_mode != INTERRUPT_TRANSFER)
+	    && (chip_info->com_mode != DMA_TRANSFER)
+	    && (chip_info->com_mode != POLLING_TRANSFER)) {
+		dev_err(&zx29spi->pdev->dev,
+			"Communication mode is configured incorrectly\n");
+		return -EINVAL;
+	}
+	switch (chip_info->rx_lev_trig) {
+	case SPI_RX_1_OR_MORE_ELEM:
+	case SPI_RX_4_OR_MORE_ELEM:
+	case SPI_RX_8_OR_MORE_ELEM:
+		/* These are always OK, all variants can handle this */
+		break;
+	case SPI_RX_16_OR_MORE_ELEM:
+		if (zx29spi->vendor->fifodepth < 16) {
+			dev_err(&zx29spi->pdev->dev,
+			"RX FIFO Trigger Level is configured incorrectly\n");
+			return -EINVAL;
+		}
+		break;
+	case SPI_RX_32_OR_MORE_ELEM:
+		if (zx29spi->vendor->fifodepth < 32) {
+			dev_err(&zx29spi->pdev->dev,
+			"RX FIFO Trigger Level is configured incorrectly\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		dev_err(&zx29spi->pdev->dev,
+			"RX FIFO Trigger Level is configured incorrectly\n");
+		return -EINVAL;
+		break;
+	}
+	switch (chip_info->tx_lev_trig) {
+	case SPI_TX_1_OR_MORE_EMPTY_LOC:
+	case SPI_TX_4_OR_MORE_EMPTY_LOC:
+	case SPI_TX_8_OR_MORE_EMPTY_LOC:
+		/* These are always OK, all variants can handle this */
+		break;
+	case SPI_TX_16_OR_MORE_EMPTY_LOC:
+		if (zx29spi->vendor->fifodepth < 16) {
+			dev_err(&zx29spi->pdev->dev,
+			"TX FIFO Trigger Level is configured incorrectly\n");
+			return -EINVAL;
+		}
+		break;
+	case SPI_TX_32_OR_MORE_EMPTY_LOC:
+		if (zx29spi->vendor->fifodepth < 32) {
+			dev_err(&zx29spi->pdev->dev,
+			"TX FIFO Trigger Level is configured incorrectly\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		dev_err(&zx29spi->pdev->dev,
+			"TX FIFO Trigger Level is configured incorrectly\n");
+		return -EINVAL;
+		break;
+	}
+
+	return 0;
+}
+
/* ARM-variant controller capabilities: 16-entry FIFO, up to 32 bits per
 * word, hardware loopback supported. */
static struct vendor_data vendor_arm = {
	.fifodepth = 16,
	.max_bpw = 32,
	.loopback = true,
};
+
+/*
+ * A piece of default chip info unless the platform
+ * supplies it.
+ */
+static const struct spi_config_chip spi_default_chip_info = {
+	.com_mode = DMA_TRANSFER,//INTERRUPT_TRANSFER,//POLLING_TRANSFER,
+	.iface = SPI_INTERFACE_MOTOROLA_SPI,
+	.hierarchy = SPI_MASTER,
+	.slave_tx_disable = DO_NOT_DRIVE_TX,
+	.rx_lev_trig = SPI_RX_4_OR_MORE_ELEM,
+	.tx_lev_trig = SPI_TX_4_OR_MORE_EMPTY_LOC,
+//	.ctrl_len = SSP_BITS_8,
+//	.wait_state = SSP_MWIRE_WAIT_ZERO,
+//	.duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
+//	.cs_control = default_cs_control,
+};
+
+/*
+
+*/
+static void spi_set_gpio_function(int dev_id)
+{
+	if (pinctrl_select_state(ssp_pins[dev_id].pctrl, ssp_pins[dev_id].pfunc) < 0) {
+		printk("spi%d setting spi pin ctrl failed\n",dev_id);
+	}
+	return;
+}
+static void spi_set_gpio_gpio(int dev_id)
+{
+	if (pinctrl_select_state(ssp_pins[dev_id].pctrl, ssp_pins[dev_id].pgpio) < 0) {
+		printk("spi%d setting spi pin ctrl failed\n",dev_id);
+	}
+	return;
+}
+
+
/* Thin wrapper: drive a GPIO output to the given level. */
static void spi_set_gpio_val(int gpio_num, int val)
{
	gpio_set_value(gpio_num, val);
}
+
/* Thin wrapper: sample the current level of a GPIO line. */
static int spi_get_gpio_val(int gpio_num)
{
	return gpio_get_value(gpio_num);
}
+
/* Busy-wait for @delay microseconds (bit-bang timing). */
static void spi_time_delay(int delay/*us*/)
{
	udelay(delay);
}
+
/* Leave SPI function mode: hand the pads back to GPIO control. */
void spi_fun_mode_stop(int dev_id)
{
	spi_set_gpio_gpio(dev_id);
}
+
+void spi_gpio_mode_start(int dev_id)
+{
+	//mutex_lock(&g_zx29_spi->spi_lock); //spi control function mutex.
+	/* set clk tx rx cs to gpio */
+	//spi_set_gpio_gpio(dev_id);
+	gpio_direction_output(ssp_pins[dev_id].gpio_cs,SPI_GPIO_HIGH);
+	gpio_direction_output(ssp_pins[dev_id].gpio_clk,SPI_GPIO_LOW);
+	gpio_direction_output(ssp_pins[dev_id].gpio_tx,0);//value ?
+	gpio_direction_input(ssp_pins[dev_id].gpio_rx);
+
+	return ;
+}
+EXPORT_SYMBOL(spi_gpio_mode_start);
/* Leave GPIO bit-bang mode: restore the SPI function pinmux. */
void spi_gpio_mode_stop(int dev_id)
{
	spi_set_gpio_function(dev_id);
}
EXPORT_SYMBOL(spi_gpio_mode_stop);
+
+void spi_gpio_write_single8(int dev_id,unsigned char data)
+{
+    int i;
+
+	//printk("spi_gpio_write_single8 %x\n", data);
+
+	spi_set_gpio_val(ssp_pins[dev_id].gpio_clk, SPI_GPIO_HIGH);
+	spi_set_gpio_val(ssp_pins[dev_id].gpio_cs,  SPI_GPIO_LOW);/* CS invail*/
+
+	for( i=7; i>=0; i-- )
+	{
+		spi_set_gpio_val(ssp_pins[dev_id].gpio_clk, SPI_GPIO_LOW);
+		if ((data >> i) & 0x1)
+		{
+			spi_set_gpio_val(ssp_pins[dev_id].gpio_tx, SPI_GPIO_HIGH);
+		}
+		else
+		{
+			spi_set_gpio_val(ssp_pins[dev_id].gpio_tx, SPI_GPIO_LOW);
+		}
+		spi_time_delay(1);
+		spi_set_gpio_val(ssp_pins[dev_id].gpio_clk, SPI_GPIO_HIGH);
+		spi_time_delay(1);
+	}
+	spi_set_gpio_val(ssp_pins[dev_id].gpio_cs,  SPI_GPIO_HIGH);
+	spi_set_gpio_val(ssp_pins[dev_id].gpio_tx, SPI_GPIO_LOW);
+
+}
+EXPORT_SYMBOL(spi_gpio_write_single8);
+/*******************************************************************************
+ * Function:
+ * Description:
+ * Parameters:
+ *   Input:
+ *
+ *   Output:
+ *
+ * Returns:
+ *
+ *
+ * Others:
+ ********************************************************************************/
+unsigned char spi_gpio_read_single8(int dev_id)
+{
+	int i;
+	unsigned char readData = 0;
+
+	spi_set_gpio_val(ssp_pins[dev_id].gpio_clk, SPI_GPIO_HIGH);
+	spi_set_gpio_val(ssp_pins[dev_id].gpio_cs,  SPI_GPIO_LOW);/* CS */
+
+	for( i=7; i>=0; i-- )
+	{
+		spi_set_gpio_val(ssp_pins[dev_id].gpio_clk, SPI_GPIO_LOW);
+		spi_time_delay(1);
+		spi_set_gpio_val(ssp_pins[dev_id].gpio_clk, SPI_GPIO_HIGH);
+		if( spi_get_gpio_val(ssp_pins[dev_id].gpio_rx) )/* lcd tx rx */
+		{
+		    readData |= (1 << i);
+		}
+		spi_time_delay(1);
+	}
+	spi_set_gpio_val(ssp_pins[dev_id].gpio_cs,  SPI_GPIO_HIGH);
+
+	//printk("spi_gpio_read_single8 %x\n", readData);
+	return readData;
+}
+EXPORT_SYMBOL(spi_gpio_read_single8);
+
+/**
+ * @brief spi gpio mode, cs control
+ *
+ * This function used for lcd 3-wires spi mode.
+ * before cs pull down, spi pads will change to gpio mode.
+ * after cs pull high, spi pads gpio mode recovery to spi mode.
+ *
+ * @param level 0: cs line pull down, no-zero: cs line pull up.
+ *
+ * @retval none
+ */
+void spi_gpio_3wire_cs(int dev_id,unsigned char level)
+{
+	if(level){
+		spi_set_gpio_val(ssp_pins[dev_id].gpio_cs,SPI_GPIO_HIGH);
+		gpio_direction_input(ssp_pins[dev_id].gpio_tx);
+
+		/* zx29_gpio_function_sel(GPIO_AP_SPI0_CS, 	GPIO_AP_SPI0_CS_FUN); */
+		//zx29_gpio_function_sel(GPIO_AP_SPI0_CLK, 	GPIO_AP_SPI0_CLK_FUN);
+		//zx29_gpio_function_sel(GPIO_AP_SPI0_TXD, 	GPIO_AP_SPI0_TXD_FUN);
+
+    	//mutex_unlock(&g_zx29_spi->spi_lock); //spi control function mutex.
+	}
+	else{
+		//mutex_lock(&g_zx29_spi->spi_lock);
+
+		/* zx29_gpio_function_sel(GPIO_AP_SPI0_CS, 	GPIO_AP_CS_GPIO_FUN); */
+		//zx29_gpio_function_sel(GPIO_AP_SPI0_CLK, 	GPIO_AP_CLK_GPIO_FUN);
+		//zx29_gpio_function_sel(GPIO_AP_SPI0_TXD, 	GPIO_AP_TXD_GPIO_FUN);
+
+		gpio_direction_output(ssp_pins[dev_id].gpio_cs,SPI_GPIO_LOW);
+		gpio_direction_output(ssp_pins[dev_id].gpio_clk,SPI_GPIO_LOW);
+		gpio_direction_output(ssp_pins[dev_id].gpio_tx,SPI_GPIO_LOW);
+
+		spi_set_gpio_val(ssp_pins[dev_id].gpio_clk,SPI_GPIO_LOW);
+		spi_set_gpio_val(ssp_pins[dev_id].gpio_cs,SPI_GPIO_LOW);
+	}
+}
+EXPORT_SYMBOL(spi_gpio_3wire_cs);
+
+/**
+ * @brief spi gpio mode, one byte write.
+ *
+ * This function used for lcd 3-wires spi mode.
+ * txd line used tx function and rx function at different time.
+ *
+ * @param reg one byte write data.
+ *
+ * @retval none
+ */
+void spi_gpio_3wire_write8(int dev_id,unsigned char reg)
+{
+	int i;
+	//unsigned char readData = 0;
+
+	//write
+	spi_time_delay(50);
+	for (i = 0; i < 8; i++)
+	{
+	    gpio_set_value(ssp_pins[dev_id].gpio_clk, SPI_GPIO_LOW);
+	    spi_time_delay(50);
+
+	    if ((reg & 0x80)==0x80)
+	    {
+	        gpio_set_value(ssp_pins[dev_id].gpio_tx, SPI_GPIO_HIGH);
+	    }
+	    else
+	    {
+	        gpio_set_value(ssp_pins[dev_id].gpio_tx, SPI_GPIO_LOW);
+	    }
+	    spi_time_delay(50);
+
+	    gpio_set_value(ssp_pins[dev_id].gpio_clk, SPI_GPIO_HIGH);
+	    spi_time_delay(50);
+
+	    reg <<= 1;
+	}
+	//spi_time_delay(50);
+}
+EXPORT_SYMBOL(spi_gpio_3wire_write8);
+
+/**
+ * @brief spi gpio mode, one byte read.
+ *
+ * This function used for lcd 3-wires spi mode.
+ * txd line used tx function and rx function at different time.
+ *
+ * @param none.
+ *
+ * @retval one byte readed data.
+ */
+unsigned char spi_gpio_3wire_read8(int dev_id)
+{
+	int i;
+	unsigned char readData = 0;
+  	//read
+	gpio_direction_input(ssp_pins[dev_id].gpio_tx);
+	spi_time_delay(50);
+
+	readData = 0;
+	for (i = 0; i < 8; i++)
+	{
+		readData <<= 1;
+		gpio_set_value(ssp_pins[dev_id].gpio_clk, SPI_GPIO_LOW);
+		spi_time_delay(50);
+
+		if (SPI_GPIO_HIGH == gpio_get_value(ssp_pins[dev_id].gpio_tx))
+		{
+			readData |= 0x01;
+		}
+
+		gpio_set_value(ssp_pins[dev_id].gpio_clk, SPI_GPIO_HIGH);
+		spi_time_delay(50);
+	}
+	//spi_time_delay(50);
+
+	//printk("spi_gpio_read_single8 %x\n", readData);
+	return readData;
+}
+EXPORT_SYMBOL(spi_gpio_3wire_read8);
+
+
/*
 * Write the chip's cached register values to the controller and re-enable
 * it: SSPE is cleared first, fmt/com registers are programmed, then SSPE
 * is set and we poll the SSPE read-back bit for up to 10 ms.
 */
static void  zx29_setup_to_regs(struct chip_data *chip,struct zx29_spi *zx29spi)
{
	unsigned int regval = 0;
	ktime_t k_time_start = 0;
	ktime_t k_time_end = 0;
	ktime_t diff_ns = 0;
        /* Disable the controller before reprogramming its registers. */
	regval = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & (~SPI_COM_CTRL_MASK_SSPE);
	writel(regval, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
	
	writel(chip->fmt_ctrl, (SPI_FMT_CTRL_OFFSET+zx29spi->virtbase));
	//writel(chip->fifo_ctrl, (SPI_FIFO_CTRL_OFFSET+zx29spi->virtbase));
	writel(chip->com_ctrl, (SPI_COM_CTRL_OFFSET + zx29spi->virtbase));
	//writel(chip->timing, (SPI_TIMING_OFFSET + zx29spi->virtbase));

	/* Re-enable and wait for the SSPE read-back bit to confirm. */
	writel(readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) | SPI_COM_CTRL_MASK_SSPE,  (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
	
	//while(((readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase))>> 4)&0x1) == 0);
	k_time_start = ktime_get();
	do {
		regval = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & SPI_COM_CTRL_MASK_SSPE_BACK;
		diff_ns = ktime_sub(ktime_get(),k_time_start);
		cpu_relax();
	}
	while (!regval && diff_ns < 10000000);	/* 10 ms timeout */
	
	if(diff_ns >= 10000000) {
		dev_info(&zx29spi->pdev->dev, " zx29_setup_to_regs failed! diff_ns=%lld \n",diff_ns);
	}
	
}
+/**
+ * zx29_setup - setup function registered to SPI master framework
+ * @spi: spi device which is requesting setup
+ *
+ * This function is registered to the SPI framework for this SPI master
+ * controller. If it is the first time when setup is called by this device,
+ * this function will initialize the runtime state for this chip and save
+ * the same in the device structure. Else it will update the runtime info
+ * with the updated chip info. Nothing is really being written to the
+ * controller hardware here, that is not done until the actual transfer
+ * commence.
+ */
static int zx29_setup(struct spi_device *spi)
{
	struct spi_config_chip const *chip_info;
	struct chip_data *chip;
	unsigned speed_hz;
	int status = 0;
	struct zx29_spi *zx29spi = NULL;
	unsigned int bits =0;
	u8 iface = 0;
	u32 tmp;

	if (!spi)
		return -EINVAL;
	bits = spi->bits_per_word;
	zx29spi = spi_master_get_devdata(spi->master);
	if (!zx29spi)
		return -EINVAL;
	iface = zx29spi->iface_mode;

	/* Get controller_state if one is supplied */
	chip = spi_get_ctldata(spi);

	if (chip == NULL) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip) {
			dev_err(&spi->dev, "cannot allocate controller state\n");
			return -ENOMEM;
		}
		dev_dbg(&spi->dev, "allocated memory for controller's runtime state\n");
	}

	/* Get controller data if one is supplied */
	chip_info = spi->controller_data;

	if (chip_info == NULL) {
		chip_info = &spi_default_chip_info;
		/* spi_board_info.controller_data not is supplied */
		dev_dbg(&spi->dev, "using default controller_data settings\n");
	} else
		dev_dbg(&spi->dev, "using user supplied controller_data settings\n");

	/*
	 * We can override with custom divisors, else we use the board
	 * frequency setting
	 */

	 /* set spi  clock source at 104MHz/1 */
	//writel(chip ->clk_div-1, M0_SSP_CLKDIV_REG_VA);
	speed_hz = spi->max_speed_hz;
//	clk_set_rate(zx29spi->spi_clk, speed_hz * 2); /* f(ssp_clk) = 2*f(ssp_sclk_out) */
	/* Clamp the requested speed to what the clock tree can produce;
	 * the SSP input clock runs at twice the SCLK output rate. */
	spi->max_speed_hz = clk_round_rate(zx29spi->spi_clk, speed_hz * 2) / 2;

	if (spi->max_speed_hz != speed_hz)
		dev_dbg(&spi->dev, "round speed %dHz differs from requested %dHz.", spi->max_speed_hz, speed_hz);

	status = verify_controller_parameters(zx29spi, chip_info);
	if (status) {
		dev_err(&spi->dev, "controller data is incorrect");
		goto err_config_params;
	}

	zx29spi->rx_lev_trig = chip_info->rx_lev_trig;
	zx29spi->tx_lev_trig = chip_info->tx_lev_trig;

	/* Now set controller state based on controller data */
	//chip->xfer_type = chip_info->com_mode;
	/* Transfer mode is chosen per-device, not from chip_info. */
	chip->xfer_type = spi->dma_used ? DMA_TRANSFER : POLLING_TRANSFER;
	dev_dbg(&spi->dev, "chip->xfer_type = 0x%x \n",chip->xfer_type);
	
	if (!chip_info->cs_control) {
			chip->cs_control = default_cs_control;

		if (spi->master->num_chipselect != 1)
			dev_err(&spi->dev, "chip select function is NULL!\n");
	} else
		chip->cs_control = chip_info->cs_control;

	/* Check bits per word with vendor specific range */
	if ((bits <= 3) || (bits > zx29spi->vendor->max_bpw)) {
		status = -ENOTSUPP;
		dev_err(&spi->dev, "illegal data size for this controller!\n");
		dev_err(&spi->dev, "This controller can only handle 4 <= n <= %d bit words\n",
				zx29spi->vendor->max_bpw);
		goto err_config_params;
	} else if (bits <= 8) {
		dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n");
		chip->n_bytes = 1;
		chip->read = READING_U8;
		chip->write = WRITING_U8;
	} else if (bits <= 16) {
		dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n");
		chip->n_bytes = 2;
		chip->read = READING_U16;
		chip->write = WRITING_U16;
	} else {
		dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
		chip->n_bytes = 4;
		chip->read = READING_U32;
		chip->write = WRITING_U32;
	}

	/* Now Initialize all register settings required for this chip */
	chip->com_ctrl  = 0;
	chip->fmt_ctrl = 0;
	chip->fifo_ctrl = 0;
	chip->timing = 0;
	
	if ((chip->xfer_type == DMA_TRANSFER)
	    && ((zx29spi->master_info)->enable_dma)) {
		chip->enable_dma = true;
		dev_dbg(&spi->dev, "DMA mode set in controller state\n");
	} else {
		chip->enable_dma = false;
		dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n");
	}
	/* DMA request bits start disabled; the transfer path sets them. */
	SPI_WRITE_BITS(chip->fifo_ctrl, SPI_DMA_DISABLED, SPI_FIFO_CTRL_MASK_RX_DMA_EN, 2);
	SPI_WRITE_BITS(chip->fifo_ctrl, SPI_DMA_DISABLED, SPI_FIFO_CTRL_MASK_TX_DMA_EN, 3);

	/* FIFO trigger thresholds: 8 when requested, otherwise 4. */
	if (zx29spi->rx_lev_trig == SPI_RX_8_OR_MORE_ELEM)
		SPI_WRITE_BITS(chip->fifo_ctrl, SPI_FIFO_THRES_8, SPI_FIFO_CTRL_MASK_RX_FIFO_THRES, 4);
	else
		SPI_WRITE_BITS(chip->fifo_ctrl, SPI_FIFO_THRES_4, SPI_FIFO_CTRL_MASK_RX_FIFO_THRES, 4);
	if (zx29spi->tx_lev_trig == SPI_TX_8_OR_MORE_EMPTY_LOC)
		SPI_WRITE_BITS(chip->fifo_ctrl, SPI_FIFO_THRES_8, SPI_FIFO_CTRL_MASK_TX_FIFO_THRES, 8);
	else
		SPI_WRITE_BITS(chip->fifo_ctrl, SPI_FIFO_THRES_4, SPI_FIFO_CTRL_MASK_TX_FIFO_THRES, 8);

	/* Word size and frame format. */
	SPI_WRITE_BITS(chip->fmt_ctrl, bits - 1, 			SPI_FMT_CTRL_MASK_DSS, 4);
	SPI_WRITE_BITS(chip->fmt_ctrl, chip_info->iface,	SPI_FMT_CTRL_MASK_FRF, 0);
	
	/* A TI/ISI interface mode on the controller overrides chip_info. */
	if((iface== SPI_TI_FORMAT)||(iface== SPI_ISI_FORMAT)){	
		printk("qhf %s set iface = %d\n",__func__,iface);
		SPI_WRITE_BITS(chip->fmt_ctrl, iface,	SPI_FMT_CTRL_MASK_FRF, 0);
	}	
	/* Stuff that is common for all versions */
	if (spi->mode & SPI_CPOL)
		tmp = SPI_CLK_POL_IDLE_HIGH;
	else
		tmp = SPI_CLK_POL_IDLE_LOW;
	SPI_WRITE_BITS(chip->fmt_ctrl, tmp, SPI_FMT_CTRL_MASK_POL, 2);

	if (spi->mode & SPI_CPHA)
		tmp = SPI_CLK_SECOND_EDGE;
	else
		tmp = SPI_CLK_FIRST_EDGE;

	SPI_WRITE_BITS(chip->fmt_ctrl, tmp, SPI_FMT_CTRL_MASK_PHA, 3);

	/* Loopback is available on all versions except PL023 */
	if (zx29spi->vendor->loopback) {
		if (spi->mode & SPI_LOOP)
			tmp = LOOPBACK_ENABLED;
		else
			tmp = LOOPBACK_DISABLED;
		SPI_WRITE_BITS(chip->com_ctrl, tmp, SPI_COM_CTRL_MASK_LBM, 0);
	}
//	SPI_WRITE_BITS(chip->com_ctrl, SPI_ENABLED, SPI_COM_CTRL_MASK_SSPE, 1);
	SPI_WRITE_BITS(chip->com_ctrl, chip_info->hierarchy, SPI_COM_CTRL_MASK_MS, 2);
//	SPI_WRITE_BITS(chip->com_ctrl, chip_info->slave_tx_disable, SPI_COM_CTRL_MASK_SOD, 3);

	/* Optional inter-transfer gap (CS deselect time). */
	if(spi->trans_gaped) {
		chip->enable_trans_gap = true;
	}
	SPI_WRITE_BITS(chip->timing,   spi->trans_gap_num, SPI_TIMING_MASK_T_CS_DESEL, 0);
	/* Save controller_state */
	spi_set_ctldata(spi, chip);
	/* Slave controllers are programmed immediately, masters defer
	 * register writes to the transfer path. */
	if(zx29spi->mode == ZX29_SSP_SLAVE_TYPE) {
		
		SPI_WRITE_BITS(chip->com_ctrl, SPI_SLAVE_MODE, SPI_COM_CTRL_MASK_MS, 2);
		zx29_setup_to_regs(chip,zx29spi);
	}

	return status;
 err_config_params:
	spi_set_ctldata(spi, NULL);
	kfree(chip);
	return status;
}
+
+/**
+ * zx29_cleanup - cleanup function registered to SPI master framework
+ * @spi: spi device which is requesting cleanup
+ *
+ * This function is registered to the SPI framework for this SPI master
+ * controller. It will free the runtime state of chip.
+ */
+static void zx29_cleanup(struct spi_device *spi)
+{
+	struct chip_data *chip = spi_get_ctldata(spi);
+
+	spi_set_ctldata(spi, NULL);
+	kfree(chip);
+}
+
+static int zx29_spi_clock_init(struct zx29_spi *zx29spi)
+{
+	int status = 0;
+	struct platform_device *pdev = zx29spi->pdev;
+	/* work clock */
+	zx29spi->spi_clk = devm_clk_get(&pdev->dev, "work_clk");
+	if (IS_ERR(zx29spi->spi_clk)) {
+		status = PTR_ERR(zx29spi->spi_clk);
+		dev_err(&pdev->dev, "could not retrieve SPI work clock\n");
+		return status;
+	}
+
+	 /* enable spiclk at function zx29_setup  */
+	
+	if (device_property_read_u32(&pdev->dev, "clock-frequency", &zx29spi->clkfreq))
+		zx29spi->clkfreq = SPI_SPICLK_FREQ_26M;
+	status = clk_set_rate(zx29spi->spi_clk, zx29spi->clkfreq);
+	if(status) {
+		dev_err(&pdev->dev,"clc_set_rate err status=%d \n",status);
+		return status;
+	}
+	/* enable ssp clock source */
+	clk_prepare_enable(zx29spi->spi_clk);
+
+	/* apb clock */
+	zx29spi->pclk = devm_clk_get(&pdev->dev, "apb_clk");
+	if (IS_ERR(zx29spi->pclk)) {
+		status = PTR_ERR(zx29spi->pclk);
+		dev_err(&pdev->dev, "could not retrieve SPI work clock\n");
+		return status;
+	}
+	clk_prepare_enable(zx29spi->pclk);
+
+	return status;
+}
+
+static void spicc_clkgate_ctrl(struct zx29_spi *zx29spi,unsigned char is_enable)
+{
+	if (is_enable) {
+		clk_enable(zx29spi->spi_clk);
+		clk_enable(zx29spi->pclk);
+	} else {
+		clk_disable(zx29spi->spi_clk);
+		clk_disable(zx29spi->pclk);
+	}
+}
+
+static int zx29_spi_slave_clock_init(struct zx29_spi *zx29spi)
+{
+	int status=0;
+	struct platform_device *pdev = zx29spi->pdev;
+	/* work clock */
+	zx29spi->spi_clk = devm_clk_get(&pdev->dev, "work_clk");
+	if (IS_ERR(zx29spi->spi_clk)) {
+		status = PTR_ERR(zx29spi->spi_clk);
+		dev_err(&pdev->dev, "could not retrieve SPI work clock\n");
+		return status;
+	}
+
+	/* enable spiclk at function zx29_setup  */
+	//if (device_property_read_u32(&pdev->dev, "clock-frequency", &zx29spi->clkfreq))
+		zx29spi->clkfreq = SPI_SPICLK_FREQ_156M;          /*salve */
+	
+	status = clk_set_rate(zx29spi->spi_clk, zx29spi->clkfreq);
+	if(status) {
+		dev_err(&pdev->dev,"clc_set_rate err status=%d \n",status);
+		return status;
+	}
+	 /* enable ssp clock source */
+	clk_prepare_enable(zx29spi->spi_clk);
+
+	/* apb clock */
+	zx29spi->pclk = devm_clk_get(&pdev->dev, "apb_clk");
+	if (IS_ERR(zx29spi->pclk)) {
+		status = PTR_ERR(zx29spi->pclk);
+		dev_err(&pdev->dev, "could not retrieve SPI work clock\n");
+		return status;
+	}
+	clk_prepare_enable(zx29spi->pclk);
+	spicc_clkgate_ctrl(zx29spi,true);
+
+	return status;
+}
+
+
+/*
+ * zx29_spi_init_pinctrl - look up the SSP pinctrl states and the
+ * CS/CLK/TX/RX GPIOs from the device tree, then select the initial
+ * pin state: master mode drives CS as a GPIO (cs_gpio_active),
+ * slave mode hands CS to the controller (cs_func).
+ *
+ * Returns 0; pinctrl problems are logged and treated as non-fatal.
+ */
+static int zx29_spi_init_pinctrl(struct platform_device *pdev)
+{
+	struct pinctrl	*pctrl;
+	enum of_gpio_flags flags;
+	struct zx29_spi *zx29spi = NULL;
+
+	if (!pdev) {
+		printk("pdev not exist \n");
+		return -1;
+	}
+	/* platform_get_drvdata() returns NULL (not an ERR_PTR) when drvdata
+	 * was never set, so the old IS_ERR() check let a NULL through and
+	 * crashed on zx29spi->master below: test for NULL instead. */
+	zx29spi = (struct zx29_spi *)platform_get_drvdata(pdev);
+	if (!zx29spi) {
+		dev_warn(&pdev->dev, "Failed to get zx29->ssp%d pins",pdev->id);
+		return 0;
+	}
+
+	pctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR(pctrl)) {
+		dev_warn(&pdev->dev, "Failed to get zx29->ssp%d pins",pdev->id);
+		return 0;
+	}
+	ssp_pins[pdev->id].pctrl = pctrl;
+
+	/* The three states are optional in the DT.  Missing ones stay as
+	 * ERR_PTRs, so every later pinctrl_select_state() must be guarded. */
+	ssp_pins[pdev->id].pcs_gpio_active = pinctrl_lookup_state(pctrl, "cs_gpio_active");
+	if (IS_ERR(ssp_pins[pdev->id].pcs_gpio_active))
+		dev_err(&pdev->dev, "missing cs_gpio_active \n");
+
+	ssp_pins[pdev->id].pcs_gpio_sleep = pinctrl_lookup_state(pctrl, "cs_gpio_sleep");
+	if (IS_ERR(ssp_pins[pdev->id].pcs_gpio_sleep))
+		dev_err(&pdev->dev, "missing cs_gpio_sleep \n");
+
+	ssp_pins[pdev->id].pcs_func = pinctrl_lookup_state(ssp_pins[pdev->id].pctrl, "cs_func");
+	if (IS_ERR(ssp_pins[pdev->id].pcs_func))
+		dev_err(&pdev->dev, "missing cs_func \n");
+
+	if (zx29spi->master->slave == false) {
+		if (!IS_ERR(ssp_pins[pdev->id].pcs_gpio_active) &&
+		    pinctrl_select_state(ssp_pins[pdev->id].pctrl, ssp_pins[pdev->id].pcs_gpio_active) < 0) {
+			printk("spi%d setting cs_gpio pin ctrl failed\n",pdev->id);
+		}
+	} else {
+		if (!IS_ERR(ssp_pins[pdev->id].pcs_func) &&
+		    pinctrl_select_state(ssp_pins[pdev->id].pctrl, ssp_pins[pdev->id].pcs_func) < 0) {
+			printk("spi%d setting cs_func pin ctrl failed\n",pdev->id);
+		}
+	}
+
+	/* GPIOs by DT index: 0=CS, 1=CLK, 2=TX, 3=RX */
+	ssp_pins[pdev->id].gpio_cs = of_get_gpio_flags(pdev->dev.of_node, 0, &flags);
+	if (!gpio_is_valid(ssp_pins[pdev->id].gpio_cs))
+		pr_info("ssp%d gpio_cs no found\n",pdev->id);
+
+	ssp_pins[pdev->id].gpio_clk = of_get_gpio_flags(pdev->dev.of_node, 1, &flags);
+	if (!gpio_is_valid(ssp_pins[pdev->id].gpio_clk))
+		pr_info("ssp%d gpio_clk no found\n",pdev->id);
+
+	ssp_pins[pdev->id].gpio_tx = of_get_gpio_flags(pdev->dev.of_node, 2, &flags);
+	if (!gpio_is_valid(ssp_pins[pdev->id].gpio_tx))
+		pr_info("ssp%d gpio_tx no found\n",pdev->id);
+
+	ssp_pins[pdev->id].gpio_rx = of_get_gpio_flags(pdev->dev.of_node, 3, &flags);
+	if (!gpio_is_valid(ssp_pins[pdev->id].gpio_rx))
+		pr_info("ssp%d gpio_rx no found\n",pdev->id);
+
+	/* In master mode the CS GPIO idles high (deasserted); only drive it
+	 * when the lookup above produced a valid GPIO number. */
+	if (zx29spi->master->slave == false &&
+	    gpio_is_valid(ssp_pins[pdev->id].gpio_cs))
+		gpio_direction_output(ssp_pins[pdev->id].gpio_cs,SPI_GPIO_HIGH);
+
+	return 0;
+}
+/*
+ * zx29_spi_get_platformInfo - fill @platform_info from device-tree
+ * properties, falling back to a default when a property is absent.
+ */
+static void zx29_spi_get_platformInfo(struct platform_device *pdev,struct zx29_spi_controller *platform_info)
+{
+	struct device	*dev=&pdev->dev;
+	/* zero-initialized: the old code used these uninitialized when the
+	 * DT property was missing (dma_tx's read result was not even checked) */
+	u32 dma_tx = 0, dma_rx = 0;
+
+	if (device_property_read_u16(dev, "bus_id", &platform_info->bus_id)) {
+		platform_info->bus_id = pdev->id;
+	}
+	if (device_property_read_u8(dev, "num_chipselect", &platform_info->num_chipselect)) {
+		platform_info->num_chipselect = 1;
+	}
+	if (device_property_read_u32(dev, "autosuspend_delay", &platform_info->autosuspend_delay))
+		platform_info->autosuspend_delay = 0;
+
+	if (device_property_read_u32(dev, "dma_rx", &dma_rx)) {
+		dev_err(&pdev->dev,"dma_rx get failed");
+	}
+	/* DMA request lines are carried as opaque cookies; cast via uintptr_t
+	 * to stay clean if the pointer width differs from u32 */
+	platform_info->dma_rx_param = (void *)(uintptr_t)dma_rx;
+
+	if (device_property_read_u32(dev, "dma_tx", &dma_tx)) {
+		dev_err(&pdev->dev,"dma_tx get failed");
+	}
+	platform_info->dma_tx_param = (void *)(uintptr_t)dma_tx;
+
+	dev_dbg(&pdev->dev,"get dma_rx=0x%x dma_tx=0x%x enable_dma=0x%x",dma_rx,dma_tx,platform_info->enable_dma);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+/* build one debugfs_reg32 entry from the SPI_<reg>_OFFSET macro */
+#define dump_register(reg)		\
+{							\
+	.name	= __stringify(reg),	\
+	.offset	= SPI_ ##reg##_OFFSET,	\
+}
+
+
+/* controller register map exported through the debugfs "spi_regs" file */
+static const struct debugfs_reg32 spi_regs[] = {
+	dump_register(VER_REG),
+	dump_register(COM_CTRL),
+	dump_register(FMT_CTRL),
+	dump_register(DR),
+	dump_register(FIFO_CTRL),
+	dump_register(FIFO_SR),
+	dump_register(INTR_EN),
+	dump_register(INTR_SR),
+	dump_register(TIMING),
+};
+
+//#define Strcat(x, fmt, ...) sprintf(x, "%s" #fmt, x, __VA_ARGS__)
+
+/*
+ * debugfs_spi_init - expose the controller registers and the poll/DMA
+ * transfer counters under /sys/kernel/debug/spi<N>_zx29/.
+ */
+static void debugfs_spi_init(struct zx29_spi *zx29spi)
+{
+	struct dentry *root;
+	char tmp[32];
+
+	if(!zx29spi)
+		return;
+
+	//create root
+	snprintf(tmp, sizeof(tmp), "spi%d_zx29", zx29spi->pdev->id);
+	root = debugfs_create_dir(tmp, NULL);
+	/* debugfs_create_dir() reports failure as an ERR_PTR, never NULL,
+	 * so the old "if (!root)" check could never fire */
+	if (IS_ERR_OR_NULL(root)) {
+		dev_err(&zx29spi->pdev->dev, "debugfs_create_dir %s err\n", tmp);
+		goto err;
+	}
+
+	//create regs
+	zx29spi->spi_regset.regs = (struct debugfs_reg32 *)spi_regs;
+	zx29spi->spi_regset.nregs = ARRAY_SIZE(spi_regs);
+	zx29spi->spi_regset.base = zx29spi->virtbase;
+
+	debugfs_create_regset32("spi_regs", S_IRUGO, root, &zx29spi->spi_regset);
+	//create info
+	debugfs_create_u32("poll_cnt", S_IRUGO, root, &zx29spi->spi_poll_cnt);
+	debugfs_create_u32("dma_cnt", S_IRUGO, root, &zx29spi->spi_dma_cnt);
+
+	zx29spi->spi_root = (void *)root;
+	return;
+err:
+	dev_err(&zx29spi->pdev->dev, "debugfs_spi_init err\n");
+}
+
+#endif
+
+
+/*
+ * zx29_spi_init_irq - fetch the controller interrupt and install
+ * zx29_spi_irq as its handler.  The line is left disabled; the
+ * transfer path enables it on demand.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int zx29_spi_init_irq(struct platform_device *pdev, struct zx29_spi *zx29spi)
+{
+	int irq, ret;
+
+	if (!zx29spi || !pdev)
+		return -ENOENT;
+
+	/* platform_get_irq() returns a negative errno on failure; the old
+	 * "irq == NULL" comparison never caught it */
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "Cannot get IORESOURCE_IRQ\n");
+		return -ENOENT;
+	}
+	zx29spi->irq = irq;
+	dev_dbg(&pdev->dev, "used interrupt num is %d\n",  zx29spi->irq);
+	ret = devm_request_irq(&pdev->dev, zx29spi->irq, zx29_spi_irq,
+				IRQF_TRIGGER_HIGH | IRQF_NO_THREAD | IRQF_ONESHOT, dev_name(&pdev->dev), zx29spi);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "probe - cannot get IRQ (%d)\n", ret);
+		return ret;
+	}
+	/* keep the line masked until a transfer actually needs it */
+	disable_irq_nosync(zx29spi->irq);
+	return ret;
+}
+
+/*
+ * zx29_spi_probe_of_master - probe one SSP controller in master mode:
+ * allocate the spi_master, read DT configuration, map the register
+ * window, bring up clocks/IRQ/DMA and register with the SPI core.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int zx29_spi_probe_of_master(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct zx29_spi_controller *platform_info = NULL;
+	struct spi_master *master;
+	struct zx29_spi *zx29spi = NULL;	/*Data for this driver */
+	struct resource *regs = NULL;
+	struct device_node *np = pdev->dev.of_node;
+	int status = 0, ret;
+	u32 regval = 0;
+
+	platform_info = devm_kzalloc(&pdev->dev, sizeof(struct zx29_spi_controller), GFP_KERNEL);
+	if (platform_info == NULL)
+		return -ENOMEM;	/* was "return 0": OOM must not look like success */
+	/* defaults; zx29_spi_get_platformInfo() below may override them */
+	platform_info->bus_id = 0;
+	platform_info->num_chipselect = 1;
+	platform_info->enable_dma = 1;
+	platform_info->autosuspend_delay = 0;
+
+	/* Allocate master with space for data */
+	master = spi_alloc_master(dev, sizeof(struct zx29_spi));
+	if (master == NULL) {
+		dev_err(&pdev->dev, "probe - cannot alloc SPI master\n");
+		status = -ENOMEM;
+		goto err_no_master;
+	}
+
+	zx29spi = spi_master_get_devdata(master);
+	memset(zx29spi, 0, sizeof(struct zx29_spi));
+	pdev->id = of_alias_get_id(np, "spi");
+	if (pdev->id < 0) {
+		printk("zx29_ssp of_alias_get_id fail ret:%d\n", pdev->id);
+		status = -ENOMEM;
+		goto err_no_registers;	/* master already allocated: drop the ref */
+	}
+	snprintf(zx29spi->name, sizeof(zx29spi->name), "zx29-spi%d", pdev->id);
+	zx29_spi_get_platformInfo(pdev, platform_info);
+	g_zx29_spi[pdev->id] = zx29spi;
+	zx29spi->master = master;
+	zx29spi->master_info = platform_info;
+	zx29spi->pdev = pdev;
+	zx29spi->vendor = &vendor_arm;
+	zx29spi->mode = ZX29_SSP_MASTER_TYPE;
+	zx29spi->zx29_flush_rxfifo = zx29_flush_rxfifo;
+	sema_init(&zx29spi->sema_dma, 0);
+	/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck start */
+	init_waitqueue_head(&zx29spi->wait);
+	zx29spi->trans_done = false;
+	/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck end */
+	dev_set_drvdata(&pdev->dev, zx29spi);
+	device_init_wakeup(&pdev->dev, true);
+	/*
+	 * Bus Number Which has been Assigned to this SSP controller
+	 * on this board
+	 */
+	master->bus_num = platform_info->bus_id;
+	master->num_chipselect = platform_info->num_chipselect;
+	master->cleanup = zx29_cleanup;
+	master->setup = zx29_setup;
+	master->prepare_transfer_hardware = zx29_prepare_transfer_hardware;
+	master->transfer_one_message = zx29_transfer_one_message;
+	master->unprepare_transfer_hardware = zx29_unprepare_transfer_hardware;
+
+	/*
+	 * Supports mode 0-3, loopback, and active low CS..
+	 */
+	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_NO_CS|SPI_LOOP;
+
+	dev_dbg(&pdev->dev, "BUSNO: %d\n", master->bus_num);
+
+	zx29_spi_init_pinctrl(pdev);
+
+	 /* registers */
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (regs == NULL){
+		dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
+		status = -ENOENT;
+		goto err_no_registers;
+	}
+	zx29spi->phybase = regs->start;
+	zx29spi->virtbase = devm_platform_ioremap_resource(pdev, 0);
+	if (zx29spi->virtbase == NULL) {
+		status = -ENOMEM;
+		goto err_no_ioremap;
+	}
+	dev_dbg( &pdev->dev," mapped registers from 0x%08x to 0x%p\n",
+	        regs->start, zx29spi->virtbase);
+
+#if defined(CONFIG_DEBUG_FS)
+	debugfs_spi_init(zx29spi);
+#endif
+
+	/*clock init*/
+	status = zx29_spi_clock_init(zx29spi);
+	if(status)
+		goto err_no_clk;
+
+	/* Disable SPI until the first transfer configures it */
+	regval = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & (~SPI_COM_CTRL_MASK_SSPE);
+	writel(regval, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
+
+	load_spi_default_config(zx29spi);
+	writel(0, (SPI_TIMING_OFFSET + zx29spi->virtbase));
+	if(!strcmp(pdev->name,"1410000.ssp")) {
+		/* this SSP instance shares the IP with a camera interface:
+		 * clear bit 12 to force non-camera mode */
+		regval = readl((SPI_FMT_CTRL_OFFSET+zx29spi->virtbase))&(~(0x1<<12));
+		writel(regval, (SPI_FMT_CTRL_OFFSET+zx29spi->virtbase));
+		dev_info(&pdev->dev,"%s set non-camera mode regval:0x%x \n",pdev->name,regval);
+	}
+	status = zx29_spi_init_irq(pdev,zx29spi);
+	if(status != 0) {
+		dev_err(&pdev->dev, "zx29_spi_init_irq err!!! \n");
+		goto err_no_irq;
+	}
+	/* Get DMA channels; fall back to PIO when the DMA probe fails */
+	if (platform_info->enable_dma) {
+		status = zx29_dma_probe(zx29spi);
+		if (status != 0) {
+			platform_info->enable_dma = 0;
+			sc_debug_info_record(MODULE_ID_CAP_SPI, "%s dma probe failed \n",pdev->name);
+		}
+	}
+
+#if SPI_PSM_CONTROL
+	wake_lock_init(&zx29spi->psm_lock, WAKE_LOCK_SUSPEND, zx29spi->name);
+#endif
+	master->dev.of_node = pdev->dev.of_node;
+	ret = devm_spi_register_master(&pdev->dev, master);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "can not register to master err %d\n", ret);
+		status = ret;	/* was left 0, turning the failure into "success" */
+		goto err_spi_register;
+	}
+	dev_info(&pdev->dev," probe succeeded\n");
+
+	/* let runtime pm put suspend */
+	if (platform_info->autosuspend_delay > 0) {
+		dev_info(&pdev->dev, "will use autosuspend for runtime pm, delay %dms\n", platform_info->autosuspend_delay);
+		pm_runtime_set_autosuspend_delay(dev, platform_info->autosuspend_delay);
+		pm_runtime_use_autosuspend(dev);
+		pm_runtime_put_autosuspend(dev);
+	} else {
+		pm_runtime_put(dev);
+	}
+
+	return 0;
+
+ err_spi_register:
+#if SPI_PSM_CONTROL
+	wake_lock_destroy(&zx29spi->psm_lock);
+#endif
+	if (platform_info->enable_dma)
+		zx29_dma_remove(zx29spi);
+
+ err_no_irq:
+	/* spi_clk is devm-managed and was started with clk_prepare_enable():
+	 * undo both halves here and do not clk_put() it */
+	clk_disable_unprepare(zx29spi->spi_clk);
+ err_no_clk:
+ err_no_ioremap:
+ err_no_registers:
+	spi_master_put(master);
+ err_no_master:
+	return status;
+}
+
+/*
+ * zx29_spi_probe_of_slave - probe one SSP controller in slave mode.
+ * Same shape as the master probe, but installs the slave transfer
+ * hooks, uses the slave clock setup, and enables the SSP immediately
+ * so the controller can be clocked by the remote master.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int zx29_spi_probe_of_slave(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct zx29_spi_controller *platform_info = NULL;
+	struct spi_master *master;
+	struct zx29_spi *zx29spi = NULL;	/*Data for this driver */
+	struct resource *regs = NULL;
+	struct device_node *np = pdev->dev.of_node;
+	int status = 0, ret;
+	u32 regval = 0;
+
+	platform_info = devm_kzalloc(&pdev->dev, sizeof(struct zx29_spi_controller), GFP_KERNEL);
+	if (platform_info == NULL)
+		return -ENOMEM;	/* was "return 0": OOM must not look like success */
+	/* defaults; zx29_spi_get_platformInfo() below may override them */
+	platform_info->bus_id = 0;
+	platform_info->num_chipselect = 1;
+	platform_info->enable_dma = 1;
+	platform_info->autosuspend_delay = 0;
+
+	/* Allocate master with space for data */
+	master = spi_alloc_master(dev, sizeof(struct zx29_spi));
+	if (master == NULL) {
+		dev_err(&pdev->dev, "probe - cannot alloc SPI master\n");
+		status = -ENOMEM;
+		goto err_no_master;
+	}
+	master->slave = true;
+	zx29spi = spi_master_get_devdata(master);
+	memset(zx29spi, 0, sizeof(struct zx29_spi));
+	pdev->id = of_alias_get_id(np, "spi");
+	if (pdev->id < 0) {
+		printk("zx29_ssp of_alias_get_id fail ret:%d\n", pdev->id);
+		status = pdev->id;	/* was unset, so the failure returned 0 */
+		goto err_no_registers;	/* master already allocated: drop the ref */
+	}
+	snprintf(zx29spi->name, sizeof(zx29spi->name), "zx29-spi%d", pdev->id);
+
+	zx29_spi_get_platformInfo(pdev, platform_info);
+	g_zx29_spi[pdev->id] = zx29spi;
+	zx29spi->master = master;
+	zx29spi->master_info = platform_info;
+	zx29spi->pdev = pdev;
+	zx29spi->vendor = &vendor_arm;
+	zx29spi->mode = ZX29_SSP_SLAVE_TYPE;
+	zx29spi->zx29_flush_rxfifo = zx29_flush_rxfifo;
+	sema_init(&zx29spi->sema_dma, 0);
+	/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck start */
+	init_waitqueue_head(&zx29spi->wait);
+	zx29spi->trans_done = false;
+	/* yu.dong@20240521 [T106BUG-616] SPI set to slave mode for read will get stuck end */
+	dev_set_drvdata(&pdev->dev, zx29spi);
+	/*
+	 * Bus Number Which has been Assigned to this SSP controller
+	 * on this board
+	 */
+	master->bus_num = platform_info->bus_id;
+	master->num_chipselect = platform_info->num_chipselect;
+	master->cleanup = zx29_cleanup;
+	master->setup = zx29_setup;
+	master->prepare_transfer_hardware = zx29_prepare_transfer_hardware;
+	master->transfer_one_message = zx29_slave_transfer_one_message;
+	master->unprepare_transfer_hardware = zx29_unprepare_transfer_hardware;
+	/* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme start */
+	master->spi_slave_rd_start = zx29_slave_rd_start;
+	master->spi_slave_rd_stop = zx29_slave_rd_stop;
+	/* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme end */
+
+	/*
+	 * Supports mode 0-3, loopback, and active low CS..
+	 */
+	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_NO_CS|SPI_LOOP;
+
+	dev_dbg(&pdev->dev, "BUSNO: %d\n", master->bus_num);
+
+	zx29_spi_init_pinctrl(pdev);
+
+	 /* registers */
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (regs == NULL){
+		dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
+		status = -ENOENT;
+		goto err_no_registers;
+	}
+	zx29spi->phybase = regs->start;
+	zx29spi->virtbase = devm_platform_ioremap_resource(pdev, 0);
+	if (zx29spi->virtbase == NULL) {
+		status = -ENOMEM;
+		goto err_no_ioremap;
+	}
+	dev_dbg( &pdev->dev," mapped registers from 0x%08x to 0x%p\n",
+	        regs->start, zx29spi->virtbase);
+
+#if defined(CONFIG_DEBUG_FS)
+	debugfs_spi_init(zx29spi);
+#endif
+
+	/*clock init*/
+	status = zx29_spi_slave_clock_init(zx29spi);
+	if(status)
+		goto err_no_clk;
+
+	/* Disable SPI while the default configuration is loaded */
+	regval = readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) & (~SPI_COM_CTRL_MASK_SSPE);
+	writel(regval, (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
+
+	load_spi_default_config(zx29spi);
+	writel(0, (SPI_TIMING_OFFSET + zx29spi->virtbase));
+
+	if(!strcmp(pdev->name,"1410000.ssp")) {
+		/* this SSP instance shares the IP with a camera interface:
+		 * clear bit 12 to force non-camera mode */
+		regval = readl((SPI_FMT_CTRL_OFFSET+zx29spi->virtbase))&(~(0x1<<12));
+		writel(regval, (SPI_FMT_CTRL_OFFSET+zx29spi->virtbase));
+		dev_info(&pdev->dev," %s set non-camera mode regval:0x%x \n",pdev->name,regval);
+	}
+
+	/* enable the SSP and wait for the enable status bit (bit4).
+	 * NOTE(review): this poll is unbounded — confirm the bit always
+	 * comes up, or add a timeout */
+	writel(readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase)) | SPI_COM_CTRL_MASK_SSPE,  (SPI_COM_CTRL_OFFSET+zx29spi->virtbase));
+	while(((readl((SPI_COM_CTRL_OFFSET+zx29spi->virtbase))>> 4)&0x1) == 0);
+
+	/* (the old call passed regval with no matching format specifier) */
+	dev_info(&pdev->dev,"ssp enabled \n");
+	/* irq*/
+	status = zx29_spi_init_irq(pdev,zx29spi);
+	if(status != 0) {
+		dev_err(&pdev->dev, "zx29_spi_init_irq err!!! \n");
+		goto err_no_irq;
+	}
+
+	/* Get DMA channels; fall back to PIO when the DMA probe fails */
+	if (platform_info->enable_dma) {
+		status = zx29_dma_probe(zx29spi);
+		if (status != 0) {
+			platform_info->enable_dma = 0;
+			sc_debug_info_record(MODULE_ID_CAP_SPI, "%s dma probe failed",pdev->name);
+		}
+	}
+
+#if SPI_PSM_CONTROL
+	wake_lock_init(&zx29spi->psm_lock, WAKE_LOCK_SUSPEND, zx29spi->name);
+#endif
+	master->dev.of_node = pdev->dev.of_node;
+	ret = devm_spi_register_master(&pdev->dev, master);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "can not register to master err %d\n", ret);
+		status = ret;	/* was left 0, turning the failure into "success" */
+		goto err_spi_register;
+	}
+
+	dev_info(&pdev->dev," probe succeeded\n");
+
+	/* let runtime pm put suspend */
+	if (platform_info->autosuspend_delay > 0) {
+		dev_info(&pdev->dev, "will use autosuspend for runtime pm, delay %dms\n", platform_info->autosuspend_delay);
+		pm_runtime_set_autosuspend_delay(dev, platform_info->autosuspend_delay);
+		pm_runtime_use_autosuspend(dev);
+		pm_runtime_put_autosuspend(dev);
+	} else {
+		pm_runtime_put(dev);
+	}
+
+	return 0;
+
+ err_spi_register:
+#if SPI_PSM_CONTROL
+	wake_lock_destroy(&zx29spi->psm_lock);
+#endif
+	if (platform_info->enable_dma)
+		zx29_dma_remove(zx29spi);
+
+ err_no_irq:
+	/* spi_clk is devm-managed and was started with clk_prepare_enable():
+	 * undo both halves here and do not clk_put() it */
+	clk_disable_unprepare(zx29spi->spi_clk);
+ err_no_clk:
+ err_no_ioremap:
+ err_no_registers:
+	spi_master_put(master);
+ err_no_master:
+	return status;
+}
+
+
+
+/*
+ * zx29_spi_probe - common probe entry: pick master or slave probing
+ * based on the role carried in the matched compatible's data.
+ */
+static int zx29_spi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	const struct of_device_id *match;
+	const struct zx29_ssp_device_of_data *data;
+	enum zx29_ssp_device_mode mode;
+	int status = -EINVAL;
+
+	match = of_match_device(zx29_spi_of_match, dev);
+	if (!match)
+		return -EINVAL;
+
+	data = (struct zx29_ssp_device_of_data *)match->data;
+	mode = (enum zx29_ssp_device_mode)data->mode;
+
+	dev_info(&pdev->dev,"%s mode \n", (mode==0)?"MASTER":(mode==1)?"SLAVE":"UNKNOWN");
+
+	switch (mode) {
+	case ZX29_SSP_MASTER_TYPE:
+		status = zx29_spi_probe_of_master(pdev);
+		break;
+	case ZX29_SSP_SLAVE_TYPE:
+		status = zx29_spi_probe_of_slave(pdev);
+		break;
+	default:
+		break;	/* unknown role: keep -EINVAL */
+	}
+
+	return status;
+}
+
+/*
+ * zx29_spi_remove - platform remove hook: unregister from the SPI core,
+ * restore the controller's default configuration, and release the
+ * IRQ/DMA/clock/debugfs resources acquired at probe time.
+ */
+static int __exit zx29_spi_remove(struct platform_device *pdev)
+{
+	struct zx29_spi *zx29spi = dev_get_drvdata(&pdev->dev);
+	struct  resource *              gpio = NULL;	/* NOTE(review): unused */
+	//struct  resource *              irq = NULL;
+	int i;	/* NOTE(review): unused */
+
+	if (!zx29spi)
+		return 0;
+
+	/*
+	 * undo pm_runtime_put() in probe.  I assume that we're not
+	 * accessing the primecell here.
+	 */
+	pm_runtime_get_noresume(&pdev->dev);
+
+	spi_unregister_master(zx29spi->master);
+
+	/* park the controller in its reset configuration */
+	load_spi_default_config(zx29spi);
+	if (zx29spi->master_info->enable_dma)
+		zx29_dma_remove(zx29spi);
+/*
+	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if( irq != NULL )
+	{
+		free_irq(irq->start, zx29spi);
+	}
+*/
+	devm_free_irq(&pdev->dev, zx29spi->irq, zx29spi);
+
+	/* NOTE(review): both clocks were obtained with devm_clk_get(), so the
+	 * clk_put() calls below look like a double release once devm cleanup
+	 * runs — confirm against the clock framework */
+	clk_disable(zx29spi->spi_clk);
+	clk_put(zx29spi->spi_clk);
+
+	clk_disable(zx29spi->pclk);
+	clk_put(zx29spi->pclk);
+
+	/* tear down the debugfs tree created in debugfs_spi_init() */
+	#if defined(CONFIG_DEBUG_FS)
+	if(zx29spi->spi_root){
+		printk(KERN_INFO "spi:debugfs_remove_recursive \n");
+		debugfs_remove_recursive(zx29spi->spi_root);
+	}
+	#endif
+
+
+	//	iounmap(zx29spi->virtbase);
+	//amba_release_regions(adev);
+	//tasklet_disable(&zx29spi->pump_transfers);
+
+	/* drop the reference taken by spi_alloc_master() */
+	spi_master_put(zx29spi->master);
+	//amba_set_drvdata(adev, NULL);
+	dev_set_drvdata(&pdev->dev, NULL);
+
+#if SPI_PSM_CONTROL
+    wake_lock_destroy(&zx29spi->psm_lock);
+#endif
+
+	return 0;
+}
+
+/* match data: controller role selected by the DT compatible string */
+static const struct zx29_ssp_device_of_data zx29_ssp_master_of_data = {
+	.mode = ZX29_SSP_MASTER_TYPE,
+};
+
+static const struct zx29_ssp_device_of_data zx29_ssp_slave_of_data = {
+	.mode = ZX29_SSP_SLAVE_TYPE,
+};
+
+/* DT match table: "zte,zx29_ssp" probes master mode, "zte,zx29_ssp_slave"
+ * probes slave mode (see zx29_spi_probe) */
+static const struct of_device_id zx29_spi_of_match[] = {
+	{ 
+		.compatible = "zte,zx29_ssp",
+		.data = &zx29_ssp_master_of_data,	
+	},
+	{
+		.compatible = "zte,zx29_ssp_slave",
+		.data = &zx29_ssp_slave_of_data,
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, zx29_spi_of_match);
+
+#ifdef CONFIG_PM
+/*
+ * zx29_spi_suspend - legacy platform suspend hook: move the SSP pins
+ * into their sleep states so they do not leak current while suspended.
+ * Both roles park the CS pin in the cs_gpio_sleep state.
+ */
+static int zx29_spi_suspend(struct platform_device *pdev,pm_message_t state)
+{
+	struct zx29_spi *zx29spi = NULL;
+
+	if(pdev == NULL)
+		return -1;
+	/* (the old "pdev && &pdev->dev" guard was always true and is gone) */
+	pinctrl_pm_select_sleep_state(&pdev->dev);
+
+	zx29spi = (struct zx29_spi *)platform_get_drvdata(pdev);
+	if(zx29spi && (zx29spi->master->slave == true)) {
+		if (pinctrl_select_state(ssp_pins[pdev->id].pctrl, ssp_pins[pdev->id].pcs_gpio_sleep) < 0) {
+			printk("spi%d setting cs_gpio pin ctrl failed\n",pdev->id);
+			return -1;
+		}
+	}
+	if(zx29spi && zx29spi->master->slave == false) {
+		if (pinctrl_select_state(ssp_pins[pdev->id].pctrl, ssp_pins[pdev->id].pcs_gpio_sleep) < 0) {
+			printk("spi%d setting cs_gpio_sleep pin ctrl failed\n",pdev->id);
+			return -1;
+		}
+		printk("spi%d setting cs_gpio_sleep pin ctrl\n",pdev->id);
+	}
+	return 0;
+}
+
+/*
+ * zx29_spi_resume - legacy platform resume hook: restore the default
+ * pin states.  Slave mode re-selects the function state (cs_func);
+ * master mode re-selects the CS GPIO state and deasserts CS (high).
+ */
+static int zx29_spi_resume(struct platform_device *pdev)
+{
+	struct zx29_spi *zx29spi = NULL;
+
+	if(pdev == NULL)
+		return -1;
+	/* (the old "pdev && &pdev->dev" guard was always true and is gone) */
+	pinctrl_pm_select_default_state(&pdev->dev);
+
+	zx29spi = (struct zx29_spi *)platform_get_drvdata(pdev);
+	if(zx29spi && (zx29spi->master->slave == true)) {
+		if (pinctrl_select_state(ssp_pins[pdev->id].pctrl, ssp_pins[pdev->id].pcs_func) < 0) {
+			printk("spi%d setting cs_func pin ctrl failed\n",pdev->id);
+			return -1;
+		}
+	}
+	if(zx29spi && zx29spi->master->slave == false) {
+		if (pinctrl_select_state(ssp_pins[pdev->id].pctrl, ssp_pins[pdev->id].pcs_gpio_active) < 0) {
+			printk("spi%d setting cs_gpio_active pin ctrl failed\n",pdev->id);
+			return -1;
+		}
+		printk("spi%d setting cs_gpio_active pin ctrl\n",pdev->id);
+		/* only drive CS when the probe actually found a valid GPIO */
+		if (gpio_is_valid(ssp_pins[pdev->id].gpio_cs))
+			gpio_direction_output(ssp_pins[pdev->id].gpio_cs,SPI_GPIO_HIGH);
+	}
+	return 0;
+}
+#endif
+
+/* platform driver glue; the legacy suspend/resume hooks only shuffle
+ * pin states, controller state is handled by the SPI core */
+static struct platform_driver zx29_spi_driver = {
+	.driver = {
+		.name		= "zx29_ssp",
+		.of_match_table = of_match_ptr(zx29_spi_of_match),
+		.owner		= THIS_MODULE,
+	},
+	.probe      = zx29_spi_probe,
+	#ifdef CONFIG_PM
+	.suspend 	= zx29_spi_suspend,
+	.resume 	= zx29_spi_resume,
+	#endif
+	.remove		= __exit_p(zx29_spi_remove),
+};
+
+/* module entry: register the platform driver with the driver core */
+static int __init zx29_spi_init(void)
+{
+	return platform_driver_register(&zx29_spi_driver);
+}
+
+/* module exit: unregister the platform driver */
+static void __exit zx29_spi_exit(void)
+{
+	platform_driver_unregister(&zx29_spi_driver);
+}
+
+module_init(zx29_spi_init);
+module_exit(zx29_spi_exit);
+
+MODULE_DESCRIPTION("zx29 spi controller driver");
+MODULE_AUTHOR("zte");
+MODULE_LICENSE("GPL");
+
diff --git a/upstream/linux-5.10/drivers/spi/spidev.c b/upstream/linux-5.10/drivers/spi/spidev.c
new file mode 100755
index 0000000..1522a21
--- /dev/null
+++ b/upstream/linux-5.10/drivers/spi/spidev.c
@@ -0,0 +1,2406 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Simple synchronous userspace interface to SPI devices
+ *
+ * Copyright (C) 2006 SWAPP
+ *	Andrea Paterniani <a.paterniani@swapp-eng.it>
+ * Copyright (C) 2007 David Brownell (simplification, cleanup)
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/compat.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/acpi.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spidev.h>
+
+#include <linux/uaccess.h>
+#include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+/* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme start*/
+#include <linux/wait.h>
+#include <linux/suspend.h>
+
+#define SPI_SLAVE_FOR_YK
+/* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme end */
+/*
+ * This supports access to SPI devices using normal userspace I/O calls.
+ * Note that while traditional UNIX/POSIX I/O semantics are half duplex,
+ * and often mask message boundaries, full SPI support requires full duplex
+ * transfers.  There are several kinds of internal message boundaries to
+ * handle chipselect management and other protocol options.
+ *
+ * SPI has a character major number assigned.  We allocate minor numbers
+ * dynamically using a bitmask.  You must use hotplug tools, such as udev
+ * (or mdev with busybox) to create and destroy the /dev/spidevB.C device
+ * nodes, since there is no fixed association of minor numbers with any
+ * particular SPI bus or device.
+ */
+#define SPIDEV_MAJOR			153	/* assigned */
+#define N_SPI_MINORS			32	/* ... up to 256 */
+
+/* one bit per allocated minor number, guarded by device_list_lock */
+static DECLARE_BITMAP(minors, N_SPI_MINORS);
+
+
+/* Bit masks for spi_device.mode management.  Note that incorrect
+ * settings for some settings can cause *lots* of trouble for other
+ * devices on a shared bus:
+ *
+ *  - CS_HIGH ... this device will be active when it shouldn't be
+ *  - 3WIRE ... when active, it won't behave as it should
+ *  - NO_CS ... there will be no explicit message boundaries; this
+ *	is completely incompatible with the shared bus model
+ *  - READY ... transfers may proceed when they shouldn't.
+ *
+ * REVISIT should changing those flags be privileged?
+ */
+/* union of every mode bit userspace may legally set via the ioctl path */
+#define SPI_MODE_MASK		(SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
+				| SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
+				| SPI_NO_CS | SPI_READY | SPI_TX_DUAL \
+				| SPI_TX_QUAD | SPI_TX_OCTAL | SPI_RX_DUAL \
+				| SPI_RX_QUAD | SPI_RX_OCTAL)
+
+/* per-open-device state for one /dev/spidevB.C node */
+struct spidev_data {
+	dev_t			devt;		/* chardev number for this node */
+	spinlock_t		spi_lock;	/* guards ->spi against removal */
+	struct spi_device	*spi;		/* NULL after the device is removed */
+	struct list_head	device_entry;	/* link on device_list */
+
+	/* TX/RX buffers are NULL unless this device is open (users > 0) */
+	struct mutex		buf_lock;
+	unsigned		users;
+	u8			*tx_buffer;
+	u8			*rx_buffer;
+	u32			speed_hz;
+	/* nonzero: reads are satisfied from rx_buffer captured during the
+	 * preceding full-duplex write instead of issuing a new transfer */
+	u8       	rd_from_rx_buffer;
+
+//#define   SPIDEV_DEBUG
+#ifdef SPIDEV_DEBUG
+	/* debug-only plumbing: extra pinctrl states, a signalling GPIO/IRQ
+	 * pair and semaphores used by the in-kernel test threads */
+	struct pinctrl	*pctrl;
+	struct pinctrl_state	*pgpioex;
+	struct pinctrl_state	*pint_ex;
+	int gpio_ex;
+	int gpio_int;
+	int irq;
+	int tx_flag;
+	int rx_cnt_in_rx_thread;
+	int rx_cnt_in_tx_thread;
+	struct semaphore wait_req;
+	struct semaphore rec_req;
+	struct semaphore rec_head_msg_req;
+	struct semaphore rec_data_msg_req;
+	spinlock_t tx_flag_lock;
+	int msg_id;
+	bool is_data_check;
+	int rx_data_check_ok_cnt;
+	int rx_data_check_err_cnt;
+#endif
+//#define TEST_SWAP_KERNEL_AND_USER
+#ifdef TEST_SWAP_KERNEL_AND_USER
+	/* debug-only plumbing for the kernel/user swap experiment */
+	struct pinctrl	*pctrl;
+	struct pinctrl_state	*pgpioex;
+	struct pinctrl_state	*pint_ex;
+	struct semaphore sig_req;
+	struct semaphore sem_dma_cfg_done;
+	int gpio_ex;
+	int gpio_int;
+	int irq;
+	int pid;
+	int dma_cfg_done;
+#endif
+};
+
+static LIST_HEAD(device_list);
+static DEFINE_MUTEX(device_list_lock);
+
+static unsigned bufsiz = 4096;
+module_param(bufsiz, uint, S_IRUGO);
+MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * spidev_sync - submit @message synchronously on the device's bus.
+ * Returns the transferred byte count, or a negative errno
+ * (-ESHUTDOWN once the underlying spi_device has been removed).
+ */
+static ssize_t
+spidev_sync(struct spidev_data *spidev, struct spi_message *message)
+{
+	struct spi_device *spi;
+	int status;
+
+	/* snapshot ->spi under the lock; removal may clear it concurrently */
+	spin_lock_irq(&spidev->spi_lock);
+	spi = spidev->spi;
+	spin_unlock_irq(&spidev->spi_lock);
+
+	if (!spi)
+		return -ESHUTDOWN;
+
+	status = spi_sync(spi, message);
+	if (status == 0)
+		return message->actual_length;
+
+	return status;
+}
+
+/* Half-duplex write of @len bytes from the preallocated TX bounce buffer. */
+static inline ssize_t
+spidev_sync_write(struct spidev_data *spidev, size_t len)
+{
+	struct spi_message	msg;
+	struct spi_transfer	xfer = {
+			.tx_buf		= spidev->tx_buffer,
+			.len		= len,
+			.speed_hz	= spidev->speed_hz,
+		};
+
+	spi_message_init_with_transfers(&msg, &xfer, 1);
+	return spidev_sync(spidev, &msg);
+}
+
+/* Half-duplex read of @len bytes into the preallocated RX bounce buffer. */
+static inline ssize_t
+spidev_sync_read(struct spidev_data *spidev, size_t len)
+{
+	struct spi_message	msg;
+	struct spi_transfer	xfer = {
+			.rx_buf		= spidev->rx_buffer,
+			.len		= len,
+			.speed_hz	= spidev->speed_hz,
+		};
+
+	spi_message_init_with_transfers(&msg, &xfer, 1);
+	return spidev_sync(spidev, &msg);
+}
+
+
+/* Full-duplex transfer: send tx_buffer while capturing into rx_buffer. */
+static inline ssize_t
+spidev_sync_write_and_read(struct spidev_data *spidev, size_t len)
+{
+	struct spi_message	msg;
+	struct spi_transfer	xfer = {
+			.tx_buf     = spidev->tx_buffer,
+			.rx_buf		= spidev->rx_buffer,
+			.len		= len,
+			.speed_hz	= spidev->speed_hz,
+		};
+
+	spi_message_init_with_transfers(&msg, &xfer, 1);
+	return spidev_sync(spidev, &msg);
+}
+
+
+
+/*-------------------------------------------------------------------------*/
+/* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme start*/
+/* Read-only message with current device setup.
+ *
+ * With SPI_SLAVE_FOR_YK, data is served from a bufsiz-byte ring buffer
+ * (rx_buffer) filled by the slave receive path: rd_pos is the reader
+ * index, recv_pos the writer index, and rd_pos == recv_pos means
+ * "empty" — the reader then sleeps until the receive path sets
+ * recv_done.  Without it, a synchronous SPI read is issued (or, when
+ * rd_from_rx_buffer is set, the data captured by the last write is
+ * returned directly).
+ */
+static ssize_t
+spidev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
+{
+	struct spidev_data	*spidev;
+	ssize_t			status;
+	unsigned long	missing;
+	/* chipselect only toggles at start or end of operation */
+	if (count > bufsiz)
+		return -EMSGSIZE;
+
+	spidev = filp->private_data;
+
+	#ifdef SPI_SLAVE_FOR_YK
+	size_t total = 0;
+
+	if (spidev->spi->rd_pos == spidev->spi->recv_pos) {
+		/* ring empty: block until the receive path reports data */
+		status = 0;
+		spidev->spi->is_rd_waiting = true;
+		if (0 != wait_event_freezable(spidev->spi->rd_wait, spidev->spi->recv_done)) {
+			/* interrupted by a signal: stop the slave read, report 0 */
+			if (spidev->spi->controller->spi_slave_rd_stop)
+				spidev->spi->controller->spi_slave_rd_stop(spidev->spi);
+			spidev->spi->is_rd_waiting = false;
+			return status;
+		} else {
+			spidev->spi->recv_done = false;
+			spidev->spi->is_rd_waiting = false;
+		}
+	}
+	mutex_lock(&spidev->buf_lock);
+	if (spidev->spi->rd_pos < spidev->spi->recv_pos) {
+		/* unread data is one contiguous region [rd_pos, recv_pos) */
+		total = spidev->spi->recv_pos - spidev->spi->rd_pos;
+		status = (total > count) ? count : total;
+
+		missing = copy_to_user(buf, spidev->rx_buffer + spidev->spi->rd_pos, status);
+		if (missing == status) {
+			status = -EFAULT;
+		} else {
+			status = status - missing;
+			spidev->spi->rd_pos += status;
+		}
+	} else if (spidev->spi->rd_pos > spidev->spi->recv_pos) {
+		/* unread data wraps around the end of the ring */
+		total = bufsiz - (spidev->spi->rd_pos - spidev->spi->recv_pos);
+		status = (total > count) ? count : total;
+
+		if ((spidev->spi->rd_pos + status) <= bufsiz) {
+			/* still one contiguous chunk up to the buffer end */
+			missing = copy_to_user(buf, spidev->rx_buffer + spidev->spi->rd_pos, status);
+			if (missing == status) {
+				status = -EFAULT;
+			} else {
+				status = status - missing;
+				spidev->spi->rd_pos += status;
+				spidev->spi->rd_pos = spidev->spi->rd_pos % bufsiz;
+			}
+		} else {
+			/* two chunks: tail of the ring, then its head */
+			unsigned long first, rest;
+
+			first = bufsiz - spidev->spi->rd_pos;
+			rest = status - first;
+
+			missing = copy_to_user(buf, spidev->rx_buffer + spidev->spi->rd_pos, first);
+			if (missing) {
+				/* Fault in the first chunk: consume and report only
+				 * what was copied, and do NOT attempt the second
+				 * chunk.  (The old code set status = -EFAULT and
+				 * carried on, computing a negative "rest" and
+				 * corrupting rd_pos.) */
+				status = first - missing;
+				if (status == 0)
+					status = -EFAULT;
+				else
+					spidev->spi->rd_pos = (spidev->spi->rd_pos + status) % bufsiz;
+			} else {
+				missing = copy_to_user(buf + first, spidev->rx_buffer, rest);
+				status = status - missing;
+				spidev->spi->rd_pos = rest - missing;
+			}
+		}
+	}
+	#else
+	mutex_lock(&spidev->buf_lock);
+	if (spidev->rd_from_rx_buffer)
+		status = count;	/* data was captured during the previous write */
+	else
+		status = spidev_sync_read(spidev, count);
+
+	if (status > 0) {
+		missing = copy_to_user(buf, spidev->rx_buffer, status);
+		if (missing == status)
+			status = -EFAULT;
+		else
+			status = status - missing;
+	}
+	#endif
+	mutex_unlock(&spidev->buf_lock);
+
+	return status;
+}
+/* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme end*/
+
+/* Write-only message with current device setup.  When
+ * rd_from_rx_buffer is set the write is issued full duplex so the
+ * incoming bytes are captured for a later read. */
+static ssize_t
+spidev_write(struct file *filp, const char __user *buf,
+		size_t count, loff_t *f_pos)
+{
+	struct spidev_data	*spidev;
+	ssize_t			status = -EFAULT;
+
+	/* chipselect only toggles at start or end of operation */
+	if (count > bufsiz)
+		return -EMSGSIZE;
+
+	spidev = filp->private_data;
+
+	mutex_lock(&spidev->buf_lock);
+	if (copy_from_user(spidev->tx_buffer, buf, count) == 0) {
+		status = spidev->rd_from_rx_buffer
+			? spidev_sync_write_and_read(spidev, count)
+			: spidev_sync_write(spidev, count);
+	}
+	mutex_unlock(&spidev->buf_lock);
+
+	return status;
+}
+
/* Translate an array of user-supplied spi_ioc_transfer descriptors into a
 * kernel spi_message, execute it synchronously, and copy RX data back to
 * userspace.  Returns the total transfer length on success or a negative
 * errno.  All TX/RX data is staged through the per-device bounce buffers,
 * which bounds the aggregate transfer size to @bufsiz in each direction.
 */
static int spidev_message(struct spidev_data *spidev,
		struct spi_ioc_transfer *u_xfers, unsigned n_xfers)
{
	struct spi_message	msg;
	struct spi_transfer	*k_xfers;
	struct spi_transfer	*k_tmp;
	struct spi_ioc_transfer *u_tmp;
	unsigned		n, total, tx_total, rx_total;
	u8			*tx_buf, *rx_buf;
	int			status = -EFAULT;

	spi_message_init(&msg);
	k_xfers = kcalloc(n_xfers, sizeof(*k_tmp), GFP_KERNEL);
	if (k_xfers == NULL)
		return -ENOMEM;

	/* Construct spi_message, copying any tx data to bounce buffer.
	 * We walk the array of user-provided transfers, using each one
	 * to initialize a kernel version of the same transfer.
	 */
	tx_buf = spidev->tx_buffer;
	rx_buf = spidev->rx_buffer;
	total = 0;
	tx_total = 0;
	rx_total = 0;
	for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
			n;
			n--, k_tmp++, u_tmp++) {
		/* Ensure that also following allocations from rx_buf/tx_buf will meet
		 * DMA alignment requirements.
		 */
		unsigned int len_aligned = ALIGN(u_tmp->len, ARCH_KMALLOC_MINALIGN);

		k_tmp->len = u_tmp->len;

		total += k_tmp->len;
		/* Since the function returns the total length of transfers
		 * on success, restrict the total to positive int values to
		 * avoid the return value looking like an error.  Also check
		 * each transfer length to avoid arithmetic overflow.
		 */
		if (total > INT_MAX || k_tmp->len > INT_MAX) {
			status = -EMSGSIZE;
			goto done;
		}

		if (u_tmp->rx_buf) {
			/* this transfer needs space in RX bounce buffer */
			rx_total += len_aligned;
			if (rx_total > bufsiz) {
				status = -EMSGSIZE;
				goto done;
			}
			k_tmp->rx_buf = rx_buf;
			rx_buf += len_aligned;
		}
		if (u_tmp->tx_buf) {
			/* this transfer needs space in TX bounce buffer */
			tx_total += len_aligned;
			if (tx_total > bufsiz) {
				status = -EMSGSIZE;
				goto done;
			}
			k_tmp->tx_buf = tx_buf;
			if (copy_from_user(tx_buf, (const u8 __user *)
						(uintptr_t) u_tmp->tx_buf,
					u_tmp->len))
				goto done;
			tx_buf += len_aligned;
		}

		/* Mirror the remaining per-transfer knobs from the user
		 * descriptor; a zero speed falls back to the fd's default.
		 */
		k_tmp->cs_change = !!u_tmp->cs_change;
		k_tmp->tx_nbits = u_tmp->tx_nbits;
		k_tmp->rx_nbits = u_tmp->rx_nbits;
		k_tmp->bits_per_word = u_tmp->bits_per_word;
		k_tmp->delay.value = u_tmp->delay_usecs;
		k_tmp->delay.unit = SPI_DELAY_UNIT_USECS;
		k_tmp->speed_hz = u_tmp->speed_hz;
		k_tmp->word_delay.value = u_tmp->word_delay_usecs;
		k_tmp->word_delay.unit = SPI_DELAY_UNIT_USECS;
		if (!k_tmp->speed_hz)
			k_tmp->speed_hz = spidev->speed_hz;
#ifdef VERBOSE
		dev_dbg(&spidev->spi->dev,
			"  xfer len %u %s%s%s%dbits %u usec %u usec %uHz\n",
			k_tmp->len,
			k_tmp->rx_buf ? "rx " : "",
			k_tmp->tx_buf ? "tx " : "",
			k_tmp->cs_change ? "cs " : "",
			k_tmp->bits_per_word ? : spidev->spi->bits_per_word,
			k_tmp->delay.value,
			k_tmp->word_delay.value,
			k_tmp->speed_hz ? : spidev->spi->max_speed_hz);
#endif
		spi_message_add_tail(k_tmp, &msg);
	}

	status = spidev_sync(spidev, &msg);
	if (status < 0)
		goto done;

	/* copy any rx data out of bounce buffer */
	for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
			n;
			n--, k_tmp++, u_tmp++) {
		if (u_tmp->rx_buf) {
			if (copy_to_user((u8 __user *)
					(uintptr_t) u_tmp->rx_buf, k_tmp->rx_buf,
					u_tmp->len)) {
				status = -EFAULT;
				goto done;
			}
		}
	}
	status = total;

done:
	kfree(k_xfers);
	return status;
}
+
+static struct spi_ioc_transfer *
+spidev_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc,
+		unsigned *n_ioc)
+{
+	u32	tmp;
+
+	/* Check type, command number and direction */
+	if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC
+			|| _IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0))
+			|| _IOC_DIR(cmd) != _IOC_WRITE)
+		return ERR_PTR(-ENOTTY);
+
+	tmp = _IOC_SIZE(cmd);
+	if ((tmp % sizeof(struct spi_ioc_transfer)) != 0)
+		return ERR_PTR(-EINVAL);
+	*n_ioc = tmp / sizeof(struct spi_ioc_transfer);
+	if (*n_ioc == 0)
+		return NULL;
+
+	/* copy into scratch area */
+	return memdup_user(u_ioc, tmp);
+}
+
/* Per-fd ioctl handler.  Implements the standard spidev SPI_IOC_RD_* /
 * SPI_IOC_WR_* configuration calls, a few vendor extensions
 * (SPI_IOC_*_RD_DATA_FROM, SPI_IOC_RD_BLOCK_RELEASE and the
 * TEST_SWAP_KERNEL_AND_USER debug commands), and falls through to the
 * variable-size SPI_IOC_MESSAGE(n) transfer request in the default case.
 */
static long
spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int			retval = 0;
	struct spidev_data	*spidev;
	struct spi_device	*spi;
	u32			tmp;
	unsigned		n_ioc;
	struct spi_ioc_transfer	*ioc;

	/* Check type and command number */
	if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC)
		return -ENOTTY;

	/* guard against device removal before, or while,
	 * we issue this ioctl.
	 */
	spidev = filp->private_data;
	spin_lock_irq(&spidev->spi_lock);
	spi = spi_dev_get(spidev->spi);
	spin_unlock_irq(&spidev->spi_lock);

	if (spi == NULL)
		return -ESHUTDOWN;

	/* use the buffer lock here for triple duty:
	 *  - prevent I/O (from us) so calling spi_setup() is safe;
	 *  - prevent concurrent SPI_IOC_WR_* from morphing
	 *    data fields while SPI_IOC_RD_* reads them;
	 *  - SPI_IOC_MESSAGE needs the buffer locked "normally".
	 */
	mutex_lock(&spidev->buf_lock);

	switch (cmd) {
	/* read requests */
	case SPI_IOC_RD_MODE:
		retval = put_user(spi->mode & SPI_MODE_MASK,
					(__u8 __user *)arg);
		break;
	case SPI_IOC_RD_MODE32:
		retval = put_user(spi->mode & SPI_MODE_MASK,
					(__u32 __user *)arg);
		break;
	case SPI_IOC_RD_LSB_FIRST:
		retval = put_user((spi->mode & SPI_LSB_FIRST) ?  1 : 0,
					(__u8 __user *)arg);
		break;
	case SPI_IOC_RD_BITS_PER_WORD:
		retval = put_user(spi->bits_per_word, (__u8 __user *)arg);
		break;
	case SPI_IOC_RD_MAX_SPEED_HZ:
		retval = put_user(spidev->speed_hz, (__u32 __user *)arg);
		break;
	/* vendor extension: report whether reads are served from the RX
	 * bounce buffer instead of issuing a fresh device transfer */
	case SPI_IOC_RD_RD_DATA_FROM:
		retval = put_user(spidev->rd_from_rx_buffer, (__u32 __user *)arg);
		break;
	
#ifdef TEST_SWAP_KERNEL_AND_USER
	/* debug aid: report the current level of the handshake interrupt GPIO */
	case SPI_IOC_RD_INT_ST:
		tmp = gpio_get_value(spidev->gpio_int);
		retval = put_user(tmp, (__u32 __user *)arg);
		break;
#endif
	/* write requests */
	case SPI_IOC_WR_MODE:
	case SPI_IOC_WR_MODE32:
		if (cmd == SPI_IOC_WR_MODE)
			retval = get_user(tmp, (u8 __user *)arg);
		else
			retval = get_user(tmp, (u32 __user *)arg);
		if (retval == 0) {
			struct spi_controller *ctlr = spi->controller;
			u32	save = spi->mode;

			if (tmp & ~SPI_MODE_MASK) {
				retval = -EINVAL;
				break;
			}

			/* with a GPIO chipselect the CS polarity is managed
			 * by the core; force SPI_CS_HIGH */
			if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods &&
			    ctlr->cs_gpiods[spi->chip_select])
				tmp |= SPI_CS_HIGH;

			tmp |= spi->mode & ~SPI_MODE_MASK;
			spi->mode = (u16)tmp;
			retval = spi_setup(spi);
			if (retval < 0)
				spi->mode = save;	/* roll back on failure */
			else
				dev_dbg(&spi->dev, "spi mode %x\n", tmp);
		}
		break;
	case SPI_IOC_WR_LSB_FIRST:
		retval = get_user(tmp, (__u8 __user *)arg);
		if (retval == 0) {
			u32	save = spi->mode;

			if (tmp)
				spi->mode |= SPI_LSB_FIRST;
			else
				spi->mode &= ~SPI_LSB_FIRST;
			retval = spi_setup(spi);
			if (retval < 0)
				spi->mode = save;
			else
				dev_dbg(&spi->dev, "%csb first\n",
						tmp ? 'l' : 'm');
		}
		break;
	case SPI_IOC_WR_BITS_PER_WORD:
		retval = get_user(tmp, (__u8 __user *)arg);
		if (retval == 0) {
			u8	save = spi->bits_per_word;

			spi->bits_per_word = tmp;
			retval = spi_setup(spi);
			if (retval < 0)
				spi->bits_per_word = save;
			else
				dev_dbg(&spi->dev, "%d bits per word\n", tmp);
		}
		break;
	case SPI_IOC_WR_MAX_SPEED_HZ:
		retval = get_user(tmp, (__u32 __user *)arg);
		if (retval == 0) {
			u32	save = spi->max_speed_hz;

			spi->max_speed_hz = tmp;
			retval = spi_setup(spi);
			if (retval == 0) {
				spidev->speed_hz = tmp;
				dev_dbg(&spi->dev, "%d Hz (max)\n",
					spidev->speed_hz);
			} else {
				spi->max_speed_hz = save;
			}
		}
		break;
	/* vendor extension: select the source for subsequent reads */
	case SPI_IOC_WR_RD_DATA_FROM:
		retval = get_user(tmp, (__u8 __user *)arg);
		if (retval == 0) {
			spidev->rd_from_rx_buffer = tmp;
			dev_dbg(&spi->dev, "RD DATA FROM %s \n",
					spidev->rd_from_rx_buffer ? "RX_BUFFER":"DEVICE");
		}
		break;
/* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme start */
#ifdef SPI_SLAVE_FOR_YK
	/* wake a reader blocked in the slave-mode read path */
	case SPI_IOC_RD_BLOCK_RELEASE:
		if(spidev->spi->is_rd_waiting == true) {
				wake_up(&spidev->spi->rd_wait);
                                spidev->spi->recv_done = 1;
		}
		break;
#endif
/* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme end */
#ifdef TEST_SWAP_KERNEL_AND_USER
	/* debug aid: record the PID to be signalled by the test harness */
	case SPI_IOC_WR_SIG_PID:
		retval = get_user(tmp, (__u32 __user *)arg);
		if (retval == 0) {
			spidev->pid = tmp;
			dev_dbg(&spi->dev, "SET SIG PID %d \n",
					spidev->pid);
		}else{
			printk("%s %d %d \r\n",__FUNCTION__,__LINE__,retval);
		}
		
		break;
#endif
	default:
		/* segmented and/or full-duplex I/O request */
		/* Check message and copy into scratch area */
		ioc = spidev_get_ioc_message(cmd,
				(struct spi_ioc_transfer __user *)arg, &n_ioc);
		if (IS_ERR(ioc)) {
			retval = PTR_ERR(ioc);
			break;
		}
		if (!ioc)
			break;	/* n_ioc is also 0 */

		/* translate to spi_message, execute */
		retval = spidev_message(spidev, ioc, n_ioc);
		kfree(ioc);
		break;
	}

	mutex_unlock(&spidev->buf_lock);
	spi_dev_put(spi);
	return retval;
}
+
+#ifdef CONFIG_COMPAT
/* 32-bit compat path for SPI_IOC_MESSAGE(n): copies the transfer array,
 * widens the 32-bit tx/rx buffer pointers via compat_ptr(), then executes
 * through the same spidev_message() path as the native ioctl.
 */
static long
spidev_compat_ioc_message(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	struct spi_ioc_transfer __user	*u_ioc;
	int				retval = 0;
	struct spidev_data		*spidev;
	struct spi_device		*spi;
	unsigned			n_ioc, n;
	struct spi_ioc_transfer		*ioc;

	u_ioc = (struct spi_ioc_transfer __user *) compat_ptr(arg);

	/* guard against device removal before, or while,
	 * we issue this ioctl.
	 */
	spidev = filp->private_data;
	spin_lock_irq(&spidev->spi_lock);
	spi = spi_dev_get(spidev->spi);
	spin_unlock_irq(&spidev->spi_lock);

	if (spi == NULL)
		return -ESHUTDOWN;

	/* SPI_IOC_MESSAGE needs the buffer locked "normally" */
	mutex_lock(&spidev->buf_lock);

	/* Check message and copy into scratch area */
	ioc = spidev_get_ioc_message(cmd, u_ioc, &n_ioc);
	if (IS_ERR(ioc)) {
		retval = PTR_ERR(ioc);
		goto done;
	}
	if (!ioc)
		goto done;	/* n_ioc is also 0 */

	/* Convert buffer pointers */
	for (n = 0; n < n_ioc; n++) {
		ioc[n].rx_buf = (uintptr_t) compat_ptr(ioc[n].rx_buf);
		ioc[n].tx_buf = (uintptr_t) compat_ptr(ioc[n].tx_buf);
	}

	/* translate to spi_message, execute */
	retval = spidev_message(spidev, ioc, n_ioc);
	kfree(ioc);

done:
	mutex_unlock(&spidev->buf_lock);
	spi_dev_put(spi);
	return retval;
}
+
+static long
+spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	if (_IOC_TYPE(cmd) == SPI_IOC_MAGIC
+			&& _IOC_NR(cmd) == _IOC_NR(SPI_IOC_MESSAGE(0))
+			&& _IOC_DIR(cmd) == _IOC_WRITE)
+		return spidev_compat_ioc_message(filp, cmd, arg);
+
+	return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#else
+#define spidev_compat_ioctl NULL
+#endif /* CONFIG_COMPAT */
+
+static int spidev_open(struct inode *inode, struct file *filp)
+{
+	struct spidev_data	*spidev;
+	int			status = -ENXIO;
+	struct spi_device	*spi;
+	
+	mutex_lock(&device_list_lock);
+
+	list_for_each_entry(spidev, &device_list, device_entry) {
+		if (spidev->devt == inode->i_rdev) {
+			status = 0;
+			break;
+		}
+	}
+
+	if (status) {
+		pr_debug("spidev: nothing for minor %d\n", iminor(inode));
+		goto err_find_dev;
+	}
+
+	if (!spidev->tx_buffer) {
+		spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+		if (!spidev->tx_buffer) {
+			dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+			status = -ENOMEM;
+			goto err_find_dev;
+		}
+	}
+
+	if (!spidev->rx_buffer) {
+		spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+		if (!spidev->rx_buffer) {
+			dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+			status = -ENOMEM;
+			goto err_alloc_rx_buf;
+		}
+	}
+
+        /* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme start */
+	#ifdef SPI_SLAVE_FOR_YK
+	if(spidev->rx_buffer) {
+		spidev->spi->rx_buf = spidev->rx_buffer;
+		if(spidev->spi->controller->spi_slave_rd_start)
+			spidev->spi->controller->spi_slave_rd_start(spidev->spi);
+	}
+	#endif
+        /* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme end */
+	spidev->users++;
+	filp->private_data = spidev;
+	stream_open(inode, filp);
+
+	mutex_unlock(&device_list_lock);
+
+	
+	spin_lock_irq(&spidev->spi_lock);
+	spi = spi_dev_get(spidev->spi);
+	spin_unlock_irq(&spidev->spi_lock);
+	if(spi && spi->master->slave) 
+		pm_stay_awake(&spi->dev);
+	
+	return 0;
+
+err_alloc_rx_buf:
+	kfree(spidev->tx_buffer);
+	spidev->tx_buffer = NULL;
+err_find_dev:
+	mutex_unlock(&device_list_lock);
+	return status;
+}
+
+static int spidev_release(struct inode *inode, struct file *filp)
+{
+	struct spidev_data	*spidev;
+	int			dofree;
+	struct spi_device	*spi;
+
+	mutex_lock(&device_list_lock);
+	spidev = filp->private_data;
+	filp->private_data = NULL;
+
+	spin_lock_irq(&spidev->spi_lock);
+	/* ... after we unbound from the underlying device? */
+	dofree = (spidev->spi == NULL);
+	spin_unlock_irq(&spidev->spi_lock);
+
+	/* last close? */
+	spidev->users--;
+	if (!spidev->users) {
+
+		spin_lock_irq(&spidev->spi_lock);
+		spi = spi_dev_get(spidev->spi);
+		spin_unlock_irq(&spidev->spi_lock);
+
+                /* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme start */
+		#ifdef SPI_SLAVE_FOR_YK
+		if(spidev->rx_buffer) {
+			if(spi->controller->spi_slave_rd_stop)
+				spi->controller->spi_slave_rd_stop(spi);
+		}
+		#endif

+                /* yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme end */
+		if(spi && spi->master->slave) 
+			pm_relax(&spi->dev);
+		kfree(spidev->tx_buffer);
+		spidev->tx_buffer = NULL;
+
+		kfree(spidev->rx_buffer);
+		spidev->rx_buffer = NULL;
+
+		if (dofree)
+			kfree(spidev);
+		else
+			spidev->speed_hz = spidev->spi->max_speed_hz;
+	}
+#ifdef CONFIG_SPI_SLAVE
+	if (!dofree)
+		spi_slave_abort(spidev->spi);
+#endif
+	mutex_unlock(&device_list_lock);
+	
+	return 0;
+}
+
/* Character-device entry points for the /dev/spidevB.C userspace API. */
static const struct file_operations spidev_fops = {
	.owner =	THIS_MODULE,
	/* REVISIT switch to aio primitives, so that userspace
	 * gets more complete API coverage.  It'll simplify things
	 * too, except for the locking.
	 */
	.write =	spidev_write,
	.read =		spidev_read,
	.unlocked_ioctl = spidev_ioctl,
	.compat_ioctl = spidev_compat_ioctl,
	.open =		spidev_open,
	.release =	spidev_release,
	.llseek =	no_llseek,	/* SPI streams are not seekable */
};
+
+/*-------------------------------------------------------------------------*/
+
+/* The main reason to have this class is to make mdev/udev create the
+ * /dev/spidevB.C character device nodes exposing our userspace API.
+ * It also simplifies memory management.
+ */
+
+static struct class *spidev_class;
+
+#ifdef CONFIG_OF
/* Devicetree compatibles bound to this driver; "zte,spidev" is the
 * vendor addition, the rest mirror the upstream spidev list. */
static const struct of_device_id spidev_dt_ids[] = {
	{ .compatible = "rohm,dh2228fv" },
	{ .compatible = "lineartechnology,ltc2488" },
	{ .compatible = "ge,achc" },
	{ .compatible = "semtech,sx1301" },
	{ .compatible = "lwn,bk4" },
	{ .compatible = "dh,dhcom-board" },
	{ .compatible = "menlo,m53cpld" },
	{ .compatible = "zte,spidev" },
	{},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);
+#endif
+
+#ifdef CONFIG_ACPI
+
+/* Dummy SPI devices not to be used in production systems */
+#define SPIDEV_ACPI_DUMMY	1
+
/* ACPI IDs accepted by this driver (development dummies only, see below). */
static const struct acpi_device_id spidev_acpi_ids[] = {
	/*
	 * The ACPI SPT000* devices are only meant for development and
	 * testing. Systems used in production should have a proper ACPI
	 * description of the connected peripheral and they should also use
	 * a proper driver instead of poking directly to the SPI bus.
	 */
	{ "SPT0001", SPIDEV_ACPI_DUMMY },
	{ "SPT0002", SPIDEV_ACPI_DUMMY },
	{ "SPT0003", SPIDEV_ACPI_DUMMY },
	{},
};
MODULE_DEVICE_TABLE(acpi, spidev_acpi_ids);
+
+static void spidev_probe_acpi(struct spi_device *spi)
+{
+	const struct acpi_device_id *id;
+
+	if (!has_acpi_companion(&spi->dev))
+		return;
+
+	id = acpi_match_device(spidev_acpi_ids, &spi->dev);
+	if (WARN_ON(!id))
+		return;
+
+	if (id->driver_data == SPIDEV_ACPI_DUMMY)
+		dev_warn(&spi->dev, "do not use this driver in production systems!\n");
+}
+#else
+static inline void spidev_probe_acpi(struct spi_device *spi) {}
+#endif
+
+#ifdef SPIDEV_DEBUG
/* Declare a kobj_attribute named <_name>_attr with 0644 permissions,
 * wired to the <_name>_show / <_name>_store handlers. */
#define SPIDEV_ATTR(_name) \
static struct kobj_attribute _name##_attr = {	\
	.attr	= {				\
		.name = __stringify(_name),	\
		.mode = 0644,			\
	},					\
	.show	= _name##_show,			\
	.store	= _name##_store,		\
}
+
+
/* Hex-dump @count bytes of @buf to the kernel log, eight bytes per line.
 * Trailing bytes beyond the last full group of eight are not printed.
 * NULL @buf is silently ignored.
 */
static void print_buf_data(void * buf,int count)
{
	unsigned char *bytes = buf;
	int off;

	if (!bytes)
		return;

	for (off = 0; off + 8 <= count; off += 8)
		printk("%02x %02x %02x %02x %02x %02x %02x %02x \r\n",
		       bytes[off], bytes[off + 1], bytes[off + 2], bytes[off + 3],
		       bytes[off + 4], bytes[off + 5], bytes[off + 6], bytes[off + 7]);
}
+
+
/* Handshake frame exchanged before each data transfer in the debug test
 * path: head/tail markers bracket the payload length.
 * NOTE(review): the struct is not packed, so sizeof() includes padding
 * between the u16/u32 members and the whole struct (including padding)
 * goes on the wire — both peers must share this exact layout; confirm
 * against the remote side before changing.
 */
struct spi_dev_hand_msg{
	unsigned short head;	/* expected MSG_HEAD (0xa5a5) */
	unsigned int len;	/* payload length of the following data frame */
	unsigned short tail;	/* expected MSG_TAIL (0x7e7e) */
};

#define MSG_HEAD 0xa5a5
#define MSG_TAIL 0x7e7e
+
+extern void slave_mode_set(struct spi_device	*spi,unsigned int param);
+extern void set_spi_timing(struct spi_device	*spi,unsigned int param);
+extern int get_spi_rx_fifo(struct spi_device	*spi,unsigned char *buf);
/* Drain the controller RX FIFO into @buf via the external helper.
 * Returns 0 when either argument is NULL, otherwise whatever
 * get_spi_rx_fifo() reports.
 */
static int  spidev_get_rxfifo(struct spi_device	*spi,unsigned char *buf)
{
	if (spi && buf)
		return get_spi_rx_fifo(spi, buf);

	return 0;
}
+
+
/* Seal a debug test frame: compute the additive checksum of the payload
 * bytes [2, len-2) and store it in byte 1.  Bytes 0 and len-1 (frame
 * markers) and the checksum byte itself are excluded.  Returns 0 on
 * success, -1 for a NULL buffer or a frame shorter than 4 bytes.
 */
static int data_to_packet(void * buf,int len)
{
	unsigned char *bytes = (unsigned char *)buf;
	unsigned char checksum = 0;
	int pos;

	if (!bytes || len < 4) {
		printk("%s param err! \n",__FUNCTION__);
		return -1;
	}

	for (pos = 2; pos < len - 2; pos++)
		checksum += bytes[pos];

	bytes[1] = checksum;
	return 0;
}
+
/* Validate a debug test frame produced by data_to_packet(): byte 0 must be
 * 0xa5, byte len-1 must be 0x7e, and byte 1 must equal the additive
 * checksum of bytes [2, len-2).  Returns 0 when the frame is valid, -1 on
 * bad parameters or a failed check.
 */
static int packet_check(void *buf,int len)
{
	unsigned char *bytes = (unsigned char *)buf;
	unsigned char checksum = 0;
	int pos;

	if (!bytes || len < 4) {
		printk("%s param err! \n",__FUNCTION__);
		return -1;
	}

	if (bytes[0] != 0xa5 || bytes[len - 1] != 0x7e)
		return -1;

	for (pos = 2; pos < len - 2; pos++)
		checksum += bytes[pos];

	return (checksum == bytes[1]) ? 0 : -1;
}
+
+
+static int spi_dev_pin_init_test(struct spi_device *spi)
+{
+	struct spidev_data	*spidev = spi_get_drvdata(spi);
+	enum of_gpio_flags flags;
+	static int spi_dev_pin_init_flag = 0; 
+	int status = 0;
+	
+	if(spi_dev_pin_init_flag < 2){
+		spidev->pctrl = devm_pinctrl_get(&spi->dev);
+		if(!spidev->pctrl) {
+			dev_info(&spi->dev,"get dev pctrl failed!\n",status);	
+			return status;
+		}
+
+		spidev->pint_ex = pinctrl_lookup_state(spidev->pctrl, "int_ex");
+		if (IS_ERR(spidev->pint_ex)) {
+			dev_err(&spi->dev, "TEST: missing pint_ex \n");
+			return status;
+		}
+		if (pinctrl_select_state(spidev->pctrl, spidev->pint_ex) < 0) {
+			dev_err(&spi->dev, "TEST: slect pint_ex \n");
+			return status;
+		}
+		
+		spidev->pgpioex = pinctrl_lookup_state(spidev->pctrl, "ex_gpio");
+		if (IS_ERR(spidev->pgpioex)) {
+			dev_err(&spi->dev, "TEST: missing ex_gpio \n");
+			return status;
+		}
+
+		spidev->gpio_ex = of_get_gpio_flags(spi->dev.of_node, 0, &flags);
+		if (!gpio_is_valid(spidev->gpio_ex)) {
+			dev_err(&spi->dev,"gpio_ex no found,spidev->gpio_ex=%d \n",spidev->gpio_ex);
+			return status;
+		}
+		dev_info(&spi->dev,"gpio_ex found,spidev->gpio_ex=%d \n",spidev->gpio_ex);
+
+		status = gpio_request(spidev->gpio_ex, "gpio_ex");
+		if (status) {
+			pr_info("spidev->gpio_ex request error.\n");
+		}else {
+			gpio_direction_output(spidev->gpio_ex, 1);	
+			dev_info(&spi->dev, "spidev->gpio_ex success \n");
+		}
+		
+		spidev->gpio_int = of_get_gpio_flags(spi->dev.of_node, 1, &flags);
+		if (!gpio_is_valid(spidev->gpio_int)) {
+			dev_err(&spi->dev,"gpio_int no found,spidev->gpio_int=%d \n",spidev->gpio_int);
+			return status;
+		}
+		dev_info(&spi->dev,"gpio_int found,spidev->gpio_int=%d \n",spidev->gpio_int);
+
+		spi_dev_pin_init_flag += 1;
+	}
+	return status;
+		
+}
+
/* Master-side handshake interrupt: fires on activity of the gpio_int input
 * line and decodes the (gpio_ex, gpio_int) level pair.
 *   out=1, in=0 : the slave is requesting a transfer (or, while this side
 *                 is transmitting, the master's own request was seen).
 *   out=0, in=0 : the slave acknowledged our request.
 * The matching waiter is released via the rec_req / wait_req semaphores.
 * NOTE(review): the exact protocol meaning is inferred from the inline
 * comments below — confirm against the peer implementation.
 */
static irqreturn_t spidev_master_hand_shake_irq(int irqno, void *dev_id)
{
	static int count;
	int gpio_in_status = 0,gpio_out_status = 0;
	
	struct spidev_data	*spidev = dev_id;

	gpio_out_status = gpio_get_value(spidev->gpio_ex);
	gpio_in_status = gpio_get_value(spidev->gpio_int);

	//pr_info("hand_shake_irq get = %d %d %d\n", ++count,gpio_out_status,gpio_in_status);

	if(gpio_out_status && !gpio_in_status) {
		if(spidev->tx_flag == 0) {
			up(&spidev->rec_req);   /*receive slave reqeuet*/
		}else {
			pr_info("mmm \r\n");
			up(&spidev->wait_req);     /*first receive master req*/
		}
	}else if(!gpio_out_status && !gpio_in_status) {
		up(&spidev->wait_req);       /*receive slave ack*/
	}else {
		pr_info("recive invalid request\n");
	}
	return IRQ_HANDLED;
}
+
/* Slave-side handshake interrupt: mirror of the master handler with the
 * request/ack roles swapped — out=1,in=0 is the master's request (released
 * via rec_req unless this side is mid-transmit), out=0,in=0 is the
 * master's ack (released via wait_req).
 */
static irqreturn_t spidev_slave_hand_shake_irq(int irqno, void *dev_id)
{
	static int count;
	int gpio_in_status = 0,gpio_out_status = 0;
	
	struct spidev_data	*spidev = dev_id;

	gpio_out_status = gpio_get_value(spidev->gpio_ex);
	gpio_in_status = gpio_get_value(spidev->gpio_int);

	//pr_info("hand_shake_irq get = %d %d %d\n", ++count,gpio_out_status,gpio_in_status);

	if(gpio_out_status && !gpio_in_status)
	{
		if(spidev->tx_flag == 0) {
			up(&spidev->rec_req);     /*first receive master req*/
		}else {
			pr_info("sss \n");
			up(&spidev->wait_req); 
		}
		/*. then set gpio_out low as ack. */
	}else if(!gpio_out_status && !gpio_in_status) {
		up(&spidev->wait_req);   /*receive master ack*/
	}else {
		pr_info("recive invalid request\n");
	}
	return IRQ_HANDLED;
}
+
+
+
/* One-shot (per two calls) IRQ setup for the handshake debug test: maps
 * interrupt 0 from the device node and installs the master or slave
 * handshake handler depending on whether this device is "spi1.0".
 * Returns 0 on success or a negative errno.
 */
static int spi_dev_irq_init_test(struct spi_device *spi)
{
	struct spidev_data	*spidev = spi_get_drvdata(spi);
	static int spi_dev_irq_init_flag = 0; 
	int irq = 0,ret = 0;

	if(spi_dev_irq_init_flag < 2) {
		if(!spi || !spidev) {
			ret = -ENOENT;
			return ret;
		}	
		/* irq_of_parse_and_map() returns 0 on failure */
		irq = irq_of_parse_and_map(spi->dev.of_node, 0);
		if (irq <= 0) {
		    dev_err(&spi->dev, "ERROR: invalid interrupt number, irq = %d\n",irq);
			return -EBUSY;
		}
		spidev->irq = irq;
		dev_info(&spi->dev, "used interrupt num is %d\n",  spidev->irq);
		/* "spi1.0" is the bus master in this test setup */
		if(strcmp(dev_name(&spi->dev),"spi1.0")==0) {	
			ret = devm_request_irq(&spi->dev, spidev->irq, spidev_master_hand_shake_irq,
					 	0, dev_name(&spi->dev), spidev);
		}else { 
			ret = devm_request_irq(&spi->dev, spidev->irq, spidev_slave_hand_shake_irq,
					 	0, dev_name(&spi->dev), spidev);
		}
		if (ret < 0) {
			dev_err(&spi->dev, "probe - cannot get IRQ (%d)\n", ret);
			return ret;
		}
		spi_dev_irq_init_flag += 1;
	}
	return ret;
}
+
/* Exchange handshake frames full-duplex: transmit a header announcing a
 * payload of @len bytes while simultaneously receiving the peer's header
 * into @recv_msg.  Returns the spidev_sync() result.
 * NOTE(review): declared size_t but spidev_sync() may plausibly return a
 * negative errno — callers ignore the value, but confirm before relying
 * on it.
 */
static size_t spi_dev_send_handle_pack_test(struct spidev_data	*spidev,int len,struct spi_dev_hand_msg *recv_msg)
{
	struct spi_dev_hand_msg send_msg={0};
	
	send_msg.head = MSG_HEAD;
	send_msg.len = len;
	send_msg.tail = MSG_TAIL;
	
	struct spi_transfer	t = {
		.tx_buf     = &send_msg,
		.rx_buf		= recv_msg,
		.len		= sizeof(struct spi_dev_hand_msg),
		.speed_hz	= spidev->speed_hz,
	};
	struct spi_message	m;
	
	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	return spidev_sync(spidev, &m);
}
+
+
+
/* Receive the peer's handshake header into @recv_msg while clocking out a
 * zeroed frame.  @len is unused; the transfer length is always
 * sizeof(struct spi_dev_hand_msg).  Returns the spidev_sync() result.
 */
static size_t spi_dev_recv_handle_pack_test(struct spidev_data	*spidev,int len,struct spi_dev_hand_msg *recv_msg)
{
	struct spi_dev_hand_msg send_msg={0};
	
	struct spi_transfer	t = {
		.tx_buf     = &send_msg,
		.rx_buf		= recv_msg,
		.len		= sizeof(struct spi_dev_hand_msg),
		.speed_hz	= spidev->speed_hz,
	};
	struct spi_message	m;
	
	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	return spidev_sync(spidev, &m);
}
+
+
+static void wait_spi_bus_idle_status_test(struct spidev_data	*spidev)
+{
+	int count = 0;
+	
+	do {
+		spin_lock_irq(&spidev->tx_flag_lock);
+		if( gpio_get_value(spidev->gpio_ex) && gpio_get_value(spidev->gpio_int))
+			break;
+		else {
+			spin_unlock(&spidev->tx_flag_lock);
+			usleep_range(50,100);
+			count++;
+			if(count%20 == 0) {
+				printk("bus busy %d us cnts.outst(%d),intst(%d).\n",count*50,
+								gpio_get_value(spidev->gpio_ex),gpio_get_value(spidev->gpio_int));
+			}
+			
+		}
+	}while(1);
+	spidev->tx_flag = 1;
+	spin_unlock_irq(&spidev->tx_flag_lock);
+	
+}
+
+static size_t spi_dev_send_one_pack_test(struct spi_device *spi,size_t len) {
+
+	struct spidev_data	*spidev = spi_get_drvdata(spi);	
+	struct spi_dev_hand_msg recv_msg={0};
+	size_t status;
+	int ret;
+	int rx_data_flag = 0;
+	if(len>4096)
+		printk("len(%d) err: \r\n",len);
+	wait_spi_bus_idle_status_test(spidev);
+	gpio_set_value(spidev->gpio_ex,0);
+	ret = down_timeout(&spidev->wait_req, msecs_to_jiffies(50));  /*first ack m= 0,s=0*/
+	if (ret < 0) {
+		printk("first ack timeout\n");
+	}
+	spi_dev_send_handle_pack_test(spidev,len,&recv_msg);  /*send head msg*/
+	if(recv_msg.head == MSG_HEAD && recv_msg.tail == MSG_TAIL) {
+		len = (recv_msg.len >= len) ? recv_msg.len : len; 
+		spidev->rx_cnt_in_tx_thread++;
+		rx_data_flag = 1;
+		if(len>4096)
+			printk("len(%d) err: \r\n",len);
+	}
+	ret = down_timeout(&spidev->wait_req, msecs_to_jiffies(100)); 
+	if (ret < 0) {
+		printk("second ack timeout\n");
+	}
+	//down(&spidev->wait_req);  /*second ack m= 0,s=0*/
+	status = spidev_sync_write_and_read(spidev,len);	
+	if(rx_data_flag && spidev->is_data_check) {
+		ret = packet_check(spidev->rx_buffer,recv_msg.len);
+		if(ret) {
+			spidev->rx_data_check_err_cnt++;
+			//dev_info(&spi->dev,"%s packet check err \r\n",__FUNCTION__);
+		}else {
+			spidev->rx_data_check_ok_cnt++;
+			//dev_info(&spi->dev,"%s packet check success \r\n",__FUNCTION__);
+		}
+	}
+	spidev->tx_flag = 0;
+	gpio_set_value(spidev->gpio_ex,1);
+	return status;
+}
+
+
+static size_t spi_dev_slave_send_one_pack_test(struct spi_device *spi,size_t len) {
+
+	struct spidev_data	*spidev = spi_get_drvdata(spi);	
+	struct spi_dev_hand_msg recv_msg={0};
+	size_t status;
+	int ret;
+	int rx_data_flag = 0;
+	if(len>4096)
+		printk("len(%d) err: \r\n",len);
+	wait_spi_bus_idle_status_test(spidev);
+	up(&spidev->rec_head_msg_req);/*response master tx/rx dma set */
+	//printk("%s %d \r\n",__FUNCTION__,__LINE__);
+	spi_dev_send_handle_pack_test(spidev,len,&recv_msg);  /*send head msg*/
+	if(recv_msg.head == MSG_HEAD && recv_msg.tail == MSG_TAIL) {
+		if(len != recv_msg.len) {
+			//printk("%s len=%d rec_len=%d\n",__FUNCTION__,len,recv_msg.len);
+			len = (recv_msg.len >= len) ? recv_msg.len : len; 
+
+		}
+		spidev->rx_cnt_in_tx_thread++;
+		rx_data_flag = 1;
+		if(len>4096)
+			printk("len(%d) err: \r\n",len);
+	}
+
+	//down(&spidev->wait_req);    
+	ret = down_timeout(&spidev->wait_req, msecs_to_jiffies(100)); 
+	if (ret < 0) {
+		printk("wait req timeout\n");
+	}
+	up(&spidev->rec_data_msg_req);/*response master tx/rx dma set */
+	//printk("%s %d \r\n",__FUNCTION__,__LINE__);
+	status = spidev_sync_write_and_read(spidev,len);
+	if(rx_data_flag && spidev->is_data_check) {
+		ret = packet_check(spidev->rx_buffer,recv_msg.len);
+		if(ret) {
+			spidev->rx_data_check_err_cnt++;
+			//dev_info(&spi->dev,"%s packet check err \r\n",__FUNCTION__);
+		}else {
+			spidev->rx_data_check_ok_cnt++;
+			//dev_info(&spi->dev,"%s packet check success \r\n",__FUNCTION__);
+		}
+	}
+	spidev->tx_flag = 0;
+	gpio_set_value(spidev->gpio_ex,1);
+	return status;
+}
+
/* Helper kthread (slave side): each time rec_head_msg_req is signalled,
 * drive gpio_ex low to acknowledge the master before the header exchange.
 * Runs forever; the trailing return is unreachable.
 */
static int spi_dev_slave_read_hand_msg_process_test(void *arg)
{
	struct spi_device *spi = (struct spi_device *)arg;
	struct spidev_data	*spidev = spi_get_drvdata(spi);	

	while(1) {
		down(&spidev->rec_head_msg_req);
		//printk("%s %d \r\n",__FUNCTION__,__LINE__);
		gpio_set_value(spidev->gpio_ex,0);
	}	
	return 0;
}
+
+
+static int spi_dev_slave_read_data_process_test(void *arg)
+{
+	struct spi_device *spi = (struct spi_device *)arg;
+	struct spidev_data	*spidev = spi_get_drvdata(spi);	
+	struct spi_dev_hand_msg *recv_msg=(struct spi_dev_hand_msg *)spidev->rx_buffer;
+
+	while(1) {
+		down(&spidev->rec_data_msg_req);
+		//printk("%s %d \r\n",__FUNCTION__,__LINE__);
+		gpio_set_value(spidev->gpio_ex,1);
+		usleep_range(50,100);
+		gpio_set_value(spidev->gpio_ex,0);
+	}
+	return 0;
+}
+
+static int spi_dev_master_read_thread_test(void *arg)
+{
+	struct spi_device *spi = (struct spi_device *)arg;
+	struct spidev_data	*spidev = spi_get_drvdata(spi);	
+	pid_t kid;
+	struct pid *pid;
+	struct task_struct * tsk;
+	struct spi_dev_hand_msg recv_msg;
+	int ret;
+	if(!spidev){
+		dev_info(&spi->dev,"spi_dev return \r\n");
+		return 0;
+	}
+	while(1) {
+		
+		down(&spidev->rec_req);   /*first receive slave req*/  
+		spi_dev_recv_handle_pack_test(spidev, sizeof(struct spi_dev_hand_msg), &recv_msg);
+		gpio_set_value(spidev->gpio_ex,0);
+		ret = down_timeout(&spidev->wait_req, msecs_to_jiffies(100)); 
+		if (ret < 0) {
+			printk("%s wait req timeout\n",__FUNCTION__);
+		}
+		if(recv_msg.head == MSG_HEAD && recv_msg.tail == MSG_TAIL) {
+			int len = recv_msg.len;
+			spidev_sync_write_and_read(spidev, len);  /*set dma and recv data msg*/
+			spidev->rx_cnt_in_rx_thread++;
+			if(spidev->is_data_check) {
+				ret = packet_check(spidev->rx_buffer,len);
+				if(ret) {
+					spidev->rx_data_check_err_cnt++;
+					//dev_info(&spi->dev,"%s packet check err \r\n",__FUNCTION__);
+				}else {
+					spidev->rx_data_check_ok_cnt++;
+					//dev_info(&spi->dev,"%s packet check success \r\n",__FUNCTION__);
+				}
+			}
+			gpio_set_value(spidev->gpio_ex,1);
+			//print_buf_data(spidev->rx_buffer, len);
+		}else {
+			printk("%s data invalid\n",__FUNCTION__);
+			gpio_set_value(spidev->gpio_ex,1);
+		}
+	}
+	return 0;
+}
+
+
+
+
/* Slave-side receive kthread: waits for a master request (rec_req),
 * delegates the gpio_ex ack to the rec_head_msg_req helper thread,
 * exchanges handshake headers, then receives the announced payload
 * full-duplex (pacing the data phase via rec_data_msg_req) and optionally
 * checksums it.  Runs forever; the trailing return is unreachable.
 */
static int spi_dev_slave_read_thread_test(void *arg)
{
	struct spi_device *spi = (struct spi_device *)arg;
	struct spidev_data	*spidev = spi_get_drvdata(spi);	
	int ret;
	struct spi_dev_hand_msg recv_msg;
	
	if(!spidev){
		dev_info(&spi->dev,"spi_dev return \r\n");
		return 0;
	}
	while(1) {
		
		down(&spidev->rec_req);   /*first receive master req*/  
		//printk("%s %d \r\n",__FUNCTION__,__LINE__);
		up(&spidev->rec_head_msg_req);/*response master tx/rx dma set */
		//printk("%s %d \r\n",__FUNCTION__,__LINE__);
		//spidev_sync_write_and_read(spidev,sizeof(struct spi_dev_hand_msg)); /*set dma and recv head msg*/
		spi_dev_recv_handle_pack_test(spidev, sizeof(struct spi_dev_hand_msg), &recv_msg);
		//recv_msg=(struct spi_dev_hand_msg *)spidev->rx_buffer;
		if(recv_msg.head == MSG_HEAD && recv_msg.tail == MSG_TAIL) {
			int len = recv_msg.len;
			up(&spidev->rec_data_msg_req);      /*response master tx/rx dma set */
			//printk("%s %d %d \r\n",__FUNCTION__,__LINE__,len);
			spidev_sync_write_and_read(spidev, len);  /*set dma and recv data msg*/
			if(spidev->is_data_check) {
				ret = packet_check(spidev->rx_buffer,len);
				if(ret) {
					spidev->rx_data_check_err_cnt++;
					//dev_info(&spi->dev,"%s packet check err \r\n",__FUNCTION__);
				}else {
					spidev->rx_data_check_ok_cnt++;
					//dev_info(&spi->dev,"%s packet check success \r\n",__FUNCTION__);
				}
			}
			gpio_set_value(spidev->gpio_ex,1);
			spidev->rx_cnt_in_rx_thread++;
			//print_buf_data(spidev->rx_buffer, len);
		}else {
			/* bad header: still release the data-phase helper so it
			 * does not stall, then log and re-raise gpio_ex */
			up(&spidev->rec_data_msg_req);
			printk("%s data invalid\n",__FUNCTION__);
			gpio_set_value(spidev->gpio_ex,1);
		}
	}
	return 0;
}
+
+
+static int spidev_debug_test_init(struct spi_device *spi)
+{
+	int ret = 0;
+	struct spidev_data	*spidev = spi_get_drvdata(spi);
+	
+	ret =spi_dev_pin_init_test(spi);
+	if(ret) {
+		dev_info(&spi->dev, "spi_dev_pin_init_test,ret=%d \n",ret);
+		return ret;
+	}
+	spin_lock_init(&spidev->tx_flag_lock);
+	sema_init(&spidev->wait_req, 0);
+	sema_init(&spidev->rec_req, 0);
+	sema_init(&spidev->rec_head_msg_req, 0);
+	sema_init(&spidev->rec_data_msg_req, 0);
+	spidev->tx_flag = 0;
+	spidev->rx_cnt_in_rx_thread = 0;
+	spidev->rx_cnt_in_tx_thread = 0;
+	spidev->is_data_check = false;
+	if (!spidev->tx_buffer) {
+		spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+		if (!spidev->tx_buffer) {
+			dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+			return ret;
+		}
+	} 
+
+	if (!spidev->rx_buffer) {
+		spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+		if (!spidev->rx_buffer) {
+			dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+			kfree(spidev->tx_buffer);
+			spidev->tx_buffer = NULL;
+			return ret;
+		}
+	}
+	
+	if(strcmp(dev_name(&spi->dev),"spi1.0")==0) {	
+		kernel_thread(spi_dev_master_read_thread_test,spi, 0);	/* fork the main thread */
+	}else { 
+		kernel_thread(spi_dev_slave_read_thread_test,spi, 0);  /* fork the main thread */
+		kernel_thread(spi_dev_slave_read_hand_msg_process_test,spi, 0);
+		kernel_thread(spi_dev_slave_read_data_process_test,spi, 0);
+	}
+
+	ret =spi_dev_irq_init_test(spi);
+	if(ret) {
+		dev_info(&spi->dev, "spi_dev_irq_init_test,ret=%d \n",ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+
+static ssize_t spidevinfo_show(struct kobject *kobj, struct kobj_attribute *attr,
+                              char *buf)
+{
+
+	ssize_t count = 0;
+
+	struct device *dev = container_of(kobj, struct device, kobj);
+	//struct platform_device *pdev = container_of(dev, struct platform_device, dev);
+	unsigned char cmd_str[16] = {0};
+	u32 param1,param2,param3;
+	u8 rwaddr,rwsize;
+	int ret,i;
+
+	
+	return count;
+
+	
+}
+extern void get_random_bytes(void * buf, size_t len); 
+static ssize_t spidevinfo_store(struct kobject *kobj, struct kobj_attribute *attr,
+                               const char *buf, size_t n)
+
+{
+	ssize_t ret =0;
+	struct device *dev = container_of(kobj, struct device, kobj);
+	//struct platform_device *pdev = container_of(dev, struct platform_device, dev);
+	struct spi_device *spi = (struct spi_device *)dev;
+	struct spidev_data	*spidev = spi_get_drvdata(spi);
+	unsigned char cmd_str[0x20] = {0};
+	u8 bBuf[32];
+
+	u32 param1 = 0,param2 = 0,param3 = 0;
+	u32 rwaddr =0 ,rwsize = 0;
+	int i;
+    s8 rev          = -1;
+	size_t count = 0;
+    
+	
+	dev_info(&spi->dev, "spidev->speed_hz:%d \n", spi->max_speed_hz);
+
+	sscanf(buf, "%31s %x %x %x", &cmd_str,&param1,&param2,&param3);
+	dev_info(dev, "cmd_str:%s,param1:%x,param2:%x,param3:%x\n",cmd_str,param1,param2,param3);
+
+	dev_info(&spi->dev, "mode %d, %s%s%s%s%u bits/w, %u Hz max --\n",
+		(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
+		(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
+		(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
+		(spi->mode & SPI_3WIRE) ? "3wire, " : "",
+		(spi->mode & SPI_LOOP) ? "loopback, " : "",
+		spi->bits_per_word, spi->max_speed_hz);
+	
+	count = param1;
+	ret = strcmp(cmd_str,"spi_write");	
+	if( ret == 0) {
+		count = param1;
+		if (!spidev->tx_buffer) {
+			spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+			if (!spidev->tx_buffer) {
+				dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+				return n;
+			}
+		} 
+		dev_info(dev, "spidev->tx_buffer=0x%x\n",spidev->tx_buffer);
+		for(i = 0;i<count;i++) {
+			spidev->tx_buffer[i]=i;
+		}
+		print_buf_data(spidev->tx_buffer,count); 
+		ret = spidev_sync_write(spidev, count);
+		if(ret == count) {
+			dev_info(dev, "send len success(len:%d) \n",ret);
+		}
+		kfree(spidev->tx_buffer);
+		spidev->tx_buffer = NULL;
+		dev_info(dev, "spi write end: \n"); 
+	}
+	
+	ret = strcmp(cmd_str,"spi_read");
+	if(ret == 0) {
+		count = param1;
+		if (!spidev->rx_buffer) {
+			spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+			if (!spidev->rx_buffer) {
+				dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+				return n;
+			}
+		} 
+		memset(spidev->rx_buffer,0x0,bufsiz);
+		ret = spidev_sync_read(spidev, count);
+		
+		if(ret == count) {
+			dev_info(dev, "read len success(len:%d) \n",ret);
+			print_buf_data(spidev->rx_buffer,count); 
+		}
+		kfree(spidev->rx_buffer);
+				spidev->rx_buffer = NULL;
+		dev_info(dev, "spi read end: \n"); 
+	}
+
+	ret = strcmp(cmd_str,"write_then_read");
+	if(ret == 0) {
+		count = param1;
+		
+		if (!spidev->tx_buffer) {
+			spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+			if (!spidev->tx_buffer) {
+				dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+				return n;
+			}
+		} 
+
+		if (!spidev->rx_buffer) {
+			spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+			if (!spidev->rx_buffer) {
+				dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+				kfree(spidev->tx_buffer);
+				spidev->tx_buffer = NULL;
+				return n;
+			}
+		}
+		for(i = 0;i<count;i++) {
+			spidev->tx_buffer[i]=i;
+		}
+		//memset(spidev->rx_buffer,0x0,bufsiz);
+		ret = spi_write_then_read(spi, spidev->tx_buffer, count, spidev->rx_buffer, count);
+		
+		if(ret == 0) {
+			dev_info(dev, "spi write data(%d bytes) \n",count);
+			print_buf_data(spidev->tx_buffer,count);
+			dev_info(dev, "spi read data(%d bytes) \n",count);
+			print_buf_data(spidev->rx_buffer,count); 
+		}
+
+		kfree(spidev->tx_buffer);
+				spidev->tx_buffer = NULL; 
+		kfree(spidev->rx_buffer);
+				spidev->rx_buffer = NULL;
+		dev_info(dev, "write_then_read.\n"); 
+		
+	}
+
+	
+	ret = strcmp(cmd_str,"write_and_read");
+	if(ret == 0) {
+		count = param1;
+		if (!spidev->tx_buffer) {
+			spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+			if (!spidev->tx_buffer) {
+				dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+				return n;
+			}
+		} 
+
+		if (!spidev->rx_buffer) {
+			spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+			if (!spidev->rx_buffer) {
+				dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+				kfree(spidev->tx_buffer);
+				spidev->tx_buffer = NULL;
+				return n;
+			}
+		}
+		for(i = 0;i<count;i++) {
+			spidev->tx_buffer[i]=i;
+		}
+		memset(spidev->rx_buffer,0x0,bufsiz);
+		ret = spidev_sync_write_and_read(spidev, count);
+		
+		if(ret == count) {
+			dev_info(dev, "spi write data(%d bytes) \n",ret);
+			print_buf_data(spidev->tx_buffer,count);
+			dev_info(dev, "spi read data(%d bytes) \n",ret);
+			print_buf_data(spidev->rx_buffer,count); 
+			dev_info(dev, "write_and_read.\n"); 
+		}
+#if 0
+		kfree(spidev->tx_buffer);
+				spidev->tx_buffer = NULL; 
+		kfree(spidev->rx_buffer);
+				spidev->rx_buffer = NULL;
+#endif
+		dev_info(dev, "write_and_read.\n"); 
+		
+	}
+	ret = strcmp(cmd_str,"fifo_flush");
+	if(ret == 0) {
+		unsigned char buff[64] ={0};
+		
+		ret = spidev_get_rxfifo(spi,buff);
+		dev_info(dev, "get rx_fifo_len(%d bytes) \n",ret);
+		print_buf_data(buff,ret);
+	}
+
+	ret = strcmp(cmd_str,"timing-set");
+	if(ret == 0) {
+		dev_info(dev, "timing param(%d) \n",param1);
+		set_spi_timing(spi,param1);
+	}
+	
+	ret = strcmp(cmd_str,"loop-en");
+	if(ret == 0) {
+		spi->mode |= SPI_LOOP;
+		spi_setup(spi);
+	}
+	ret = strcmp(cmd_str,"loop-dis");
+	if(ret == 0) {
+		spi->mode &= ~SPI_LOOP;
+		spi_setup(spi);
+	}
+	ret = strcmp(cmd_str,"speed_set");
+	if(ret == 0) {
+		spi->max_speed_hz = param1;
+		spi_setup(spi);
+	}
+
+	ret = strcmp(cmd_str,"mode_set");
+	if(ret == 0) {
+		if(param1 != 0 && param1 != 1 && param1 != 2 && param1 != 3) 
+			dev_info(dev, "param err(%d) \n",param1);
+		else
+			dev_info(dev, "set spi mode(%d) \n",param1);
+		spi->mode &= (~0x3);
+		spi->mode |= param1;
+		ret = spi_setup(spi);
+		dev_info(dev, "set spi mode(0x%x),ret=%d \n",spi->mode,ret);
+	}
+
+
+	ret = strcmp(cmd_str,"slave_mode_set");
+	if(ret == 0) {
+		if(param1 != 0 && param1 != 1 && param1 != 2 && param1 != 3) 
+			dev_info(dev, "param err(%d) \n",param1);
+		else
+			dev_info(dev, "set spi mode(%d) \n",param1);
+		slave_mode_set(spi,param1);
+	}
+	
+	ret = strcmp(cmd_str,"send_msg_rand_len");
+	if(ret == 0) {
+		
+		count = 0;
+		int times = param1;
+		while(times--) {
+
+			get_random_bytes(&count,4);
+			count = (count%0x1000) + 1;
+			if (!spidev->tx_buffer) {
+				spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+				if (!spidev->tx_buffer) {
+					dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+					return n;
+				}
+			} 
+
+			if (!spidev->rx_buffer) {
+				spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+				if (!spidev->rx_buffer) {
+					dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+					kfree(spidev->tx_buffer);
+					spidev->tx_buffer = NULL;
+					return n;
+				}
+			}
+			for(i = 0;i<count;i++) {
+				spidev->tx_buffer[i]=i;
+			}
+			//memset(spidev->rx_buffer,0x0,bufsiz);
+			if(strcmp(dev_name(&spi->dev),"spi1.0")==0) {
+				ret = spi_dev_send_one_pack_test(spi, count);
+			}
+			else { 
+				ret = spi_dev_slave_send_one_pack_test(spi, count);
+			}
+			if(ret == count) {
+			#if 0	
+				dev_info(dev, "spi write data(%d bytes) \n",ret);
+				print_buf_data(spidev->tx_buffer,count);
+				dev_info(dev, "spi read data(%d bytes) \n",ret);
+				print_buf_data(spidev->rx_buffer,count);
+			#endif
+				dev_info(dev, "write_and_read success. retain times:%d rx_cnt_in_tx_thread:%d spidev->rx_cnt_in_rx_thread:%d \n",
+														times,spidev->rx_cnt_in_tx_thread,spidev->rx_cnt_in_rx_thread);
+				
+			}
+			msleep((count%5)+1);
+			//usleep_range(5+(count%10),20);
+		}
+#if 0
+		kfree(spidev->tx_buffer);
+				spidev->tx_buffer = NULL; 
+		kfree(spidev->rx_buffer);
+				spidev->rx_buffer = NULL;
+#endif
+		
+	}
+
+	ret = strcmp(cmd_str,"send_msg_fixed_len");
+	if(ret == 0) {
+		int times = param1;
+		int debug = param3;
+		count = param2;
+		if(count > 4096) {
+			printk("msg_fixed_len(%d bytes) out of range(4KB)\r\n",count);
+			return n;
+		}
+		while(times--) {
+			if (!spidev->tx_buffer) {
+				spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+				if (!spidev->tx_buffer) {
+					dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+					return n;
+				}
+			} 
+
+			if (!spidev->rx_buffer) {
+				spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+				if (!spidev->rx_buffer) {
+					dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+					kfree(spidev->tx_buffer);
+					spidev->tx_buffer = NULL;
+					return n;
+				}
+			}
+
+			get_random_bytes(spidev->tx_buffer,count);
+			//memset(spidev->rx_buffer,0x0,bufsiz);
+			#if 0
+			for(i = 0;i<count;i++) {
+				spidev->tx_buffer[i]=i;
+			}
+			memset(spidev->rx_buffer,0x0,bufsiz);
+			#endif
+			if(strcmp(dev_name(&spi->dev),"spi1.0")==0) {
+				ret = spi_dev_send_one_pack_test(spi, count);
+			}
+			else { 
+				ret = spi_dev_slave_send_one_pack_test(spi, count);
+			}
+			if(ret == count) {
+				if(debug) { 	
+					dev_info(dev, "spi write data(%d bytes) \n",ret);
+					print_buf_data(spidev->tx_buffer,count);
+				}
+				dev_info(dev, "write_and_read success. retain times:%d rx_cnt_in_tx_thread:%d spidev->rx_cnt_in_rx_thread:%d \n",
+														times,spidev->rx_cnt_in_tx_thread,spidev->rx_cnt_in_rx_thread);	
+			}
+			msleep((count%5)+1);
+		}
+#if 0
+		kfree(spidev->tx_buffer);
+				spidev->tx_buffer = NULL; 
+		kfree(spidev->rx_buffer);
+				spidev->rx_buffer = NULL;
+#endif
+		
+	}
+
+	ret = strcmp(cmd_str,"data_check_ctrl");
+	if(ret == 0) {
+		if(param1) {
+			spidev->is_data_check = true;
+			spidev->rx_data_check_ok_cnt = 0;
+			spidev->rx_data_check_err_cnt = 0;
+		}
+		else {
+			spidev->is_data_check = false;
+		}
+		dev_info(dev, "rx_check_ok_cnt:%d rx_check_err_cnt:%d\n",spidev->rx_data_check_ok_cnt,spidev->rx_data_check_err_cnt); 
+	}
+	ret = strcmp(cmd_str,"send_msg_with_check");
+	if(ret == 0) {
+		int times = param1;
+		int debug = param3;
+		count = param2;
+		if(count > 4096 || count < 4) {
+			printk("msg_fixed_len(%d bytes) out of range(4KB)\r\n",count);
+			return n;
+		}
+		while(times--) {
+			if (!spidev->tx_buffer) {
+				spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+				if (!spidev->tx_buffer) {
+					dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+					return n;
+				}
+			} 
+
+			if (!spidev->rx_buffer) {
+				spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+				if (!spidev->rx_buffer) {
+					dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+					kfree(spidev->tx_buffer);
+					spidev->tx_buffer = NULL;
+					return n;
+				}
+			}
+
+			get_random_bytes(spidev->tx_buffer,count);
+			spidev->tx_buffer[0] = 0xa5;
+			spidev->tx_buffer[count-1] = 0x7e;
+			ret = data_to_packet(spidev->tx_buffer,count);
+			//memset(spidev->rx_buffer,0x0,bufsiz);
+			if(strcmp(dev_name(&spi->dev),"spi1.0")==0) {
+				ret = spi_dev_send_one_pack_test(spi, count);
+			}
+			else { 
+				ret = spi_dev_slave_send_one_pack_test(spi, count);
+			}
+			if(ret == count) {
+				if(debug) { 	
+					dev_info(dev, "spi write data(%d bytes) \n",ret);
+					print_buf_data(spidev->tx_buffer,count);
+				}
+				dev_info(dev, "complete.retain:%d rx_cnt_in_tx_thread:%d spidev->rx_cnt_in_rx_thread:%d rx_check_ok_cnt:%d rx_check_err_cnt:%d\n",
+														times,spidev->rx_cnt_in_tx_thread,spidev->rx_cnt_in_rx_thread,
+														spidev->rx_data_check_ok_cnt,spidev->rx_data_check_err_cnt); 
+			}
+			usleep_range(5+(count%10),20);
+		}			
+	}
+
+	ret = strcmp(cmd_str,"gpio_out_val");
+	if(ret == 0) {
+		if(param1)
+			gpio_set_value(spidev->gpio_ex,1);
+		else
+			gpio_set_value(spidev->gpio_ex,0);
+	}
+	ret = strcmp(cmd_str,"test_ktime_get");
+	if(ret == 0) {
+		ktime_t k_time_start = 0;
+		ktime_t k_time_end = 0;
+		ktime_t diff = 0;
+
+		k_time_start = ktime_get();	
+		gpio_set_value(spidev->gpio_ex,0);		
+		do {
+			diff = ktime_sub(ktime_get(),k_time_start);
+		}while(diff <= (param1*1000));
+		gpio_set_value(spidev->gpio_ex,1);
+		printk("test ktime_get: start=%lld end=%lld diff=%lld \r\n",k_time_start,ktime_get(),diff);
+	}
+	return n;
+
+}
+
+
/* Instantiate the 'spidevinfo' kobj_attribute (show/store defined above). */
SPIDEV_ATTR(spidevinfo);


/* Attributes exported through the per-device debug sysfs group. */
static struct attribute * test_attr[] = {
	&spidevinfo_attr.attr,

	NULL,
};

static const struct attribute_group attr_group = {
	.attrs = test_attr,
};

/* NULL-terminated group list handed to sysfs_create_groups() in probe. */
static const struct attribute_group *attr_groups[] = {
	&attr_group,

	NULL,
};
+#endif
+
+
+#ifdef  TEST_SWAP_KERNEL_AND_USER 
+
+/*  v3e
+spi0(master)-------------------------------------spi1(slave)
+GPIO129 <----------------------------------------INT4(GPIO51)
+INT7(GPIO54) <-----------------------------------GPIO130
+*/
+
+/*  v3 mdl
+4#(master) --------------------------------------5#(slave)
+GPIO130 <----------------------------------------INT6(GPIO53)
+INT7(GPIO54) <-----------------------------------GPIO131
+*/
+
+//#define TEST_SPI_SLAVE
+#ifdef TEST_SPI_SLAVE
+#define GPIO_NUM_EX  		 131
+#define GPIO_NUM_INT  		 53
+#else
+#define GPIO_NUM_EX  		 130
+#define GPIO_NUM_INT  		 54
+#endif
+
+static int spi_dev_pin_init_test(struct spi_device *spi)
+{
+	struct spidev_data	*spidev = spi_get_drvdata(spi);
+	enum of_gpio_flags flags;
+	int status = 0;
+	
+
+	spidev->pctrl = devm_pinctrl_get(&spi->dev);
+	if(!spidev->pctrl) {
+		dev_info(&spi->dev,"get dev pctrl failed!\n",status);	
+		return status;
+	}
+
+	spidev->pint_ex = pinctrl_lookup_state(spidev->pctrl, "int_ex");
+	if (IS_ERR(spidev->pint_ex)) {
+		dev_err(&spi->dev, "TEST: missing pint_ex \n");
+		return status;
+	}
+
+	if (pinctrl_select_state(spidev->pctrl, spidev->pint_ex) < 0) {
+			dev_err(&spi->dev, "TEST: slect pint_ex \n");
+			return status;
+	}
+	if(strcmp(dev_name(&spi->dev),"spi1.0")==0) {
+		spidev->gpio_ex = GPIO_NUM_EX;
+		spidev->gpio_int = GPIO_NUM_INT;
+	} else {
+		spidev->gpio_ex = 130;
+		spidev->gpio_int = 51;
+	}
+	return status;
+		
+}
+
+static void send_signal(int sig_no,void *dev_id)
+{
+	int ret;
+	struct spidev_data	*spidev = (struct spidev_data *)dev_id;
+	struct kernel_siginfo info;
+	struct task_struct * my_task = NULL;
+	
+	//printk("send signal %d to pid %d \n",sig_no,spidev->pid);
+	memset(&info,0,sizeof(struct siginfo));
+	if(spidev->pid == 0) {
+		printk("send_signal pid is not valid \n");
+		return;
+	}
+	
+	info.si_signo = sig_no;
+	info.si_code = gpio_get_value(spidev->gpio_int);
+	info.si_errno = gpio_get_value(spidev->gpio_ex);
+	rcu_read_lock();
+	my_task = pid_task(find_vpid(spidev->pid),PIDTYPE_PID);
+	rcu_read_unlock();
+	
+	if(!my_task) {
+		printk("%s get pid_task failed! \n",__FUNCTION__);
+		return;
+	}
+	ret = send_sig_info(sig_no, &info, my_task);
+	if(ret < 0)
+		printk("send signal failed! \n");
+	
+}
+
+static int spi_dev_sig_process_test(void *arg)
+{
+	struct spi_device *spi = (struct spi_device *)arg;
+	struct spidev_data	*spidev = spi_get_drvdata(spi);	
+
+	while(1) {
+		down(&spidev->sig_req);
+		send_signal(SIGUSR1,spidev);
+		
+	}
+	return 0;
+}
+static void send_dma_cfg_done_signal(int sig_no,void *dev_id)
+{
+	int ret;
+	struct spidev_data	*spidev = (struct spidev_data *)dev_id;
+	struct kernel_siginfo info;
+	struct task_struct * my_task = NULL;
+	int dma_cfg_done = 0;
+	//printk("send signal %d to pid %d \n",sig_no,spidev->pid);
+	memset(&info,0,sizeof(struct siginfo));
+
+	if(spidev->dma_cfg_done == 1) {
+		dma_cfg_done = spidev->dma_cfg_done;
+		spidev->dma_cfg_done= 0;
+	}
+	if(spidev->pid == 0) {
+		printk("%s is not valid\n",__FUNCTION__);
+		return;
+	}
+	info.si_signo = sig_no;
+	info.si_errno = dma_cfg_done;
+	rcu_read_lock();
+	my_task = pid_task(find_vpid(spidev->pid),PIDTYPE_PID);
+	rcu_read_unlock();
+	
+	if(!my_task) {
+		printk("%s get pid_task failed! \n",__FUNCTION__);
+		return;
+	}
+	ret = send_sig_info(sig_no, &info, my_task);
+	if(ret < 0)
+		printk("send signal failed! \n");
+	
+}
+
+static int spi_dev_dma_cfg_done_process_test(void *arg)
+{
+	struct spi_device *spi = (struct spi_device *)arg;
+	struct spidev_data	*spidev = spi_get_drvdata(spi);	
+
+	while(1) {
+		down(&spidev->sem_dma_cfg_done);
+		send_dma_cfg_done_signal(SIGUSR2,spidev);
+	}
+	return 0;
+}
+
+static irqreturn_t spidev_hand_shake_irq(int irqno, void *dev_id)
+{
+	struct spidev_data	*spidev = (struct spidev_data *)dev_id;
+	
+	int gpio_out_status = gpio_get_value(spidev->gpio_ex);
+	int gpio_int_status = gpio_get_value(spidev->gpio_int);
+
+	up(&spidev->sig_req);
+	dev_dbg(&spidev->spi->dev,"out=%d int=%d \r\n",gpio_out_status,gpio_int_status);	
+	
+	return IRQ_HANDLED;
+}
+
+
+static int spi_dev_irq_init_test(struct spi_device *spi)
+{
+	struct spidev_data	*spidev = spi_get_drvdata(spi);
+	int irq = 0,ret = 0;
+
+	if(!spi || !spidev) {
+		ret = -ENOENT;
+		return ret;
+	}
+
+	sema_init(&spidev->sig_req, 0);
+	sema_init(&spidev->sem_dma_cfg_done, 0);
+	kernel_thread(spi_dev_sig_process_test,spi, 0);	/* fork the main thread */
+	kernel_thread(spi_dev_dma_cfg_done_process_test,spi, 0);	/* fork the main thread */
+	irq = irq_of_parse_and_map(spi->dev.of_node, 0);
+	if (irq <= 0) {
+	    dev_err(&spi->dev, "ERROR: invalid interrupt number, irq = %d\n",irq);
+		return -EBUSY;
+	}
+	spidev->irq = irq;
+	dev_info(&spi->dev, "used interrupt num is %d\n",  spidev->irq);
+		
+	ret = devm_request_irq(&spi->dev, spidev->irq, spidev_hand_shake_irq,
+				 0, dev_name(&spi->dev), spidev);
+	
+	if (ret < 0) {
+		dev_err(&spi->dev, "probe - cannot get IRQ (%d)\n", ret);
+		return ret;
+	}
+
+	return ret;
+
+}
+
+#endif
+
#ifdef TEST_SWAP_KERNEL_AND_USER
/* Mark DMA configuration complete and wake the thread that forwards the
 * event to user space as SIGUSR2. */
void spi_dev_send_dma_cfg_down(struct spi_device *spi)
{
	struct spidev_data	*spidev = spi_get_drvdata(spi);
	spidev->dma_cfg_done = 1;
	up(&spidev->sem_dma_cfg_done);
}
#else
/* Test hook disabled in this build: no-op stub so callers link. */
void spi_dev_send_dma_cfg_down(struct spi_device *spi)
{
	return;
}
#endif
+/*-------------------------------------------------------------------------*/
+
+static int spidev_probe(struct spi_device *spi)
+{
+	struct spidev_data	*spidev;
+	int			status;
+	unsigned long		minor;
+	u32 val;
+	/*
+	 * spidev should never be referenced in DT without a specific
+	 * compatible string, it is a Linux implementation thing
+	 * rather than a description of the hardware.
+	 */
+	WARN(spi->dev.of_node &&
+	     of_device_is_compatible(spi->dev.of_node, "spidev"),
+	     "%pOF: buggy DT: spidev listed directly in DT\n", spi->dev.of_node);
+
+	spidev_probe_acpi(spi);
+
+	/* Allocate driver data */
+	spidev = kzalloc(sizeof(*spidev), GFP_KERNEL);
+	if (!spidev)
+		return -ENOMEM;
+
+	/* Initialize the driver data */
+	spidev->spi = spi;
+	spin_lock_init(&spidev->spi_lock);
+	mutex_init(&spidev->buf_lock);
+
+	INIT_LIST_HEAD(&spidev->device_entry);
+	
+	if (device_property_read_u32(&spi->dev, "enable_dma",&val)) {
+		spi->dma_used = 0;
+		dev_err(&spi->dev,"enable_dma get failed");
+	}
+	else {
+		spi->dma_used = val;
+		dev_info(&spi->dev,"enable_dma = 0x%x",val);
+	}
+
+	if (device_property_read_u32(&spi->dev, "enable_trans_gap",&val)) {	
+		spi->trans_gaped = 0;
+		dev_err(&spi->dev,"enable_trans_gap get failed");
+	}
+	else {
+		spi->trans_gaped = val;
+		dev_info(&spi->dev,"enable_trans_gap = 0x%x",val);
+	}
+
+	if (device_property_read_u32(&spi->dev, "trans_gap_num",&val)) {	
+		spi->trans_gap_num = 0;
+		dev_err(&spi->dev,"trans_gap_num get failed");
+	}
+	else {
+		spi->trans_gap_num = val;
+		dev_info(&spi->dev,"trans_gap_num = 0x%x",val);
+	}
+
+	// yu.dong@20240617 [T106BUG-641] SPI packet loss problem, add kernel buffer scheme.
+
+	/* If we can allocate a minor number, hook up this device.
+	 * Reusing minors is fine so long as udev or mdev is working.
+	 */
+	mutex_lock(&device_list_lock);
+	minor = find_first_zero_bit(minors, N_SPI_MINORS);
+	if (minor < N_SPI_MINORS) {
+		struct device *dev;
+
+		spidev->devt = MKDEV(SPIDEV_MAJOR, minor);
+		dev = device_create(spidev_class, &spi->dev, spidev->devt,
+				    spidev, "spidev%d.%d",
+				    spi->master->bus_num, spi->chip_select);
+		status = PTR_ERR_OR_ZERO(dev);
+	} else {
+		dev_dbg(&spi->dev, "no minor number available!\n");
+		status = -ENODEV;
+	}
+	if (status == 0) {
+		set_bit(minor, minors);
+		list_add(&spidev->device_entry, &device_list);
+	}
+	mutex_unlock(&device_list_lock);
+
+	spidev->speed_hz = spi->max_speed_hz;
+	spidev->rd_from_rx_buffer = 0;
+	if (status == 0)
+		spi_set_drvdata(spi, spidev);
+	else
+		kfree(spidev);
+	spi_setup(spi);
+	if(0 == status && spi->master->slave)
+		device_init_wakeup(&spi->dev, true);
+#ifdef SPIDEV_DEBUG
+		int ret = sysfs_create_groups(&spi->dev.kobj, attr_groups);
+	
+		if (ret) {
+			dev_err(&spi->dev, "create test_kobj attr group fain error=%d\n", ret);
+			return ret;
+		}
+
+		ret = spidev_debug_test_init(spi);
+		if (ret) {
+			dev_err(&spi->dev, "spidev_debug_test_init error=%d\n", ret);
+			return ret;
+		}
+#endif
+	
+#ifdef	TEST_SWAP_KERNEL_AND_USER
+		int ret;
+		spidev->dma_cfg_done = 0;
+		spidev->pid = 0;
+		ret =spi_dev_pin_init_test(spi);
+		if(ret) {
+			dev_info(&spi->dev, "spi_dev_pin_init_test,ret=%d \n",ret);
+			return ret;
+		}
+
+		ret =spi_dev_irq_init_test(spi);
+		if(ret) {
+			dev_info(&spi->dev, "spi_dev_irq_init_test,ret=%d \n",ret);
+			return ret;
+		}
+#endif
+	return status;
+}
+
/*
 * spidev_remove - unbind: detach the spi_device from the driver data,
 * remove the character device node and release the minor.  The
 * spidev_data itself is only freed when no file descriptors remain
 * open (users == 0); otherwise release() frees it on last close.
 */
static int spidev_remove(struct spi_device *spi)
{
	struct spidev_data	*spidev = spi_get_drvdata(spi);

	/* prevent new opens */
	mutex_lock(&device_list_lock);
	/* make sure ops on existing fds can abort cleanly */
	spin_lock_irq(&spidev->spi_lock);
	spidev->spi = NULL;
	spin_unlock_irq(&spidev->spi_lock);

	list_del(&spidev->device_entry);
	device_destroy(spidev_class, spidev->devt);
	clear_bit(MINOR(spidev->devt), minors);
	if (spidev->users == 0)
		kfree(spidev);
	mutex_unlock(&device_list_lock);

	return 0;
}
+
/* SPI protocol driver binding spidev to DT/ACPI-matched devices. */
static struct spi_driver spidev_spi_driver = {
	.driver = {
		.name =		"spidev",
		.of_match_table = of_match_ptr(spidev_dt_ids),
		.acpi_match_table = ACPI_PTR(spidev_acpi_ids),
	},
	.probe =	spidev_probe,
	.remove =	spidev_remove,

	/* NOTE:  suspend/resume methods are not necessary here.
	 * We don't do anything except pass the requests to/from
	 * the underlying controller.  The refrigerator handles
	 * most issues; the controller driver handles the rest.
	 */
};
+
+/*-------------------------------------------------------------------------*/
+
+static int __init spidev_init(void)
+{
+	int status;
+
+	/* Claim our 256 reserved device numbers.  Then register a class
+	 * that will key udev/mdev to add/remove /dev nodes.  Last, register
+	 * the driver which manages those device numbers.
+	 */
+	BUILD_BUG_ON(N_SPI_MINORS > 256);
+	status = register_chrdev(SPIDEV_MAJOR, "spi", &spidev_fops);
+	if (status < 0)
+		return status;
+
+	spidev_class = class_create(THIS_MODULE, "spidev");
+	if (IS_ERR(spidev_class)) {
+		unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
+		return PTR_ERR(spidev_class);
+	}
+
+	status = spi_register_driver(&spidev_spi_driver);
+	if (status < 0) {
+		class_destroy(spidev_class);
+		unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
+	}
+	return status;
+}
+module_init(spidev_init);
+
/* Module exit: tear down in reverse of spidev_init(). */
static void __exit spidev_exit(void)
{
	spi_unregister_driver(&spidev_spi_driver);
	class_destroy(spidev_class);
	unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
}
+
+MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
+MODULE_DESCRIPTION("User mode SPI device interface");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:spidev");