[Feature][T106_eSDK]T106-V2.01.01.02P56U06.AP.15.11_CAP.15.11(SDK4.6)diff_17.02(SDK4.7)

Only Configure: No
Affected branch: master
Affected module: unknown
Is it affected on both ZXIC and MTK: only ZXIC
Self-test: Yes
Doc Update: No

Change-Id: I9dc02273b59a074828ab3eeaa84306415b153138
diff --git a/upstream/linux-5.10/drivers/dma/sc/zx297520v3_dma.c b/upstream/linux-5.10/drivers/dma/sc/zx297520v3_dma.c
new file mode 100755
index 0000000..6ef00d8
--- /dev/null
+++ b/upstream/linux-5.10/drivers/dma/sc/zx297520v3_dma.c
@@ -0,0 +1,1464 @@
+/*******************************************************************************
+ * Copyright (C) 2013, ZTE Corporation.
+ *
+ * File Name:zx297520v3_dma.c
+ * File Mark:
+ * Description:
+ * Others:
+ * Version:       0.1
+ * Author:        limeifeng
+ * Date:
+ * modify
+
+
+  ********************************************************************************/
+
+/****************************************************************************
+* 	                                           Include files
+****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include "../dmaengine.h"
+#include <linux/soc/sc/common.h>
+#include <linux/soc/sc/spinlock.h>
+
+#include "zx297520v3_dma.h"
+
+//#pragma GCC optimize("O0")
+#define DMA_SUCCESS			DMA_COMPLETE
+
+#define DMA_CHANNEL_CONFIG(peripheral_id, is_used , enable_mem2mem)		{peripheral_id, is_used, enable_mem2mem}
+
+/*dma channel config define*/
+typedef struct 
+{
+    dma_peripheral_id 	peripheral_id;		/* hw channel id */
+    unsigned int 		is_used;
+    unsigned int 		enable_mem2mem;
+#if 0	
+    void *				data;
+    dma_callback_func 	channel_callback;
+#endif	
+}dma_channel_config;
+
+static dma_channel_config dma_chan_config[] =
+{
+	DMA_CHANNEL_CONFIG(DMA_CH_UART0_TX, 	false,	false),
+	DMA_CHANNEL_CONFIG(DMA_CH_UART0_RX,		false,	false),
+	DMA_CHANNEL_CONFIG(DMA_CH_UART1_TX,		false,	false),
+	DMA_CHANNEL_CONFIG(DMA_CH_UART1_RX,		false,	false),
+	DMA_CHANNEL_CONFIG(DMA_CH_SSP0_TX,		false,	false),
+	
+	DMA_CHANNEL_CONFIG(DMA_CH_SSP0_RX,		false,	true),
+#if 1	/* only ps core used */
+	DMA_CHANNEL_CONFIG(DMA_CH_GPRS0,		false,	false),
+	DMA_CHANNEL_CONFIG(DMA_CH_GPRS1,		false,	false),
+	DMA_CHANNEL_CONFIG(DMA_CH_USIM,			false,	false),
+#endif	
+	DMA_CHANNEL_CONFIG(DMA_CH_I2S0_TX, 		false,	false),
+
+	DMA_CHANNEL_CONFIG(DMA_CH_I2S0_RX0,		false,	false),
+	DMA_CHANNEL_CONFIG(DMA_CH_I2S1_TX,		false,	false),
+	DMA_CHANNEL_CONFIG(DMA_CH_I2S1_RX0,		false,	false),
+	DMA_CHANNEL_CONFIG(DMA_CH_SPIFC_TX,		false,	false),
+	DMA_CHANNEL_CONFIG(DMA_CH_SPIFC_RX,		false,	false),	
+	
+	DMA_CHANNEL_CONFIG(DMA_CH_SSP1_TX,		false,	true),
+	DMA_CHANNEL_CONFIG(DMA_CH_SSP1_RX,		false,	false),	
+	DMA_CHANNEL_CONFIG(DMA_CH_UART2_TX,		false,	true),
+	DMA_CHANNEL_CONFIG(DMA_CH_UART2_RX,		false,	true),
+	
+	DMA_CHANNEL_CONFIG(DMA_CH_EMBMS,		false,	false),
+	#if 1	/* only ps core used */	
+	DMA_CHANNEL_CONFIG(DMA_CH_USIM1,		false,	false),
+	#endif	
+	DMA_CHANNEL_CONFIG(DMA_CH_M2M_TX,		false,	true),
+	DMA_CHANNEL_CONFIG(DMA_CH_M2M_RX,		false,	true),
+};
+
+/****************************************************************************
+* 	                                           Local Macros
+****************************************************************************/
+#define BIT_SHIFT_L(value,BIT_NO)				((unsigned int)(value << (BIT_NO)))
+#define GET_HIGH_16BIT(val)						(unsigned int)(val >> (16))
+#define GET_LOW_16BIT(val)						(unsigned int)(val & (0xffff))
+#define DMA_CHANNEL(dmac,channel)				(unsigned int)(dmac << (16)|(channel) )
+
+/*dma control reg bit */
+#define DMA_CTRL_ENABLE(value)             		BIT_SHIFT_L(value,0)
+#define DMA_CTRL_SOFT_B_REQ(value)         		BIT_SHIFT_L(value,1)
+#define DMA_CTRL_SRC_FIFO_MOD(value)       		BIT_SHIFT_L(value,2)
+#define DMA_CTRL_DEST_FIFO_MOD(value)      		BIT_SHIFT_L(value,3)
+#define DMA_CTRL_IRQ_MOD(value)            		BIT_SHIFT_L(value,4)
+#define DMA_CTRL_SRC_BURST_SIZE(value)         	BIT_SHIFT_L(value,6)
+#define DMA_CTRL_SRC_BURST_LENGTH(value)        BIT_SHIFT_L(value,9)
+#define DMA_CTRL_DEST_BURST_SIZE(value)        	BIT_SHIFT_L(value,13)
+#define DMA_CTRL_DEST_BURST_LENGTH(value)       BIT_SHIFT_L(value,16)
+#define DMA_CTRL_INTERRUPT_SEL(value)      		BIT_SHIFT_L(value,20)
+#define DMA_CTRL_FORCE_CLOSE(value)   			BIT_SHIFT_L(value,31)
+
+#define MAX(a,b) ((a) > (b) ? (a) : (b))
+
+/* Max number of linked-list (LLI) descriptors per transfer chain -- sizes the per-channel dma_lli_param array */
+#define MAX_LLI_PARA_CNT						(32)
+
+
+/* config dma reused */
+#define	DMA_SEL_CFG_REG 						(get_socsys_base() + 0x120)
+
+#if defined(CONFIG_ARCH_ZX297520V2)
+#define	DMA_SEL_UART2_I2S						(1U << 0)
+#define	DMA_SEL_UART1_HASH						(1U << 1)
+#define	DMA_SEL_I2S0_TDM						(1U << 2)
+#define	DMA_SEL_I2S1_TDM						(1U << 3)
+#elif defined(CONFIG_ARCH_ZX297520V3)
+#define	DMA_SEL_UART2TX_I2S0RX1					(1U << 0)
+#define	DMA_SEL_UART2RX_I2S1RX1					(1U << 1)
+#define	DMA_SEL_UART1RX_HASH					(1U << 2)
+#define	DMA_SEL_I2S0TX_TDMTX0					(1U << 3)
+#define	DMA_SEL_I2S0RX0_TDMRX0					(1U << 4)
+#define	DMA_SEL_I2S1TX_TDMTX1					(1U << 5)
+#define	DMA_SEL_I2S1RX0_TDMRX1					(1U << 6)
+#endif
+
+typedef struct
+{
+    volatile unsigned int src_addr;
+    volatile unsigned int dest_addr;
+    volatile unsigned int xpara;
+    volatile unsigned int yzpara;
+    volatile unsigned int src_yzstep;
+    volatile unsigned int dest_yzstep;
+    volatile unsigned int reserved0;
+    volatile unsigned int link_addr;
+    volatile unsigned int control;
+}dma_lli_param;
+
+#define MAX_LLI_PARAMS_CNT						(sizeof(dma_lli_param)*MAX_LLI_PARA_CNT)
+static dma_lli_param *dma_lli_params[DMA_CH_NUM];
+static dma_addr_t dma_lli_phy_addr[DMA_CH_NUM];
+
+#define ZX29_DMA_TEST		0
+
+typedef struct
+{
+    volatile unsigned short core_id;		/* zte_coreid -- for debug */
+    volatile unsigned short is_used;
+}dma_pub_config;
+
+static dma_pub_config *dma_pub_configs;
+
+#define ZX29_DMA_INT_SEL	DMA_INT_TO_A9
+
+/****************************************************************************
+* 	                                           Local Types
+****************************************************************************/
+static DEFINE_MUTEX(dma_mutex);
+
+struct zx29_dma_channel
+{
+    dma_peripheral_id 				peripheral_id;
+    struct zx29_dma * 				dma_device;
+    struct dma_chan 				chan;
+    struct dma_async_tx_descriptor	desc;
+    struct tasklet_struct	  		tasklet;
+    enum dma_status 				status;
+	unsigned int					cyclic;
+	dma_peripheral_id 				req_peripheral_id;
+//    dma_channel_def 				dma_chan_par; 
+    dma_cookie_t 					zx29_dma_cookie;
+};
+
+struct zx29_dma
+{
+    struct dma_device		dma;
+    dma_regs  __iomem *		reg;
+    dma_channel_config * 	chan_config;
+	unsigned int			channel_count;
+    struct zx29_dma_channel dma_chan[DMA_CH_NUM]; 
+};
+/****************************************************************************
+* 	                     DMA trace
+****************************************************************************/
+/* #define ZX_TRACE_DMA */
+
+#ifdef ZX_TRACE_DMA
+/*#pragma GCC optimize("O0")*/
+
+extern unsigned int test_timer_read( void );
+
+#define	TRACE_DMA_COUNT	1000
+
+typedef enum
+{
+	DMA_DO_SUBMIT = 0,
+	DMA_DO_START = 1,		
+	DMA_DO_ERR = 2,
+	DMA_DO_SUCCESS = 3,	
+}dma_behavior_t;
+
+typedef struct 
+{
+  	dma_peripheral_id 	peripheral_id;
+	dma_behavior_t  	behavior;	
+}dma_trace_t;
+
+volatile dma_trace_t dma_trace_view[TRACE_DMA_COUNT+10];
+volatile unsigned int 	 dma_trace_index = 0;
+
+#define	dma_trace_index_inc()	\
+do{ \
+	dma_trace_index++;\
+	if(dma_trace_index>=TRACE_DMA_COUNT)\
+	   	dma_trace_index=0;\
+}while(0)
+
+static struct zx29_dma_channel *to_zx29_dma_chan(struct dma_chan *chan);
+static void dma_trace_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct zx29_dma_channel *dma_channel = to_zx29_dma_chan(tx->chan);
+
+	dma_trace_view[dma_trace_index].peripheral_id	= dma_channel->peripheral_id;
+	dma_trace_view[dma_trace_index].behavior		= DMA_DO_SUBMIT;
+	dma_trace_index_inc();
+}
+
+static void dma_trace_pending(dma_peripheral_id peripheral_id)
+{
+	dma_trace_view[dma_trace_index].peripheral_id	= peripheral_id;
+	dma_trace_view[dma_trace_index].behavior		= DMA_DO_START;
+	dma_trace_index_inc();
+}
+
+static void dma_trace_err(dma_peripheral_id peripheral_id)
+{
+	dma_trace_view[dma_trace_index].peripheral_id	= peripheral_id;
+	dma_trace_view[dma_trace_index].behavior		= DMA_DO_ERR;
+	dma_trace_index_inc();
+}
+
+static void dma_trace_success(dma_peripheral_id peripheral_id)
+{
+	dma_trace_view[dma_trace_index].peripheral_id	= peripheral_id;
+	dma_trace_view[dma_trace_index].behavior		= DMA_DO_SUCCESS;
+	dma_trace_index_inc();
+}
+#else
+static void dma_trace_submit(struct dma_async_tx_descriptor *tx){}
+static void dma_trace_pending(dma_peripheral_id peripheral_id){}
+static void dma_trace_err(dma_peripheral_id peripheral_id){}
+static void dma_trace_success(dma_peripheral_id peripheral_id){}
+#endif
+
+static struct zx29_dma dma_dev;
+
+unsigned int dma_err_num = 0;
+
+#if 0
+#define DMA_CHANNEL_CONFIG(peripheral_id, is_used , enable_mem2mem)		{peripheral_id, is_used, enable_mem2mem}
+static dma_channel_config dma_chan_config[] =
+{
+	DMA_CHANNEL_CONFIG(DMA_CH_UART0_TX, 	false,	true),
+	DMA_CHANNEL_CONFIG(DMA_CH_UART0_RX,		false,	true),
+	DMA_CHANNEL_CONFIG(DMA_CH_UART1_TX,		false,	true),
+	DMA_CHANNEL_CONFIG(DMA_CH_UART1_RX,		false,	true),
+	DMA_CHANNEL_CONFIG(DMA_CH_SSP0_TX,		false,	true),
+	
+	DMA_CHANNEL_CONFIG(DMA_CH_SSP0_RX,		false,	true),
+#if 0	/* only ps core used */
+	DMA_CHANNEL_CONFIG(DMA_CH_GPRS0,		true,	true),
+	DMA_CHANNEL_CONFIG(DMA_CH_GPRS1,		true,	true),
+	DMA_CHANNEL_CONFIG(DMA_CH_USIM,			true,	true),
+#endif	
+	DMA_CHANNEL_CONFIG(DMA_CH_I2S0_TX, 		false,	false),
+
+	DMA_CHANNEL_CONFIG(DMA_CH_I2S0_RX0,		false,	false),
+	DMA_CHANNEL_CONFIG(DMA_CH_I2S1_TX,		false,	false),
+	DMA_CHANNEL_CONFIG(DMA_CH_I2S1_RX0,		false,	false),
+	DMA_CHANNEL_CONFIG(DMA_CH_SPIFC_TX,		false,	false),
+	DMA_CHANNEL_CONFIG(DMA_CH_SPIFC_RX,		false,	false),	
+	
+	DMA_CHANNEL_CONFIG(DMA_CH_SSP1_TX,		false,	true),
+	DMA_CHANNEL_CONFIG(DMA_CH_SSP1_RX,		false,	true),	
+	DMA_CHANNEL_CONFIG(DMA_CH_UART2_TX,		false,	true),
+	DMA_CHANNEL_CONFIG(DMA_CH_UART2_RX,		false,	true),
+	
+#ifdef CONFIG_ARCH_ZX297520V2
+#else
+	DMA_CHANNEL_CONFIG(DMA_CH_EMBMS,		false,	true),
+	#if 0	/* only ps core used */	
+	DMA_CHANNEL_CONFIG(DMA_CH_USIM1,		false,	true),
+	#endif	
+	DMA_CHANNEL_CONFIG(DMA_CH_M2M_TX,		false,	true),
+	DMA_CHANNEL_CONFIG(DMA_CH_M2M_RX,		false,	true),
+#endif
+};
+
+#endif
+static unsigned short dma_chan_check_lock(dma_peripheral_id peripheral_id)
+{
+	if((peripheral_id == DMA_CH_SPIFC_TX) || (peripheral_id == DMA_CH_SPIFC_RX))
+		return false;
+
+	return dma_pub_configs[peripheral_id].is_used;
+}
+
+static void dma_chan_lock(dma_peripheral_id peripheral_id)
+{
+	if((peripheral_id == DMA_CH_SPIFC_TX) || (peripheral_id == DMA_CH_SPIFC_RX))
+		return;
+	dma_pub_configs[peripheral_id].core_id = 208 /*for cap CORE_ID_AP*/;
+	dma_pub_configs[peripheral_id].is_used = true;
+}
+
+static void dma_chan_unlock(dma_peripheral_id peripheral_id)
+{
+	if((peripheral_id == DMA_CH_SPIFC_TX) || (peripheral_id == DMA_CH_SPIFC_RX))
+		return;
+	dma_pub_configs[peripheral_id].core_id = CORE_ID_NUM;
+	dma_pub_configs[peripheral_id].is_used = false;
+}
+
+/* some channel need config reuse register */
+static void	dma_reuse_config(dma_peripheral_id peripheral_id)
+{
+	switch(peripheral_id)
+	{
+#if defined(CONFIG_ARCH_ZX297520V2)
+	case DMA_CH_UART2_TX:
+	case DMA_CH_UART2_RX:
+		zx_clr_reg(DMA_SEL_CFG_REG, DMA_SEL_UART2_I2S);
+		break;		
+
+	case DMA_CH_I2S0_RX1:
+	case DMA_CH_I2S1_RX1:
+		zx_set_reg(DMA_SEL_CFG_REG, DMA_SEL_UART2_I2S);
+		break;		
+	
+	case DMA_CH_UART1_RX:
+		zx_clr_reg(DMA_SEL_CFG_REG, DMA_SEL_UART1_HASH);
+		break;
+
+	case DMA_CH_HASH_RX:
+		zx_set_reg(DMA_SEL_CFG_REG, DMA_SEL_UART1_HASH);
+		break;	
+
+	case DMA_CH_I2S0_TX:
+	case DMA_CH_I2S0_RX0:
+		zx_clr_reg(DMA_SEL_CFG_REG, DMA_SEL_I2S0_TDM);
+		break;		
+
+	case DMA_CH_TDM_TX0:
+	case DMA_CH_TDM_RX0:
+		zx_set_reg(DMA_SEL_CFG_REG, DMA_SEL_I2S0_TDM);
+		break;		
+
+	case DMA_CH_I2S1_TX:
+	case DMA_CH_I2S1_RX0:
+		zx_clr_reg(DMA_SEL_CFG_REG, DMA_SEL_I2S1_TDM);
+		break;		
+
+	case DMA_CH_TDM_TX1:
+	case DMA_CH_TDM_RX1:
+		zx_set_reg(DMA_SEL_CFG_REG, DMA_SEL_I2S1_TDM);
+		break;
+
+#elif defined(CONFIG_ARCH_ZX297520V3)
+	case DMA_CH_UART1_RX:
+		zx_clr_reg(DMA_SEL_CFG_REG, DMA_SEL_UART1RX_HASH);
+		break;
+	case DMA_CH_I2S0_TX:
+		zx_clr_reg(DMA_SEL_CFG_REG, DMA_SEL_I2S0TX_TDMTX0);
+		break;	
+	case DMA_CH_I2S0_RX0:
+		zx_clr_reg(DMA_SEL_CFG_REG, DMA_SEL_I2S0RX0_TDMRX0);
+		break;
+	case DMA_CH_I2S1_TX:
+		zx_clr_reg(DMA_SEL_CFG_REG, DMA_SEL_I2S1TX_TDMTX1);
+		break;
+	case DMA_CH_I2S1_RX0:
+		zx_clr_reg(DMA_SEL_CFG_REG, DMA_SEL_I2S1RX0_TDMRX1);
+		break;
+	case DMA_CH_UART2_TX:
+		zx_clr_reg(DMA_SEL_CFG_REG, DMA_SEL_UART2TX_I2S0RX1);
+		break;
+	case DMA_CH_UART2_RX:
+		zx_clr_reg(DMA_SEL_CFG_REG, DMA_SEL_UART2RX_I2S1RX1);
+		break;
+	case DMA_CH_HASH_RX:
+		zx_set_reg(DMA_SEL_CFG_REG, DMA_SEL_UART1RX_HASH);
+		break;	
+	case DMA_CH_TDM_TX0:
+		zx_set_reg(DMA_SEL_CFG_REG, DMA_SEL_I2S0TX_TDMTX0);
+		break;
+	case DMA_CH_TDM_RX0:
+		zx_set_reg(DMA_SEL_CFG_REG, DMA_SEL_I2S0RX0_TDMRX0);
+		break;
+	case DMA_CH_TDM_TX1:
+		zx_set_reg(DMA_SEL_CFG_REG, DMA_SEL_I2S1TX_TDMTX1);
+		break;
+	case DMA_CH_TDM_RX1:
+		zx_set_reg(DMA_SEL_CFG_REG, DMA_SEL_I2S1RX0_TDMRX1);
+		break;
+	case DMA_CH_I2S0_RX1:
+		zx_set_reg(DMA_SEL_CFG_REG, DMA_SEL_UART2TX_I2S0RX1);
+		break;
+	case DMA_CH_I2S1_RX1:
+		zx_set_reg(DMA_SEL_CFG_REG, DMA_SEL_UART2RX_I2S1RX1);
+		break;
+#endif
+
+	default:
+		break;
+	}
+}
+
+static dma_peripheral_id get_real_peri_id(dma_peripheral_id peripheral_id)
+{
+	if(peripheral_id < DMA_CH_NUM)
+		return peripheral_id;
+
+	switch(peripheral_id)
+	{
+	case DMA_CH_HASH_RX:
+		return DMA_CH_UART1_RX;
+	case DMA_CH_TDM_TX0:
+		return DMA_CH_I2S0_TX;
+	case DMA_CH_TDM_RX0:
+		return DMA_CH_I2S0_RX0;
+	case DMA_CH_TDM_TX1:
+		return DMA_CH_I2S1_TX;		
+	case DMA_CH_TDM_RX1:
+		return DMA_CH_I2S1_RX0;
+	case DMA_CH_I2S0_RX1:
+		return DMA_CH_UART2_TX;
+	case DMA_CH_I2S1_RX1:
+		return DMA_CH_UART2_RX;	
+	default:
+		return 0xff;
+	}
+}
+
+static unsigned int get_channel_id(dma_peripheral_id peripheral_id)
+{
+	int i;
+    dma_channel_config *chan_config = dma_dev.chan_config;
+	dma_peripheral_id real_peripheral_id = 0xff;	
+
+	real_peripheral_id = get_real_peri_id(peripheral_id);
+
+    for(i=0; i<dma_dev.channel_count; i++)
+    {
+        if ( (chan_config[i].peripheral_id==real_peripheral_id))	
+            return i;
+    }
+	
+    return 0xff;
+}
+
+static void dma_sync_lli_for_cpu(unsigned int channel_id)
+{
+	dma_sync_single_for_cpu(dma_dev.dma.dev, dma_lli_phy_addr[channel_id], MAX_LLI_PARAMS_CNT, DMA_BIDIRECTIONAL);	
+}
+
+static void dma_sync_lli_for_device(unsigned int channel_id)
+{
+	dma_sync_single_for_device(dma_dev.dma.dev, dma_lli_phy_addr[channel_id], MAX_LLI_PARAMS_CNT, DMA_BIDIRECTIONAL);	
+}
+
+static signed int dma_find_chan(dma_peripheral_id channel_id);
+static signed int dma_reset_chan(struct zx29_dma_channel *channel);
+
+static void dma_tasklet(unsigned long data)
+{
+	struct zx29_dma_channel *chan = (struct zx29_dma_channel *)data;
+
+	if (chan->desc.callback)
+		chan->desc.callback(chan->desc.callback_param);
+}
+
+static struct zx29_dma_channel *to_zx29_dma_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct zx29_dma_channel, chan);
+}
+
+static signed int dma_disable_chan(struct zx29_dma_channel *chan)
+{
+    dma_chan_reg  __iomem* chan_reg_ptr=NULL;
+	
+    if (chan->peripheral_id >= DMA_CH_NUM)
+    {
+        return -EINVAL;
+    }
+	
+    chan_reg_ptr= &(dma_dev.reg->channel[chan->peripheral_id]);
+    chan_reg_ptr->control |= DMA_CTRL_FORCE_CLOSE(1);
+	
+    return 0;
+}
+
+/*reset channel para*/
+static signed int dma_reset_chan(struct zx29_dma_channel *chan)
+{
+    unsigned int  peripheral_id;
+	unsigned int  channel_id;
+    dma_regs  __iomem* pReg;
+    dma_chan_reg  __iomem* chan_reg_ptr;
+
+    if (!chan) {
+        return -EINVAL;
+    }
+
+	peripheral_id = (unsigned int)chan->peripheral_id;
+    if (peripheral_id >= DMA_CH_NUM) {
+        return -EINVAL;
+    }
+
+	channel_id = get_channel_id(chan->peripheral_id);
+	if(channel_id == 0xff)
+		return -EINVAL;
+	
+    pReg= dma_dev.reg;
+    chan_reg_ptr= &(pReg->channel[peripheral_id]);
+	
+    /*force close current channel*/
+    chan_reg_ptr->control  |= DMA_CTRL_FORCE_CLOSE(1);
+
+    //memset((void*) chan_reg_ptr,0,sizeof(dma_chan_reg));
+    pReg->raw_int_tc_status 		=  BIT_SHIFT_L(0x1,peripheral_id);
+    pReg->raw_int_src_err_status 	=  BIT_SHIFT_L(0x1,peripheral_id);
+    pReg->raw_int_dest_err_status 	=  BIT_SHIFT_L(0x1,peripheral_id);
+    pReg->raw_int_cfg_err_status 	=  BIT_SHIFT_L(0x1,peripheral_id);
+	memset((void*) chan_reg_ptr,0,sizeof(dma_chan_reg));
+    //dma_dev[dmac_id].chan_config[channel_id].channelCbk = NULL;
+    //dma_dev[dmac_id].chan_config[channel_id].data = NULL;
+    chan->status = DMA_SUCCESS;
+	chan->cyclic = 0;
+    dma_dev.chan_config[channel_id].is_used = false;
+	dma_chan_unlock(dma_dev.chan_config[channel_id].peripheral_id);
+	
+    return 0;
+}
+
+/*find the fixed free channel for peripheralID*/
+static signed int dma_find_chan(dma_peripheral_id peripheral_id)
+{
+    unsigned int channel_id = 0xff;
+    dma_channel_config *chan_config = dma_dev.chan_config;
+
+#if 0/*move to zx29_dma_filter_fn*/	
+    /*in case there is free channel,allocate it to M2M*/
+    if (DMA_CH_MEMORY==peripheral_id) 
+    {
+        for(i=0; i<dma_dev.channel_count; i++)
+        {
+            if((chan_config[i].is_used==false) && \
+				(dma_chan_check_lock(chan_config[i].peripheral_id)==false) && \
+				(chan_config[i].enable_mem2mem==true))
+        	{
+                chan_config[i].is_used = true;
+				dma_chan_lock(chan_config[i].peripheral_id);
+                return i;
+        	}
+        }
+        return -EAGAIN;
+    }
+#endif
+
+	channel_id = get_channel_id(peripheral_id);
+	if(channel_id==0xff)
+		return -EAGAIN;
+	
+	reg_spin_lock();	
+    /*if channle has been used,return error*/
+    if((chan_config[channel_id].is_used==true) || \
+		(dma_chan_check_lock(chan_config[channel_id].peripheral_id)==true))
+	{
+		reg_spin_unlock();
+		return -EAGAIN;
+	}
+
+    /*get the channel number*/
+    chan_config[channel_id].is_used =true;
+	dma_chan_lock(chan_config[channel_id].peripheral_id);
+	reg_spin_unlock();
+
+	/* channel reuse*/
+	dma_reuse_config(peripheral_id);
+
+    return channel_id;
+}
+
+static u32 dma_get_residue(struct zx29_dma_channel *chan)
+{
+	dma_regs  __iomem* pReg = NULL;
+	dma_chan_reg  __iomem* chan_reg_ptr = NULL;
+
+	pReg= dma_dev.reg;
+	chan_reg_ptr= &(pReg->channel[chan->peripheral_id]);
+
+    return chan_reg_ptr->xpara;
+}
+
+static enum dma_status zx29_dma_tx_status(struct dma_chan *chan,
+			dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+	struct zx29_dma_channel *zx29_chan = to_zx29_dma_chan(chan);
+	dma_cookie_t last_used;
+	u32 bytes;
+
+	bytes = dma_get_residue(zx29_chan);
+	last_used = chan->cookie;
+	dma_set_tx_state(txstate, chan->completed_cookie, last_used, bytes);
+
+	return zx29_chan->status;
+}
+
+static unsigned int parse_dma_req(dma_transfer_mode trans_mode)
+{
+	unsigned int control = 0;
+	
+	switch(trans_mode)
+	{
+	case TRAN_PERI_TO_PERI:
+		control = DMA_CTRL_SOFT_B_REQ(DMA_PERIPHERAL_REQ)\
+                    | DMA_CTRL_SRC_FIFO_MOD(DMA_ADDRMOD_FIFO) \
+                    | DMA_CTRL_DEST_FIFO_MOD(DMA_ADDRMOD_FIFO);
+		break;
+			
+    case TRAN_PERI_TO_MEM:
+		control = DMA_CTRL_SOFT_B_REQ(DMA_PERIPHERAL_REQ)\
+                    | DMA_CTRL_SRC_FIFO_MOD(DMA_ADDRMOD_FIFO) \
+                    | DMA_CTRL_DEST_FIFO_MOD(DMA_ADDRMOD_RAM);
+		break;
+		
+    case TRAN_MEM_TO_PERI:
+		control = DMA_CTRL_SOFT_B_REQ(DMA_PERIPHERAL_REQ)\
+                    | DMA_CTRL_SRC_FIFO_MOD(DMA_ADDRMOD_RAM) \
+                    | DMA_CTRL_DEST_FIFO_MOD(DMA_ADDRMOD_FIFO);
+		break;		
+
+    case TRAN_MEM_TO_MEM:
+	default:	
+		control = DMA_CTRL_SOFT_B_REQ(DMA_SOFT_REQ)\
+                    | DMA_CTRL_SRC_FIFO_MOD(DMA_ADDRMOD_RAM) \
+                    | DMA_CTRL_DEST_FIFO_MOD(DMA_ADDRMOD_RAM);
+		break;				
+	}
+
+	return control;
+}
+	
+static signed int dma_set_chan_para(unsigned int channel)//,dma_channel_def * chan_para)
+{
+    volatile dma_chan_reg   __iomem* chan_reg = &(dma_dev.reg->channel[channel]);
+	unsigned int channel_id = get_channel_id(channel);
+	dma_lli_param *temp_dma_lli_params = NULL;
+
+	if(channel_id>= DMA_CH_NUM)
+		return -EAGAIN;
+
+	temp_dma_lli_params = dma_lli_params[channel_id];
+
+/*    chan_reg->src_addr 	= chan_para->src_addr;
+    chan_reg->dest_addr = chan_para->dest_addr;
+    chan_reg->xpara 	= chan_para->count;
+	chan_reg->link_addr = chan_para->link_addr;
+
+	if(chan_para->link_addr)
+		chan_reg->link_addr = dma_lli_phy_addr[get_channel_id(channel)];
+
+    chan_reg->control = parse_dma_req(chan_para->dma_control.tran_mode)\
+                        | DMA_CTRL_SRC_BURST_SIZE(chan_para->dma_control.src_burst_size) \
+                        | DMA_CTRL_SRC_BURST_LENGTH((chan_para->dma_control.src_burst_len )) \
+                        | DMA_CTRL_DEST_BURST_SIZE(chan_para->dma_control.dest_burst_size) \
+                        | DMA_CTRL_DEST_BURST_LENGTH((chan_para->dma_control.dest_burst_len ))\
+                        | DMA_CTRL_INTERRUPT_SEL(DMA_INT_TO_PS) ;	
+
+	if(chan_para->dma_control.irq_mode)
+	{
+		if(chan_para->link_addr)
+			chan_reg->control |= DMA_CTRL_IRQ_MOD(DMA_ERR_IRQ_ENABLE);
+		else
+			chan_reg->control |= DMA_CTRL_IRQ_MOD(DMA_ALL_IRQ_ENABLE);
+	}*/
+
+	chan_reg->src_addr	= temp_dma_lli_params[0].src_addr;
+	chan_reg->dest_addr = temp_dma_lli_params[0].dest_addr;
+	chan_reg->xpara 	= temp_dma_lli_params[0].xpara;
+	chan_reg->link_addr = temp_dma_lli_params[0].link_addr;
+	chan_reg->control 	= temp_dma_lli_params[0].control &
+							(~(DMA_CTRL_ENABLE(DMA_ENABLE)));
+
+    return 0;
+}
+
+/*allocate a channel for peripheralID,
+and return the channel number.if failed return -EAGAIN
+*/
+signed int  zx29_dma_request(dma_peripheral_id peripheral_id)
+{
+    signed int errCode = -EAGAIN;
+	
+    mutex_lock(&dma_mutex);
+    errCode=dma_find_chan(peripheral_id);
+    mutex_unlock(&dma_mutex);
+
+    return errCode;
+}
+
+static void dma_config_lli(unsigned int channel_id, dma_channel_def *chan_para)
+{
+	int i = 0;
+	dma_lli_param *temp_dma_lli_params = dma_lli_params[channel_id];
+	
+	dma_sync_lli_for_cpu(channel_id);
+	do{
+		temp_dma_lli_params[i].src_addr 	= chan_para[i].src_addr;
+		temp_dma_lli_params[i].dest_addr 	= chan_para[i].dest_addr;
+		temp_dma_lli_params[i].xpara 		= chan_para[i].count;
+		temp_dma_lli_params[i].yzpara 		= chan_para[i].ycount | (chan_para[i].zcount << 16);
+		temp_dma_lli_params[i].src_yzstep 	= chan_para[i].src_ystep | (chan_para[i].src_zstep << 16);
+		temp_dma_lli_params[i].dest_yzstep 	= chan_para[i].dest_ystep | (chan_para[i].dest_zstep << 16);
+        temp_dma_lli_params[i].control		= parse_dma_req(chan_para[i].dma_control.tran_mode)\
+					                        | DMA_CTRL_SRC_BURST_SIZE(chan_para[i].dma_control.src_burst_size) \
+					                        | DMA_CTRL_SRC_BURST_LENGTH((chan_para[i].dma_control.src_burst_len )) \
+					                        | DMA_CTRL_DEST_BURST_SIZE(chan_para[i].dma_control.dest_burst_size) \
+					                        | DMA_CTRL_DEST_BURST_LENGTH((chan_para[i].dma_control.dest_burst_len ))\
+					                        | DMA_CTRL_INTERRUPT_SEL(ZX29_DMA_INT_SEL)\
+											| DMA_CTRL_ENABLE(DMA_ENABLE);
+
+		if(chan_para[i].dma_control.irq_mode > DMA_ALL_IRQ_DISABLE)
+			temp_dma_lli_params[i].control	|= DMA_CTRL_IRQ_MOD(DMA_ERR_IRQ_ENABLE);
+
+		if(chan_para[i].link_addr > 0)
+			temp_dma_lli_params[i].link_addr = dma_lli_phy_addr[channel_id] + sizeof(dma_lli_param)*(i+1);
+		else
+		{
+			if(chan_para[i].dma_control.irq_mode > DMA_ALL_IRQ_DISABLE)
+				temp_dma_lli_params[i].control	|= DMA_CTRL_IRQ_MOD(DMA_ALL_IRQ_ENABLE);
+		
+			temp_dma_lli_params[i].link_addr = 0;
+		}
+
+//		i++;
+	}while(chan_para[i++].link_addr);
+	
+	dma_sync_lli_for_device(channel_id);	
+}
+
+signed int  zx29_dma_config(struct dma_chan *chan,
+			 struct dma_slave_config *cfg)
+{
+	struct zx29_dma_channel *dma_channel;
+	dma_peripheral_id peripheral_id;
+	unsigned int channel_id;
+	dma_channel_def *chan_para;
+
+	if (!cfg || !chan)
+		return -EINVAL;
+
+	dma_channel = to_zx29_dma_chan(chan);
+	peripheral_id = dma_channel->peripheral_id;
+
+    channel_id = get_channel_id(peripheral_id);
+
+    if(dma_dev.chan_config[channel_id].is_used == false)
+		return -EINVAL;
+
+	chan_para = (dma_channel_def *)cfg;
+    if (chan_para->dma_control.tran_mode>=DMA_TRAN_MOD_ALL\
+   		||chan_para->dma_control.irq_mode>=DMA_IRQMOD_ALL\
+        ||chan_para->dma_control.src_burst_size>=DMA_BURST_SIZE_ALL\
+        ||chan_para->dma_control.src_burst_len>=DMA_BURST_LEN_ALL\
+        ||chan_para->dma_control.dest_burst_size>=DMA_BURST_SIZE_ALL\
+        ||chan_para->dma_control.dest_burst_len>=DMA_BURST_LEN_ALL)
+    {
+        return -EINVAL;
+    }
+
+	/* config lli */
+	dma_config_lli(channel_id, chan_para);
+
+	/* config regs */
+#if 0
+    dma_dev.chan_config[channel_id].channel_callback 	= chan_para->callback;
+    dma_dev.chan_config[channel_id].data 				= chan_para->data;
+#endif
+
+    return 0;//dma_set_chan_para((unsigned int)peripheral_id, chan_para);
+}
+
+signed int zx29_dma_start(unsigned int channel_id)
+{
+    volatile dma_regs  __iomem * pReg = dma_dev.reg;
+
+    if(channel_id >= DMA_CH_NUM)
+    {
+    	BUG();
+		return -EINVAL;
+    }
+
+	dsb();
+	
+    pReg->channel[channel_id].control |= DMA_CTRL_ENABLE(DMA_ENABLE);
+	
+    return  0;
+}
+
+signed int zx29_dma_stop(unsigned int channel_id)
+{
+    volatile dma_regs  __iomem * pReg = dma_dev.reg;
+
+    if(channel_id >= DMA_CH_NUM)
+		return -EINVAL;
+
+    //pReg->channel[channel_id].control &= ~(DMA_CTRL_ENABLE(DMA_ENABLE));
+    pReg->channel[channel_id].control |= DMA_CTRL_FORCE_CLOSE(1);//change by gsn for linuxDMA
+    return  0;
+}
+signed int zx29_dma_get_transfer_num(unsigned int channel_id)
+{	
+	volatile dma_regs  __iomem * pReg = dma_dev.reg;	    
+	if(channel_id >= DMA_CH_NUM)		
+	  return -EINVAL;    
+	return (pReg->channel[channel_id].xpara);
+}
+
+signed int  zx29_dma_set_priority(dma_group_order groupOrder,  dma_group_mode  groupMode)
+{
+    if(groupOrder >= DMA_GROUP_ALL ||groupMode >= DMA_MODE_ALL)
+        return -EINVAL;
+
+    dma_dev.reg->group_order = groupOrder;
+    dma_dev.reg->arbit_mode = groupMode;
+	
+    return 0;
+}
+
+static dma_cookie_t zx29_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	dma_trace_submit(tx);
+	
+	return dma_cookie_assign(tx);
+}
+
+static int zx29_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+	int ret = 0;
+	struct zx29_dma_channel *dma_channel = to_zx29_dma_chan(chan);
+
+	ret = zx29_dma_request(dma_channel->req_peripheral_id);
+	if(ret < 0)
+		return ret;
+	
+	dma_async_tx_descriptor_init(&dma_channel->desc, chan);
+	dma_channel->desc.tx_submit = zx29_dma_tx_submit;
+
+	/* the descriptor is ready */
+	async_tx_ack(&dma_channel->desc);
+	
+	return ret;
+}
+
+void zx29_dma_free_chan_resource(struct dma_chan *chan)
+{
+    struct zx29_dma_channel *dma_channel = to_zx29_dma_chan(chan);
+    dma_reset_chan(dma_channel);
+}
+
+static struct dma_async_tx_descriptor *zx29_prep_dma_cyclic(
+		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+		size_t period_len, enum dma_transfer_direction direction,
+		unsigned long context)
+{
+	struct zx29_dma_channel *dma_channel = to_zx29_dma_chan(chan);
+	struct dma_async_tx_descriptor *desc = &dma_channel->desc;
+	unsigned int channel_id = get_channel_id(dma_channel->peripheral_id);
+	int num_periods = buf_len / period_len;
+	int i = 0;
+	dma_lli_param *temp_dma_lli_params;
+
+	if (channel_id >= DMA_CH_NUM)
+		return NULL;
+// change by gsn for linuxDMA
+	//if(dma_channel->status == DMA_IN_PROGRESS)
+		//return NULL;
+	dma_channel->status = DMA_IN_PROGRESS;
+
+	temp_dma_lli_params = dma_lli_params[channel_id];
+
+	dma_sync_lli_for_cpu(channel_id);
+	for (i = 0; i < num_periods; i++)
+	{
+		temp_dma_lli_params[i].control	|= DMA_CTRL_IRQ_MOD(DMA_ALL_IRQ_ENABLE);
+	}
+	temp_dma_lli_params[num_periods - 1].link_addr = dma_lli_phy_addr[channel_id];
+	dma_sync_lli_for_device(channel_id);
+	
+	dma_channel->cyclic = 1;
+
+	desc->callback = NULL;
+	desc->callback_param = NULL;
+
+	dma_set_chan_para(dma_channel->peripheral_id);
+
+	return desc;
+}
+
+static struct dma_async_tx_descriptor *zx29_prep_dma_interleaved(
+										struct dma_chan *chan, 
+										struct dma_interleaved_template *xt,
+										unsigned long flags)
+{
+	struct zx29_dma_channel *dma_channel = to_zx29_dma_chan(chan);
+	struct dma_async_tx_descriptor *desc = &dma_channel->desc;
+
+// change by gsn for linuxDMA
+	//if(dma_channel->status == DMA_IN_PROGRESS)
+		//return NULL;
+	dma_channel->status = DMA_IN_PROGRESS;
+	
+	desc->callback = NULL;
+	desc->callback_param = NULL;
+
+	dma_set_chan_para(dma_channel->peripheral_id);
+
+	return desc;
+}
+
+static int zx29_dma_terminate_all(struct dma_chan *chan)
+{
+	struct zx29_dma_channel *dma_channel = to_zx29_dma_chan(chan);
+
+	return dma_disable_chan(dma_channel);
+}
+
+static void zx29_dma_issue_pending(struct dma_chan *chan)
+{
+	struct zx29_dma_channel *dma_channel = to_zx29_dma_chan(chan);
+
+	dma_trace_pending(dma_channel->peripheral_id);
+
+	zx29_dma_start(dma_channel->peripheral_id);
+}
+
+unsigned int zx29_dma_get_status(void)
+{
+    volatile dma_regs  __iomem * pReg = dma_dev.reg;
+
+    return pReg->working_status;
+}
+
+bool zx29_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+    struct zx29_dma_channel * channel = to_zx29_dma_chan(chan);
+	unsigned int channel_id = 0;
+	dma_peripheral_id peri_id = 0;
+		
+	peri_id = get_real_peri_id((dma_peripheral_id)param);
+    if(peri_id >= DMA_CH_NUM)
+		return false;
+
+	channel_id = get_channel_id(channel->peripheral_id);
+	if(channel_id == 0xff)
+		return false;
+
+	if(peri_id == DMA_CH_MEMORY)
+	{
+	
+		if ((dma_dev.chan_config[channel_id].is_used == false)&& \
+			(dma_chan_check_lock(dma_dev.chan_config[channel_id].peripheral_id)==false)&& \
+			(dma_dev.chan_config[channel_id].enable_mem2mem==true))
+			{
+				channel->req_peripheral_id = channel->peripheral_id;
+				return true;
+			}
+		else
+			return false;
+	}
+    
+	if (channel->peripheral_id != peri_id)
+		return false;
+	
+    if ((dma_dev.chan_config[channel_id].is_used == false)&& \
+		(dma_chan_check_lock(dma_dev.chan_config[channel_id].peripheral_id)==false))
+    	{
+    		channel->req_peripheral_id = (dma_peripheral_id)param;
+			return true;
+    	}
+	else
+		return false;
+}
+EXPORT_SYMBOL(zx29_dma_filter_fn);
+
+/*
+ * dma_Isr - interrupt handler for the zx29 DMA controller.
+ * @irq: interrupt number.
+ * @dev: the struct zx29_dma registered at request_irq() time.
+ *
+ * Any source/destination/configuration error marks the owning channel
+ * DMA_ERROR and BUG()s (deliberately fatal; the clear-and-continue path
+ * is kept commented out below).  Terminal-count interrupts are acked and
+ * completed inside a loop so that a TC raised while handling is not lost.
+ *
+ * Fix: the vehicle-DC conditional used "#ifdef A || B", which is invalid
+ * preprocessor syntax ("#ifdef" takes a single identifier; the "|| B"
+ * part was ignored, so _USE_VEHICLE_DC_REF alone never enabled the
+ * branch).  Rewritten as "#if defined(A) || defined(B)".
+ */
+irqreturn_t  dma_Isr(int irq, void *dev)
+{
+	unsigned int need_continue = 0;
+    unsigned int i;
+    struct zx29_dma *dmac_ptr = dev;
+    dma_regs  __iomem * dma_reg=dmac_ptr->reg;
+	volatile  unsigned int control;
+	volatile  unsigned int raw_tc_int = dma_reg->raw_int_tc_status;
+    volatile  unsigned int raw_src_err_int = dma_reg->raw_int_src_err_status;
+    volatile  unsigned int raw_dest_err_int = dma_reg->raw_int_dest_err_status;
+    volatile  unsigned int raw_cfg_err_int = dma_reg->raw_int_cfg_err_status;
+	volatile  unsigned int tc_int = dma_reg->int_tc_status;
+	
+	unsigned int  channel_id;
+
+	/* error: flag every faulting channel, then die loudly */
+    if (raw_src_err_int!=0 || raw_dest_err_int!=0 || raw_cfg_err_int!=0)
+    {
+		for (i=0; i<DMA_CH_NUM; i++)
+		{
+			if ((raw_src_err_int|raw_dest_err_int|raw_cfg_err_int)&(0x01<<i))
+			{
+				channel_id = get_channel_id(i);
+				if(channel_id >= DMA_CH_NUM)
+					continue;
+
+				dmac_ptr->dma_chan[channel_id].status = DMA_ERROR;
+				dma_trace_err(i);
+			}
+		}
+
+		BUG();
+		
+/*		dma_reg->raw_int_src_err_status 	|= raw_src_err_int ;
+		dma_reg->raw_int_dest_err_status 	|= raw_dest_err_int ;
+		dma_reg->raw_int_cfg_err_status 	|= raw_cfg_err_int ;
+
+		return IRQ_HANDLED;*/
+    }
+
+
+	/* re-read and service TC status until no new completion shows up */
+	do
+	{
+		need_continue = 0;
+
+		tc_int = dma_reg->int_tc_status;
+		raw_tc_int = dma_reg->raw_int_tc_status;
+
+		for (i = 0;(i< DMA_CH_NUM)&&(raw_tc_int!=0); i++)
+		{
+			if (raw_tc_int&(0x01<<i))
+			{
+				control	= dma_reg->channel[i].control;
+				channel_id = get_channel_id(i);
+				/*dma_reg->raw_int_tc_status = (0x1<<i);*//*clear here may create error clear*/
+			
+				if(channel_id >= DMA_CH_NUM)
+					continue;
+
+				/* complete only interrupts routed to this core, in IRQ
+				 * mode, that are finished (or cyclic with a live TC) */
+				if(((control&DMA_CTRL_INTERRUPT_SEL(0xf))==DMA_CTRL_INTERRUPT_SEL(ZX29_DMA_INT_SEL))&&\
+					(control&DMA_CTRL_IRQ_MOD(1))&&\
+					( ((control&DMA_CTRL_ENABLE(1)) == 0) || ((dmac_ptr->dma_chan[channel_id].cyclic)&&(tc_int&(0x1<<i))) ) )
+				{
+					dma_reg->raw_int_tc_status = (0x1<<i);
+					need_continue = 1;
+					dma_trace_success(i);
+
+					//channel_id = get_channel_id(i);
+					dmac_ptr->dma_chan[channel_id].status = DMA_SUCCESS;
+					if(dmac_ptr->dma_chan[channel_id].cyclic == 0)
+					{
+						dma_cookie_complete(&dmac_ptr->dma_chan[channel_id].desc);
+					}
+#ifdef CONFIG_PREEMPT_RT_FULL
+					if (dmac_ptr->dma_chan[channel_id].desc.callback)
+						dmac_ptr->dma_chan[channel_id].desc.callback(dmac_ptr->dma_chan[channel_id].desc.callback_param);
+#else
+					/* schedule tasklet on this channel */
+			/* yu.dong@20240617 [T106BUG-641] SPI packet loss issue, increase kernel buffer and read all cached data away, no data loss start */
+			#if defined(_USE_VEHICLE_DC) || defined(_USE_VEHICLE_DC_REF)
+					/* NOTE(review): channel_id is a table index while
+					 * DMA_CH_UART0_RX etc. look like peripheral ids -
+					 * confirm the comparison is intended. */
+					if((channel_id == DMA_CH_UART0_RX) || (channel_id == DMA_CH_UART2_RX) || (channel_id == DMA_CH_SSP0_RX)){
+						if (dmac_ptr->dma_chan[channel_id].desc.callback)
+							dmac_ptr->dma_chan[channel_id].desc.callback(dmac_ptr->dma_chan[channel_id].desc.callback_param);
+					}else
+						tasklet_schedule(&dmac_ptr->dma_chan[channel_id].tasklet);
+			#else
+			/* yu.dong@20240617 [T106BUG-641] SPI packet loss issue, increase kernel buffer and read all cached data away, no data loss end */
+					tasklet_schedule(&dmac_ptr->dma_chan[channel_id].tasklet);
+			#endif
+#endif
+				}
+			}
+		}
+	}while(need_continue);
+	
+    return IRQ_HANDLED;
+}
+
+#if ZX29_DMA_TEST  
+
+#define	DMA_LLI_TEST			0
+
+#if DMA_LLI_TEST
+#define	MEM_CPY_CNT				(3)
+#else
+#define	MEM_CPY_CNT				(1)
+#endif
+
+#define	MEM_TEST_COUNT			(0x200)
+
+static unsigned int dma_int_count = 0;
+static unsigned char * test_buffer = NULL;
+static dma_addr_t test_phy_addr;
+static struct dma_chan * test_chan = NULL;
+
+/*
+ * dma_cb - completion callback for the m2m self-test.
+ * @chan: the zx29 channel the test ran on (unused beyond logging).
+ *
+ * Unmaps the test buffer, verifies each copied region, then frees the
+ * buffer and releases the channel.  Fix: the "succeeded" message (and
+ * counter increment) used to be printed even when a memcmp mismatch had
+ * just been reported; success is now only reported when all regions match.
+ */
+void dma_cb(struct zx29_dma_channel * chan)
+{
+	int i;
+	int failed = 0;
+	
+//	dma_sync_single_for_cpu(dma_dev.dma.dev, test_phy_addr, MEM_TEST_COUNT*2, DMA_BIDIRECTIONAL);
+	dma_unmap_single(dma_dev.dma.dev, test_phy_addr, MEM_TEST_COUNT*2*MEM_CPY_CNT, DMA_BIDIRECTIONAL);
+
+	for(i=0; i<MEM_CPY_CNT; i++)
+	{
+		if(memcmp(test_buffer+MEM_TEST_COUNT*2*i, 
+					test_buffer+MEM_TEST_COUNT+MEM_TEST_COUNT*2*i, 
+					MEM_TEST_COUNT))
+		{
+			failed = 1;
+			pr_info("[DMA] m2m test copy failed(%d). \n", i+1);
+		}
+	}
+
+	kfree(test_buffer);
+	if (test_chan)
+		dma_release_channel(test_chan);	
+
+	if (!failed)
+		pr_info("[DMA] m2m test copy succeeded (%d). \n", ++dma_int_count);
+
+}
+
+/*
+ * test_prepare_buff - allocate and pattern-fill the m2m test buffer.
+ * @size: total buffer size in bytes (source + destination regions).
+ *
+ * Each source region i is filled with the byte 0x11*(i+1); destination
+ * regions stay zeroed (kzalloc).  BUG()s on allocation failure (test-only
+ * code).  Fix: the address was printed with "%x" through an unsigned-int
+ * cast, which truncates and mismatches on 64-bit - use "%p".
+ */
+static void *test_prepare_buff(size_t size)
+{
+	int i;
+	
+	/* alloc buffer */
+	test_buffer = kzalloc(size, GFP_KERNEL);
+	if (!test_buffer) {
+		dev_err(dma_dev.dma.dev, "%s: could not alloc DMA memory\n",
+			__func__);
+		BUG();
+	}
+	pr_info("[DMA] m2m test alloc buffer (%p). \n", test_buffer);
+
+	/* prepare data */
+	for(i=0; i<MEM_CPY_CNT; i++)
+		memset(test_buffer+MEM_TEST_COUNT*2*i, 0x11+0x11*i, MEM_TEST_COUNT);
+
+	return test_buffer;
+}
+
+/*
+ * test_alloc_channel - grab any free mem2mem-capable DMA channel.
+ *
+ * Uses zx29_dma_filter_fn with DMA_CH_MEMORY so the filter may pick any
+ * unlocked channel with enable_mem2mem set.  Returns NULL on failure.
+ */
+static struct dma_chan *test_alloc_channel(void) 
+{
+	dma_cap_mask_t mask;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+	
+	return dma_request_channel(mask, zx29_dma_filter_fn, (void*)DMA_CH_MEMORY);	
+}
+
+/*
+ * test_dma_config - build MEM_CPY_CNT linked m2m descriptors and apply them.
+ * @chan:     the test channel.
+ * @phy_addr: DMA address of the test buffer.
+ *
+ * Each descriptor copies MEM_TEST_COUNT bytes from region i to the region
+ * right after it; link_addr chains them, with the last entry terminating
+ * the chain.  NOTE(review): the dma_channel_def array is passed where the
+ * core expects a struct dma_slave_config - this relies on the driver's own
+ * device_config (zx29_dma_config) decoding it; confirm against that code.
+ */
+static signed int  test_dma_config(struct dma_chan *chan, dma_addr_t phy_addr)
+{
+	dma_channel_def temp[MEM_CPY_CNT];
+	int 	i;
+
+	memset(temp, 0, sizeof(temp));
+	for(i=0; i<MEM_CPY_CNT; i++)
+	{
+		temp[i].src_addr 	= phy_addr + MEM_TEST_COUNT*2*i;
+		temp[i].dest_addr	= temp[i].src_addr + MEM_TEST_COUNT;	
+		temp[i].count		= MEM_TEST_COUNT;
+//		temp[i].callback	= (dma_callback_func)dma_cb;
+		
+		temp[i].dma_control.tran_mode 		= TRAN_MEM_TO_MEM;
+		temp[i].dma_control.src_burst_size 	= DMA_BURST_SIZE_8BIT;
+		temp[i].dma_control.src_burst_len 	= DMA_BURST_LEN_16;
+		temp[i].dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
+		temp[i].dma_control.dest_burst_len 	= DMA_BURST_LEN_16;
+		temp[i].dma_control.irq_mode 		= DMA_ALL_IRQ_ENABLE;
+		
+		temp[i].link_addr 		= 1;
+	}
+	temp[MEM_CPY_CNT-1].link_addr 	= 0;	/* terminate the LLI chain */
+
+	return dmaengine_slave_config(chan,(struct dma_slave_config*)&temp);
+}
+
+	
+//static 
+/*
+ * dma_m2m_test - run one end-to-end memory-to-memory DMA copy test.
+ * @dev: unused; kept for the sysfs store signature.
+ *
+ * Allocates a patterned buffer, requests a mem2mem channel, maps the
+ * buffer, configures a descriptor chain and submits it; dma_cb verifies
+ * the copies and releases everything on completion.
+ *
+ * Fixes: the buffer leaked when no channel was available; the prepared
+ * descriptor was dereferenced without a NULL check; the dead "#if 0"
+ * variant was removed.
+ */
+void dma_m2m_test(struct device *dev)
+{
+	struct dma_async_tx_descriptor *desc =NULL;
+	struct zx29_dma_channel * zx29_chan = NULL;
+	unsigned char *p = NULL;
+	int ret = 0;
+	
+	p = test_prepare_buff(MEM_TEST_COUNT*2*MEM_CPY_CNT);
+
+	/* alloc dma channel */
+	test_chan = test_alloc_channel();
+	if (!test_chan)
+	{
+		pr_info("[DMA]test request channel failed \n");
+		kfree(test_buffer);
+		return;
+	}
+
+	/* map dma address */
+	test_phy_addr = dma_map_single(dma_dev.dma.dev, (void *)p, MEM_TEST_COUNT*2*MEM_CPY_CNT, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dma_dev.dma.dev, test_phy_addr)) {
+		dev_err(dma_dev.dma.dev, "Failed to dma_map_single\n");
+		BUG();
+	}	
+
+	/* config dma */
+	ret = test_dma_config(test_chan, test_phy_addr);
+	if(ret < 0)
+		printk("dmaengine_slave_config failed(%d)~~~~~~", ret);
+
+	/* start transfer */
+	zx29_chan = to_zx29_dma_chan(test_chan);	
+	desc = test_chan->device->device_prep_interleaved_dma(test_chan,NULL,0);
+	if (!desc)
+	{
+		pr_info("[DMA]test prep descriptor failed \n");
+		dma_unmap_single(dma_dev.dma.dev, test_phy_addr, MEM_TEST_COUNT*2*MEM_CPY_CNT, DMA_BIDIRECTIONAL);
+		kfree(test_buffer);
+		dma_release_channel(test_chan);
+		test_chan = NULL;
+		return;
+	}
+	desc->callback = (dma_async_tx_callback)dma_cb;
+	desc->callback_param = (void *) zx29_chan;
+	zx29_chan->zx29_dma_cookie = dmaengine_submit(desc);
+	dma_async_issue_pending(test_chan);
+
+	return ;
+}
+
+
+/* sysfs 'dma' show: report how many m2m test completions have fired. */
+static ssize_t dma_m2m_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "dma_int_count:%d\n", dma_int_count);
+}
+
+/* sysfs 'dma' store: any write kicks off one m2m DMA test run. */
+static ssize_t dma_m2m_store(struct device *dev, struct device_attribute *attr, 
+		const char *buf, size_t count)
+{
+	dma_m2m_test(dev);
+	
+	return (count);
+}
+
+/* 'dma' attribute (mode 0600): read = stats, write = run the test */
+static DEVICE_ATTR(dma,0600,dma_m2m_show,dma_m2m_store);
+static struct attribute *zx29_dma_attributes[] = {
+	&dev_attr_dma.attr,
+	NULL,
+};
+
+static const struct attribute_group zx29_dma_attribute_group = {
+	.attrs = (struct attribute **) zx29_dma_attributes,
+};
+#endif
+
+/*
+ * dma_init_channels - populate dma_dev's channel list from the static
+ * dma_chan_config table.
+ *
+ * For every configured channel: record its peripheral id, point it back at
+ * the controller, init its dmaengine cookie, set up its completion tasklet
+ * and add it to the dmaengine channel list.
+ */
+static void	dma_init_channels(void)
+{
+    int i = 0;
+    struct zx29_dma_channel * dma_chan_ptr = NULL;
+
+    dma_dev.chan_config		= dma_chan_config;
+	dma_dev.channel_count	= ARRAY_SIZE(dma_chan_config);
+
+	INIT_LIST_HEAD(&dma_dev.dma.channels);
+	
+	for(i=0;i<dma_dev.channel_count;i++)
+	{
+	    dma_chan_ptr 				= &dma_dev.dma_chan[i];
+	    dma_chan_ptr->peripheral_id	= dma_dev.chan_config[i].peripheral_id;
+	    dma_chan_ptr->dma_device 	= &(dma_dev);
+	    dma_chan_ptr->chan.device 	= &(dma_dev.dma);
+	    dma_cookie_init(&dma_chan_ptr->chan);
+
+	    tasklet_init(&dma_chan_ptr->tasklet, dma_tasklet, (unsigned long)(dma_chan_ptr));
+		
+	    list_add_tail(&dma_chan_ptr->chan.device_node, &dma_dev.dma.channels);
+	}
+}
+
+static u64 general_dma_mask = DMA_BIT_MASK(32);
+
+/*
+ * dma_init_resource - map registers, hook the interrupt and allocate the
+ * per-channel linked-list-item (LLI) buffers.
+ * @pdev: the zx29 DMA platform device.
+ *
+ * Returns 0 on success or a negative errno.  Fix: every failure path now
+ * unwinds what was acquired before it (iomaps, irq, earlier LLI buffers);
+ * previously the mappings and irq leaked on error.
+ */
+static int dma_init_resource(struct platform_device* pdev)
+{
+	int ret = 0;
+	int irq = 0;
+	int	i;	
+	struct device_node *np = pdev->dev.of_node;
+
+	/* controller register bank */
+	dma_dev.reg	= (dma_regs *)of_iomap(np, 0);
+	if ( !dma_dev.reg ) {
+		dev_err(&pdev->dev, "[DMA]Cannot get IORESOURCE_MEM\n");		
+		return -ENOENT;
+	}
+
+	/* shared/public DMA configuration area */
+	dma_pub_configs = (dma_pub_config *)(dma_regs *)of_iomap(np, 1);
+	if ( !dma_pub_configs ) {
+		dev_err(&pdev->dev, "[DMA]Cannot get IORESOURCE_MEM 1\n");
+		ret = -ENOENT;
+		goto err_unmap_reg;
+	}
+	// only for test
+//	memset((u8 *)dma_pub_configs, 0, 0x80);
+
+	/* irq */
+	irq = irq_of_parse_and_map(np, 0);
+	if( !irq ) {
+		dev_err(&pdev->dev, "[DMA]Cannot get IORESOURCE_IRQ\n");		
+		ret = -ENOENT;
+		goto err_unmap_pub;
+	}	
+    dma_dev.reg->irq_type 	= 0xF;	/* high level for all cores */
+    ret = request_irq(irq, dma_Isr, IRQF_NO_THREAD, "zx29dma", &dma_dev);
+	if(ret) 
+		goto err_unmap_pub;
+
+	/* memory for lli */
+	for(i=0; i<ARRAY_SIZE(dma_chan_config); i++)
+	{
+		dma_lli_params[i] = kzalloc(MAX_LLI_PARAMS_CNT, GFP_KERNEL);
+		if (!dma_lli_params[i]) {
+			int j;
+			dev_err(&pdev->dev, "[DMA]%s: could not alloc memory for lli[%d].\n",
+				__func__, i);
+			/* release the LLI buffers allocated so far */
+			for(j=0; j<i; j++)
+			{
+				dma_unmap_single(&pdev->dev, dma_lli_phy_addr[j], MAX_LLI_PARAMS_CNT, DMA_BIDIRECTIONAL);		
+				kfree(dma_lli_params[j]);
+				dma_lli_phy_addr[j]=0;
+				dma_lli_params[j]=NULL;
+			}		
+			ret = -ENOMEM;
+			goto err_free_irq;
+		}
+
+		dma_lli_phy_addr[i] = dma_map_single(&pdev->dev, dma_lli_params[i], MAX_LLI_PARAMS_CNT, DMA_BIDIRECTIONAL);
+	}
+
+	return 0;
+
+err_free_irq:
+	free_irq(irq, &dma_dev);
+err_unmap_pub:
+	iounmap(dma_pub_configs);
+	dma_pub_configs = NULL;
+err_unmap_reg:
+	iounmap(dma_dev.reg);
+	dma_dev.reg = NULL;
+	return ret;
+}
+
+/*
+ * dma_register_device - fill in the dmaengine ops/capabilities and register
+ * the controller with the dmaengine core.
+ * @pdev: the platform device providing the struct device and DMA masks.
+ *
+ * Advertises slave, cyclic and interleaved transfers, 1/2/4/8-byte bus
+ * widths and all three transfer directions.  Returns the result of
+ * dma_async_device_register().
+ */
+static int dma_register_device(struct platform_device* pdev)
+{
+	dma_cap_zero(dma_dev.dma.cap_mask);
+	dma_cap_set(DMA_SLAVE,  dma_dev.dma.cap_mask);
+	dma_cap_set(DMA_CYCLIC,  dma_dev.dma.cap_mask);
+	dma_cap_set(DMA_INTERLEAVE, dma_dev.dma.cap_mask);
+
+	dma_dev.dma.device_alloc_chan_resources = zx29_dma_alloc_chan_resources;
+	dma_dev.dma.device_free_chan_resources 	= zx29_dma_free_chan_resource;
+	dma_dev.dma.device_tx_status 			= zx29_dma_tx_status;
+	dma_dev.dma.device_config 				= zx29_dma_config;
+	dma_dev.dma.device_terminate_all		= zx29_dma_terminate_all;
+	dma_dev.dma.device_prep_dma_cyclic		= zx29_prep_dma_cyclic;
+	dma_dev.dma.device_prep_interleaved_dma = zx29_prep_dma_interleaved;
+	dma_dev.dma.device_issue_pending 		= zx29_dma_issue_pending;
+
+	dma_dev.dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+		BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+	dma_dev.dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+		BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+	dma_dev.dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) | BIT(DMA_MEM_TO_MEM);
+
+	/* 32-bit addressing for both streaming and coherent allocations */
+	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	pdev->dev.dma_mask = &general_dma_mask;	
+
+	dma_dev.dma.dev = &pdev->dev;
+	return dma_async_device_register(&dma_dev.dma);
+}
+
+/*
+ * zx29_dma_probe - platform probe: resources, channel bookkeeping, then
+ * dmaengine registration.
+ * @pdev: matched platform device.
+ *
+ * Returns 0 on success; a negative errno if resources or registration fail.
+ */
+static int zx29_dma_probe(struct platform_device* pdev)
+{
+    int ret = 0;
+
+	/* resource */
+	ret = dma_init_resource(pdev);
+	if(ret)	
+	{
+		pr_info("[DMA]get resource failed!\n");
+		return ret;
+	}
+
+	/* channel info */
+	dma_init_channels();
+
+	/* register device */
+	ret = dma_register_device(pdev);
+	if (ret)
+	{
+		dev_info(dma_dev.dma.dev, "[DMA]unable to register\n");
+		return -EINVAL;
+	}	
+	
+	pr_info("[DMA]zx297520v DMA initialized\n");
+
+   	return 0;
+}
+
+/* Device-tree match table: binds to "arm,zx297520v3-dma" nodes. */
+static const struct of_device_id zx29_dma_dt_ids[] = {
+	{ .compatible = "arm,zx297520v3-dma" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, zx29_dma_dt_ids);
+
+struct platform_driver zx29_dma_driver = {
+	.driver = {
+		     .name = "zx29_dma",
+			 .of_match_table = of_match_ptr(zx29_dma_dt_ids),
+	},
+	.probe = zx29_dma_probe,
+};
+
+/* Registered at subsys_initcall time so client drivers can find channels. */
+static int __init zx29_dma_driver_init(void)
+{
+	return platform_driver_register(&zx29_dma_driver);
+}
+subsys_initcall(zx29_dma_driver_init);
+
+
+/**
+ * zx_dma_test_init - create the "/sys/zte/test/dma_test" test interface.
+ *
+ * Only does anything when the driver is built with ZX29_DMA_TEST; the
+ * group is attached under the externally provided zx_test_kobj.
+ * Always returns 0.
+ */
+extern struct kobject *zx_test_kobj; 
+int __init zx_dma_test_init(void)
+{
+#if ZX29_DMA_TEST 
+	int ret;
+
+    ret = sysfs_create_group(zx_test_kobj, &zx29_dma_attribute_group);
+	if (!ret)
+    	pr_debug("[DEBUG] create test dma sysfs interface OK.\n");
+#endif
+
+	return 0;
+}
+
diff --git a/upstream/linux-5.10/drivers/misc/zcat/debug_info.c b/upstream/linux-5.10/drivers/misc/zcat/debug_info.c
new file mode 100755
index 0000000..d23e340
--- /dev/null
+++ b/upstream/linux-5.10/drivers/misc/zcat/debug_info.c
@@ -0,0 +1,396 @@
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/cdev.h>
+#include <linux/semaphore.h>
+#include <linux/timer.h>
+
+// #include <linux/fs.h>
+#include <linux/ioport.h>
+// #include <linux/serial_reg.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+// #include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+// #include <linux/kthread.h>
+#include <asm/io.h>
+
+#include <linux/vmalloc.h>
+#include <linux/soc/zte/rpmsg.h>
+// #include <linux/syscalls.h>
+
+// #include "debuginfo.h"
+#include "pub_debug_info.h"
+#include "ringbuf.h"
+
+
+#if defined(_USE_ZXIC_DEBUG_INFO) && !defined(CONFIG_SYSTEM_RECOVERY)
+/*******************************************************************************
+ *                                   宏定义                                     *
+ *******************************************************************************/
+#define DEBUG_INFO_SHARE_MEM_LEN    (0x2000)
+#define DEBUG_INFO_READABLE_LEN     (0x1400)
+#define DEBUG_INFO_MAX_DATA_LEN     (128)
+#define DEBUG_INFO_MAX_TOTAL_LEN    (140) // 8 + 128 + 4
+#define DEBUG_INFO_READ_TIME_MSECS  (10000)
+
+#define DEBUG_INFO_CHANNEL          (9)
+#define DEBUG_INFO_MSG_CAP_SIZE     (2 * 1024)
+
+#define DEBUG_INFO_OK               (0)
+#define DEBUG_INFO_ERROR            (-1)
+
+#define DEBUG_INFO_IOCTL_SET_DISABLE  (0x1001)
+
+/*******************************************************************************
+ *                                结构体定义                                     *
+ *******************************************************************************/
+typedef unsigned int UINT32;
+typedef unsigned short UINT16;
+typedef unsigned char UINT8;
+
+/* One record in the shared debug ring buffer. */
+typedef struct
+{
+    UINT16 module_id; // module id
+    UINT16 sub_len;   // length of the user payload in bytes
+    UINT32 time;      // timestamp (jiffies when recorded in kernel)
+    char sub_data[]; // user payload (flexible array member)
+} T_SHARED_MEM_DATA;
+
+/* Header prepended to data handed up to the reading process. */
+typedef struct
+{
+    UINT32 head;                  // 0x010a0a0a
+    UINT32 total_len;             // length of the payload that follows
+    long long time;               // wall-clock time via time() (currently always 0)
+} T_SAVE_FILE_DATA;
+
+/*******************************************************************************
+ *                              Global variables                               *
+ *******************************************************************************/
+volatile T_RINGBUFFER *g_debug_info_buf = NULL;
+static struct semaphore debug_sem;
+static DEFINE_RAW_SPINLOCK(debugWr_lock);
+static int g_init_flag = 0;
+
+/*******************************************************************************
+ *                              内部函数定义                                     *
+ *******************************************************************************/
+static int sc_debug_info_read_to_user(char *buf, unsigned short count);
+static int sc_debug_info_record_from_user(const char *info, unsigned short count);
+static int sc_debug_info_write(UINT32 flag, const UINT8 *buf, UINT32 len);
+static void sc_debug_info_from_ap(void *buf, unsigned int len);
+
+static void kernel_timer_timeout(struct timer_list *t);
+static ssize_t debug_info_read(struct file *fp, char __user *buf, size_t count, loff_t *pos);
+static ssize_t debug_info_write(struct file *fp, const char __user *buf, size_t count, loff_t *pos);
+static int debug_info_open(struct inode *ip, struct file *fp);
+static long debug_info_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
+static int debug_info_release(struct inode *ip, struct file *fp);
+
+/* Periodic timer that wakes the reader (see kernel_timer_timeout). */
+static DEFINE_TIMER(timer, kernel_timer_timeout);
+
+static const struct file_operations debug_info_fops = {
+    .owner = THIS_MODULE,
+    .read = debug_info_read,
+    .write = debug_info_write,
+    .open = debug_info_open,
+    .unlocked_ioctl = debug_info_ioctl,
+    .release = debug_info_release,
+};
+
+/* /dev/debug_info miscdevice */
+static struct miscdevice debug_info_device = {
+    .minor = MISC_DYNAMIC_MINOR,
+    .name = "debug_info",
+    .fops = &debug_info_fops,
+};
+
+/*
+ * kernel_timer_timeout - periodic wakeup for the /dev/debug_info reader.
+ *
+ * Releases the read semaphore (only when no wakeup is already pending)
+ * and re-arms itself for another DEBUG_INFO_READ_TIME_MSECS interval.
+ */
+static void kernel_timer_timeout(struct timer_list *t)
+{
+    if (debug_sem.count == 0)
+    {
+        up(&debug_sem);
+    }
+    /* A kernel timer fires once; re-arm here to get periodic behaviour. */
+    /* Kernel Timer restart */
+    mod_timer(&timer, jiffies + msecs_to_jiffies(DEBUG_INFO_READ_TIME_MSECS));    
+}
+
+/*
+ * debug_info_read - blocking read on /dev/debug_info.
+ *
+ * Sleeps (interruptibly) until the periodic timer or a writer posts the
+ * semaphore, then drains the ring buffer into the caller's buffer.
+ * Returns the byte count from the drain, or -EINTR if interrupted.
+ */
+static ssize_t debug_info_read(struct file *fp, char __user *buf, size_t count, loff_t *pos)
+{
+    int err = down_interruptible(&debug_sem);
+
+    if (err < 0)
+        return err;
+
+    return sc_debug_info_read_to_user(buf, count);
+}
+
+/*
+ * debug_info_write - write handler for /dev/debug_info.
+ *
+ * Forwards the user payload straight into the debug ring buffer and
+ * returns the number of bytes recorded (or an error code).
+ */
+static ssize_t debug_info_write(struct file *fp, const char __user *buf, size_t count, loff_t *pos)
+{
+    return sc_debug_info_record_from_user(buf, count);
+}
+
+/* open: no per-open state to set up. */
+static int debug_info_open(struct inode *ip, struct file *fp)
+{
+    return 0;
+}
+
+/*
+ * ioctl: DEBUG_INFO_IOCTL_SET_DISABLE writes @arg to the
+ * ZCAT_DEBUG_INFO_DISABLE control word (presumably a fixed shared-memory
+ * address defined in pub_debug_info.h - confirm there).  Unknown commands
+ * are silently ignored and return 0.
+ */
+static long debug_info_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+    switch(cmd)
+    {
+        case DEBUG_INFO_IOCTL_SET_DISABLE:
+            *(volatile UINT32 *)ZCAT_DEBUG_INFO_DISABLE = arg;
+            break;
+
+        default:
+            break;
+    }
+    return 0;
+}
+
+/* release: nothing to tear down. */
+static int debug_info_release(struct inode *ip, struct file *fp)
+{
+    return 0;
+}
+
+/*
+ * sc_debug_info_from_ap - rpmsg callback for debug records from the AP core.
+ * @buf: a T_SHARED_MEM_DATA record; its time field is stamped here.
+ * @len: total record length in bytes.
+ *
+ * Stamps the record with the local jiffies and queues it into the ring
+ * buffer as kernel-space data.
+ */
+static void sc_debug_info_from_ap(void *buf, unsigned int len)
+{
+    T_SHARED_MEM_DATA *debug_msg = (T_SHARED_MEM_DATA *)buf;
+    debug_msg->time = jiffies;
+
+    sc_debug_info_write(ZCAT_MEM_TYPE_KERNEL, buf, len);
+}
+
+/*
+ * debug_info_init - module init: register /dev/debug_info, build the ring
+ * buffer, hook the rpmsg channel from the AP and start the drain timer.
+ *
+ * Returns 0 on success, DEBUG_INFO_ERROR (-1) on failure.  Fix: failure
+ * paths previously leaked the registered miscdevice and the vmalloc'ed
+ * ring memory; they now unwind via goto cleanup.  The declaration of
+ * 'mem' was also moved to the top of the function (kernel C style).
+ */
+static int __init debug_info_init(void)
+{
+    void *mem;
+    int ret;
+
+    ret = misc_register(&debug_info_device);
+    if (ret)
+    {
+        printk("debug_info_device init.\n");
+        return DEBUG_INFO_ERROR;
+    }
+
+    mem = vmalloc(DEBUG_INFO_SHARE_MEM_LEN);
+    if (!mem)
+    {
+        printk("vmalloc failed.\n");
+        goto err_deregister;
+    }
+
+    g_debug_info_buf = CreateRingBuffer((UINT8 *)mem, DEBUG_INFO_SHARE_MEM_LEN);
+    if (g_debug_info_buf == NULL)
+    {
+        printk("CreateRingBuffer failed.\n");
+        goto err_free_mem;
+    }
+
+    ret = rpmsgCreateChannel(
+        CORE_PS0, 
+        DEBUG_INFO_CHANNEL,
+        DEBUG_INFO_MSG_CAP_SIZE);
+    if (ret != DEBUG_INFO_OK) 
+    {
+        printk("rpmsgCreateChannel failed, ret = %d\n", ret);
+        goto err_free_mem;
+    }  
+    
+    ret = rpmsgRegCallBack(
+            CORE_PS0,
+            DEBUG_INFO_CHANNEL, 
+            sc_debug_info_from_ap);
+    if (ret != DEBUG_INFO_OK) 
+    {
+        printk("rpmsgRegCallBack failed,ret = %d\n", ret);
+        goto err_free_mem;
+    } 
+
+    sema_init(&debug_sem, 0);
+    /* arm the periodic drain timer; first expiry one jiffy from now */
+    mod_timer(&timer, jiffies + 1);    
+
+    g_init_flag = 1;
+
+    return 0;
+
+err_free_mem:
+    g_debug_info_buf = NULL;
+    vfree(mem);
+err_deregister:
+    misc_deregister(&debug_info_device);
+    return DEBUG_INFO_ERROR;
+}
+
+/*
+ * debug_info_exit - module teardown.
+ *
+ * Fix: the timer re-arms itself from its own handler, so plain del_timer()
+ * after deregistering could race with a concurrently running/re-armed
+ * handler; use del_timer_sync() and stop the timer before removing the
+ * device.
+ */
+static void __exit debug_info_exit(void)
+{
+    del_timer_sync(&timer);
+
+    misc_deregister(&debug_info_device);
+}
+
+/*
+ * sc_debug_info_write - append one record to the debug ring buffer.
+ * @flag: ZCAT_MEM_TYPE_KERNEL or ZCAT_MEM_TYPE_USER; passed through to
+ *        WriteRingBuffer (presumably selects copy_from_user vs memcpy -
+ *        confirm in ringbuf.c).
+ * @buf:  record bytes.
+ * @len:  record length; must be non-zero.
+ *
+ * The write itself is serialized with a raw spinlock (callers may be in
+ * irq/timer context).  When the buffered amount crosses
+ * DEBUG_INFO_READABLE_LEN the sleeping reader is woken.
+ * Returns bytes written, or DEBUG_INFO_ERROR.
+ */
+static int sc_debug_info_write(UINT32 flag, const UINT8 *buf, UINT32 len)
+{
+    UINT32 writelen;
+    UINT32 used_space;
+    unsigned long flags;
+
+    if (len == 0 || g_debug_info_buf == NULL)
+    {
+        printk("sc_debug_info_write:: (len == 0 || g_debug_info_buf == NULL).\n");
+        return DEBUG_INFO_ERROR;
+    }
+
+    raw_spin_lock_irqsave(&debugWr_lock, flags);
+    writelen = WriteRingBuffer(g_debug_info_buf, buf, len, flag);
+    raw_spin_unlock_irqrestore(&debugWr_lock, flags);
+    used_space = GetRingBufferSize(g_debug_info_buf);
+    if (used_space > DEBUG_INFO_READABLE_LEN)
+    {
+        if (debug_sem.count == 0)
+        {
+            up(&debug_sem);
+        }
+    }
+
+    return writelen;
+}
+
+/*
+ * sc_debug_info_read_to_user - drain the ring buffer to a user buffer,
+ * prefixed with a T_SAVE_FILE_DATA header.
+ * @buf:   user-space destination (__user pointer, passed as char *).
+ * @count: size of the caller's buffer in bytes.
+ *
+ * Returns header+payload bytes delivered, 0 when empty, DEBUG_INFO_ERROR
+ * on bad arguments or copy failure.
+ *
+ * Fixes: the ring content was copied out regardless of @count, so a small
+ * user buffer was overrun - the drain is now clamped to the space left
+ * after the header; the copy_to_user() result is checked; the unused
+ * 'bufLen' local was dropped.
+ */
+static int sc_debug_info_read_to_user(char *buf, unsigned short count)
+{
+    unsigned int bufSize_used = 0;
+    unsigned int readLen = 0;
+    T_SAVE_FILE_DATA fileDataHead;
+
+    if (g_init_flag == 0)
+    {
+        printk("debug_info not init.\n");
+        return DEBUG_INFO_ERROR;
+    }
+    if (count <= sizeof(T_SAVE_FILE_DATA) || buf == NULL || g_debug_info_buf == NULL)
+    {
+        printk("sc_debug_info_read_to_user:: (count too small || buf == NULL || g_debug_info_buf == NULL).\n");
+        return DEBUG_INFO_ERROR;
+    }
+
+    bufSize_used = GetRingBufferSize(g_debug_info_buf);
+    if (bufSize_used == 0)
+    {
+        return 0;
+    }
+
+    /* never hand back more than the caller's buffer can hold */
+    if (bufSize_used > (unsigned int)(count - sizeof(T_SAVE_FILE_DATA)))
+        bufSize_used = count - sizeof(T_SAVE_FILE_DATA);
+
+    fileDataHead.head = 0x010a0a0a;
+    fileDataHead.time = 0;
+    fileDataHead.total_len = bufSize_used;
+
+    if (copy_to_user(buf, &fileDataHead, sizeof(T_SAVE_FILE_DATA)))
+        return DEBUG_INFO_ERROR;
+
+    readLen = ReadRingBuffer(g_debug_info_buf, (buf + sizeof(T_SAVE_FILE_DATA)), bufSize_used, ZCAT_MEM_TYPE_USER);
+    if (readLen == 0)
+    {
+        return 0;
+    }
+
+    return (readLen + sizeof(T_SAVE_FILE_DATA));
+}
+
+/*
+ * sc_debug_info_record_from_user - record a user-supplied T_SHARED_MEM_DATA.
+ * @info:  user-space record; its time field (offset 4) is stamped in place.
+ * @count: total record length.
+ *
+ * ZCAT_MEM_TYPE_USER presumably makes WriteRingBuffer copy from user space
+ * - confirm in ringbuf.c.  Fixes: reject records too small to contain the
+ * header (the timestamp patch would write past the buffer) and check the
+ * previously ignored copy_to_user() result.
+ */
+static int sc_debug_info_record_from_user(const char *info, unsigned short count)
+{
+    unsigned int cnt = 0;
+    unsigned int my_jiffies = jiffies;
+
+    if (g_init_flag == 0)
+    {
+        printk("debug_info not init.\n");
+        return DEBUG_INFO_ERROR;
+    }
+    if (info == NULL || count < sizeof(T_SHARED_MEM_DATA))
+    {
+        printk("sc_debug_info_record_from_user:: info is NULL or too short.\n");
+        return DEBUG_INFO_ERROR;
+    }
+
+    /* stamp the record's time field (offset 4) in the caller's buffer */
+    if (copy_to_user((void *)(info + 4), &my_jiffies, sizeof(my_jiffies)))
+    {
+        printk("sc_debug_info_record_from_user:: copy_to_user failed.\n");
+        return DEBUG_INFO_ERROR;
+    }
+    cnt = sc_debug_info_write(ZCAT_MEM_TYPE_USER, (UINT8 *)info, count);
+
+    return cnt;
+}
+
+module_init(debug_info_init);
+module_exit(debug_info_exit);
+
+MODULE_AUTHOR("jcw");
+MODULE_DESCRIPTION("debug_info driver");
+MODULE_LICENSE("GPL");
+
+
+/*
+ * sc_debug_info_vrecord - format and record a kernel debug message.
+ * @id:     module id (low 16 bits kept).
+ * @format: printf-style format.
+ * @args:   started va_list for @format.
+ *
+ * Builds a T_SHARED_MEM_DATA on the stack (140-byte str_buf = 8-byte
+ * header + 128 data + 4 pad) and queues it.  Fix: vsnprintf() returns the
+ * length the output WOULD have had, so a truncated message made
+ * sc_debug_info_write() read past str_buf; the length is now clamped to
+ * what the buffer actually holds.
+ *
+ * Returns bytes written or DEBUG_INFO_ERROR.
+ */
+int sc_debug_info_vrecord(unsigned int id, const char *format, va_list args)
+{
+    int len;
+    UINT32 writelen;
+    char str_buf[DEBUG_INFO_MAX_TOTAL_LEN] __attribute__((aligned(4)));
+    T_SHARED_MEM_DATA *shareMemData = (T_SHARED_MEM_DATA *)str_buf;
+
+    if (g_init_flag == 0)
+    {
+        printk("debug_info not init.\n");
+        return DEBUG_INFO_ERROR;
+    }
+
+    len = vsnprintf(shareMemData->sub_data, DEBUG_INFO_MAX_DATA_LEN, format, args);
+    if (len < 0)
+    {
+        printk("vsnprintf format error.\n");
+        return DEBUG_INFO_ERROR;
+    }
+    /* vsnprintf returns the untruncated length; clamp to the real payload */
+    if (len >= DEBUG_INFO_MAX_DATA_LEN)
+        len = DEBUG_INFO_MAX_DATA_LEN - 1;
+
+    shareMemData->module_id = (UINT16)(id & 0xFFFF);
+    shareMemData->sub_len = len;
+    shareMemData->time = jiffies;
+
+    writelen = sc_debug_info_write(ZCAT_MEM_TYPE_KERNEL, (UINT8 *)shareMemData, len + sizeof(T_SHARED_MEM_DATA));
+    return writelen;
+}
+EXPORT_SYMBOL(sc_debug_info_vrecord);
+
+/*
+ * sc_debug_info_record - varargs wrapper around sc_debug_info_vrecord().
+ * @id:     module id.
+ * @format: printf-style format followed by its arguments.
+ *
+ * Returns the vrecord result (bytes written or DEBUG_INFO_ERROR).
+ */
+int sc_debug_info_record(unsigned int id, const char *format, ...)
+{
+    va_list args;
+	int r;
+
+	va_start(args, format);
+	r = sc_debug_info_vrecord(id, format, args);
+	va_end(args);
+
+
+    return r;
+}
+EXPORT_SYMBOL(sc_debug_info_record);
+#else
+/* Stub when the debug-info feature is disabled: accept and drop. */
+int sc_debug_info_record(unsigned int id, const char *format, ...)
+{
+	return 0;
+}
+#endif   /*  _USE_ZXIC_DEBUG_INFO */
+
diff --git a/upstream/linux-5.10/drivers/mmc/core/mmc_ramdump.c b/upstream/linux-5.10/drivers/mmc/core/mmc_ramdump.c
new file mode 100755
index 0000000..f72860e
--- /dev/null
+++ b/upstream/linux-5.10/drivers/mmc/core/mmc_ramdump.c
@@ -0,0 +1,680 @@
+/*******************************************************************************
+* °æÈ¨ËùÓÐ (C)2014, ÉîÛÚÊÐÖÐÐËͨѶ΢µç×Ó
+*
+* ÎļþÃû³Æ£º emmc_ramdump.c
+* Îļþ±êʶ£º
+* ÄÚÈÝÕªÒª£º
+* ÆäËü˵Ã÷£º
+* µ±Ç°°æ±¾£º 1.0
+* ×÷¡¡¡¡Õߣº 
+* Íê³ÉÈÕÆÚ£º
+*******************************************************************************/
+//#include <common.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/mfd/zx234290.h>
+#include <linux/stddef.h>
+
+
+#define MMC1_REG_BASE			0x1211000
+#define MATRIX_CRM_REG_BASE		0x1306000
+#define CFG_EMMC_CLK_ENUM		400000
+#define CFG_EMMC_CLK_WORK		26000000
+#define CFG_EMMC_CLK_REF		50000000
+
+#define ZXMCI_FIFO_DEPTH		128
+#define MMC_BLOCK_SIZE			512
+
+#define mci_read(base, reg)			\
+	(*(volatile u32*)(base + reg))
+#define mci_write(base, reg, value)			\
+	(*(volatile u32*)(base+ reg) = (value))
+
+u8 mmc_data_buf[512]={0};
+//CMD register
+//bit6  1=response from card
+//bit7  1=long response from card
+//bit8  1=check response CRC
+//bit9  1=data transfer expect
+//bit12 1=send stop command at the end of data transfer
+//bit13 1=wait for previous data transfer completion before sending command
+#define R1						((1 << 6) | (1 << 8))
+#define R2						((1 << 6) | (1 << 7) | (1 << 8))
+#define R3						(1 << 6)
+#define CF_DATA					((1 << 9) | (1 << 12) | (1 << 13))
+#define CF_DATA_WR				((1 << 9)|(1 << 10) | (1 << 12) | (1 << 13))
+
+
+static u32 mmc_rca;
+static u32 block_addr = 1;
+//extern struct dw_mci *dw_mci_host_ptr[2];
+extern u32 * g_reg_base[2];
+struct mmc_cid
+{
+	u32		psn;
+	u8		oid;
+	u8		mid;
+	u8		prv;
+	u8		mdt;
+	char	pnm[7];
+};
+
+// Reset the eMMC clock (original comment was GBK-encoded Chinese).
+// NOTE(review): the unconditional "return" makes this a deliberate no-op;
+// the clock/reset sequence below is kept under "#if 0" for reference.
+static void emmc_clk_reset(void)
+{
+return ;
+#if 0
+    volatile u32 *crm = (u32*)MATRIX_CRM_REG_BASE;
+
+	crm[0x50>>2] &= ~(0x7<<8);//bit8~10 000:26Mhz  001:100Mhz
+
+	crm[0x54>>2] |= 0x03 << 4;				// clk enable
+
+    udelay(10);
+
+	crm[0x58>>2] |= 0x01 << 1;				// reset release
+#endif
+}
+
+// Send one command to the eMMC controller and optionally collect the
+// response.  @flags carries the CMD-register bits documented above (R1/R2/
+// R3/CF_DATA).  Polls for command-done (bit2 of RINTSTS at 0x44), then
+// checks response/CRC/timeout errors.  Returns 0 on success, -1 on error.
+static int emmc_cmd(u32 cmd, u32 arg, void *resp, u32 flags)
+{
+
+#define ERR_STATUS			(1 << 1 | 1 << 6 | 1 << 8)	// bit1:response error
+														// bit6:response CRC error
+														// bit8:response timeout
+	volatile u32 i;
+    u32 cmdreg;
+	u32 *response = resp;
+	u32 response_words = 0;
+   // volatile u32 *emmc = (u32*)MMC1_REG_BASE;
+	u32 reg_val = 0;
+	/* NOTE(review): register base kept in a u32 - assumes a 32-bit target */
+	u32 regs_base = g_reg_base[1];
+	
+	//printk("(%s) cmd = %d start\n",__func__,cmd);
+	
+	//Clear all raw interrupt status
+	reg_val = mci_read(regs_base,0x44);
+	mci_write(regs_base,0x44,reg_val|((u32)-1));
+	
+	cmdreg = cmd & 0x3F;
+	cmdreg |= flags|(1 << 29) | 0x80000000;	/* use_hold_reg | start_cmd */
+
+	if(flags &(1 << 7))
+	{
+		response_words = 4;						// long response expected from card
+	}
+	else if(flags & (1 << 6))
+	{
+		response_words = 1;						// response expected from card
+	}
+    //send command	
+	reg_val = mci_read(regs_base,0x44);
+	mci_write(regs_base,0x44,reg_val|((u32)-1));
+	mci_write(regs_base,0x28,arg);
+	mci_write(regs_base,0x2C,cmdreg);
+	
+    // check command done
+    i= 0;
+	do
+	{	
+        udelay(10);
+        if(++i > 1000)
+        {
+            printk("SEND CMD FAILED,CMD = %d,reg= 0x%x\n",cmd,mci_read(regs_base,0x44));
+			
+            break;
+        }
+
+	}	while(!(mci_read(regs_base,0x44) & (1 << 2)));	
+
+    // check error
+    if(mci_read(regs_base,0x44) & ERR_STATUS)
+    {
+    
+        printk("SEND CMD ERR,reg_0x44=0x%x\n",mci_read(regs_base,0x44));
+        return -1;
+    }
+	
+	if(response == NULL)
+		return 0;
+	
+	/* response registers start at 0x30 (word-indexed via the macro) */
+	for(i = 0; i < response_words; i++)
+	{	
+		response[i]= mci_read(regs_base,(0x30 + i));
+	}
+
+	return 0;
+}
+
+//¸´Î»ËùÓп¨£¬Ê¹Æä½øÈëIDLE״̬
+static u32 emmc_idle_cards(void)
+{
+	int i;
+	u32 ret;
+	u32 regs_base = g_reg_base[1];
+
+	// Reset and initialize all cards
+	ret = (u32)emmc_cmd(0, 0, NULL, (1 << 15));
+    if(ret)
+    {
+        printk("ENTER IDLE ERR\n");
+        return (int)ret;
+    }
+
+    // wait for 80 clock at least
+	for(i = 0; i < 100; i++)
+	{
+		ret = mci_read(regs_base,0x70);
+	}
+
+	return 0;
+}
+
+// Send CMD1 (SEND_OP_COND) and poll until the card reports power-up done.
+// @ocr == 0 is a single query; otherwise loop until response bit31 is set.
+// The final response (if any) is stored through @rocr.  Returns 0 on
+// success, negative on failure.  Fix: resp[] is now zero-initialized -
+// it was copied out through *rocr even when emmc_cmd() failed before ever
+// writing it (uninitialized read).
+static inline int mmc_send_op_cond(u32 ocr,u32 *rocr)
+{
+	int i;
+    int ret = 0;
+	u32 resp[4] = {0};
+
+	/* spec requires >= 1 s; roughly 4 s here (50000 * 80 us) */
+	for(i = 50000; i > 0; i--)
+	{
+		ret = emmc_cmd(1,ocr,resp,R3);		
+		if(ret)
+			break;
+		
+		if(ocr == 0)
+			break;
+		if(resp[0] & 0x80000000)	/* power-up complete, card not busy */
+			break;
+
+		udelay(80);
+
+        ret= -1;
+	}
+
+	if(rocr)
+		*rocr = resp[0];
+
+	return ret;
+}
+
+//ö¾ÙEMMC¿¨
+static u32 ramdump_mmc_init_card(struct mmc_cid *cid, u32 ocr)
+{
+	u32 resp[4];
+	u32 rocr;
+
+	// CMD0
+	emmc_idle_cards();
+
+	// CMD1
+	mmc_send_op_cond(ocr/* | (1 << 30)*/, &rocr);
+
+	if((rocr & 0x80000000) != 0)
+	{
+		if((rocr & 0x40000000) == 0)
+		{
+			block_addr = 0;
+		}
+		else
+		{
+			block_addr = 1;
+		}
+	}
+	else
+	{
+		printk("ERR\n");
+	}
+	// CMD2
+	emmc_cmd(2, 0, resp, R2);
+
+	// CMD3
+	// Set RCA of the card that responded
+
+    mmc_rca = 1 << 16;
+	emmc_cmd(3, mmc_rca, resp, R1);
+
+	return 0;
+}
+
+// card detected numbers
+// Returns nonzero when a card is present (CDETECT at 0x50: bit low = present).
+static inline int emmc_card_present(void)
+{
+	u32 regs_base = g_reg_base[1];
+
+    return ((mci_read(regs_base,0x50) & 0x3FFFFFFF) != 0x01);
+}
+
+// Bring up the controller for CPU (PIO) FIFO access with interrupts
+// disabled.  Comments translated from GBK Chinese.  Fix: the first reset
+// write assigned through mci_read()'s lvalue expansion; it now uses
+// mci_write() like every other register store (same behaviour, consistent
+// style).  The unused reg_val local was removed.
+static inline void emmc_init(void)
+{
+	u32 tmp,cardnums;
+    u32 i = 0;
+	u32 regs_base = g_reg_base[1];
+
+	/* disable DMA, reset FIFO, reset controller */
+	mci_write(regs_base,0x00,(0 << 5)|(1 << 1)|(1 << 0));
+	do
+    {
+        udelay(10);
+        if(++i > 100)
+        {
+            printk("RESET FAILED\n");
+            break;
+        }
+	} while(mci_read(regs_base,0x00)&3);
+
+	cardnums = mci_read(regs_base,0x70)&0x3E;
+	cardnums = (cardnums >> 1) + 1;
+
+	/* CTRL register; MMC-Ver3.3-only mode would need enable_OD_pullup */
+	mci_write(regs_base,0x00,0x0);
+
+    /* power the card */
+	mci_write(regs_base,0x04,0x01);
+	/* wait for the supply to stabilize */
+	udelay(500);
+
+	/* clear the interrupt status register and program INTMSK */
+	mci_write(regs_base,0x44,((u32)-1));
+	/* mask all interrupts: upper 16 bits SDIO, lower 16 one per card */
+	mci_write(regs_base,0x24,0x00);
+
+    /* card clock source = clksrc0 (2 bits per card, 32/2 = 16 cards) */
+	mci_write(regs_base,0x0C,0x0);
+
+	/* default HOST IP parameters: TMOUT, DEBNCE, FIFOTH registers */
+	mci_write(regs_base,0x14,((u32)-1));	                                     //data_timeout
+			                                                                 //response_timeout, default 0x40
+	/* debounce counter, 25 ms */
+	mci_write(regs_base,0x64,0xFFFFFF);
+
+	/* FIFO thresholds at defaults */
+	tmp = (2 << 28) |(((ZXMCI_FIFO_DEPTH >> 1)-1) << 16) |((ZXMCI_FIFO_DEPTH >> 1) << 0);  //dma multiple transaction size (internal dma) //RX WMARK //TX WMARK
+	mci_write(regs_base,0x4c,tmp);
+
+    /* after power-up the card runs in 1-bit mode; keep the host in 1-bit */
+    mci_write(regs_base,0x18,0);
+	mci_write(regs_base,0x100,1);
+
+	/* CTRL: disable dma, disable global interrupt, reset fifo */
+	mci_write(regs_base,0x00,(0 << 5)|(0 << 4)|(1 << 1));
+
+    i= 0;
+    do
+    {
+        udelay(10);
+        if(++i > 100)
+        {
+            printk("FIFO RESET FAILED\n");
+            break;
+        }
+	} while(mci_read(regs_base,0x00)& 2);
+}
+// Latch new clock settings into the card-clock domain.
+// Only bit21 (update_clock_registers_only) is needed: nothing is sent to
+// the card, so no interrupt is generated.  Retries while the controller
+// reports a hardware-locked error (RINTSTS bit12), then waits for the
+// start_cmd bit to self-clear.  Returns 0, -1 on timeout, -2 on HW lock.
+static int emmc_update_clock_reg_only(void)
+{
+    u32 rintsts;
+    u32 repeat = 0;
+    u32 cmdr = (1 << 21)| (1 << 13) | 0x80000000;
+	u32 regs_base = g_reg_base[1];
+
+    do
+	{
+        mci_write(regs_base,0x2c,cmdr);
+        rintsts = mci_read(regs_base,0x44);
+        repeat++;
+    }	while(((rintsts & (1 << 12)) != 0) && (repeat < 10));
+
+    if(repeat >= 10)
+    {
+        printk("HW LOCK ERR\n");
+		return -2;
+    }
+
+	repeat = 0;
+
+	/* start_cmd (bit31) clears once the clock update is accepted */
+	while((mci_read(regs_base,0x2C) & 0x80000000) != 0)
+	{
+		udelay(50);
+		if(++repeat >=1000)
+		{
+			printk("UPDATE CLOCK TIMEOUT\n");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+// Program the card clock to roughly @clock Hz.
+// Flow (per the IP manual): gate the clock, latch, program the divider,
+// latch, re-enable, latch.  The divider gives CFG_EMMC_CLK_REF / (2*n)
+// with n in 1..255 (0 = divide by 1), so the range is about 130.6 kHz to
+// the full reference clock.
+static inline void emmc_set_clk(u32 clock)
+{
+	u32 clk_div;
+	u32 regs_base = g_reg_base[1];
+
+    /* ensure the card is not transferring, then gate all clocks */
+    mci_write(regs_base,0x10,0x0);
+    emmc_update_clock_reg_only();
+    /* program CLKDIV (CLKSRC left at its default) */
+	if(clock <= (CFG_EMMC_CLK_REF / 510))
+	{
+        clk_div = 0xff;	/* slowest: divide by 510 */
+	}
+	else
+	{
+        clk_div	= (CFG_EMMC_CLK_REF + clock )/((clock<<1)+1);/* rounded */
+	}
+	mci_write(regs_base,0x08,clk_div);
+    emmc_update_clock_reg_only();
+
+    /* re-enable the clock (upper 16 bits would select low-power mode) */
+	mci_write(regs_base,0x10,0x001);
+    emmc_update_clock_reg_only();
+
+}
+//Send an eMMC read command (CMD18 multi-block / CMD17 single-block) and
+//drain the controller FIFO into dst by PIO.
+//@src:  start block number (or byte address when block_addr == 0 — see callers)
+//@dst:  destination buffer, must hold at least size bytes
+//@size: transfer length in bytes; assumed to be a multiple of 4 and of
+//       MMC_BLOCK_SIZE by the callers — TODO confirm
+//Returns 0 on success, negative value on error.
+static s32 zx_mmc_read(u32 src, u8 * dst, u32 size)
+{
+	int ret;
+	u32	resp, data, wordcount, start_addr, fifo_cnt;
+	volatile u32 i= 0;
+	volatile u32	j= 0;
+	u32 *p= (u32 *)dst;
+	u32 regs_base = g_reg_base[1];
+ 
+	if(size == 0)
+		return -1;
+	
+	//Wait (bounded) while the card is busy: STATUS register bit9.
+	while((mci_read(regs_base,0x48) & (1 << 9)) != 0)
+	{
+        udelay(10);
+
+		if(++i > 200)
+			break;
+	}
+
+	start_addr = src;
+	//Reset the FIFO (CTRL bit1), then program byte count and block size.
+	data = mci_read(regs_base,0x00) | (1 << 1);
+	mci_write(regs_base,0x00,data);
+    mci_write(regs_base,0x20,size);
+	mci_write(regs_base,0x1C,MMC_BLOCK_SIZE);
+
+	//Wait for the FIFO-reset bit to self-clear (bounded poll).
+    i = 0;
+    do
+    {
+        udelay(10);
+        if(++i > 100)
+        {
+            printk("FIFO RESET FAILED\n");
+            break;
+        }
+	}	while(mci_read(regs_base,0x00) & 0x02);
+
+
+	if (size > 512)
+	{
+		ret = emmc_cmd(18,start_addr, &resp,(R1 | CF_DATA));
+		if(ret)
+			return -18;
+	}
+	else
+	{
+		ret = emmc_cmd(17,start_addr, &resp,(R1 | CF_DATA));
+		if(ret)
+			return -17;
+	}
+
+	//PIO drain: STATUS[29:17] holds the FIFO word count; read 32-bit words
+	//from the data port (0x200) until size/4 words have been transferred.
+	//wordcount is in 32-bit words here (unlike zx_mmc_write, where it is bytes).
+	wordcount = 0;
+	do
+	{
+		fifo_cnt =((mci_read(regs_base,0x48) >> 17) & 0x1FFF);
+
+		for(j = 0; j < fifo_cnt; j++)
+		{
+			data = mci_read(regs_base,0x200);
+			*p++= data;
+			wordcount++;
+		}
+
+	}	while(wordcount < (size >> 2));
+	udelay(2000);
+
+	return 0;
+
+}
+
+//Send an eMMC write command (CMD25 multi-block / CMD24 single-block) and
+//feed the controller FIFO from src_buf by PIO.
+//@blknr:   start block number (or byte address when block_addr == 0)
+//@src_buf: source buffer of at least size bytes
+//@size:    transfer length in bytes; assumed multiple of 4 — a non-multiple
+//          would never satisfy the exit condition of the fill loop (TODO confirm
+//          callers only pass MMC_BLOCK_SIZE multiples)
+//Returns 0 on success, negative value on error.
+static int zx_mmc_write(u32 blknr, u8 * src_buf, u32 size)
+{
+	int ret;
+	u32 resp, data, wordcount, start_addr, fifo_cnt;
+	volatile u32 i= 0;
+	volatile u32 j= 0;
+	u32 *p= (u32 *)src_buf;
+	u32 regs_base =  g_reg_base[1];
+	u32 write_count_per =0;
+		
+	if(size == 0)
+		return -1;
+
+	//Wait (bounded) while the card is busy: STATUS register bit9.
+	while((mci_read(regs_base,0x48) & (1 << 9)) != 0)
+	{
+		udelay(10);
+
+		if(++i > 200)
+			break;
+	}
+
+	start_addr = blknr;
+	//Reset the FIFO (CTRL bit1), then program byte count and block size.
+	data = mci_read(regs_base,0x00) | (1 << 1);
+	mci_write(regs_base,0x00,data);
+	mci_write(regs_base,0x20,size);
+	mci_write(regs_base,0x1C,MMC_BLOCK_SIZE);
+
+	//Wait for the FIFO-reset bit to self-clear (bounded poll).
+	i = 0;
+	do
+	{
+		udelay(10);
+		if(++i > 100)
+		{
+			printk("FIFO RESET FAILED\n");
+			break;
+		}
+	}	while(mci_read(regs_base,0x00) & 0x02);
+
+
+	if (size > 512)
+	{
+	
+		ret = emmc_cmd(25,start_addr, &resp,(R1 | CF_DATA_WR));
+		if(ret)
+			return -18;
+	}
+	else
+	{
+	
+		ret = emmc_cmd(24,start_addr, &resp,(R1 | CF_DATA_WR));
+		if(ret)
+			return -17;
+	}
+
+	//PIO fill: write as many words as the FIFO has room for
+	//(ZXMCI_FIFO_DEPTH minus the current fill level from STATUS[29:17]).
+	//wordcount is in BYTES here (incremented by 4 per word written).
+	wordcount = 0;
+	
+	do
+	{
+		fifo_cnt =((mci_read(regs_base,0x48) >> 17) & 0x1FFF);
+		write_count_per = min(((size-wordcount)>>2),(ZXMCI_FIFO_DEPTH-fifo_cnt));
+
+		for(j = 0; j < write_count_per; j++)
+		{
+			mci_write(regs_base,0x200,*p++);
+			wordcount = wordcount+4;
+		}
+
+	}	while(size-wordcount);
+	
+	udelay(2000);
+
+	return 0;
+
+}
+
+//Block-read wrapper: read data_size bytes starting at byte offset
+//start_addr (must be MMC_BLOCK_SIZE aligned) into dst.
+//Whole blocks are read directly into dst; a trailing partial block is
+//read through the mmc_data_buf bounce buffer and copied out.
+//block_addr selects byte addressing (0) vs sector addressing (non-zero).
+//Returns data_size on success, -1 on error.
+int mmc_bread(u32 start_addr, u32 data_size, void *dst)
+{
+	int ret;
+    u32 src = 0;
+	u32 blk_count;
+	u32 remain = 0;
+	
+	if((start_addr%MMC_BLOCK_SIZE)||(data_size==0)||(dst==NULL))
+		return -1;//err start addr
+		
+	blk_count = data_size/MMC_BLOCK_SIZE;	
+	remain = data_size%MMC_BLOCK_SIZE;	
+	if(remain)
+		memset(&mmc_data_buf,0x0,MMC_BLOCK_SIZE);
+	
+	//Byte-addressed cards take the raw byte offset; sector-addressed
+	//cards take the block number.
+	if(block_addr == 0)
+		src	= start_addr;
+	else
+		src	= start_addr/MMC_BLOCK_SIZE;
+	
+	if(blk_count){
+		ret= zx_mmc_read(src, (u8 *) dst, blk_count * MMC_BLOCK_SIZE);
+		if(ret < 0)
+		{
+			printk("READ ERR\n");
+			return -1;
+		}
+	}
+	
+	if(remain){/*transfer remain*/
+	
+		//NOTE(review): src+blk_count mixes addressing units when
+		//block_addr == 0 (byte address plus a block count) — the ramdump
+		//callers appear to use sector addressing; confirm before reuse.
+		ret= zx_mmc_read(src+blk_count, (u8 *)&mmc_data_buf, 1 * MMC_BLOCK_SIZE);
+		if(ret < 0)
+		{
+			printk("READ ERR\n");
+			return -1;
+		}	
+		memcpy((dst+blk_count * MMC_BLOCK_SIZE),&mmc_data_buf,remain);
+	}
+	
+	return data_size;
+}
+
+//Block-write wrapper: write data_size bytes from src_buf starting at byte
+//offset start_addr (must be MMC_BLOCK_SIZE aligned). Whole blocks are
+//written directly; a trailing partial block is staged in mmc_data_buf and
+//written as one full (zero-padded) block. After each write, CMD13 is
+//polled until the card reports TRAN state (current_state field == 4).
+//NOTE(review): the CMD13 poll loops have no timeout — a stuck card hangs
+//here; acceptable for a ramdump path but worth confirming.
+//Returns data_size on success, -1 on error.
+int mmc_bwrite(u32 start_addr, u32 data_size, void *src_buf)
+{
+	int ret;
+    u32 start_blk = 0;
+	u32 blk_count;
+	u32 remain = 0;
+	u32 resp[4];
+	
+	if((start_addr%MMC_BLOCK_SIZE)||(data_size==0)||(src_buf==NULL))
+		return -1;//err start addr
+		
+	blk_count = data_size/MMC_BLOCK_SIZE;	
+	remain = data_size%MMC_BLOCK_SIZE;	
+	if(remain){
+		memset(&mmc_data_buf,0x00,MMC_BLOCK_SIZE);
+		memcpy(&mmc_data_buf,(src_buf+blk_count*MMC_BLOCK_SIZE),remain);
+	}	
+		
+	//Byte-addressed cards take the raw byte offset; sector-addressed
+	//cards take the block number.
+	if(block_addr == 0)
+		start_blk	= start_addr;
+	else
+		start_blk	= (start_addr/MMC_BLOCK_SIZE);
+	
+	if(blk_count){
+		ret= zx_mmc_write(start_blk, (u8 *)src_buf, blk_count * MMC_BLOCK_SIZE);
+		if(ret < 0)
+		{
+			printk("WRITE ERR\n");
+			return -1;
+		}
+		
+		//Poll SEND_STATUS (CMD13, RCA in arg bits 31:16) until the card
+		//returns to TRAN state (R1 current_state bits 12:9 == 4).
+		resp[0]=0;
+		do{	
+			emmc_cmd(13, (1<<16), resp, R1);
+		}while(((resp[0] & 0x00001E00) >> 9)!= 4);
+	}
+	
+	if(remain){/*transfer remain*/
+		
+		ret= zx_mmc_write((start_blk+blk_count), (u8 *)&mmc_data_buf, 1 * MMC_BLOCK_SIZE);
+		if(ret < 0)
+		{
+			printk("WRITE remain ERR\n");
+			return -1;
+		}
+		
+		//Same TRAN-state poll as above for the remainder block.
+		resp[0]=0;
+		do{ 
+			emmc_cmd(13, (1<<16), resp, R1);
+		}while(((resp[0] & 0x00001E00) >> 9)!= 4);
+	}
+	return data_size;
+}
+
+
+// Bring up the eMMC for ramdump use: only eMMC is supported; automatic
+// card-type detection could be added later.
+// Sequence: host init -> presence check -> enumeration clock -> CMD0 ->
+// CMD1 (op cond) -> card init -> CMD9 (CSD) -> CMD7 (select) ->
+// CMD16 (block len 512) -> switch to working clock.
+// Returns 0 on success, negative value on error.
+int mmc_ramdump_init(void)
+{
+	struct mmc_cid cid;
+	u32 resp[4];
+	u32 ocr;
+    int ret = 0;
+	block_addr = 0;
+	
+	//zDrvPmic_SetNormal_Onoff_PSM(VSD1,PM_DISABLE);
+	//mdelay(10);
+	//zDrvPmic_SetNormal_Onoff_PSM(VSD1,PM_ENABLE);
+   // emmc_clk_reset();
+	emmc_init();
+
+    if(!emmc_card_present())
+    {
+        printk("NO EMMC\n");
+        return -1;
+    }
+
+	//Enumerate at the (slow) identification clock first.
+	emmc_set_clk(CFG_EMMC_CLK_ENUM);
+	emmc_idle_cards();
+
+	ret = mmc_send_op_cond(0, &ocr);
+	
+	if(!ret)
+	{
+		ramdump_mmc_init_card(&cid, ocr);
+
+		emmc_cmd(9, mmc_rca, resp, R2);
+
+        ret = emmc_cmd(7, mmc_rca, resp, R1);
+
+		if(ret)
+		{
+			return ret;
+		}
+
+		//Fix the block length at 512 bytes, then raise the clock to the
+		//normal working frequency.
+        emmc_cmd(16, 512, resp, R1);
+
+        emmc_set_clk(CFG_EMMC_CLK_WORK);
+	}
+
+//    mmc_bread(0, 0, 8, (void *)CFG_LOAD_BASE);
+
+    return 0;
+}
+
+
diff --git a/upstream/linux-5.10/drivers/net/zvnet/zvnet_dev.h b/upstream/linux-5.10/drivers/net/zvnet/zvnet_dev.h
new file mode 100755
index 0000000..d1feaa5
--- /dev/null
+++ b/upstream/linux-5.10/drivers/net/zvnet/zvnet_dev.h
@@ -0,0 +1,76 @@
+#ifndef ZV_NET_H

+#define ZV_NET_H

+

+#include <linux/interrupt.h>

+#include <linux/kthread.h>

+#include <linux/spinlock.h>

+#include <linux/semaphore.h>

+#include <linux/netdevice.h>

+#include <linux/skbuff.h>

+#include <linux/soc/zte/rpmsg.h>

+

+//#define ZVNET_DEBUG

+#ifdef ZVNET_DEBUG

+#define zv_dbg(format, arg...) printk(KERN_DEBUG "[zvnet]<%s>: " format "\n" , \

+	__func__ , ## arg)

+#define zv_info(format, arg...) printk(KERN_INFO "[zvnet]<%s>: " format "\n" , \

+	__func__ , ## arg)

+#else

+#define zv_dbg(format, arg...) do {} while (0)

+#define zv_info(format, arg...) do {} while (0)

+#endif

+

+#define zv_err(format, arg...) printk(KERN_ERR "[zvnet]<%s>: " format "\n" , \

+	__func__ , ## arg)

+

+#define zv_warn(format, arg...) printk(KERN_WARNING "[zvnet]<%s>: " format "\n" , \

+	__func__ , ## arg)

+

+//Maximum number of zvnet devices

+#define DDR_ZVNET_DEV_MAX 10

+#define ZVNET_IFNAME_PREFIX "zvnet"

+

+//rpmsg channel IDs used by the zvnet interfaces
+#define  ICP_CHN_ZVNET1 20 //ICP_CHANNEL_WAN1

+#define  ICP_CHN_ZVNET2 21 //ICP_CHANNEL_WAN2 

+#define  ICP_CHN_ZVNET3 22 //ICP_CHANNEL_WAN3 

+#define  ICP_CHN_ZVNET4 23 //ICP_CHANNEL_WAN4 

+

+#define	 ICP_CHANNEL_SIZE 	(8 * 1024 *2)

+

+//Thin aliases over the rpmsg transport API
+#define zvnetCreateChannel rpmsgCreateChannel

+#define zvnetWrite         rpmsgWrite

+#define zvnetRead          rpmsgRead

+

+//One rpmsg channel endpoint plus the kthread that drains it.
+struct zvnet_channel {

+    T_RpMsg_CoreID core_id;

+    T_RpMsg_ChID channel_id;

+    unsigned int channel_size;

+    struct task_struct *rcv_thread;

+};

+

+//Per-netdev private state: rx queue drained by a tasklet.
+struct zvnet {

+    struct net_device  *net;

+    struct sk_buff_head rxq;

+    struct tasklet_struct bh;

+    void *dev_priv;

+};

+

+struct zvnet_device {

+    struct zvnet *dev;

+    struct net_device *net;

+    //struct zvnet_channel chn_info;

+    unsigned char retran_times;

+    //int (*write)(struct sk_buff *,struct v2x_hdr *, unsigned int, struct net_device *);

+};

+

+//Framing header prepended to each packet on the rpmsg channel:
+//magic word, channel id, total length.
+struct zvp_header {

+    unsigned int magic_word;

+    unsigned short chnid;

+    unsigned short tlen;

+};

+

+#define ZVP_MAGIC_WORD  0x5A5A5A5A

+#define ZVP_HEAD_LEN 	sizeof(struct zvp_header)

+

+#endif

+

diff --git a/upstream/linux-5.10/drivers/rtc/class.c b/upstream/linux-5.10/drivers/rtc/class.c
new file mode 100755
index 0000000..625effe
--- /dev/null
+++ b/upstream/linux-5.10/drivers/rtc/class.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RTC subsystem, base class
+ *
+ * Copyright (C) 2005 Tower Technologies
+ * Author: Alessandro Zummo <a.zummo@towertech.it>
+ *
+ * class skeleton from drivers/hwmon/hwmon.c
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/rtc.h>
+#include <linux/kdev_t.h>
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "rtc-core.h"
+
+static DEFINE_IDA(rtc_ida);
+struct class *rtc_class;
+
+/* Device release callback: drain any timers still queued on the rtc,
+ * cancel pending irq work, then return the id and free the structure. */
+static void rtc_device_release(struct device *dev)
+{
+	struct rtc_device *rtc = to_rtc_device(dev);
+	struct timerqueue_head *head = &rtc->timerqueue;
+	struct timerqueue_node *node;
+
+	mutex_lock(&rtc->ops_lock);
+	while ((node = timerqueue_getnext(head)))
+		timerqueue_del(head, node);
+	mutex_unlock(&rtc->ops_lock);
+
+	cancel_work_sync(&rtc->irqwork);
+
+	ida_simple_remove(&rtc_ida, rtc->id);
+	kfree(rtc);
+}
+
+#ifdef CONFIG_RTC_HCTOSYS_DEVICE
+/* Result of the last RTC to system clock attempt. */
+int rtc_hctosys_ret = -ENODEV;
+
+/* IMPORTANT: the RTC only stores whole seconds. It is arbitrary
+ * whether it stores the most close value or the value with partial
+ * seconds truncated. However, it is important that we use it to store
+ * the truncated value. This is because otherwise it is necessary,
+ * in an rtc sync function, to read both xtime.tv_sec and
+ * xtime.tv_nsec. On some processors (i.e. ARM), an atomic read
+ * of >32bits is not possible. So storing the most close value would
+ * slow down the sync API. So here we have the truncated value and
+ * the best guess is to add 0.5s.
+ */
+
+/* Seed the system wall clock from this RTC at registration time.
+ * Adds 0.5s (see comment block above) to compensate for truncation;
+ * result is recorded in rtc_hctosys_ret. */
+static void rtc_hctosys(struct rtc_device *rtc)
+{
+	int err;
+	struct rtc_time tm;
+	struct timespec64 tv64 = {
+		.tv_nsec = NSEC_PER_SEC >> 1,
+	};
+
+	err = rtc_read_time(rtc, &tm);
+	if (err) {
+		dev_err(rtc->dev.parent,
+			"hctosys: unable to read the hardware clock\n");
+		goto err_read;
+	}
+
+	tv64.tv_sec = rtc_tm_to_time64(&tm);
+
+	/* 32-bit time_t cannot represent values past INT_MAX */
+#if BITS_PER_LONG == 32
+	if (tv64.tv_sec > INT_MAX) {
+		err = -ERANGE;
+		goto err_read;
+	}
+#endif
+
+	err = do_settimeofday64(&tv64);
+
+	dev_info(rtc->dev.parent, "setting system clock to %ptR UTC (%lld)\n",
+		 &tm, (long long)tv64.tv_sec);
+
+err_read:
+	rtc_hctosys_ret = err;
+}
+#endif
+
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
+/*
+ * On suspend(), measure the delta between one RTC and the
+ * system's wall clock; restore it on resume().
+ */
+
+static struct timespec64 old_rtc, old_system, old_delta;
+
+/* PM suspend hook for the hctosys RTC only: snapshot RTC and system time
+ * so rtc_resume() can compute and inject the sleep duration. */
+static int rtc_suspend(struct device *dev)
+{
+	struct rtc_device	*rtc = to_rtc_device(dev);
+	struct rtc_time		tm;
+	struct timespec64	delta, delta_delta;
+	int err;
+
+	if (timekeeping_rtc_skipsuspend())
+		return 0;
+
+	/* only the designated hctosys device participates */
+	if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0)
+		return 0;
+
+	/* snapshot the current RTC and system time at suspend*/
+	err = rtc_read_time(rtc, &tm);
+	if (err < 0) {
+		pr_debug("%s:  fail to read rtc time\n", dev_name(&rtc->dev));
+		return 0;
+	}
+
+	ktime_get_real_ts64(&old_system);
+	old_rtc.tv_sec = rtc_tm_to_time64(&tm);
+
+	/*
+	 * To avoid drift caused by repeated suspend/resumes,
+	 * which each can add ~1 second drift error,
+	 * try to compensate so the difference in system time
+	 * and rtc time stays close to constant.
+	 */
+	delta = timespec64_sub(old_system, old_rtc);
+	delta_delta = timespec64_sub(delta, old_delta);
+	if (delta_delta.tv_sec < -2 || delta_delta.tv_sec >= 2) {
+		/*
+		 * if delta_delta is too large, assume time correction
+		 * has occurred and set old_delta to the current delta.
+		 */
+		old_delta = delta;
+	} else {
+		/* Otherwise try to adjust old_system to compensate */
+		old_system = timespec64_sub(old_system, delta_delta);
+	}
+
+	return 0;
+}
+
+/* PM resume hook: compute how long the system slept from the RTC delta
+ * (minus kernel run-time around suspend) and inject it into timekeeping. */
+static int rtc_resume(struct device *dev)
+{
+	struct rtc_device	*rtc = to_rtc_device(dev);
+	struct rtc_time		tm;
+	struct timespec64	new_system, new_rtc;
+	struct timespec64	sleep_time;
+	int err;
+
+	if (timekeeping_rtc_skipresume())
+		return 0;
+
+	rtc_hctosys_ret = -ENODEV;
+	if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0)
+		return 0;
+
+	/* snapshot the current rtc and system time at resume */
+	ktime_get_real_ts64(&new_system);
+	err = rtc_read_time(rtc, &tm);
+	if (err < 0) {
+		pr_debug("%s:  fail to read rtc time\n", dev_name(&rtc->dev));
+		return 0;
+	}
+
+	new_rtc.tv_sec = rtc_tm_to_time64(&tm);
+	new_rtc.tv_nsec = 0;
+
+	/* RTC going backwards across suspend means it was changed; bail */
+	if (new_rtc.tv_sec < old_rtc.tv_sec) {
+		pr_debug("%s:  time travel!\n", dev_name(&rtc->dev));
+		return 0;
+	}
+
+	/* calculate the RTC time delta (sleep time)*/
+	sleep_time = timespec64_sub(new_rtc, old_rtc);
+
+	/*
+	 * Since these RTC suspend/resume handlers are not called
+	 * at the very end of suspend or the start of resume,
+	 * some run-time may pass on either sides of the sleep time
+	 * so subtract kernel run-time between rtc_suspend to rtc_resume
+	 * to keep things accurate.
+	 */
+	sleep_time = timespec64_sub(sleep_time,
+				    timespec64_sub(new_system, old_system));
+
+	if (sleep_time.tv_sec >= 0)
+		timekeeping_inject_sleeptime64(&sleep_time);
+	rtc_hctosys_ret = 0;
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(rtc_class_dev_pm_ops, rtc_suspend, rtc_resume);
+#define RTC_CLASS_DEV_PM_OPS	(&rtc_class_dev_pm_ops)
+#else
+#define RTC_CLASS_DEV_PM_OPS	NULL
+#endif
+
+/* Ensure the caller will set the id before releasing the device */
+/* Allocate and pre-initialize a struct rtc_device: device core, locks,
+ * timer queues (aie/uie/pie) and irq work. Returns NULL on OOM. */
+static struct rtc_device *rtc_allocate_device(void)
+{
+	struct rtc_device *rtc;
+
+	rtc = kzalloc(sizeof(*rtc), GFP_KERNEL);
+	if (!rtc)
+		return NULL;
+
+	device_initialize(&rtc->dev);
+
+	/* Drivers can revise this default after allocating the device. */
+	rtc->set_offset_nsec =  NSEC_PER_SEC / 2;
+
+	rtc->irq_freq = 1;
+	rtc->max_user_freq = 64;
+	rtc->dev.class = rtc_class;
+	rtc->dev.groups = rtc_get_dev_attribute_groups();
+	rtc->dev.release = rtc_device_release;
+
+	mutex_init(&rtc->ops_lock);
+	spin_lock_init(&rtc->irq_lock);
+	init_waitqueue_head(&rtc->irq_queue);
+
+	/* Init timerqueue */
+	timerqueue_init_head(&rtc->timerqueue);
+	INIT_WORK(&rtc->irqwork, rtc_timer_do_work);
+	/* Init aie timer */
+	rtc_timer_init(&rtc->aie_timer, rtc_aie_update_irq, rtc);
+	/* Init uie timer */
+	rtc_timer_init(&rtc->uie_rtctimer, rtc_uie_update_irq, rtc);
+	/* Init pie timer */
+	hrtimer_init(&rtc->pie_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	rtc->pie_timer.function = rtc_pie_update_irq;
+	rtc->pie_enabled = 0;
+
+	return rtc;
+}
+
+/* Pick the rtcN index: prefer the DT /aliases "rtc" id (from the device
+ * node or its parent), fall back to the first free id. Returns the id or
+ * a negative errno. */
+static int rtc_device_get_id(struct device *dev)
+{
+	int of_id = -1, id = -1;
+
+	if (dev->of_node)
+		of_id = of_alias_get_id(dev->of_node, "rtc");
+	else if (dev->parent && dev->parent->of_node)
+		of_id = of_alias_get_id(dev->parent->of_node, "rtc");
+
+	if (of_id >= 0) {
+		id = ida_simple_get(&rtc_ida, of_id, of_id + 1, GFP_KERNEL);
+		if (id < 0)
+			dev_warn(dev, "/aliases ID %d not available\n", of_id);
+	}
+
+	if (id < 0)
+		id = ida_simple_get(&rtc_ida, 0, 0, GFP_KERNEL);
+
+	return id;
+}
+
+/* Compute rtc->offset_secs from the driver-declared hardware range
+ * [range_min, range_max] and the optional "start-year" device property,
+ * so a narrow hardware counter can be mapped onto a useful epoch window. */
+static void rtc_device_get_offset(struct rtc_device *rtc)
+{
+	time64_t range_secs;
+	u32 start_year;
+	int ret;
+
+	/*
+	 * If RTC driver did not implement the range of RTC hardware device,
+	 * then we can not expand the RTC range by adding or subtracting one
+	 * offset.
+	 */
+	if (rtc->range_min == rtc->range_max)
+		return;
+
+	ret = device_property_read_u32(rtc->dev.parent, "start-year",
+				       &start_year);
+	if (!ret) {
+		rtc->start_secs = mktime64(start_year, 1, 1, 0, 0, 0);
+		rtc->set_start_time = true;
+	}
+
+	/*
+	 * If user did not implement the start time for RTC driver, then no
+	 * need to expand the RTC range.
+	 */
+	if (!rtc->set_start_time)
+		return;
+
+	range_secs = rtc->range_max - rtc->range_min + 1;
+
+	/*
+	 * If the start_secs is larger than the maximum seconds (rtc->range_max)
+	 * supported by RTC hardware or the maximum seconds of new expanded
+	 * range (start_secs + rtc->range_max - rtc->range_min) is less than
+	 * rtc->range_min, which means the minimum seconds (rtc->range_min) of
+	 * RTC hardware will be mapped to start_secs by adding one offset, so
+	 * the offset seconds calculation formula should be:
+	 * rtc->offset_secs = rtc->start_secs - rtc->range_min;
+	 *
+	 * If the start_secs is larger than the minimum seconds (rtc->range_min)
+	 * supported by RTC hardware, then there is one region is overlapped
+	 * between the original RTC hardware range and the new expanded range,
+	 * and this overlapped region do not need to be mapped into the new
+	 * expanded range due to it is valid for RTC device. So the minimum
+	 * seconds of RTC hardware (rtc->range_min) should be mapped to
+	 * rtc->range_max + 1, then the offset seconds formula should be:
+	 * rtc->offset_secs = rtc->range_max - rtc->range_min + 1;
+	 *
+	 * If the start_secs is less than the minimum seconds (rtc->range_min),
+	 * which is similar to case 2. So the start_secs should be mapped to
+	 * start_secs + rtc->range_max - rtc->range_min + 1, then the
+	 * offset seconds formula should be:
+	 * rtc->offset_secs = -(rtc->range_max - rtc->range_min + 1);
+	 *
+	 * Otherwise the offset seconds should be 0.
+	 */
+	if (rtc->start_secs > rtc->range_max ||
+	    rtc->start_secs + range_secs - 1 < rtc->range_min)
+		rtc->offset_secs = rtc->start_secs - rtc->range_min;
+	else if (rtc->start_secs > rtc->range_min)
+		rtc->offset_secs = range_secs;
+	else if (rtc->start_secs < rtc->range_min)
+		rtc->offset_secs = -range_secs;
+	else
+		rtc->offset_secs = 0;
+}
+
+/**
+ * rtc_device_unregister - removes the previously registered RTC class device
+ *
+ * @rtc: the RTC class device to destroy
+ */
+static void rtc_device_unregister(struct rtc_device *rtc)
+{
+	mutex_lock(&rtc->ops_lock);
+	/*
+	 * Remove innards of this RTC, then disable it, before
+	 * letting any rtc_class_open() users access it again
+	 */
+	rtc_proc_del_device(rtc);
+	cdev_device_del(&rtc->char_dev, &rtc->dev);
+	rtc->ops = NULL;	/* ops_lock holders see a dead device */
+	mutex_unlock(&rtc->ops_lock);
+	put_device(&rtc->dev);
+}
+
+/* devres destructor for devm_rtc_allocate_device(): tear down nvmem, then
+ * either fully unregister (if registration happened) or just drop the ref. */
+static void devm_rtc_release_device(struct device *dev, void *res)
+{
+	struct rtc_device *rtc = *(struct rtc_device **)res;
+
+	rtc_nvmem_unregister(rtc);
+
+	if (rtc->registered)
+		rtc_device_unregister(rtc);
+	else
+		put_device(&rtc->dev);
+}
+
+/* Allocate a managed rtc_device bound to @dev: picks an rtcN id, attaches
+ * the devres destructor and names the device. Returns the device or an
+ * ERR_PTR on failure. */
+struct rtc_device *devm_rtc_allocate_device(struct device *dev)
+{
+	struct rtc_device **ptr, *rtc;
+	int id, err;
+
+	id = rtc_device_get_id(dev);
+	if (id < 0)
+		return ERR_PTR(id);
+
+	ptr = devres_alloc(devm_rtc_release_device, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr) {
+		err = -ENOMEM;
+		goto exit_ida;
+	}
+
+	rtc = rtc_allocate_device();
+	if (!rtc) {
+		err = -ENOMEM;
+		goto exit_devres;
+	}
+
+	*ptr = rtc;
+	devres_add(dev, ptr);
+
+	rtc->id = id;
+	rtc->dev.parent = dev;
+	dev_set_name(&rtc->dev, "rtc%d", id);
+
+	return rtc;
+
+exit_devres:
+	devres_free(ptr);
+exit_ida:
+	ida_simple_remove(&rtc_ida, id);
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(devm_rtc_allocate_device);
+
+/* Register an allocated rtc_device: validate ops, compute the range
+ * offset, pick up any alarm already armed in hardware, create the char
+ * device and procfs entry, and optionally seed the system clock
+ * (CONFIG_RTC_HCTOSYS). Returns 0 or a negative errno. */
+int __rtc_register_device(struct module *owner, struct rtc_device *rtc)
+{
+	struct rtc_wkalrm alrm;
+	int err;
+
+	if (!rtc->ops) {
+		dev_dbg(&rtc->dev, "no ops set\n");
+		return -EINVAL;
+	}
+
+	rtc->owner = owner;
+	rtc_device_get_offset(rtc);
+
+	/* Check to see if there is an ALARM already set in hw */
+	err = __rtc_read_alarm(rtc, &alrm);
+	if (!err && !rtc_valid_tm(&alrm.time))
+		rtc_initialize_alarm(rtc, &alrm);
+
+	rtc_dev_prepare(rtc);
+
+	/* char-device failure is non-fatal: the class device still works */
+	err = cdev_device_add(&rtc->char_dev, &rtc->dev);
+	if (err)
+		dev_warn(rtc->dev.parent, "failed to add char device %d:%d\n",
+			 MAJOR(rtc->dev.devt), rtc->id);
+	else
+		dev_dbg(rtc->dev.parent, "char device (%d:%d)\n",
+			MAJOR(rtc->dev.devt), rtc->id);
+
+	rtc_proc_add_device(rtc);
+
+	rtc->registered = true;
+	dev_info(rtc->dev.parent, "registered as %s\n",
+		 dev_name(&rtc->dev));
+
+#ifdef CONFIG_RTC_HCTOSYS_DEVICE
+	if (!strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE))
+		rtc_hctosys(rtc);
+#endif
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__rtc_register_device);
+
+/**
+ * devm_rtc_device_register - resource managed rtc_device_register()
+ * @dev: the device to register
+ * @name: the name of the device (unused)
+ * @ops: the rtc operations structure
+ * @owner: the module owner
+ *
+ * @return a struct rtc on success, or an ERR_PTR on error
+ *
+ * Managed rtc_device_register(). The rtc_device returned from this function
+ * are automatically freed on driver detach.
+ * This function is deprecated, use devm_rtc_allocate_device and
+ * rtc_register_device instead
+ */
+struct rtc_device *devm_rtc_device_register(struct device *dev,
+					    const char *name,
+					    const struct rtc_class_ops *ops,
+					    struct module *owner)
+{
+	struct rtc_device *rtc;
+	int err;
+
+	rtc = devm_rtc_allocate_device(dev);
+	if (IS_ERR(rtc))
+		return rtc;
+
+	rtc->ops = ops;
+
+	err = __rtc_register_device(owner, rtc);
+	if (err)
+		return ERR_PTR(err);
+
+	/* cleanup on detach is handled by the devres destructor */
+	return rtc;
+}
+EXPORT_SYMBOL_GPL(devm_rtc_device_register);
+
+/* Subsystem init: create the "rtc" device class, attach the PM ops and
+ * set up the char-device region. */
+static int __init rtc_init(void)
+{
+	rtc_class = class_create(THIS_MODULE, "rtc");
+	if (IS_ERR(rtc_class)) {
+		pr_err("couldn't create class\n");
+		return PTR_ERR(rtc_class);
+	}
+	rtc_class->pm = RTC_CLASS_DEV_PM_OPS;
+	rtc_dev_init();
+	return 0;
+}
+subsys_initcall(rtc_init);
diff --git a/upstream/linux-5.10/drivers/soc/sc/pcu/pcu-zx297520v3.c b/upstream/linux-5.10/drivers/soc/sc/pcu/pcu-zx297520v3.c
new file mode 100755
index 0000000..66c8cf3
--- /dev/null
+++ b/upstream/linux-5.10/drivers/soc/sc/pcu/pcu-zx297520v3.c
@@ -0,0 +1,976 @@
+/*
+ *
+ * Copyright (C) 2015-2022 ZTE-TSP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/kernel.h> 
+#include <linux/errno.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/percpu.h>
+#include <linux/refcount.h>
+#include <linux/slab.h>
+#include <linux/irqchip.h>
+#include <linux/suspend.h>
+#include <linux/syscore_ops.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include <linux/soc/sc/common.h>
+#include <linux/soc/sc/spinlock.h>
+#include <linux/soc/sc/pcu.h>
+#include <linux/soc/sc/rpmsg.h>
+#include <dt-bindings/soc/zx297520v3-irq.h>
+#include <uapi/linux/sc_bsp/bsp_api.h>
+
+#include "pcu-common.h"
+
+#if 0
+
+#define pm_ram_log(fmt, args...)      	\
+{	\
+	pm_printk("[SLP] " fmt, ##args);	\
+}
+#else
+#define pm_ram_log(fmt, args...)      	\
+{	\
+	printk(KERN_INFO "[SLP] " fmt, ##args);	\
+}
+
+#endif
+
+#define ZX_IRQ_NUM			(IRQ_ZX297520V3_SPI_NUM + 32)
+
+#define PCU_LOCK		reg_spin_lock();
+#define PCU_UNLOCK		reg_spin_unlock();
+
+static struct zx_pcu_int_info zx297520v3_pcu_int_info[] = 
+{
+	{
+		.pcu_index		= PCU_AP_TIMER1_INT,
+	 	.gic_index		= AP_TIMER1_INT,
+		.status_index	= 51,
+	 	.wake_index		= 0,
+	 	.int_name		= "ap_timer1",
+	 	.irq_type		= IRQ_TYPE_EDGE_RISING,
+	 	.wl_type		= PM_WL_EVENT_AP_TIMER1,
+	},
+	{
+		.pcu_index		= PCU_AP_TIMER2_INT,
+	 	.gic_index		= AP_TIMER2_INT,
+	 	.status_index	= 52,	 	
+		.wake_index 	= 1,
+	 	.int_name		= "ap_timer2",
+	 	.irq_type		= IRQ_TYPE_EDGE_RISING,
+	 	.wl_type		= PM_WL_EVENT_AP_TIMER2,
+	},
+	{
+		.pcu_index		= PCU_ICP_PS2AP_INT,
+		.gic_index		= ICP_PS2AP_INT,
+	 	.status_index	= 53,		
+		.wake_index 	= 2,
+		.int_name		= "icp_ps_ap",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_ICP_PS2AP,
+	},
+	{
+		.pcu_index		= PCU_USB_POWERDWN_UP_INT,
+		.gic_index		= USB_POWERDWN_UP_INT,
+	 	.status_index	= 6,		
+		.wake_index 	= 3,
+		.int_name		= "usb_up",
+		.irq_type		= IRQ_TYPE_EDGE_RISING,
+		.wl_type		= PM_WL_EVENT_USB_POWERDWN_UP,
+	},
+	{
+		.pcu_index		= PCU_USB_POWERDWN_DOWN_INT,
+		.gic_index		= USB_POWERDWN_DOWN_INT,
+	 	.status_index	= 7,		
+		.wake_index 	= 4,
+		.int_name		= "usb_down",
+		.irq_type		= IRQ_TYPE_EDGE_FALLING,
+		.wl_type		= PM_WL_EVENT_USB_POWERDWN_DOWN,
+	},
+	{
+		.pcu_index		= PCU_HSIC_POWERDWN_UP_INT,
+		.gic_index		= HSIC_POWERDWN_UP_INT,
+	 	.status_index	= 8,		
+		.wake_index 	= 5,
+		.int_name		= "hsic_up",
+		.irq_type		= IRQ_TYPE_EDGE_RISING,
+		.wl_type		= PM_WL_EVENT_HSIC_POWERDWN_UP,
+	},
+	{
+		.pcu_index		= PCU_HSIC_POWERDWN_DOWN_INT,
+		.gic_index		= HSIC_POWERDWN_DOWN_INT,
+	 	.status_index	= 9,		
+		.wake_index 	= 6,
+		.int_name		= "hsic_down",
+		.irq_type		= IRQ_TYPE_EDGE_FALLING,
+		.wl_type		= PM_WL_EVENT_HSIC_POWERDWN_DOWN,
+	},
+	{
+		.pcu_index		= PCU_ICP_M02AP_INT,
+		.gic_index		= ICP_M02AP_INT,
+	 	.status_index	= 54,		
+		.wake_index 	= 7,
+		.int_name		= "icp_m0_ap",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_ICP_M02AP,
+	},
+	{
+		.pcu_index		= PCU_RTC_ALARM_INT,
+		.gic_index		= RTC_ALARM_INT,
+	 	.status_index	= 12,		
+		.wake_index 	= 8,
+		.int_name		= "rtc_alarm",
+		.irq_type		= IRQ_TYPE_LEVEL_LOW,
+		.wl_type		= PM_WL_EVENT_ALARM,
+	},
+	{
+		.pcu_index		= PCU_RTC_TIMER_INT,
+		.gic_index		= RTC_TIMER_INT,
+	 	.status_index	= 13,		
+		.wake_index 	= 9,
+		.int_name		= "rtc_timer",
+		.irq_type		= IRQ_TYPE_LEVEL_LOW,
+		.wl_type		= PM_WL_EVENT_RTC_TIMER,
+	},
+	{
+		.pcu_index		= PCU_KEYPAD_INT,
+		.gic_index		= KEYPAD_INT,
+	 	.status_index	= 14,		
+		.wake_index 	= 10,
+		.int_name		= "kpd",
+		.irq_type		= IRQ_TYPE_EDGE_RISING,
+		.wl_type		= PM_WL_EVENT_KEYPAD,
+	},
+	{
+		.pcu_index		= PCU_SD1_DATA1_INT,
+		.gic_index		= SD1_DATA1_INT,
+	 	.status_index	= 15,		
+		.wake_index 	= 11,
+		.int_name		= "sd1_d1",
+		.irq_type		= IRQ_TYPE_LEVEL_LOW,
+		.wl_type		= PM_WL_EVENT_SD1_DATA1,
+	},
+	{
+		.pcu_index		= PCU_EX0_INT,
+		.gic_index		= EX0_INT,
+	 	.status_index	= 30,		
+		.wake_index 	= 14,
+		.int_name		= "ext0",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT0,
+	},
+	{
+		.pcu_index		= PCU_EX1_INT,
+		.gic_index		= EX1_INT,
+	 	.status_index	= 31,		
+		.wake_index 	= 15,
+		.int_name		= "ext1",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT1,
+	},
+	{
+		.pcu_index		= PCU_EX2_INT,
+		.gic_index		= EX2_INT,
+	 	.status_index	= 32,		
+		.wake_index 	= 16,
+		.int_name		= "ext2",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT2,
+	},
+	{
+		.pcu_index		= PCU_EX3_INT,
+		.gic_index		= EX3_INT,
+	 	.status_index	= 33,		
+		.wake_index 	= 17,
+		.int_name		= "ext3",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT3,
+	},
+	{
+		.pcu_index		= PCU_EX4_INT,
+		.gic_index		= EX4_INT,
+	 	.status_index	= 34,		
+		.wake_index 	= 18,
+		.int_name		= "ext4",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT4,
+	},
+	{
+		.pcu_index		= PCU_EX5_INT,
+		.gic_index		= EX5_INT,
+	 	.status_index	= 35,		
+		.wake_index 	= 19,
+		.int_name		= "ext5",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT5,
+	},
+	{
+		.pcu_index		= PCU_EX6_INT,
+		.gic_index		= EX6_INT,
+	 	.status_index	= 36,		
+		.wake_index 	= 20,
+		.int_name		= "ext6",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT6,
+	},
+	{
+		.pcu_index		= PCU_EX7_INT,
+		.gic_index		= EX7_INT,
+	 	.status_index	= 37,		
+		.wake_index 	= 21,
+		.int_name		= "ext7",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT7,
+	},
+	{
+		.pcu_index		= PCU_EX8_INT,
+		.gic_index		= EX8IN1_INT,
+	 	.status_index	= 38,		
+		.wake_index 	= 22,
+		.int_name		= "ext8",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT8,
+	},
+	{
+		.pcu_index		= PCU_EX9_INT,
+		.gic_index		= EX8IN1_INT,
+	 	.status_index	= 39,
+		.wake_index 	= 23,
+		.int_name		= "ext9",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT9,
+	},
+	{
+		.pcu_index		= PCU_EX10_INT,
+		.gic_index		= EX8IN1_INT,
+	 	.status_index	= 40,		
+		.wake_index 	= 24,
+		.int_name		= "ext10",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT10,
+	},
+	{
+		.pcu_index		= PCU_EX11_INT,
+		.gic_index		= EX8IN1_INT,
+	 	.status_index	= 41,		
+		.wake_index 	= 25,
+		.int_name		= "ext11",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT11,
+	},
+	{
+		.pcu_index		= PCU_EX12_INT,
+		.gic_index		= EX8IN1_INT,
+	 	.status_index	= 42,		
+		.wake_index 	= 26,
+		.int_name		= "ext12",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT12,
+	},
+	{
+		.pcu_index		= PCU_EX13_INT,
+		.gic_index		= EX8IN1_INT,
+	 	.status_index	= 43,		
+		.wake_index 	= 27,
+		.int_name		= "ext13",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT13,
+	},
+	{
+		.pcu_index		= PCU_EX14_INT,
+		.gic_index		= EX8IN1_INT,
+	 	.status_index	= 44,		
+		.wake_index 	= 28,
+		.int_name		= "ext14",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT14,
+	},
+	{
+		.pcu_index		= PCU_EX15_INT,
+		.gic_index		= EX8IN1_INT,
+	 	.status_index	= 45,		
+		.wake_index 	= 29,
+		.int_name		= "ext15",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT15,
+	},
+	{
+		.pcu_index		= PCU_SD0_DATA1_INT,
+		.gic_index		= SD0_DATA1_INT,
+	 	.status_index	= 2,		
+		.wake_index 	= 30,
+		.int_name		= "sd0_d1",
+		.irq_type		= IRQ_TYPE_LEVEL_LOW,
+		.wl_type		= PM_WL_EVENT_SD0_DATA1,
+	},
+	{
+		.pcu_index		= PCU_ICP_PHY2AP_INT,
+		.gic_index		= ICP_PHY2AP_INT,
+		.status_index	= 55,		
+		.wake_index 	= 31,
+		.int_name		= "icp_phy_ap",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_ICP_PHY2AP,
+	},
+	{
+		.pcu_index		= PCU_GMACPHY_WAKE_INT,
+		.gic_index		= GMACPHY_WAKE_INT,
+		.status_index	= 60,		
+		.wake_index 	= 0xff,
+		.int_name		= "gmacphy_wake",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= 0xff,
+	},
+	{
+		.pcu_index		= PCU_UART0_RXD_INT,
+		.gic_index		= UART0_RXD_INT,
+		.status_index	= 59,		
+		.wake_index 	= 42,
+		.int_name		= "uart0_rxd",
+		.irq_type		= IRQ_TYPE_EDGE_FALLING,
+		.wl_type		= 0xff,
+
+	},
+	{
+		.pcu_index		= PCU_GMAC_INT,
+		.gic_index		= GMAC_INT,
+		.status_index	= 16,		
+		.wake_index 	= 0xff,
+		.int_name		= "gmac",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= 0xff,
+	},
+	{
+		.pcu_index		= PCU_GMACPHY_INT,
+		.gic_index		= GMACPHY_INT,
+		.status_index	= 61,		
+		.wake_index 	= 0xff,
+		.int_name		= "gmacphy",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= 0xff,
+	},
+};
+
+/* Forward irq_get_irqchip_state to the parent chip (GIC) in the
+ * hierarchical irq domain; -ENOSYS if the parent lacks the hook. */
+static int zx_pcu_get_irqchip_state(struct irq_data *data,
+				     enum irqchip_irq_state which, bool *val)
+{
+	data = data->parent_data;
+
+	if (data->chip->irq_get_irqchip_state)
+		return data->chip->irq_get_irqchip_state(data, which, val);
+
+	return -ENOSYS;
+}
+
+/* Forward irq_set_irqchip_state to the parent chip; -ENOSYS if absent. */
+static int zx_pcu_set_irqchip_state(struct irq_data *data,
+				     enum irqchip_irq_state which, bool val)
+{
+	data = data->parent_data;
+
+	if (data->chip->irq_set_irqchip_state)
+		return data->chip->irq_set_irqchip_state(data, which, val);
+
+	return -ENOSYS;
+}
+
+/* Forward NMI setup to the parent chip; -ENOSYS if unsupported. */
+static int zx_pcu_nmi_setup(struct irq_data *data)
+{
+	data = data->parent_data;
+
+	if (data->chip->irq_nmi_setup)
+		return data->chip->irq_nmi_setup(data);
+
+	return -ENOSYS;
+}
+
+/* Mirror of zx_pcu_nmi_setup(); silently ignored when the parent lacks it. */
+static void zx_pcu_nmi_teardown(struct irq_data *data)
+{
+	struct irq_data *pdata = data->parent_data;
+
+	if (pdata->chip->irq_nmi_teardown)
+		pdata->chip->irq_nmi_teardown(pdata);
+}
+
+/* Mark/unmark this hwirq as a PCU wakeup source; never fails. */
+static int zx_pcu_set_wake(struct irq_data *data, unsigned int on)
+{
+	pcu_set_irq_wake(data->hwirq, on);
+	return 0;
+}
+
+/* End-of-interrupt: clear the PCU pending bit first, then EOI the parent. */
+static void zx_pcu_eoi_irq(struct irq_data *data)
+{
+	pcu_clr_irq_pending(data->hwirq);
+	irq_chip_eoi_parent(data);
+}
+
+/*
+ * Program the trigger type into the PCU.  When the PCU cannot honour the
+ * requested type (pcu_set_irq_type() returns 0), the parent (GIC) is
+ * programmed level-high instead.
+ */
+static int zx_pcu_set_type(struct irq_data *data, unsigned int type)
+{
+	unsigned int parent_type =
+		pcu_set_irq_type(data->hwirq, type) ? type : IRQ_TYPE_LEVEL_HIGH;
+
+	return irq_chip_set_type_parent(data, parent_type);
+}
+
+/* CPU affinity is passed straight through to the parent chip. */
+static int zx_pcu_set_affinity(struct irq_data *data,
+				 const struct cpumask *dest, bool force)
+{
+	return irq_chip_set_affinity_parent(data, dest, force);
+}
+
+/*
+ * PCU irq_chip, stacked hierarchically on the GIC: mask/unmask and vCPU
+ * affinity go straight to the parent, while wake, type and EOI also touch
+ * the PCU registers first (see the callbacks above).
+ */
+static struct irq_chip zx_pcu_chip = {
+	.name			= "PCU",
+	.irq_eoi		= zx_pcu_eoi_irq,
+	.irq_mask		= irq_chip_mask_parent,
+	.irq_unmask		= irq_chip_unmask_parent,
+	.irq_set_wake	= zx_pcu_set_wake,
+	.irq_set_type	= zx_pcu_set_type,
+	
+	.irq_set_affinity		= zx_pcu_set_affinity,
+	.irq_get_irqchip_state	= zx_pcu_get_irqchip_state,
+	.irq_set_irqchip_state	= zx_pcu_set_irqchip_state,
+	.irq_set_vcpu_affinity	= irq_chip_set_vcpu_affinity_parent,
+	.irq_nmi_setup			= zx_pcu_nmi_setup,
+	.irq_nmi_teardown		= zx_pcu_nmi_teardown,	
+	.flags					= IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SET_TYPE_MASKED,
+};
+
+/*
+ * Decode a 3-cell DT interrupt specifier <flags hwirq trigger>.
+ * Only SPI-style (cell0 == 0) specifiers belong to this domain.
+ */
+static int zx_pcu_domain_translate(struct irq_domain *d,
+					struct irq_fwspec *fwspec,
+					unsigned long *hwirq,
+					unsigned int *type)
+{
+	if (!is_of_node(fwspec->fwnode))
+		return -EINVAL;
+	if (fwspec->param_count != 3)
+		return -EINVAL;
+	/* No PPI should point to this domain */
+	if (fwspec->param[0] != 0)
+		return -EINVAL;
+
+	*hwirq = fwspec->param[1];
+	*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+	return 0;
+}
+
+/*
+ * .alloc for the PCU domain: validate the GIC-style 3-cell specifier,
+ * bind zx_pcu_chip to each newly allocated virq, then delegate the real
+ * allocation to the parent (GIC) domain with the same specifier.
+ */
+static int zx_pcu_domain_alloc(struct irq_domain *domain,
+				    unsigned int virq,
+				    unsigned int nr_irqs, void *data)
+{
+	struct irq_fwspec *fwspec = data;
+	struct irq_fwspec parent_fwspec;
+	struct zx_pcu_dev *pcu = domain->host_data;
+	irq_hw_number_t hwirq;
+	unsigned int i;
+
+	if (fwspec->param_count != 3)
+		return -EINVAL;	/* Not GIC compliant */
+	if (fwspec->param[0] != GIC_SPI)
+		return -EINVAL;	/* No PPI should point to this domain */
+
+	hwirq = fwspec->param[1];
+	if (hwirq >= ZX_IRQ_NUM)
+		return -EINVAL;
+
+	for (i = 0; i < nr_irqs; i++) {
+		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+					      &zx_pcu_chip,
+					      (void __force *)pcu->top_reg_base);
+	}
+
+	/* same spec, re-targeted at the parent domain's fwnode */
+	parent_fwspec = *fwspec;
+	parent_fwspec.fwnode = domain->parent->fwnode;
+	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+					    &parent_fwspec);
+}
+
+/* Hierarchical domain ops: custom translate/alloc, generic free. */
+static const struct irq_domain_ops zx_pcu_domain_ops = {
+	.translate	= zx_pcu_domain_translate,
+	.alloc		= zx_pcu_domain_alloc,
+	.free		= irq_domain_free_irqs_common,
+};
+
+/*
+ * Probe the PCU interrupt-controller node and stack its irq domain on top
+ * of the parent (GIC) domain.  Returns 0 on success or a negative errno.
+ * Registered below via IRQCHIP_DECLARE.
+ */
+static int __init zx_pcu_init(struct device_node *node,
+				   struct device_node *parent)
+{
+	struct irq_domain *parent_domain, *domain;
+	struct zx_pcu_dev *pcu;
+
+	if (!parent) {
+		pr_err("%pOF: no parent found\n", node);
+		return -ENODEV;
+	}
+
+	parent_domain = irq_find_host(parent);
+	if (!parent_domain) {
+		pr_err("%pOF: unable to obtain parent domain\n", node);
+		return -ENXIO;
+	}
+
+	pcu = &pcu_dev;
+	pcu->np				= node;
+	pcu->top_reg_base	= of_iomap(node, 0);
+	WARN(!pcu->top_reg_base, "unable to map top pcu registers\n");
+
+	pcu->int_info = zx297520v3_pcu_int_info;
+	pcu->int_count = ARRAY_SIZE(zx297520v3_pcu_int_info);
+
+	pcu_init();
+
+	domain = irq_domain_add_hierarchy(parent_domain, 0, ZX_IRQ_NUM,
+					  node, &zx_pcu_domain_ops,
+					  pcu);
+	if (!domain) {
+		pr_err("%pOF: failed to allocated domain\n", node);
+		/* BUGFIX: don't leak the register mapping on failure */
+		if (pcu->top_reg_base) {
+			iounmap(pcu->top_reg_base);
+			pcu->top_reg_base = NULL;
+		}
+		return -ENOMEM;
+	}
+
+	pm_pcu_init();
+
+	return 0;
+}
+
+IRQCHIP_DECLARE(zx297520v3_pcu, "zte,zx297520v3-pcu", zx_pcu_init);
+
+/* pcu debug */
+#ifdef CONFIG_PM
+#define PCU_TOP						(pcu_dev.top_reg_base)
+
+#define ARM_AP_CONFIG_REG           (PCU_TOP + 0x0)
+#define ARM_AP_SLEEP_TIME_REG       (PCU_TOP + 4*0x3C)
+#define AP_INT_WAKE_DIS_REG        	(PCU_TOP + 4*0xD)
+#define CORE_SWITCH_CONFIG_REG    	(PCU_TOP + 4*0x2b)
+
+#define M0_INT_WAKE_DIS_REG        	(PCU_TOP + 4*0xE)
+#define PCU_INT_READOUT_REG1		(PCU_TOP + 4*0x1EB)
+#define PCU_INT_READOUT_REG2		(PCU_TOP + 4*0x1EC)
+#define PCU_INT_READOUT_REG3		(PCU_TOP + 4*0x1ED)
+
+
+/*ARM_AP_CONFIG_REG*/
+#define	PCU_SLEEP_MODE				(1U << 0)
+#define	PCU_POWEROFF_MODE			(1U << 1)
+#define	PCU_L2_CLK_GATE				(1U << 2)		/*1-can turn off*/
+#define PCU_SLEEP_2M0               (1U << 3)
+#define	PCU_SLEEP_DONE_BYPASS		(1U << 4)	 	
+#define	PCU_SW_CONFIG_MASK			(1U << 5)	 	/* ?????  */
+
+#define	PCU_MODE_MASK				(0x3U << 0)
+
+/*ARM_AP_SLEEP_TIME_REG*/
+#define	PCU_AP_SLEEP_TIME_DIS       (1U << 31)
+
+
+
+/* low power function */
+extern unsigned int pm_get_wakesource(void);
+
+/**
+ * pm_clear_pcu - drop any pending sleep/poweroff request.
+ *
+ * Clears the mode bits in ARM_AP_CONFIG_REG so the PCU takes no
+ * low-power action on the next opportunity.
+ */
+void pm_clear_pcu(void)
+{
+	zx_clr_reg(ARM_AP_CONFIG_REG, PCU_MODE_MASK);
+}
+
+/*
+ * One-time PCU PM setup: clear any stale mode request, allow the L2 clock
+ * to be gated, and disable AP wake for every source NOT reported by
+ * pm_get_wakesource() (the register holds wake-DISABLE bits, hence ~).
+ */
+void pm_pcu_init(void)
+{
+	zx_clr_reg(ARM_AP_CONFIG_REG, PCU_MODE_MASK);
+	zx_set_reg(ARM_AP_CONFIG_REG, PCU_L2_CLK_GATE);
+	zx_write_reg(AP_INT_WAKE_DIS_REG, ~(pm_get_wakesource()));
+}
+
+/*
+ * Set or clear the ps_clk_switch bit (bit 2) of CORE_SWITCH_CONFIG_REG.
+ * @en: non-zero turns the switch on, zero turns it off.
+ */
+void zx_apmgclken_set(unsigned en)
+{
+	unsigned val = zx_read_reg(CORE_SWITCH_CONFIG_REG);
+
+	if (en)
+		val |= (0x1 << 2);	/* ps_clk_switch = 1 */
+	else
+		val &= ~(0x1 << 2);	/* ps_clk_switch = 0 */
+
+	zx_write_reg(CORE_SWITCH_CONFIG_REG, val);
+}
+
+
+/**
+ * pm_set_pcu_poweroff - arm the PCU for core power-off.
+ * @sleep_time: raw value written to ARM_AP_SLEEP_TIME_REG
+ *              (bit31 = PCU_AP_SLEEP_TIME_DIS disables the timer).
+ */
+void pm_set_pcu_poweroff(u32 sleep_time)
+{
+	zx_set_reg(ARM_AP_CONFIG_REG, PCU_POWEROFF_MODE);
+	zx_write_reg(ARM_AP_SLEEP_TIME_REG, sleep_time);
+}
+EXPORT_SYMBOL(pm_set_pcu_poweroff);
+
+
+/**
+ * pm_set_pcu_sleep - arm the PCU for light sleep.
+ * @sleep_time: raw value written to ARM_AP_SLEEP_TIME_REG
+ *              (bit31 = PCU_AP_SLEEP_TIME_DIS disables the timer).
+ */
+void pm_set_pcu_sleep(u32 sleep_time)
+{
+	zx_set_reg(ARM_AP_CONFIG_REG, PCU_SLEEP_MODE);
+	zx_write_reg(ARM_AP_SLEEP_TIME_REG, sleep_time);
+}
+
+/**
+ * pcu_get_wakeup_setting - read the AP wake-disable mask register.
+ *
+ * A set bit means the corresponding source will NOT wake the AP.
+ */
+unsigned int pcu_get_wakeup_setting(void)
+{
+	return zx_read_reg(AP_INT_WAKE_DIS_REG);
+}
+/*
+ * Per-32-bit-word GIC wake-enable bitmap: by default only the ICP
+ * (PS and M0) links, AP timer1 and the 8-in-1 external interrupt may
+ * wake the AP.
+ */
+unsigned int  gic_wake_enable[3]=
+{
+	(1<<ICP_PS2AP_INT) |(1<<ICP_M02AP_INT) | (1<<AP_TIMER1_INT) | (1<<EX8IN1_INT),
+	0, 
+	0
+};
+
+extern void show_icp_state(T_RpMsg_CoreID actorID);
+/*
+ * Identify and log which interrupt source ended the last sleep.  Scans
+ * zx297520v3_pcu_int_info against the PCU readout registers; the readout
+ * bits are active-low here (value 0 == source fired, per the comment
+ * below).  Records the match in the RAM log and raises its wakelock event.
+ */
+void pm_get_wake_cause(void)
+{
+	unsigned int	int_status[2];
+	int 			i = 0;
+	int 			index_found = 0xff;
+	unsigned int	pcu_wake_setting[2];
+		
+	/* when wake up, the level is high&the value is 0*/
+	int_status[0] = zx_read_reg(PCU_INT_READOUT_REG1);
+	int_status[1] = zx_read_reg(PCU_INT_READOUT_REG2);
+
+	pcu_wake_setting[0] = zx_read_reg(AP_INT_WAKE_DIS_REG);
+	pcu_wake_setting[1] = zx_read_reg(M0_INT_WAKE_DIS_REG);
+
+	for(i=0; i<ARRAY_SIZE(zx297520v3_pcu_int_info); i++)
+	{
+		/* wake_index 0xff: source can never wake the AP */
+		if (zx297520v3_pcu_int_info[i].wake_index == 0xff)
+			continue;
+
+		/* wake disabled for this source */
+		if(pcu_wake_setting[0]&BIT(zx297520v3_pcu_int_info[i].wake_index))	
+			continue;
+
+		/* readout bit still 1 -> this source did not fire */
+		if(int_status[zx297520v3_pcu_int_info[i].status_index/32]&(1<<(zx297520v3_pcu_int_info[i].status_index%32)))
+			continue;
+
+		index_found = i;
+		break;
+	}
+	
+	if(index_found != 0xff)
+	{
+		pm_ram_log(" wake: %d  [%s]\n", zx297520v3_pcu_int_info[index_found].gic_index, zx297520v3_pcu_int_info[index_found].int_name);
+
+		if(zx297520v3_pcu_int_info[index_found].gic_index ==ICP_PS2AP_INT) {
+			show_icp_state(CORE_PS0);
+		}
+		pm_ram_log(" pcu int status:0x%x 0x%x\n",int_status[0], int_status[1]);
+
+		pm_wl_set_event(pcu_get_wl_index_by_gic(zx297520v3_pcu_int_info[index_found].gic_index));
+	}
+	else
+	{
+		pm_ram_log(" wake abnormal\n");
+		pm_ram_log(" pcu int status:0x%x 0x%x\n",int_status[0], int_status[1]);
+	}
+}
+
+static struct wakeup_source *zx_main_ws;
+
+/*
+ * PM notifier: after every resume, hold the "zx_main" wakeup source for
+ * 1000 ms so the system does not immediately re-enter suspend.
+ */
+static int zx_pcu_pm_callback(struct notifier_block *nb,
+			unsigned long action, void *ptr)
+{
+	if (action != PM_POST_SUSPEND)
+		return NOTIFY_DONE;
+
+	__pm_wakeup_event(zx_main_ws, 1000);
+	return NOTIFY_OK;
+}
+
+/* Nothing to save for the PCU on syscore suspend. */
+static int pcu_pm_suspend(void)
+{
+	int ret = 0;
+
+	return ret;
+}
+
+/* Wake-cause reporting is currently disabled here. */
+static void pcu_pm_resume(void)
+{
+//	pcu_get_wake_cause();
+}
+
+/* Syscore hooks: run with one CPU online and interrupts disabled. */
+static struct syscore_ops pcu_pm_syscore_ops = {
+	.suspend = pcu_pm_suspend,
+	.resume = pcu_pm_resume,
+};
+
+/*
+ * Register the "zx_main" wakeup source, the post-suspend PM notifier and
+ * the syscore ops.  Runs at core_initcall time, before drivers probe.
+ */
+static int pcu_pm_init(void)
+{
+	zx_main_ws = wakeup_source_register(NULL, "zx_main");
+	if (!zx_main_ws)
+		return -ENOMEM;
+
+	pm_notifier(zx_pcu_pm_callback, 0);
+
+	register_syscore_ops(&pcu_pm_syscore_ops);
+	return 0;
+}
+core_initcall(pcu_pm_init);
+#endif
+
+/* --------------------------------------------------------------------
+ * extint_8in1
+ * -------------------------------------------------------------------- */
+
+/* Runtime state of the 8-in-1 external-interrupt demultiplexer. */
+struct ext8in1_info {
+	struct irq_domain   *domain;		/* linear domain for EX8..EX15 */
+	struct regmap 		*regmap;		/* parent syscon handle */
+	int                 parent_irq;		/* chained GIC line */
+	
+};
+
+struct ext8in1_info ext8in1_dev = {0};
+
+/*
+ * Read the 8-in-1 vector register: the low three bits select which of
+ * the external interrupts EX8..EX15 fired, returned as 0-7.
+ */
+static unsigned int pcu_get_8in1_int_source(void)
+{
+	return zx_read_reg(pcu_dev.top_reg_base + 0x12C) & 0x7;
+}
+ 
+
+/*external int 8-15 need extra clear*/
+/*
+ * Clear an EX8..EX15 interrupt at the 8-in-1 demux.  Only clears when
+ * @pcu_index is the source currently latched in the vector register --
+ * see the cross-core note below.
+ */
+static void pcu_int_clear_8in1(unsigned int pcu_index)
+{	
+	unsigned int vector=0;
+	
+	if ( (pcu_index >= PCU_EX8_INT)&&(pcu_index <= PCU_EX15_INT) )
+	{
+		/*
+		 *in 7510 platform, 8in1 interrupt would be used by different cores.
+		 *when any core installs a new 8in1 interrupt, another core may be 
+		 * responding another 8in1 interrupt, so  8in1 interrupt shouldn't be 
+		 *cleared. in this case, nothing to be done. but a new problem comes,
+		 * the core install new  8in1 interrupt will receive a fake interrupt.
+		 */
+		vector = pcu_get_8in1_int_source();
+		if (pcu_index != (vector + PCU_EX8_INT) )
+			return;
+
+		/* write-1 to the shared clear register under the HW spinlock */
+		PCU_LOCK
+   		zx_write_reg(pcu_dev.top_reg_base+0x128, 0x1);
+		PCU_UNLOCK
+
+		pcu_int_clear(pcu_index);
+	}
+}
+
+/* The 8in1 demux has no bus nor per-line mask hardware: no-op callbacks. */
+static void ext8in1_irq_lock(struct irq_data *data){}
+static void ext8in1_irq_sync_unlock(struct irq_data *data){}
+static void ext8in1_irq_mask(struct irq_data *data){}
+static void ext8in1_irq_unmask(struct irq_data *data){}
+/* Route wake configuration to the PCU using the EX8-based pcu index. */
+static int ext8in1_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+	if (!data)
+		return -EINVAL;
+
+	pcu_set_irq_wake_by_pcu(PCU_EX8_INT + data->hwirq, on);
+	return 0;
+}
+
+/*
+ * Apply the trigger type at the PCU, then flush any stale pending state
+ * (EX8..EX15 need the extra 8in1 clear, see pcu_int_clear_8in1()).
+ */
+static int ext8in1_irq_set_type(struct irq_data *data, unsigned int type)
+{
+	if (!data)
+		return -EINVAL;
+
+	pcu_int_set_type(data->hwirq + PCU_EX8_INT, type);
+
+	pcu_int_clear_8in1(data->hwirq + PCU_EX8_INT);
+
+	return 0;
+}
+
+/* irq_chip for the demuxed EX8..EX15 lines; only wake/type do real work. */
+static struct irq_chip ext8in1_irq_chip =
+{
+    .name           		= "ext8in1",
+
+	.irq_set_wake			= ext8in1_irq_set_wake,
+	.irq_set_type			= ext8in1_irq_set_type,
+	.irq_mask				= ext8in1_irq_mask,
+	.irq_unmask				= ext8in1_irq_unmask,
+	.irq_bus_lock			= ext8in1_irq_lock,
+	.irq_bus_sync_unlock	= ext8in1_irq_sync_unlock,
+};
+
+/*
+ * Chained handler for the shared 8-in-1 line: demux the active source,
+ * clear it at the PCU, dispatch the mapped virq, and repeat until the
+ * vector register stops changing.
+ */
+static void ext8in1_handle_irq(struct irq_desc *desc)
+{
+	struct ext8in1_info *data = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	int virq;
+	int hwirq, next;
+
+	chained_irq_enter(chip, desc);
+
+	hwirq = pcu_get_8in1_int_source();
+	for (;;) {
+		pcu_int_clear_8in1(hwirq + PCU_EX8_INT);
+
+		virq = irq_find_mapping(data->domain, hwirq);
+		if (virq > 0)
+			generic_handle_irq(virq);
+
+		next = pcu_get_8in1_int_source();
+		if (next == hwirq)
+			break;
+		hwirq = next;
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+extern void mask_irq(struct irq_desc *desc);
+extern void unmask_irq(struct irq_desc *desc);
+/* Re-open the chained parent line after system resume. */
+static int ext8in1_irq_resume(struct device *dev)
+{
+	unmask_irq(irq_to_desc(ext8in1_dev.parent_irq));
+
+	return 0;
+}
+
+/* Gate the chained parent line across suspend. */
+static int ext8in1_irq_suspend(struct device *dev)
+{
+	mask_irq(irq_to_desc(ext8in1_dev.parent_irq));
+
+	return 0;
+}
+
+/*
+ * Probe the 8-in-1 external-interrupt demux: one parent (GIC) line fans
+ * out to the eight external interrupts EX8..EX15, exposed to clients
+ * through a small linear irq domain.
+ */
+static int zx_ext8in1_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct device_node *parent_np;
+	struct regmap *regmap;
+	struct ext8in1_info *data = &ext8in1_dev;
+	int i;
+
+	parent_np = of_parse_phandle(pdev->dev.of_node, "parent-syscon", 0);
+	if (!parent_np) {
+		dev_err(&pdev->dev, "Can't get parent-syscon\n");
+		return -EINVAL;
+	}
+
+	regmap = syscon_node_to_regmap(parent_np);
+	/* BUGFIX: the node ref was leaked on the success path */
+	of_node_put(parent_np);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+	data->regmap = regmap;
+
+	data->parent_irq = platform_get_irq(pdev, 0);
+	if (data->parent_irq <= 0)
+		return -EPROBE_DEFER;
+
+	data->domain = irq_domain_add_linear(np, 8, &irq_domain_simple_ops, NULL);
+	if (!data->domain)
+		return -ENODEV;
+
+	for (i = EX8_INT; i <= EX15_INT; i++) {
+		int virq = irq_create_mapping(data->domain, i);
+
+		irq_set_chip_and_handler(virq, &ext8in1_irq_chip,
+					 handle_simple_irq);
+		irq_set_chip_data(virq, data);
+	}
+
+	irq_set_chained_handler_and_data(data->parent_irq,
+					 ext8in1_handle_irq, data);
+	enable_irq_wake(data->parent_irq);
+
+	pr_info("zx_ext8in1 init OK. \n");
+
+	return 0;
+}
+
+static const struct of_device_id zx_ext8in1_match[] = {
+	{ .compatible = "zte,zx297520v3-ext8in1" },
+	{ }
+};
+
+/* Mask/unmask the parent line across system sleep (see above). */
+static const struct dev_pm_ops ext8in1_irq_pm_ops = {
+	.resume = ext8in1_irq_resume,
+	.suspend = ext8in1_irq_suspend,
+};
+
+static struct platform_driver zx_ext8in1_driver = {
+	.probe = zx_ext8in1_probe,
+	.driver = {
+		.name = "zx_ext8in1_drv",
+		.of_match_table = zx_ext8in1_match,
+		.pm = &ext8in1_irq_pm_ops,
+	},
+};
+
+/* core_initcall: the demux must exist before client drivers probe. */
+static int __init zx_ext8in1_driver_init(void)
+{
+	return platform_driver_register(&zx_ext8in1_driver);
+}
+core_initcall(zx_ext8in1_driver_init);
+
diff --git a/upstream/linux-5.10/drivers/soc/sc/plat/plat-zx297520v3.c b/upstream/linux-5.10/drivers/soc/sc/plat/plat-zx297520v3.c
new file mode 100755
index 0000000..b2da8d9
--- /dev/null
+++ b/upstream/linux-5.10/drivers/soc/sc/plat/plat-zx297520v3.c
@@ -0,0 +1,2048 @@
+/*
+ * drivers/soc/zte/plat/plat-zx298501.c
+ *
+ *  Copyright (C) 2021 ZTE-TSP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/clockchips.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/syscore_ops.h>
+#include <linux/gpio.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/io.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include <linux/reset.h>
+#include <linux/io.h>
+#include <linux/amba/serial.h>
+#include <linux/serial_reg.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/sched/clock.h>
+#include <linux/suspend.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/reboot.h>
+#include <linux/miscdevice.h>
+#include <linux/soc/sc/common.h>
+#include <linux/soc/sc/pcu.h>
+#include <linux/soc/sc/spinlock.h>
+#include <dt-bindings/soc/zx297520v3-irq.h>
+#include <uapi/linux/sc_bsp/bsp_api.h>
+
+/*
+ *  we use sysfs to test&debug some system funcs
+ *
+ */
+/* sysfs anchors: /sys/zte, /sys/zte/test and /sys/zte/pm. */
+struct kobject *zx_root_kobj; 
+struct kobject *zx_test_kobj;
+struct kobject *zx_pm_kobj;
+
+extern int __init zx_clk_test_init(void);
+extern int __init zx_dma_test_init(void);
+extern int __init zx_icp_test_init(void);
+extern int __init zx_timer_test_init(void);
+
+/* compile-time switches enabling the individual self-tests below */
+#define DOUBLE_EINT_DBG     0
+#define EINT_THREAD_TEST    0
+
+#define	CONFIG_USE_DEBUG_LED		1
+#define ZX_RESET_TEST		1
+#define ZX_CLK_TEST			1
+#define ZX_PINCTRL_TEST		1
+#define ZX_GPIO_TEST		1
+#define ZX_EINT_TEST		1
+#define ZX_PM_TEST			1
+#if ZX_PM_TEST
+#define PM_RUNTIME_AUTO_TEST	1
+#endif
+#define ZX_SPINLOCK_TEST	0
+#define ZX_PM_QOS_TEST		1
+
+
+/* 
+ * 
+ * some test need device probe
+ */
+/* Resources collected by zx_drv_test_probe(), one global instance. */
+struct zx_drv_test
+{
+	struct device	*dev;
+#if ZX_RESET_TEST	
+	struct reset_control *rst;
+#endif
+
+#if ZX_PINCTRL_TEST
+	struct pinctrl		  *pctrl;
+	struct pinctrl_state  *state0;
+	struct pinctrl_state  *state1;
+	struct pinctrl_state  *state2;
+#endif
+
+#if ZX_GPIO_TEST
+	int						gpio;		/* input / LED test pin */
+	int						gpio2;		/* second eint pin (DOUBLE_EINT_DBG) */
+	int						gpio3;		/* output indicator pin */
+	struct gpio_desc 		*gd;
+
+#endif
+
+#if ZX_EINT_TEST
+	int						eint_irq;
+	int 					eint_irq2;
+#endif
+
+#if	ZX_CLK_TEST
+	struct clk 				*clk;
+#endif
+};
+
+struct zx_drv_test drv_test = {0};
+
+#if 0//ZX_RESET_TEST
+/* Pulse the test reset line (currently compiled out; kept for reference). */
+static void drv_reset_test(struct reset_control *rstc)
+{
+	reset_control_assert(rstc);
+	udelay(10);
+	reset_control_deassert(rstc);	
+}
+#endif
+
+#if ZX_EINT_TEST
+#if EINT_THREAD_TEST
+/* Hard-irq half: mask the line and punt to the threaded handler. */
+static irqreturn_t test_eint_pri_isr(int irq, void *p)
+{
+	disable_irq_nosync(irq);
+
+	return IRQ_WAKE_THREAD;
+}
+
+/* Threaded half: retarget the trigger to the pin's opposite level. */
+static irqreturn_t test_eint_isr(int irq, void *p)
+{
+	static int eint_cnt = 0;
+	if ( pinctrl_select_state(drv_test.pctrl, drv_test.state0) < 0) {
+		pr_err("setting state0 failed\n");
+	}
+
+	irq_set_irq_type(drv_test.eint_irq, gpio_get_value(drv_test.gpio)?IRQ_TYPE_LEVEL_LOW:IRQ_TYPE_LEVEL_HIGH);
+
+	if ( pinctrl_select_state(drv_test.pctrl, drv_test.state1) < 0) {
+		pr_err("setting state0 failed\n");
+	}
+
+	enable_irq(irq);
+
+	pr_info("eint9 get = %d\n", ++eint_cnt);
+
+	return IRQ_HANDLED;
+}
+#else
+/* Level-triggered eint9 handler: flip the trigger to the opposite level. */
+static irqreturn_t test_eint_isr(int irq, void *p)
+{
+	static int eint_cnt = 0;
+
+	irq_set_irq_type(drv_test.eint_irq, gpio_get_value(drv_test.gpio)?IRQ_TYPE_LEVEL_LOW:IRQ_TYPE_LEVEL_HIGH);
+
+	//pr_info("eint9 get = %d\n", ++eint_cnt);
+
+	return IRQ_HANDLED;
+}
+#endif
+/* Second external interrupt (eint12); only wired when DOUBLE_EINT_DBG=1. */
+static irqreturn_t test_eint_isr2(int irq, void *p)
+{
+	static int eint_cnt1 = 0;
+
+	irq_set_irq_type(drv_test.eint_irq2, gpio_get_value(drv_test.gpio2)?IRQ_TYPE_LEVEL_LOW:IRQ_TYPE_LEVEL_HIGH);
+
+	pr_info("eint12 get = %d\n", ++eint_cnt1);
+
+	return IRQ_HANDLED;
+}
+
+#endif
+
+#if ZX_GPIO_TEST
+/* Count and log each GPIO test interrupt. */
+static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
+{
+	static int irq_cnt = 0;
+
+	irq_cnt++;
+	pr_info("gpio irq_cnt = %d\n", irq_cnt);
+
+	return IRQ_HANDLED;
+}
+#endif
+
+/*
+ *  test led helper interface
+ *
+ */
+#if CONFIG_USE_DEBUG_LED
+/* Claim the test gpio and drive the debug LED off initially. */
+static void test_led_init(void)
+{
+	int ret;
+
+	if (!drv_test.dev)
+		return;
+
+	ret = gpio_request(drv_test.gpio, "led_test");
+	if (ret) 
+	{
+		pr_info("led_test gpio request error.\n");
+		return ;
+	}
+
+	gpio_direction_output(drv_test.gpio, 0);
+}
+static void test_led_on(void)
+{
+	if (!drv_test.dev)
+		return;
+
+	gpio_direction_output(drv_test.gpio, 1);
+}
+static void test_led_off(void)
+{
+	if (!drv_test.dev)
+		return;
+
+	gpio_direction_output(drv_test.gpio, 0);
+}
+#else
+/* LED debugging compiled out: keep link-compatible no-ops. */
+static void test_led_init(void){}
+static void test_led_on(void){}
+static void test_led_off(void){}
+#endif
+
+#if ZX_PM_TEST
+
+/* System resume: hold the device's wakeup source and log the callback. */
+static int zx_drv_test_pm_resume(struct device *dev)
+{
+	pm_stay_awake(dev);
+
+	pr_info("zx_drv_test_pm_resume\n");
+	return 0;
+}
+
+/* System suspend: log only. */
+static int zx_drv_test_pm_suspend(struct device *dev)
+{
+	pr_info("zx_drv_test_pm_suspend\n");
+	return 0;
+}
+
+static int zx_drv_test_pm_runtime_resume(struct device *dev)
+{
+	/* enable clk and restore regs */
+	pr_info("zx_drv_test_pm_runtime_resume\n");
+	return 0;
+}
+
+static int zx_drv_test_pm_runtime_suspend(struct device *dev)
+{
+	/* backup regs and disable clk */
+	pr_info("zx_drv_test_pm_runtime_suspend\n");
+	return 0;
+}
+
+static int zx_drv_test_pm_runtime_idle(struct device *dev)
+{
+	pr_info("zx_drv_test_pm_runtime_idle\n");
+	return 0;
+}
+
+/* Log-only PM callbacks wired into zx_test_driver below. */
+static const struct dev_pm_ops zx_drv_test_pm = {
+	.resume = zx_drv_test_pm_resume,
+	.suspend = zx_drv_test_pm_suspend,
+	.runtime_resume = zx_drv_test_pm_runtime_resume,
+	.runtime_suspend = zx_drv_test_pm_runtime_suspend,
+	.runtime_idle = zx_drv_test_pm_runtime_idle
+};
+#endif
+
+
+/*
+ * Probe for the "zte,drv-test" dummy device.  Exercises, in order: reset
+ * handle, clock, gpios (input pin + optional second/third pins), pinctrl
+ * states, wakeup source, external interrupts and runtime PM.  Each
+ * section is gated by its ZX_*_TEST compile-time switch.
+ */
+static int zx_drv_test_probe(struct platform_device *pdev)
+{
+	int gpio;
+	int irq;
+	enum of_gpio_flags flags;
+	int ret;
+	
+	drv_test.dev = &pdev->dev;
+
+	/* reset */
+#if ZX_RESET_TEST
+	drv_test.rst = devm_reset_control_get(&pdev->dev, "test_rst");
+#endif
+
+	/* clk */
+#if	ZX_CLK_TEST
+	drv_test.clk = devm_clk_get(&pdev->dev, "test");
+	if (IS_ERR(drv_test.clk)) {
+		ret = PTR_ERR(drv_test.clk);
+		dev_err(&pdev->dev, "failed to get test_clk: %d\n", ret);
+		return ret;
+	}
+	clk_prepare_enable(drv_test.clk);
+#endif
+
+#if	ZX_GPIO_TEST
+		drv_test.gd = gpiod_get_index(drv_test.dev, "testtt", 0, GPIOD_OUT_HIGH);
+
+		/* gpio test */
+		gpio = of_get_gpio_flags(pdev->dev.of_node, 0, &flags);
+		if (!gpio_is_valid(gpio)) {
+			pr_info("test gpio no found\n");
+			goto gpio_init_end;
+		}
+		/* pr_info("test gpio :%d flag=0x%x\n", gpio, flags); */
+
+		drv_test.gpio = gpio;
+
+		ret = gpio_request(drv_test.gpio, "gpio119");
+		if (ret)
+		{
+			pr_info("led_test gpio request error.\n");
+			BUG();
+			return 0;
+		}
+		gpio_direction_input(drv_test.gpio);
+
+
+#if DOUBLE_EINT_DBG
+		/* optional second pin, used by the eint12 debug path */
+		gpio = of_get_gpio_flags(pdev->dev.of_node, 1, &flags);
+		if (!gpio_is_valid(gpio)) {
+			pr_info("test gpio1 no found\n");
+			goto gpio_init_end;
+		}
+
+		drv_test.gpio2 = gpio;
+
+		ret = gpio_request(drv_test.gpio2, "gpio131");
+		if (ret)
+		{
+			pr_info("led_test gpio2 request error.\n");
+			BUG();
+			return 0;
+		}
+		gpio_direction_input(drv_test.gpio2);
+
+		pr_info("test gpio :%d gpio2 : %d\n", drv_test.gpio, drv_test.gpio2);
+#endif
+
+	/* third pin: driven high as an output indicator */
+	gpio = of_get_gpio_flags(pdev->dev.of_node, 2, &flags);
+	if (!gpio_is_valid(gpio)) {
+		pr_info("test gpio1 no found\n");
+		goto gpio_init_end;
+	}
+
+	drv_test.gpio3 = gpio;
+
+	ret = gpio_request(drv_test.gpio3, "gpio120");
+	if (ret)
+	{
+		pr_info("led_test gpio3 request error.\n");
+		BUG();
+		return 0;
+	}
+	gpio_direction_output(drv_test.gpio3, 1);
+
+
+gpio_init_end:
+
+#endif
+
+	/* pinctrl */
+#if	ZX_PINCTRL_TEST
+/*	
+	drv_test.pctrl = devm_pinctrl_get_select_default(&pdev->dev);
+*/	
+	drv_test.pctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR(drv_test.pctrl)) {
+		dev_warn(&pdev->dev, "Failed to get test pins");
+		drv_test.pctrl = NULL;
+		goto pinctrl_init_end;
+	}
+	drv_test.state0 = pinctrl_lookup_state(drv_test.pctrl, "state0");
+	if (IS_ERR(drv_test.state0)) {
+		dev_err(&pdev->dev, "TEST: missing state0\n");
+	}
+	drv_test.state1 = pinctrl_lookup_state(drv_test.pctrl, "state1");  // int9
+	if (IS_ERR(drv_test.state1)) {
+		dev_err(&pdev->dev, "TEST: missing state1\n");
+	}
+	drv_test.state2 = pinctrl_lookup_state(drv_test.pctrl, "ext_int5"); // int12
+	if (IS_ERR(drv_test.state2)) {
+		dev_err(&pdev->dev, "TEST: missing state2\n");
+	}
+	if ( pinctrl_select_state(drv_test.pctrl, drv_test.state1) < 0) {
+		dev_err(&pdev->dev, "setting state0 failed\n");
+	}
+
+#if DOUBLE_EINT_DBG
+	/* eint5 */
+	if ( pinctrl_select_state(drv_test.pctrl, drv_test.state2) < 0) {
+		dev_err(&pdev->dev, "setting eint5 failed\n");
+	}
+#endif
+	
+pinctrl_init_end:
+#endif
+
+
+#if	ZX_PM_TEST
+	/* just show how a device use wake source */
+	device_init_wakeup(&pdev->dev, true);
+//	pm_stay_awake(&pdev->dev);
+#endif
+
+	/* eint5 irq */
+#if	ZX_EINT_TEST
+	drv_test.eint_irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	irq_set_irq_type(drv_test.eint_irq, gpio_get_value(drv_test.gpio)?IRQ_TYPE_LEVEL_LOW:IRQ_TYPE_LEVEL_HIGH);
+
+#if EINT_THREAD_TEST
+	ret = request_threaded_irq(drv_test.eint_irq, test_eint_pri_isr, test_eint_isr, IRQF_ONESHOT, "test_eint9", &drv_test);
+#else
+	ret = request_irq(drv_test.eint_irq,
+					test_eint_isr,
+					0, 
+					"test_eint9",
+					&drv_test);
+#endif
+	if(ret<0)
+		BUG();
+	enable_irq_wake(drv_test.eint_irq);
+
+#if DOUBLE_EINT_DBG
+	drv_test.eint_irq2 = irq_of_parse_and_map(pdev->dev.of_node, 1);
+	irq_set_irq_type(drv_test.eint_irq2, gpio_get_value(drv_test.gpio2)?IRQ_TYPE_LEVEL_LOW:IRQ_TYPE_LEVEL_HIGH);
+	ret = request_irq(drv_test.eint_irq2,
+					test_eint_isr2,
+					0,
+					"test_eint12",
+					&drv_test);
+	if(ret<0)
+		BUG();
+	enable_irq_wake(drv_test.eint_irq2);
+#endif
+
+#endif
+
+
+#if	ZX_PM_TEST
+#if PM_RUNTIME_AUTO_TEST
+	pm_runtime_set_autosuspend_delay(&pdev->dev, 3000 /*ms*/);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_get_sync(&pdev->dev);
+	if (!pm_runtime_enabled(&pdev->dev)) {
+		zx_drv_test_pm_runtime_resume(&pdev->dev);
+	}
+
+	/* put to suspend 3s later */
+	pm_runtime_mark_last_busy(&pdev->dev);
+	pm_runtime_put_sync_autosuspend(&pdev->dev);
+#else
+	if (pdev->dev.pm_domain) {
+		pm_runtime_set_active(&pdev->dev);
+		pm_runtime_enable(&pdev->dev);
+	}
+
+	if (pm_runtime_enabled(&pdev->dev))
+		pm_runtime_get_sync(&pdev->dev);
+#endif
+#endif
+
+	return 0;
+}
+
+static const struct of_device_id zx297520v3_drv_test_match[] = {
+	{ .compatible = "zte,drv-test", },
+	{ }
+};
+
+/* Platform driver wrapper for the self-test device above. */
+static struct platform_driver zx_test_driver = {
+	.probe = zx_drv_test_probe,
+	.driver = {
+		.name = "zx297520v3_drv_test",
+#if	ZX_PM_TEST
+		.pm = &zx_drv_test_pm,
+#endif		
+		.of_match_table = zx297520v3_drv_test_match,
+	},
+};
+
+/*sys fs*/
+/*
+ * zte_attr(name) declares a 0644 kobj_attribute wired to name_show()
+ * and name_store(), for the /sys/zte/test nodes below.
+ */
+#define zte_attr(_name) \
+static struct kobj_attribute _name##_attr = 	\
+{                           \
+	.attr	= 				\
+	{                       \
+		.name = __stringify(_name),	\
+		.mode = 0644,			\
+	},					\
+	.show	= _name##_show,			\
+	.store	= _name##_store,		\
+}
+
+/*=============================================================================
+ *========  /sys/zte/test/os_timer  ==============================================
+ *=============================================================================
+ */
+/* Show a short usage hint for the os_timer test node. */
+static ssize_t os_timer_show(struct kobject *kobj, struct kobj_attribute *attr,
+			  char *buf)
+{
+	return sprintf(buf, "%s\n", "[TEST]Test will light on/off led every 5s~");
+}
+
+/*echo 1 > /sys/zte/test/os_timer*/
+static struct timer_list test_timer;
+static unsigned long test_timer_count = 0;
+static unsigned int os_timer_timeout = 5*1000;	/* period in ms */
+/* Self-rearming callback: logs each expiry; LED toggling kept disabled. */
+static void test_timer_expired(struct timer_list *unused)
+{
+	mod_timer(&test_timer, jiffies + msecs_to_jiffies(os_timer_timeout));
+
+//	gpio_set_value(drv_test.gpio3, gpio_get_value(drv_test.gpio3)^1);
+
+	pr_info("[TEST]Test timer arrived:%lu \n", 
+			++test_timer_count);
+
+/*
+	if(test_timer_count&1)
+		test_led_on();
+	else
+		test_led_off();
+*/		
+}
+
+/*
+ * echo 1 > os_timer : start the periodic test timer
+ * other values      : stop it and reset the counter
+ *
+ * Returns bytes consumed, or -EINVAL on unparsable input.
+ */
+static ssize_t os_timer_store(struct kobject *kobj, struct kobj_attribute *attr,
+			   const char *buf, size_t n)
+{
+	unsigned int temp;
+
+	/* BUGFIX: temp was read uninitialized when sscanf() failed */
+	if (sscanf(buf, "%u", &temp) != 1)
+		return -EINVAL;
+
+	pr_info("temp=%d", temp);
+
+	if (temp == 1) {
+		mod_timer(&test_timer, jiffies + msecs_to_jiffies(os_timer_timeout));
+	} else {
+		del_timer(&test_timer);
+		test_timer_count = 0;
+	}
+
+	return n;
+}
+
+zte_attr(os_timer);
+
+/*=============================================================================
+ *========  /sys/zte/test/timer  ==============================================
+ *=============================================================================
+ */
+/*echo 0xXXXXXXXX > /sys/zte/test/reg_read*/
+#if ZX_PM_TEST
+/* Emit a fixed magic value; proves the sysfs node is alive. */
+static ssize_t wake_show(struct kobject *kobj, struct kobj_attribute *attr,
+			  char *buf)
+{
+	return sprintf(buf, "0x%x\n", 0xaa55aa55);
+}
+
+/*
+ * echo 1 > wake : hold the test device's wakeup source
+ * echo 2 > wake : release it
+ *
+ * Returns bytes consumed, or -EINVAL on unparsable input.
+ */
+static ssize_t wake_store(struct kobject *kobj, struct kobj_attribute *attr,
+			   const char *buf, size_t n)
+{
+	unsigned int temp;
+
+	/* BUGFIX: temp was read uninitialized when sscanf() failed */
+	if (sscanf(buf, "%u", &temp) != 1)
+		return -EINVAL;
+
+	pr_info("temp=%d", temp);
+
+	if (temp == 1)
+		pm_stay_awake(drv_test.dev);
+	else if (temp == 2)
+		pm_relax(drv_test.dev);
+
+	return n;
+}
+
+zte_attr(wake);
+#endif
+/*=============================================================================
+ *========  /sys/zte/test/spinlock  ==============================================
+ *=============================================================================
+ */
+/*echo 0xXXXXXXXX > /sys/zte/test/spinlock*/
+#if ZX_SPINLOCK_TEST
+/* NOTE: compiled out (ZX_SPINLOCK_TEST == 0); kept for bring-up debugging. */
+void hw_spin_lock(u32 hwid);
+void hw_spin_unlock(u32 hwid);
+static ssize_t spinlock_show(struct kobject *kobj, struct kobj_attribute *attr,
+			  char *buf)
+{
+	char *s = buf;
+
+//	s += sprintf(s, "%s\n", "[TEST]Read register[0xXXXXXXXX] value~");	
+
+	return (s - buf);
+}
+
+static ssize_t spinlock_store(struct kobject *kobj, struct kobj_attribute *attr,
+			   const char *buf, size_t n)
+{
+	int error = 0;
+	u32 temp;	
+
+	{
+#if 0
+		int irq_base ;
+	
+		pr_info("current irq=%d\n", irq);	
+		irq_base = irq_alloc_descs(-1, 0, 11, 0);
+		pr_info("next irq=%d\n", irq_base); 
+#endif
+		struct of_phandle_args out_irq;
+		int rc;
+	
+		rc = of_irq_parse_one(drv_test.dev->of_node, 0, &out_irq);
+		pr_info("pcie irq=%d\n", rc);	
+	
+	}
+
+	sscanf(buf, "%u", &temp);
+	pr_info("spinlock store:%d\n", temp);
+#if 0
+	/* 1--lock  2--unlock */
+	if(temp == 1)
+	{
+		hw_spin_lock(7);
+		pr_info("spinlock lock ok!\n");		
+	}
+	else if(temp == 2)
+	{
+		hw_spin_unlock(7);
+		pr_info("spinlock unlock ok!\n");		
+	}
+#endif
+	return error ? error : n;
+}
+
+zte_attr(spinlock);
+#endif
+
+/*=============================================================================
+ *========  /sys/zte/test/reset  ==============================================
+ *=============================================================================
+ */
+/* echo 1/0 > /sys/zte/test/reset */
+#if ZX_RESET_TEST
+/* Report the current state of the test reset line. */
+static ssize_t reset_show(struct kobject *kobj, struct kobj_attribute *attr,
+			  char *buf)
+{
+	char *s = buf;
+
+	s += sprintf(s, "%s %d\n", "reset signal status:", reset_control_status(drv_test.rst));	
+
+	return (s - buf);
+}
+
+/*
+ * echo 1 > reset : assert the test reset line
+ * echo 0 > reset : deassert (release) it
+ *
+ * Returns bytes consumed, or -EINVAL on unparsable input.
+ */
+static ssize_t reset_store(struct kobject *kobj, struct kobj_attribute *attr,
+			   const char *buf, size_t n)
+{
+	u32 temp;
+
+	if (sscanf(buf, "%u", &temp) != 1)
+		return -EINVAL;
+
+	/* 1--assert 0--deassert */
+	if (temp == 1) {
+		/* BUGFIX: was reset_control_deassert(), contradicting the log */
+		reset_control_assert(drv_test.rst);
+
+		pr_info("reset signal assert!\n");
+	} else if (temp == 0) {
+		reset_control_deassert(drv_test.rst);
+
+		pr_info("reset signal release!\n");
+	}
+
+	return n;
+}
+
+zte_attr(reset);
+#endif
+/*=============================================================================
+ *========  /sys/zte/test/gpio  ============================================
+ *=============================================================================
+ */
+/* echo 1/0 > /sys/zte/test/gpio */
+#if	ZX_PINCTRL_TEST
+/* The gpio test node exposes nothing to read; returns an empty string. */
+static ssize_t gpio_show(struct kobject *kobj, struct kobj_attribute *attr,
+			  char *buf)
+{
+	return 0;
+}
+
+/*
+ * echo 0/1/2 > gpio : drive the test gpio low / high / read it back.
+ * Returns bytes consumed, -ENODEV when the test device never probed,
+ * or -EINVAL on unparsable input.
+ */
+static ssize_t gpio_store(struct kobject *kobj, struct kobj_attribute *attr,
+			   const char *buf, size_t n)
+{
+	u32 temp;
+
+	/* BUGFIX: returning 0 from a sysfs store makes userspace loop forever */
+	if (!drv_test.dev)
+		return -ENODEV;
+
+	/* BUGFIX: temp was read uninitialized when sscanf() failed */
+	if (sscanf(buf, "%u", &temp) != 1)
+		return -EINVAL;
+
+ 	/* 0-out_l 1-out_h 2-in and get  */
+	if (temp == 0) {
+		gpio_direction_output(drv_test.gpio, 0);
+		pr_info("gpio out low");
+	} else if (temp == 1) {
+		gpio_direction_output(drv_test.gpio, 1);
+		pr_info("gpio out high");
+	} else if (temp == 2) {
+		gpio_direction_input(drv_test.gpio);
+		pr_info("gpio get value(%d) !\n",__gpio_get_value(drv_test.gpio));
+	}
+
+	return n;
+}
+
+zte_attr(gpio);
+#endif
+
+/*=============================================================================
+ *========  /sys/zte/test/pinctrl  ============================================
+ *=============================================================================
+ */
+/* echo 1/0 > /sys/zte/test/pinctrl */
+#if	ZX_PINCTRL_TEST
+static ssize_t pinctrl_show(struct kobject *kobj, struct kobj_attribute *attr,
+			  char *buf)
+{
+	int i;
+
+	/* Dump the virq mapping of the first 16 GPIO lines to the kernel
+	 * log; nothing is written into the sysfs buffer itself. */
+	for (i = 0; i < 16; i++)
+		printk("gpio_%d mapped irq to %d \n", i, gpio_to_irq(i));
+
+	return 0;
+}
+
+/* sysfs store for /sys/zte/test/pinctrl: select pin state 0, 1 or 2. */
+static ssize_t pinctrl_store(struct kobject *kobj, struct kobj_attribute *attr,
+			   const char *buf, size_t n)
+{
+	u32 temp;
+
+	sscanf(buf, "%u", &temp);
+
+	/* temp --> pin state */
+	switch (temp) {
+	case 1:
+		if (pinctrl_select_state(drv_test.pctrl, drv_test.state1) < 0)
+			dev_err(drv_test.dev, "setting state1 failed\n");
+		pr_info("setting state1 !\n");
+		break;
+	case 0:
+		if (pinctrl_select_state(drv_test.pctrl, drv_test.state0) < 0)
+			dev_err(drv_test.dev, "setting state0 failed\n");
+		pr_info("setting state0 !\n");
+		break;
+	case 2:
+		if (pinctrl_select_state(drv_test.pctrl, drv_test.state2) < 0)
+			dev_err(drv_test.dev, "setting state2 failed\n");
+		pr_info("setting state2 !\n");
+		break;
+	default:
+		break;
+	}
+
+	return n;
+}
+
+zte_attr(pinctrl);
+#endif
+
+/*=============================================================================
+ *========  /sys/zte/test/pd  ==============================================
+ *=============================================================================
+ */
+/* echo 1/0 > /sys/zte/test/pd */
+#if ZX_PM_TEST
+static ssize_t pd_show(struct kobject *kobj, struct kobj_attribute *attr,
+			  char *buf)
+{
+	/* The pd test node has nothing to report. */
+	return 0;
+}
+
+/*
+ * sysfs store for /sys/zte/test/pd: "1" takes a runtime-PM reference on the
+ * test device (powers the domain on), "0" drops it.  Without CONFIG_PM the
+ * node reports -ENXIO.
+ */
+static ssize_t pd_store(struct kobject *kobj, struct kobj_attribute *attr,
+			   const char *buf, size_t n)
+{
+	int error = 0;
+
+#ifdef CONFIG_PM
+	u32 temp;	
+
+	sscanf(buf, "%u", &temp);
+
+	/* 1--on 0--off */
+	if(temp == 1)
+	{
+		pm_runtime_get_sync(drv_test.dev);
+
+		pr_info("power on!\n");
+	}
+	else if(temp == 0)
+	{
+#if PM_RUNTIME_AUTO_TEST
+		/* exercise the autosuspend path instead of an immediate put */
+		pm_runtime_mark_last_busy(drv_test.dev);
+		pm_runtime_put_sync_autosuspend(drv_test.dev);
+#else	
+		pm_runtime_put_sync(drv_test.dev);
+#endif
+		pr_info("power off!\n");		
+	}
+#else
+	error = -ENXIO;
+#endif
+	return error ? error : n;
+}
+
+zte_attr(pd);
+#endif
+
+
+/*=============================================================================
+ *========  /sys/zte/test/clk  ==============================================
+ *=============================================================================
+ */
+/* echo 1/0 > /sys/zte/test/clk */
+#if	ZX_CLK_TEST
+/* Report the enable state and current rate of the test clock. */
+static ssize_t clk_show(struct kobject *kobj, struct kobj_attribute *attr,
+			  char *buf)
+{
+	char *s = buf;
+
+	s += sprintf(s, "%s %d\n", "clk enable status:", __clk_is_enabled(drv_test.clk));
+	/* BUG FIX: clk_get_rate() returns unsigned long; %d was a format
+	 * specifier mismatch (undefined behaviour on mismatched width). */
+	s += sprintf(s, "%s %lu\n", "clk rate:", clk_get_rate(drv_test.clk));
+
+	return (s - buf);
+}
+
+/* sysfs store for /sys/zte/test/clk: 1 enables, 0 disables, any other
+ * value is taken as a target rate in Hz. */
+static ssize_t clk_store(struct kobject *kobj, struct kobj_attribute *attr,
+			   const char *buf, size_t n)
+{
+	u32 temp;
+
+	sscanf(buf, "%u", &temp);
+
+	/* 1--on 0--off */
+	switch (temp) {
+	case 1:
+		clk_enable(drv_test.clk);
+		break;
+	case 0:
+		clk_disable(drv_test.clk);
+		break;
+	default:
+		clk_set_rate(drv_test.clk, temp);
+		break;
+	}
+
+	return n;
+}
+
+zte_attr(clk);
+#endif
+
+/*=============================================================================
+ *========  /sys/zte/test/pm_qos  ==============================================
+ *=============================================================================
+ */
+/* echo 1/0 > /sys/zte/test/pm_qos */
+#if	ZX_PM_QOS_TEST
+static unsigned int pm_qos_test = 0;
+static ssize_t pm_qos_show(struct kobject *kobj, struct kobj_attribute *attr,
+			  char *buf)
+{
+	const char *mode[3] = {"normal", "performance", "powersave"};
+	const char *label = (pm_qos_test <= 2) ? mode[pm_qos_test] : "unknown";
+
+	/* Report the last pm_qos mode selected through pm_qos_store(). */
+	return sprintf(buf, "pm_qos mode: %s\n", label);
+}
+
+/* sysfs store for /sys/zte/test/pm_qos: 0 normal, 1 performance,
+ * 2 powersave; anything else is rejected. */
+static ssize_t pm_qos_store(struct kobject *kobj, struct kobj_attribute *attr,
+			   const char *buf, size_t n)
+{
+	u32 temp;
+
+	sscanf(buf, "%u", &temp);
+
+	switch (temp) {
+	case 1:
+		freq_performance(FREQ_OWNER_MANAGER);
+		break;
+	case 2:
+		freq_powersave(FREQ_OWNER_MANAGER);
+		break;
+	case 0:
+		freq_normal(FREQ_OWNER_MANAGER);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	pm_qos_test = temp;
+
+	return n;
+}
+
+zte_attr(pm_qos);
+#endif
+
+
+static ssize_t rpmsg_log_show(struct kobject *kobj, struct kobj_attribute *attr,
+			  char *buf);
+
+static ssize_t rpmsg_log_store(struct kobject *kobj, struct kobj_attribute *attr,
+			   const char *buf, size_t n)
+{
+	/* Writes to the rpmsg_log node are accepted and ignored. */
+	return n;
+}
+zte_attr(rpmsg_log);
+
+/*test group*/
+static struct attribute * g[] = 
+{
+	&os_timer_attr.attr,
+#if ZX_PM_TEST
+	&wake_attr.attr,
+#endif	
+#if ZX_SPINLOCK_TEST
+	&spinlock_attr.attr,
+#endif	
+#if ZX_RESET_TEST	
+	&reset_attr.attr,
+#endif
+#if ZX_GPIO_TEST
+	&gpio_attr.attr,
+#endif	
+#if	ZX_PINCTRL_TEST
+	&pinctrl_attr.attr,
+#endif
+#if ZX_PM_TEST
+	&pd_attr.attr,
+#endif	
+#if	ZX_CLK_TEST
+	&clk_attr.attr,
+#endif	
+#if	ZX_PM_QOS_TEST
+	&pm_qos_attr.attr,
+#endif
+	&rpmsg_log_attr.attr,
+	NULL,
+};
+
+static struct attribute_group zte_test_attr_group = 
+{
+	.attrs = g,
+};
+
+/**
+ *  1¡¢create sysfs "/sys/zte/test" 
+ *  2¡¢call other debug modules 
+ */
+/**
+ *  1. create sysfs "/sys/zte/test" and its attribute group
+ *  2. arm the test timer and register the test platform driver
+ */
+static int __init zx_test_init(void)
+{
+	int ret;
+
+	zx_test_kobj = kobject_create_and_add("test", zx_root_kobj);
+	if (!zx_test_kobj)
+		return -ENOMEM;
+
+	ret = sysfs_create_group(zx_test_kobj, &zte_test_attr_group);
+	if (ret) {
+		pr_info("[DEBUG] sysfs_create_group ret %d\n", ret);
+		/* BUG FIX: drop the kobject we just created, otherwise the
+		 * empty /sys/zte/test directory is leaked on failure. */
+		kobject_put(zx_test_kobj);
+		return ret;
+	}
+
+	timer_setup(&test_timer, test_timer_expired, 0);
+
+	pr_info("[DEBUG] create test sysfs interface OK.\n");
+
+	return platform_driver_register(&zx_test_driver);
+}
+
+/*
+#define SC_LIBPM_LPMODE_CPU_HALT			(0)
+#define SC_LIBPM_LPMODE_CPU_CLKOFF			(1)
+#define SC_LIBPM_LPMODE_CPU_POWEROFF		(2)
+*/
+static int zx_lp_mode = 2;
+/* Return the currently selected low-power mode (see SC_LIBPM_LPMODE_* above). */
+int pm_get_lpmode(void)
+{
+	return zx_lp_mode;
+}
+
+/* sysfs show for lp_mode: print the current low-power mode (0..2). */
+static ssize_t lp_mode_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "lp_mode:%d\n", zx_lp_mode);
+}
+
+/* sysfs store for lp_mode: accept 0 (halt), 1 (clkoff) or 2 (poweroff). */
+static ssize_t lp_mode_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned int lp_mode;
+
+	/* BUG FIX: on a parse failure the old code still compared (and could
+	 * store) an uninitialized lp_mode; bail out immediately instead. */
+	if (sscanf(buf, "%u", &lp_mode) != 1)
+		return -EINVAL;
+
+	if (lp_mode > 2)
+		return -EINVAL;
+
+	zx_lp_mode = lp_mode;
+
+	return count;
+}
+
+static DEVICE_ATTR(lp_mode, 0600, lp_mode_show, lp_mode_store);
+static struct attribute *zx_pm_attributes[] = {
+	&dev_attr_lp_mode.attr,
+	NULL,
+};
+
+static const struct attribute_group zx_pm_attribute_group = {
+	.attrs = (struct attribute **) zx_pm_attributes,
+};
+
+#ifdef CONFIG_PM_SLEEP_DEBUG
+extern bool pm_debug_messages_on;
+#endif
+
+/**
+ *  1¡¢create sysfs "/sys/zte" 
+ *  2¡¢call other debug modules 
+ */
+int __init zx_dma_test_init(void); 
+/**
+ *  1. create sysfs "/sys/zte" and "/sys/zte/power"
+ *  2. call the other debug-module initialisers
+ */
+static int __init zx_debug_init(void)
+{
+	int ret;
+
+	pr_info("[DEBUG] create zte sysfs interface OK.\n");
+	zx_root_kobj = kobject_create_and_add("zte", NULL);
+	if (!zx_root_kobj)
+		return -ENOMEM;
+
+	zx_pm_kobj = kobject_create_and_add("power", zx_root_kobj);
+	if (!zx_pm_kobj) {
+		/* BUG FIX: do not leak the /sys/zte kobject on failure */
+		kobject_put(zx_root_kobj);
+		return -ENOMEM;
+	}
+
+	/* BUG FIX: the result of sysfs_create_group() used to be ignored */
+	ret = sysfs_create_group(zx_pm_kobj, &zx_pm_attribute_group);
+	if (ret)
+		pr_warn("[DEBUG] zx_pm sysfs group failed: %d\n", ret);
+
+	zx_test_init();
+
+	zx_dma_test_init();
+
+/*	zx_clk_test_init(); */
+
+	zx_icp_test_init();
+
+	zx_timer_test_init();
+
+#ifdef CONFIG_PM_SLEEP_DEBUG
+	pm_debug_messages_on = true;
+#endif
+
+	return 0;
+}
+
+late_initcall(zx_debug_init);
+
+void __init zx29_clock_init(void);
+
+struct zx297520v3_chip_info {
+	void __iomem 				*stdcrm_base;
+	void __iomem 				*socsys_base;
+	void __iomem 				*sflock_base;
+	void __iomem 				*apcrm_base;	
+};
+
+static struct zx297520v3_chip_info zx_chip_info;
+
+/* Accessor for the standby-CRM register base mapped in spinlock_init(). */
+void __iomem *get_stdcrm_base(void)
+{
+	return zx_chip_info.stdcrm_base;
+}
+
+/* Accessor for the SoC-sys register base mapped in socsys_init(). */
+void __iomem *get_socsys_base(void)
+{
+	return zx_chip_info.socsys_base;
+}
+
+/*
+ * Map the standby-CRM and hardware-spinlock register blocks from the device
+ * tree and hand them to the shared zx_spinlock layer.
+ */
+static int spinlock_init(void)
+{
+	struct device_node *np;
+	void __iomem *param[2];
+
+	np = of_find_compatible_node(NULL, NULL, "zte,zx297520v3-standby");
+	if (!np) {
+		BUG();
+		return -ENODEV;
+	}
+
+	zx_chip_info.stdcrm_base = of_iomap(np, 0);
+	/* BUG FIX: of_find_compatible_node() returns the node with an
+	 * elevated refcount that was never dropped. */
+	of_node_put(np);
+	WARN(!zx_chip_info.stdcrm_base, "unable to map stdcrm_base registers\n");
+
+	np = of_find_compatible_node(NULL, NULL, "zte,zx29_spinlock");
+	if (!np) {
+		BUG();
+		return -ENODEV;
+	}
+
+	zx_chip_info.sflock_base = of_iomap(np, 0);
+	of_node_put(np);
+	WARN(!zx_chip_info.sflock_base, "unable to map sflock_base registers\n");
+
+	param[0] = zx_chip_info.stdcrm_base;
+	param[1] = zx_chip_info.sflock_base;
+	zx_spinlock_init(param);
+
+	return 0;
+}
+
+/* Map the socsys register block described in the device tree. */
+static void socsys_init(void)
+{
+	struct device_node *np;
+
+	np = of_find_compatible_node(NULL, NULL, "zte,zx297520v3-socsys");
+	if (!np)
+		BUG();
+
+	zx_chip_info.socsys_base = of_iomap(np, 0);
+	/* BUG FIX: drop the node reference taken by the lookup */
+	of_node_put(np);
+	WARN(!zx_chip_info.socsys_base, "unable to map socsys_base registers\n");
+}
+
+/*---------------------------------------------------------*/
+#define AP_INT_MODE_BASE 			(zx_chip_info.apcrm_base + 0x70)
+#define AP_PPI_MODE_REG 			(zx_chip_info.apcrm_base + 0xA0)
+
+#define	INT_HIGHLEVEL   			(0x0)       /* 00: high level */
+#define	INT_LOWLEVEL    			(0x1)       /* 01: low level */
+#define	INT_POSEDGE     			(0x2)       /* 10: raise edge */
+#define	INT_NEGEDGE     			(0x3)       /* 11: fall edge */
+
+/*
+ * Program the trigger mode of one SPI in the AP interrupt-mode registers.
+ * Each interrupt occupies a 2-bit field (16 interrupts per 32-bit register);
+ * see the INT_* encodings above.  Returns 0, or -EINVAL for an unsupported
+ * linux trigger type.
+ */
+static int zx29_int_set_type(unsigned int hwirq, unsigned int type)
+{
+	unsigned int data_tmp=0;
+	unsigned int srctype=0;
+	unsigned int reg_index=0,pos_index=0;
+
+	switch (type) {
+		case IRQ_TYPE_LEVEL_HIGH:
+			srctype = INT_HIGHLEVEL;
+			break;
+		case IRQ_TYPE_EDGE_RISING:
+			srctype = INT_POSEDGE;
+			break;
+		case IRQ_TYPE_LEVEL_LOW:		
+			srctype = INT_LOWLEVEL;
+			break;
+		case IRQ_TYPE_EDGE_FALLING:		
+			srctype = INT_NEGEDGE;
+			break;
+		default:
+			return -EINVAL;
+	}
+	/* 16 two-bit fields per register: word index and bit offset */
+    reg_index=(hwirq)/16;
+	pos_index=((hwirq)%16)*2;
+	
+	/* read-modify-write the 2-bit field for this hwirq */
+	data_tmp=zx_read_reg(AP_INT_MODE_BASE+reg_index*4);
+	data_tmp &= ~(3<<pos_index);
+	data_tmp |= srctype<<pos_index;
+	zx_write_reg(AP_INT_MODE_BASE+reg_index*4, data_tmp);
+
+	return 0;
+}
+
+/* Apply the board's default trigger type to one interrupt line: a few
+ * timer/RF lines are rising-edge, the MCU LCD line is low-level, and
+ * everything else defaults to high-level. */
+static void int_set_type_default(unsigned int line)
+{
+	unsigned int int_type;
+
+	switch (line) {
+	case WDT_INT:
+	case AP_TIMER0_INT:
+	case GSM_RFSSCR_INT:
+	case GSM_RFSSCT_INT:
+	case AP_TIMER3_INT:
+	case AP_TIMER4_INT:
+	case SYS_COUNTER_INT:
+		int_type = IRQ_TYPE_EDGE_RISING;
+		break;
+	case MCU_LCD_INT:
+		int_type = IRQ_TYPE_LEVEL_LOW;
+		break;
+	default:
+		int_type = IRQ_TYPE_LEVEL_HIGH;
+		break;
+	}
+
+	zx29_int_set_type(line, int_type);
+}
+
+/* Map the AP CRM block, set the PPI mode register and give every shared
+ * peripheral interrupt its default trigger type. */
+static void apcrm_init(void)
+{
+	int i;
+	struct device_node *np;
+
+	np = of_find_compatible_node(NULL, NULL, "zte,zx297520v3-apcrm");
+	if (!np)
+		BUG();
+
+	zx_chip_info.apcrm_base = of_iomap(np, 0);
+	/* BUG FIX: release the node reference taken by the lookup */
+	of_node_put(np);
+	WARN(!zx_chip_info.apcrm_base, "unable to map apcrm_base registers\n");
+
+	zx_write_reg(AP_PPI_MODE_REG, 0x55545555);
+
+	for (i = 0; i < IRQ_ZX297520V3_SPI_NUM; i++)
+		int_set_type_default(i);
+
+}
+
+/*
+ * Map and initialise the early SoC register blocks (standby CRM + hardware
+ * spinlock, socsys, apcrm).  Must run before code that uses those bases.
+ */
+void early_drv_init(void)
+{
+	spinlock_init();
+
+	socsys_init();
+
+	apcrm_init();
+}
+
+//early_initcall(early_drv_init);
+
+/*-------------------------------------------------------------------*/
+#define ZX_PM_MINOR			(235)
+
+static unsigned int	pm_wl_mask = 0;
+static unsigned int	pm_wl_event = 0;
+static u64 pm_sleep_time;
+
+static DECLARE_WAIT_QUEUE_HEAD(zx_pm_wait);
+static bool zx_pm_wake_flag = false;
+static bool zx_in_suspend = false; 
+
+/*
+ * Latch a wake event for userspace, filtered through the mask configured
+ * via SC_PM_WL_SET/SC_PM_WL_CLEAR.
+ * NOTE(review): this overwrites pm_wl_event rather than OR-ing into it, so
+ * only the most recent event survives -- confirm that is intentional.
+ */
+void pm_wl_set_event(unsigned int wake_event)
+{
+	if (wake_event >=PM_WL_EVENT_END)
+		return;
+
+	pm_wl_event = BIT(wake_event)&pm_wl_mask;
+}
+
+/* Return the last latched (already mask-filtered) wake event bits. */
+static unsigned int pm_wl_get_event(void)
+{
+//	return pm_wl_event&pm_wl_mask;
+	return pm_wl_event;
+}
+
+void pm_set_sleeptime(u64 sleep_time)
+{
+	pm_sleep_time = sleep_time;
+}
+
+static u64 pm_get_sleeptime(void)
+{
+	return pm_sleep_time;
+}
+
+/* No per-open state: the pm misc device is purely read/ioctl driven. */
+static int zx_pm_open(struct inode *inode, struct file *filp)
+{
+	return 0;
+}
+
+static int zx_pm_release(struct inode *inode, struct file *filp)
+{
+	return 0;
+}
+
+/* Mark that the system is entering suspend. */
+void zx_enter_suspend(void)
+{
+	zx_in_suspend = true;
+}
+
+/* Mark that the system has left suspend. */
+void zx_exit_suspend(void)
+{
+	zx_in_suspend = false;
+}
+
+/* True between zx_enter_suspend() and zx_exit_suspend(). */
+bool zx_suspend_query(void)
+{
+	return zx_in_suspend;
+}
+ 
+/*
+ * Wake any reader blocked in zx_pm_read() if a masked-in wake event has
+ * been latched by pm_wl_set_event().
+ */
+void pm_notify_wake_event(void)
+{
+	if (pm_wl_event) {
+		zx_pm_wake_flag = true;
+		wake_up_interruptible(&zx_pm_wait);
+	}
+}
+
+/*
+ * Block (freezable) until a wake event is latched, then hand userspace a
+ * struct sc_pm_info with the last sleep time and the wake-event bits.
+ */
+static ssize_t zx_pm_read(struct file *filp, char __user *buf,
+                             size_t count, loff_t *offp)
+{
+	struct sc_pm_info pm_info;
+
+	zx_pm_wake_flag = false;
+	wait_event_freezable(zx_pm_wait, zx_pm_wake_flag);
+
+	pm_info.sleep_time = pm_get_sleeptime();
+	pm_info.wake_event = pm_wl_get_event();
+
+	/* BUG FIX: copy_to_user() returns the (unsigned) number of bytes NOT
+	 * copied, so the old "ret < 0" test could never detect a fault. */
+	if (copy_to_user((void __user *)buf, &pm_info, sizeof(pm_info)))
+		return -EFAULT;
+
+	return sizeof(pm_info);
+}
+
+/* Placeholder: event translation is not implemented yet, always returns 0. */
+static unsigned int pm_event_convert(unsigned int req_event)
+{
+	return 0;
+}
+
+/*
+ * ioctl handler for /dev/sc_pm: maintain the wake-event mask that filters
+ * what pm_wl_set_event() latches for readers of the device.
+ *   SC_PM_WL_SET   - OR event bits into the mask (arg is a bitmask)
+ *   SC_PM_WL_CLEAR - clear event bits from the mask
+ *   SC_PM_WL_GET   - copy the current mask to the user pointer in arg
+ */
+static long zx_pm_ioctl(struct file *filp, unsigned int cmd,
+							unsigned long arg)
+{
+	int error = 0;
+	unsigned int wl_event;
+
+	switch (cmd) {
+
+	case SC_PM_WL_SET:
+		wl_event = (unsigned int)arg;
+		/* reject bits beyond the defined event range */
+		if (wl_event >= BIT(PM_WL_EVENT_END)) {
+			error = -ENOTTY;
+			break;
+		}
+		pm_wl_mask |= wl_event;
+		/* pr_info("%s:pm_wl_mask(0x%x) user_set(0x%x)\n", __func__, pm_wl_mask, wl_event); */
+		break;
+
+	case SC_PM_WL_CLEAR:
+		wl_event = (unsigned int)arg;
+		if (wl_event >= BIT(PM_WL_EVENT_END)) {
+			error = -ENOTTY;
+			break;
+		}
+		pm_wl_mask &= (~wl_event);
+		/* pr_info("%s:pm_wl_mask(0x%x) user_clr(0x%x)\n", __func__, pm_wl_mask, wl_event); */
+		break;
+
+	case SC_PM_WL_GET:
+		/* pr_info("%s:pm_wl_mask(0x%x)\n", __func__, pm_wl_mask); */
+		error = put_user(pm_wl_mask, (unsigned int __user *)arg);
+		break;
+
+	default:
+		error = -ENOTTY;
+		break;
+	}
+
+	return error;
+}
+
+#ifdef CONFIG_COMPAT
+
+/*
+ * 32-bit compat entry: validate the ioctl magic, normalise pointer-sized
+ * payloads, convert the user pointer and forward to zx_pm_ioctl().
+ */
+static long
+zx_pm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	BUILD_BUG_ON(sizeof(loff_t) != sizeof(compat_loff_t));
+
+
+	if (_IOC_TYPE(cmd) != SC_PM_IOC_MAGIC)
+		return -ENOTTY;
+
+	/* rewrite the size field so 32-bit pointer cmds match the native ones */
+	if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
+		cmd &= ~IOCSIZE_MASK;
+		cmd |= sizeof(char *) << IOCSIZE_SHIFT;
+	}
+
+	return zx_pm_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+
+}
+
+#endif /* CONFIG_COMPAT */
+
+static const struct file_operations zx_pm_fops = {
+	.open = zx_pm_open,
+	.release = zx_pm_release,
+	.read = zx_pm_read,
+/*	.write = zx_pm_write,*/
+	.llseek = no_llseek,
+	.unlocked_ioctl = zx_pm_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = zx_pm_compat_ioctl,
+#endif
+};
+
+static struct miscdevice zx_pm_device = {
+	.minor = ZX_PM_MINOR,
+	.name = "sc_pm",
+	.fops = &zx_pm_fops,
+};
+
+static int zx_pm_device_init(void)
+{
+	return misc_register(&zx_pm_device);
+};
+
+/*-------------------------------------------------------------------*/
+#define ZX_IRQ_MINOR			(237)
+#define SC_LIBIRQ_MAX			(16)
+
+/*
+ * line 		: request
+ * type 		: driver use
+ * wake_flag	: for wait
+ *
+ */
+struct libirq_info {
+	unsigned int 		line;
+	unsigned int 		hirq;
+	unsigned int 		virq;
+	unsigned int 		type;
+	int 				wake;
+	unsigned int 		used;
+	bool				wake_flag;
+	wait_queue_head_t 	wait;
+	char				name[16];
+};
+
+struct libirq_context {
+	unsigned int 			pending;
+	spinlock_t 				lock;
+	struct pinctrl			*pctrl;
+	struct pinctrl_state	*state[SC_LIBIRQ_MAX];
+
+	struct device_node 		*np;
+	struct device_node 		*ext8in1_np;
+	struct libirq_info		info[SC_LIBIRQ_MAX];
+};
+static struct libirq_context irq_ctx = {0};
+
+#define line_used(l)		irq_ctx.info[l].used
+
+/* Map a userspace line number to its hardware interrupt: lines 0..7 sit on
+ * EX0..EX7, lines 8 and up continue from EX8. */
+static int irq_line_convert(int line)
+{
+	return (line < 8) ? (EX0_INT + line) : (EX8_INT + line - 8);
+}
+
+/* Translate the userspace trigger code (0..3) into a linux IRQ type. */
+static unsigned int irq_type_convert(unsigned int req_type)
+{
+	switch (req_type) {
+	case 0:
+		return IRQ_TYPE_EDGE_RISING;
+	case 1:
+		return IRQ_TYPE_EDGE_FALLING;
+	case 2:
+		return IRQ_TYPE_LEVEL_HIGH;
+	case 3:
+		return IRQ_TYPE_LEVEL_LOW;
+	default:
+		return IRQ_TYPE_NONE;
+	}
+}
+
+/*
+ * Create a virq mapping on the 2-cell ext8in1 interrupt controller.
+ * Returns the virq, or 0 on failure (irq_create_of_mapping convention).
+ */
+static int zx_irq_map_ext8in1(int hirq, unsigned int type)
+{
+	struct of_phandle_args args;
+
+	args.args_count = 2;
+	args.args[0] = hirq;
+	args.args[1] = type;
+	args.np = irq_ctx.ext8in1_np;
+
+	return irq_create_of_mapping(&args);
+}
+
+/*
+ * Create a virq mapping for an external interrupt.  EX8..EX15 live behind
+ * the ext8in1 controller; everything else goes through the 3-cell PCU
+ * controller (first cell 0 = SPI).
+ */
+static int zx_irq_map(int hirq, unsigned int type)
+{
+	struct of_phandle_args args;
+
+	if (hirq>=EX8_INT && hirq<=EX15_INT)
+		return zx_irq_map_ext8in1(hirq, type);
+
+	args.args_count = 3;
+	args.args[0] = 0;
+	args.args[1] = hirq;
+	args.args[2] = type;
+	args.np = irq_ctx.np;
+
+	return irq_create_of_mapping(&args);
+}
+
+/* Sleep (freezable) until zx_irq_wakeup() fires for this line. */
+static void zx_irq_wait(unsigned int line)
+{
+	struct libirq_info *li = &irq_ctx.info[line];
+
+	li->wake_flag = false;
+	wait_event_freezable(li->wait, li->wake_flag);
+}
+
+/* Release a waiter blocked in zx_irq_wait() for this line. */
+static void zx_irq_wakeup(unsigned int line)
+{
+	struct libirq_info *li = &irq_ctx.info[line];
+
+	li->wake_flag = true;
+	wake_up_interruptible(&li->wait);
+}
+
+/* Interrupt handler: latch the line in the pending mask and wake the
+ * userspace waiter blocked in SC_IRQ_GET_STATUS. */
+static irqreturn_t zx_irq_isr(int irq, void *p)
+{
+	struct libirq_info *info = (struct libirq_info *)p;
+	unsigned int line = info->line;
+	unsigned long flags;
+
+	if (line_used(line)) {
+		/* BUG FIX: pending is modified under irq_ctx.lock in the
+		 * ioctl paths; take the lock here too so a concurrent
+		 * SC_IRQ_CLEAR_STATUS cannot lose this bit. */
+		spin_lock_irqsave(&irq_ctx.lock, flags);
+		irq_ctx.pending |= BIT(line);
+		spin_unlock_irqrestore(&irq_ctx.lock, flags);
+
+		zx_irq_wakeup(line);
+	}
+
+/*	pr_info("%s:eint get = %d\n", __func__, line); */
+
+	return IRQ_HANDLED;
+}
+
+/* Attach the per-line info block (derived from the device minor) to the
+ * open file. */
+static int zx_irq_open(struct inode *inode, struct file *filp)
+{
+	unsigned int line;
+
+	line = iminor(inode) - ZX_IRQ_MINOR;
+
+	/* BUG FIX: bound-check the derived index before touching info[];
+	 * an unexpected minor would otherwise index out of bounds. */
+	if (line >= SC_LIBIRQ_MAX)
+		return -ENODEV;
+
+	filp->private_data = &(irq_ctx.info[line]);
+
+	return 0;
+}
+
+/*
+ * Last close on a line device: if the line is still installed, reset its
+ * trigger, free the interrupt and mark the line unused.
+ */
+static int zx_irq_release(struct inode *inode, struct file *filp)
+{
+	struct libirq_info *info;
+
+	info = (struct libirq_info *)filp->private_data;
+
+	if(line_used(info->line)) {
+		irq_set_irq_type(info->virq, IRQ_TYPE_NONE);
+		free_irq(info->virq, info);
+		line_used(info->line) = 0;
+	}
+
+	filp->private_data = NULL;
+
+	return 0;
+}
+
+/*
+ * ioctl handler for /dev/sc_irqN: userspace external-interrupt service.
+ *   SC_IRQ_INSTALL      - map + request the irq with the given trigger (arg)
+ *   SC_IRQ_UNINSTALL    - free the irq and mark the line unused
+ *   SC_IRQ_SET_TYPE     - change the trigger type
+ *   SC_IRQ_SET_WAKE     - (un)configure the irq as a wakeup source
+ *   SC_IRQ_GET_WAKE     - read back the wake setting
+ *   SC_IRQ_GET_STATUS   - block until the line fires, return pending mask
+ *   SC_IRQ_CLEAR_STATUS - clear this line's bit in the pending mask
+ */
+static long zx_irq_ioctl(struct file *filp, unsigned int cmd,
+							unsigned long arg)
+{
+	int error = 0;
+	struct libirq_info *info;
+	int virq;
+	unsigned int type;
+	unsigned int en;
+	int ret = 0;
+	unsigned long flags;
+
+	info = (struct libirq_info *)filp->private_data;
+
+	switch (cmd) {
+
+	case SC_IRQ_INSTALL:
+
+		if(line_used(info->line))
+			return -EEXIST;
+
+		type = irq_type_convert((unsigned int)arg);
+		virq = zx_irq_map(info->hirq, type);
+		if (virq <= 0) {
+			pr_err("%s:zx_irq_map %d failed %d(%d)\n", __func__, info->line, virq, type);
+			return -ENOMEM;
+		}
+		/* pr_err("%s:zx_irq_map %d %d %d\n", __func__, info->line, virq, info->hirq);*/
+
+		/* mux the pad to its interrupt function before requesting it */
+		if ( pinctrl_select_state(irq_ctx.pctrl, irq_ctx.state[info->line]) < 0) {
+			pr_err("%s:setting state%d failed\n", __func__, info->line);
+			return -ENODEV;
+		}
+
+		ret = request_irq(virq, zx_irq_isr, 0, info->name, info);
+		if(ret<0) {
+			pr_err("%s:request_irq %d failed %d\n", __func__, info->line, type);
+			return ret;
+		}
+
+		info->virq = virq;
+		info->type = type;
+
+		line_used(info->line) = 1;
+
+		/* pr_info("%s:install(%d) hirq(%d) virq(%d) type(%d)\n", __func__, info->line, info->hirq, info->virq, info->type);*/
+		break;
+
+	case SC_IRQ_UNINSTALL:
+
+		if(!line_used(info->line))
+			return -ENODEV;
+
+		irq_set_irq_type(info->virq, IRQ_TYPE_NONE);
+
+		free_irq(info->virq, info);
+
+		line_used(info->line) = 0;
+
+		/* pr_info("%s:uninstall(%d)\n", __func__, info->line);*/
+		break;
+
+	case SC_IRQ_SET_TYPE:
+
+		if(!line_used(info->line))
+			return -ENODEV;
+
+		type = irq_type_convert((unsigned int)arg);
+		ret = irq_set_irq_type(info->virq, type);
+		if (ret)
+			return ret;
+
+		info->type = type;
+
+		/* pr_info("%s:set_type(%d) virq(%d) type(%d)\n", __func__, info->line, info->virq, info->type); */
+
+		break;
+
+	case SC_IRQ_SET_WAKE:
+
+		if(!line_used(info->line))
+			return -ENODEV;
+
+		en = (unsigned int)arg;
+		ret = irq_set_irq_wake(info->virq, en);
+		if (ret)
+			return ret;
+
+		info->wake = en;
+
+		/* pr_info("%s:set_wake(%d) virq(%d) wake(%d)\n", __func__, info->line, info->virq, info->wake); */
+
+		break;
+
+	case SC_IRQ_GET_WAKE:
+
+		if(!line_used(info->line))
+			return -ENODEV;
+
+		error = put_user(info->wake, (unsigned int __user *)arg);
+
+		/* pr_info("%s:get_wake(%d) virq(%d) wake(%d)\n", __func__, info->line, info->virq, info->wake); */
+
+		break;
+
+	case SC_IRQ_GET_STATUS:
+
+		if(!line_used(info->line))
+			return -ENODEV;
+
+		zx_irq_wait(info->line);
+
+		/* NOTE(review): put_user() may fault and is called here with a
+		 * spinlock held and irqs off -- consider snapshotting pending
+		 * under the lock and copying it out afterwards. */
+		spin_lock_irqsave(&irq_ctx.lock, flags);
+		error = put_user(irq_ctx.pending, (unsigned int __user *)arg);
+		spin_unlock_irqrestore(&irq_ctx.lock, flags);
+
+		/* pr_debug("%s:get_status(%d) virq(%d) wake(%d) pending(0x%x)\n",
+			__func__, info->line, info->virq, info->wake, irq_ctx.pending); */
+
+		break;
+
+	case SC_IRQ_CLEAR_STATUS:
+
+		if(!line_used(info->line))
+			return -ENODEV;
+
+		spin_lock_irqsave(&irq_ctx.lock, flags);
+		irq_ctx.pending &= ~(1 << info->line);
+		spin_unlock_irqrestore(&irq_ctx.lock, flags);
+
+		/* pr_info("%s:clear_status(%d)\n", __func__, info->line); */
+
+		break;
+
+	default:
+		error = -ENOTTY;
+		break;
+	}
+
+	return error;
+}
+
+#ifdef CONFIG_COMPAT
+
+/*
+ * 32-bit compat entry: validate the ioctl magic, normalise pointer-sized
+ * payloads, convert the user pointer and forward to zx_irq_ioctl().
+ */
+static long
+zx_irq_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	BUILD_BUG_ON(sizeof(loff_t) != sizeof(compat_loff_t));
+
+
+	if (_IOC_TYPE(cmd) != SC_IRQ_IOC_MAGIC)
+		return -ENOTTY;
+
+	/* rewrite the size field so 32-bit pointer cmds match the native ones */
+	if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
+		cmd &= ~IOCSIZE_MASK;
+		cmd |= sizeof(char *) << IOCSIZE_SHIFT;
+	}
+
+	return zx_irq_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+
+static const struct file_operations zx_irq_fops = {
+	.open = zx_irq_open,
+	.release = zx_irq_release,
+	.llseek = no_llseek,
+	.unlocked_ioctl = zx_irq_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = zx_irq_compat_ioctl,
+#endif
+};
+
+static struct miscdevice zx_irq_device[SC_LIBIRQ_MAX] = {0};
+/*
+ * Register one misc device per external interrupt line (sc_irq0..15),
+ * resolve the matching pinctrl state for each line and look up the two
+ * interrupt-controller nodes used by zx_irq_map().
+ */
+static int zx_irq_device_init(struct platform_device *pdev)
+{
+	int i;
+	int ret = 0;
+	struct miscdevice *misc_dev;
+	struct device_node *np;
+
+	irq_ctx.pctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR(irq_ctx.pctrl)) {
+		dev_warn(&pdev->dev, "Failed to get sc_irq pins");
+		irq_ctx.pctrl = NULL;
+		return -ENODEV;
+	}
+
+	for (i = 0; i < SC_LIBIRQ_MAX; i++) {
+		misc_dev = &zx_irq_device[i];
+
+		/* BUG FIX: the name was built in a stack-local buffer and
+		 * misc_dev->name kept a pointer to it, so every device ended
+		 * up sharing one dangling string.  Build it in the persistent
+		 * per-line info block instead. */
+		snprintf(irq_ctx.info[i].name, sizeof(irq_ctx.info[i].name),
+			 "sc_irq%d", i);
+
+		misc_dev->minor = ZX_IRQ_MINOR + i;
+		misc_dev->name = irq_ctx.info[i].name;
+		misc_dev->fops = &zx_irq_fops;
+
+		irq_ctx.info[i].line = i;
+		irq_ctx.info[i].hirq = irq_line_convert(i);
+		init_waitqueue_head(&irq_ctx.info[i].wait);
+
+		ret = misc_register(misc_dev);
+		if (ret) {
+			pr_err("%s:register dev(%d) failed:%d \n", __func__, i, ret);
+			return ret;
+		}
+
+		irq_ctx.state[i] = pinctrl_lookup_state(irq_ctx.pctrl,
+							irq_ctx.info[i].name);
+		if (IS_ERR(irq_ctx.state[i])) {
+			dev_err(&pdev->dev, "TEST: missing state(%s)\n",
+				irq_ctx.info[i].name);
+			return -ENODEV;
+		}
+	}
+
+	np = of_find_compatible_node(NULL, NULL, "zte,zx297520v3-pcu");
+	if (NULL == np)	{
+		pr_err("Can't find interrupt-controller \n");
+		return -ENODEV;
+	}
+	irq_ctx.np = np;
+
+	np = of_find_compatible_node(NULL, NULL, "zte,zx297520v3-ext8in1");
+	if (NULL == np)	{
+		pr_err("Can't find ext8in1 interrupt-controller \n");
+		return -ENODEV;
+	}
+	irq_ctx.ext8in1_np = np;
+
+	spin_lock_init(&irq_ctx.lock);
+
+	return ret;
+};
+
+/* Probe the sc-bsp pseudo device: bring up the pm and irq misc devices
+ * and flag the platform device as wakeup-capable. */
+static int zx_bsp_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	ret = zx_pm_device_init();
+	if (ret)
+		return ret;
+
+	ret = zx_irq_device_init(pdev);
+
+	device_init_wakeup(&pdev->dev, true);
+
+	return ret;
+}
+
+static int zx_bsp_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static const struct of_device_id zx_bsp_match[] = {
+	{ .compatible = "sc,sc-bsp", },
+	{ }
+};
+
+static struct platform_driver zx_bsp_driver = {
+	.probe = zx_bsp_probe,
+	.remove = zx_bsp_remove,
+	.driver = {
+		.name = "sc_bsp",
+		.of_match_table = zx_bsp_match,
+	},
+};
+builtin_platform_driver(zx_bsp_driver)
+
+/*---------------------------------------------------------------*/
+static struct reset_control *reboot_rst;
+/* Restart handler: pull the reboot reset line if probe obtained one. */
+static int zx_restart(struct notifier_block *this,
+			   unsigned long mode, void *cmd)
+{
+	if (reboot_rst)
+		reset_control_assert(reboot_rst);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block zx_restart_handler = {
+	.notifier_call = zx_restart,
+	.priority = 129,
+};
+
+/* Fetch the board reset line from the DT node and hook the restart chain. */
+static int zx_reboot_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct device_node *np = pdev->dev.of_node;
+
+	reboot_rst = of_reset_control_get_by_index(np, 0);
+	/* BUG FIX: of_reset_control_get_by_index() returns an ERR_PTR on
+	 * failure, never NULL, so the old "!reboot_rst" check always passed
+	 * and zx_restart() could later assert an ERR_PTR value. */
+	if (IS_ERR(reboot_rst)) {
+		dev_err(&pdev->dev, "No reset handler found!");
+		reboot_rst = NULL;	/* zx_restart() tests for NULL */
+		return -EINVAL;
+	}
+
+	ret = register_restart_handler(&zx_restart_handler);
+	if (ret)
+		pr_warn("cannot register restart handler, %d\n", ret);
+
+	return 0;
+}
+
+static const struct of_device_id zx_reboot_match[] = {
+	{ .compatible = "zte,reboot", },
+	{ }
+};
+
+static struct platform_driver zx_reboot_driver = {
+	.probe = zx_reboot_probe,
+	.driver = {
+		.name = "zx_reboot",
+		.of_match_table = zx_reboot_match,
+	},
+};
+builtin_platform_driver(zx_reboot_driver)
+
+/*----------------------------------------------------------------*/
+#define CONFIG_RPMSG_LOG	1
+
+#ifdef CONFIG_RPMSG_LOG
+#define	 RPMSG_LOG_SIZE	(20*1024)
+static char rpmsg_printk_buf[RPMSG_LOG_SIZE];
+static u32  rpmsg_log_point = 0;
+static u32  rpmsg_log_turn = 0;
+static u32  rpmsg_sram_inited = 0;
+//static char rpmsg_log_temp_buf[512] = {0};
+static spinlock_t	rpmsg_log_lock;
+
+/*
+ * Append len bytes to the circular rpmsg log buffer (with NUL terminator),
+ * wrapping back to the start when the record would not fit.
+ * NOTE(review): the "+ 2" headroom appears to reserve space for the
+ * terminator plus the offset used by rpmsg_log_show() -- confirm.
+ */
+static void rpmsg_sram_cpy(char *s, unsigned len)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rpmsg_log_lock, flags);
+
+	if(rpmsg_log_point + len + 2 >= RPMSG_LOG_SIZE) {
+		/* wrap to the start; remember that the buffer has turned over */
+		rpmsg_log_point = 0;
+
+		if (!rpmsg_log_turn)
+			rpmsg_log_turn = 1;
+	}
+
+	memcpy(rpmsg_printk_buf+rpmsg_log_point, s, len);
+	rpmsg_log_point += len;
+	rpmsg_printk_buf[rpmsg_log_point]=0;
+
+	spin_unlock_irqrestore(&rpmsg_log_lock, flags);
+}
+#endif
+
+/*
+ * sysfs show for /sys/zte/test/rpmsg_log: dump the circular log buffer in
+ * chronological order -- if the buffer has wrapped, the oldest part (just
+ * past the write pointer) is printed before the newest part.
+ */
+static ssize_t rpmsg_log_show(struct kobject *kobj, struct kobj_attribute *attr,
+			  char *buf)
+{
+	char *s = buf;
+
+#ifdef CONFIG_RPMSG_LOG
+	unsigned long flags;
+
+	if (rpmsg_sram_inited) {
+		spin_lock_irqsave(&rpmsg_log_lock, flags);
+
+		if (!rpmsg_log_turn) {
+			s += sprintf(s, "%s", rpmsg_printk_buf);
+		}
+		else {
+			/* +2 skips the terminator left at the wrap point;
+			 * see rpmsg_sram_cpy() */
+			s += sprintf(s, "%s", rpmsg_printk_buf+rpmsg_log_point+2);
+			s += sprintf(s, "%s", rpmsg_printk_buf);
+		}
+
+		spin_unlock_irqrestore(&rpmsg_log_lock, flags);
+	}
+#endif
+
+	return (s - buf);
+}
+
+/**
+ * usage: like printk(...)
+ */
+/**
+ * rpmsg_printk - printf-style logging into the circular rpmsg log buffer.
+ * Each record is prefixed with a ">sec.usec<" timestamp.  No-op until
+ * rpmsg_sram_init() has run.
+ */
+void rpmsg_printk(const char *fmt, ...)
+{
+#ifdef CONFIG_RPMSG_LOG
+	va_list args;
+	unsigned long long t;
+	unsigned long nanosec_rem;
+	int tlen, len;
+	char rpmsg_log_temp_buf[512] = {0};
+
+	if(!rpmsg_sram_inited)
+		return;
+
+	va_start(args, fmt);
+
+	/* add time stamp */
+	t = cpu_clock(smp_processor_id());
+	nanosec_rem = do_div(t, 1000000000);
+	tlen = scnprintf(rpmsg_log_temp_buf, sizeof(rpmsg_log_temp_buf),
+		       ">%5lu.%06lu< ", (unsigned long) t, nanosec_rem / 1000);
+
+	/* BUG FIX: vsprintf() had no bound and could overrun the 512-byte
+	 * stack buffer; vscnprintf() returns the chars actually stored. */
+	len = vscnprintf(rpmsg_log_temp_buf + tlen,
+			 sizeof(rpmsg_log_temp_buf) - tlen, fmt, args);
+	len += tlen;
+
+	rpmsg_sram_cpy(rpmsg_log_temp_buf, len);
+
+	va_end(args);
+#endif
+}
+
+/* Reset the rpmsg log buffer and enable rpmsg_printk(). */
+void rpmsg_sram_init(void)
+{
+#ifdef CONFIG_RPMSG_LOG
+	pr_info("[RPMSG] LOG_INIT \n");
+
+	memset(rpmsg_printk_buf, 0, RPMSG_LOG_SIZE);
+	rpmsg_log_point = 0;
+	/* BUG FIX: also clear the wrap flag, otherwise a stale "turned"
+	 * state makes rpmsg_log_show() print from a bogus offset. */
+	rpmsg_log_turn = 0;
+
+	spin_lock_init(&rpmsg_log_lock);
+
+	rpmsg_sram_inited = 1;
+#endif
+}
diff --git a/upstream/linux-5.10/drivers/soc/sc/power/zx29-cpufreq.c b/upstream/linux-5.10/drivers/soc/sc/power/zx29-cpufreq.c
new file mode 100755
index 0000000..7eacfd7
--- /dev/null
+++ b/upstream/linux-5.10/drivers/soc/sc/power/zx29-cpufreq.c
@@ -0,0 +1,528 @@
+/*
+ * ZTE zx297510 dvfs driver
+ *
+ * Copyright (C) 2013 ZTE Ltd.
+ * 	by zxp
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/cpufreq.h>
+#include <linux/suspend.h>
+
+#include <linux/soc/zte/rpmsg.h>
+//#include "mach/clock.h"
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include "zx-pm.h"
+#define ZX_CPUFREQ_IOC_MAGIC     'W'
+
+/*ioctl cmd usd by device*/
+#define ZX_CPUFREQ_SET_FREQ         	_IOW(ZX_CPUFREQ_IOC_MAGIC, 1, char *)
+#define ZX_CPUFREQ_GET_FREQ           _IOW(ZX_CPUFREQ_IOC_MAGIC, 2, char *)
+
+#define ZX_CPUFREQ_DEV    "/dev/zx_cpufreq"
+
+#define	PM_FREQ_TRACE	1
+#if PM_FREQ_TRACE
+
+#define	FREQ_CHANGE_COUNT	20
+
+/* One entry in the in-memory frequency-change trace ring. */
+typedef struct
+{
+	volatile unsigned int old_index;
+	volatile unsigned int new_idex;
+	volatile unsigned int time;	/* microseconds since boot */
+}freq_change_view_trace_t;
+
+/* Fixed-size trace ring; freq_change_index is the next slot to write. */
+static freq_change_view_trace_t 	freq_change_view[FREQ_CHANGE_COUNT] ;
+static unsigned int				freq_change_index = 0;
+/* NOTE(review): this flag is used by zx_request_axi_freq()/
+ * zx29_cpufreq_init() but is only defined when PM_FREQ_TRACE is 1 --
+ * the build breaks if the trace is disabled; confirm intent. */
+static int cpufreq_driver_inited = 0;
+
+/*
+ * Record one frequency transition (old/new table indices plus a
+ * microsecond timestamp) into the circular trace buffer.
+ * No locking: concurrent callers may interleave -- debug aid only.
+ */
+void trace_freq_change(unsigned int old_index,unsigned int new_index)
+{
+	freq_change_view[freq_change_index].old_index = old_index;
+	freq_change_view[freq_change_index].new_idex = new_index;
+	freq_change_view[freq_change_index].time = ktime_to_us(ktime_get());
+	freq_change_index++;
+	if(freq_change_index == FREQ_CHANGE_COUNT)
+	{
+		freq_change_index = 0;
+	}
+}
+#else
+/* Tracing disabled: keep the symbol so callers link unchanged. */
+void trace_freq_change(unsigned int old_index,unsigned int new_index){}
+#endif
+
+unsigned int freq_change_enabled_by_startup = 0;
+static struct delayed_work 	pm_freq_work;
+#define PM_FREQ_DELAY 		msecs_to_jiffies(25000)
+
+/* for count change time by M0 */
+#define DEBUG_CPUFREQ_TIME	1
+
+#ifdef CONFIG_DDR_FREQ
+#ifdef CONFIG_ARCH_ZX297520V2
+#define	get_cur_ddr()				pm_read_reg_16(AXI_CURRENT_FREQ)
+#define	set_target_ddr(f)			pm_write_reg_16(AXI_AP2M0_TARGET, f)
+#define	set_ddr_req()				pm_write_reg_16(AXI_AP2M0_FLAG, 1)
+#define	clr_ddr_ack()				pm_write_reg_16(AXI_M02AP_ACK, 0)
+
+#define wait_ddr_ack()				while(!pm_read_reg_16(AXI_M02AP_ACK))
+#else
+static ddr_freq_regs *ddr_regs = (ddr_freq_regs *)IRAM_CHANGE_DDR_BASE;
+#define	get_cur_ddr()				(ddr_regs->cur_freq)
+#define	set_target_ddr(f)			(ddr_regs->ap_exp_freq = f)
+#define	set_ddr_req()				(ddr_regs->ap_req_flag = 1)
+
+#endif
+#endif
+
+//#undef CONFIG_AXI_FREQ
+#ifdef CONFIG_AXI_FREQ
+static DEFINE_MUTEX(axifreq_lock);
+
+static axi_freq_regs *axi_regs; // = (axi_freq_regs *)IRAM_CHANGE_AXI_BASE;
+static vol_dvs_regs *vol_regs; // = (vol_dvs_regs *)IRAM_CHANGE_DVS_BASE;
+
+#define	get_cur_axi()				(axi_regs->cur_freq)
+#define	set_target_axi_sw(f)				(axi_regs->ap_exp_freq = f)
+#define	set_axi_req()				(axi_regs->ap_req_flag = 1)
+
+#define get_target_axi_hw(addr)     (pm_read_reg(addr)&(0x7))
+
+#if 1
+#define DDR_FREQ_156M_HW             (0x4e)
+#define DDR_FREQ_208M_HW             (0x68)
+#define DDR_FREQ_312M_HW             (0x9c)
+#define DDR_FREQ_400M_HW             (0xc8)
+
+#define set_ddr_freq_hw(addr,f)      (pm_read_reg(addr)&(~0xff)|f)
+#define set_ddr_freq_sync(addr,f)    (pm_read_reg(addr)&(~0x1)|f)
+#endif
+
+#define	get_cur_vol()				(vol_regs->cur_vol)
+#define	set_target_vol(f)				(vol_regs->ap_exp_vol = f)
+#define	set_vol_req()				(vol_regs->ap_req_flag = 1)
+
+#if 0
+#define	WAIT_AXI_ACK_TIMEOUT		(jiffies + msecs_to_jiffies(2))	/* wait 2 ms, we count max 200us also */
+#define wait_axi_ack(timeout)		while(!pm_read_reg_16(AXI_M02AP_ACK) && time_before(jiffies, timeout))
+#else
+#define	WAIT_AXI_ACK_TIMEOUT		(200)			/* wait 120us, we count max 200us also */
+/*
+ * Busy-wait until both the voltage and AXI request flags in shared IRAM
+ * are cleared by M0, or until @timeout microseconds have elapsed.
+ * Pure spin (no sleep) -- callers expect the change within ~120 us.
+ */
+static void wait_axi_ack(unsigned timeout)
+{
+	ktime_t begin_time = ktime_get();
+	
+	while(((vol_regs->ap_req_flag) ||(axi_regs->ap_req_flag) )&& (unsigned)ktime_to_us(ktime_sub(ktime_get(), begin_time))<timeout);
+}
+#endif
+
+/*
+ * Kick the M0 core over rpmsg channel 1 so it processes the AXI/voltage
+ * request previously posted into shared IRAM.  Returns 0 when the full
+ * 4-byte message was written, otherwise the rpmsgWrite() result.
+ */
+static int send_msg_to_m0(void)
+{
+	unsigned int ap_m0_buf = AXI_VOL_CHANGE_ICP_BUF;		/* the icp interface need a buffer */
+	T_RpMsg_Msg Icp_Msg;
+	int				ret;	
+
+	Icp_Msg.coreID  = CORE_M0;
+	Icp_Msg.chID 	= 1;
+	Icp_Msg.flag 	= RPMSG_WRITE_INT;		/* 1- means send an icp interrupt> */
+	Icp_Msg.buf 	= &ap_m0_buf;
+	Icp_Msg.len 	= 0x4;	
+
+	ret = rpmsgWrite(&Icp_Msg);
+	if(Icp_Msg.len == ret)
+		return 0;
+	else
+		return ret;
+}
+
+/* Return true when AXI frequency scaling is not masked off by PM policy. */
+static int axi_freq_change_allowed(void)
+{
+	return (pm_get_mask_info() & PM_NO_AXI_FREQ) ? false : true;
+}
+
+/**
+ * request to change vol.
+ *
+ * vol_dvs: input vol enum
+ */
+/*
+ * Post the requested voltage into shared IRAM and, when it differs from
+ * the current one, raise the request flag so M0 performs the change.
+ * Note: the target is written even when unchanged.  Always returns 0.
+ */
+int request_vol(zx29_vol vol_dvs)
+{
+	unsigned int current_vol = get_cur_vol();
+
+	set_target_vol(vol_dvs);
+#if DEBUG_CPUFREQ_TIME		
+	pm_printk("[CPUFREQ] current_vol(%d)  request_vol(%d)  \n",(u32)current_vol,(u32)vol_dvs);	
+#endif
+	
+	if(vol_dvs != current_vol)
+	{
+		/* request freq */
+		set_vol_req();
+	}
+	
+	return 0;
+}
+
+/**
+ * input axi freq.
+ */
+/*
+ * Map an AXI frequency to its required core voltage: the 156 MHz
+ * operating point needs 0.9 V, anything lower runs at 0.85 V.
+ */
+static zx29_vol request_vol_by_axi(zx29_axi_freq axi_freq)
+{
+	return (axi_freq == AXI_FREQ_156M) ? VOL_VO_900 : VOL_VO_850;
+}
+
+/**
+ * set vol . 
+ *
+ * we will do this by M0.
+ */
+/**
+ * set vol .
+ *
+ * Look up the voltage the given AXI frequency needs and request it
+ * (the actual change is carried out by M0).
+ */
+static int set_vol_by_axi(zx29_axi_freq axi_freq)
+{
+	return request_vol(request_vol_by_axi(axi_freq));
+}
+
+
+/**
+ * request to change axi freq.
+ *
+ * axi_freq: input freq enum
+ */
+/*
+ * Request an AXI bus frequency change.
+ * HW path (SET_AXI_BY_HW): program the mux register directly.
+ * SW path: post the target into shared IRAM, notify M0 over rpmsg and
+ * busy-wait (bounded) for the ack.  Always returns 0.
+ * NOTE(review): in the SW path the closing braces of the if/else are
+ * interleaved with the DEBUG_CPUFREQ_TIME #if/#endif -- this only
+ * compiles with DEBUG_CPUFREQ_TIME == 1.  Confirm before ever turning
+ * the debug timing off.
+ */
+int request_axi_freq(zx29_axi_freq axi_freq)
+{
+    unsigned int  current_axi_freq = get_cur_axi();
+	unsigned int tmp;
+	int		 ret = 0;	
+	
+#if DEBUG_CPUFREQ_TIME	
+	ktime_t begin_time, end_time;
+	s64 total_time;
+#endif	
+
+	if(!axi_freq_change_allowed())
+		return 0;
+
+#ifdef SET_AXI_BY_HW
+	tmp = (pm_read_reg(PS_MATRIX_AXI_SEL)&(~0x7))|axi_freq;
+	pm_write_reg(PS_MATRIX_AXI_SEL,tmp);
+    pm_printk("[CPUFREQ] current_axi_freq(%d)  request_axi_freq(%d)  after_request_axi_freq(%d)  after_request_vol(%d)\n",(u32)current_axi_freq,(u32)axi_freq,get_cur_axi(),get_cur_vol());		
+#else
+	set_target_axi_sw(axi_freq);
+
+	if(axi_freq != current_axi_freq)
+	{
+		/* request freq */
+		set_axi_req();
+		
+//		set_vol_by_axi(axi_freq);//set vol
+
+		ret = send_msg_to_m0();	
+#if DEBUG_CPUFREQ_TIME	
+        begin_time = ktime_get();
+#endif
+        if(!ret)
+        {
+            /* wait axi freq changed ok! we will set a timeout for safety~ */
+            wait_axi_ack(WAIT_AXI_ACK_TIMEOUT);		
+        }
+        else
+        {
+            pm_printk("[CPUFREQ] request_axi_freq(%d) failed: (%d) \n",(u32)axi_freq, ret);
+        }
+
+#if DEBUG_CPUFREQ_TIME	
+        end_time = ktime_get();
+        total_time = ktime_to_us(ktime_sub(end_time, begin_time));
+        pm_printk("[CPUFREQ] total axi time: %d us  current_axi_freq(%d)  request_axi_freq(%d)  after_request_axi_freq(%d)  after_request_vol(%d)\n",(u32)total_time,(u32)current_axi_freq,(u32)axi_freq,get_cur_axi(),get_cur_vol());	
+	}	
+	else
+	{
+    pm_printk("[CPUFREQ]   current_axi_freq(%d)  request_axi_freq(%d) \n",(u32)current_axi_freq,(u32)axi_freq);	
+#endif
+	}
+#endif
+
+	return 0;
+}
+
+
+/**
+ * input cpu freq [KHz].
+ */
+/**
+ * input cpu freq [KHz].
+ *
+ * CPU clocks of 600 MHz and above need the 156 MHz AXI operating point;
+ * everything slower runs the bus at 78 MHz.
+ */
+static zx29_axi_freq request_axi_freq_by_cpu(unsigned int freq)
+{
+	return (freq >= 600*1000) ? AXI_FREQ_156M : AXI_FREQ_78M;
+}
+
+/**
+ * set axi freq . 
+ *
+ * we will do this by M0.
+ */
+/**
+ * set axi freq .
+ *
+ * Derive the AXI operating point from the CPU frequency (KHz) and
+ * request it; the change itself is carried out by M0.
+ */
+static int set_axi_frequency_by_cpu(unsigned int freq)
+{
+	return request_axi_freq(request_axi_freq_by_cpu(freq));
+}
+
+/*
+ * Public entry: request an AXI frequency, expressed in Hz.
+ * 0xff is the caller-side "invalid" sentinel; the cpufreq driver must be
+ * initialised first.  Returns 0 on success or a negative errno.
+ */
+int zx_request_axi_freq(unsigned int axifreq)
+{
+	zx29_axi_freq axi_freq;
+
+	if (axifreq == 0xff)
+		return -EINVAL;
+
+	if(cpufreq_driver_inited==0)
+			return -EPERM;
+
+	/* BUGFIX: this mapping used to "return AXI_FREQ_xxx" directly,
+	 * leaking the enum value as a return code and making the
+	 * request_axi_freq() call below unreachable. */
+	if(axifreq >= 600*1000*1000)
+		axi_freq = AXI_FREQ_156M;
+	else
+		axi_freq = AXI_FREQ_78M;
+
+	return request_axi_freq(axi_freq);
+}
+
+#endif
+
+
+#ifdef CONFIG_AXI_FREQ
+/**
+ * zx_axifreq_pm_notifier - acquire axifreq in suspend-resume context
+ *			
+ * @notifier
+ * @pm_event
+ * @v
+ *
+ */
+
+/*
+ * PM notifier: drop the AXI bus to 78 MHz before suspend and restore
+ * 156 MHz after resume.  Serialised by axifreq_lock; other pm_events
+ * are ignored.  Always returns NOTIFY_OK.
+ */
+static int zx_axifreq_pm_notifier(struct notifier_block *notifier,
+				       unsigned long pm_event, void *v)
+{
+	mutex_lock(&axifreq_lock);
+	
+	switch (pm_event) 
+	{
+	case PM_SUSPEND_PREPARE:
+		request_axi_freq(AXI_FREQ_78M);
+		break;
+
+	case PM_POST_SUSPEND:
+		request_axi_freq(AXI_FREQ_156M);
+		break;
+	}
+	
+	mutex_unlock(&axifreq_lock);
+
+	return NOTIFY_OK;
+}
+
+/* Registered by zx29_axifreq_init(). */
+static struct notifier_block zx_axifreq_nb = 
+{
+	.notifier_call = zx_axifreq_pm_notifier,
+};
+
+/*
+ * Register the suspend/resume AXI notifier and start at 156 MHz.
+ * NOTE(review): the late_initcall below is commented out, so nothing
+ * visible here ever calls this -- confirm who (if anyone) invokes it.
+ */
+static int __init zx29_axifreq_init(void)
+{
+
+	/* pm notify */
+	register_pm_notifier(&zx_axifreq_nb);
+//	request_vol(VOL_VO_900);
+	request_axi_freq(AXI_FREQ_156M);		
+
+	return 0;
+}
+
+//late_initcall(zx29_axifreq_init);
+#endif	
+
+/*=============================================================================
+ *========  zx29 DDR freq ===============================================
+ *** ap/phy request --> m0 notify --> jump to iram --> wait completely -->  ***
+ *** -->jump to ddr                                                     ***====
+ *=============================================================================
+ */
+#ifdef CONFIG_DDR_FREQ
+static DEFINE_MUTEX(ddrfreq_lock);
+/* Return true when DDR frequency scaling is not masked off by PM policy. */
+static int ddr_freq_change_allowed(void)
+{
+	return (pm_get_mask_info() & PM_NO_DDR_FREQ) ? false : true;
+}
+
+/*
+ * Notify the PS core over the PSM icp channel that a DDR frequency
+ * request was posted into shared IRAM.  Returns 0 when the full 4-byte
+ * message was written, otherwise the zDrvRpMsg_Write() result.
+ */
+static int send_msg_to_ps(void)
+{
+	unsigned int ap_m0_buf = AXI_VOL_CHANGE_ICP_BUF;		/* the icp interface need a buffer */
+	T_ZDrvRpMsg_Msg Icp_Msg;
+	int				ret;	
+	Icp_Msg.actorID = PS_ID;
+	Icp_Msg.chID 	= ICP_CHANNEL_PSM;
+	Icp_Msg.flag 	= RPMSG_WRITE_INT;		/* 1- means send an icp interrupt> */
+	Icp_Msg.buf 	= &ap_m0_buf;
+	Icp_Msg.len 	= 0x4;	
+	ret = zDrvRpMsg_Write(&Icp_Msg);
+	if(Icp_Msg.len == ret)
+		return 0;
+	else
+		return ret;
+}
+
+/*
+ * Hardware-driven DDR frequency change: drop the sync strobe, program
+ * the target selector, then raise the strobe to latch it.  Skipped
+ * entirely when DDR scaling is masked.  Always returns 0.
+ */
+int request_ddr_freq_hw(unsigned int ddr_freq)
+{
+    if(!ddr_freq_change_allowed())
+		return 0;
+	pm_write_reg(AP_DDR_FFC_SEL_SYNC,0x0);
+	pm_write_reg(AP_DDR_FFC_SEL,ddr_freq);
+	pm_write_reg(AP_DDR_FFC_SEL_SYNC,0x1);
+	return 0;
+}
+
+/*
+ * Request a DDR frequency change.
+ * Skips the request when scaling is masked or the target already equals
+ * the current frequency.  In the SW path the change is performed by the
+ * PS core after the target is posted into shared IRAM.  Always returns 0.
+ */
+int request_ddr_freq(zx29_ddr_freq ddr_freq)
+{
+	int		 ret = 0;
+	unsigned current_ddr_freq = get_cur_ddr();
+	if(!ddr_freq_change_allowed())
+		return 0;
+
+	if(ddr_freq == current_ddr_freq)
+		return 0;
+	
+#ifdef SET_DDR_BY_HW
+	/* NOTE(review): set_ddr_freq_sync() only *reads* the register and
+	 * computes a value that is discarded -- it performs no write.
+	 * Probably meant pm_write_reg(); compare request_ddr_freq_hw(). */
+    //set_ddr_freq_hw(AP_DDR_FFC_SEL, ddr_exp_freq);
+    set_ddr_freq_sync(AP_DDR_FFC_SEL_SYNC,0x1);
+#else
+	set_target_ddr(ddr_freq);
+	ret = send_msg_to_ps();
+	if(!ret)
+	{
+		printk("[DDRFREQ] ddr_freq [%d]\n",get_cur_ddr());
+	}
+	else
+	{
+		printk("[DDRFREQ] request_ddr_freq failed\n");
+	}
+#endif
+	/* (dead "#if 0" legacy M0-handshake implementation removed) */
+	return 0;
+}
+
+#ifdef CONFIG_ARCH_ZX297520V2
+/*
+ * Legacy (zx297520v2) DFS entry: jump to the IRAM-resident DDR change
+ * code with interrupts disabled for the duration of the switch.
+ */
+static void ddr_freq_handler(void)
+{
+	local_irq_disable();
+	waiting_ddr_dfs((unsigned long)DDR_DFS_CODE_ADDR);
+	local_irq_enable();	
+}
+#else
+/*
+ * PM notifier: park DDR at its lowest setting (selector 0) before
+ * suspend and restore 0x9c after resume, serialised by ddrfreq_lock.
+ * NOTE(review): 0x9c presumably matches DDR_FREQ_312M_HW -- confirm.
+ */
+static int zx_ddrfreq_pm_notifier(struct notifier_block *notifier,
+				       unsigned long pm_event, void *v)
+{
+	mutex_lock(&ddrfreq_lock);
+	switch (pm_event) 
+	{
+	case PM_SUSPEND_PREPARE:
+		request_ddr_freq_hw(0);
+		break;
+	case PM_POST_SUSPEND:
+		request_ddr_freq_hw(0x9c);
+		break;
+	}
+	mutex_unlock(&ddrfreq_lock);
+	return NOTIFY_OK;
+}
+static struct notifier_block zx_ddrfreq_nb = 
+{
+	.notifier_call = zx_ddrfreq_pm_notifier,
+};
+#endif
+
+/* Register the DDR suspend/resume notifier (non-v2 SoCs only). */
+static int __init zx29_ddrfreq_init(void)
+{
+#ifdef CONFIG_ARCH_ZX297520V2
+#else
+	register_pm_notifier(&zx_ddrfreq_nb);
+#endif
+	return 0;
+}
+
+#endif
+
+/* Placeholder callback for messages from M0; currently does nothing. */
+static void pm_m0_handler(void *buf, unsigned int len)
+{
+	/* deal msg from m0 */
+}
+
+/*
+ * Idempotent driver init: map the shared-IRAM AXI and voltage handshake
+ * areas and mark the driver ready (gates zx_request_axi_freq()).
+ * Always returns 0.
+ */
+int zx29_cpufreq_init(void)
+{
+	if(cpufreq_driver_inited)
+		return 0;
+
+	axi_regs = (axi_freq_regs *)IRAM_CHANGE_AXI_BASE;
+	vol_regs = (vol_dvs_regs *)IRAM_CHANGE_DVS_BASE;
+
+	cpufreq_driver_inited = 1;
+
+	pr_info("[CPUFREQ] zx29_cpufreq_init ok \n");
+	return 0;
+}
+
diff --git a/upstream/linux-5.10/drivers/soc/sc/rpmsg/zx29_icp.c b/upstream/linux-5.10/drivers/soc/sc/rpmsg/zx29_icp.c
new file mode 100755
index 0000000..aae42a2
--- /dev/null
+++ b/upstream/linux-5.10/drivers/soc/sc/rpmsg/zx29_icp.c
@@ -0,0 +1,490 @@
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/clockchips.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/syscore_ops.h>
+#include <linux/gpio.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/soc/sc/common.h>
+#include <linux/soc/sc/spinlock.h>
+
+#include "icp_dev.h"
+#include "zx29_icp.h"
+#include "icp_rpmsg.h"
+
+static icp_callback_fn	_icp_fn;
+static T_HalIcp_Reg *icp_ap2m0_reg;
+static T_HalIcp_Reg *icp_ap2ps_reg;
+
+/*
+ * Map a remote-core id to its ICP register bank.  Only CORE_M0 and
+ * CORE_PS0 are wired up on this SoC; any other id is a programming
+ * error and BUGs.
+ */
+static inline T_HalIcp_Reg *icp_get_reg(T_ZDrvRpMsg_ActorID actor_id)
+{
+	if (CORE_M0 == actor_id )
+		return icp_ap2m0_reg;
+	else if (CORE_PS0 == actor_id )
+		return icp_ap2ps_reg;
+	else
+		BUG();
+
+	/* BUGFIX: BUG() never returns, but without this the function falls
+	 * off the end of a non-void function (-Wreturn-type). */
+	return NULL;
+}
+
+/*******************************************************************************
+* Function: icp_set_int
+* Description: This function is used for generating icp interrupt to inform remote cpu;
+* Parameters:
+*   Input:
+           actorID: id of remote cpu
+           chID: id of channel
+*   Output:None
+*
+* Returns:None
+*
+*
+* Others:
+********************************************************************************/
+/*
+ * Raise the ICP doorbell interrupt for channel @chID towards @actorID.
+ * Channels 0-31 live in the low control word, 32+ in the high word.
+ * Returns 0, or -EINVAL on an out-of-range core/channel.
+ */
+static int icp_set_int(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID)
+{
+	T_HalIcp_Reg *icp_reg;
+
+    if ((actorID >= CORE_MAXID) || (chID >= CHANNEL_MAXID(actorID)))
+        return -EINVAL;
+
+	icp_reg = icp_get_reg(actorID);
+
+	/* BUGFIX: shift an unsigned constant -- (1<<31) on a signed int is
+	 * undefined behaviour */
+    if(chID<32)
+		icp_reg->control.low_word	= (0x1u<<chID);
+    else
+		icp_reg->control.high_word	= (0x1u<<(chID-32));
+
+	return 0;
+}
+  
+/*******************************************************************************
+* Function: icp_clear_int
+* Description: This function is used for clear icp interrupt from remote cpu;
+* Parameters:
+*   Input:
+           actorID: id of remote cpu
+           chID: id of channel
+*   Output:None
+*
+* Returns:None
+*
+*
+* Others:
+********************************************************************************/
+/*
+ * Acknowledge (clear) the pending ICP interrupt for channel @chID from
+ * @actorID.  No range check: callers have already validated the ids.
+ */
+static void icp_clear_int(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID)
+{
+	T_HalIcp_Reg *icp_reg = icp_get_reg(actorID);
+
+	/* BUGFIX: unsigned shift constant -- (1<<31) on int is UB */
+    if(chID<32)
+        icp_reg->clear.low_word  = (0x1u<<chID);
+    else
+       	icp_reg->clear.high_word = (0x1u<<(chID-32)) ;
+}
+
+/*******************************************************************************
+* Function: icp_get_int
+* Description: This function is used for get icp interrupt from remote cpu;
+* Parameters:
+*   Input:
+*           actorID: id of remote cpu
+*           chID: id of channel
+*   Output:None
+*
+* Returns:None
+*
+*
+* Others:
+********************************************************************************/
+/*
+ * Snapshot the 64-bit pending-interrupt state for @actorID.
+ * Returns an all-zero T_HalIcp_Dword for an out-of-range core id.
+ */
+T_HalIcp_Dword icp_get_int(T_ZDrvRpMsg_ActorID actorID)
+{
+	T_HalIcp_Dword IcpState;
+	T_HalIcp_Reg *icp_reg;
+
+    if (actorID >= CORE_MAXID)
+    {
+		IcpState.high_word	= 0;
+		IcpState.low_word	= 0;
+    
+        return IcpState;
+    }
+
+	icp_reg = icp_get_reg(actorID);
+	
+	IcpState.high_word 	= icp_reg->state.high_word;
+	IcpState.low_word 	= icp_reg->state.low_word;
+	
+	return IcpState;
+}
+
+/*******************************************************************************
+* Function: icp_get_int_state
+* Description: This function is used for get the state of icp interruptting  of remote cpu;
+* Parameters:
+*   Input:
+           actorID: id of remote cpu
+           chID: id of channel
+*   Output:None
+*
+* Returns:None
+*
+*
+* Others:
+********************************************************************************/
+/*
+ * Return true when the incoming-interrupt state bit for channel @chID of
+ * the remote core @actorID is set, false otherwise.
+ */
+static int icp_get_int_state(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID)
+{
+	T_HalIcp_Reg *icp_reg = icp_get_reg(actorID);
+	unsigned int pending;
+
+	/* channels 0-31 live in the low word, 32+ in the high word */
+	if (chID < 32)
+		pending = icp_reg->in_state.low_word & (0x1 << chID);
+	else
+		pending = icp_reg->in_state.high_word & (0x1 << (chID - 32));
+
+	return pending ? true : false;
+}
+	
+/*******************************************************************************
+* Function: icp_mask_int
+* Description: This function is used for Mask interrupt of channel;
+* Parameters:
+*   Input:
+*   Output:
+*
+* Returns:  NONE
+*
+*
+* Others:
+********************************************************************************/
+/*
+ * Mask (disable) the ICP interrupt of channel @chID towards @actorID.
+ * Returns 0, or -EINVAL on an out-of-range core/channel.
+ */
+static int icp_mask_int(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID)
+{
+	T_HalIcp_Reg *icp_reg;
+
+    if ((actorID >= CORE_MAXID) || (chID >= CHANNEL_MAXID(actorID)))
+        return -EINVAL;
+
+	icp_reg = icp_get_reg(actorID);
+
+	/* BUGFIX: unsigned shift constant -- (0x1<<31) on int is UB */
+    if(chID<32)
+        icp_reg->mask.low_word  |= (0x1u<<chID);
+    else
+        icp_reg->mask.high_word |= (0x1u<<(chID-32));
+
+	return 0;
+}
+
+/*******************************************************************************
+* Function: icp_unmask_int
+* Description: This function is used for unmask interrupt of channel;
+* Parameters:
+*   Input:
+*   Output:
+*
+* Returns:
+*            NONE
+*
+*
+* Others:
+********************************************************************************/
+/*
+ * Unmask (enable) the ICP interrupt of channel @chID towards @actorID.
+ * Returns 0, or -EINVAL on an out-of-range core/channel.
+ */
+static int icp_unmask_int(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID)
+{
+	T_HalIcp_Reg *icp_reg;
+
+    if ((actorID >= CORE_MAXID) || (chID >= CHANNEL_MAXID(actorID)))
+        return -EINVAL;
+
+	icp_reg = icp_get_reg(actorID);
+
+	/* BUGFIX: unsigned shift constant -- (0x1<<31) on int is UB */
+	if(chID < 32)
+		icp_reg->mask.low_word  &= ~(0x1u<<chID);
+    else
+		icp_reg->mask.high_word &= ~(0x1u<<(chID-32));
+
+	return 0;
+}
+
+int icp_int_count = 0;
+#ifdef CONFIG_ZX29_WATCHDOG
+extern void zx_wdt_icp_wake(void);
+#endif
+/*
+ * Shared ICP interrupt handler.  @data is the remote-core id smuggled in
+ * as the dev_id cookie at request_irq() time.  For every pending channel
+ * bit: optionally kick the watchdog (M0 channel 2), deliver the message
+ * to the registered callback, then ack the doorbell.
+ */
+irqreturn_t icp_isr(int irq, void *data)
+{
+	icp_msg	_icp_msg;	
+	T_HalIcp_Dword IcpState;	
+	unsigned int i;
+
+	/* BUGFIX: go through unsigned long so the pointer->int narrowing is
+	 * explicit and warning-free on 64-bit builds */
+	_icp_msg.src_id = (unsigned int)(unsigned long)data;
+
+	IcpState = icp_get_int(_icp_msg.src_id);
+
+	for(i=0; i<CHANNEL_MAXID(_icp_msg.src_id); i++)
+	{
+		if((((i<32)&&((IcpState.low_word>>i) & 0x1))||((i>=32)&&((IcpState.high_word>>(i-32)) & 0x1)))) {
+			_icp_msg.event_id = i;			
+		#ifdef CONFIG_ZX29_WATCHDOG			
+			if((CORE_M0 == _icp_msg.src_id)&&(2 == i))
+	  			zx_wdt_icp_wake();
+		#endif
+			if(_icp_fn)
+				_icp_fn(&_icp_msg);
+			
+			icp_clear_int(_icp_msg.src_id, i);
+		}
+	}
+
+	icp_int_count ++;
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * for loopback test
+ */
+/*
+ * for loopback test
+ *
+ * Deliver a synthetic ICP message straight to the registered callback,
+ * bypassing the hardware doorbell path.
+ */
+void fake_icp_isr(T_RpMsg_CoreID src_core, T_RpMsg_CoreID dest_core, T_RpMsg_ChID ch)
+{
+	icp_msg	_icp_msg;
+	/* (unused local 'i' removed) */
+
+	_icp_msg.src_id 	= src_core;
+	_icp_msg.dest_core 	= dest_core;	
+	_icp_msg.event_id 	= ch;
+
+	if(_icp_fn)
+		_icp_fn(&_icp_msg);
+}
+
+/*
+ * for get wake state
+ */
+/*
+ * for get wake state
+ *
+ * Snapshot both 32-bit halves of @actorID's pending-interrupt state into
+ * the caller-supplied words.
+ */
+void icp_get_int_info(T_ZDrvRpMsg_ActorID actorID, unsigned int *high_word, unsigned int *low_word)
+{
+	T_HalIcp_Dword state = icp_get_int(actorID);
+
+	*high_word = state.high_word;
+	*low_word  = state.low_word;
+}
+
+/* Human-readable purpose of each AP<->PS icp channel, indexed by channel
+ * id; gaps print as "NA" in show_icp_state(). */
+static const char * const ps_channel_info[64] = {
+	[0] = "drv test",
+	[2] = "Power Management",
+	[3] = "ADB agent",
+	[4] = "USB app config",
+	[5] = "USB kernel config",
+	[6] = "audio",
+	[7] = "console switch",
+	[8] = "NV",
+	[9] = "debug",
+	[10] = "ramdump",
+	[11] = "tee common",
+	[12] = "tee RPC",
+	[13] = "ap2cap message queue",
+	[14] = "cap2ap message queue",
+	[15] = "AMT framework",
+	[16] = "APP rsvd 16",
+	[17] = "APP rsvd 17",
+	[18] = "APP rsvd 18",
+	[19] = "APP rsvd 19",
+	[20] = "zvnet 20",
+	[21] = "zvnet 21",
+	[22] = "zvnet 22",
+	[23] = "zvnet 23",
+	[24] = "zvnet 24",
+	[25] = "zvnet 25",
+	[26] = "zvnet 26",
+	[27] = "zvnet 27",
+	[28] = "zvnet 28",
+	[29] = "free skb",
+	[30] = "ttygs0",
+	[31] = "ttygs1",
+	[32] = "socket ipc",
+	[33] = "binder ipc",
+	[34] = "at channel 34",
+	[35] = "at channel 35",
+	[36] = "at channel 36",
+	[37] = "at channel 37",
+	[38] = "at channel 38",
+	[39] = "at channel 39",
+	[40] = "at channel 40",
+	[41] = "voice buffer",
+};
+
+/*
+ * Sleep/wake debug aid: log which AP<->PS icp channels are pending and
+ * what each one is used for.  Only CORE_PS0 is supported; any other
+ * core id is silently ignored.
+ */
+void show_icp_state(T_ZDrvRpMsg_ActorID actorID)
+{
+	unsigned int	hw, lw;
+	int i;
+
+	if (actorID != CORE_PS0)
+		return;
+
+	icp_get_int_info(actorID, &hw, &lw);
+	pr_info("[SLP] icpwake: 0x%x 0x%x\n", hw, lw);
+
+	for (i=0; i<32; i++)
+		if (lw&BIT(i))
+			pr_info("[SLP] icpwake: channel(%d) function(%s)\n", i, ps_channel_info[i] ? ps_channel_info[i] : "NA");
+
+	for (i=0; i<32; i++)
+		if (hw&BIT(i))
+			pr_info("[SLP] icpwake: channel(%d) function(%s)\n", i+32, ps_channel_info[i+32] ? ps_channel_info[i+32] : "NA");
+}
+
+/* Install the single system-wide ICP receive callback.  No locking:
+ * expected to be set once during init, before interrupts can fire. */
+static void icp_register_callback(icp_callback_fn cb)
+{
+	_icp_fn = cb;
+}
+
+/*
+ * Ring the ICP doorbell for (dest_core, event_id) unless that channel's
+ * interrupt is already pending on the remote side.
+ * Returns 0 on success, -EINVAL on a NULL or out-of-range message.
+ */
+static int icp_send_message(unsigned int core_id, icp_msg *icp_msg)
+{
+	/* BUGFIX: range check used '>' so dest_core == CORE_MAXID slipped
+	 * through; every other validation in this file uses '>='. */
+	if(!icp_msg || icp_msg->dest_core >= CORE_MAXID )
+		return -EINVAL;
+		
+	if(icp_get_int_state(icp_msg->dest_core, icp_msg->event_id)==false)
+	{
+		icp_set_int(icp_msg->dest_core, icp_msg->event_id);
+	}
+	
+	return 0;	
+}
+
+/* Ops table handed to the generic icp device layer by the init paths. */
+static t_icpdev_ops zx29_icp_ops = {
+	.register_callback	= icp_register_callback,
+	.send_message		= icp_send_message, 
+	.mask_int			= icp_mask_int,
+	.unmask_int			= icp_unmask_int,
+	.set_int			= icp_set_int,
+};
+
+/*
+ * Probe-time init for the AP<->PS ICP mailbox: map the register bank,
+ * mask every channel, install the shared ISR (dev_id cookie = CORE_PS0),
+ * mark the irq wake-capable and bring up the rpmsg layer on top.
+ * Returns 0 on success or a negative errno.
+ */
+static int icp_ap2ps_init(struct device *dev)
+{
+	void __iomem *reg_base;
+	unsigned int irq;
+	int ret;
+	struct device_node *np = dev->of_node;
+
+	reg_base = of_iomap(np, 0);
+	if ( !reg_base ){
+		pr_err("%s: [ICP]Cannot get IORESOURCE_MEM\n", __func__);
+		return -ENOENT;
+	}
+	
+	icp_ap2ps_reg = (T_HalIcp_Reg *)reg_base;
+
+	irq = irq_of_parse_and_map(np, 0);
+	if( !irq ){
+		/* NOTE(review): reg_base stays mapped on this error path */
+		pr_err("%s: [ICP]Cannot get IORESOURCE_IRQ\n", __func__);
+		return -ENOENT;
+	}		
+
+	/* start with every doorbell masked; unmasked on demand later */
+	icp_ap2ps_reg->mask.high_word	= 0xffffffff;
+	icp_ap2ps_reg->mask.low_word	= 0xffffffff;
+
+	/* BUGFIX: dev_id is a void* cookie -- cast the core id explicitly
+	 * instead of passing the bare enum */
+	ret = request_irq(irq, icp_isr, 0, "zx_icp", (void *)CORE_PS0);
+	if (ret)
+	{
+		pr_err("%s: [ICP]register irq failed\n", __func__);
+		return ret;
+	}
+	
+	enable_irq_wake(irq);
+
+	icpdev_register_ops(&zx29_icp_ops);
+		
+	rpmsgInit(CORE_PS0, np);
+/*
+	dev->id = CORE_PS0;
+	ret = icp_rpmsg_device_register(dev);
+*/
+	/* BUGFIX: %llx with a pointer is a printk format mismatch; use %p */
+	pr_info("%s: ok! irq(%d) icp_address(%p)\n", __func__, irq, reg_base);
+
+	return ret;
+}
+
+/*
+ * Probe-time init for the AP<->M0 ICP mailbox: map the register bank,
+ * mask every channel, install the shared ISR (dev_id cookie = CORE_M0),
+ * mark the irq wake-capable and bring up the rpmsg layer on top.
+ * Returns 0 on success or a negative errno.
+ */
+static int icp_ap2m0_init(struct device *dev)
+{
+	void __iomem *reg_base;
+	unsigned int irq;
+	int ret;
+	struct device_node *np = dev->of_node;
+
+	pr_info("%s: enter \n", __func__);
+
+	reg_base = of_iomap(np, 0);
+	if ( !reg_base ){
+		pr_err("%s: [ICP]Cannot get IORESOURCE_MEM\n", __func__);
+		return -ENOENT;
+	}
+	
+	icp_ap2m0_reg = (T_HalIcp_Reg *)reg_base;
+
+	irq = irq_of_parse_and_map(np, 0);
+	if( !irq ){
+		/* NOTE(review): reg_base stays mapped on this error path */
+		pr_err("%s: [ICP]Cannot get IORESOURCE_IRQ\n", __func__);
+		return -ENOENT;
+	}		
+
+	/* start with every doorbell masked; unmasked on demand later */
+	icp_ap2m0_reg->mask.high_word	= 0xffffffff;
+	icp_ap2m0_reg->mask.low_word	= 0xffffffff;
+
+	/* BUGFIX: dev_id is a void* cookie -- cast the core id explicitly
+	 * instead of passing the bare enum */
+	ret = request_irq(irq, icp_isr, 0, "zx_icp", (void *)CORE_M0);
+	if (ret)
+	{
+		pr_err("%s: [ICP]register irq failed\n", __func__);
+		return ret;
+	}
+
+	enable_irq_wake(irq);
+
+	icpdev_register_ops(&zx29_icp_ops);
+
+	rpmsgInit(CORE_M0, np);
+
+	/* BUGFIX: %llx with a pointer is a printk format mismatch; use %p */
+	pr_info("%s: ok! irq(%d) icp_address(%p)\n", __func__, irq, reg_base);
+
+	return 0;
+}
+
+/* Per-compatible init routine selected by of_device_get_match_data(). */
+static const struct of_device_id zx29_icp_dt_ids[] = {
+	{	.compatible = "zte,zx29-icp-ap2m0", .data = &icp_ap2m0_init	}, 
+	{	.compatible = "zte,zx29-icp-ap2ps", .data = &icp_ap2ps_init	}, 		
+	{	/* sentinel */	}
+};
+
+/*
+ * Platform probe: dispatch to the matched link's init function
+ * (ap2m0 or ap2ps).  Returns -ENODEV when no match data is present.
+ */
+static int zx29_icp_probe(struct platform_device *pdev)
+{
+	int (*init_fn)(struct device *dev);
+
+	init_fn = of_device_get_match_data(&pdev->dev);
+	if (!init_fn) {
+		dev_err(&pdev->dev, "Error: No device match found\n");
+		return -ENODEV;
+	}
+
+	return init_fn(&pdev->dev);
+}
+	
+static struct platform_driver zx29_icp_driver = {
+	.driver = {
+		.name = "zx29-icp",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(zx29_icp_dt_ids),
+	},
+	.probe	= zx29_icp_probe,
+};
+
+builtin_platform_driver(zx29_icp_driver)
diff --git a/upstream/linux-5.10/drivers/soc/sc/spinlock/spinlock-zx297520v3.c b/upstream/linux-5.10/drivers/soc/sc/spinlock/spinlock-zx297520v3.c
new file mode 100755
index 0000000..7843e46
--- /dev/null
+++ b/upstream/linux-5.10/drivers/soc/sc/spinlock/spinlock-zx297520v3.c
@@ -0,0 +1,513 @@
+/*

+ * arch/arm/mach-zx297520v2/zx297520v2-clock.c

+ *

+ *  Copyright (C) 2015 ZTE-TSP

+ *

+ * This program is free software; you can redistribute it and/or modify

+ * it under the terms of the GNU General Public License as published by

+ * the Free Software Foundation; either version 2 of the License, or

+ * (at your option) any later version.

+ *

+ */

+

+#include <linux/kernel.h>

+#include <linux/interrupt.h>

+#include <linux/of_device.h>

+#include <linux/of_address.h>

+#include <linux/of_irq.h>

+#include <linux/platform_device.h>

+#include <linux/delay.h>

+#include <linux/mutex.h>

+#include <linux/device.h>

+#include <linux/module.h>

+#include <linux/cdev.h>

+#include <linux/fs.h>

+#include <linux/uaccess.h>

+#include <linux/soc/zte/spinlock.h>

+

+#define	USE_HW_SPINLOCK				1

+

+#define SHARED_DEVICE_REG1          (hwlock_reg_base + 0x170)

+#define SHARED_DEVICE_REG2          (hwlock_reg_base + 0x174)

+#define SHARED_DEVICE_REG3          (hwlock_reg_base + 0x178)

+#define SHARED_DEVICE_REG4          (hwlock_reg_base + 0x17C)

+

+#if USE_HW_SPINLOCK

+

+#define MACH_NR_SFLOCKS		SFLOCK_NUM

+#define MACH_NR_HWLOCKS		HWLOCK_NUM

+

+#define SELF_CORE_ID 		CORE_ID_AP

+

+/* now use 8*MACH_NR_SFLOCKS bytes */

+#define SOFTLOCK_DESC_BASE		(sf_base)//(SPINLOCK_SOFTLOCK_BASE)

+

+#define SPINLOCK_DEBUG		1

+

+#if SPINLOCK_DEBUG

+#define zspinlock_debug(fmt, ...) \

+	printk(KERN_INFO fmt, ##__VA_ARGS__)

+#else

+#define zspinlock_debug(fmt, ...)

+#endif

+

+#define zspinlock_assert(_EXP) BUG_ON(!_EXP)//ZDRV_ASSERT(_EXP)

+static DEFINE_MUTEX(zspinlock_mutex);

+static unsigned long s_hwSpinlockMsr[HWLOCK_NUM];

+/****************************************************************************

+* 	                                        Types

+****************************************************************************/

+struct zte_softlock_desc {

+	unsigned long	used;

+	unsigned long	owner;

+};

+/**************************************************************************

+ *                                                Global Variables                                    *

+ **************************************************************************/

+static volatile struct zte_softlock_desc *softlock_desc[MACH_NR_SFLOCKS];

+static void __iomem __force *hwlock_reg_base;

+static void __iomem __force *sf_base;

+static void __iomem __force *hwlock_regs[MACH_NR_HWLOCKS] ;

+/*

+=

+{

+	SHARED_DEVICE_REG1,

+	SHARED_DEVICE_REG2,

+	SHARED_DEVICE_REG3,

+	SHARED_DEVICE_REG4

+};

+*/

+extern void msleep(unsigned int msecs);

+

+ /*******************************************************************************
+ * Function: _hw_spin_lock
+ * Description: acquire hardware spinlock @hwid (0~3).  Disables local
+ *              IRQs (restored in _hw_spin_unlock), busy-waits until bit0
+ *              of the lock register reads 0, then stamps this core's id
+ *              into bits[31:24] as the owner.
+ * NOTE(review): assumes reading the lock register acquires it (bit0
+ *              set-on-read) -- confirm against the SoC lock IP manual.
+ ********************************************************************************/
+ static void  _hw_spin_lock(unsigned long hwid)
+{
+   unsigned long tmp;
+   unsigned long msr;
+   local_irq_save(msr);
+   s_hwSpinlockMsr[hwid] = msr;
+
+   while(ioread32(hwlock_regs[hwid])&0x1);
+   tmp = ioread32(hwlock_regs[hwid]);
+   tmp &= 0x00ffffff;
+   tmp |= (SELF_CORE_ID&0xff)<<24;
+   iowrite32(tmp, hwlock_regs[hwid]);
+
+}
+/*******************************************************************************
+ * Function: _hw_spin_unlock
+ * Description: release hardware spinlock @hwid (0~3).  BUGs if the owner
+ *              field does not match this core; clears bit0 (and the
+ *              owner field) and restores the IRQ state saved at lock time.
+ ********************************************************************************/
+static void  _hw_spin_unlock(unsigned long hwid)
+{
+   unsigned long tmp;
+
+
+   	if(SELF_CORE_ID != (ioread32(hwlock_regs[hwid])&0xff000000)>>24){
+		zspinlock_assert(0);
+   	}
+  	 tmp = ioread32(hwlock_regs[hwid]);
+	 tmp &= 0x00fffffe;
+	 iowrite32(tmp, hwlock_regs[hwid]);
+
+	 local_irq_restore(s_hwSpinlockMsr[hwid]);
+}

+/*******************************************************************************
+ * Function: hw_spin_lock
+ * Description: public wrapper -- acquire hardware lock id 0~2.
+ *              Id 3 is reserved for the software-lock implementation and
+ *              must not be used by external drivers.
+ ********************************************************************************/
+void  hw_spin_lock(emhw_lock_id hwid)
+{
+   _hw_spin_lock(hwid);
+//   zspinlock_debug("cpu %d gets %d hardware lock!/n",SELF_CORE_ID,hwid);
+}
+/*******************************************************************************
+ * Function: hw_spin_unlock
+ * Description: public wrapper -- release hardware lock id 0~2.
+ ********************************************************************************/
+void  hw_spin_unlock(emhw_lock_id hwid)
+{
+   _hw_spin_unlock(hwid);
+//   zspinlock_debug("cpu %d releases %d hardware lock!/n",SELF_CORE_ID,hwid);
+}

+/*******************************************************************************
+ * Function: soft_spin_lock
+ * Description: acquire the cross-core software lock @sfid.  Spins while
+ *              another core holds it, sleeping 5 ms every 1000 polls
+ *              (so this must NOT be called from atomic context), then
+ *              takes the shared HW lock to update the descriptor
+ *              atomically.  Recursive for the same core ('used' counts).
+ * NOTE(review): 'lock_count' is function-static, so concurrent callers
+ *              share the poll counter -- confirm that is intentional.
+ ********************************************************************************/
+void  soft_spin_lock(emsf_lock_id sfid)
+{
+	static unsigned long lock_count = 0;
+
+softlock_loop:
+   	while(softlock_desc[sfid]->owner != SELF_CORE_ID && softlock_desc[sfid]->used)
+   	{
+   		lock_count++;
+		if(lock_count == 1000)
+		{
+			lock_count = 0;
+			msleep(5);
+		}
+   	}
+
+	/* re-check under the hardware lock: the wait loop above is racy */
+	_hw_spin_lock(SOFTLOCK_HWLOCK);
+	if(softlock_desc[sfid]->owner != SELF_CORE_ID && softlock_desc[sfid]->used)
+   	{
+	      _hw_spin_unlock(SOFTLOCK_HWLOCK);
+		goto softlock_loop;
+   	}
+    softlock_desc[sfid]->used ++;
+    softlock_desc[sfid]->owner = SELF_CORE_ID;
+    _hw_spin_unlock(SOFTLOCK_HWLOCK);
+    //zspinlock_debug("cpu %d releases %d software lock!/n",SELF_CORE_ID,sfid);
+
+}

+#if 1
+/*
+ * Non-blocking variant for the printk path: same protocol as
+ * soft_spin_lock() but gives up after ~5000 polls instead of sleeping.
+ * Returns 0 on success, -1 when the lock could not be taken in time.
+ */
+int  soft_spin_lock_printf(emsf_lock_id sfid)
+{
+	static unsigned long lock_count = 0;
+softlock_loop:
+   	while(softlock_desc[sfid]->owner != SELF_CORE_ID && softlock_desc[sfid]->used)
+   	{
+   		ndelay(1);
+   		lock_count++;
+		if(lock_count >= 5000)
+		{
+			lock_count = 0;
+			return -1;
+		}
+   	}
+	/* re-check under the hardware lock: the wait loop above is racy */
+	_hw_spin_lock(SOFTLOCK_HWLOCK);
+	if(softlock_desc[sfid]->owner != SELF_CORE_ID && softlock_desc[sfid]->used)
+   	{
+	      _hw_spin_unlock(SOFTLOCK_HWLOCK);
+		goto softlock_loop;
+   	}
+    softlock_desc[sfid]->used ++;
+    softlock_desc[sfid]->owner = SELF_CORE_ID;
+    _hw_spin_unlock(SOFTLOCK_HWLOCK);
+	return 0;
+}
+#endif

+/*******************************************************************************
+ * Function: soft_spin_unlock
+ * Description: release the software lock taken by soft_spin_lock().
+ *              BUGs if the calling core is not the recorded owner;
+ *              decrements the recursion count under the HW lock and
+ *              clears the owner once it reaches zero.
+ * NOTE(review): the initial 'used' test happens before the HW lock is
+ *              taken -- benign for correctly paired lock/unlock, but
+ *              racy if misused.
+ ********************************************************************************/
+ void  soft_spin_unlock(emsf_lock_id sfid)
+{
+    if(softlock_desc[sfid]->used){
+		if(SELF_CORE_ID != softlock_desc[sfid]->owner){
+			zspinlock_assert(0);
+		}
+	_hw_spin_lock(SOFTLOCK_HWLOCK);
+   	softlock_desc[sfid]->used --;
+	if(softlock_desc[sfid]->used == 0) {
+   		softlock_desc[sfid]->owner = 0x0;
+	}
+   	_hw_spin_unlock(SOFTLOCK_HWLOCK);
+	//zspinlock_debug("cpu %d releases %d software lock!/n",SELF_CORE_ID,sfid);
+   }
+}

+

+/*******************************************************************************
+ * Function: soft_spin_lock_psm
+ * Description: Driver-side interface to acquire a software lock (PSM path).
+ * Parameters:
+ *   Input:	sfid: software lock id.
+ *			coreid: cpu id currently holding the software lock sfid.
+ *   Output:
+ *
+ * Returns:
+ *
+ *
+ * Others:
+ ********************************************************************************/

+/*
+ * Acquire the software lock for the PSM path.  Same protocol as
+ * soft_spin_lock() but with a pure busy-wait (no sleeping back-off):
+ * spin until the lock looks free or already ours, then confirm and
+ * claim it under the hardware spinlock.
+ */
+void  soft_spin_lock_psm(emsf_lock_id sfid)
+{
+	struct zte_softlock_desc *desc = softlock_desc[sfid];
+
+	for (;;) {
+		/* busy-wait until free or already owned by this core */
+		while (desc->owner != SELF_CORE_ID && desc->used)
+			;
+
+		_hw_spin_lock(SOFTLOCK_HWLOCK);
+		if (!(desc->owner != SELF_CORE_ID && desc->used))
+			break;	/* still ours to take - claim below */
+		/* lost the race - drop the hw lock and spin again */
+		_hw_spin_unlock(SOFTLOCK_HWLOCK);
+	}
+
+	desc->used++;
+	desc->owner = SELF_CORE_ID;
+	_hw_spin_unlock(SOFTLOCK_HWLOCK);
+}

+

+/*******************************************************************************
+ * Function: soft_spin_unlock_psm
+ * Description: Release interface paired with soft_spin_lock_psm().
+ * Parameters:
+ *   Input:  sfid: software lock id.
+ *
+ *   Output:
+ *
+ * Returns:
+ *
+ *
+ * Others:
+ ********************************************************************************/

+/* PSM-path release: identical to soft_spin_unlock(). */
+void  soft_spin_unlock_psm(emsf_lock_id sfid)
+{
+	soft_spin_unlock(sfid);
+}

+

+/*******************************************************************************
+ * Function: reg_spin_lock
+ * Description: Driver-side interface to acquire the shared-register lock.
+ * Parameters:
+ *   Input:
+ *   Output:
+ *
+ * Returns:
+ *
+ *
+ * Others:
+ ********************************************************************************/

+/* Take the cross-core register lock (hardware lock REGLOCK_HWLOCK). */
+void reg_spin_lock(void)
+{
+   _hw_spin_lock(REGLOCK_HWLOCK);
+    /* record this core as holder, for status queries via REG_SFLOCK */
+    softlock_desc[REG_SFLOCK]->owner = SELF_CORE_ID;
+}
+EXPORT_SYMBOL(reg_spin_lock);

+

+/*******************************************************************************
+ * Function: reg_spin_unlock
+ * Description: Release interface paired with reg_spin_lock().
+ * Parameters:
+ *   Input:
+ *
+ *   Output:
+ *
+ * Returns:
+ *
+ *
+ * Others:
+ ********************************************************************************/

+/* Drop the cross-core register lock; owner is cleared before the hardware
+ * lock is released so observers never see a released lock still owned. */
+ void  reg_spin_unlock(void)
+{
+    softlock_desc[REG_SFLOCK]->owner = 0x0;
+	_hw_spin_unlock(REGLOCK_HWLOCK);
+
+}
+EXPORT_SYMBOL(reg_spin_unlock);

+

+/*******************************************************************************
+ * Function: softspinlock_init
+ * Description: Software lock initialization.
+ * Parameters:
+ *   Input:
+ *
+ *   Output:
+ *
+ * Returns:
+ *
+ *
+ * Others:
+ ********************************************************************************/

+int softspinlock_init(void)
+{
+    int i;
+
+    /* Map each descriptor slot onto the shared-memory region.  The fields
+     * are left untouched (see commented-out lines) - presumably because the
+     * other core may already be using the descriptors; confirm before
+     * re-enabling the clears.
+     */
+    for(i = 0; i<MACH_NR_SFLOCKS; i++){
+    	softlock_desc[i] =
+			(struct zte_softlock_desc *)(SOFTLOCK_DESC_BASE +i*sizeof(struct zte_softlock_desc));
+		//softlock_desc[i]->used = 0;
+		//softlock_desc[i]->owner= CORE_ID_NUM;
+    }
+	/* NOTE(review): "%x" with an (int) cast truncates the address on 64-bit
+	 * builds - confirm this SoC is 32-bit only. */
+	zspinlock_debug("softspinlock init success base=0x%x!",(int)SOFTLOCK_DESC_BASE);
+	return 0;
+}

+

+/* Character-device bookkeeping for the /dev/softspinlock control node. */
+typedef struct _zx29_softspinlock_ser 
+{
+	struct cdev 	cdev;			/* backing char device */
+	struct module 	*owner;			/* module reference for the cdev */
+	struct class *classes;  		/* sysfs class used by device_create() */
+	const struct file_operations *ops;	/* ioctl entry points */
+}zx29_softspinlock_ser; 
+
+static zx29_softspinlock_ser softspinlock_zx29 = {
+	.owner   		= THIS_MODULE,
+};

+

+/*
+ * Query the status of software lock @sfid without acquiring it.
+ * Returns 1 when held by another core, 0 when free or held by this core,
+ * -EFAULT for an out-of-range id.
+ * NOTE(review): negative ids are not rejected here; callers must validate.
+ */
+int soft_spin_lock_get(emsf_lock_id sfid)
+{
+	if (sfid >= SFLOCK_NUM)
+		return -EFAULT;
+
+	return (softlock_desc[sfid]->used &&
+		softlock_desc[sfid]->owner != SELF_CORE_ID) ? 1 : 0;
+}

+

+/*
+ * ioctl entry for /dev/softspinlock.
+ *
+ * SPINLOCK_GET_STATUS: @arg is a user pointer to one byte holding a lock
+ * id; the byte is overwritten in place with the lock status from
+ * soft_spin_lock_get().
+ *
+ * Fixes vs. original: @arg is now cast to (void __user *) for the
+ * copy_{from,to}_user() calls, and k_arg is unsigned - with a signed char,
+ * ids >= 128 became negative, bypassed the range check and indexed
+ * softlock_desc[] out of bounds.
+ */
+static long softspinlock_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	int ret = 0;
+	unsigned char k_arg;
+
+	switch(cmd)
+	{
+		case SPINLOCK_GET_STATUS:
+			ret = copy_from_user(&k_arg, (void __user *)arg, sizeof(char));
+			if (ret)
+				return -EFAULT;
+
+			/* reject out-of-range ids before touching softlock_desc[] */
+			if(k_arg >= SFLOCK_NUM)
+				return -EFAULT;
+
+			k_arg = (unsigned char)soft_spin_lock_get(k_arg);
+			ret = copy_to_user((void __user *)arg, &k_arg, sizeof(char));
+			if (ret)
+				return -EFAULT;
+			break;
+		default:
+			return -EPERM;
+	}
+
+	return ret;
+}

+

+

+/* ioctl-only control node; no read/write/open hooks are needed. */
+static const struct file_operations	softspinlock_ops = {
+	.owner 		= THIS_MODULE,
+	.unlocked_ioctl = softspinlock_ioctl,
+};

+

+

+/*
+ * Register the /dev/softspinlock char device: chrdev region -> cdev ->
+ * class -> device node.
+ *
+ * Fix vs. original: the error paths now unwind in reverse order - the
+ * original leaked the cdev and the chrdev region when class_create()
+ * failed, and never checked device_create().
+ */
+static int __init softspinlock_dev_init(void)
+{
+	int ret = 0;
+	dev_t dev;
+	struct device *d;
+
+	softspinlock_zx29.ops = &softspinlock_ops;
+
+	ret = alloc_chrdev_region(&dev, 0, 1, "softspinlock");
+	if (ret)
+	{
+		printk(KERN_ERR "%s: softspinlock failed to allocate char dev region\n",
+			__FILE__);
+		return ret;
+	}
+
+	cdev_init(&softspinlock_zx29.cdev, &softspinlock_ops);
+	softspinlock_zx29.cdev.owner = softspinlock_zx29.owner;
+
+	ret = cdev_add(&softspinlock_zx29.cdev, dev, 1);
+	if (ret)
+	{
+		printk(KERN_ERR "%s: softspinlock failed to add cdev\n",
+			__FILE__);
+		goto err_unregister;
+	}
+
+	softspinlock_zx29.classes = class_create(THIS_MODULE, "softspinlock");
+	if (IS_ERR(softspinlock_zx29.classes))
+	{
+		ret = PTR_ERR(softspinlock_zx29.classes);
+		goto err_cdev;
+	}
+
+	d = device_create(softspinlock_zx29.classes, NULL, dev, NULL, "softspinlock");
+	if (IS_ERR(d))
+	{
+		ret = PTR_ERR(d);
+		goto err_class;
+	}
+
+	printk("[xxx] softspinlock dev inited! \n");	
+
+	return 0;
+
+err_class:
+	class_destroy(softspinlock_zx29.classes);
+err_cdev:
+	cdev_del(&softspinlock_zx29.cdev);
+err_unregister:
+	unregister_chrdev_region(dev, 1);
+	return ret;
+}

+

+/*
+ * zx_spinlock_init - wire up the spinlock driver from platform data.
+ * @spinlock_base: points to an array of two iomem cookies:
+ *                 [0] hardware spinlock register base,
+ *                 [1] shared-memory base for the software lock descriptors.
+ */
+void zx_spinlock_init(void __iomem *spinlock_base)
+{
+	void __iomem **data = (void __iomem **)spinlock_base;
+
+	hwlock_reg_base = data[0];
+	sf_base = data[1];
+
+	/* fixed register offsets of the four shared hardware locks */
+	hwlock_regs[0] = SHARED_DEVICE_REG1;
+	hwlock_regs[1] = SHARED_DEVICE_REG2;
+	hwlock_regs[2] = SHARED_DEVICE_REG3;
+	hwlock_regs[3] = SHARED_DEVICE_REG4;
+
+	softspinlock_init();	
+}

+

+//arch_initcall(softspinlock_init);
+/* Stub implementations used when the soft-spinlock support above is
+ * configured out (the matching #if is earlier in this file). */
+#else
+int softspinlock_init(void){return 0;}
+void reg_spin_lock(void){}
+void reg_spin_unlock(void){}
+void soft_spin_lock(emsf_lock_id sfid){}
+void soft_spin_unlock(emsf_lock_id sfid){}
+void  soft_spin_lock_psm(emsf_lock_id sfid){}
+void  soft_spin_unlock_psm(emsf_lock_id sfid){}
+void  hw_spin_lock(emhw_lock_id hwid){}
+void  hw_spin_unlock(emhw_lock_id hwid){}
+static int __init softspinlock_dev_init(void){return 0;}
+#endif
+
+module_init(softspinlock_dev_init);

+

diff --git a/upstream/linux-5.10/drivers/staging/Kconfig b/upstream/linux-5.10/drivers/staging/Kconfig
new file mode 100755
index 0000000..443587b
--- /dev/null
+++ b/upstream/linux-5.10/drivers/staging/Kconfig
@@ -0,0 +1,122 @@
+# SPDX-License-Identifier: GPL-2.0
+menuconfig STAGING
+	bool "Staging drivers"
+	help
+	  This option allows you to select a number of drivers that are
+	  not of the "normal" Linux kernel quality level.  These drivers
+	  are placed here in order to get a wider audience to make use of
+	  them.  Please note that these drivers are under heavy
+	  development, may or may not work, and may contain userspace
+	  interfaces that most likely will be changed in the near
+	  future.
+
+	  Using any of these drivers will taint your kernel which might
+	  affect support options from both the community, and various
+	  commercial support organizations.
+
+	  If you wish to work on these drivers, to help improve them, or
+	  to report problems you have with them, please see the
+	  drivers/staging/<driver_name>/TODO file to see what needs to be
+	  worked on, and who to contact.
+
+	  If in doubt, say N here.
+
+
+if STAGING
+
+source "drivers/staging/wlan-ng/Kconfig"
+
+source "drivers/staging/comedi/Kconfig"
+
+source "drivers/staging/olpc_dcon/Kconfig"
+
+source "drivers/staging/rtl8192u/Kconfig"
+
+source "drivers/staging/rtl8192e/Kconfig"
+
+source "drivers/staging/rtl8723bs/Kconfig"
+
+source "drivers/staging/rtl8712/Kconfig"
+
+source "drivers/staging/rtl8188eu/Kconfig"
+
+source "drivers/staging/rts5208/Kconfig"
+
+source "drivers/staging/octeon/Kconfig"
+
+source "drivers/staging/octeon-usb/Kconfig"
+
+source "drivers/staging/vt6655/Kconfig"
+
+source "drivers/staging/vt6656/Kconfig"
+
+source "drivers/staging/iio/Kconfig"
+
+source "drivers/staging/sm750fb/Kconfig"
+
+source "drivers/staging/emxx_udc/Kconfig"
+
+source "drivers/staging/nvec/Kconfig"
+
+source "drivers/staging/media/Kconfig"
+
+source "drivers/staging/android/Kconfig"
+
+source "drivers/staging/board/Kconfig"
+
+source "drivers/staging/gdm724x/Kconfig"
+
+source "drivers/staging/fwserial/Kconfig"
+
+source "drivers/staging/goldfish/Kconfig"
+
+source "drivers/staging/netlogic/Kconfig"
+
+source "drivers/staging/gs_fpgaboot/Kconfig"
+
+source "drivers/staging/unisys/Kconfig"
+
+source "drivers/staging/clocking-wizard/Kconfig"
+
+source "drivers/staging/fbtft/Kconfig"
+
+source "drivers/staging/fsl-dpaa2/Kconfig"
+
+source "drivers/staging/most/Kconfig"
+
+source "drivers/staging/ks7010/Kconfig"
+
+source "drivers/staging/greybus/Kconfig"
+
+source "drivers/staging/vc04_services/Kconfig"
+
+source "drivers/staging/pi433/Kconfig"
+
+source "drivers/staging/mt7621-pci/Kconfig"
+
+source "drivers/staging/mt7621-pci-phy/Kconfig"
+
+source "drivers/staging/mt7621-pinctrl/Kconfig"
+
+source "drivers/staging/mt7621-dma/Kconfig"
+
+source "drivers/staging/ralink-gdma/Kconfig"
+
+source "drivers/staging/mt7621-dts/Kconfig"
+
+source "drivers/staging/gasket/Kconfig"
+
+source "drivers/staging/axis-fifo/Kconfig"
+
+source "drivers/staging/fieldbus/Kconfig"
+
+source "drivers/staging/kpc2000/Kconfig"
+
+source "drivers/staging/qlge/Kconfig"
+
+source "drivers/staging/wfx/Kconfig"
+
+source "drivers/staging/hikey9xx/Kconfig"
+
+# Out-of-tree voicebufferdrv driver added by this vendor patch
+source "drivers/staging/voicebufferdrv/Kconfig"
+endif # STAGING
diff --git a/upstream/linux-5.10/drivers/staging/Makefile b/upstream/linux-5.10/drivers/staging/Makefile
new file mode 100755
index 0000000..0cb5246
--- /dev/null
+++ b/upstream/linux-5.10/drivers/staging/Makefile
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for staging directory
+
+obj-y				+= media/
+obj-$(CONFIG_PRISM2_USB)	+= wlan-ng/
+obj-$(CONFIG_COMEDI)		+= comedi/
+obj-$(CONFIG_FB_OLPC_DCON)	+= olpc_dcon/
+obj-$(CONFIG_RTL8192U)		+= rtl8192u/
+obj-$(CONFIG_RTL8192E)		+= rtl8192e/
+obj-$(CONFIG_RTL8723BS)		+= rtl8723bs/
+obj-$(CONFIG_R8712U)		+= rtl8712/
+obj-$(CONFIG_R8188EU)		+= rtl8188eu/
+obj-$(CONFIG_RTS5208)		+= rts5208/
+obj-$(CONFIG_NETLOGIC_XLR_NET)	+= netlogic/
+obj-$(CONFIG_OCTEON_ETHERNET)	+= octeon/
+obj-$(CONFIG_OCTEON_USB)	+= octeon-usb/
+obj-$(CONFIG_VT6655)		+= vt6655/
+obj-$(CONFIG_VT6656)		+= vt6656/
+obj-$(CONFIG_VME_BUS)		+= vme/
+obj-$(CONFIG_IIO)		+= iio/
+obj-$(CONFIG_FB_SM750)		+= sm750fb/
+obj-$(CONFIG_USB_EMXX)		+= emxx_udc/
+obj-$(CONFIG_MFD_NVEC)		+= nvec/
+obj-$(CONFIG_ANDROID)		+= android/
+obj-$(CONFIG_STAGING_BOARD)	+= board/
+obj-$(CONFIG_LTE_GDM724X)	+= gdm724x/
+obj-$(CONFIG_FIREWIRE_SERIAL)	+= fwserial/
+obj-$(CONFIG_GOLDFISH)		+= goldfish/
+obj-$(CONFIG_GS_FPGABOOT)	+= gs_fpgaboot/
+obj-$(CONFIG_UNISYSSPAR)	+= unisys/
+obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD)	+= clocking-wizard/
+obj-$(CONFIG_FB_TFT)		+= fbtft/
+obj-$(CONFIG_FSL_DPAA2)		+= fsl-dpaa2/
+obj-$(CONFIG_MOST)		+= most/
+obj-$(CONFIG_KS7010)		+= ks7010/
+obj-$(CONFIG_GREYBUS)		+= greybus/
+obj-$(CONFIG_BCM2835_VCHIQ)	+= vc04_services/
+obj-$(CONFIG_PI433)		+= pi433/
+obj-$(CONFIG_PCI_MT7621)	+= mt7621-pci/
+obj-$(CONFIG_PCI_MT7621_PHY)	+= mt7621-pci-phy/
+obj-$(CONFIG_PINCTRL_RT2880)	+= mt7621-pinctrl/
+obj-$(CONFIG_SOC_MT7621)	+= mt7621-dma/
+obj-$(CONFIG_DMA_RALINK)	+= ralink-gdma/
+obj-$(CONFIG_SOC_MT7621)	+= mt7621-dts/
+obj-$(CONFIG_STAGING_GASKET_FRAMEWORK)	+= gasket/
+obj-$(CONFIG_XIL_AXIS_FIFO)	+= axis-fifo/
+obj-$(CONFIG_FIELDBUS_DEV)     += fieldbus/
+obj-$(CONFIG_KPC2000)		+= kpc2000/
+obj-$(CONFIG_QLGE)		+= qlge/
+obj-$(CONFIG_WFX)		+= wfx/
+obj-y				+= hikey9xx/
+# Out-of-tree voice buffer driver added by this vendor patch
+obj-$(CONFIG_VOICE_BUFFER_DRV)		+= voicebufferdrv/
diff --git a/upstream/linux-5.10/drivers/staging/voicebufferdrv/voice_buffer_dev_multicore.c b/upstream/linux-5.10/drivers/staging/voicebufferdrv/voice_buffer_dev_multicore.c
new file mode 100755
index 0000000..9c7d0c3
--- /dev/null
+++ b/upstream/linux-5.10/drivers/staging/voicebufferdrv/voice_buffer_dev_multicore.c
Binary files differ
diff --git a/upstream/linux-5.10/drivers/tty/tty_io.c b/upstream/linux-5.10/drivers/tty/tty_io.c
new file mode 100755
index 0000000..669aef7
--- /dev/null
+++ b/upstream/linux-5.10/drivers/tty/tty_io.c
@@ -0,0 +1,3602 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+/*
+ * 'tty_io.c' gives an orthogonal feeling to tty's, be they consoles
+ * or rs-channels. It also implements echoing, cooked mode etc.
+ *
+ * Kill-line thanks to John T Kohl, who also corrected VMIN = VTIME = 0.
+ *
+ * Modified by Theodore Ts'o, 9/14/92, to dynamically allocate the
+ * tty_struct and tty_queue structures.  Previously there was an array
+ * of 256 tty_struct's which was statically allocated, and the
+ * tty_queue structures were allocated at boot time.  Both are now
+ * dynamically allocated only when the tty is open.
+ *
+ * Also restructured routines so that there is more of a separation
+ * between the high-level tty routines (tty_io.c and tty_ioctl.c) and
+ * the low-level tty routines (serial.c, pty.c, console.c).  This
+ * makes for cleaner and more compact code.  -TYT, 9/17/92
+ *
+ * Modified by Fred N. van Kempen, 01/29/93, to add line disciplines
+ * which can be dynamically activated and de-activated by the line
+ * discipline handling modules (like SLIP).
+ *
+ * NOTE: pay no attention to the line discipline code (yet); its
+ * interface is still subject to change in this version...
+ * -- TYT, 1/31/92
+ *
+ * Added functionality to the OPOST tty handling.  No delays, but all
+ * other bits should be there.
+ *	-- Nick Holloway <alfie@dcs.warwick.ac.uk>, 27th May 1993.
+ *
+ * Rewrote canonical mode and added more termios flags.
+ * 	-- julian@uhunix.uhcc.hawaii.edu (J. Cowley), 13Jan94
+ *
+ * Reorganized FASYNC support so mouse code can share it.
+ *	-- ctm@ardi.com, 9Sep95
+ *
+ * New TIOCLINUX variants added.
+ *	-- mj@k332.feld.cvut.cz, 19-Nov-95
+ *
+ * Restrict vt switching via ioctl()
+ *      -- grif@cs.ucr.edu, 5-Dec-95
+ *
+ * Move console and virtual terminal code to more appropriate files,
+ * implement CONFIG_VT and generalize console device interface.
+ *	-- Marko Kohtala <Marko.Kohtala@hut.fi>, March 97
+ *
+ * Rewrote tty_init_dev and tty_release_dev to eliminate races.
+ *	-- Bill Hawes <whawes@star.net>, June 97
+ *
+ * Added devfs support.
+ *      -- C. Scott Ananian <cananian@alumni.princeton.edu>, 13-Jan-1998
+ *
+ * Added support for a Unix98-style ptmx device.
+ *      -- C. Scott Ananian <cananian@alumni.princeton.edu>, 14-Jan-1998
+ *
+ * Reduced memory usage for older ARM systems
+ *      -- Russell King <rmk@arm.linux.org.uk>
+ *
+ * Move do_SAK() into process context.  Less stack use in devfs functions.
+ * alloc_tty_struct() always uses kmalloc()
+ *			 -- Andrew Morton <andrewm@uow.edu.eu> 17Mar01
+ */
+
+#include <linux/types.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/fcntl.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/task.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/devpts_fs.h>
+#include <linux/file.h>
+#include <linux/fdtable.h>
+#include <linux/console.h>
+#include <linux/timer.h>
+#include <linux/ctype.h>
+#include <linux/kd.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/ppp-ioctl.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/serial.h>
+#include <linux/ratelimit.h>
+#include <linux/compat.h>
+
+#include <linux/uaccess.h>
+
+#include <linux/kbd_kern.h>
+#include <linux/vt_kern.h>
+#include <linux/selection.h>
+
+#include <linux/kmod.h>
+#include <linux/nsproxy.h>
+
+#undef TTY_DEBUG_HANGUP
+#ifdef TTY_DEBUG_HANGUP
+# define tty_debug_hangup(tty, f, args...)	tty_debug(tty, f, ##args)
+#else
+# define tty_debug_hangup(tty, f, args...)	do { } while (0)
+#endif
+
+#define TTY_PARANOIA_CHECK 1
+#define CHECK_TTY_COUNT 1
+
+/* Default termios a driver gets when it registers without supplying its
+ * own init_termios: 38400 8N1, canonical mode with echo. */
+struct ktermios tty_std_termios = {	/* for the benefit of tty drivers  */
+	.c_iflag = ICRNL | IXON,
+	.c_oflag = OPOST | ONLCR,
+	.c_cflag = B38400 | CS8 | CREAD | HUPCL,
+	.c_lflag = ISIG | ICANON | ECHO | ECHOE | ECHOK |
+		   ECHOCTL | ECHOKE | IEXTEN,
+	.c_cc = INIT_C_CC,
+	.c_ispeed = 38400,
+	.c_ospeed = 38400,
+	/* .c_line = N_TTY, */
+};
+
+/* This list gets poked at by procfs and various bits of boot up code. This
+   could do with some rationalisation such as pulling the tty proc function
+   into this file */
+
+LIST_HEAD(tty_drivers);			/* linked list of tty drivers */
+
+/* Mutex to protect creating and releasing a tty */
+DEFINE_MUTEX(tty_mutex);
+
+static ssize_t tty_read(struct kiocb *, struct iov_iter *);
+static ssize_t tty_write(struct kiocb *, struct iov_iter *);
+static __poll_t tty_poll(struct file *, poll_table *);
+static int tty_open(struct inode *, struct file *);
+#ifdef CONFIG_COMPAT
+static long tty_compat_ioctl(struct file *file, unsigned int cmd,
+				unsigned long arg);
+#else
+#define tty_compat_ioctl NULL
+#endif
+static int __tty_fasync(int fd, struct file *filp, int on);
+static int tty_fasync(int fd, struct file *filp, int on);
+static void release_tty(struct tty_struct *tty, int idx);
+
+/**
+ *	free_tty_struct		-	free a disused tty
+ *	@tty: tty struct to free
+ *
+ *	Free the write buffers, tty queue and tty memory itself.
+ *
+ *	Locking: none. Must be called after tty is definitely unused
+ */
+
+static void free_tty_struct(struct tty_struct *tty)
+{
+	tty_ldisc_deinit(tty);
+	put_device(tty->dev);
+	kfree(tty->write_buf);
+	/* poison the magic so stale references trip tty_paranoia_check() */
+	tty->magic = 0xDEADDEAD;
+	kfree(tty);
+}
+
+/* Return the tty behind an open file; valid only for tty-backed files
+ * whose private_data was set up by tty_alloc_file()/tty_add_file(). */
+static inline struct tty_struct *file_tty(struct file *file)
+{
+	struct tty_file_private *priv = file->private_data;
+
+	return priv->tty;
+}
+
+/* Pre-allocate the per-open tty_file_private for @file; the fields are
+ * filled in later by tty_add_file().  Returns -ENOMEM on failure. */
+int tty_alloc_file(struct file *file)
+{
+	struct tty_file_private *priv;
+
+	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	file->private_data = priv;
+
+	return 0;
+}
+
+/* Associate a new file with the tty structure; the private data was
+ * allocated earlier by tty_alloc_file(). */
+void tty_add_file(struct tty_struct *tty, struct file *file)
+{
+	struct tty_file_private *priv = file->private_data;
+
+	priv->tty = tty;
+	priv->file = file;
+
+	/* files_lock guards the tty's list of open files */
+	spin_lock(&tty->files_lock);
+	list_add(&priv->list, &tty->tty_files);
+	spin_unlock(&tty->files_lock);
+}
+
+/**
+ * tty_free_file - free file->private_data
+ *
+ * This shall be used only for fail path handling when tty_add_file was not
+ * called yet.
+ */
+void tty_free_file(struct file *file)
+{
+	struct tty_file_private *priv = file->private_data;
+
+	/* only legal before tty_add_file(): priv is not on any tty list yet */
+	file->private_data = NULL;
+	kfree(priv);
+}
+
+/* Delete file from its tty: unlink from the tty's open-file list, then
+ * free the per-open private data. */
+static void tty_del_file(struct file *file)
+{
+	struct tty_file_private *priv = file->private_data;
+	struct tty_struct *tty = priv->tty;
+
+	spin_lock(&tty->files_lock);
+	list_del(&priv->list);
+	spin_unlock(&tty->files_lock);
+	tty_free_file(file);
+}
+
+/**
+ *	tty_name	-	return tty naming
+ *	@tty: tty structure
+ *
+ *	Convert a tty structure into a name. The name reflects the kernel
+ *	naming policy and if udev is in use may not reflect user space
+ *
+ *	Locking: none
+ */
+
+/* Name of the tty per kernel naming policy; tolerates NULL for the
+ * convenience of debug/diagnostic callers. */
+const char *tty_name(const struct tty_struct *tty)
+{
+	return tty ? tty->name : "NULL tty";
+}
+
+EXPORT_SYMBOL(tty_name);
+
+/* Driver name of the tty, or "" when the tty or its driver is missing. */
+const char *tty_driver_name(const struct tty_struct *tty)
+{
+	return (tty && tty->driver) ? tty->driver->name : "";
+}
+
+/* Sanity-check a tty pointer before use; returns 1 (and warns) on a NULL
+ * tty or a corrupted/poisoned magic, 0 when the tty looks valid.
+ * Compiled out entirely unless TTY_PARANOIA_CHECK is defined. */
+static int tty_paranoia_check(struct tty_struct *tty, struct inode *inode,
+			      const char *routine)
+{
+#ifdef TTY_PARANOIA_CHECK
+	if (!tty) {
+		pr_warn("(%d:%d): %s: NULL tty\n",
+			imajor(inode), iminor(inode), routine);
+		return 1;
+	}
+	if (tty->magic != TTY_MAGIC) {
+		pr_warn("(%d:%d): %s: bad magic number\n",
+			imajor(inode), iminor(inode), routine);
+		return 1;
+	}
+#endif
+	return 0;
+}
+
+/* Caller must hold tty_lock */
+/* Cross-check tty->count against the number of open files (plus the pty
+ * slave's master reference and any kernel-internal opens); warns and
+ * returns the recomputed count on mismatch, 0 when consistent. */
+static int check_tty_count(struct tty_struct *tty, const char *routine)
+{
+#ifdef CHECK_TTY_COUNT
+	struct list_head *p;
+	int count = 0, kopen_count = 0;
+
+	spin_lock(&tty->files_lock);
+	list_for_each(p, &tty->tty_files) {
+		count++;
+	}
+	spin_unlock(&tty->files_lock);
+	if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+	    tty->driver->subtype == PTY_TYPE_SLAVE &&
+	    tty->link && tty->link->count)
+		count++;
+	if (tty_port_kopened(tty->port))
+		kopen_count++;
+	if (tty->count != (count + kopen_count)) {
+		tty_warn(tty, "%s: tty->count(%d) != (#fd's(%d) + #kopen's(%d))\n",
+			 routine, tty->count, count, kopen_count);
+		return (count + kopen_count);
+	}
+#endif
+	return 0;
+}
+
+/**
+ *	get_tty_driver		-	find device of a tty
+ *	@device: device identifier
+ *	@index: returns the index of the tty
+ *
+ *	This routine returns a tty driver structure, given a device number
+ *	and also passes back the index number.
+ *
+ *	Locking: caller must hold tty_mutex
+ */
+
+static struct tty_driver *get_tty_driver(dev_t device, int *index)
+{
+	struct tty_driver *p;
+
+	list_for_each_entry(p, &tty_drivers, tty_drivers) {
+		dev_t base = MKDEV(p->major, p->minor_start);
+		if (device < base || device >= base + p->num)
+			continue;
+		*index = device - base;
+		/* returns a new reference; caller must drop it */
+		return tty_driver_kref_get(p);
+	}
+	return NULL;
+}
+
+/**
+ *	tty_dev_name_to_number	-	return dev_t for device name
+ *	@name: user space name of device under /dev
+ *	@number: pointer to dev_t that this function will populate
+ *
+ *	This function converts device names like ttyS0 or ttyUSB1 into dev_t
+ *	like (4, 64) or (188, 1). If no corresponding driver is registered then
+ *	the function returns -ENODEV.
+ *
+ *	Locking: this acquires tty_mutex to protect the tty_drivers list from
+ *		being modified while we are traversing it, and makes sure to
+ *		release it before exiting.
+ */
+int tty_dev_name_to_number(const char *name, dev_t *number)
+{
+	struct tty_driver *p;
+	int ret;
+	int index, prefix_length = 0;
+	const char *str;
+
+	/* skip the driver-name prefix, up to the first digit (e.g. "ttyS"|"0") */
+	for (str = name; *str && !isdigit(*str); str++)
+		;
+
+	if (!*str)
+		return -EINVAL;
+
+	ret = kstrtoint(str, 10, &index);
+	if (ret)
+		return ret;
+
+	prefix_length = str - name;
+	mutex_lock(&tty_mutex);
+
+	/* match prefix against each registered driver's name, then range-check */
+	list_for_each_entry(p, &tty_drivers, tty_drivers)
+		if (prefix_length == strlen(p->name) && strncmp(name,
+					p->name, prefix_length) == 0) {
+			if (index < p->num) {
+				*number = MKDEV(p->major, p->minor_start + index);
+				goto out;
+			}
+		}
+
+	/* if here then driver wasn't found */
+	ret = -ENODEV;
+out:
+	mutex_unlock(&tty_mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(tty_dev_name_to_number);
+
+#ifdef CONFIG_CONSOLE_POLL
+
+/**
+ *	tty_find_polling_driver	-	find device of a polled tty
+ *	@name: name string to match
+ *	@line: pointer to resulting tty line nr
+ *
+ *	This routine returns a tty driver structure, given a name
+ *	and the condition that the tty driver is capable of polled
+ *	operation.
+ */
+struct tty_driver *tty_find_polling_driver(char *name, int *line)
+{
+	struct tty_driver *p, *res = NULL;
+	int tty_line = 0;
+	int len;
+	char *str, *stp;
+
+	/* split "<name><line>[,<options>]": str ends up at the digits/comma */
+	for (str = name; *str; str++)
+		if ((*str >= '0' && *str <= '9') || *str == ',')
+			break;
+	if (!*str)
+		return NULL;
+
+	len = str - name;
+	tty_line = simple_strtoul(str, &str, 10);
+
+	mutex_lock(&tty_mutex);
+	/* Search through the tty devices to look for a match */
+	list_for_each_entry(p, &tty_drivers, tty_drivers) {
+		if (!len || strncmp(name, p->name, len) != 0)
+			continue;
+		/* anything after a ',' is passed to poll_init as options */
+		stp = str;
+		if (*stp == ',')
+			stp++;
+		if (*stp == '\0')
+			stp = NULL;
+
+		if (tty_line >= 0 && tty_line < p->num && p->ops &&
+		    p->ops->poll_init && !p->ops->poll_init(p, tty_line, stp)) {
+			res = tty_driver_kref_get(p);
+			*line = tty_line;
+			break;
+		}
+	}
+	mutex_unlock(&tty_mutex);
+
+	return res;
+}
+EXPORT_SYMBOL_GPL(tty_find_polling_driver);
+#endif
+
+/*
+ * Replacement file operations installed on every open file of a tty that
+ * has been hung up: reads see EOF, writes fail with -EIO, poll reports
+ * every event, and ioctls return -EIO (TIOCSPGRP keeps -ENOTTY so job
+ * control sees the conventional error).
+ */
+static ssize_t hung_up_tty_read(struct kiocb *iocb, struct iov_iter *to)
+{
+	return 0;
+}
+
+static ssize_t hung_up_tty_write(struct kiocb *iocb, struct iov_iter *from)
+{
+	return -EIO;
+}
+
+/* No kernel lock held - none needed ;) */
+static __poll_t hung_up_tty_poll(struct file *filp, poll_table *wait)
+{
+	return EPOLLIN | EPOLLOUT | EPOLLERR | EPOLLHUP | EPOLLRDNORM | EPOLLWRNORM;
+}
+
+static long hung_up_tty_ioctl(struct file *file, unsigned int cmd,
+		unsigned long arg)
+{
+	return cmd == TIOCSPGRP ? -ENOTTY : -EIO;
+}
+
+static long hung_up_tty_compat_ioctl(struct file *file,
+				     unsigned int cmd, unsigned long arg)
+{
+	return cmd == TIOCSPGRP ? -ENOTTY : -EIO;
+}
+
+static int hung_up_tty_fasync(int fd, struct file *file, int on)
+{
+	return -ENOTTY;
+}
+
+/* Forward fdinfo requests to the driver, if it implements show_fdinfo. */
+static void tty_show_fdinfo(struct seq_file *m, struct file *file)
+{
+	struct tty_struct *tty = file_tty(file);
+
+	if (tty && tty->ops && tty->ops->show_fdinfo)
+		tty->ops->show_fdinfo(tty, m);
+}
+
+/* Normal tty files. */
+static const struct file_operations tty_fops = {
+	.llseek		= no_llseek,
+	.read_iter	= tty_read,
+	.write_iter	= tty_write,
+	.splice_read	= generic_file_splice_read,
+	.splice_write	= iter_file_splice_write,
+	.poll		= tty_poll,
+	.unlocked_ioctl	= tty_ioctl,
+	.compat_ioctl	= tty_compat_ioctl,
+	.open		= tty_open,
+	.release	= tty_release,
+	.fasync		= tty_fasync,
+	.show_fdinfo	= tty_show_fdinfo,
+};
+
+/* /dev/console: writes go through the redirect machinery. */
+static const struct file_operations console_fops = {
+	.llseek		= no_llseek,
+	.read_iter	= tty_read,
+	.write_iter	= redirected_tty_write,
+	.splice_read	= generic_file_splice_read,
+	.splice_write	= iter_file_splice_write,
+	.poll		= tty_poll,
+	.unlocked_ioctl	= tty_ioctl,
+	.compat_ioctl	= tty_compat_ioctl,
+	.open		= tty_open,
+	.release	= tty_release,
+	.fasync		= tty_fasync,
+};
+
+/* Installed on open files by __tty_hangup(); see the stubs above. */
+static const struct file_operations hung_up_tty_fops = {
+	.llseek		= no_llseek,
+	.read_iter	= hung_up_tty_read,
+	.write_iter	= hung_up_tty_write,
+	.poll		= hung_up_tty_poll,
+	.unlocked_ioctl	= hung_up_tty_ioctl,
+	.compat_ioctl	= hung_up_tty_compat_ioctl,
+	.release	= tty_release,
+	.fasync		= hung_up_tty_fasync,
+};
+
+/* redirect_lock guards the console redirection target below. */
+static DEFINE_SPINLOCK(redirect_lock);
+static struct file *redirect;
+
+extern void tty_sysctl_init(void);
+
+/**
+ *	tty_wakeup	-	request more data
+ *	@tty: terminal
+ *
+ *	Internal and external helper for wakeups of tty. This function
+ *	informs the line discipline if present that the driver is ready
+ *	to receive more output data.
+ */
+
+void tty_wakeup(struct tty_struct *tty)
+{
+	struct tty_ldisc *ld;
+
+	if (test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) {
+		/* ld may be NULL while the line discipline is being changed */
+		ld = tty_ldisc_ref(tty);
+		if (ld) {
+			if (ld->ops->write_wakeup)
+				ld->ops->write_wakeup(tty);
+			tty_ldisc_deref(ld);
+		}
+	}
+	wake_up_interruptible_poll(&tty->write_wait, EPOLLOUT);
+}
+
+EXPORT_SYMBOL_GPL(tty_wakeup);
+
+/**
+ *	__tty_hangup		-	actual handler for hangup events
+ *	@tty: tty device
+ *
+ *	This can be called by a "kworker" kernel thread.  That is process
+ *	synchronous but doesn't hold any locks, so we need to make sure we
+ *	have the appropriate locks for what we're doing.
+ *
+ *	The hangup event clears any pending redirections onto the hung up
+ *	device. It ensures future writes will error and it does the needed
+ *	line discipline hangup and signal delivery. The tty object itself
+ *	remains intact.
+ *
+ *	Locking:
+ *		BTM
+ *		  redirect lock for undoing redirection
+ *		  file list lock for manipulating list of ttys
+ *		  tty_ldiscs_lock from called functions
+ *		  termios_rwsem resetting termios data
+ *		  tasklist_lock to walk task list for hangup event
+ *		    ->siglock to protect ->signal/->sighand
+ */
+static void __tty_hangup(struct tty_struct *tty, int exit_session)
+{
+	struct file *cons_filp = NULL;
+	struct file *filp, *f = NULL;
+	struct tty_file_private *priv;
+	int    closecount = 0, n;
+	int refs;
+
+	if (!tty)
+		return;
+
+	/* Undo any console redirection onto this tty; the old file's fput()
+	 * is deferred to the end, outside the tty lock. */
+	spin_lock(&redirect_lock);
+	if (redirect && file_tty(redirect) == tty) {
+		f = redirect;
+		redirect = NULL;
+	}
+	spin_unlock(&redirect_lock);
+
+	tty_lock(tty);
+
+	/* already hung up - nothing further to do */
+	if (test_bit(TTY_HUPPED, &tty->flags)) {
+		tty_unlock(tty);
+		return;
+	}
+
+	/*
+	 * Some console devices aren't actually hung up for technical and
+	 * historical reasons, which can lead to indefinite interruptible
+	 * sleep in n_tty_read().  The following explicitly tells
+	 * n_tty_read() to abort readers.
+	 */
+	set_bit(TTY_HUPPING, &tty->flags);
+
+	/* inuse_filps is protected by the single tty lock,
+	   this really needs to change if we want to flush the
+	   workqueue with the lock held */
+	check_tty_count(tty, "tty_hangup");
+
+	spin_lock(&tty->files_lock);
+	/* This breaks for file handles being sent over AF_UNIX sockets ? */
+	list_for_each_entry(priv, &tty->tty_files, list) {
+		filp = priv->file;
+		if (filp->f_op->write_iter == redirected_tty_write)
+			cons_filp = filp;
+		if (filp->f_op->write_iter != tty_write)
+			continue;
+		closecount++;
+		__tty_fasync(-1, filp, 0);	/* can't block */
+		/* future syscalls on this fd hit the hung-up stubs */
+		filp->f_op = &hung_up_tty_fops;
+	}
+	spin_unlock(&tty->files_lock);
+
+	refs = tty_signal_session_leader(tty, exit_session);
+	/* Account for the p->signal references we killed */
+	while (refs--)
+		tty_kref_put(tty);
+
+	tty_ldisc_hangup(tty, cons_filp != NULL);
+
+	spin_lock_irq(&tty->ctrl_lock);
+	clear_bit(TTY_THROTTLED, &tty->flags);
+	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+	put_pid(tty->session);
+	put_pid(tty->pgrp);
+	tty->session = NULL;
+	tty->pgrp = NULL;
+	tty->ctrl_status = 0;
+	spin_unlock_irq(&tty->ctrl_lock);
+
+	/*
+	 * If one of the devices matches a console pointer, we
+	 * cannot just call hangup() because that will cause
+	 * tty->count and state->count to go out of sync.
+	 * So we just call close() the right number of times.
+	 */
+	if (cons_filp) {
+		if (tty->ops->close)
+			for (n = 0; n < closecount; n++)
+				tty->ops->close(tty, cons_filp);
+	} else if (tty->ops->hangup)
+		tty->ops->hangup(tty);
+	/*
+	 * We don't want to have driver/ldisc interactions beyond the ones
+	 * we did here. The driver layer expects no calls after ->hangup()
+	 * from the ldisc side, which is now guaranteed.
+	 */
+	set_bit(TTY_HUPPED, &tty->flags);
+	clear_bit(TTY_HUPPING, &tty->flags);
+	tty_unlock(tty);
+
+	if (f)
+		fput(f);
+}
+
+/* Workqueue callback scheduled by tty_hangup(); runs in process context. */
+static void do_tty_hangup(struct work_struct *work)
+{
+	struct tty_struct *tty =
+		container_of(work, struct tty_struct, hangup_work);
+
+	__tty_hangup(tty, 0);
+}
+
+/**
+ *	tty_hangup		-	trigger a hangup event
+ *	@tty: tty to hangup
+ *
+ *	A carrier loss (virtual or otherwise) has occurred on this line;
+ *	schedule a hangup sequence to run after this event.
+ */
+
+void tty_hangup(struct tty_struct *tty)
+{
+	tty_debug_hangup(tty, "hangup\n");
+	/* asynchronous: the real work happens in do_tty_hangup() */
+	schedule_work(&tty->hangup_work);
+}
+
+EXPORT_SYMBOL(tty_hangup);
+
+/**
+ *	tty_vhangup		-	process vhangup
+ *	@tty: tty to hangup
+ *
+ *	The user has asked via system call for the terminal to be hung up.
+ *	We do this synchronously so that when the syscall returns the process
+ *	is complete. That guarantee is necessary for security reasons.
+ */
+
+void tty_vhangup(struct tty_struct *tty)
+{
+	tty_debug_hangup(tty, "vhangup\n");
+	/* synchronous, unlike tty_hangup() */
+	__tty_hangup(tty, 0);
+}
+
+EXPORT_SYMBOL(tty_vhangup);
+
+
+/**
+ *	tty_vhangup_self	-	process vhangup for own ctty
+ *
+ *	Perform a vhangup on the current controlling tty
+ */
+
+/* Perform a synchronous vhangup on the current process's controlling tty
+ * (no-op when the process has none). */
+void tty_vhangup_self(void)
+{
+	struct tty_struct *tty = get_current_tty();
+
+	if (!tty)
+		return;
+
+	tty_vhangup(tty);
+	tty_kref_put(tty);
+}
+
+/**
+ *	tty_vhangup_session		-	hangup session leader exit
+ *	@tty: tty to hangup
+ *
+ *	The session leader is exiting and hanging up its controlling terminal.
+ *	Every process in the foreground process group is signalled SIGHUP.
+ *
+ *	We do this synchronously so that when the syscall returns the process
+ *	is complete. That guarantee is necessary for security reasons.
+ */
+
+void tty_vhangup_session(struct tty_struct *tty)
+{
+	tty_debug_hangup(tty, "session hangup\n");
+	/* exit_session=1: signal the foreground group as a session hangup */
+	__tty_hangup(tty, 1);
+}
+
+/**
+ *	tty_hung_up_p		-	was tty hung up
+ *	@filp: file pointer of tty
+ *
+ *	Returns true if @filp has been switched to the hung-up file
+ *	operations, i.e. the tty was subject to a vhangup or a carrier loss.
+ */
+
+int tty_hung_up_p(struct file *filp)
+{
+	if (!filp)
+		return 0;
+	return filp->f_op == &hung_up_tty_fops;
+}
+
+EXPORT_SYMBOL(tty_hung_up_p);
+
+/**
+ *	stop_tty	-	propagate flow control
+ *	@tty: tty to stop
+ *
+ *	Perform flow control to the driver. May be called
+ *	on an already stopped device and will not re-call the driver
+ *	method.
+ *
+ *	This functionality is used by both the line disciplines for
+ *	halting incoming flow and by the driver. It may therefore be
+ *	called from any context, may be under the tty atomic_write_lock
+ *	but not always.
+ *
+ *	Locking:
+ *		flow_lock
+ */
+
+/* Lockless core — caller must hold tty->flow_lock (see stop_tty()). */
+void __stop_tty(struct tty_struct *tty)
+{
+	if (tty->stopped)
+		return;
+	tty->stopped = 1;
+	if (tty->ops->stop)
+		tty->ops->stop(tty);
+}
+
+/* Locked wrapper: take flow_lock, then stop the tty via __stop_tty(). */
+void stop_tty(struct tty_struct *tty)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&tty->flow_lock, irqflags);
+	__stop_tty(tty);
+	spin_unlock_irqrestore(&tty->flow_lock, irqflags);
+}
+EXPORT_SYMBOL(stop_tty);
+
+/**
+ *	start_tty	-	propagate flow control
+ *	@tty: tty to start
+ *
+ *	Start a tty that has been stopped if at all possible. If this
+ *	tty was previous stopped and is now being started, the driver
+ *	start method is invoked and the line discipline woken.
+ *
+ *	Locking:
+ *		flow_lock
+ */
+
+/* Lockless core — caller must hold tty->flow_lock (see start_tty()). */
+void __start_tty(struct tty_struct *tty)
+{
+	/* no-op unless stopped; flow_stopped keeps it halted (flow control) */
+	if (!tty->stopped || tty->flow_stopped)
+		return;
+	tty->stopped = 0;
+	if (tty->ops->start)
+		tty->ops->start(tty);
+	tty_wakeup(tty);
+}
+
+/* Locked wrapper: take flow_lock, then restart the tty via __start_tty(). */
+void start_tty(struct tty_struct *tty)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&tty->flow_lock, irqflags);
+	__start_tty(tty);
+	spin_unlock_irqrestore(&tty->flow_lock, irqflags);
+}
+EXPORT_SYMBOL(start_tty);
+
+/* Coarsely refresh a tty inode timestamp (8-second granularity). */
+static void tty_update_time(struct timespec64 *time)
+{
+	time64_t sec = ktime_get_real_seconds();
+
+	/*
+	 * We only care if the two values differ in anything other than the
+	 * lower three bits (i.e every 8 seconds).  If so, then we can update
+	 * the time of the tty device, otherwise it could be construed as a
+	 * security leak to let userspace know the exact timing of the tty.
+	 */
+	if ((sec ^ time->tv_sec) & ~7)
+		time->tv_sec = sec;
+}
+
+/*
+ * Iterate on the ldisc ->read() function until we've gotten all
+ * the data the ldisc has for us.
+ *
+ * The "cookie" is something that the ldisc read function can fill
+ * in to let us know that there is more data to be had.
+ *
+ * We promise to continue to call the ldisc until it stops returning
+ * data or clears the cookie. The cookie may be something that the
+ * ldisc maintains state for and needs to free.
+ *
+ * Returns the number of bytes copied to @to, or a negative errno if
+ * nothing was copied and an error occurred.
+ */
+static int iterate_tty_read(struct tty_ldisc *ld, struct tty_struct *tty,
+		struct file *file, struct iov_iter *to)
+{
+	int retval = 0;
+	void *cookie = NULL;
+	unsigned long offset = 0;
+	char kernel_buf[64];
+	size_t count = iov_iter_count(to);
+
+	do {
+		int size, copied;
+
+		/* Bounce through a small kernel buffer, 64 bytes at a time */
+		size = count > sizeof(kernel_buf) ? sizeof(kernel_buf) : count;
+		size = ld->ops->read(tty, file, kernel_buf, size, &cookie, offset);
+		if (!size)
+			break;
+
+		if (size < 0) {
+			/* Did we have an earlier error (ie -EFAULT)? */
+			if (retval)
+				break;
+			retval = size;
+
+			/*
+			 * -EOVERFLOW means we didn't have enough space
+			 * for a whole packet, and we shouldn't return
+			 * a partial result.
+			 */
+			if (retval == -EOVERFLOW)
+				offset = 0;
+			break;
+		}
+
+		copied = copy_to_iter(kernel_buf, size, to);
+		offset += copied;
+		count -= copied;
+
+		/*
+		 * If the user copy failed, we still need to do another ->read()
+		 * call if we had a cookie to let the ldisc clear up.
+		 *
+		 * But make sure size is zeroed.
+		 */
+		if (unlikely(copied != size)) {
+			count = 0;
+			retval = -EFAULT;
+		}
+	} while (cookie);
+
+	/* We always clear tty buffer in case they contained passwords */
+	memzero_explicit(kernel_buf, sizeof(kernel_buf));
+	return offset ? offset : retval;
+}
+
+
+/**
+ *	tty_read	-	read method for tty device files
+ *	@iocb: kernel I/O control block for the open tty file
+ *	@to: destination iov_iter for the read data
+ *
+ *	Perform the read system call function on this terminal device. Checks
+ *	for hung up devices before calling the line discipline method.
+ *
+ *	Locking:
+ *		Locks the line discipline internally while needed. Multiple
+ *	read calls may be outstanding in parallel.
+ */
+
+static ssize_t tty_read(struct kiocb *iocb, struct iov_iter *to)
+{
+	int i;
+	struct file *file = iocb->ki_filp;
+	struct inode *inode = file_inode(file);
+	struct tty_struct *tty = file_tty(file);
+	struct tty_ldisc *ld;
+
+	if (tty_paranoia_check(tty, inode, "tty_read"))
+		return -EIO;
+	if (!tty || tty_io_error(tty))
+		return -EIO;
+
+	/* We want to wait for the line discipline to sort out in this
+	   situation */
+	ld = tty_ldisc_ref_wait(tty);
+	if (!ld)
+		return hung_up_tty_read(iocb, to);
+	i = -EIO;
+	if (ld->ops->read)
+		i = iterate_tty_read(ld, tty, file, to);
+	tty_ldisc_deref(ld);
+
+	/* Only touch atime when data was actually delivered */
+	if (i > 0)
+		tty_update_time(&inode->i_atime);
+
+	return i;
+}
+
+/* Drop the write-serialisation mutex and wake any POLLOUT waiters. */
+static void tty_write_unlock(struct tty_struct *tty)
+{
+	mutex_unlock(&tty->atomic_write_lock);
+	wake_up_interruptible_poll(&tty->write_wait, EPOLLOUT);
+}
+
+/*
+ * Serialise writers on tty->atomic_write_lock.  Non-blocking callers
+ * (@ndelay set) fail fast with -EAGAIN; otherwise sleep interruptibly
+ * and report -ERESTARTSYS on a signal.  Returns 0 with the lock held.
+ */
+static int tty_write_lock(struct tty_struct *tty, int ndelay)
+{
+	if (mutex_trylock(&tty->atomic_write_lock))
+		return 0;
+	if (ndelay)
+		return -EAGAIN;
+	if (mutex_lock_interruptible(&tty->atomic_write_lock))
+		return -ERESTARTSYS;
+	return 0;
+}
+
+/*
+ * Split writes up in sane blocksizes to avoid
+ * denial-of-service type attacks
+ *
+ * Copies user data into tty->write_buf in chunks and feeds each chunk
+ * to the ldisc @write callback under the atomic_write_lock.  Returns
+ * the number of bytes written, or a negative errno if nothing was.
+ */
+static inline ssize_t do_tty_write(
+	ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t),
+	struct tty_struct *tty,
+	struct file *file,
+	struct iov_iter *from)
+{
+	size_t count = iov_iter_count(from);
+	ssize_t ret, written = 0;
+	unsigned int chunk;
+
+	ret = tty_write_lock(tty, file->f_flags & O_NDELAY);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * We chunk up writes into a temporary buffer. This
+	 * simplifies low-level drivers immensely, since they
+	 * don't have locking issues and user mode accesses.
+	 *
+	 * But if TTY_NO_WRITE_SPLIT is set, we should use a
+	 * big chunk-size..
+	 *
+	 * The default chunk-size is 2kB, because the NTTY
+	 * layer has problems with bigger chunks. It will
+	 * claim to be able to handle more characters than
+	 * it actually does.
+	 *
+	 * FIXME: This can probably go away now except that 64K chunks
+	 * are too likely to fail unless switched to vmalloc...
+	 */
+	chunk = 2048;
+	if (test_bit(TTY_NO_WRITE_SPLIT, &tty->flags))
+		chunk = 65536;
+	if (count < chunk)
+		chunk = count;
+
+	/* write_buf/write_cnt is protected by the atomic_write_lock mutex */
+	if (tty->write_cnt < chunk) {
+		unsigned char *buf_chunk;
+
+		/* never shrink below 1kB so small writes don't thrash */
+		if (chunk < 1024)
+			chunk = 1024;
+
+		buf_chunk = kmalloc(chunk, GFP_KERNEL);
+		if (!buf_chunk) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		kfree(tty->write_buf);
+		tty->write_cnt = chunk;
+		tty->write_buf = buf_chunk;
+	}
+
+	/* Do the write .. */
+	for (;;) {
+		size_t size = count;
+		if (size > chunk)
+			size = chunk;
+
+		ret = -EFAULT;
+		if (copy_from_iter(tty->write_buf, size, from) != size)
+			break;
+
+		ret = write(tty, file, tty->write_buf, size);
+		if (ret <= 0)
+			break;
+
+		written += ret;
+		/* paranoia: a sane ldisc never reports more than we passed */
+		if (ret > size)
+			break;
+
+		/* FIXME! Have Al check this! */
+		if (ret != size)
+			iov_iter_revert(from, size-ret);
+
+		count -= ret;
+		if (!count)
+			break;
+		ret = -ERESTARTSYS;
+		if (signal_pending(current))
+			break;
+		cond_resched();
+	}
+	if (written) {
+		tty_update_time(&file_inode(file)->i_mtime);
+		ret = written;
+	}
+out:
+	tty_write_unlock(tty);
+	return ret;
+}
+
+/**
+ * tty_write_message - write a message to a certain tty, not just the console.
+ * @tty: the destination tty_struct
+ * @msg: the message to write
+ *
+ * This is used for messages that need to be redirected to a specific tty.
+ * We don't put it into the syslog queue right now maybe in the future if
+ * really needed.
+ *
+ * We must still hold the BTM and test the CLOSING flag for the moment.
+ *
+ * Lock order: atomic_write_lock is taken before the tty lock, matching
+ * the write path; the mutex is released via tty_write_unlock() so that
+ * blocked writers are woken.
+ */
+
+void tty_write_message(struct tty_struct *tty, char *msg)
+{
+	if (tty) {
+		mutex_lock(&tty->atomic_write_lock);
+		tty_lock(tty);
+		/* count > 0: only write while somebody still has it open */
+		if (tty->ops->write && tty->count > 0)
+			tty->ops->write(tty, msg, strlen(msg));
+		tty_unlock(tty);
+		tty_write_unlock(tty);
+	}
+	return;
+}
+
+
+/**
+ *	tty_write		-	write method for tty device file
+ *	@file: tty file pointer
+ *	@buf: user data to write
+ *	@count: bytes to write
+ *	@ppos: unused
+ *
+ *	Write data to a tty device via the line discipline.
+ *
+ *	Locking:
+ *		Locks the line discipline as required
+ *		Writes to the tty driver are serialized by the atomic_write_lock
+ *	and are then processed in chunks to the device. The line discipline
+ *	write method will not be invoked in parallel for each device.
+ */
+
+/* Shared body of tty_write() and redirected_tty_write(). */
+static ssize_t file_tty_write(struct file *file, struct kiocb *iocb, struct iov_iter *from)
+{
+	struct tty_struct *tty = file_tty(file);
+	struct tty_ldisc *ld;
+	ssize_t ret;
+
+	if (tty_paranoia_check(tty, file_inode(file), "tty_write"))
+		return -EIO;
+	if (!tty || !tty->ops->write || tty_io_error(tty))
+		return -EIO;
+	/* Short term debug to catch buggy drivers */
+	if (!tty->ops->write_room)
+		tty_err(tty, "missing write_room method\n");
+
+	ld = tty_ldisc_ref_wait(tty);
+	if (!ld)
+		return hung_up_tty_write(iocb, from);
+	if (ld->ops->write)
+		ret = do_tty_write(ld->ops->write, tty, file, from);
+	else
+		ret = -EIO;
+	tty_ldisc_deref(ld);
+	return ret;
+}
+
+/* ->write_iter() entry point for tty device files. */
+static ssize_t tty_write(struct kiocb *iocb, struct iov_iter *from)
+{
+	struct file *file = iocb->ki_filp;
+
+	return file_tty_write(file, iocb, from);
+}
+
+/* Write to the redirect target if one is set, else to the caller's tty. */
+ssize_t redirected_tty_write(struct kiocb *iocb, struct iov_iter *iter)
+{
+	struct file *p = NULL;
+
+	/* take our own reference under the lock so redirect can't vanish */
+	spin_lock(&redirect_lock);
+	if (redirect)
+		p = get_file(redirect);
+	spin_unlock(&redirect_lock);
+
+	/*
+	 * We know the redirected tty is just another tty, we can
+	 * call file_tty_write() directly with that file pointer.
+	 */
+	if (p) {
+		ssize_t res;
+		res = file_tty_write(p, iocb, iter);
+		fput(p);
+		return res;
+	}
+	return tty_write(iocb, iter);
+}
+
+/**
+ *	tty_send_xchar	-	send priority character
+ *	@tty: tty to send on
+ *	@ch: character to send (typically START/STOP)
+ *
+ *	Send a high priority character to the tty even if stopped
+ *
+ *	Locking: none for xchar method, write ordering for write method.
+ */
+
+int tty_send_xchar(struct tty_struct *tty, char ch)
+{
+	int	was_stopped = tty->stopped;
+
+	/* Fast path: driver handles priority characters itself */
+	if (tty->ops->send_xchar) {
+		down_read(&tty->termios_rwsem);
+		tty->ops->send_xchar(tty, ch);
+		up_read(&tty->termios_rwsem);
+		return 0;
+	}
+
+	if (tty_write_lock(tty, 0) < 0)
+		return -ERESTARTSYS;
+
+	/* Fallback: temporarily un-stop, write one char, restore state */
+	down_read(&tty->termios_rwsem);
+	if (was_stopped)
+		start_tty(tty);
+	tty->ops->write(tty, &ch, 1);
+	if (was_stopped)
+		stop_tty(tty);
+	up_read(&tty->termios_rwsem);
+	tty_write_unlock(tty);
+	return 0;
+}
+
+/* Legacy BSD pty naming: letter for the high hex digit of the minor. */
+static char ptychar[] = "pqrstuvwxyzabcde";
+
+/**
+ *	pty_line_name	-	generate name for a pty
+ *	@driver: the tty driver in use
+ *	@index: the minor number
+ *	@p: output buffer of at least 6 bytes
+ *
+ *	Write the legacy BSD-style pty name for @index into @p.
+ *
+ *	Locking: None
+ */
+static void pty_line_name(struct tty_driver *driver, int index, char *p)
+{
+	int line = index + driver->name_base;
+	const char *prefix = driver->name;
+
+	/* ->name is initialized to "ttyp", but "tty" is expected */
+	if (driver->subtype == PTY_TYPE_SLAVE)
+		prefix = "tty";
+	sprintf(p, "%s%c%x", prefix, ptychar[line >> 4 & 0xf], line & 0xf);
+}
+
+/**
+ *	tty_line_name	-	generate name for a tty
+ *	@driver: the tty driver in use
+ *	@index: the minor number
+ *	@p: output buffer of at least 7 bytes
+ *
+ *	Write the device name for @index into @p and return its length.
+ *	Drivers with a single unnumbered node get the bare driver name.
+ *
+ *	Locking: None
+ */
+static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p)
+{
+	if (driver->flags & TTY_DRIVER_UNNUMBERED_NODE)
+		return sprintf(p, "%s", driver->name);
+
+	return sprintf(p, "%s%d", driver->name,
+		       index + driver->name_base);
+}
+
+/**
+ *	tty_driver_lookup_tty() - find an existing tty, if any
+ *	@driver: the driver for the tty
+ *	@file:	 file used for the lookup (may be NULL for kernel opens)
+ *	@idx:	 the minor number
+ *
+ *	Return the tty, if found. If not found, return NULL or ERR_PTR() if the
+ *	driver lookup() method returns an error.
+ *
+ *	Locking: tty_mutex must be held. If the tty is found, bump the tty kref.
+ */
+static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver,
+		struct file *file, int idx)
+{
+	struct tty_struct *tty;
+
+	/*
+	 * Braces on the outer if: the nested if/else otherwise creates a
+	 * dangling-else ambiguity for the reader (the compiler bound it
+	 * correctly, but only by the innermost-if rule).
+	 */
+	if (driver->ops->lookup) {
+		if (!file)
+			tty = ERR_PTR(-EIO);
+		else
+			tty = driver->ops->lookup(driver, file, idx);
+	} else {
+		tty = driver->ttys[idx];
+	}
+
+	if (!IS_ERR(tty))
+		tty_kref_get(tty);
+	return tty;
+}
+
+/**
+ *	tty_init_termios	-  helper for termios setup
+ *	@tty: the tty to set up
+ *
+ *	Initialise the termios structure for this tty. This runs under
+ *	the tty_mutex currently so we can be relaxed about ordering.
+ */
+
+void tty_init_termios(struct tty_struct *tty)
+{
+	struct ktermios *tp;
+	int idx = tty->index;
+
+	if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS)
+		tty->termios = tty->driver->init_termios;
+	else {
+		/* Check for lazy saved data (stashed by tty_save_termios()) */
+		tp = tty->driver->termios[idx];
+		if (tp != NULL) {
+			tty->termios = *tp;
+			/* ldisc is never restored from the saved copy */
+			tty->termios.c_line  = tty->driver->init_termios.c_line;
+		} else
+			tty->termios = tty->driver->init_termios;
+	}
+	/* Compatibility until drivers always set this */
+	tty->termios.c_ispeed = tty_termios_input_baud_rate(&tty->termios);
+	tty->termios.c_ospeed = tty_termios_baud_rate(&tty->termios);
+}
+EXPORT_SYMBOL_GPL(tty_init_termios);
+
+/**
+ *	tty_standard_install	-	default ->install() behaviour
+ *	@driver: the driver for the tty
+ *	@tty: tty being installed
+ *
+ *	Set up termios, take a driver reference, bump the open count and
+ *	record @tty in the driver table. Always returns 0.
+ */
+int tty_standard_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+	tty_init_termios(tty);
+	tty_driver_kref_get(driver);
+	tty->count++;
+	driver->ttys[tty->index] = tty;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tty_standard_install);
+
+/**
+ *	tty_driver_install_tty() - install a tty entry in the driver
+ *	@driver: the driver for the tty
+ *	@tty: the tty
+ *
+ *	Install a tty object into the driver tables. The tty->index field
+ *	will be set by the time this is called. Drivers may override the
+ *	default behaviour with their own ->install() method.
+ *
+ *	Locking: tty_mutex for now
+ */
+static int tty_driver_install_tty(struct tty_driver *driver,
+						struct tty_struct *tty)
+{
+	if (driver->ops->install)
+		return driver->ops->install(driver, tty);
+
+	return tty_standard_install(driver, tty);
+}
+
+/**
+ *	tty_driver_remove_tty() - remove a tty from the driver tables
+ *	@driver: the driver for the tty
+ *	@tty: tty to remove
+ *
+ *	Remove a tty object from the driver tables. The tty->index field
+ *	will be set by the time this is called.
+ *
+ *	Locking: tty_mutex for now
+ */
+static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *tty)
+{
+	if (driver->ops->remove)
+		driver->ops->remove(driver, tty);
+	else
+		driver->ttys[tty->index] = NULL;
+}
+
+/**
+ *	tty_reopen()	- fast re-open of an open tty
+ *	@tty: the tty to open
+ *
+ *	Return 0 on success, -errno on error.
+ *	Re-opens on master ptys are not allowed and return -EIO.
+ *
+ *	Locking: Caller must hold tty_lock
+ */
+static int tty_reopen(struct tty_struct *tty)
+{
+	struct tty_driver *driver = tty->driver;
+	struct tty_ldisc *ld;
+	int retval = 0;
+
+	if (driver->type == TTY_DRIVER_TYPE_PTY &&
+	    driver->subtype == PTY_TYPE_MASTER)
+		return -EIO;
+
+	/* count == 0 means the tty is mid-release; let the opener retry */
+	if (!tty->count)
+		return -EAGAIN;
+
+	if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
+		return -EBUSY;
+
+	ld = tty_ldisc_ref_wait(tty);
+	if (ld) {
+		tty_ldisc_deref(ld);
+	} else {
+		/* No ldisc (hangup raced us): reinstate one under the lock */
+		retval = tty_ldisc_lock(tty, 5 * HZ);
+		if (retval)
+			return retval;
+
+		if (!tty->ldisc)
+			retval = tty_ldisc_reinit(tty, tty->termios.c_line);
+		tty_ldisc_unlock(tty);
+	}
+
+	if (retval == 0)
+		tty->count++;
+
+	return retval;
+}
+
+/**
+ *	tty_init_dev		-	initialise a tty device
+ *	@driver: tty driver we are opening a device on
+ *	@idx: device index
+ *
+ *	Prepare a tty device. This may not be a "new" clean device but
+ *	could also be an active device. The pty drivers require special
+ *	handling because of this.
+ *
+ *	Locking:
+ *		The function is called under the tty_mutex, which
+ *	protects us from the tty struct or driver itself going away.
+ *
+ *	On exit the tty device has the line discipline attached and
+ *	a reference count of 1. If a pair was created for pty/tty use
+ *	and the other was a pty master then it too has a reference count of 1.
+ *
+ * WSH 06/09/97: Rewritten to remove races and properly clean up after a
+ * failed open.  The new code protects the open with a mutex, so it's
+ * really quite straightforward.  The mutex locking can probably be
+ * relaxed for the (most common) case of reopening a tty.
+ *
+ *	Return: returned tty structure (locked), or ERR_PTR() on failure
+ */
+
+struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
+{
+	struct tty_struct *tty;
+	int retval;
+
+	/*
+	 * First time open is complex, especially for PTY devices.
+	 * This code guarantees that either everything succeeds and the
+	 * TTY is ready for operation, or else the table slots are vacated
+	 * and the allocated memory released.  (Except that the termios
+	 * may be retained.)
+	 */
+
+	if (!try_module_get(driver->owner))
+		return ERR_PTR(-ENODEV);
+
+	tty = alloc_tty_struct(driver, idx);
+	if (!tty) {
+		retval = -ENOMEM;
+		goto err_module_put;
+	}
+
+	tty_lock(tty);
+	retval = tty_driver_install_tty(driver, tty);
+	if (retval < 0)
+		goto err_free_tty;
+
+	if (!tty->port)
+		tty->port = driver->ports[idx];
+
+	if (WARN_RATELIMIT(!tty->port,
+			"%s: %s driver does not set tty->port. This would crash the kernel. Fix the driver!\n",
+			__func__, tty->driver->name)) {
+		retval = -EINVAL;
+		goto err_release_lock;
+	}
+
+	retval = tty_ldisc_lock(tty, 5 * HZ);
+	if (retval)
+		goto err_release_lock;
+	tty->port->itty = tty;
+
+	/*
+	 * Structures all installed ... call the ldisc open routines.
+	 * If we fail here just call release_tty to clean up.  No need
+	 * to decrement the use counts, as release_tty doesn't care.
+	 */
+	retval = tty_ldisc_setup(tty, tty->link);
+	if (retval)
+		goto err_release_tty;
+	tty_ldisc_unlock(tty);
+	/* Return the tty locked so that it cannot vanish under the caller */
+	return tty;
+
+err_free_tty:
+	tty_unlock(tty);
+	free_tty_struct(tty);
+err_module_put:
+	module_put(driver->owner);
+	return ERR_PTR(retval);
+
+	/* call the tty release_tty routine to clean out this slot */
+err_release_tty:
+	tty_ldisc_unlock(tty);
+	tty_info_ratelimited(tty, "ldisc open failed (%d), clearing slot %d\n",
+			     retval, idx);
+err_release_lock:
+	tty_unlock(tty);
+	release_tty(tty, idx);
+	return ERR_PTR(retval);
+}
+
+/**
+ * tty_save_termios() - save tty termios data in driver table
+ * @tty: tty whose termios data to save
+ *
+ * Stashes the current termios so tty_init_termios() can restore it on
+ * the next open of this line.
+ *
+ * Locking: Caller guarantees serialisation with tty_init_termios().
+ */
+void tty_save_termios(struct tty_struct *tty)
+{
+	struct ktermios *tp;
+	int idx = tty->index;
+
+	/* If the port is going to reset then it has no termios to save */
+	if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS)
+		return;
+
+	/* Stash the termios data */
+	tp = tty->driver->termios[idx];
+	if (tp == NULL) {
+		/* best effort: silently skip the save if allocation fails */
+		tp = kmalloc(sizeof(*tp), GFP_KERNEL);
+		if (tp == NULL)
+			return;
+		tty->driver->termios[idx] = tp;
+	}
+	*tp = tty->termios;
+}
+EXPORT_SYMBOL_GPL(tty_save_termios);
+
+/**
+ *	tty_flush_works		-	flush all works of a tty/pty pair
+ *	@tty: tty device to flush works for (or either end of a pty pair)
+ *
+ *	Sync flush all works belonging to @tty (and the 'other' tty).
+ */
+static void tty_flush_works(struct tty_struct *tty)
+{
+	struct tty_struct *other = tty->link;
+
+	flush_work(&tty->SAK_work);
+	flush_work(&tty->hangup_work);
+	if (other) {
+		flush_work(&other->SAK_work);
+		flush_work(&other->hangup_work);
+	}
+}
+
+/**
+ *	release_one_tty		-	release tty structure memory
+ *	@work: work of tty we are obliterating
+ *
+ *	Releases memory associated with a tty structure, and clears out the
+ *	driver table slots. This function is called when a device is no longer
+ *	in use. It also gets called when setup of a device fails.
+ *
+ *	Locking:
+ *		takes the file list lock internally when working on the list
+ *	of ttys that the driver keeps.
+ *
+ *	This method gets called from a work queue so that the driver private
+ *	cleanup ops can sleep (needed for USB at least)
+ */
+static void release_one_tty(struct work_struct *work)
+{
+	/* hangup_work was repurposed by queue_release_one_tty() */
+	struct tty_struct *tty =
+		container_of(work, struct tty_struct, hangup_work);
+	struct tty_driver *driver = tty->driver;
+	struct module *owner = driver->owner;
+
+	if (tty->ops->cleanup)
+		tty->ops->cleanup(tty);
+
+	tty->magic = 0;
+	tty_driver_kref_put(driver);
+	module_put(owner);
+
+	spin_lock(&tty->files_lock);
+	list_del_init(&tty->tty_files);
+	spin_unlock(&tty->files_lock);
+
+	put_pid(tty->pgrp);
+	put_pid(tty->session);
+	free_tty_struct(tty);
+}
+
+/* kref release callback: defer the final teardown to a workqueue. */
+static void queue_release_one_tty(struct kref *kref)
+{
+	struct tty_struct *tty = container_of(kref, struct tty_struct, kref);
+
+	/* The hangup queue is now free so we can reuse it rather than
+	   waste a chunk of memory for each port */
+	INIT_WORK(&tty->hangup_work, release_one_tty);
+	schedule_work(&tty->hangup_work);
+}
+
+/**
+ *	tty_kref_put		-	release a tty kref
+ *	@tty: tty device (may be NULL)
+ *
+ *	Drop a reference to @tty; when it is the last one the kref layer
+ *	destructs the object for us via queue_release_one_tty().
+ */
+
+void tty_kref_put(struct tty_struct *tty)
+{
+	if (!tty)
+		return;
+	kref_put(&tty->kref, queue_release_one_tty);
+}
+EXPORT_SYMBOL(tty_kref_put);
+
+/**
+ *	release_tty		-	release tty structure memory
+ *	@tty: tty to release
+ *	@idx: index the tty was installed at (sanity-checked)
+ *
+ *	Release both @tty and a possible linked partner (think pty pair),
+ *	and decrement the refcount of the backing module.
+ *
+ *	Locking:
+ *		tty_mutex
+ *		takes the file list lock internally when working on the list
+ *	of ttys that the driver keeps.
+ *
+ */
+static void release_tty(struct tty_struct *tty, int idx)
+{
+	/* This should always be true but check for the moment */
+	WARN_ON(tty->index != idx);
+	WARN_ON(!mutex_is_locked(&tty_mutex));
+	if (tty->ops->shutdown)
+		tty->ops->shutdown(tty);
+	tty_save_termios(tty);
+	tty_driver_remove_tty(tty->driver, tty);
+	if (tty->port)
+		tty->port->itty = NULL;
+	if (tty->link)
+		tty->link->port->itty = NULL;
+	if (tty->port)
+		tty_buffer_cancel_work(tty->port);
+	if (tty->link)
+		tty_buffer_cancel_work(tty->link->port);
+
+	/* drop the slot references; frees happen via queue_release_one_tty */
+	tty_kref_put(tty->link);
+	tty_kref_put(tty);
+}
+
+/**
+ *	tty_release_checks - check a tty before real release
+ *	@tty: tty to check
+ *	@idx: index of the tty
+ *
+ *	Performs some paranoid checking before true release of the @tty.
+ *	This is a no-op unless TTY_PARANOIA_CHECK is defined.
+ *
+ *	Returns 0 when the tty looks consistent, -1 otherwise.
+ */
+static int tty_release_checks(struct tty_struct *tty, int idx)
+{
+#ifdef TTY_PARANOIA_CHECK
+	if (idx < 0 || idx >= tty->driver->num) {
+		tty_debug(tty, "bad idx %d\n", idx);
+		return -1;
+	}
+
+	/* not much to check for devpts */
+	if (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM)
+		return 0;
+
+	if (tty != tty->driver->ttys[idx]) {
+		tty_debug(tty, "bad driver table[%d] = %p\n",
+			  idx, tty->driver->ttys[idx]);
+		return -1;
+	}
+	/* for pty pairs, the partner table and back-link must agree too */
+	if (tty->driver->other) {
+		struct tty_struct *o_tty = tty->link;
+
+		if (o_tty != tty->driver->other->ttys[idx]) {
+			tty_debug(tty, "bad other table[%d] = %p\n",
+				  idx, tty->driver->other->ttys[idx]);
+			return -1;
+		}
+		if (o_tty->link != tty) {
+			tty_debug(tty, "bad link = %p\n", o_tty->link);
+			return -1;
+		}
+	}
+#endif
+	return 0;
+}
+
+/**
+ *      tty_kclose      -       closes tty opened by tty_kopen
+ *      @tty: tty device
+ *
+ *      Performs the final steps to release and free a tty device. It is the
+ *      same as tty_release_struct except that it also resets TTY_PORT_KOPENED
+ *      flag on tty->port.
+ */
+void tty_kclose(struct tty_struct *tty)
+{
+	/*
+	 * Ask the line discipline code to release its structures
+	 */
+	tty_ldisc_release(tty);
+
+	/* Wait for pending work before tty destruction commences */
+	tty_flush_works(tty);
+
+	tty_debug_hangup(tty, "freeing structure\n");
+	/*
+	 * The release_tty function takes care of the details of clearing
+	 * the slots and preserving the termios structure.
+	 */
+	mutex_lock(&tty_mutex);
+	tty_port_set_kopened(tty->port, 0);
+	release_tty(tty, tty->index);
+	mutex_unlock(&tty_mutex);
+}
+EXPORT_SYMBOL_GPL(tty_kclose);
+
+/**
+ *	tty_release_struct	-	release a tty struct
+ *	@tty: tty device
+ *	@idx: index of the tty
+ *
+ *	Performs the final steps to release and free a tty device. It is
+ *	roughly the reverse of tty_init_dev.
+ */
+void tty_release_struct(struct tty_struct *tty, int idx)
+{
+	/*
+	 * Ask the line discipline code to release its structures
+	 */
+	tty_ldisc_release(tty);
+
+	/* Wait for pending work before tty destruction commences */
+	tty_flush_works(tty);
+
+	tty_debug_hangup(tty, "freeing structure\n");
+	/*
+	 * The release_tty function takes care of the details of clearing
+	 * the slots and preserving the termios structure.
+	 */
+	mutex_lock(&tty_mutex);
+	release_tty(tty, idx);
+	mutex_unlock(&tty_mutex);
+}
+EXPORT_SYMBOL_GPL(tty_release_struct);
+
+/**
+ *	tty_release		-	vfs callback for close
+ *	@inode: inode of tty
+ *	@filp: file pointer for handle to tty
+ *
+ *	Called the last time each file handle is closed that references
+ *	this tty. There may however be several such references.
+ *
+ *	Locking:
+ *		Takes the tty lock (and, for a pty master, the slave's lock).
+ *	See tty_release_struct for the final teardown.
+ *
+ * Even releasing the tty structures is a tricky business.. We have
+ * to be very careful that the structures are all released at the
+ * same time, as interrupts might otherwise get the wrong pointers.
+ *
+ * WSH 09/09/97: rewritten to avoid some nasty race conditions that could
+ * lead to double frees or releasing memory still in use.
+ */
+
+int tty_release(struct inode *inode, struct file *filp)
+{
+	struct tty_struct *tty = file_tty(filp);
+	struct tty_struct *o_tty = NULL;
+	int	do_sleep, final;
+	int	idx;
+	long	timeout = 0;
+	int	once = 1;
+
+	if (tty_paranoia_check(tty, inode, __func__))
+		return 0;
+
+	tty_lock(tty);
+	check_tty_count(tty, __func__);
+
+	__tty_fasync(-1, filp, 0);
+
+	idx = tty->index;
+	if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+	    tty->driver->subtype == PTY_TYPE_MASTER)
+		o_tty = tty->link;
+
+	if (tty_release_checks(tty, idx)) {
+		tty_unlock(tty);
+		return 0;
+	}
+
+	tty_debug_hangup(tty, "releasing (count=%d)\n", tty->count);
+
+	if (tty->ops->close)
+		tty->ops->close(tty, filp);
+
+	/* If tty is pty master, lock the slave pty (stable lock order) */
+	tty_lock_slave(o_tty);
+
+	/*
+	 * Sanity check: if tty->count is going to zero, there shouldn't be
+	 * any waiters on tty->read_wait or tty->write_wait.  We test the
+	 * wait queues and kick everyone out _before_ actually starting to
+	 * close.  This ensures that we won't block while releasing the tty
+	 * structure.
+	 *
+	 * The test for the o_tty closing is necessary, since the master and
+	 * slave sides may close in any order.  If the slave side closes out
+	 * first, its count will be one, since the master side holds an open.
+	 * Thus this test wouldn't be triggered at the time the slave closed,
+	 * so we do it now.
+	 */
+	while (1) {
+		do_sleep = 0;
+
+		if (tty->count <= 1) {
+			if (waitqueue_active(&tty->read_wait)) {
+				wake_up_poll(&tty->read_wait, EPOLLIN);
+				do_sleep++;
+			}
+			if (waitqueue_active(&tty->write_wait)) {
+				wake_up_poll(&tty->write_wait, EPOLLOUT);
+				do_sleep++;
+			}
+		}
+		if (o_tty && o_tty->count <= 1) {
+			if (waitqueue_active(&o_tty->read_wait)) {
+				wake_up_poll(&o_tty->read_wait, EPOLLIN);
+				do_sleep++;
+			}
+			if (waitqueue_active(&o_tty->write_wait)) {
+				wake_up_poll(&o_tty->write_wait, EPOLLOUT);
+				do_sleep++;
+			}
+		}
+		if (!do_sleep)
+			break;
+
+		if (once) {
+			once = 0;
+			tty_warn(tty, "read/write wait queue active!\n");
+		}
+		/* exponential backoff: 0, 1, 3, 7 ... jiffies, capped */
+		schedule_timeout_killable(timeout);
+		if (timeout < 120 * HZ)
+			timeout = 2 * timeout + 1;
+		else
+			timeout = MAX_SCHEDULE_TIMEOUT;
+	}
+
+	if (o_tty) {
+		if (--o_tty->count < 0) {
+			tty_warn(tty, "bad slave count (%d)\n", o_tty->count);
+			o_tty->count = 0;
+		}
+	}
+	if (--tty->count < 0) {
+		tty_warn(tty, "bad tty->count (%d)\n", tty->count);
+		tty->count = 0;
+	}
+
+	/*
+	 * We've decremented tty->count, so we need to remove this file
+	 * descriptor off the tty->tty_files list; this serves two
+	 * purposes:
+	 *  - check_tty_count sees the correct number of file descriptors
+	 *    associated with this tty.
+	 *  - do_tty_hangup no longer sees this file descriptor as
+	 *    something that needs to be handled for hangups.
+	 */
+	tty_del_file(filp);
+
+	/*
+	 * Perform some housekeeping before deciding whether to return.
+	 *
+	 * If _either_ side is closing, make sure there aren't any
+	 * processes that still think tty or o_tty is their controlling
+	 * tty.
+	 */
+	if (!tty->count) {
+		read_lock(&tasklist_lock);
+		session_clear_tty(tty->session);
+		if (o_tty)
+			session_clear_tty(o_tty->session);
+		read_unlock(&tasklist_lock);
+	}
+
+	/* check whether both sides are closing ... */
+	final = !tty->count && !(o_tty && o_tty->count);
+
+	tty_unlock_slave(o_tty);
+	tty_unlock(tty);
+
+	/* At this point, the tty->count == 0 should ensure a dead tty
+	   cannot be re-opened by a racing opener */
+
+	if (!final)
+		return 0;
+
+	tty_debug_hangup(tty, "final close\n");
+
+	tty_release_struct(tty, idx);
+	return 0;
+}
+
+/**
+ *	tty_open_current_tty - get locked tty of current task
+ *	@device: device number
+ *	@filp: file pointer to tty
+ *	@return: locked tty of the current task iff @device is /dev/tty
+ *
+ *	Performs a re-open of the current task's controlling tty.
+ *
+ *	We cannot return driver and index like for the other nodes because
+ *	devpts will not work then. It expects inodes to be from devpts FS.
+ */
+static struct tty_struct *tty_open_current_tty(dev_t device, struct file *filp)
+{
+	struct tty_struct *tty;
+	int retval;
+
+	/* Only handles /dev/tty; other devices fall through to the caller */
+	if (device != MKDEV(TTYAUX_MAJOR, 0))
+		return NULL;
+
+	tty = get_current_tty();
+	if (!tty)
+		return ERR_PTR(-ENXIO);
+
+	filp->f_flags |= O_NONBLOCK; /* Don't let /dev/tty block */
+	/* noctty = 1; */
+	tty_lock(tty);
+	tty_kref_put(tty);	/* safe to drop the kref now */
+
+	retval = tty_reopen(tty);
+	if (retval < 0) {
+		tty_unlock(tty);
+		tty = ERR_PTR(retval);
+	}
+	return tty;
+}
+
+/**
+ *	tty_lookup_driver - lookup a tty driver for a given device file
+ *	@device: device number
+ *	@filp: file pointer to tty (may be NULL for kernel opens)
+ *	@index: index for the device in the @return driver
+ *	@return: driver for this inode (with increased refcount)
+ *
+ * 	If @return is not erroneous, the caller is responsible to decrement the
+ * 	refcount by tty_driver_kref_put.
+ *
+ *	Locking: tty_mutex protects get_tty_driver
+ */
+static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
+		int *index)
+{
+	struct tty_driver *driver = NULL;
+
+	switch (device) {
+#ifdef CONFIG_VT
+	/* /dev/tty0: the currently active virtual console */
+	case MKDEV(TTY_MAJOR, 0): {
+		extern struct tty_driver *console_driver;
+		driver = tty_driver_kref_get(console_driver);
+		*index = fg_console;
+		break;
+	}
+#endif
+	/* /dev/console: whatever the registered console device is */
+	case MKDEV(TTYAUX_MAJOR, 1): {
+		struct tty_driver *console_driver = console_device(index);
+		if (console_driver) {
+			driver = tty_driver_kref_get(console_driver);
+			if (driver && filp) {
+				/* Don't let /dev/console block */
+				filp->f_flags |= O_NONBLOCK;
+				break;
+			}
+		}
+		if (driver)
+			tty_driver_kref_put(driver);
+		return ERR_PTR(-ENODEV);
+	}
+	default:
+		driver = get_tty_driver(device, index);
+		if (!driver)
+			return ERR_PTR(-ENODEV);
+		break;
+	}
+	return driver;
+}
+
+/**
+ *	tty_kopen	-	open a tty device for kernel
+ *	@device: dev_t of device to open
+ *
+ *	Opens tty exclusively for kernel. Performs the driver lookup,
+ *	makes sure it's not already opened and performs the first-time
+ *	tty initialization.
+ *
+ *	Returns the locked initialized &tty_struct
+ *
+ *	Claims the global tty_mutex to serialize:
+ *	  - concurrent first-time tty initialization
+ *	  - concurrent tty driver removal w/ lookup
+ *	  - concurrent tty removal from driver table
+ */
+struct tty_struct *tty_kopen(dev_t device)
+{
+	struct tty_struct *tty;
+	struct tty_driver *driver;
+	int index = -1;
+
+	/* tty_mutex serializes lookup vs. driver removal and first open */
+	mutex_lock(&tty_mutex);
+	driver = tty_lookup_driver(device, NULL, &index);
+	if (IS_ERR(driver)) {
+		mutex_unlock(&tty_mutex);
+		return ERR_CAST(driver);
+	}
+
+	/* check whether we're reopening an existing tty */
+	tty = tty_driver_lookup_tty(driver, NULL, index);
+	if (IS_ERR(tty))
+		goto out;
+
+	if (tty) {
+		/* drop kref from tty_driver_lookup_tty() */
+		tty_kref_put(tty);
+		/* kernel open demands exclusivity: already open -> busy */
+		tty = ERR_PTR(-EBUSY);
+	} else { /* tty_init_dev returns tty with the tty_lock held */
+		tty = tty_init_dev(driver, index);
+		if (IS_ERR(tty))
+			goto out;
+		/* mark the port so userspace opens are rejected */
+		tty_port_set_kopened(tty->port, 1);
+	}
+out:
+	mutex_unlock(&tty_mutex);
+	tty_driver_kref_put(driver);
+	return tty;
+}
+EXPORT_SYMBOL_GPL(tty_kopen);
+
+/**
+ *	tty_open_by_driver	-	open a tty device
+ *	@device: dev_t of device to open
+ *	@filp: file pointer to tty
+ *
+ *	Performs the driver lookup, checks for a reopen, or otherwise
+ *	performs the first-time tty initialization.
+ *
+ *	Returns the locked initialized or re-opened &tty_struct
+ *
+ *	Claims the global tty_mutex to serialize:
+ *	  - concurrent first-time tty initialization
+ *	  - concurrent tty driver removal w/ lookup
+ *	  - concurrent tty removal from driver table
+ */
+static struct tty_struct *tty_open_by_driver(dev_t device,
+					     struct file *filp)
+{
+	struct tty_struct *tty;
+	struct tty_driver *driver = NULL;
+	int index = -1;
+	int retval;
+
+	/* tty_mutex serializes lookup vs. driver removal and first open */
+	mutex_lock(&tty_mutex);
+	driver = tty_lookup_driver(device, filp, &index);
+	if (IS_ERR(driver)) {
+		mutex_unlock(&tty_mutex);
+		return ERR_CAST(driver);
+	}
+
+	/* check whether we're reopening an existing tty */
+	tty = tty_driver_lookup_tty(driver, filp, index);
+	if (IS_ERR(tty)) {
+		mutex_unlock(&tty_mutex);
+		goto out;
+	}
+
+	if (tty) {
+		/* tty is held open by the kernel (tty_kopen): refuse */
+		if (tty_port_kopened(tty->port)) {
+			tty_kref_put(tty);
+			mutex_unlock(&tty_mutex);
+			tty = ERR_PTR(-EBUSY);
+			goto out;
+		}
+		/* drop tty_mutex before sleeping on the tty lock */
+		mutex_unlock(&tty_mutex);
+		retval = tty_lock_interruptible(tty);
+		tty_kref_put(tty);  /* drop kref from tty_driver_lookup_tty() */
+		if (retval) {
+			if (retval == -EINTR)
+				retval = -ERESTARTSYS;
+			tty = ERR_PTR(retval);
+			goto out;
+		}
+		retval = tty_reopen(tty);
+		if (retval < 0) {
+			tty_unlock(tty);
+			tty = ERR_PTR(retval);
+		}
+	} else { /* Returns with the tty_lock held for now */
+		tty = tty_init_dev(driver, index);
+		mutex_unlock(&tty_mutex);
+	}
+out:
+	tty_driver_kref_put(driver);
+	return tty;
+}
+
+/**
+ *	tty_open		-	open a tty device
+ *	@inode: inode of device file
+ *	@filp: file pointer to tty
+ *
+ *	tty_open and tty_release keep up the tty count that contains the
+ *	number of opens done on a tty. We cannot use the inode-count, as
+ *	different inodes might point to the same tty.
+ *
+ *	Open-counting is needed for pty masters, as well as for keeping
+ *	track of serial lines: DTR is dropped when the last close happens.
+ *	(This is not done solely through tty->count, now.  - Ted 1/27/92)
+ *
+ *	The termios state of a pty is reset on first open so that
+ *	settings don't persist across reuse.
+ *
+ *	Locking: tty_mutex protects tty, tty_lookup_driver and tty_init_dev.
+ *		 tty->count should protect the rest.
+ *		 ->siglock protects ->signal/->sighand
+ *
+ *	Note: the tty_unlock/lock cases without a ref are only safe due to
+ *	tty_mutex
+ */
+
+static int tty_open(struct inode *inode, struct file *filp)
+{
+	struct tty_struct *tty;
+	int noctty, retval;
+	dev_t device = inode->i_rdev;
+	unsigned saved_flags = filp->f_flags;
+
+	nonseekable_open(inode, filp);
+
+retry_open:
+	/* per-file tty bookkeeping must exist before attaching a tty */
+	retval = tty_alloc_file(filp);
+	if (retval)
+		return -ENOMEM;
+
+	/* fast path: /dev/tty re-opens the caller's controlling tty */
+	tty = tty_open_current_tty(device, filp);
+	if (!tty)
+		tty = tty_open_by_driver(device, filp);
+
+	if (IS_ERR(tty)) {
+		tty_free_file(filp);
+		retval = PTR_ERR(tty);
+		if (retval != -EAGAIN || signal_pending(current))
+			return retval;
+		/* -EAGAIN: tty is mid-teardown; yield and retry the open */
+		schedule();
+		goto retry_open;
+	}
+
+	tty_add_file(tty, filp);
+
+	check_tty_count(tty, __func__);
+	tty_debug_hangup(tty, "opening (count=%d)\n", tty->count);
+
+	if (tty->ops->open)
+		retval = tty->ops->open(tty, filp);
+	else
+		retval = -ENODEV;
+	/* undo any O_NONBLOCK forced on by the lookup helpers */
+	filp->f_flags = saved_flags;
+
+	if (retval) {
+		tty_debug_hangup(tty, "open error %d, releasing\n", retval);
+
+		tty_unlock(tty); /* need to call tty_release without BTM */
+		tty_release(inode, filp);
+		if (retval != -ERESTARTSYS)
+			return retval;
+
+		if (signal_pending(current))
+			return retval;
+
+		schedule();
+		/*
+		 * Need to reset f_op in case a hangup happened.
+		 */
+		if (tty_hung_up_p(filp))
+			filp->f_op = &tty_fops;
+		goto retry_open;
+	}
+	clear_bit(TTY_HUPPED, &tty->flags);
+
+	/* decide whether this open may acquire a controlling terminal */
+	noctty = (filp->f_flags & O_NOCTTY) ||
+		 (IS_ENABLED(CONFIG_VT) && device == MKDEV(TTY_MAJOR, 0)) ||
+		 device == MKDEV(TTYAUX_MAJOR, 1) ||
+		 (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+		  tty->driver->subtype == PTY_TYPE_MASTER);
+	if (!noctty)
+		tty_open_proc_set_tty(filp, tty);
+	tty_unlock(tty);
+	return 0;
+}
+
+
+
+/**
+ *	tty_poll	-	check tty status
+ *	@filp: file being polled
+ *	@wait: poll wait structures to update
+ *
+ *	Call the line discipline polling method to obtain the poll
+ *	status of the device.
+ *
+ *	Locking: locks called line discipline but ldisc poll method
+ *	may be re-entered freely by other callers.
+ */
+
+static __poll_t tty_poll(struct file *filp, poll_table *wait)
+{
+	struct tty_struct *tty = file_tty(filp);
+	struct tty_ldisc *disc;
+	__poll_t events = 0;
+
+	if (tty_paranoia_check(tty, file_inode(filp), "tty_poll"))
+		return 0;
+
+	disc = tty_ldisc_ref_wait(tty);
+	if (!disc) {
+		/* the tty was hung up: report the hangup poll state */
+		return hung_up_tty_poll(filp, wait);
+	}
+	if (disc->ops->poll)
+		events = disc->ops->poll(tty, filp, wait);
+	tty_ldisc_deref(disc);
+	return events;
+}
+
+static int __tty_fasync(int fd, struct file *filp, int on)
+{
+	struct tty_struct *tty = file_tty(filp);
+	unsigned long flags;
+	int retval = 0;
+
+	if (tty_paranoia_check(tty, file_inode(filp), "tty_fasync"))
+		goto out;
+
+	/* <= 0 means the fasync entry was removed or unchanged */
+	retval = fasync_helper(fd, filp, on, &tty->fasync);
+	if (retval <= 0)
+		goto out;
+
+	if (on) {
+		enum pid_type type;
+		struct pid *pid;
+
+		/*
+		 * SIGIO goes to the foreground process group if there is
+		 * one, otherwise to the current thread group.
+		 */
+		spin_lock_irqsave(&tty->ctrl_lock, flags);
+		if (tty->pgrp) {
+			pid = tty->pgrp;
+			type = PIDTYPE_PGID;
+		} else {
+			pid = task_pid(current);
+			type = PIDTYPE_TGID;
+		}
+		/* pin the pid so it stays valid after dropping ctrl_lock */
+		get_pid(pid);
+		spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+		__f_setown(filp, pid, type, 0);
+		put_pid(pid);
+		retval = 0;
+	}
+out:
+	return retval;
+}
+
+/* Locked wrapper: refuse fasync changes on a hung-up tty. */
+static int tty_fasync(int fd, struct file *filp, int on)
+{
+	struct tty_struct *tty = file_tty(filp);
+	int err;
+
+	tty_lock(tty);
+	err = tty_hung_up_p(filp) ? -ENOTTY : __tty_fasync(fd, filp, on);
+	tty_unlock(tty);
+
+	return err;
+}
+
+/**
+ *	tiocsti			-	fake input character
+ *	@tty: tty to fake input into
+ *	@p: pointer to character
+ *
+ *	Fake input to a tty device. Does the necessary locking and
+ *	input management.
+ *
+ *	FIXME: does not honour flow control ??
+ *
+ *	Locking:
+ *		Called functions take tty_ldiscs_lock
+ *		current->signal->tty check is safe without locks
+ */
+
+static int tiocsti(struct tty_struct *tty, char __user *p)
+{
+	struct tty_ldisc *ld;
+	char c;
+	char flag = 0;	/* TTY_NORMAL flag byte for receive_buf */
+
+	/* only the owner of the controlling tty (or admin) may inject */
+	if (tty != current->signal->tty && !capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	if (get_user(c, p))
+		return -EFAULT;
+	tty_audit_tiocsti(tty, c);
+	ld = tty_ldisc_ref_wait(tty);
+	if (!ld)
+		return -EIO;
+	tty_buffer_lock_exclusive(tty->port);
+	if (ld->ops->receive_buf)
+		ld->ops->receive_buf(tty, &c, &flag, 1);
+	tty_buffer_unlock_exclusive(tty->port);
+	tty_ldisc_deref(ld);
+	return 0;
+}
+
+/**
+ *	tiocgwinsz		-	implement window query ioctl
+ *	@tty: tty
+ *	@arg: user buffer for result
+ *
+ *	Copies the kernel idea of the window size into the user buffer.
+ *
+ *	Locking: tty->winsize_mutex is taken to ensure the winsize data
+ *		is consistent.
+ */
+
+static int tiocgwinsz(struct tty_struct *tty, struct winsize __user *arg)
+{
+	int ret = 0;
+
+	/* winsize_mutex keeps the copied snapshot internally consistent */
+	mutex_lock(&tty->winsize_mutex);
+	if (copy_to_user(arg, &tty->winsize, sizeof(*arg)))
+		ret = -EFAULT;
+	mutex_unlock(&tty->winsize_mutex);
+
+	return ret;
+}
+
+/**
+ *	tty_do_resize		-	resize event
+ *	@tty: tty being resized
+ *	@ws: new dimensions
+ *
+ *	Update the termios variables and send the necessary signals to
+ *	peform a terminal resize correctly
+ */
+
+int tty_do_resize(struct tty_struct *tty, struct winsize *ws)
+{
+	struct pid *pgrp;
+
+	/* Lock the tty */
+	mutex_lock(&tty->winsize_mutex);
+	/* unchanged size: signal nobody, succeed silently */
+	if (!memcmp(ws, &tty->winsize, sizeof(*ws)))
+		goto done;
+
+	/* Signal the foreground process group */
+	pgrp = tty_get_pgrp(tty);
+	if (pgrp)
+		kill_pgrp(pgrp, SIGWINCH, 1);
+	/* put_pid() tolerates NULL, so no guard needed */
+	put_pid(pgrp);
+
+	tty->winsize = *ws;
+done:
+	mutex_unlock(&tty->winsize_mutex);
+	return 0;
+}
+EXPORT_SYMBOL(tty_do_resize);
+
+/**
+ *	tiocswinsz		-	implement window size set ioctl
+ *	@tty: tty side of tty
+ *	@arg: user buffer for result
+ *
+ *	Copies the user idea of the window size to the kernel. Traditionally
+ *	this is just advisory information but for the Linux console it
+ *	actually has driver level meaning and triggers a VC resize.
+ *
+ *	Locking:
+ *		Driver dependent. The default do_resize method takes the
+ *	tty termios mutex and ctrl_lock. The console takes its own lock
+ *	then calls into the default method.
+ */
+
+static int tiocswinsz(struct tty_struct *tty, struct winsize __user *arg)
+{
+	struct winsize ws;
+
+	if (copy_from_user(&ws, arg, sizeof(*arg)))
+		return -EFAULT;
+
+	/* a driver resize hook (e.g. the console) takes precedence */
+	if (tty->ops->resize)
+		return tty->ops->resize(tty, &ws);
+	return tty_do_resize(tty, &ws);
+}
+
+/**
+ *	tioccons	-	allow admin to move logical console
+ *	@file: the file to become console
+ *
+ *	Allow the administrator to move the redirected console device
+ *
+ *	Locking: uses redirect_lock to guard the redirect information
+ */
+
+static int tioccons(struct file *file)
+{
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	/* called on the redirected console itself: cancel the redirect */
+	if (file->f_op->write_iter == redirected_tty_write) {
+		struct file *f;
+		spin_lock(&redirect_lock);
+		f = redirect;
+		redirect = NULL;
+		spin_unlock(&redirect_lock);
+		/* drop the file reference outside the spinlock */
+		if (f)
+			fput(f);
+		return 0;
+	}
+	if (file->f_op->write_iter != tty_write)
+		return -ENOTTY;
+	if (!(file->f_mode & FMODE_WRITE))
+		return -EBADF;
+	if (!(file->f_mode & FMODE_CAN_WRITE))
+		return -EINVAL;
+	spin_lock(&redirect_lock);
+	/* only one redirect target may be active at a time */
+	if (redirect) {
+		spin_unlock(&redirect_lock);
+		return -EBUSY;
+	}
+	redirect = get_file(file);
+	spin_unlock(&redirect_lock);
+	return 0;
+}
+
+/**
+ *	tiocsetd	-	set line discipline
+ *	@tty: tty device
+ *	@p: pointer to user data
+ *
+ *	Set the line discipline according to user request.
+ *
+ *	Locking: see tty_set_ldisc, this function is just a helper
+ */
+
+static int tiocsetd(struct tty_struct *tty, int __user *p)
+{
+	int ldisc_num;
+
+	if (get_user(ldisc_num, p))
+		return -EFAULT;
+
+	/* tty_set_ldisc does all locking and validation */
+	return tty_set_ldisc(tty, ldisc_num);
+}
+
+/**
+ *	tiocgetd	-	get line discipline
+ *	@tty: tty device
+ *	@p: pointer to user data
+ *
+ *	Retrieves the line discipline id directly from the ldisc.
+ *
+ *	Locking: waits for ldisc reference (in case the line discipline
+ *		is changing or the tty is being hungup)
+ */
+
+static int tiocgetd(struct tty_struct *tty, int __user *p)
+{
+	int err;
+	struct tty_ldisc *ld = tty_ldisc_ref_wait(tty);
+
+	/* no ldisc reference means the tty is being hung up */
+	if (!ld)
+		return -EIO;
+	err = put_user(ld->ops->num, p);
+	tty_ldisc_deref(ld);
+	return err;
+}
+
+/**
+ *	send_break	-	performed time break
+ *	@tty: device to break on
+ *	@duration: timeout in mS
+ *
+ *	Perform a timed break on hardware that lacks its own driver level
+ *	timed break functionality.
+ *
+ *	Locking:
+ *		atomic_write_lock serializes
+ *
+ */
+
+static int send_break(struct tty_struct *tty, unsigned int duration)
+{
+	int retval;
+
+	/* driver can't do break at all: silently succeed */
+	if (tty->ops->break_ctl == NULL)
+		return 0;
+
+	if (tty->driver->flags & TTY_DRIVER_HARDWARE_BREAK)
+		retval = tty->ops->break_ctl(tty, duration);
+	else {
+		/* Do the work ourselves */
+		if (tty_write_lock(tty, 0) < 0)
+			return -EINTR;
+		/* -1 = break on, 0 = break off */
+		retval = tty->ops->break_ctl(tty, -1);
+		if (retval)
+			goto out;
+		if (!signal_pending(current))
+			msleep_interruptible(duration);
+		retval = tty->ops->break_ctl(tty, 0);
+out:
+		tty_write_unlock(tty);
+		/* an interrupted sleep still turned the break off above */
+		if (signal_pending(current))
+			retval = -EINTR;
+	}
+	return retval;
+}
+
+/**
+ *	tty_tiocmget		-	get modem status
+ *	@tty: tty device
+ *	@p: pointer to result
+ *
+ *	Obtain the modem status bits from the tty driver if the feature
+ *	is supported. Return -ENOTTY if it is not available.
+ *
+ *	Locking: none (up to the driver)
+ */
+
+static int tty_tiocmget(struct tty_struct *tty, int __user *p)
+{
+	int status;
+
+	/* feature is optional; drivers without it get -ENOTTY */
+	if (!tty->ops->tiocmget)
+		return -ENOTTY;
+
+	status = tty->ops->tiocmget(tty);
+	if (status < 0)
+		return status;
+	return put_user(status, p);
+}
+
+/**
+ *	tty_tiocmset		-	set modem status
+ *	@tty: tty device
+ *	@cmd: command - clear bits, set bits or set all
+ *	@p: pointer to desired bits
+ *
+ *	Set the modem status bits from the tty driver if the feature
+ *	is supported. Return -ENOTTY if it is not available.
+ *
+ *	Locking: none (up to the driver)
+ */
+
+static int tty_tiocmset(struct tty_struct *tty, unsigned int cmd,
+	     unsigned __user *p)
+{
+	const unsigned int mask = TIOCM_DTR | TIOCM_RTS | TIOCM_OUT1 |
+				  TIOCM_OUT2 | TIOCM_LOOP;
+	unsigned int bits, set = 0, clear = 0;
+	int err;
+
+	if (tty->ops->tiocmset == NULL)
+		return -ENOTTY;
+
+	err = get_user(bits, p);
+	if (err)
+		return err;
+
+	switch (cmd) {
+	case TIOCMBIS:		/* turn the given bits on */
+		set = bits;
+		break;
+	case TIOCMBIC:		/* turn the given bits off */
+		clear = bits;
+		break;
+	case TIOCMSET:		/* set to exactly the given bits */
+		set = bits;
+		clear = ~bits;
+		break;
+	}
+
+	/* restrict to the modem-control bits userspace may drive */
+	return tty->ops->tiocmset(tty, set & mask, clear & mask);
+}
+
+static int tty_tiocgicount(struct tty_struct *tty, void __user *arg)
+{
+	struct serial_icounter_struct icount;
+	int err = -EINVAL;
+
+	memset(&icount, 0, sizeof(icount));
+	/* interrupt counters are optional driver functionality */
+	if (tty->ops->get_icount)
+		err = tty->ops->get_icount(tty, &icount);
+	if (err)
+		return err;
+	if (copy_to_user(arg, &icount, sizeof(icount)))
+		return -EFAULT;
+	return 0;
+}
+
+static int tty_tiocsserial(struct tty_struct *tty, struct serial_struct __user *ss)
+{
+	static DEFINE_RATELIMIT_STATE(depr_flags,
+			DEFAULT_RATELIMIT_INTERVAL,
+			DEFAULT_RATELIMIT_BURST);
+	char comm[TASK_COMM_LEN];
+	struct serial_struct v;
+	int flags;
+
+	if (copy_from_user(&v, ss, sizeof(*ss)))
+		return -EFAULT;
+
+	/* warn (rate-limited) about ASYNC_* flags that no longer do anything */
+	flags = v.flags & ASYNC_DEPRECATED;
+
+	if (flags && __ratelimit(&depr_flags))
+		pr_warn("%s: '%s' is using deprecated serial flags (with no effect): %.8x\n",
+			__func__, get_task_comm(comm, current), flags);
+	if (!tty->ops->set_serial)
+		return -ENOTTY;
+	return tty->ops->set_serial(tty, &v);
+}
+
+static int tty_tiocgserial(struct tty_struct *tty, struct serial_struct __user *ss)
+{
+	struct serial_struct state;
+	int ret;
+
+	if (!tty->ops->get_serial)
+		return -ENOTTY;
+
+	/* zero first so fields the driver skips don't leak stack data */
+	memset(&state, 0, sizeof(state));
+	ret = tty->ops->get_serial(tty, &state);
+	if (ret)
+		return ret;
+	if (copy_to_user(ss, &state, sizeof(state)))
+		return -EFAULT;
+	return 0;
+}
+
+/*
+ * if pty, return the slave side (real_tty)
+ * otherwise, return self
+ */
+static struct tty_struct *tty_pair_get_tty(struct tty_struct *tty)
+{
+	bool pty_master = tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+			  tty->driver->subtype == PTY_TYPE_MASTER;
+
+	/* the master's link is its slave (the "real" tty) */
+	return pty_master ? tty->link : tty;
+}
+
+/*
+ * Split this up, as gcc can choke on it otherwise..
+ */
+long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct tty_struct *tty = file_tty(file);
+	struct tty_struct *real_tty;
+	void __user *p = (void __user *)arg;
+	int retval;
+	struct tty_ldisc *ld;
+
+	if (tty_paranoia_check(tty, file_inode(file), "tty_ioctl"))
+		return -EINVAL;
+
+	/* for a pty master, most ioctls act on the slave side */
+	real_tty = tty_pair_get_tty(tty);
+
+	/*
+	 * Factor out some common prep work
+	 */
+	switch (cmd) {
+	case TIOCSETD:
+	case TIOCSBRK:
+	case TIOCCBRK:
+	case TCSBRK:
+	case TCSBRKP:
+		/* job-control check, then drain output before acting */
+		retval = tty_check_change(tty);
+		if (retval)
+			return retval;
+		if (cmd != TIOCCBRK) {
+			tty_wait_until_sent(tty, 0);
+			if (signal_pending(current))
+				return -EINTR;
+		}
+		break;
+	}
+
+	/*
+	 *	Now do the stuff.
+	 */
+	switch (cmd) {
+	case TIOCSTI:
+		return tiocsti(tty, p);
+	case TIOCGWINSZ:
+		return tiocgwinsz(real_tty, p);
+	case TIOCSWINSZ:
+		return tiocswinsz(real_tty, p);
+	case TIOCCONS:
+		return real_tty != tty ? -EINVAL : tioccons(file);
+	case TIOCEXCL:
+		set_bit(TTY_EXCLUSIVE, &tty->flags);
+		return 0;
+	case TIOCNXCL:
+		clear_bit(TTY_EXCLUSIVE, &tty->flags);
+		return 0;
+	case TIOCGEXCL:
+	{
+		int excl = test_bit(TTY_EXCLUSIVE, &tty->flags);
+		return put_user(excl, (int __user *)p);
+	}
+	case TIOCGETD:
+		return tiocgetd(tty, p);
+	case TIOCSETD:
+		return tiocsetd(tty, p);
+	case TIOCVHANGUP:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		tty_vhangup(tty);
+		return 0;
+	case TIOCGDEV:
+	{
+		unsigned int ret = new_encode_dev(tty_devnum(real_tty));
+		return put_user(ret, (unsigned int __user *)p);
+	}
+	/*
+	 * Break handling
+	 */
+	case TIOCSBRK:	/* Turn break on, unconditionally */
+		if (tty->ops->break_ctl)
+			return tty->ops->break_ctl(tty, -1);
+		return 0;
+	case TIOCCBRK:	/* Turn break off, unconditionally */
+		if (tty->ops->break_ctl)
+			return tty->ops->break_ctl(tty, 0);
+		return 0;
+	case TCSBRK:   /* SVID version: non-zero arg --> no break */
+		/* non-zero arg means wait for all output data
+		 * to be sent (performed above) but don't send break.
+		 * This is used by the tcdrain() termios function.
+		 */
+		if (!arg)
+			return send_break(tty, 250);
+		return 0;
+	case TCSBRKP:	/* support for POSIX tcsendbreak() */
+		return send_break(tty, arg ? arg*100 : 250);
+
+	case TIOCMGET:
+		return tty_tiocmget(tty, p);
+	case TIOCMSET:
+	case TIOCMBIC:
+	case TIOCMBIS:
+		return tty_tiocmset(tty, cmd, p);
+	case TIOCGICOUNT:
+		return tty_tiocgicount(tty, p);
+	case TCFLSH:
+		switch (arg) {
+		case TCIFLUSH:
+		case TCIOFLUSH:
+		/* flush tty buffer and allow ldisc to process ioctl */
+			tty_buffer_flush(tty, NULL);
+			break;
+		}
+		/* fall through to the driver/ldisc for the real flush */
+		break;
+	case TIOCSSERIAL:
+		return tty_tiocsserial(tty, p);
+	case TIOCGSERIAL:
+		return tty_tiocgserial(tty, p);
+	case TIOCGPTPEER:
+		/* Special because the struct file is needed */
+		return ptm_open_peer(file, tty, (int)arg);
+	default:
+		retval = tty_jobctrl_ioctl(tty, real_tty, file, cmd, arg);
+		if (retval != -ENOIOCTLCMD)
+			return retval;
+	}
+	/* not a core ioctl: give the driver a chance next */
+	if (tty->ops->ioctl) {
+		retval = tty->ops->ioctl(tty, cmd, arg);
+		if (retval != -ENOIOCTLCMD)
+			return retval;
+	}
+	/* finally try the line discipline */
+	ld = tty_ldisc_ref_wait(tty);
+	if (!ld)
+		return hung_up_tty_ioctl(file, cmd, arg);
+	retval = -EINVAL;
+	if (ld->ops->ioctl) {
+		retval = ld->ops->ioctl(tty, file, cmd, arg);
+		if (retval == -ENOIOCTLCMD)
+			retval = -ENOTTY;
+	}
+	tty_ldisc_deref(ld);
+	return retval;
+}
+
+#ifdef CONFIG_COMPAT
+
+/*
+ * 32-bit userspace layout of struct serial_struct. Pointer- and
+ * long-sized members are shrunk to compat types so a 64-bit kernel can
+ * exchange this with 32-bit processes. The field order must mirror
+ * struct serial_struct: the compat helpers memcpy the common prefix up
+ * to iomem_base and convert the remaining fields by hand.
+ */
+struct serial_struct32 {
+	compat_int_t    type;
+	compat_int_t    line;
+	compat_uint_t   port;
+	compat_int_t    irq;
+	compat_int_t    flags;
+	compat_int_t    xmit_fifo_size;
+	compat_int_t    custom_divisor;
+	compat_int_t    baud_base;
+	unsigned short  close_delay;
+	char    io_type;
+	char    reserved_char;
+	compat_int_t    hub6;
+	unsigned short  closing_wait; /* time to wait before closing */
+	unsigned short  closing_wait2; /* no longer used... */
+	compat_uint_t   iomem_base;
+	unsigned short  iomem_reg_shift;
+	unsigned int    port_high;
+	/* compat_ulong_t  iomap_base FIXME */
+	compat_int_t    reserved;
+};
+
+static int compat_tty_tiocsserial(struct tty_struct *tty,
+		struct serial_struct32 __user *ss)
+{
+	static DEFINE_RATELIMIT_STATE(depr_flags,
+			DEFAULT_RATELIMIT_INTERVAL,
+			DEFAULT_RATELIMIT_BURST);
+	char comm[TASK_COMM_LEN];
+	struct serial_struct32 v32;
+	struct serial_struct v;
+	int flags;
+
+	if (copy_from_user(&v32, ss, sizeof(*ss)))
+		return -EFAULT;
+
+	/* the layouts agree up to iomem_base; widen the rest by hand */
+	memcpy(&v, &v32, offsetof(struct serial_struct32, iomem_base));
+	v.iomem_base = compat_ptr(v32.iomem_base);
+	v.iomem_reg_shift = v32.iomem_reg_shift;
+	v.port_high = v32.port_high;
+	v.iomap_base = 0;
+
+	/* warn (rate-limited) about ASYNC_* flags that no longer do anything */
+	flags = v.flags & ASYNC_DEPRECATED;
+
+	if (flags && __ratelimit(&depr_flags))
+		pr_warn("%s: '%s' is using deprecated serial flags (with no effect): %.8x\n",
+			__func__, get_task_comm(comm, current), flags);
+	if (!tty->ops->set_serial)
+		return -ENOTTY;
+	return tty->ops->set_serial(tty, &v);
+}
+
+static int compat_tty_tiocgserial(struct tty_struct *tty,
+			struct serial_struct32 __user *ss)
+{
+	struct serial_struct32 v32;
+	struct serial_struct v;
+	int err;
+
+	memset(&v, 0, sizeof(v));
+	memset(&v32, 0, sizeof(v32));
+
+	if (!tty->ops->get_serial)
+		return -ENOTTY;
+	err = tty->ops->get_serial(tty, &v);
+	if (!err) {
+		/* layouts agree up to iomem_base; narrow the rest by hand */
+		memcpy(&v32, &v, offsetof(struct serial_struct32, iomem_base));
+		/* a pointer that doesn't fit in 32 bits becomes a sentinel */
+		v32.iomem_base = (unsigned long)v.iomem_base >> 32 ?
+			0xfffffff : ptr_to_compat(v.iomem_base);
+		v32.iomem_reg_shift = v.iomem_reg_shift;
+		v32.port_high = v.port_high;
+		if (copy_to_user(ss, &v32, sizeof(v32)))
+			err = -EFAULT;
+	}
+	return err;
+}
+/*
+ * 32-bit compat entry point. Commands whose argument layout is
+ * identical in both ABIs (or is not a pointer) are forwarded straight
+ * to tty_ioctl(); only TIOCSSERIAL/TIOCGSERIAL need layout conversion.
+ */
+static long tty_compat_ioctl(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	struct tty_struct *tty = file_tty(file);
+	struct tty_ldisc *ld;
+	int retval = -ENOIOCTLCMD;
+
+	switch (cmd) {
+	/* pointer arguments with identical layout: translate the pointer */
+	case TIOCOUTQ:
+	case TIOCSTI:
+	case TIOCGWINSZ:
+	case TIOCSWINSZ:
+	case TIOCGEXCL:
+	case TIOCGETD:
+	case TIOCSETD:
+	case TIOCGDEV:
+	case TIOCMGET:
+	case TIOCMSET:
+	case TIOCMBIC:
+	case TIOCMBIS:
+	case TIOCGICOUNT:
+	case TIOCGPGRP:
+	case TIOCSPGRP:
+	case TIOCGSID:
+	case TIOCSERGETLSR:
+	case TIOCGRS485:
+	case TIOCSRS485:
+#ifdef TIOCGETP
+	case TIOCGETP:
+	case TIOCSETP:
+	case TIOCSETN:
+#endif
+#ifdef TIOCGETC
+	case TIOCGETC:
+	case TIOCSETC:
+#endif
+#ifdef TIOCGLTC
+	case TIOCGLTC:
+	case TIOCSLTC:
+#endif
+	case TCSETSF:
+	case TCSETSW:
+	case TCSETS:
+	case TCGETS:
+#ifdef TCGETS2
+	case TCGETS2:
+	case TCSETSF2:
+	case TCSETSW2:
+	case TCSETS2:
+#endif
+	case TCGETA:
+	case TCSETAF:
+	case TCSETAW:
+	case TCSETA:
+	case TIOCGLCKTRMIOS:
+	case TIOCSLCKTRMIOS:
+#ifdef TCGETX
+	case TCGETX:
+	case TCSETX:
+	case TCSETXW:
+	case TCSETXF:
+#endif
+	case TIOCGSOFTCAR:
+	case TIOCSSOFTCAR:
+
+	case PPPIOCGCHAN:
+	case PPPIOCGUNIT:
+		return tty_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+	/* integer or no argument: pass through untouched */
+	case TIOCCONS:
+	case TIOCEXCL:
+	case TIOCNXCL:
+	case TIOCVHANGUP:
+	case TIOCSBRK:
+	case TIOCCBRK:
+	case TCSBRK:
+	case TCSBRKP:
+	case TCFLSH:
+	case TIOCGPTPEER:
+	case TIOCNOTTY:
+	case TIOCSCTTY:
+	case TCXONC:
+	case TIOCMIWAIT:
+	case TIOCSERCONFIG:
+		return tty_ioctl(file, cmd, arg);
+	}
+
+	if (tty_paranoia_check(tty, file_inode(file), "tty_ioctl"))
+		return -EINVAL;
+
+	switch (cmd) {
+	/* layout differs between ABIs: needs explicit conversion */
+	case TIOCSSERIAL:
+		return compat_tty_tiocsserial(tty, compat_ptr(arg));
+	case TIOCGSERIAL:
+		return compat_tty_tiocgserial(tty, compat_ptr(arg));
+	}
+	if (tty->ops->compat_ioctl) {
+		retval = tty->ops->compat_ioctl(tty, cmd, arg);
+		if (retval != -ENOIOCTLCMD)
+			return retval;
+	}
+
+	ld = tty_ldisc_ref_wait(tty);
+	if (!ld)
+		return hung_up_tty_compat_ioctl(file, cmd, arg);
+	if (ld->ops->compat_ioctl)
+		retval = ld->ops->compat_ioctl(tty, file, cmd, arg);
+	if (retval == -ENOIOCTLCMD && ld->ops->ioctl)
+		/*
+		 * NOTE(review): compat_ptr() is applied to cmd rather than
+		 * arg here, which looks suspicious — verify against the
+		 * matching upstream tty_io.c before changing anything.
+		 */
+		retval = ld->ops->ioctl(tty, file,
+				(unsigned long)compat_ptr(cmd), arg);
+	tty_ldisc_deref(ld);
+
+	return retval;
+}
+#endif
+
+/* iterate_fd callback: non-zero (fd + 1) iff this fd refers to tty t */
+static int this_tty(const void *t, struct file *file, unsigned fd)
+{
+	if (unlikely(file->f_op->read_iter == tty_read) &&
+	    file_tty(file) == t)
+		return fd + 1;
+	return 0;
+}
+	
+/*
+ * This implements the "Secure Attention Key" ---  the idea is to
+ * prevent trojan horses by killing all processes associated with this
+ * tty when the user hits the "Secure Attention Key".  Required for
+ * super-paranoid applications --- see the Orange Book for more details.
+ *
+ * This code could be nicer; ideally it should send a HUP, wait a few
+ * seconds, then send a INT, and then a KILL signal.  But you then
+ * have to coordinate with the init process, since all processes associated
+ * with the current tty must be dead before the new getty is allowed
+ * to spawn.
+ *
+ * Now, if it would be correct ;-/ The current code has a nasty hole -
+ * it doesn't catch files in flight. We may send the descriptor to ourselves
+ * via AF_UNIX socket, close it and later fetch from socket. FIXME.
+ *
+ * Nasty bug: do_SAK is being called in interrupt context.  This can
+ * deadlock.  We punt it up to process context.  AKPM - 16Mar2001
+ */
+void __do_SAK(struct tty_struct *tty)
+{
+#ifdef TTY_SOFT_SAK
+	/* soft variant: just hang the tty up */
+	tty_hangup(tty);
+#else
+	struct task_struct *g, *p;
+	struct pid *session;
+	int		i;
+	unsigned long flags;
+
+	if (!tty)
+		return;
+
+	/* pin the session pid while we walk the task lists */
+	spin_lock_irqsave(&tty->ctrl_lock, flags);
+	session = get_pid(tty->session);
+	spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+
+	tty_ldisc_flush(tty);
+
+	tty_driver_flush_buffer(tty);
+
+	read_lock(&tasklist_lock);
+	/* Kill the entire session */
+	do_each_pid_task(session, PIDTYPE_SID, p) {
+		tty_notice(tty, "SAK: killed process %d (%s): by session\n",
+			   task_pid_nr(p), p->comm);
+		group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_SID);
+	} while_each_pid_task(session, PIDTYPE_SID, p);
+
+	/* Now kill any processes that happen to have the tty open */
+	do_each_thread(g, p) {
+		/* case 1: it's their controlling tty */
+		if (p->signal->tty == tty) {
+			tty_notice(tty, "SAK: killed process %d (%s): by controlling tty\n",
+				   task_pid_nr(p), p->comm);
+			group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_SID);
+			continue;
+		}
+		/* case 2: they hold an open fd on it (i is fd + 1) */
+		task_lock(p);
+		i = iterate_fd(p->files, 0, this_tty, tty);
+		if (i != 0) {
+			tty_notice(tty, "SAK: killed process %d (%s): by fd#%d\n",
+				   task_pid_nr(p), p->comm, i - 1);
+			group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_SID);
+		}
+		task_unlock(p);
+	} while_each_thread(g, p);
+	read_unlock(&tasklist_lock);
+	put_pid(session);
+#endif
+}
+
+/* Workqueue entry: recover the owning tty and perform the SAK. */
+static void do_SAK_work(struct work_struct *work)
+{
+	__do_SAK(container_of(work, struct tty_struct, SAK_work));
+}
+
+/*
+ * The tq handling here is a little racy - tty->SAK_work may already be queued.
+ * Fortunately we don't need to worry, because if ->SAK_work is already queued,
+ * the values which we write to it will be identical to the values which it
+ * already has. --akpm
+ */
+void do_SAK(struct tty_struct *tty)
+{
+	/* punt to process context; see do_SAK_work() */
+	if (tty)
+		schedule_work(&tty->SAK_work);
+}
+
+EXPORT_SYMBOL(do_SAK);
+
+/* Must put_device() after it's unused! */
+static struct device *tty_get_device(struct tty_struct *tty)
+{
+	/* looks the device up by its dev_t in the tty class */
+	return class_find_device_by_devt(tty_class, tty_devnum(tty));
+}
+
+
+/**
+ *	alloc_tty_struct
+ *
+ *	This subroutine allocates and initializes a tty structure.
+ *
+ *	Locking: none - tty in question is not exposed at this point
+ */
+
+struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
+{
+	struct tty_struct *tty;
+
+	tty = kzalloc(sizeof(*tty), GFP_KERNEL);
+	if (!tty)
+		return NULL;
+
+	kref_init(&tty->kref);
+	tty->magic = TTY_MAGIC;
+	/* ldisc init can allocate and therefore fail */
+	if (tty_ldisc_init(tty)) {
+		kfree(tty);
+		return NULL;
+	}
+	tty->session = NULL;
+	tty->pgrp = NULL;
+	/* set up every lock and queue before the tty becomes visible */
+	mutex_init(&tty->legacy_mutex);
+	mutex_init(&tty->throttle_mutex);
+	init_rwsem(&tty->termios_rwsem);
+	mutex_init(&tty->winsize_mutex);
+	init_ldsem(&tty->ldisc_sem);
+	init_waitqueue_head(&tty->write_wait);
+	init_waitqueue_head(&tty->read_wait);
+	INIT_WORK(&tty->hangup_work, do_tty_hangup);
+	mutex_init(&tty->atomic_write_lock);
+	spin_lock_init(&tty->ctrl_lock);
+	spin_lock_init(&tty->flow_lock);
+	spin_lock_init(&tty->files_lock);
+	INIT_LIST_HEAD(&tty->tty_files);
+	INIT_WORK(&tty->SAK_work, do_SAK_work);
+
+	/* bind the tty to its driver, line number, name and device */
+	tty->driver = driver;
+	tty->ops = driver->ops;
+	tty->index = idx;
+	tty_line_name(driver, idx, tty->name);
+	tty->dev = tty_get_device(tty);
+
+	return tty;
+}
+
+/**
+ *	tty_put_char	-	write one character to a tty
+ *	@tty: tty
+ *	@ch: character
+ *
+ *	Write one byte to the tty using the provided put_char method
+ *	if present. Returns the number of characters successfully output.
+ *
+ *	Note: the specific put_char operation in the driver layer may go
+ *	away soon. Don't call it directly, use this method
+ */
+
+int tty_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	/* fall back to a one-byte write when the driver has no put_char */
+	if (!tty->ops->put_char)
+		return tty->ops->write(tty, &ch, 1);
+	return tty->ops->put_char(tty, ch);
+}
+EXPORT_SYMBOL_GPL(tty_put_char);
+
+struct class *tty_class;
+
+static int tty_cdev_add(struct tty_driver *driver, dev_t dev,
+		unsigned int index, unsigned int count)
+{
+	struct cdev *cdev;
+	int err;
+
+	/* init here, since reused cdevs cause crashes */
+	cdev = cdev_alloc();
+	if (!cdev)
+		return -ENOMEM;
+
+	cdev->ops = &tty_fops;
+	cdev->owner = driver->owner;
+	driver->cdevs[index] = cdev;
+
+	err = cdev_add(cdev, dev, count);
+	if (err)
+		kobject_put(&cdev->kobj);
+	return err;
+}
+
+/**
+ *	tty_register_device - register a tty device
+ *	@driver: the tty driver that describes the tty device
+ *	@index: the index in the tty driver for this tty device
+ *	@device: a struct device that is associated with this tty device.
+ *		This field is optional, if there is no known struct device
+ *		for this tty device it can be set to NULL safely.
+ *
+ *	Returns a pointer to the struct device for this tty device
+ *	(or ERR_PTR(-EFOO) on error).
+ *
+ *	This call is required to be made to register an individual tty device
+ *	if the tty driver's flags have the TTY_DRIVER_DYNAMIC_DEV bit set.  If
+ *	that bit is not set, this function should not be called by a tty
+ *	driver.
+ *
+ *	Locking: ??
+ */
+
+struct device *tty_register_device(struct tty_driver *driver, unsigned index,
+				   struct device *device)
+{
+	/* thin wrapper: no drvdata and no extra sysfs attribute groups */
+	return tty_register_device_attr(driver, index, device, NULL, NULL);
+}
+EXPORT_SYMBOL(tty_register_device);
+
+/* device release hook: dev was kzalloc'ed in tty_register_device_attr() */
+static void tty_device_create_release(struct device *dev)
+{
+	dev_dbg(dev, "releasing...\n");
+	kfree(dev);
+}
+
+/**
+ *	tty_register_device_attr - register a tty device
+ *	@driver: the tty driver that describes the tty device
+ *	@index: the index in the tty driver for this tty device
+ *	@device: a struct device that is associated with this tty device.
+ *		This field is optional, if there is no known struct device
+ *		for this tty device it can be set to NULL safely.
+ *	@drvdata: Driver data to be set to device.
+ *	@attr_grp: Attribute group to be set on device.
+ *
+ *	Returns a pointer to the struct device for this tty device
+ *	(or ERR_PTR(-EFOO) on error).
+ *
+ *	This call is required to be made to register an individual tty device
+ *	if the tty driver's flags have the TTY_DRIVER_DYNAMIC_DEV bit set.  If
+ *	that bit is not set, this function should not be called by a tty
+ *	driver.
+ *
+ *	Locking: ??
+ */
+struct device *tty_register_device_attr(struct tty_driver *driver,
+				   unsigned index, struct device *device,
+				   void *drvdata,
+				   const struct attribute_group **attr_grp)
+{
+	char name[64];
+	dev_t devt = MKDEV(driver->major, driver->minor_start) + index;
+	struct ktermios *tp;
+	struct device *dev;
+	int retval;
+
+	if (index >= driver->num) {
+		pr_err("%s: Attempt to register invalid tty line number (%d)\n",
+		       driver->name, index);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* ptys use their own naming scheme */
+	if (driver->type == TTY_DRIVER_TYPE_PTY)
+		pty_line_name(driver, index, name);
+	else
+		tty_line_name(driver, index, name);
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return ERR_PTR(-ENOMEM);
+
+	dev->devt = devt;
+	dev->class = tty_class;
+	dev->parent = device;
+	/* release hook frees this kzalloc'ed dev */
+	dev->release = tty_device_create_release;
+	dev_set_name(dev, "%s", name);
+	dev->groups = attr_grp;
+	dev_set_drvdata(dev, drvdata);
+
+	/* hold the uevent until the cdev is ready for userspace */
+	dev_set_uevent_suppress(dev, 1);
+
+	retval = device_register(dev);
+	if (retval)
+		goto err_put;
+
+	if (!(driver->flags & TTY_DRIVER_DYNAMIC_ALLOC)) {
+		/*
+		 * Free any saved termios data so that the termios state is
+		 * reset when reusing a minor number.
+		 */
+		tp = driver->termios[index];
+		if (tp) {
+			driver->termios[index] = NULL;
+			kfree(tp);
+		}
+
+		retval = tty_cdev_add(driver, devt, index, 1);
+		if (retval)
+			goto err_del;
+	}
+
+	/* device fully set up: announce it now */
+	dev_set_uevent_suppress(dev, 0);
+	kobject_uevent(&dev->kobj, KOBJ_ADD);
+
+	return dev;
+
+err_del:
+	device_del(dev);
+err_put:
+	put_device(dev);
+
+	return ERR_PTR(retval);
+}
+EXPORT_SYMBOL_GPL(tty_register_device_attr);
+
+/**
+ *	tty_unregister_device - unregister a tty device
+ *	@driver: the tty driver that describes the tty device
+ *	@index: the index in the tty driver for this tty device
+ *
+ *	Undoes tty_register_device(): destroys the class device and, unless
+ *	the driver uses a single shared cdev (TTY_DRIVER_DYNAMIC_ALLOC),
+ *	removes the per-line character device as well.
+ *
+ *	Locking: ??
+ */
+void tty_unregister_device(struct tty_driver *driver, unsigned index)
+{
+	dev_t devt = MKDEV(driver->major, driver->minor_start) + index;
+
+	device_destroy(tty_class, devt);
+
+	/* DYNAMIC_ALLOC drivers own one shared cdev; leave it alone here. */
+	if (driver->flags & TTY_DRIVER_DYNAMIC_ALLOC)
+		return;
+
+	cdev_del(driver->cdevs[index]);
+	driver->cdevs[index] = NULL;
+}
+EXPORT_SYMBOL(tty_unregister_device);
+
+/**
+ * __tty_alloc_driver -- allocate tty driver
+ * @lines: count of lines this driver can handle at most
+ * @owner: module which is responsible for this driver
+ * @flags: some of TTY_DRIVER_* flags, will be set in driver->flags
+ *
+ * This should not be called directly, some of the provided macros should be
+ * used instead. Use IS_ERR and friends on @retval.
+ */
+struct tty_driver *__tty_alloc_driver(unsigned int lines, struct module *owner,
+		unsigned long flags)
+{
+	struct tty_driver *drv;
+	unsigned int ncdevs = 1;
+
+	/* An UNNUMBERED_NODE driver can only ever expose a single line. */
+	if (!lines || ((flags & TTY_DRIVER_UNNUMBERED_NODE) && lines > 1))
+		return ERR_PTR(-EINVAL);
+
+	drv = kzalloc(sizeof(*drv), GFP_KERNEL);
+	if (!drv)
+		return ERR_PTR(-ENOMEM);
+
+	kref_init(&drv->kref);
+	drv->magic = TTY_DRIVER_MAGIC;
+	drv->num = lines;
+	drv->owner = owner;
+	drv->flags = flags;
+
+	/* devpts provides its own tty/termios storage. */
+	if (!(flags & TTY_DRIVER_DEVPTS_MEM)) {
+		drv->ttys = kcalloc(lines, sizeof(*drv->ttys), GFP_KERNEL);
+		drv->termios = kcalloc(lines, sizeof(*drv->termios),
+				GFP_KERNEL);
+		if (!drv->ttys || !drv->termios)
+			goto free_all;
+	}
+
+	/* Without DYNAMIC_ALLOC the core owns one port and cdev per line. */
+	if (!(flags & TTY_DRIVER_DYNAMIC_ALLOC)) {
+		drv->ports = kcalloc(lines, sizeof(*drv->ports), GFP_KERNEL);
+		if (!drv->ports)
+			goto free_all;
+		ncdevs = lines;
+	}
+
+	drv->cdevs = kcalloc(ncdevs, sizeof(*drv->cdevs), GFP_KERNEL);
+	if (!drv->cdevs)
+		goto free_all;
+
+	return drv;
+
+free_all:
+	/* kfree(NULL) is a no-op, so partial allocations are fine here. */
+	kfree(drv->ports);
+	kfree(drv->ttys);
+	kfree(drv->termios);
+	kfree(drv->cdevs);
+	kfree(drv);
+	return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL(__tty_alloc_driver);
+
+/*
+ * kref release callback: tear down and free a tty_driver once the last
+ * reference is dropped via tty_driver_kref_put().
+ */
+static void destruct_tty_driver(struct kref *kref)
+{
+	struct tty_driver *driver = container_of(kref, struct tty_driver, kref);
+	int i;
+	struct ktermios *tp;
+
+	/* Per-line teardown only applies once tty_register_driver() completed. */
+	if (driver->flags & TTY_DRIVER_INSTALLED) {
+		for (i = 0; i < driver->num; i++) {
+			tp = driver->termios[i];
+			if (tp) {
+				driver->termios[i] = NULL;
+				kfree(tp);
+			}
+			/*
+			 * Non-DYNAMIC_DEV drivers had their devices created
+			 * by tty_register_driver(); drop them here.
+			 */
+			if (!(driver->flags & TTY_DRIVER_DYNAMIC_DEV))
+				tty_unregister_device(driver, i);
+		}
+		proc_tty_unregister_driver(driver);
+		/* DYNAMIC_ALLOC drivers share a single cdev at index 0. */
+		if (driver->flags & TTY_DRIVER_DYNAMIC_ALLOC)
+			cdev_del(driver->cdevs[0]);
+	}
+	/* Free everything __tty_alloc_driver() allocated, then the driver. */
+	kfree(driver->cdevs);
+	kfree(driver->ports);
+	kfree(driver->termios);
+	kfree(driver->ttys);
+	kfree(driver);
+}
+
+/**
+ *	tty_driver_kref_put - drop a reference to a tty driver
+ *	@driver: driver whose reference to drop
+ *
+ *	Dropping the final reference runs destruct_tty_driver(), which frees
+ *	the driver and everything it allocated.
+ */
+void tty_driver_kref_put(struct tty_driver *driver)
+{
+	kref_put(&driver->kref, destruct_tty_driver);
+}
+EXPORT_SYMBOL(tty_driver_kref_put);
+
+/**
+ *	tty_set_operations - attach an operations table to a tty driver
+ *	@driver: driver to set the operations on
+ *	@op: operations table; only the pointer is stored, so it must
+ *		outlive the driver
+ */
+void tty_set_operations(struct tty_driver *driver,
+			const struct tty_operations *op)
+{
+	driver->ops = op;
+}	/* stray ';' after the body removed — invalid in strict ISO C */
+EXPORT_SYMBOL(tty_set_operations);
+
+/* Alias for tty_driver_kref_put(): drop one reference to @d. */
+void put_tty_driver(struct tty_driver *d)
+{
+	tty_driver_kref_put(d);
+}
+EXPORT_SYMBOL(put_tty_driver);
+
+/*
+ * Called by a tty driver to register itself.
+ *
+ * Reserves the driver's device-number range, adds the shared cdev for
+ * DYNAMIC_ALLOC drivers, links the driver into the global list and — for
+ * non-DYNAMIC_DEV drivers — creates a device per line.  On failure every
+ * completed step is undone in reverse order.
+ */
+int tty_register_driver(struct tty_driver *driver)
+{
+	int error;
+	int i;
+	dev_t dev;
+	struct device *d;
+
+	/* major == 0 requests a dynamically allocated major number. */
+	if (!driver->major) {
+		error = alloc_chrdev_region(&dev, driver->minor_start,
+						driver->num, driver->name);
+		if (!error) {
+			driver->major = MAJOR(dev);
+			driver->minor_start = MINOR(dev);
+		}
+	} else {
+		dev = MKDEV(driver->major, driver->minor_start);
+		error = register_chrdev_region(dev, driver->num, driver->name);
+	}
+	if (error < 0)
+		goto err;
+
+	/* DYNAMIC_ALLOC drivers use one cdev covering all lines. */
+	if (driver->flags & TTY_DRIVER_DYNAMIC_ALLOC) {
+		error = tty_cdev_add(driver, dev, 0, driver->num);
+		if (error)
+			goto err_unreg_char;
+	}
+
+	mutex_lock(&tty_mutex);
+	list_add(&driver->tty_drivers, &tty_drivers);
+	mutex_unlock(&tty_mutex);
+
+	/* DYNAMIC_DEV drivers register their devices themselves, later. */
+	if (!(driver->flags & TTY_DRIVER_DYNAMIC_DEV)) {
+		for (i = 0; i < driver->num; i++) {
+			d = tty_register_device(driver, i, NULL);
+			if (IS_ERR(d)) {
+				error = PTR_ERR(d);
+				goto err_unreg_devs;
+			}
+		}
+	}
+	proc_tty_register_driver(driver);
+	/* INSTALLED tells destruct_tty_driver() a full teardown is needed. */
+	driver->flags |= TTY_DRIVER_INSTALLED;
+	return 0;
+
+err_unreg_devs:
+	/* i is the first index that failed; unwind the ones before it. */
+	for (i--; i >= 0; i--)
+		tty_unregister_device(driver, i);
+
+	mutex_lock(&tty_mutex);
+	list_del(&driver->tty_drivers);
+	mutex_unlock(&tty_mutex);
+
+err_unreg_char:
+	unregister_chrdev_region(dev, driver->num);
+err:
+	return error;
+}
+EXPORT_SYMBOL(tty_register_driver);
+
+/*
+ * Called by a tty driver to unregister itself.
+ *
+ * Releases the device-number range and drops the driver from the global
+ * list; the memory itself is freed later via tty_driver_kref_put().
+ */
+int tty_unregister_driver(struct tty_driver *driver)
+{
+#if 0
+	/* FIXME */
+	if (driver->refcount)
+		return -EBUSY;
+#endif
+	dev_t first = MKDEV(driver->major, driver->minor_start);
+
+	unregister_chrdev_region(first, driver->num);
+
+	mutex_lock(&tty_mutex);
+	list_del(&driver->tty_drivers);
+	mutex_unlock(&tty_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL(tty_unregister_driver);
+
+/* Map a tty back to its dev_t: the driver's base number plus the line index. */
+dev_t tty_devnum(struct tty_struct *tty)
+{
+	struct tty_driver *drv = tty->driver;
+	dev_t base = MKDEV(drv->major, drv->minor_start);
+
+	return base + tty->index;
+}
+EXPORT_SYMBOL(tty_devnum);
+
+/* Copy the tty core's file_operations into @fops. */
+void tty_default_fops(struct file_operations *fops)
+{
+	*fops = tty_fops;	/* struct copy */
+}
+
+/*
+ * devnode callback for the tty class: grant mode 0666 to the
+ * (TTYAUX_MAJOR, 0) node (/dev/tty, see tty_init()) and to
+ * (TTYAUX_MAJOR, 2); every other node keeps the default mode.
+ */
+static char *tty_devnode(struct device *dev, umode_t *mode)
+{
+	dev_t devt;
+
+	if (!mode)
+		return NULL;
+
+	devt = dev->devt;
+	if (devt == MKDEV(TTYAUX_MAJOR, 0) || devt == MKDEV(TTYAUX_MAJOR, 2))
+		*mode = 0666;
+
+	/* Always NULL: the default node name is kept. */
+	return NULL;
+}
+
+/*
+ * Create the "tty" device class and hook up the devnode callback that
+ * adjusts node permissions.  Runs at postcore_initcall time, before
+ * tty_init() uses tty_class.
+ */
+static int __init tty_class_init(void)
+{
+	tty_class = class_create(THIS_MODULE, "tty");
+	if (IS_ERR(tty_class))
+		return PTR_ERR(tty_class);
+	tty_class->devnode = tty_devnode;
+	return 0;
+}
+
+postcore_initcall(tty_class_init);
+
+/*
+ * Character devices backing /dev/tty and /dev/console; both are set up in
+ * tty_init().  (3/2004 jmc: why do these devices exist?)
+ */
+static struct cdev tty_cdev, console_cdev;
+
+/*
+ * sysfs "active" attribute of the console device: list the names of the
+ * enabled consoles that can actually output (have both ->device and
+ * ->write), space separated and newline terminated.
+ */
+static ssize_t show_cons_active(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct console *cs[16];		/* snapshot; at most 16 consoles listed */
+	int i = 0;
+	struct console *c;
+	ssize_t count = 0;
+
+	/* Snapshot the console list under console_lock. */
+	console_lock();
+	for_each_console(c) {
+		if (!c->device)
+			continue;
+		if (!c->write)
+			continue;
+		if ((c->flags & CON_ENABLED) == 0)
+			continue;
+		cs[i++] = c;
+		if (i >= ARRAY_SIZE(cs))
+			break;
+	}
+	/*
+	 * Print the snapshot in reverse collection order (presumably so the
+	 * preferred console ends up last — TODO confirm against the console
+	 * registration order).
+	 */
+	while (i--) {
+		int index = cs[i]->index;
+		struct tty_driver *drv = cs[i]->device(cs[i], &index);
+
+		/* don't resolve tty0 as some programs depend on it */
+		if (drv && (cs[i]->index > 0 || drv->major != TTY_MAJOR))
+			count += tty_line_name(drv, index, buf + count);
+		else
+			count += sprintf(buf + count, "%s%d",
+					 cs[i]->name, cs[i]->index);
+
+		/* Space between entries, newline after the last one. */
+		count += sprintf(buf + count, "%c", i ? ' ':'\n');
+	}
+	console_unlock();
+
+	return count;
+}
+static DEVICE_ATTR(active, S_IRUGO, show_cons_active, NULL);
+
+/* Attribute group attached to the "console" device created in tty_init(). */
+static struct attribute *cons_dev_attrs[] = {
+	&dev_attr_active.attr,
+	NULL
+};
+
+ATTRIBUTE_GROUPS(cons_dev);
+
+/* The /sys/class/tty/console device; NULL if creation failed in tty_init(). */
+static struct device *consdev;
+
+/* Wake pollers of the console device's "active" attribute, if it exists. */
+void console_sysfs_notify(void)
+{
+	if (!consdev)
+		return;
+
+	sysfs_notify(&consdev->kobj, NULL, "active");
+}
+
+/*
+ * Ok, now we can initialize the rest of the tty devices and can count
+ * on memory allocations, interrupts etc..
+ *
+ * Registers /dev/tty (TTYAUX_MAJOR, 0) and /dev/console (TTYAUX_MAJOR, 1);
+ * failure to register either is fatal (panic).
+ */
+int __init tty_init(void)
+{
+	tty_sysctl_init();
+	/* /dev/tty: the per-process controlling terminal alias. */
+	cdev_init(&tty_cdev, &tty_fops);
+	if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
+	    register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0)
+		panic("Couldn't register /dev/tty driver\n");
+	device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 0), NULL, "tty");
+
+	/* /dev/console, with the "active" attribute group attached. */
+	cdev_init(&console_cdev, &console_fops);
+	if (cdev_add(&console_cdev, MKDEV(TTYAUX_MAJOR, 1), 1) ||
+	    register_chrdev_region(MKDEV(TTYAUX_MAJOR, 1), 1, "/dev/console") < 0)
+		panic("Couldn't register /dev/console driver\n");
+	consdev = device_create_with_groups(tty_class, NULL,
+					    MKDEV(TTYAUX_MAJOR, 1), NULL,
+					    cons_dev_groups, "console");
+	/* The console device is optional; console_sysfs_notify() checks NULL. */
+	if (IS_ERR(consdev))
+		consdev = NULL;
+
+#ifdef CONFIG_VT
+	vty_init(&console_fops);
+#endif
+	return 0;
+}
+