[Feature][T106_eSDK]T106-V2.01.01.02P56U06.AP.15.11_CAP.15.11(SDK4.6)diff_17.02(SDK4.7)
Only Configure: No
Affected branch: master
Affected module: unknown
Is it affected on both ZXIC and MTK: only ZXIC
Self-test: Yes
Doc Update: No
Change-Id: I9dc02273b59a074828ab3eeaa84306415b153138
diff --git a/upstream/linux-5.10/arch/arm/kernel/svc_s.S b/upstream/linux-5.10/arch/arm/kernel/svc_s.S
new file mode 100755
index 0000000..a52abb7
--- /dev/null
+++ b/upstream/linux-5.10/arch/arm/kernel/svc_s.S
@@ -0,0 +1,101 @@
+
+#define GIC_DIST_BASE (0xF2000000)
+#define GIC_RDIST_BASE (0xF2040000)
+
+#define REAL_TXT_ADDR (CONFIG_PHYS_OFFSET + 0x8000)
+#define DTB_ADDR (CONFIG_DTB_ADDR)
+
+secure_init:                                   /* secure-world init: wake this core's GIC redistributor, drop to non-secure, return to caller */
+
+#if 0
+ mov r5, r0
+ mov r6, r1
+ mov r7, r2
+#endif
+ mov r4, lr                                    /* preserve return address; the bl calls below clobber lr */
+
+#if 1
+ /* use r0--r4 only */
+ bl get_core_id
+ mov r1, r0                                    /* r1 = core id (MPIDR Aff0) */
+ bl get_cluster_id
+ mov r2, r0                                    /* r2 = cluster id (MPIDR Aff1) */
+
+ ldr r3, =GIC_DIST_BASE
+ ldr r0, =0x50
+ str r0, [r3]                                  /* GICD_CTLR = 0x50 -- NOTE(review): confirm bit meaning against the GIC TRM for this SoC */
+
+ ldr r3, =GIC_RDIST_BASE
+ lsl r2, r2, #2                                /* linear core index = core + cluster*4 -- assumes 4 cores/cluster; TODO confirm */
+ add r1, r1, r2
+ lsl r1, r1, #17                               /* each redistributor frame is 128KB (2 x 64KB pages) */
+
+ add r1, r1, r3
+ add r1, r1, #0x14                             /* r1 = this core's GICR_WAKER (offset 0x14) */
+
+ LDR R0, [R1]
+ LDR R2, =0xfffffffd                           /* clear GICR_WAKER.ProcessorSleep (bit 1) */
+ AND R0, R0, R2
+ STR R0, [R1]
+
+ LDR R2, = 0xFFFFFFFB                          /* NOTE(review): to poll ChildrenAsleep (bit 2) one would expect a mask of 0x4, not ~0x4 -- confirm intent */
+wait:                                          /* spin until the masked WAKER bits read 0 */
+ LDR R0, [R1]
+ AND R0, R0, R2
+ CMP R0, #0
+ BNE wait
+
+ SUB R1, R1, #0x14                             /* back to the redistributor frame base */
+ LDR R2, =0x10080                              /* SGI/PPI frame (+64KB) + 0x80 = GICR_IGROUPR0 */
+ ADD R1, R1, R2
+ LDR R2, =0xFFFFFFFF
+ STR R2, [R1]                                  /* put all SGIs/PPIs in Group 1 (non-secure) */
+#endif
+
+ MRS R0, CPSR
+ BIC R0, #0x1F
+ ORR R0, #0xD6                                 /* mode 0x16 = Monitor, IRQ+FIQ masked */
+ MSR CPSR_c, R0
+
+ MOV r3, #0xD
+ MCR p15,#0x6,r3,c12,c12,#5                    /* ICC_MSRE = 0xD (SRE|DFB|DIB): sysreg GIC access from Monitor -- TODO confirm encoding */
+ MCR p15,0,r3,c12,c12,#5                       /* ICC_SRE = 0xD */
+
+ MRC p15,0,r1,c1,c1,0                          /* read SCR */
+ MOV r2, r1
+ ORR r2, #0x1                                  /* SCR.NS = 1: subsequent banked state is non-secure */
+ MCR p15,0,r2,c1,c1,0
+
+ MCR p15,#0x4,r3,c12,c9,#5                     /* ICC_HSRE = 0xD -- TODO confirm encoding */
+
+ MRS R0, CPSR
+ BIC R0, #0x1F
+ ORR R0, #0xD3                                 /* mode 0x13 = SVC, IRQ+FIQ masked */
+ MSR CPSR_c, R0
+
+#if 0
+ mov r0, r5
+ mov r1, r6
+ mov r2, r7
+#else
+ ldr r0, =0                                    /* Linux ARM boot protocol: r0 = 0 */
+ ldr r1, =REAL_TXT_ADDR                        /* r1 = kernel text load address */
+ ldr r2, =DTB_ADDR                             /* r2 = device tree blob address */
+#endif
+ mov lr, r4                                    /* restore the caller's return address */
+
+ ret lr
+ENDPROC(secure_init)
+
+get_core_id:                                   /* return this CPU's core number within its cluster */
+ MRC p15, 0, R0, c0, c0, 5                     /* read MPIDR */
+ AND R0, R0, #0xFF                             /* keep Aff0 (core id) */
+ BX R14
+ENDPROC(get_core_id)
+
+get_cluster_id:                                /* return this CPU's cluster number */
+ MRC p15, 0, r0, c0, c0, 5                     /* read MPIDR */
+ AND r0, r0, #0xFF00                           /* keep Aff1 (cluster id) */
+ LSR r0, r0, #0x8
+ BX lr
+ENDPROC(get_cluster_id)
diff --git a/upstream/linux-5.10/drivers/dma/sc/zx297520v3_dma.c b/upstream/linux-5.10/drivers/dma/sc/zx297520v3_dma.c
new file mode 100755
index 0000000..6ef00d8
--- /dev/null
+++ b/upstream/linux-5.10/drivers/dma/sc/zx297520v3_dma.c
@@ -0,0 +1,1464 @@
+/*******************************************************************************
+ * Copyright (C) 2013, ZTE Corporation.
+ *
+ * File Name: zx297520v3_dma.c
+ * File Mark:
+ * Description:
+ * Others:
+ * Version: 0.1
+ * Author: limeifeng
+ * Date:
+ * modify
+
+
+ ********************************************************************************/
+
+/****************************************************************************
+* Include files
+****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include "../dmaengine.h"
+#include <linux/soc/sc/common.h>
+#include <linux/soc/sc/spinlock.h>
+
+#include "zx297520v3_dma.h"
+
+//#pragma GCC optimize("O0")
+#define DMA_SUCCESS DMA_COMPLETE
+
+#define DMA_CHANNEL_CONFIG(peripheral_id, is_used , enable_mem2mem) {peripheral_id, is_used, enable_mem2mem}
+
+/*dma channel config define*/
+typedef struct                                 /* per-channel static configuration (one entry per hw channel) */
+{
+ dma_peripheral_id peripheral_id; /* hw channel id */
+ unsigned int is_used;            /* channel currently claimed by this core */
+ unsigned int enable_mem2mem;     /* channel may be borrowed for mem-to-mem transfers */
+#if 0
+ void * data;
+ dma_callback_func channel_callback;
+#endif
+}dma_channel_config;
+
+static dma_channel_config dma_chan_config[] =  /* channel table; array index doubles as the driver's channel_id */
+{
+ DMA_CHANNEL_CONFIG(DMA_CH_UART0_TX, false, false),
+ DMA_CHANNEL_CONFIG(DMA_CH_UART0_RX, false, false),
+ DMA_CHANNEL_CONFIG(DMA_CH_UART1_TX, false, false),
+ DMA_CHANNEL_CONFIG(DMA_CH_UART1_RX, false, false),
+ DMA_CHANNEL_CONFIG(DMA_CH_SSP0_TX, false, false),
+
+ DMA_CHANNEL_CONFIG(DMA_CH_SSP0_RX, false, true),
+#if 1 /* only ps core used */
+ DMA_CHANNEL_CONFIG(DMA_CH_GPRS0, false, false),
+ DMA_CHANNEL_CONFIG(DMA_CH_GPRS1, false, false),
+ DMA_CHANNEL_CONFIG(DMA_CH_USIM, false, false),
+#endif
+ DMA_CHANNEL_CONFIG(DMA_CH_I2S0_TX, false, false),
+
+ DMA_CHANNEL_CONFIG(DMA_CH_I2S0_RX0, false, false),
+ DMA_CHANNEL_CONFIG(DMA_CH_I2S1_TX, false, false),
+ DMA_CHANNEL_CONFIG(DMA_CH_I2S1_RX0, false, false),
+ DMA_CHANNEL_CONFIG(DMA_CH_SPIFC_TX, false, false),
+ DMA_CHANNEL_CONFIG(DMA_CH_SPIFC_RX, false, false),
+
+ DMA_CHANNEL_CONFIG(DMA_CH_SSP1_TX, false, true),
+ DMA_CHANNEL_CONFIG(DMA_CH_SSP1_RX, false, false),
+ DMA_CHANNEL_CONFIG(DMA_CH_UART2_TX, false, true),
+ DMA_CHANNEL_CONFIG(DMA_CH_UART2_RX, false, true),
+
+ DMA_CHANNEL_CONFIG(DMA_CH_EMBMS, false, false),
+ #if 1 /* only ps core used */
+ DMA_CHANNEL_CONFIG(DMA_CH_USIM1, false, false),
+ #endif
+ DMA_CHANNEL_CONFIG(DMA_CH_M2M_TX, false, true),
+ DMA_CHANNEL_CONFIG(DMA_CH_M2M_RX, false, true),
+};
+
+/****************************************************************************
+* Local Macros
+****************************************************************************/
+#define BIT_SHIFT_L(value,BIT_NO) ((unsigned int)(value << (BIT_NO)))
+#define GET_HIGH_16BIT(val) (unsigned int)(val >> (16))
+#define GET_LOW_16BIT(val) (unsigned int)(val & (0xffff))
+#define DMA_CHANNEL(dmac,channel) (unsigned int)(dmac << (16)|(channel) )
+
+/*dma control reg bit */
+#define DMA_CTRL_ENABLE(value) BIT_SHIFT_L(value,0)
+#define DMA_CTRL_SOFT_B_REQ(value) BIT_SHIFT_L(value,1)
+#define DMA_CTRL_SRC_FIFO_MOD(value) BIT_SHIFT_L(value,2)
+#define DMA_CTRL_DEST_FIFO_MOD(value) BIT_SHIFT_L(value,3)
+#define DMA_CTRL_IRQ_MOD(value) BIT_SHIFT_L(value,4)
+#define DMA_CTRL_SRC_BURST_SIZE(value) BIT_SHIFT_L(value,6)
+#define DMA_CTRL_SRC_BURST_LENGTH(value) BIT_SHIFT_L(value,9)
+#define DMA_CTRL_DEST_BURST_SIZE(value) BIT_SHIFT_L(value,13)
+#define DMA_CTRL_DEST_BURST_LENGTH(value) BIT_SHIFT_L(value,16)
+#define DMA_CTRL_INTERRUPT_SEL(value) BIT_SHIFT_L(value,20)
+#define DMA_CTRL_FORCE_CLOSE(value) BIT_SHIFT_L(value,31)
+
+#define MAX(a,b) ((a) > (b) ? (a) : (b))
+
+/* maximum number of linked-list (LLI) descriptors in one transfer chain */
+#define MAX_LLI_PARA_CNT (32)
+
+
+/* config dma reused */
+#define DMA_SEL_CFG_REG (get_socsys_base() + 0x120)
+
+#if defined(CONFIG_ARCH_ZX297520V2)
+#define DMA_SEL_UART2_I2S (1U << 0)
+#define DMA_SEL_UART1_HASH (1U << 1)
+#define DMA_SEL_I2S0_TDM (1U << 2)
+#define DMA_SEL_I2S1_TDM (1U << 3)
+#elif defined(CONFIG_ARCH_ZX297520V3)
+#define DMA_SEL_UART2TX_I2S0RX1 (1U << 0)
+#define DMA_SEL_UART2RX_I2S1RX1 (1U << 1)
+#define DMA_SEL_UART1RX_HASH (1U << 2)
+#define DMA_SEL_I2S0TX_TDMTX0 (1U << 3)
+#define DMA_SEL_I2S0RX0_TDMRX0 (1U << 4)
+#define DMA_SEL_I2S1TX_TDMTX1 (1U << 5)
+#define DMA_SEL_I2S1RX0_TDMRX1 (1U << 6)
+#endif
+
+typedef struct                                 /* one hardware LLI descriptor; field order must match the controller's register layout -- do not reorder */
+{
+ volatile unsigned int src_addr;
+ volatile unsigned int dest_addr;
+ volatile unsigned int xpara;                  /* x-dimension transfer count */
+ volatile unsigned int yzpara;                 /* y count (low 16) | z count (high 16) */
+ volatile unsigned int src_yzstep;
+ volatile unsigned int dest_yzstep;
+ volatile unsigned int reserved0;
+ volatile unsigned int link_addr;              /* physical address of next LLI, 0 terminates the chain */
+ volatile unsigned int control;
+}dma_lli_param;
+
+#define MAX_LLI_PARAMS_CNT (sizeof(dma_lli_param)*MAX_LLI_PARA_CNT) /* NOTE(review): value is in BYTES despite the _CNT name */
+static dma_lli_param *dma_lli_params[DMA_CH_NUM];     /* per-channel LLI array (CPU virtual address) */
+static dma_addr_t dma_lli_phy_addr[DMA_CH_NUM];       /* matching DMA/physical address handed to the controller */
+
+#define ZX29_DMA_TEST 0
+
+typedef struct                                 /* cross-core channel ownership record -- presumably lives in shared memory; TODO confirm where dma_pub_configs is mapped */
+{
+ volatile unsigned short core_id; /* zte_coreid -- for debug */
+ volatile unsigned short is_used; /* set by whichever core owns the channel */
+}dma_pub_config;
+
+static dma_pub_config *dma_pub_configs;
+
+#define ZX29_DMA_INT_SEL DMA_INT_TO_A9         /* route completion interrupts to the A9 (this core) */
+
+/****************************************************************************
+* Local Types
+****************************************************************************/
+static DEFINE_MUTEX(dma_mutex);
+
+struct zx29_dma_channel                        /* per-channel runtime state wrapping a dmaengine struct dma_chan */
+{
+ dma_peripheral_id peripheral_id;              /* fixed hw channel this slot represents */
+ struct zx29_dma * dma_device;
+ struct dma_chan chan;
+ struct dma_async_tx_descriptor desc;          /* single reusable descriptor (one in-flight transfer per channel) */
+ struct tasklet_struct tasklet;
+ enum dma_status status;
+ unsigned int cyclic;                          /* nonzero while a cyclic (circular LLI) transfer is active */
+ dma_peripheral_id req_peripheral_id;          /* id originally requested by the client (may differ for reused channels) */
+// dma_channel_def dma_chan_par;
+ dma_cookie_t zx29_dma_cookie;
+};
+
+struct zx29_dma                                /* driver instance: dmaengine device + register base + channel table */
+{
+ struct dma_device dma;
+ dma_regs __iomem * reg;                       /* mapped controller register block */
+ dma_channel_config * chan_config;             /* points at dma_chan_config[] */
+ unsigned int channel_count;
+ struct zx29_dma_channel dma_chan[DMA_CH_NUM];
+};
+/****************************************************************************
+* DMA trace
+****************************************************************************/
+/* #define ZX_TRACE_DMA */
+
+#ifdef ZX_TRACE_DMA                            /* lightweight ring-buffer trace of DMA lifecycle events (debug builds only) */
+/*#pragma GCC optimize("O0")*/
+
+extern unsigned int test_timer_read( void );
+
+#define TRACE_DMA_COUNT 1000
+
+typedef enum
+{
+ DMA_DO_SUBMIT = 0,
+ DMA_DO_START = 1,
+ DMA_DO_ERR = 2,
+ DMA_DO_SUCCESS = 3,
+}dma_behavior_t;
+
+typedef struct
+{
+ dma_peripheral_id peripheral_id;
+ dma_behavior_t behavior;
+}dma_trace_t;
+
+volatile dma_trace_t dma_trace_view[TRACE_DMA_COUNT+10]; /* inspected from a debugger; +10 slack beyond the wrap point */
+volatile unsigned int dma_trace_index = 0;
+
+#define dma_trace_index_inc() \
+do{ \
+ dma_trace_index++;\
+ if(dma_trace_index>=TRACE_DMA_COUNT)\
+ dma_trace_index=0;\
+}while(0)
+
+static struct zx29_dma_channel *to_zx29_dma_chan(struct dma_chan *chan);
+static void dma_trace_submit(struct dma_async_tx_descriptor *tx) /* record descriptor submission */
+{
+ struct zx29_dma_channel *dma_channel = to_zx29_dma_chan(tx->chan);
+
+ dma_trace_view[dma_trace_index].peripheral_id = dma_channel->peripheral_id;
+ dma_trace_view[dma_trace_index].behavior = DMA_DO_SUBMIT;
+ dma_trace_index_inc();
+}
+
+static void dma_trace_pending(dma_peripheral_id peripheral_id) /* record transfer start */
+{
+ dma_trace_view[dma_trace_index].peripheral_id = peripheral_id;
+ dma_trace_view[dma_trace_index].behavior = DMA_DO_START;
+ dma_trace_index_inc();
+}
+
+static void dma_trace_err(dma_peripheral_id peripheral_id)     /* record hardware error */
+{
+ dma_trace_view[dma_trace_index].peripheral_id = peripheral_id;
+ dma_trace_view[dma_trace_index].behavior = DMA_DO_ERR;
+ dma_trace_index_inc();
+}
+
+static void dma_trace_success(dma_peripheral_id peripheral_id) /* record completion */
+{
+ dma_trace_view[dma_trace_index].peripheral_id = peripheral_id;
+ dma_trace_view[dma_trace_index].behavior = DMA_DO_SUCCESS;
+ dma_trace_index_inc();
+}
+#else                                          /* tracing disabled: no-op stubs so call sites need no #ifdefs */
+static void dma_trace_submit(struct dma_async_tx_descriptor *tx){}
+static void dma_trace_pending(dma_peripheral_id peripheral_id){}
+static void dma_trace_err(dma_peripheral_id peripheral_id){}
+static void dma_trace_success(dma_peripheral_id peripheral_id){}
+#endif
+
+static struct zx29_dma dma_dev;
+
+unsigned int dma_err_num = 0;
+
+#if 0
+#define DMA_CHANNEL_CONFIG(peripheral_id, is_used , enable_mem2mem) {peripheral_id, is_used, enable_mem2mem}
+static dma_channel_config dma_chan_config[] =
+{
+ DMA_CHANNEL_CONFIG(DMA_CH_UART0_TX, false, true),
+ DMA_CHANNEL_CONFIG(DMA_CH_UART0_RX, false, true),
+ DMA_CHANNEL_CONFIG(DMA_CH_UART1_TX, false, true),
+ DMA_CHANNEL_CONFIG(DMA_CH_UART1_RX, false, true),
+ DMA_CHANNEL_CONFIG(DMA_CH_SSP0_TX, false, true),
+
+ DMA_CHANNEL_CONFIG(DMA_CH_SSP0_RX, false, true),
+#if 0 /* only ps core used */
+ DMA_CHANNEL_CONFIG(DMA_CH_GPRS0, true, true),
+ DMA_CHANNEL_CONFIG(DMA_CH_GPRS1, true, true),
+ DMA_CHANNEL_CONFIG(DMA_CH_USIM, true, true),
+#endif
+ DMA_CHANNEL_CONFIG(DMA_CH_I2S0_TX, false, false),
+
+ DMA_CHANNEL_CONFIG(DMA_CH_I2S0_RX0, false, false),
+ DMA_CHANNEL_CONFIG(DMA_CH_I2S1_TX, false, false),
+ DMA_CHANNEL_CONFIG(DMA_CH_I2S1_RX0, false, false),
+ DMA_CHANNEL_CONFIG(DMA_CH_SPIFC_TX, false, false),
+ DMA_CHANNEL_CONFIG(DMA_CH_SPIFC_RX, false, false),
+
+ DMA_CHANNEL_CONFIG(DMA_CH_SSP1_TX, false, true),
+ DMA_CHANNEL_CONFIG(DMA_CH_SSP1_RX, false, true),
+ DMA_CHANNEL_CONFIG(DMA_CH_UART2_TX, false, true),
+ DMA_CHANNEL_CONFIG(DMA_CH_UART2_RX, false, true),
+
+#ifdef CONFIG_ARCH_ZX297520V2
+#else
+ DMA_CHANNEL_CONFIG(DMA_CH_EMBMS, false, true),
+ #if 0 /* only ps core used */
+ DMA_CHANNEL_CONFIG(DMA_CH_USIM1, false, true),
+ #endif
+ DMA_CHANNEL_CONFIG(DMA_CH_M2M_TX, false, true),
+ DMA_CHANNEL_CONFIG(DMA_CH_M2M_RX, false, true),
+#endif
+};
+
+#endif
+static unsigned short dma_chan_check_lock(dma_peripheral_id peripheral_id) /* true if another core holds this channel; SPIFC channels are never shared */
+{
+ if((peripheral_id == DMA_CH_SPIFC_TX) || (peripheral_id == DMA_CH_SPIFC_RX))
+ return false;
+
+ return dma_pub_configs[peripheral_id].is_used;
+}
+
+static void dma_chan_lock(dma_peripheral_id peripheral_id) /* claim the channel in the shared ownership table (no-op for SPIFC) */
+{
+ if((peripheral_id == DMA_CH_SPIFC_TX) || (peripheral_id == DMA_CH_SPIFC_RX))
+ return;
+ dma_pub_configs[peripheral_id].core_id = 208 /*for cap CORE_ID_AP*/;
+ dma_pub_configs[peripheral_id].is_used = true;
+}
+
+static void dma_chan_unlock(dma_peripheral_id peripheral_id) /* release the channel in the shared ownership table (no-op for SPIFC) */
+{
+ if((peripheral_id == DMA_CH_SPIFC_TX) || (peripheral_id == DMA_CH_SPIFC_RX))
+ return;
+ dma_pub_configs[peripheral_id].core_id = CORE_ID_NUM; /* CORE_ID_NUM = "no owner" sentinel */
+ dma_pub_configs[peripheral_id].is_used = false;
+}
+
+/* some channel need config reuse register */
+static void dma_reuse_config(dma_peripheral_id peripheral_id) /* steer the shared DMA request line in DMA_SEL_CFG_REG toward the requested peripheral */
+{
+ switch(peripheral_id)
+ {
+#if defined(CONFIG_ARCH_ZX297520V2)
+ case DMA_CH_UART2_TX:
+ case DMA_CH_UART2_RX:
+ zx_clr_reg(DMA_SEL_CFG_REG, DMA_SEL_UART2_I2S); /* clear = UART2 side of the mux */
+ break;
+
+ case DMA_CH_I2S0_RX1:
+ case DMA_CH_I2S1_RX1:
+ zx_set_reg(DMA_SEL_CFG_REG, DMA_SEL_UART2_I2S); /* set = I2S side of the mux */
+ break;
+
+ case DMA_CH_UART1_RX:
+ zx_clr_reg(DMA_SEL_CFG_REG, DMA_SEL_UART1_HASH);
+ break;
+
+ case DMA_CH_HASH_RX:
+ zx_set_reg(DMA_SEL_CFG_REG, DMA_SEL_UART1_HASH);
+ break;
+
+ case DMA_CH_I2S0_TX:
+ case DMA_CH_I2S0_RX0:
+ zx_clr_reg(DMA_SEL_CFG_REG, DMA_SEL_I2S0_TDM);
+ break;
+
+ case DMA_CH_TDM_TX0:
+ case DMA_CH_TDM_RX0:
+ zx_set_reg(DMA_SEL_CFG_REG, DMA_SEL_I2S0_TDM);
+ break;
+
+ case DMA_CH_I2S1_TX:
+ case DMA_CH_I2S1_RX0:
+ zx_clr_reg(DMA_SEL_CFG_REG, DMA_SEL_I2S1_TDM);
+ break;
+
+ case DMA_CH_TDM_TX1:
+ case DMA_CH_TDM_RX1:
+ zx_set_reg(DMA_SEL_CFG_REG, DMA_SEL_I2S1_TDM);
+ break;
+
+#elif defined(CONFIG_ARCH_ZX297520V3)
+ case DMA_CH_UART1_RX:
+ zx_clr_reg(DMA_SEL_CFG_REG, DMA_SEL_UART1RX_HASH);
+ break;
+ case DMA_CH_I2S0_TX:
+ zx_clr_reg(DMA_SEL_CFG_REG, DMA_SEL_I2S0TX_TDMTX0);
+ break;
+ case DMA_CH_I2S0_RX0:
+ zx_clr_reg(DMA_SEL_CFG_REG, DMA_SEL_I2S0RX0_TDMRX0);
+ break;
+ case DMA_CH_I2S1_TX:
+ zx_clr_reg(DMA_SEL_CFG_REG, DMA_SEL_I2S1TX_TDMTX1);
+ break;
+ case DMA_CH_I2S1_RX0:
+ zx_clr_reg(DMA_SEL_CFG_REG, DMA_SEL_I2S1RX0_TDMRX1);
+ break;
+ case DMA_CH_UART2_TX:
+ zx_clr_reg(DMA_SEL_CFG_REG, DMA_SEL_UART2TX_I2S0RX1);
+ break;
+ case DMA_CH_UART2_RX:
+ zx_clr_reg(DMA_SEL_CFG_REG, DMA_SEL_UART2RX_I2S1RX1);
+ break;
+ case DMA_CH_HASH_RX:
+ zx_set_reg(DMA_SEL_CFG_REG, DMA_SEL_UART1RX_HASH);
+ break;
+ case DMA_CH_TDM_TX0:
+ zx_set_reg(DMA_SEL_CFG_REG, DMA_SEL_I2S0TX_TDMTX0);
+ break;
+ case DMA_CH_TDM_RX0:
+ zx_set_reg(DMA_SEL_CFG_REG, DMA_SEL_I2S0RX0_TDMRX0);
+ break;
+ case DMA_CH_TDM_TX1:
+ zx_set_reg(DMA_SEL_CFG_REG, DMA_SEL_I2S1TX_TDMTX1);
+ break;
+ case DMA_CH_TDM_RX1:
+ zx_set_reg(DMA_SEL_CFG_REG, DMA_SEL_I2S1RX0_TDMRX1);
+ break;
+ case DMA_CH_I2S0_RX1:
+ zx_set_reg(DMA_SEL_CFG_REG, DMA_SEL_UART2TX_I2S0RX1);
+ break;
+ case DMA_CH_I2S1_RX1:
+ zx_set_reg(DMA_SEL_CFG_REG, DMA_SEL_UART2RX_I2S1RX1);
+ break;
+#endif
+
+ default:                                      /* channels without a mux need no configuration */
+ break;
+ }
+}
+
+static dma_peripheral_id get_real_peri_id(dma_peripheral_id peripheral_id) /* map muxed (virtual) ids >= DMA_CH_NUM onto the hw channel they share; 0xff = unknown */
+{
+ if(peripheral_id < DMA_CH_NUM)
+ return peripheral_id;                         /* already a real hw channel */
+
+ switch(peripheral_id)
+ {
+ case DMA_CH_HASH_RX:
+ return DMA_CH_UART1_RX;
+ case DMA_CH_TDM_TX0:
+ return DMA_CH_I2S0_TX;
+ case DMA_CH_TDM_RX0:
+ return DMA_CH_I2S0_RX0;
+ case DMA_CH_TDM_TX1:
+ return DMA_CH_I2S1_TX;
+ case DMA_CH_TDM_RX1:
+ return DMA_CH_I2S1_RX0;
+ case DMA_CH_I2S0_RX1:
+ return DMA_CH_UART2_TX;
+ case DMA_CH_I2S1_RX1:
+ return DMA_CH_UART2_RX;
+ default:
+ return 0xff;                                  /* error sentinel, must be checked by callers */
+ }
+}
+
+static unsigned int get_channel_id(dma_peripheral_id peripheral_id) /* find the dma_chan_config[] index for a (possibly muxed) peripheral id; 0xff = not found */
+{
+ int i;
+ dma_channel_config *chan_config = dma_dev.chan_config;
+ dma_peripheral_id real_peripheral_id = 0xff;
+
+ real_peripheral_id = get_real_peri_id(peripheral_id);
+
+ for(i=0; i<dma_dev.channel_count; i++)        /* linear scan; table is small */
+ {
+ if ( (chan_config[i].peripheral_id==real_peripheral_id))
+ return i;
+ }
+
+ return 0xff;                                  /* error sentinel, must be checked by callers */
+}
+
+static void dma_sync_lli_for_cpu(unsigned int channel_id) /* hand the channel's LLI area back to the CPU before editing descriptors */
+{
+ dma_sync_single_for_cpu(dma_dev.dma.dev, dma_lli_phy_addr[channel_id], MAX_LLI_PARAMS_CNT, DMA_BIDIRECTIONAL);
+}
+
+static void dma_sync_lli_for_device(unsigned int channel_id) /* flush edited descriptors so the DMA engine sees them */
+{
+ dma_sync_single_for_device(dma_dev.dma.dev, dma_lli_phy_addr[channel_id], MAX_LLI_PARAMS_CNT, DMA_BIDIRECTIONAL);
+}
+
+static signed int dma_find_chan(dma_peripheral_id channel_id);
+static signed int dma_reset_chan(struct zx29_dma_channel *channel);
+
+static void dma_tasklet(unsigned long data)    /* deferred completion: invoke the client callback outside hard-irq context */
+{
+ struct zx29_dma_channel *chan = (struct zx29_dma_channel *)data;
+
+ if (chan->desc.callback)
+ chan->desc.callback(chan->desc.callback_param);
+}
+
+static struct zx29_dma_channel *to_zx29_dma_chan(struct dma_chan *chan) /* recover the driver channel from its embedded dmaengine dma_chan */
+{
+ return container_of(chan, struct zx29_dma_channel, chan);
+}
+
+static signed int dma_disable_chan(struct zx29_dma_channel *chan) /* hard-stop a channel via the FORCE_CLOSE control bit; -EINVAL on bad id */
+{
+ dma_chan_reg __iomem* chan_reg_ptr=NULL;
+
+ if (chan->peripheral_id >= DMA_CH_NUM)
+ {
+ return -EINVAL;
+ }
+
+ chan_reg_ptr= &(dma_dev.reg->channel[chan->peripheral_id]); /* hw channel index == peripheral id */
+ chan_reg_ptr->control |= DMA_CTRL_FORCE_CLOSE(1);
+
+ return 0;
+}
+
+/*reset channel para*/
+static signed int dma_reset_chan(struct zx29_dma_channel *chan) /* force-close, clear pending status, zero the channel regs, and release ownership */
+{
+ unsigned int peripheral_id;
+ unsigned int channel_id;
+ dma_regs __iomem* pReg;
+ dma_chan_reg __iomem* chan_reg_ptr;
+
+ if (!chan) {
+ return -EINVAL;
+ }
+
+ peripheral_id = (unsigned int)chan->peripheral_id;
+ if (peripheral_id >= DMA_CH_NUM) {
+ return -EINVAL;
+ }
+
+ channel_id = get_channel_id(chan->peripheral_id);
+ if(channel_id == 0xff)
+ return -EINVAL;
+
+ pReg= dma_dev.reg;
+ chan_reg_ptr= &(pReg->channel[peripheral_id]);
+
+ /*force close current channel*/
+ chan_reg_ptr->control |= DMA_CTRL_FORCE_CLOSE(1);
+
+ //memset((void*) chan_reg_ptr,0,sizeof(dma_chan_reg))
+ pReg->raw_int_tc_status = BIT_SHIFT_L(0x1,peripheral_id);        /* presumably write-one-to-clear pending status -- TODO confirm against register spec */
+ pReg->raw_int_src_err_status = BIT_SHIFT_L(0x1,peripheral_id);
+ pReg->raw_int_dest_err_status = BIT_SHIFT_L(0x1,peripheral_id);
+ pReg->raw_int_cfg_err_status = BIT_SHIFT_L(0x1,peripheral_id);
+ memset((void*) chan_reg_ptr,0,sizeof(dma_chan_reg));             /* zero all channel registers */
+ //dma_dev[dmac_id].chan_config[channel_id].channelCbk = NULL;
+ //dma_dev[dmac_id].chan_config[channel_id].data = NULL;
+ chan->status = DMA_SUCCESS;
+ chan->cyclic = 0;
+ dma_dev.chan_config[channel_id].is_used = false;                 /* NOTE(review): released without reg_spin_lock, unlike dma_find_chan -- confirm this is safe */
+ dma_chan_unlock(dma_dev.chan_config[channel_id].peripheral_id);
+
+ return 0;
+}
+
+/*find the fixed free channel for peripheralID*/
+static signed int dma_find_chan(dma_peripheral_id peripheral_id) /* reserve the fixed channel for this peripheral; returns channel_id or -EAGAIN */
+{
+ unsigned int channel_id = 0xff;
+ dma_channel_config *chan_config = dma_dev.chan_config;
+
+#if 0/*move to zx29_dma_filter_fn*/
+ /*in case there is free channel,allocate it to M2M*/
+ if (DMA_CH_MEMORY==peripheral_id)
+ {
+ for(i=0; i<dma_dev.channel_count; i++)
+ {
+ if((chan_config[i].is_used==false) && \
+ (dma_chan_check_lock(chan_config[i].peripheral_id)==false) && \
+ (chan_config[i].enable_mem2mem==true))
+ {
+ chan_config[i].is_used = true;
+ dma_chan_lock(chan_config[i].peripheral_id);
+ return i;
+ }
+ }
+ return -EAGAIN;
+ }
+#endif
+
+ channel_id = get_channel_id(peripheral_id);
+ if(channel_id==0xff)
+ return -EAGAIN;
+
+ reg_spin_lock();                              /* cross-core hw spinlock protects both the local and shared ownership flags */
+ /*if channle has been used,return error*/
+ if((chan_config[channel_id].is_used==true) || \
+ (dma_chan_check_lock(chan_config[channel_id].peripheral_id)==true))
+ {
+ reg_spin_unlock();
+ return -EAGAIN;
+ }
+
+ /*get the channel number*/
+ chan_config[channel_id].is_used =true;
+ dma_chan_lock(chan_config[channel_id].peripheral_id);
+ reg_spin_unlock();
+
+ /* channel reuse*/
+ dma_reuse_config(peripheral_id);              /* steer the request mux only after we own the channel */
+
+ return channel_id;
+}
+
+static u32 dma_get_residue(struct zx29_dma_channel *chan) /* read the channel's remaining x-count directly from hardware */
+{
+ dma_regs __iomem* pReg = NULL;
+ dma_chan_reg __iomem* chan_reg_ptr = NULL;
+
+ pReg= dma_dev.reg;
+ chan_reg_ptr= &(pReg->channel[chan->peripheral_id]);
+
+ return chan_reg_ptr->xpara;
+}
+
+static enum dma_status zx29_dma_tx_status(struct dma_chan *chan, /* dmaengine .device_tx_status: report progress + residue for a cookie */
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ struct zx29_dma_channel *zx29_chan = to_zx29_dma_chan(chan);
+ dma_cookie_t last_used;
+ u32 bytes;
+
+ bytes = dma_get_residue(zx29_chan);           /* residue read live from the hw xpara register */
+ last_used = chan->cookie;
+ dma_set_tx_state(txstate, chan->completed_cookie, last_used, bytes);
+
+ return zx29_chan->status;                     /* NOTE(review): returns channel status regardless of the requested cookie */
+}
+
+static unsigned int parse_dma_req(dma_transfer_mode trans_mode) /* translate a transfer direction into request-type + FIFO/RAM addressing control bits */
+{
+ unsigned int control = 0;
+
+ switch(trans_mode)
+ {
+ case TRAN_PERI_TO_PERI:
+ control = DMA_CTRL_SOFT_B_REQ(DMA_PERIPHERAL_REQ)\
+ | DMA_CTRL_SRC_FIFO_MOD(DMA_ADDRMOD_FIFO) \
+ | DMA_CTRL_DEST_FIFO_MOD(DMA_ADDRMOD_FIFO);
+ break;
+
+ case TRAN_PERI_TO_MEM:
+ control = DMA_CTRL_SOFT_B_REQ(DMA_PERIPHERAL_REQ)\
+ | DMA_CTRL_SRC_FIFO_MOD(DMA_ADDRMOD_FIFO) \
+ | DMA_CTRL_DEST_FIFO_MOD(DMA_ADDRMOD_RAM);
+ break;
+
+ case TRAN_MEM_TO_PERI:
+ control = DMA_CTRL_SOFT_B_REQ(DMA_PERIPHERAL_REQ)\
+ | DMA_CTRL_SRC_FIFO_MOD(DMA_ADDRMOD_RAM) \
+ | DMA_CTRL_DEST_FIFO_MOD(DMA_ADDRMOD_FIFO);
+ break;
+
+ case TRAN_MEM_TO_MEM:
+ default:                                      /* mem-to-mem uses a software request; also the fallback for unknown modes */
+ control = DMA_CTRL_SOFT_B_REQ(DMA_SOFT_REQ)\
+ | DMA_CTRL_SRC_FIFO_MOD(DMA_ADDRMOD_RAM) \
+ | DMA_CTRL_DEST_FIFO_MOD(DMA_ADDRMOD_RAM);
+ break;
+ }
+
+ return control;
+}
+
+static signed int dma_set_chan_para(unsigned int channel)//,dma_channel_def * chan_para)
+{
+ volatile dma_chan_reg __iomem* chan_reg = &(dma_dev.reg->channel[channel]); /* load the first LLI descriptor into the channel registers, enable bit left clear */
+ unsigned int channel_id = get_channel_id(channel);
+ dma_lli_param *temp_dma_lli_params = NULL;
+
+ if(channel_id>= DMA_CH_NUM)                   /* also catches get_channel_id's 0xff error sentinel */
+ return -EAGAIN;
+
+ temp_dma_lli_params = dma_lli_params[channel_id];
+
+/* chan_reg->src_addr = chan_para->src_addr;
+ chan_reg->dest_addr = chan_para->dest_addr;
+ chan_reg->xpara = chan_para->count;
+ chan_reg->link_addr = chan_para->link_addr;
+
+ if(chan_para->link_addr)
+ chan_reg->link_addr = dma_lli_phy_addr[get_channel_id(channel)];
+
+ chan_reg->control = parse_dma_req(chan_para->dma_control.tran_mode)\
+ | DMA_CTRL_SRC_BURST_SIZE(chan_para->dma_control.src_burst_size) \
+ | DMA_CTRL_SRC_BURST_LENGTH((chan_para->dma_control.src_burst_len )) \
+ | DMA_CTRL_DEST_BURST_SIZE(chan_para->dma_control.dest_burst_size) \
+ | DMA_CTRL_DEST_BURST_LENGTH((chan_para->dma_control.dest_burst_len ))\
+ | DMA_CTRL_INTERRUPT_SEL(DMA_INT_TO_PS) ;
+
+ if(chan_para->dma_control.irq_mode)
+ {
+ if(chan_para->link_addr)
+ chan_reg->control |= DMA_CTRL_IRQ_MOD(DMA_ERR_IRQ_ENABLE);
+ else
+ chan_reg->control |= DMA_CTRL_IRQ_MOD(DMA_ALL_IRQ_ENABLE);
+ }*/
+
+ chan_reg->src_addr = temp_dma_lli_params[0].src_addr;
+ chan_reg->dest_addr = temp_dma_lli_params[0].dest_addr;
+ chan_reg->xpara = temp_dma_lli_params[0].xpara;
+ chan_reg->link_addr = temp_dma_lli_params[0].link_addr;
+ chan_reg->control = temp_dma_lli_params[0].control &
+ (~(DMA_CTRL_ENABLE(DMA_ENABLE)));             /* actual start happens later in zx29_dma_start */
+
+ return 0;
+}
+
+/*allocate a channel for peripheralID,
+and return the channel number.if failed return -EAGAIN
+*/
+signed int zx29_dma_request(dma_peripheral_id peripheral_id)
+{
+ signed int errCode = -EAGAIN;
+
+ mutex_lock(&dma_mutex);                       /* serializes allocation against other threads on this core */
+ errCode=dma_find_chan(peripheral_id);
+ mutex_unlock(&dma_mutex);
+
+ return errCode;
+}
+
+static void dma_config_lli(unsigned int channel_id, dma_channel_def *chan_para) /* build the channel's LLI chain from the caller's descriptor array (terminated by link_addr == 0) */
+{
+ int i = 0;
+ dma_lli_param *temp_dma_lli_params = dma_lli_params[channel_id];
+
+ dma_sync_lli_for_cpu(channel_id);             /* take ownership of the LLI memory before writing */
+ do{
+ temp_dma_lli_params[i].src_addr = chan_para[i].src_addr;
+ temp_dma_lli_params[i].dest_addr = chan_para[i].dest_addr;
+ temp_dma_lli_params[i].xpara = chan_para[i].count;
+ temp_dma_lli_params[i].yzpara = chan_para[i].ycount | (chan_para[i].zcount << 16);
+ temp_dma_lli_params[i].src_yzstep = chan_para[i].src_ystep | (chan_para[i].src_zstep << 16);
+ temp_dma_lli_params[i].dest_yzstep = chan_para[i].dest_ystep | (chan_para[i].dest_zstep << 16);
+ temp_dma_lli_params[i].control = parse_dma_req(chan_para[i].dma_control.tran_mode)\
+ | DMA_CTRL_SRC_BURST_SIZE(chan_para[i].dma_control.src_burst_size) \
+ | DMA_CTRL_SRC_BURST_LENGTH((chan_para[i].dma_control.src_burst_len )) \
+ | DMA_CTRL_DEST_BURST_SIZE(chan_para[i].dma_control.dest_burst_size) \
+ | DMA_CTRL_DEST_BURST_LENGTH((chan_para[i].dma_control.dest_burst_len ))\
+ | DMA_CTRL_INTERRUPT_SEL(ZX29_DMA_INT_SEL)\
+ | DMA_CTRL_ENABLE(DMA_ENABLE);
+
+ if(chan_para[i].dma_control.irq_mode > DMA_ALL_IRQ_DISABLE)
+ temp_dma_lli_params[i].control |= DMA_CTRL_IRQ_MOD(DMA_ERR_IRQ_ENABLE); /* mid-chain nodes: error irq only */
+
+ if(chan_para[i].link_addr > 0)
+ temp_dma_lli_params[i].link_addr = dma_lli_phy_addr[channel_id] + sizeof(dma_lli_param)*(i+1); /* chain to the next descriptor in our own LLI array */
+ else
+ {
+ if(chan_para[i].dma_control.irq_mode > DMA_ALL_IRQ_DISABLE)
+ temp_dma_lli_params[i].control |= DMA_CTRL_IRQ_MOD(DMA_ALL_IRQ_ENABLE); /* last node: completion irq too */
+
+ temp_dma_lli_params[i].link_addr = 0;
+ }
+
+// i++;
+ }while(chan_para[i++].link_addr);             /* NOTE(review): no bound against MAX_LLI_PARA_CNT -- an unterminated caller array overruns the LLI buffer */
+
+ dma_sync_lli_for_device(channel_id);          /* flush so the engine reads the new chain */
+}
+
+signed int zx29_dma_config(struct dma_chan *chan, /* dmaengine .device_config: validate the caller's parameters and build the LLI chain */
+ struct dma_slave_config *cfg)
+{
+ struct zx29_dma_channel *dma_channel;
+ dma_peripheral_id peripheral_id;
+ unsigned int channel_id;
+ dma_channel_def *chan_para;
+
+ if (!cfg || !chan)
+ return -EINVAL;
+
+ dma_channel = to_zx29_dma_chan(chan);
+ peripheral_id = dma_channel->peripheral_id;
+
+ channel_id = get_channel_id(peripheral_id);
+
+ if(dma_dev.chan_config[channel_id].is_used == false) /* NOTE(review): channel_id may be 0xff here -- indexing before checking is a potential OOB read */
+ return -EINVAL;
+
+ chan_para = (dma_channel_def *)cfg;           /* driver-private contract: callers actually pass a dma_channel_def, not a plain dma_slave_config */
+ if (chan_para->dma_control.tran_mode>=DMA_TRAN_MOD_ALL\
+ ||chan_para->dma_control.irq_mode>=DMA_IRQMOD_ALL\
+ ||chan_para->dma_control.src_burst_size>=DMA_BURST_SIZE_ALL\
+ ||chan_para->dma_control.src_burst_len>=DMA_BURST_LEN_ALL\
+ ||chan_para->dma_control.dest_burst_size>=DMA_BURST_SIZE_ALL\
+ ||chan_para->dma_control.dest_burst_len>=DMA_BURST_LEN_ALL)
+ {
+ return -EINVAL;
+ }
+
+ /* config lli */
+ dma_config_lli(channel_id, chan_para);
+
+ /* config regs */
+#if 0
+ dma_dev.chan_config[channel_id].channel_callback = chan_para->callback;
+ dma_dev.chan_config[channel_id].data = chan_para->data;
+#endif
+
+ return 0;//dma_set_chan_para((unsigned int)peripheral_id, chan_para);
+}
+
+signed int zx29_dma_start(unsigned int channel_id) /* kick the channel by setting its enable bit; BUGs on an invalid id */
+{
+ volatile dma_regs __iomem * pReg = dma_dev.reg;
+
+ if(channel_id >= DMA_CH_NUM)
+ {
+ BUG();
+ return -EINVAL;
+ }
+
+ dsb();                                        /* make sure LLI/register writes are visible before enabling */
+
+ pReg->channel[channel_id].control |= DMA_CTRL_ENABLE(DMA_ENABLE);
+
+ return 0;
+}
+
+signed int zx29_dma_stop(unsigned int channel_id) /* stop a channel; uses FORCE_CLOSE rather than clearing the enable bit */
+{
+ volatile dma_regs __iomem * pReg = dma_dev.reg;
+
+ if(channel_id >= DMA_CH_NUM)
+ return -EINVAL;
+
+ //pReg->channel[channel_id].control &= ~(DMA_CTRL_ENABLE(DMA_ENABLE));
+ pReg->channel[channel_id].control |= DMA_CTRL_FORCE_CLOSE(1);//change by gsn for linuxDMA
+ return 0;
+}
+signed int zx29_dma_get_transfer_num(unsigned int channel_id) /* remaining x-count for the channel, or -EINVAL on a bad id */
+{
+ volatile dma_regs __iomem * pReg = dma_dev.reg;
+ if(channel_id >= DMA_CH_NUM)
+ return -EINVAL;
+ return (pReg->channel[channel_id].xpara);     /* NOTE(review): a large unsigned xpara would read as negative in the signed return */
+}
+
+signed int zx29_dma_set_priority(dma_group_order groupOrder, dma_group_mode groupMode) /* program controller-wide group priority and arbitration mode */
+{
+ if(groupOrder >= DMA_GROUP_ALL ||groupMode >= DMA_MODE_ALL)
+ return -EINVAL;
+
+ dma_dev.reg->group_order = groupOrder;
+ dma_dev.reg->arbit_mode = groupMode;
+
+ return 0;
+}
+
+static dma_cookie_t zx29_dma_tx_submit(struct dma_async_tx_descriptor *tx) /* dmaengine .tx_submit: trace and assign the next cookie (no queuing) */
+{
+ dma_trace_submit(tx);
+
+ return dma_cookie_assign(tx);
+}
+
+static int zx29_dma_alloc_chan_resources(struct dma_chan *chan) /* dmaengine .device_alloc_chan_resources: reserve the hw channel and init its descriptor */
+{
+ int ret = 0;
+ struct zx29_dma_channel *dma_channel = to_zx29_dma_chan(chan);
+
+ ret = zx29_dma_request(dma_channel->req_peripheral_id); /* req_peripheral_id was chosen earlier by zx29_dma_filter_fn */
+ if(ret < 0)
+ return ret;
+
+ dma_async_tx_descriptor_init(&dma_channel->desc, chan);
+ dma_channel->desc.tx_submit = zx29_dma_tx_submit;
+
+ /* the descriptor is ready */
+ async_tx_ack(&dma_channel->desc);
+
+ return ret;                                   /* returns the channel_id on success */
+}
+
+void zx29_dma_free_chan_resource(struct dma_chan *chan) /* dmaengine .device_free_chan_resources: reset the hw channel and drop ownership */
+{
+ struct zx29_dma_channel *dma_channel = to_zx29_dma_chan(chan);
+ dma_reset_chan(dma_channel);
+}
+
+static struct dma_async_tx_descriptor *zx29_prep_dma_cyclic( /* dmaengine .device_prep_dma_cyclic: turn the pre-built LLI chain into a ring */
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long context)
+{
+ struct zx29_dma_channel *dma_channel = to_zx29_dma_chan(chan);
+ struct dma_async_tx_descriptor *desc = &dma_channel->desc;
+ unsigned int channel_id = get_channel_id(dma_channel->peripheral_id);
+ int num_periods = buf_len / period_len;       /* NOTE(review): buf_addr/direction are ignored -- the LLI chain from zx29_dma_config is reused as-is */
+ int i = 0;
+ dma_lli_param *temp_dma_lli_params;
+
+ if (channel_id >= DMA_CH_NUM)
+ return NULL;
+// change by gsn for linuxDMA
+ //if(dma_channel->status == DMA_IN_PROGRESS)
+ //return NULL;
+ dma_channel->status = DMA_IN_PROGRESS;
+
+ temp_dma_lli_params = dma_lli_params[channel_id];
+
+ dma_sync_lli_for_cpu(channel_id);
+ for (i = 0; i < num_periods; i++)
+ {
+ temp_dma_lli_params[i].control |= DMA_CTRL_IRQ_MOD(DMA_ALL_IRQ_ENABLE); /* interrupt at every period boundary */
+ }
+ temp_dma_lli_params[num_periods - 1].link_addr = dma_lli_phy_addr[channel_id]; /* close the ring: last node links back to the first */
+ dma_sync_lli_for_device(channel_id);
+
+ dma_channel->cyclic = 1;
+
+ desc->callback = NULL;                        /* client installs callback after prep, per dmaengine convention */
+ desc->callback_param = NULL;
+
+ dma_set_chan_para(dma_channel->peripheral_id);
+
+ return desc;
+}
+
+static struct dma_async_tx_descriptor *zx29_prep_dma_interleaved( /* dmaengine .device_prep_interleaved_dma: arm the channel with the LLI chain built in zx29_dma_config (xt is ignored) */
+ struct dma_chan *chan,
+ struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ struct zx29_dma_channel *dma_channel = to_zx29_dma_chan(chan);
+ struct dma_async_tx_descriptor *desc = &dma_channel->desc;
+
+// change by gsn for linuxDMA
+ //if(dma_channel->status == DMA_IN_PROGRESS)
+ //return NULL;
+ dma_channel->status = DMA_IN_PROGRESS;
+
+ desc->callback = NULL;                        /* client installs callback after prep, per dmaengine convention */
+ desc->callback_param = NULL;
+
+ dma_set_chan_para(dma_channel->peripheral_id);
+
+ return desc;
+}
+
+static int zx29_dma_terminate_all(struct dma_chan *chan) /* dmaengine .device_terminate_all: force-close the hw channel */
+{
+ struct zx29_dma_channel *dma_channel = to_zx29_dma_chan(chan);
+
+ return dma_disable_chan(dma_channel);
+}
+
+static void zx29_dma_issue_pending(struct dma_chan *chan) /* dmaengine .device_issue_pending: start the already-programmed transfer */
+{
+ struct zx29_dma_channel *dma_channel = to_zx29_dma_chan(chan);
+
+ dma_trace_pending(dma_channel->peripheral_id);
+
+ zx29_dma_start(dma_channel->peripheral_id);   /* hw channel index == peripheral id */
+}
+
+unsigned int zx29_dma_get_status(void)         /* raw controller working-status register (per-channel busy bits -- TODO confirm layout) */
+{
+ volatile dma_regs __iomem * pReg = dma_dev.reg;
+
+ return pReg->working_status;
+}
+
+bool zx29_dma_filter_fn(struct dma_chan *chan, void *param) /* dma_request_channel filter: param is a dma_peripheral_id; accept the matching free channel */
+{
+ struct zx29_dma_channel * channel = to_zx29_dma_chan(chan);
+ unsigned int channel_id = 0;
+ dma_peripheral_id peri_id = 0;
+
+ peri_id = get_real_peri_id((dma_peripheral_id)param); /* collapse muxed ids onto their hw channel */
+ if(peri_id >= DMA_CH_NUM)
+ return false;
+
+ channel_id = get_channel_id(channel->peripheral_id);
+ if(channel_id == 0xff)
+ return false;
+
+ if(peri_id == DMA_CH_MEMORY)                  /* memory transfers may borrow any free channel flagged enable_mem2mem -- NOTE(review): unreachable if DMA_CH_MEMORY >= DMA_CH_NUM; confirm */
+ {
+
+ if ((dma_dev.chan_config[channel_id].is_used == false)&& \
+ (dma_chan_check_lock(dma_dev.chan_config[channel_id].peripheral_id)==false)&& \
+ (dma_dev.chan_config[channel_id].enable_mem2mem==true))
+ {
+ channel->req_peripheral_id = channel->peripheral_id;
+ return true;
+ }
+ else
+ return false;
+ }
+
+ if (channel->peripheral_id != peri_id)        /* peripheral transfers must land on their fixed channel */
+ return false;
+
+ if ((dma_dev.chan_config[channel_id].is_used == false)&& \
+ (dma_chan_check_lock(dma_dev.chan_config[channel_id].peripheral_id)==false))
+ {
+ channel->req_peripheral_id = (dma_peripheral_id)param; /* remember the original (possibly muxed) request for alloc_chan_resources */
+ return true;
+ }
+ else
+ return false;
+}
+EXPORT_SYMBOL(zx29_dma_filter_fn);
+
+/*
+ * DMA controller interrupt handler.
+ *
+ * Error interrupts (source/destination/config) mark the affected channels
+ * DMA_ERROR and then BUG(): a transfer-programming error is treated as
+ * fatal.  Terminal-count (TC) interrupts are acknowledged per channel and
+ * completion is delivered either inline (RT / vehicle builds) or via the
+ * channel tasklet.  The outer loop re-reads the status registers until no
+ * new TC was claimed so TCs raised during handling are not lost.
+ */
+irqreturn_t dma_Isr(int irq, void *dev)
+{
+	unsigned int need_continue = 0;
+	unsigned int i;
+	struct zx29_dma *dmac_ptr = dev;
+	dma_regs __iomem * dma_reg=dmac_ptr->reg;
+	volatile unsigned int control;
+	volatile unsigned int raw_tc_int = dma_reg->raw_int_tc_status;
+	volatile unsigned int raw_src_err_int = dma_reg->raw_int_src_err_status;
+	volatile unsigned int raw_dest_err_int = dma_reg->raw_int_dest_err_status;
+	volatile unsigned int raw_cfg_err_int = dma_reg->raw_int_cfg_err_status;
+	volatile unsigned int tc_int = dma_reg->int_tc_status;
+
+	unsigned int channel_id;
+
+	/* Any error interrupt is fatal: tag the channels, trace, and BUG(). */
+	if (raw_src_err_int!=0 || raw_dest_err_int!=0 || raw_cfg_err_int!=0)
+	{
+		for (i=0; i<DMA_CH_NUM; i++)
+		{
+			if ((raw_src_err_int|raw_dest_err_int|raw_cfg_err_int)&(0x01<<i))
+			{
+				channel_id = get_channel_id(i);
+				if(channel_id >= DMA_CH_NUM)
+					continue;
+
+				dmac_ptr->dma_chan[channel_id].status = DMA_ERROR;
+				dma_trace_err(i);
+			}
+		}
+
+		BUG();
+	}
+
+	do
+	{
+		need_continue = 0;
+
+		tc_int = dma_reg->int_tc_status;
+		raw_tc_int = dma_reg->raw_int_tc_status;
+
+		for (i = 0;(i< DMA_CH_NUM)&&(raw_tc_int!=0); i++)
+		{
+			if (raw_tc_int&(0x01<<i))
+			{
+				control = dma_reg->channel[i].control;
+				channel_id = get_channel_id(i);
+				/* Do not clear the TC yet: clearing before the ownership
+				 * check below could discard another owner's interrupt. */
+
+				if(channel_id >= DMA_CH_NUM)
+					continue;
+
+				/* Claim the TC only if this driver selected the interrupt,
+				 * IRQ mode is on, and the channel has finished (or is
+				 * cyclic with a pending unmasked TC). */
+				if(((control&DMA_CTRL_INTERRUPT_SEL(0xf))==DMA_CTRL_INTERRUPT_SEL(ZX29_DMA_INT_SEL))&&\
+					(control&DMA_CTRL_IRQ_MOD(1))&&\
+					( ((control&DMA_CTRL_ENABLE(1)) == 0) || ((dmac_ptr->dma_chan[channel_id].cyclic)&&(tc_int&(0x1<<i))) ) )
+				{
+					dma_reg->raw_int_tc_status = (0x1<<i);
+					need_continue = 1;
+					dma_trace_success(i);
+
+					dmac_ptr->dma_chan[channel_id].status = DMA_SUCCESS;
+					if(dmac_ptr->dma_chan[channel_id].cyclic == 0)
+					{
+						dma_cookie_complete(&dmac_ptr->dma_chan[channel_id].desc);
+					}
+#ifdef CONFIG_PREEMPT_RT_FULL
+					if (dmac_ptr->dma_chan[channel_id].desc.callback)
+						dmac_ptr->dma_chan[channel_id].desc.callback(dmac_ptr->dma_chan[channel_id].desc.callback_param);
+#else
+					/* schedule tasklet on this channel */
+					/* yu.dong@20240617 [T106BUG-641] SPI packet loss issue, increase kernel buffer and read all cached data away, no data loss start */
+					/* FIX: was "#ifdef _USE_VEHICLE_DC || _USE_VEHICLE_DC_REF" --
+					 * everything after the first macro in an #ifdef is ignored,
+					 * so _USE_VEHICLE_DC_REF-only builds never took this path. */
+	#if defined(_USE_VEHICLE_DC) || defined(_USE_VEHICLE_DC_REF)
+					if((channel_id == DMA_CH_UART0_RX) || (channel_id == DMA_CH_UART2_RX) || (channel_id == DMA_CH_SSP0_RX)){
+						if (dmac_ptr->dma_chan[channel_id].desc.callback)
+							dmac_ptr->dma_chan[channel_id].desc.callback(dmac_ptr->dma_chan[channel_id].desc.callback_param);
+					}else
+						tasklet_schedule(&dmac_ptr->dma_chan[channel_id].tasklet);
+	#else
+					/* yu.dong@20240617 [T106BUG-641] SPI packet loss issue, increase kernel buffer and read all cached data away, no data loss end */
+					tasklet_schedule(&dmac_ptr->dma_chan[channel_id].tasklet);
+	#endif
+#endif
+				}
+			}
+		}
+	}while(need_continue);
+
+	return IRQ_HANDLED;
+}
+
+#if ZX29_DMA_TEST
+
+#define DMA_LLI_TEST 0
+
+#if DMA_LLI_TEST
+#define MEM_CPY_CNT (3)
+#else
+#define MEM_CPY_CNT (1)
+#endif
+
+#define MEM_TEST_COUNT (0x200)
+
+static unsigned int dma_int_count = 0;
+static unsigned char * test_buffer = NULL;
+static dma_addr_t test_phy_addr;
+static struct dma_chan * test_chan = NULL;
+
+/*
+ * Completion callback for the mem-to-mem DMA self test.
+ *
+ * Unmaps the test buffer, compares every destination half against its
+ * source half, then frees the buffer and releases the channel.  The
+ * globals are reset to NULL so a repeated test run cannot double-free the
+ * buffer or release an already-released channel.
+ */
+void dma_cb(struct zx29_dma_channel * chan)
+{
+	int i;
+
+	dma_unmap_single(dma_dev.dma.dev, test_phy_addr, MEM_TEST_COUNT*2*MEM_CPY_CNT, DMA_BIDIRECTIONAL);
+
+	for(i=0; i<MEM_CPY_CNT; i++)
+	{
+		if(memcmp(test_buffer+MEM_TEST_COUNT*2*i,
+			test_buffer+MEM_TEST_COUNT+MEM_TEST_COUNT*2*i,
+			MEM_TEST_COUNT))
+		{
+			pr_info("[DMA] m2m test copy failed(%d). \n", i+1);
+		}
+	}
+
+	kfree(test_buffer);
+	test_buffer = NULL;	/* FIX: avoid double-free on a repeated test run */
+	if (test_chan)
+	{
+		dma_release_channel(test_chan);
+		test_chan = NULL;	/* FIX: forget the stale channel pointer */
+	}
+
+	pr_info("[DMA] m2m test copy succeeded (%d). \n", ++dma_int_count);
+}
+
+/*
+ * Allocate the global m2m test buffer (@size bytes) and pattern the source
+ * half of each 2*MEM_TEST_COUNT segment with a distinct byte value
+ * (0x11, 0x22, ...) so copies can be verified later.  Test-only code:
+ * BUG()s on allocation failure.  Returns the buffer.
+ */
+static void *test_prepare_buff(size_t size)
+{
+ int i;
+
+ /* alloc buffer */
+ test_buffer = kzalloc(size, GFP_KERNEL);
+ if (!test_buffer) {
+ dev_err(dma_dev.dma.dev, "%s: could not alloc DMA memory\n",
+ __func__);
+ BUG();
+ }
+ pr_info("[DMA] m2m test alloc buffer (%x). \n", (unsigned int)test_buffer);
+
+ /* prepare data */
+ for(i=0; i<MEM_CPY_CNT; i++)
+ memset(test_buffer+MEM_TEST_COUNT*2*i, 0x11+0x11*i, MEM_TEST_COUNT);
+
+ return test_buffer;
+}
+
+/* Request any idle channel capable of mem-to-mem via the zx29 filter. */
+static struct dma_chan *test_alloc_channel(void)
+{
+	dma_cap_mask_t cap;
+
+	dma_cap_zero(cap);
+	dma_cap_set(DMA_SLAVE, cap);
+
+	return dma_request_channel(cap, zx29_dma_filter_fn, (void *)DMA_CH_MEMORY);
+}
+
+/*
+ * Build MEM_CPY_CNT linked transfer descriptors (source half -> dest half
+ * of each buffer segment) and hand them to the driver through
+ * dmaengine_slave_config().  Note the cast: the driver interprets the
+ * config pointer as its own dma_channel_def array, not as a real
+ * struct dma_slave_config.
+ */
+static signed int test_dma_config(struct dma_chan *chan, dma_addr_t phy_addr)
+{
+ dma_channel_def temp[MEM_CPY_CNT];
+ int i;
+
+ memset(temp, 0, sizeof(temp));
+ for(i=0; i<MEM_CPY_CNT; i++)
+ {
+ temp[i].src_addr = phy_addr + MEM_TEST_COUNT*2*i;
+ temp[i].dest_addr = temp[i].src_addr + MEM_TEST_COUNT;
+ temp[i].count = MEM_TEST_COUNT;
+// temp[i].callback = (dma_callback_func)dma_cb;
+
+ temp[i].dma_control.tran_mode = TRAN_MEM_TO_MEM;
+ temp[i].dma_control.src_burst_size = DMA_BURST_SIZE_8BIT;
+ temp[i].dma_control.src_burst_len = DMA_BURST_LEN_16;
+ temp[i].dma_control.dest_burst_size = DMA_BURST_SIZE_8BIT;
+ temp[i].dma_control.dest_burst_len = DMA_BURST_LEN_16;
+ temp[i].dma_control.irq_mode = DMA_ALL_IRQ_ENABLE;
+
+ /* Chain descriptors; the last one terminates the list below. */
+ temp[i].link_addr = 1;
+ }
+ temp[MEM_CPY_CNT-1].link_addr = 0;
+
+ return dmaengine_slave_config(chan,(struct dma_slave_config*)&temp);
+}
+
+
+//static
+/*
+ * Run one mem-to-mem DMA self test: allocate and pattern a buffer,
+ * request a channel, map the buffer, program and submit the transfer.
+ * Completion is reported asynchronously through dma_cb().
+ */
+void dma_m2m_test(struct device *dev)
+{
+	struct dma_async_tx_descriptor *desc = NULL;
+	struct zx29_dma_channel * zx29_chan = NULL;
+	unsigned char *p = NULL;
+	int ret = 0;
+
+	p = test_prepare_buff(MEM_TEST_COUNT*2*MEM_CPY_CNT);
+
+	/* alloc dma channel */
+	test_chan = test_alloc_channel();
+	if (!test_chan)
+	{
+		pr_info("[DMA]test request channel failed \n");
+		kfree(test_buffer);	/* FIX: don't leak the test buffer */
+		test_buffer = NULL;
+		return;
+	}
+
+	/* map dma address */
+	test_phy_addr = dma_map_single(dma_dev.dma.dev, (void *)p, MEM_TEST_COUNT*2*MEM_CPY_CNT, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dma_dev.dma.dev, test_phy_addr)) {
+		dev_err(dma_dev.dma.dev, "Failed to dma_map_single\n");
+		BUG();
+	}
+
+	/* config dma */
+	ret = test_dma_config(test_chan, test_phy_addr);
+	if(ret < 0)
+		printk("dmaengine_slave_config failed(%d)~~~~~~", ret);
+
+	/* start transfer */
+	zx29_chan = to_zx29_dma_chan(test_chan);
+	desc = test_chan->device->device_prep_interleaved_dma(test_chan,NULL,0);
+	if (!desc)	/* FIX: prep can fail; don't dereference NULL */
+	{
+		pr_info("[DMA]test prep descriptor failed \n");
+		dma_unmap_single(dma_dev.dma.dev, test_phy_addr, MEM_TEST_COUNT*2*MEM_CPY_CNT, DMA_BIDIRECTIONAL);
+		dma_release_channel(test_chan);
+		test_chan = NULL;
+		kfree(test_buffer);
+		test_buffer = NULL;
+		return;
+	}
+	desc->callback = (dma_async_tx_callback)dma_cb;
+	desc->callback_param = (void *) zx29_chan;
+
+	zx29_chan->zx29_dma_cookie = dmaengine_submit(desc);
+	dma_async_issue_pending(test_chan);
+
+	return ;
+}
+
+
+/* sysfs read: report how many m2m test completions have been observed. */
+static ssize_t dma_m2m_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int len;
+
+	len = sprintf(buf, "dma_int_count:%d\n", dma_int_count);
+	return len;
+}
+
+/* sysfs write: any write triggers one m2m self-test run. */
+static ssize_t dma_m2m_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	dma_m2m_test(dev);
+	return count;
+}
+
+static DEVICE_ATTR(dma,0600,dma_m2m_show,dma_m2m_store);
+static struct attribute *zx29_dma_attributes[] = {
+ &dev_attr_dma.attr,
+ NULL,
+};
+
+static const struct attribute_group zx29_dma_attribute_group = {
+ .attrs = (struct attribute **) zx29_dma_attributes,
+};
+#endif
+
+/*
+ * Populate dma_dev's channel table from the static dma_chan_config[] and
+ * register every channel on the dmaengine channel list; each channel gets
+ * its own completion tasklet.
+ */
+static void dma_init_channels(void)
+{
+ int i = 0;
+ struct zx29_dma_channel * dma_chan_ptr = NULL;
+
+ dma_dev.chan_config = dma_chan_config;
+ dma_dev.channel_count = ARRAY_SIZE(dma_chan_config);
+
+ INIT_LIST_HEAD(&dma_dev.dma.channels);
+
+ for(i=0;i<dma_dev.channel_count;i++)
+ {
+ dma_chan_ptr = &dma_dev.dma_chan[i];
+ dma_chan_ptr->peripheral_id = dma_dev.chan_config[i].peripheral_id;
+ dma_chan_ptr->dma_device = &(dma_dev);
+ dma_chan_ptr->chan.device = &(dma_dev.dma);
+ dma_cookie_init(&dma_chan_ptr->chan);
+
+ tasklet_init(&dma_chan_ptr->tasklet, dma_tasklet, (unsigned long)(dma_chan_ptr));
+
+ list_add_tail(&dma_chan_ptr->chan.device_node, &dma_dev.dma.channels);
+ }
+}
+
+static u64 general_dma_mask = DMA_BIT_MASK(32);
+
+/*
+ * Map the controller register windows, hook the DMA interrupt and allocate
+ * per-channel LLI scratch memory, all from the device-tree node.
+ * Returns 0 or a negative errno.
+ *
+ * NOTE(review): later failures do not undo earlier steps (no iounmap /
+ * free_irq on the error paths).  Probably tolerated because a failed DMA
+ * probe is effectively fatal on this SoC -- confirm.
+ */
+static int dma_init_resource(struct platform_device* pdev)
+{
+ int ret = 0;
+ int irq;
+ int i;
+ struct device_node *np = pdev->dev.of_node;
+
+ /* registers */
+ dma_dev.reg = (dma_regs *)of_iomap(np, 0);
+ if ( !dma_dev.reg ) {
+ dev_err(&pdev->dev, "[DMA]Cannot get IORESOURCE_MEM\n");
+ return -ENOENT;
+ }
+
+ dma_pub_configs = (dma_pub_config *)(dma_regs *)of_iomap(np, 1);
+ if ( !dma_pub_configs ) {
+ dev_err(&pdev->dev, "[DMA]Cannot get IORESOURCE_MEM 1\n");
+ return -ENOENT;
+ }
+ // only for test
+// memset((u8 *)dma_pub_configs, 0, 0x80);
+
+ /* irq */
+ irq = irq_of_parse_and_map(np, 0);
+ if( !irq ) {
+ dev_err(&pdev->dev, "[DMA]Cannot get IORESOURCE_IRQ\n");
+ return -ENOENT;
+ }
+ dma_dev.reg->irq_type = 0xF; /* high level for all cores */
+ ret = request_irq(irq, dma_Isr, IRQF_NO_THREAD, "zx29dma", &dma_dev);
+ if(ret)
+ return ret;
+
+ /* memory for lli (linked-list items); one buffer per channel, mapped
+ * for device access.  On failure, unwind the buffers allocated so far. */
+ for(i=0; i<ARRAY_SIZE(dma_chan_config); i++)
+ {
+ dma_lli_params[i] = kzalloc(MAX_LLI_PARAMS_CNT, GFP_KERNEL);
+ if (!dma_lli_params[i]) {
+ int j;
+ dev_err(&pdev->dev, "[DMA]%s: could not alloc memory for lli[%d].\n",
+ __func__, i);
+ for(j=0; j<i; j++)
+ {
+ dma_unmap_single(&pdev->dev, dma_lli_phy_addr[j], MAX_LLI_PARAMS_CNT, DMA_BIDIRECTIONAL);
+ kfree(dma_lli_params[j]);
+ dma_lli_phy_addr[j]=0;
+ dma_lli_params[j]=NULL;
+ }
+ return -ENOENT;
+ }
+
+ dma_lli_phy_addr[i] = dma_map_single(&pdev->dev, dma_lli_params[i], MAX_LLI_PARAMS_CNT, DMA_BIDIRECTIONAL);
+ }
+
+ return 0;
+}
+
+/*
+ * Fill in the dmaengine ops and capability masks for this controller and
+ * register it.  Advertises slave, cyclic and interleaved transfers with
+ * 1/2/4/8-byte bus widths in dev->mem, mem->dev and mem->mem directions.
+ * Returns the result of dma_async_device_register().
+ */
+static int dma_register_device(struct platform_device* pdev)
+{
+ dma_cap_zero(dma_dev.dma.cap_mask);
+ dma_cap_set(DMA_SLAVE, dma_dev.dma.cap_mask);
+ dma_cap_set(DMA_CYCLIC, dma_dev.dma.cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, dma_dev.dma.cap_mask);
+
+ dma_dev.dma.device_alloc_chan_resources = zx29_dma_alloc_chan_resources;
+ dma_dev.dma.device_free_chan_resources = zx29_dma_free_chan_resource;
+ dma_dev.dma.device_tx_status = zx29_dma_tx_status;
+ dma_dev.dma.device_config = zx29_dma_config;
+ dma_dev.dma.device_terminate_all = zx29_dma_terminate_all;
+ dma_dev.dma.device_prep_dma_cyclic = zx29_prep_dma_cyclic;
+ dma_dev.dma.device_prep_interleaved_dma = zx29_prep_dma_interleaved;
+ dma_dev.dma.device_issue_pending = zx29_dma_issue_pending;
+
+ dma_dev.dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+ dma_dev.dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+ dma_dev.dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) | BIT(DMA_MEM_TO_MEM);
+
+ /* 32-bit DMA addressing for both streaming and coherent mappings. */
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ pdev->dev.dma_mask = &general_dma_mask;
+
+ dma_dev.dma.dev = &pdev->dev;
+ return dma_async_device_register(&dma_dev.dma);
+}
+
+/*
+ * Platform probe: map resources, build the channel table, then register
+ * the dmaengine device.  Returns 0 or a negative errno.
+ */
+static int zx29_dma_probe(struct platform_device* pdev)
+{
+	int ret = 0;
+
+	/* resource */
+	ret = dma_init_resource(pdev);
+	if(ret)
+	{
+		pr_info("[DMA]get resource failed!\n");
+		return ret;
+	}
+
+	/* channel info */
+	dma_init_channels();
+
+	/* register device */
+	ret = dma_register_device(pdev);
+	if (ret)
+	{
+		dev_info(dma_dev.dma.dev, "[DMA]unable to register\n");
+		return ret;	/* FIX: propagate the real error, not a blanket -EINVAL */
+	}
+
+	pr_info("[DMA]zx297520v DMA initialized\n");
+
+	return 0;
+}
+
+static const struct of_device_id zx29_dma_dt_ids[] = {
+ { .compatible = "arm,zx297520v3-dma" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, zx29_dma_dt_ids);
+
+struct platform_driver zx29_dma_driver = {
+ .driver = {
+ .name = "zx29_dma",
+ .of_match_table = of_match_ptr(zx29_dma_dt_ids),
+ },
+ .probe = zx29_dma_probe,
+};
+/* Register the platform driver; probe runs once the DT node matches. */
+static int __init zx29_dma_driver_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&zx29_dma_driver);
+	return rc;
+}
+
+
+/**
+ * "/sys/zte/test/dma_test"
+ */
+extern struct kobject *zx_test_kobj;
+/*
+ * Expose the m2m test attribute group under the shared zx test kobject
+ * (compiled in only when ZX29_DMA_TEST is enabled).  Always returns 0;
+ * a sysfs failure is only logged implicitly by not printing the OK line.
+ */
+int __init zx_dma_test_init(void)
+{
+#if ZX29_DMA_TEST
+ int ret;
+
+ ret = sysfs_create_group(zx_test_kobj, &zx29_dma_attribute_group);
+ if (!ret)
+ pr_debug("[DEBUG] create test dma sysfs interface OK.\n");
+#endif
+
+ return 0;
+}
+
diff --git a/upstream/linux-5.10/drivers/misc/zcat/debug_info.c b/upstream/linux-5.10/drivers/misc/zcat/debug_info.c
new file mode 100755
index 0000000..d23e340
--- /dev/null
+++ b/upstream/linux-5.10/drivers/misc/zcat/debug_info.c
@@ -0,0 +1,396 @@
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/cdev.h>
+#include <linux/semaphore.h>
+#include <linux/timer.h>
+
+// #include <linux/fs.h>
+#include <linux/ioport.h>
+// #include <linux/serial_reg.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+// #include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+// #include <linux/kthread.h>
+#include <asm/io.h>
+
+#include <linux/vmalloc.h>
+#include <linux/soc/zte/rpmsg.h>
+// #include <linux/syscalls.h>
+
+// #include "debuginfo.h"
+#include "pub_debug_info.h"
+#include "ringbuf.h"
+
+
+#if defined(_USE_ZXIC_DEBUG_INFO) && !defined(CONFIG_SYSTEM_RECOVERY)
+/*******************************************************************************
+ *                             Macro definitions                               *
+ *******************************************************************************/
+#define DEBUG_INFO_SHARE_MEM_LEN (0x2000)
+#define DEBUG_INFO_READABLE_LEN (0x1400)
+#define DEBUG_INFO_MAX_DATA_LEN (128)
+#define DEBUG_INFO_MAX_TOTAL_LEN (140) // 8 + 128 + 4
+#define DEBUG_INFO_READ_TIME_MSECS (10000)
+
+#define DEBUG_INFO_CHANNEL (9)
+#define DEBUG_INFO_MSG_CAP_SIZE (2 * 1024)
+
+#define DEBUG_INFO_OK (0)
+#define DEBUG_INFO_ERROR (-1)
+
+#define DEBUG_INFO_IOCTL_SET_DISABLE (0x1001)
+
+/*******************************************************************************
+ *                             Struct definitions                              *
+ *******************************************************************************/
+typedef unsigned int UINT32;
+typedef unsigned short UINT16;
+typedef unsigned char UINT8;
+
+typedef struct
+{
+ UINT16 module_id; // 模块id
+ UINT16 sub_len; // 用户数据长度
+ UINT32 time;
+ char sub_data[]; // 用户数据
+} T_SHARED_MEM_DATA;
+
+typedef struct
+{
+ UINT32 head; // 0x010a0a0a
+ UINT32 total_len; // 数据内容长度
+ long long time; // time()函数获取
+} T_SAVE_FILE_DATA;
+
+/*******************************************************************************
+ *                             Global variables                                *
+ *******************************************************************************/
+volatile T_RINGBUFFER *g_debug_info_buf = NULL;
+static struct semaphore debug_sem;
+static DEFINE_RAW_SPINLOCK(debugWr_lock);
+static int g_init_flag = 0;
+
+/*******************************************************************************
+ *                       Internal function declarations                        *
+ *******************************************************************************/
+static int sc_debug_info_read_to_user(char *buf, unsigned short count);
+static int sc_debug_info_record_from_user(const char *info, unsigned short count);
+static int sc_debug_info_write(UINT32 flag, const UINT8 *buf, UINT32 len);
+static void sc_debug_info_from_ap(void *buf, unsigned int len);
+
+static void kernel_timer_timeout(struct timer_list *t);
+static ssize_t debug_info_read(struct file *fp, char __user *buf, size_t count, loff_t *pos);
+static ssize_t debug_info_write(struct file *fp, const char __user *buf, size_t count, loff_t *pos);
+static int debug_info_open(struct inode *ip, struct file *fp);
+static long debug_info_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
+static int debug_info_release(struct inode *ip, struct file *fp);
+
+// Statically define the periodic flush timer (armed in debug_info_init)
+static DEFINE_TIMER(timer, kernel_timer_timeout);
+
+static const struct file_operations debug_info_fops = {
+ .owner = THIS_MODULE,
+ .read = debug_info_read,
+ .write = debug_info_write,
+ .open = debug_info_open,
+ .unlocked_ioctl = debug_info_ioctl,
+ .release = debug_info_release,
+};
+
+static struct miscdevice debug_info_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "debug_info",
+ .fops = &debug_info_fops,
+};
+
+/*
+ * Periodic flush timer: wake the reader at most every
+ * DEBUG_INFO_READ_TIME_MSECS so buffered records get drained.
+ *
+ * NOTE(review): peeking at debug_sem.count directly bypasses the semaphore
+ * API and is racy; it only roughly bounds the count at 1 -- confirm this
+ * best-effort behaviour is intended.
+ */
+static void kernel_timer_timeout(struct timer_list *t)
+{
+ if (debug_sem.count == 0)
+ {
+ up(&debug_sem);
+ }
+ /* Kernel timers are one-shot: re-arm at the end of the handler so the
+ * wakeup repeats. */
+ mod_timer(&timer, jiffies + msecs_to_jiffies(DEBUG_INFO_READ_TIME_MSECS));
+}
+
+/*
+ * read(): block until the flush timer or a writer signals data, then
+ * drain the ring buffer to the caller's buffer.
+ */
+static ssize_t debug_info_read(struct file *fp, char __user *buf, size_t count, loff_t *pos)
+{
+	int rc;
+
+	rc = down_interruptible(&debug_sem);
+	if (rc < 0)
+		return rc;
+
+	return sc_debug_info_read_to_user(buf, count);
+}
+
+/* write(): forward the user payload into the shared debug ring buffer. */
+static ssize_t debug_info_write(struct file *fp, const char __user *buf, size_t count, loff_t *pos)
+{
+	return sc_debug_info_record_from_user(buf, count);
+}
+
+/* open(): stateless; all state lives in module globals. */
+static int debug_info_open(struct inode *ip, struct file *fp)
+{
+ return 0;
+}
+
+/*
+ * ioctl: DEBUG_INFO_IOCTL_SET_DISABLE stores @arg into the shared
+ * ZCAT_DEBUG_INFO_DISABLE word (a fixed address written via a raw
+ * volatile cast).  Unknown commands are silently accepted and return 0.
+ */
+static long debug_info_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+ switch(cmd)
+ {
+ case DEBUG_INFO_IOCTL_SET_DISABLE:
+ *(volatile UINT32 *)ZCAT_DEBUG_INFO_DISABLE = arg;
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+
+/* release(): nothing to tear down per open file. */
+static int debug_info_release(struct inode *ip, struct file *fp)
+{
+ return 0;
+}
+
+/*
+ * rpmsg callback: a debug record arrived from the PS core.  Stamp it with
+ * the current jiffies in place, then append it to the ring buffer as
+ * kernel-memory data.
+ */
+static void sc_debug_info_from_ap(void *buf, unsigned int len)
+{
+ T_SHARED_MEM_DATA *debug_msg = (T_SHARED_MEM_DATA *)buf;
+ debug_msg->time = jiffies;
+
+ sc_debug_info_write(ZCAT_MEM_TYPE_KERNEL, buf, len);
+}
+
+/*
+ * Module init: register the misc device, allocate the ring buffer, create
+ * the rpmsg channel from the PS core, hook its callback and arm the
+ * periodic flush timer.
+ *
+ * Fixes: the buffer declaration is hoisted to the top (the old code
+ * declared it after a statement, which kernel builds before C11 warn on),
+ * and each error path now undoes the earlier steps instead of leaking the
+ * misc device and the vmalloc'd buffer.
+ */
+static int __init debug_info_init(void)
+{
+	void *mem = NULL;
+	int ret;
+
+	ret = misc_register(&debug_info_device);
+	if (ret)
+	{
+		printk("debug_info_device init.\n");
+		return DEBUG_INFO_ERROR;
+	}
+
+	mem = vmalloc(DEBUG_INFO_SHARE_MEM_LEN);
+	if (!mem)
+	{
+		printk("vmalloc failed.\n");
+		misc_deregister(&debug_info_device);
+		return DEBUG_INFO_ERROR;
+	}
+
+	g_debug_info_buf = CreateRingBuffer((UINT8 *)mem, DEBUG_INFO_SHARE_MEM_LEN);
+	if (g_debug_info_buf == NULL)
+	{
+		printk("CreateRingBuffer failed.\n");
+		vfree(mem);
+		misc_deregister(&debug_info_device);
+		return DEBUG_INFO_ERROR;
+	}
+
+	ret = rpmsgCreateChannel(
+		CORE_PS0,
+		DEBUG_INFO_CHANNEL,
+		DEBUG_INFO_MSG_CAP_SIZE);
+	if (ret != DEBUG_INFO_OK)
+	{
+		printk("rpmsgCreateChannel failed, ret = %d\n", ret);
+		g_debug_info_buf = NULL;
+		vfree(mem);
+		misc_deregister(&debug_info_device);
+		return DEBUG_INFO_ERROR;
+	}
+
+	ret = rpmsgRegCallBack(
+		CORE_PS0,
+		DEBUG_INFO_CHANNEL,
+		sc_debug_info_from_ap);
+	if (ret != DEBUG_INFO_OK)
+	{
+		printk("rpmsgRegCallBack failed,ret = %d\n", ret);
+		g_debug_info_buf = NULL;
+		vfree(mem);
+		misc_deregister(&debug_info_device);
+		return DEBUG_INFO_ERROR;
+	}
+
+	sema_init(&debug_sem, 0);
+	/* Arm the periodic flush timer; first fire on the next tick. */
+	mod_timer(&timer, jiffies + 1);
+
+	g_init_flag = 1;
+
+	return 0;
+}
+
+/*
+ * Module exit: stop the flush timer, then remove the misc device.
+ * del_timer_sync() is required because the handler re-arms itself and a
+ * plain del_timer() can lose the race with a concurrently-running
+ * handler on another CPU.
+ */
+static void __exit debug_info_exit(void)
+{
+	del_timer_sync(&timer);
+
+	misc_deregister(&debug_info_device);
+}
+
+/*
+ * Append @len bytes from @buf to the shared ring buffer under the writer
+ * spinlock.  @flag tells WriteRingBuffer whether @buf is kernel or user
+ * memory.  When fill level exceeds DEBUG_INFO_READABLE_LEN the reader
+ * semaphore is upped (at most once) to trigger an early drain.
+ * Returns bytes written, or DEBUG_INFO_ERROR on bad arguments.
+ */
+static int sc_debug_info_write(UINT32 flag, const UINT8 *buf, UINT32 len)
+{
+ UINT32 writelen;
+ UINT32 used_space;
+ unsigned long flags;
+
+ if (len == 0 || g_debug_info_buf == NULL)
+ {
+ printk("sc_debug_info_write:: (len == 0 || g_debug_info_buf == NULL).\n");
+ return DEBUG_INFO_ERROR;
+ }
+
+ raw_spin_lock_irqsave(&debugWr_lock, flags);
+ writelen = WriteRingBuffer(g_debug_info_buf, buf, len, flag);
+ raw_spin_unlock_irqrestore(&debugWr_lock, flags);
+ used_space = GetRingBufferSize(g_debug_info_buf);
+ if (used_space > DEBUG_INFO_READABLE_LEN)
+ {
+ /* NOTE(review): unsynchronized peek at sem.count -- best effort. */
+ if (debug_sem.count == 0)
+ {
+ up(&debug_sem);
+ }
+ }
+
+ return writelen;
+}
+
+/*
+ * Drain the ring buffer to user space, prefixed with a T_SAVE_FILE_DATA
+ * header describing the payload length.
+ *
+ * Returns header+payload bytes, 0 when the buffer is empty or the ring
+ * read fails, DEBUG_INFO_ERROR on bad arguments or a failed user copy
+ * (the old code ignored the copy_to_user() result and could report a
+ * length that was never written).
+ */
+static int sc_debug_info_read_to_user(char *buf, unsigned short count)
+{
+	unsigned int bufSize_used = 0;
+	unsigned int readLen = 0;
+	T_SAVE_FILE_DATA fileDataHead;
+
+	if (g_init_flag == 0)
+	{
+		printk("debug_info not init.\n");
+		return DEBUG_INFO_ERROR;
+	}
+	if (count == 0 || buf == NULL || g_debug_info_buf == NULL)
+	{
+		printk("sc_debug_info_read_to_user:: (count == 0 || buf == NULL || g_debug_info_buf == NULL).\n");
+		return DEBUG_INFO_ERROR;
+	}
+
+	bufSize_used = GetRingBufferSize(g_debug_info_buf);
+	if (bufSize_used == 0)
+	{
+		return 0;
+	}
+
+	fileDataHead.head = 0x010a0a0a;	/* record magic */
+	fileDataHead.time = 0;
+	fileDataHead.total_len = bufSize_used;
+
+	if (copy_to_user(buf, &fileDataHead, sizeof(T_SAVE_FILE_DATA)))
+		return DEBUG_INFO_ERROR;
+
+	readLen = ReadRingBuffer(g_debug_info_buf, (buf + sizeof(T_SAVE_FILE_DATA)), bufSize_used, ZCAT_MEM_TYPE_USER);
+	if (readLen == 0)
+	{
+		return 0;
+	}
+
+	return (readLen + sizeof(T_SAVE_FILE_DATA));
+}
+
+/*
+ * Record a user-supplied debug blob of @count bytes into the ring buffer.
+ *
+ * NOTE(review): this stamps jiffies back INTO the caller's buffer at
+ * offset 4 via copy_to_user, then lets sc_debug_info_write() consume the
+ * user pointer directly (tagged ZCAT_MEM_TYPE_USER).  The data should
+ * normally be copied into a kernel buffer with copy_from_user first --
+ * confirm the ring-buffer helper really handles user memory safely.
+ */
+static int sc_debug_info_record_from_user(const char *info, unsigned short count)
+{
+ unsigned int cnt = 0;
+ unsigned int my_jiffies = jiffies;
+
+ if (g_init_flag == 0)
+ {
+ printk("debug_info not init.\n");
+ return DEBUG_INFO_ERROR;
+ }
+ if (info == NULL)
+ {
+ printk("sc_debug_info_record_from_user:: info is NULL.\n");
+ return DEBUG_INFO_ERROR;
+ }
+
+ copy_to_user(info + 4, &my_jiffies, sizeof(my_jiffies));
+ cnt = sc_debug_info_write(ZCAT_MEM_TYPE_USER, (UINT8 *)info, count);
+
+ return cnt;
+}
+
+module_init(debug_info_init);
+module_exit(debug_info_exit);
+
+MODULE_AUTHOR("jcw");
+MODULE_DESCRIPTION("debug_info driver");
+MODULE_LICENSE("GPL");
+
+
+/*
+ * Format a debug record (module @id, printf @format/@args) into a stack
+ * buffer laid out as T_SHARED_MEM_DATA and append it to the ring buffer.
+ * Returns bytes written or DEBUG_INFO_ERROR.
+ *
+ * Fix: kernel vsnprintf() returns the length the string WOULD have had
+ * without truncation, so @len must be clamped to the buffer size before
+ * it is stored in sub_len and used to size the ring-buffer write --
+ * otherwise a long message pushes bytes past the 140-byte stack buffer.
+ */
+int sc_debug_info_vrecord(unsigned int id, const char *format, va_list args)
+{
+	int len;
+	UINT32 writelen;
+	char str_buf[DEBUG_INFO_MAX_TOTAL_LEN] __attribute__((aligned(4)));
+	T_SHARED_MEM_DATA *shareMemData = (T_SHARED_MEM_DATA *)str_buf;
+
+	if (g_init_flag == 0)
+	{
+		printk("debug_info not init.\n");
+		return DEBUG_INFO_ERROR;
+	}
+
+	len = vsnprintf(shareMemData->sub_data, DEBUG_INFO_MAX_DATA_LEN, format, args);
+	if (len < 0)
+	{
+		printk("vsnprintf format error.\n");
+		return DEBUG_INFO_ERROR;
+	}
+	if (len > DEBUG_INFO_MAX_DATA_LEN - 1)
+		len = DEBUG_INFO_MAX_DATA_LEN - 1;	/* clamp truncated output */
+
+	shareMemData->module_id = (UINT16)(id & 0xFFFF);
+	shareMemData->sub_len = len;
+	shareMemData->time = jiffies;
+
+	writelen = sc_debug_info_write(ZCAT_MEM_TYPE_KERNEL, (UINT8 *)shareMemData, len + sizeof(T_SHARED_MEM_DATA));
+	return writelen;
+}
+EXPORT_SYMBOL(sc_debug_info_vrecord);
+
+/* printf-style front end for sc_debug_info_vrecord(). */
+int sc_debug_info_record(unsigned int id, const char *format, ...)
+{
+	int ret;
+	va_list ap;
+
+	va_start(ap, format);
+	ret = sc_debug_info_vrecord(id, format, ap);
+	va_end(ap);
+
+	return ret;
+}
+EXPORT_SYMBOL(sc_debug_info_record);
+#else
+/* Stub when debug-info recording is compiled out: always succeeds. */
+int sc_debug_info_record(unsigned int id, const char *format, ...)
+{
+ return 0;
+}
+#endif /* _USE_ZXIC_DEBUG_INFO */
+
diff --git a/upstream/linux-5.10/drivers/mmc/core/mmc_ramdump.c b/upstream/linux-5.10/drivers/mmc/core/mmc_ramdump.c
new file mode 100755
index 0000000..f72860e
--- /dev/null
+++ b/upstream/linux-5.10/drivers/mmc/core/mmc_ramdump.c
@@ -0,0 +1,680 @@
+/*******************************************************************************
+* Copyright (C) 2014, Sanechips (ZTE Microelectronics), Shenzhen
+*
+* File:        mmc_ramdump.c
+* Description: Minimal polled (PIO, interrupt-free) eMMC access used to
+*              dump RAM to the eMMC device on the ramdump path.
+* Version:     1.0
+* Author:
+* Date:
+*******************************************************************************/
+//#include <common.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/mfd/zx234290.h>
+#include <linux/stddef.h>
+
+
+#define MMC1_REG_BASE 0x1211000
+#define MATRIX_CRM_REG_BASE 0x1306000
+#define CFG_EMMC_CLK_ENUM 400000
+#define CFG_EMMC_CLK_WORK 26000000
+#define CFG_EMMC_CLK_REF 50000000
+
+#define ZXMCI_FIFO_DEPTH 128
+#define MMC_BLOCK_SIZE 512
+
+#define mci_read(base, reg) \
+ (*(volatile u32*)(base + reg))
+#define mci_write(base, reg, value) \
+ (*(volatile u32*)(base+ reg) = (value))
+
+u8 mmc_data_buf[512]={0};
+//CMD register
+//bit6 1=response from card
+//bit7 1=long response from card
+//bit8 1=check response CRC
+//bit9 1=data transfer expect
+//bit12 1=send stop command at the end of data transfer
+//bit13 1=wait for previous data transfer completion before sending command
+#define R1 ((1 << 6) | (1 << 8))
+#define R2 ((1 << 6) | (1 << 7) | (1 << 8))
+#define R3 (1 << 6)
+#define CF_DATA ((1 << 9) | (1 << 12) | (1 << 13))
+#define CF_DATA_WR ((1 << 9)|(1 << 10) | (1 << 12) | (1 << 13))
+
+
+static u32 mmc_rca;
+static u32 block_addr = 1;
+//extern struct dw_mci *dw_mci_host_ptr[2];
+extern u32 * g_reg_base[2];
+struct mmc_cid
+{
+ u32 psn;
+ u8 oid;
+ u8 mid;
+ u8 prv;
+ u8 mdt;
+ char pnm[7];
+};
+
+// Reset the eMMC controller clocks
+/*
+ * Reset/enable the eMMC clocks via the matrix CRM block.  Currently
+ * stubbed out (returns immediately); the original sequence is kept under
+ * #if 0 for reference.
+ */
+static void emmc_clk_reset(void)
+{
+return ;
+#if 0
+ volatile u32 *crm = (u32*)MATRIX_CRM_REG_BASE;
+
+ crm[0x50>>2] &= ~(0x7<<8);//bit8~10 000:26Mhz 001:100Mhz
+
+ crm[0x54>>2] |= 0x03 << 4; // clk enable
+
+ udelay(10);
+
+ crm[0x58>>2] |= 0x01 << 1; // reset release
+#endif
+}
+
+// Send one command to the eMMC card
+/*
+ * Send one command to the eMMC controller by polling (no interrupts).
+ *
+ * @cmd:   command index (0..63)
+ * @arg:   32-bit command argument
+ * @resp:  out buffer for the response words (1 or 4 u32s), or NULL
+ * @flags: CMD-register flag bits (see R1/R2/R3/CF_DATA above)
+ *
+ * Returns 0 on success, -1 on response/CRC/timeout error.
+ *
+ * NOTE(review): regs_base is a u32 holding a pointer value taken from
+ * g_reg_base[1]; the mci_read/mci_write macros do byte arithmetic on it,
+ * which only works on a 32-bit kernel -- confirm.
+ */
+static int emmc_cmd(u32 cmd, u32 arg, void *resp, u32 flags)
+{
+
+#define ERR_STATUS (1 << 1 | 1 << 6 | 1 << 8) // bit1:response error
+ // bit6:response CRC error
+ // bit8:response timeout
+ volatile u32 i;
+ u32 cmdreg;
+ u32 *response = resp;
+ u32 response_words = 0;
+ // volatile u32 *emmc = (u32*)MMC1_REG_BASE;
+ u32 reg_val = 0;
+ u32 regs_base = g_reg_base[1];
+
+ //printk("(%s) cmd = %d start\n",__func__,cmd);
+
+ //Clear all raw interrupt status
+ reg_val = mci_read(regs_base,0x44);
+ mci_write(regs_base,0x44,reg_val|((u32)-1));
+
+ cmdreg = cmd & 0x3F;
+ cmdreg |= flags|(1 << 29) | 0x80000000;
+
+ if(flags &(1 << 7))
+ {
+ response_words = 4; // long response expected from card
+ }
+ else if(flags & (1 << 6))
+ {
+ response_words = 1; // response expected from card
+ }
+ //send command
+ reg_val = mci_read(regs_base,0x44);
+ mci_write(regs_base,0x44,reg_val|((u32)-1));
+ mci_write(regs_base,0x28,arg);
+ mci_write(regs_base,0x2C,cmdreg);
+
+ // check command done
+ i= 0;
+ do
+ {
+ udelay(10);
+ if(++i > 1000)
+ {
+ printk("SEND CMD FAILED,CMD = %d,reg= 0x%x\n",cmd,mci_read(regs_base,0x44));
+
+ break;
+ }
+
+ } while(!(mci_read(regs_base,0x44) & (1 << 2)));
+
+ // check error
+ if(mci_read(regs_base,0x44) & ERR_STATUS)
+ {
+
+ printk("SEND CMD ERR,reg_0x44=0x%x\n",mci_read(regs_base,0x44));
+ return -1;
+ }
+
+ if(response == NULL)
+ return 0;
+
+ for(i = 0; i < response_words; i++)
+ {
+ response[i]= mci_read(regs_base,(0x30 + i));
+ }
+
+ return 0;
+}
+
+// Reset all cards and put them into the IDLE state
+/*
+ * CMD0 (GO_IDLE_STATE): reset every card to IDLE, then poll a status
+ * register ~100 times as a crude wait for the required 80+ clock cycles.
+ * Returns 0 on success, the CMD0 error code otherwise.
+ */
+static u32 emmc_idle_cards(void)
+{
+ int i;
+ u32 ret;
+ u32 regs_base = g_reg_base[1];
+
+ // Reset and initialize all cards
+ ret = (u32)emmc_cmd(0, 0, NULL, (1 << 15));
+ if(ret)
+ {
+ printk("ENTER IDLE ERR\n");
+ return (int)ret;
+ }
+
+ // wait for 80 clock at least
+ for(i = 0; i < 100; i++)
+ {
+ ret = mci_read(regs_base,0x70);
+ }
+
+ return 0;
+}
+
+// Send CMD1 (SEND_OP_COND)
+/*
+ * CMD1 (SEND_OP_COND): repeat until the card reports power-up complete
+ * (OCR bit 31) or ~4 s elapse.  With @ocr == 0 this is a single query of
+ * the card's OCR.  When @rocr is non-NULL it receives the last response
+ * word.  Returns 0 on success, non-zero on failure.
+ *
+ * Fix: resp[] is zero-initialized so *rocr is well-defined even when the
+ * very first CMD1 fails before any response is read.
+ */
+static inline int mmc_send_op_cond(u32 ocr,u32 *rocr)
+{
+	int i;
+	int ret = 0;
+	u32 resp[4] = {0};
+
+	/* Spec requires at least 1 s of retries; allow ~4 s here. */
+	for(i = 50000; i > 0; i--)
+	{
+		ret = emmc_cmd(1,ocr,resp,R3);
+		if(ret)
+			break;
+
+		if(ocr == 0)
+			break;
+		if(resp[0] & 0x80000000)	/* power-up complete */
+			break;
+
+		udelay(80);
+
+		ret= -1;
+	}
+
+	if(rocr)
+		*rocr = resp[0];
+
+	return ret;
+}
+
+// Enumerate the eMMC card
+/*
+ * Enumerate the eMMC card: CMD0 (idle), CMD1 (operating conditions;
+ * OCR bit 30 selects sector vs byte addressing), CMD2 (CID), CMD3
+ * (assign RCA 1).  @cid is currently unused.  Returns 0.
+ *
+ * Fix: rocr is zero-initialized so the addressing-mode check below is
+ * well-defined even when CMD1 produced no valid response.
+ */
+static u32 ramdump_mmc_init_card(struct mmc_cid *cid, u32 ocr)
+{
+	u32 resp[4];
+	u32 rocr = 0;
+
+	// CMD0
+	emmc_idle_cards();
+
+	// CMD1
+	mmc_send_op_cond(ocr/* | (1 << 30)*/, &rocr);
+
+	if((rocr & 0x80000000) != 0)
+	{
+		/* OCR bit 30 clear = byte addressing, set = sector addressing. */
+		if((rocr & 0x40000000) == 0)
+		{
+			block_addr = 0;
+		}
+		else
+		{
+			block_addr = 1;
+		}
+	}
+	else
+	{
+		printk("ERR\n");
+	}
+	// CMD2
+	emmc_cmd(2, 0, resp, R2);
+
+	// CMD3
+	// Set RCA of the card that responded
+	mmc_rca = 1 << 16;
+	emmc_cmd(3, mmc_rca, resp, R1);
+
+	return 0;
+}
+
+// card detected numbers
+/* True when the card-detect field (reg 0x50, low 30 bits) reports a card. */
+static inline int emmc_card_present(void)
+{
+ u32 regs_base = g_reg_base[1];
+
+ return ((mci_read(regs_base,0x50) & 0x3FFFFFFF) != 0x01);
+}
+
+// CPU/PIO access to the FIFO; interrupts are kept disabled
+/*
+ * Bring the MCI host to a known state for CPU/PIO access: reset the
+ * controller and FIFO, power the card, mask every interrupt, select
+ * 1-bit bus width and program default timeouts/thresholds.
+ *
+ * Fix: the first controller write went through an mci_read() lvalue
+ * ("mci_read(...) = v"); it now uses mci_write() like every other store.
+ * GBK-garbled comments translated to English.
+ */
+static inline void emmc_init(void)
+{
+	u32 tmp,cardnums;
+	u32 i = 0;
+	u32 reg_val=0;
+	u32 regs_base = g_reg_base[1];
+
+	/* Disable DMA, reset FIFO and controller; poll until both reset
+	 * bits self-clear. */
+	mci_write(regs_base,0x00,(0 << 5)|(1 << 1)|(1 << 0));
+	do
+	{
+		udelay(10);
+		if(++i > 100)
+		{
+			printk("RESET FAILED\n");
+			break;
+		}
+	} while(mci_read(regs_base,0x00)&3);
+
+	cardnums = mci_read(regs_base,0x70)&0x3E;
+	cardnums = (cardnums >> 1) + 1;
+
+	/* CTRL register defaults (OD pull-up only matters for MMC-3.3-only). */
+	mci_write(regs_base,0x00,0x0);
+
+	/* Power the card and wait for the supply to settle. */
+	mci_write(regs_base,0x04,0x01);
+	udelay(500);
+
+	/* Clear raw interrupt status, then mask all interrupts
+	 * (high 16 bits: SDIO; low 16 bits: per-card). */
+	mci_write(regs_base,0x44,((u32)-1));
+	mci_write(regs_base,0x24,0x00);
+
+	/* Card clock source = clksrc0 (2 bits per card). */
+	mci_write(regs_base,0x0C,0x0);
+
+	/* Default host parameters: data timeout max, debounce ~25 ms
+	 * (response timeout keeps its default of 0x40). */
+	mci_write(regs_base,0x14,((u32)-1));
+	mci_write(regs_base,0x64,0xFFFFFF);
+
+	/* FIFO thresholds: RX/TX watermark at half depth; internal-DMA
+	 * multiple-transaction size field left at its default meaning. */
+	tmp = (2 << 28) |(((ZXMCI_FIFO_DEPTH >> 1)-1) << 16) |((ZXMCI_FIFO_DEPTH >> 1) << 0);
+	mci_write(regs_base,0x4c,tmp);
+
+	/* The card powers up in 1-bit mode; match the host. */
+	mci_write(regs_base,0x18,0);
+	mci_write(regs_base,0x100,1);
+
+	/* Reset the FIFO again; DMA and the global interrupt stay disabled. */
+	mci_write(regs_base,0x00,(0 << 5)|(0 << 4)|(1 << 1));
+
+	i= 0;
+	do
+	{
+		udelay(10);
+		if(++i > 100)
+		{
+			printk("FIFO RESET FAILED\n");
+			break;
+		}
+	} while(mci_read(regs_base,0x00)& 2);
+}
+// Latch the clock settings into the controller (update-clocks command)
+/*
+ * Latch CLKDIV/CLKENA into the card clock domain by issuing an
+ * update-clocks-only command (bit 21; nothing is sent to the card, so no
+ * interrupt is raised).  Retries around the HW-locked error (bit 12) and
+ * polls start_cmd (bit 31) until the controller accepts the command.
+ * Returns 0, -1 on start_cmd timeout, -2 if the controller stays locked.
+ */
+static int emmc_update_clock_reg_only(void)
+{
+ /* Only bit 21 is set: no command goes to the card, so no interrupt. */
+ u32 rintsts;
+ u32 repeat = 0;
+ u32 cmdr = (1 << 21)| (1 << 13) | 0x80000000;
+ u32 regs_base = g_reg_base[1];
+
+ do
+ {
+ mci_write(regs_base,0x2c,cmdr);
+ rintsts = mci_read(regs_base,0x44);
+ repeat++;
+ } while(((rintsts & (1 << 12)) != 0) && (repeat < 10));
+
+ if(repeat >= 10)
+ {
+ printk("HW LOCK ERR\n");
+ return -2;
+ }
+
+ repeat = 0;
+
+ while((mci_read(regs_base,0x2C) & 0x80000000) != 0)
+ {
+ udelay(50);
+ if(++repeat >=1000)
+ {
+ printk("UPDATE CLOCK TIMEOUT\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+// Program the card clock frequency
+/*
+ * Program the card clock to roughly @clock Hz.
+ * Flow (per the IP manual): gate the clock, latch, write the divider,
+ * latch, re-enable (low-power mode), latch.  CLKDIV gives
+ * CFG_EMMC_CLK_REF/(2n) with n = 1..255 (0 = divide by 1), so the
+ * reachable range is ~130.6 kHz .. the full reference clock.
+ */
+static inline void emmc_set_clk(u32 clock)
+{
+ u32 clk_div;
+ u32 regs_base = g_reg_base[1];
+
+ /* Disable all card clocks first (card must not be transferring). */
+ mci_write(regs_base,0x10,0x0);
+ emmc_update_clock_reg_only();
+ /* Compute CLKDIV (CLKSRC keeps its default). */
+ if(clock <= (CFG_EMMC_CLK_REF / 510))
+ {
+ clk_div = 0xff;
+ }
+ else
+ {
+ clk_div = (CFG_EMMC_CLK_REF + clock )/((clock<<1)+1);// round to nearest
+ }
+ mci_write(regs_base,0x08,clk_div);
+ emmc_update_clock_reg_only();
+
+ /* Re-enable the clock (upper 16 bits select low-power mode). */
+ mci_write(regs_base,0x10,0x001);
+ emmc_update_clock_reg_only();
+
+}
+// Issue an eMMC read command and drain the FIFO by PIO
+/*
+ * PIO read of @size bytes from card address/block @src into @dst.
+ * Uses CMD17 (single block) or CMD18 (multiple, with auto-stop flagged in
+ * CF_DATA); the FIFO is drained by polling the word count in reg 0x48.
+ * @size is assumed to be a multiple of the 512-byte block size.
+ * Returns 0 on success, -1 for size 0, -17/-18 on command failure.
+ */
+static s32 zx_mmc_read(u32 src, u8 * dst, u32 size)
+{
+ int ret;
+ u32 resp, data, wordcount, start_addr, fifo_cnt;
+ volatile u32 i= 0;
+ volatile u32 j= 0;
+ u32 *p= (u32 *)dst;
+ u32 regs_base = g_reg_base[1];
+
+ if(size == 0)
+ return -1;
+
+ /* Wait for any previous data transfer to finish (busy bit 9). */
+ while((mci_read(regs_base,0x48) & (1 << 9)) != 0)
+ {
+ udelay(10);
+
+ if(++i > 200)
+ break;
+ }
+
+ start_addr = src;
+ /* Reset the FIFO before programming byte count and block size. */
+ data = mci_read(regs_base,0x00) | (1 << 1);
+ mci_write(regs_base,0x00,data);
+ mci_write(regs_base,0x20,size);
+ mci_write(regs_base,0x1C,MMC_BLOCK_SIZE);
+
+ i = 0;
+ do
+ {
+ udelay(10);
+ if(++i > 100)
+ {
+ printk("FIFO RESET FAILED\n");
+ break;
+ }
+ } while(mci_read(regs_base,0x00) & 0x02);
+
+
+ if (size > 512)
+ {
+ ret = emmc_cmd(18,start_addr, &resp,(R1 | CF_DATA));
+ if(ret)
+ return -18;
+ }
+ else
+ {
+ ret = emmc_cmd(17,start_addr, &resp,(R1 | CF_DATA));
+ if(ret)
+ return -17;
+ }
+
+ /* Drain the FIFO word by word until size/4 words have been read. */
+ wordcount = 0;
+ do
+ {
+ fifo_cnt =((mci_read(regs_base,0x48) >> 17) & 0x1FFF);
+
+ for(j = 0; j < fifo_cnt; j++)
+ {
+ data = mci_read(regs_base,0x200);
+ *p++= data;
+ wordcount++;
+ }
+
+ } while(wordcount < (size >> 2));
+ udelay(2000);
+
+ return 0;
+
+}
+
+/*
+ * PIO write of @size bytes from @src_buf to card address/block @blknr.
+ * Uses CMD24 (single block) or CMD25 (multiple); words are pushed to the
+ * FIFO whenever space allows.  Note: unlike zx_mmc_read(), wordcount here
+ * counts BYTES and advances by 4 per 32-bit word pushed.
+ * Returns 0 on success, -1 for size 0, -17/-18 on command failure.
+ */
+static int zx_mmc_write(u32 blknr, u8 * src_buf, u32 size)
+{
+ int ret;
+ u32 resp, data, wordcount, start_addr, fifo_cnt;
+ volatile u32 i= 0;
+ volatile u32 j= 0;
+ u32 *p= (u32 *)src_buf;
+ u32 regs_base = g_reg_base[1];
+ u32 write_count_per =0;
+
+ if(size == 0)
+ return -1;
+
+ /* Wait for any previous data transfer to finish (busy bit 9). */
+ while((mci_read(regs_base,0x48) & (1 << 9)) != 0)
+ {
+ udelay(10);
+
+ if(++i > 200)
+ break;
+ }
+
+ start_addr = blknr;
+ /* Reset the FIFO before programming byte count and block size. */
+ data = mci_read(regs_base,0x00) | (1 << 1);
+ mci_write(regs_base,0x00,data);
+ mci_write(regs_base,0x20,size);
+ mci_write(regs_base,0x1C,MMC_BLOCK_SIZE);
+
+ i = 0;
+ do
+ {
+ udelay(10);
+ if(++i > 100)
+ {
+ printk("FIFO RESET FAILED\n");
+ break;
+ }
+ } while(mci_read(regs_base,0x00) & 0x02);
+
+
+ if (size > 512)
+ {
+
+ ret = emmc_cmd(25,start_addr, &resp,(R1 | CF_DATA_WR));
+ if(ret)
+ return -18;
+ }
+ else
+ {
+
+ ret = emmc_cmd(24,start_addr, &resp,(R1 | CF_DATA_WR));
+ if(ret)
+ return -17;
+ }
+
+ wordcount = 0;
+
+ /* Push min(words remaining, FIFO space) each pass until done. */
+ do
+ {
+ fifo_cnt =((mci_read(regs_base,0x48) >> 17) & 0x1FFF);
+ write_count_per = min(((size-wordcount)>>2),(ZXMCI_FIFO_DEPTH-fifo_cnt));
+
+ for(j = 0; j < write_count_per; j++)
+ {
+ mci_write(regs_base,0x200,*p++);
+ wordcount = wordcount+4;
+ }
+
+ } while(size-wordcount);
+
+ udelay(2000);
+
+ return 0;
+
+}
+
+// Block-read wrapper: read data_size bytes starting at byte address start_addr
+/*
+ * Read @data_size bytes from the card, starting at byte address
+ * @start_addr (must be block aligned), into @dst.  A trailing partial
+ * block is staged through mmc_data_buf.  Returns data_size or -1.
+ *
+ * Fix: on byte-addressed cards (block_addr == 0) the remainder read used
+ * "src + blk_count", adding a BLOCK count to a BYTE address; it must
+ * advance by blk_count * MMC_BLOCK_SIZE bytes instead.
+ */
+int mmc_bread(u32 start_addr, u32 data_size, void *dst)
+{
+	int ret;
+	u32 src = 0;
+	u32 remain_src = 0;
+	u32 blk_count;
+	u32 remain = 0;
+
+	if((start_addr%MMC_BLOCK_SIZE)||(data_size==0)||(dst==NULL))
+		return -1;//err start addr
+
+	blk_count = data_size/MMC_BLOCK_SIZE;
+	remain = data_size%MMC_BLOCK_SIZE;
+	if(remain)
+		memset(&mmc_data_buf,0x0,MMC_BLOCK_SIZE);
+
+	if(block_addr == 0)
+	{
+		src = start_addr;
+		remain_src = start_addr + blk_count * MMC_BLOCK_SIZE;
+	}
+	else
+	{
+		src = start_addr/MMC_BLOCK_SIZE;
+		remain_src = src + blk_count;
+	}
+
+	if(blk_count){
+		ret= zx_mmc_read(src, (u8 *) dst, blk_count * MMC_BLOCK_SIZE);
+		if(ret < 0)
+		{
+			printk("READ ERR\n");
+			return -1;
+		}
+	}
+
+	if(remain){/*transfer remain*/
+
+		ret= zx_mmc_read(remain_src, (u8 *)&mmc_data_buf, 1 * MMC_BLOCK_SIZE);
+		if(ret < 0)
+		{
+			printk("READ ERR\n");
+			return -1;
+		}
+		memcpy((dst+blk_count * MMC_BLOCK_SIZE),&mmc_data_buf,remain);
+	}
+
+	return data_size;
+}
+
+/*
+ * Write @data_size bytes from @src_buf to the card at byte address
+ * @start_addr (block aligned).  A trailing partial block is zero-padded
+ * via mmc_data_buf.  After each write, CMD13 is polled until the card's
+ * current_state returns to TRAN (4).  Returns data_size or -1.
+ *
+ * Fix: as in mmc_bread(), the remainder write on byte-addressed cards
+ * added a BLOCK count to a BYTE address; it now advances by
+ * blk_count * MMC_BLOCK_SIZE bytes in that mode.
+ */
+int mmc_bwrite(u32 start_addr, u32 data_size, void *src_buf)
+{
+	int ret;
+	u32 start_blk = 0;
+	u32 remain_addr = 0;
+	u32 blk_count;
+	u32 remain = 0;
+	u32 resp[4];
+
+	if((start_addr%MMC_BLOCK_SIZE)||(data_size==0)||(src_buf==NULL))
+		return -1;//err start addr
+
+	blk_count = data_size/MMC_BLOCK_SIZE;
+	remain = data_size%MMC_BLOCK_SIZE;
+	if(remain){
+		memset(&mmc_data_buf,0x00,MMC_BLOCK_SIZE);
+		memcpy(&mmc_data_buf,(src_buf+blk_count*MMC_BLOCK_SIZE),remain);
+	}
+
+	if(block_addr == 0)
+	{
+		start_blk = start_addr;
+		remain_addr = start_addr + blk_count * MMC_BLOCK_SIZE;
+	}
+	else
+	{
+		start_blk = (start_addr/MMC_BLOCK_SIZE);
+		remain_addr = start_blk + blk_count;
+	}
+
+	if(blk_count){
+		ret= zx_mmc_write(start_blk, (u8 *)src_buf, blk_count * MMC_BLOCK_SIZE);
+		if(ret < 0)
+		{
+			printk("WRITE ERR\n");
+			return -1;
+		}
+
+		/* Poll CMD13 until current_state (bits 12:9) == 4 (TRAN). */
+		resp[0]=0;
+		do{
+			emmc_cmd(13, (1<<16), resp, R1);
+		}while(((resp[0] & 0x00001E00) >> 9)!= 4);
+	}
+
+	if(remain){/*transfer remain*/
+
+		ret= zx_mmc_write(remain_addr, (u8 *)&mmc_data_buf, 1 * MMC_BLOCK_SIZE);
+		if(ret < 0)
+		{
+			printk("WRITE remain ERR\n");
+			return -1;
+		}
+
+		resp[0]=0;
+		do{
+			emmc_cmd(13, (1<<16), resp, R1);
+		}while(((resp[0] & 0x00001E00) >> 9)!= 4);
+	}
+	return data_size;
+}
+
+
+// Initialize the eMMC for the ramdump path; only eMMC is supported (auto-detect could be added later)
+/*
+ * Initialize the eMMC from scratch for ramdump use: reset the host,
+ * switch to the slow enumeration clock, run CMD0/CMD1/CMD2/CMD3, read
+ * the CSD (CMD9), select the card (CMD7), set 512-byte blocks (CMD16)
+ * and finally switch to the working clock.  Returns 0 on success or the
+ * CMD7 error code.
+ */
+int mmc_ramdump_init(void)
+{
+ struct mmc_cid cid;
+ u32 resp[4];
+ u32 ocr;
+ int ret = 0;
+ block_addr = 0;
+
+ //zDrvPmic_SetNormal_Onoff_PSM(VSD1,PM_DISABLE);
+ //mdelay(10);
+ //zDrvPmic_SetNormal_Onoff_PSM(VSD1,PM_ENABLE);
+ // emmc_clk_reset();
+ emmc_init();
+
+ if(!emmc_card_present())
+ {
+ printk("NO EMMC\n");
+ return -1;
+ }
+
+ /* Enumerate at the slow clock required by the spec. */
+ emmc_set_clk(CFG_EMMC_CLK_ENUM);
+ emmc_idle_cards();
+
+ ret = mmc_send_op_cond(0, &ocr);
+
+ if(!ret)
+ {
+ ramdump_mmc_init_card(&cid, ocr);
+
+ emmc_cmd(9, mmc_rca, resp, R2);
+
+ ret = emmc_cmd(7, mmc_rca, resp, R1);
+
+ if(ret)
+ {
+ return ret;
+ }
+
+ emmc_cmd(16, 512, resp, R1);
+
+ emmc_set_clk(CFG_EMMC_CLK_WORK);
+ }
+
+// mmc_bread(0, 0, 8, (void *)CFG_LOAD_BASE);
+
+ return 0;
+}
+
+
diff --git a/upstream/linux-5.10/drivers/net/zvnet/zvnet_dev.h b/upstream/linux-5.10/drivers/net/zvnet/zvnet_dev.h
new file mode 100755
index 0000000..d1feaa5
--- /dev/null
+++ b/upstream/linux-5.10/drivers/net/zvnet/zvnet_dev.h
@@ -0,0 +1,76 @@
+#ifndef ZV_NET_H
+#define ZV_NET_H
+
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/spinlock.h>
+#include <linux/semaphore.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/soc/zte/rpmsg.h>
+
+/* Logging helpers: dbg/info compile out unless ZVNET_DEBUG is defined. */
+//#define ZVNET_DEBUG
+#ifdef ZVNET_DEBUG
+#define zv_dbg(format, arg...) printk(KERN_DEBUG "[zvnet]<%s>: " format "\n" , \
+ __func__ , ## arg)
+#define zv_info(format, arg...) printk(KERN_INFO "[zvnet]<%s>: " format "\n" , \
+ __func__ , ## arg)
+#else
+#define zv_dbg(format, arg...) do {} while (0)
+#define zv_info(format, arg...) do {} while (0)
+#endif
+
+#define zv_err(format, arg...) printk(KERN_ERR "[zvnet]<%s>: " format "\n" , \
+ __func__ , ## arg)
+
+#define zv_warn(format, arg...) printk(KERN_WARNING "[zvnet]<%s>: " format "\n" , \
+ __func__ , ## arg)
+
+//max number of zvnet devices (translated from GBK comment)
+#define DDR_ZVNET_DEV_MAX 10
+#define ZVNET_IFNAME_PREFIX "zvnet"
+
+/* ICP channel ids used to carry the zvnet WAN interfaces */
+#define ICP_CHN_ZVNET1 20 //ICP_CHANNEL_WAN1
+#define ICP_CHN_ZVNET2 21 //ICP_CHANNEL_WAN2
+#define ICP_CHN_ZVNET3 22 //ICP_CHANNEL_WAN3
+#define ICP_CHN_ZVNET4 23 //ICP_CHANNEL_WAN4
+
+#define ICP_CHANNEL_SIZE (8 * 1024 *2)
+
+/* zvnet transport is a thin alias over the rpmsg API */
+#define zvnetCreateChannel rpmsgCreateChannel
+#define zvnetWrite rpmsgWrite
+#define zvnetRead rpmsgRead
+
+/* One rpmsg channel carrying zvnet traffic to/from a remote core. */
+struct zvnet_channel {
+ T_RpMsg_CoreID core_id; /* remote core on the other end */
+ T_RpMsg_ChID channel_id;
+ unsigned int channel_size;
+ struct task_struct *rcv_thread; /* kthread draining this channel */
+};
+
+/* Per-netdev zvnet state (receive queue drained from a tasklet). */
+struct zvnet {
+ struct net_device *net;
+ struct sk_buff_head rxq; /* received skbs awaiting delivery */
+ struct tasklet_struct bh;
+ void *dev_priv;
+};
+
+struct zvnet_device {
+ struct zvnet *dev;
+ struct net_device *net;
+ //struct zvnet_channel chn_info;
+ unsigned char retran_times;
+ //int (*write)(struct sk_buff *,struct v2x_hdr *, unsigned int, struct net_device *);
+};
+
+/* On-wire framing header prepended to each zvnet packet. */
+struct zvp_header {
+ unsigned int magic_word; /* must be ZVP_MAGIC_WORD */
+ unsigned short chnid;
+ unsigned short tlen; /* total length */
+};
+
+#define ZVP_MAGIC_WORD 0x5A5A5A5A
+#define ZVP_HEAD_LEN sizeof(struct zvp_header)
+
+#endif
+
diff --git a/upstream/linux-5.10/drivers/rtc/class.c b/upstream/linux-5.10/drivers/rtc/class.c
new file mode 100755
index 0000000..625effe
--- /dev/null
+++ b/upstream/linux-5.10/drivers/rtc/class.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RTC subsystem, base class
+ *
+ * Copyright (C) 2005 Tower Technologies
+ * Author: Alessandro Zummo <a.zummo@towertech.it>
+ *
+ * class skeleton from drivers/hwmon/hwmon.c
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/rtc.h>
+#include <linux/kdev_t.h>
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "rtc-core.h"
+
+static DEFINE_IDA(rtc_ida);
+struct class *rtc_class;
+
+/*
+ * dev->release callback, run when the last reference to the RTC device
+ * is dropped: drain any queued rtc timers, stop the irq worker, then
+ * free the id and the structure.
+ */
+static void rtc_device_release(struct device *dev)
+{
+ struct rtc_device *rtc = to_rtc_device(dev);
+ struct timerqueue_head *head = &rtc->timerqueue;
+ struct timerqueue_node *node;
+
+ mutex_lock(&rtc->ops_lock);
+ while ((node = timerqueue_getnext(head)))
+ timerqueue_del(head, node);
+ mutex_unlock(&rtc->ops_lock);
+
+ cancel_work_sync(&rtc->irqwork);
+
+ ida_simple_remove(&rtc_ida, rtc->id);
+ kfree(rtc);
+}
+
+#ifdef CONFIG_RTC_HCTOSYS_DEVICE
+/* Result of the last RTC to system clock attempt. */
+int rtc_hctosys_ret = -ENODEV;
+
+/* IMPORTANT: the RTC only stores whole seconds. It is arbitrary
+ * whether it stores the most close value or the value with partial
+ * seconds truncated. However, it is important that we use it to store
+ * the truncated value. This is because otherwise it is necessary,
+ * in an rtc sync function, to read both xtime.tv_sec and
+ * xtime.tv_nsec. On some processors (i.e. ARM), an atomic read
+ * of >32bits is not possible. So storing the most close value would
+ * slow down the sync API. So here we have the truncated value and
+ * the best guess is to add 0.5s.
+ */
+
+/* Set the system wall clock from @rtc; outcome saved in rtc_hctosys_ret. */
+static void rtc_hctosys(struct rtc_device *rtc)
+{
+ int err;
+ struct rtc_time tm;
+ struct timespec64 tv64 = {
+ .tv_nsec = NSEC_PER_SEC >> 1,
+ };
+
+ err = rtc_read_time(rtc, &tm);
+ if (err) {
+ dev_err(rtc->dev.parent,
+ "hctosys: unable to read the hardware clock\n");
+ goto err_read;
+ }
+
+ tv64.tv_sec = rtc_tm_to_time64(&tm);
+
+#if BITS_PER_LONG == 32
+ /* a 32-bit time_t cannot represent this RTC value */
+ if (tv64.tv_sec > INT_MAX) {
+ err = -ERANGE;
+ goto err_read;
+ }
+#endif
+
+ err = do_settimeofday64(&tv64);
+
+ dev_info(rtc->dev.parent, "setting system clock to %ptR UTC (%lld)\n",
+ &tm, (long long)tv64.tv_sec);
+
+err_read:
+ rtc_hctosys_ret = err;
+}
+#endif
+
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
+/*
+ * On suspend(), measure the delta between one RTC and the
+ * system's wall clock; restore it on resume().
+ */
+
+static struct timespec64 old_rtc, old_system, old_delta;
+
+/*
+ * Snapshot the hctosys RTC and the system clock at suspend so that
+ * rtc_resume() can inject the elapsed sleep time.  Always returns 0:
+ * a failed RTC read simply skips sleep-time accounting.
+ */
+static int rtc_suspend(struct device *dev)
+{
+ struct rtc_device *rtc = to_rtc_device(dev);
+ struct rtc_time tm;
+ struct timespec64 delta, delta_delta;
+ int err;
+
+ if (timekeeping_rtc_skipsuspend())
+ return 0;
+
+ /* only the configured hctosys device participates */
+ if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0)
+ return 0;
+
+ /* snapshot the current RTC and system time at suspend*/
+ err = rtc_read_time(rtc, &tm);
+ if (err < 0) {
+ pr_debug("%s: fail to read rtc time\n", dev_name(&rtc->dev));
+ return 0;
+ }
+
+ ktime_get_real_ts64(&old_system);
+ old_rtc.tv_sec = rtc_tm_to_time64(&tm);
+
+ /*
+ * To avoid drift caused by repeated suspend/resumes,
+ * which each can add ~1 second drift error,
+ * try to compensate so the difference in system time
+ * and rtc time stays close to constant.
+ */
+ delta = timespec64_sub(old_system, old_rtc);
+ delta_delta = timespec64_sub(delta, old_delta);
+ if (delta_delta.tv_sec < -2 || delta_delta.tv_sec >= 2) {
+ /*
+ * if delta_delta is too large, assume time correction
+ * has occurred and set old_delta to the current delta.
+ */
+ old_delta = delta;
+ } else {
+ /* Otherwise try to adjust old_system to compensate */
+ old_system = timespec64_sub(old_system, delta_delta);
+ }
+
+ return 0;
+}
+
+/*
+ * Compute the time spent asleep from the RTC delta measured against the
+ * rtc_suspend() snapshot and inject it into the timekeeping core.
+ * Always returns 0; failures only skip the injection.
+ */
+static int rtc_resume(struct device *dev)
+{
+ struct rtc_device *rtc = to_rtc_device(dev);
+ struct rtc_time tm;
+ struct timespec64 new_system, new_rtc;
+ struct timespec64 sleep_time;
+ int err;
+
+ if (timekeeping_rtc_skipresume())
+ return 0;
+
+ rtc_hctosys_ret = -ENODEV;
+ if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0)
+ return 0;
+
+ /* snapshot the current rtc and system time at resume */
+ ktime_get_real_ts64(&new_system);
+ err = rtc_read_time(rtc, &tm);
+ if (err < 0) {
+ pr_debug("%s: fail to read rtc time\n", dev_name(&rtc->dev));
+ return 0;
+ }
+
+ new_rtc.tv_sec = rtc_tm_to_time64(&tm);
+ new_rtc.tv_nsec = 0;
+
+ /* RTC went backwards across suspend: don't trust it */
+ if (new_rtc.tv_sec < old_rtc.tv_sec) {
+ pr_debug("%s: time travel!\n", dev_name(&rtc->dev));
+ return 0;
+ }
+
+ /* calculate the RTC time delta (sleep time)*/
+ sleep_time = timespec64_sub(new_rtc, old_rtc);
+
+ /*
+ * Since these RTC suspend/resume handlers are not called
+ * at the very end of suspend or the start of resume,
+ * some run-time may pass on either sides of the sleep time
+ * so subtract kernel run-time between rtc_suspend to rtc_resume
+ * to keep things accurate.
+ */
+ sleep_time = timespec64_sub(sleep_time,
+ timespec64_sub(new_system, old_system));
+
+ if (sleep_time.tv_sec >= 0)
+ timekeeping_inject_sleeptime64(&sleep_time);
+ rtc_hctosys_ret = 0;
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(rtc_class_dev_pm_ops, rtc_suspend, rtc_resume);
+#define RTC_CLASS_DEV_PM_OPS (&rtc_class_dev_pm_ops)
+#else
+#define RTC_CLASS_DEV_PM_OPS NULL
+#endif
+
+/* Ensure the caller will set the id before releasing the device */
+/*
+ * Allocate and default-initialise a struct rtc_device (locks, timers,
+ * irq worker).  Returns NULL on allocation failure.  The returned
+ * device must be released with put_device() once a refcount is held.
+ */
+static struct rtc_device *rtc_allocate_device(void)
+{
+ struct rtc_device *rtc;
+
+ rtc = kzalloc(sizeof(*rtc), GFP_KERNEL);
+ if (!rtc)
+ return NULL;
+
+ device_initialize(&rtc->dev);
+
+ /* Drivers can revise this default after allocating the device. */
+ rtc->set_offset_nsec = NSEC_PER_SEC / 2;
+
+ rtc->irq_freq = 1;
+ rtc->max_user_freq = 64;
+ rtc->dev.class = rtc_class;
+ rtc->dev.groups = rtc_get_dev_attribute_groups();
+ rtc->dev.release = rtc_device_release;
+
+ mutex_init(&rtc->ops_lock);
+ spin_lock_init(&rtc->irq_lock);
+ init_waitqueue_head(&rtc->irq_queue);
+
+ /* Init timerqueue */
+ timerqueue_init_head(&rtc->timerqueue);
+ INIT_WORK(&rtc->irqwork, rtc_timer_do_work);
+ /* Init aie timer */
+ rtc_timer_init(&rtc->aie_timer, rtc_aie_update_irq, rtc);
+ /* Init uie timer */
+ rtc_timer_init(&rtc->uie_rtctimer, rtc_uie_update_irq, rtc);
+ /* Init pie timer */
+ hrtimer_init(&rtc->pie_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ rtc->pie_timer.function = rtc_pie_update_irq;
+ rtc->pie_enabled = 0;
+
+ return rtc;
+}
+
+/*
+ * Pick an rtc id: prefer the DT "rtc" alias of the device (or its
+ * parent), fall back to the first free id.  Returns a negative errno
+ * if no id can be allocated.
+ */
+static int rtc_device_get_id(struct device *dev)
+{
+ int of_id = -1, id = -1;
+
+ if (dev->of_node)
+ of_id = of_alias_get_id(dev->of_node, "rtc");
+ else if (dev->parent && dev->parent->of_node)
+ of_id = of_alias_get_id(dev->parent->of_node, "rtc");
+
+ if (of_id >= 0) {
+ id = ida_simple_get(&rtc_ida, of_id, of_id + 1, GFP_KERNEL);
+ if (id < 0)
+ dev_warn(dev, "/aliases ID %d not available\n", of_id);
+ }
+
+ if (id < 0)
+ id = ida_simple_get(&rtc_ida, 0, 0, GFP_KERNEL);
+
+ return id;
+}
+
+/*
+ * Compute rtc->offset_secs so that a driver-declared "start-year"
+ * property extends the usable window of an RTC with a limited
+ * range_min..range_max.  No-op unless both a range and a start time
+ * are provided.
+ */
+static void rtc_device_get_offset(struct rtc_device *rtc)
+{
+ time64_t range_secs;
+ u32 start_year;
+ int ret;
+
+ /*
+ * If RTC driver did not implement the range of RTC hardware device,
+ * then we can not expand the RTC range by adding or subtracting one
+ * offset.
+ */
+ if (rtc->range_min == rtc->range_max)
+ return;
+
+ ret = device_property_read_u32(rtc->dev.parent, "start-year",
+ &start_year);
+ if (!ret) {
+ rtc->start_secs = mktime64(start_year, 1, 1, 0, 0, 0);
+ rtc->set_start_time = true;
+ }
+
+ /*
+ * If user did not implement the start time for RTC driver, then no
+ * need to expand the RTC range.
+ */
+ if (!rtc->set_start_time)
+ return;
+
+ range_secs = rtc->range_max - rtc->range_min + 1;
+
+ /*
+ * If the start_secs is larger than the maximum seconds (rtc->range_max)
+ * supported by RTC hardware or the maximum seconds of new expanded
+ * range (start_secs + rtc->range_max - rtc->range_min) is less than
+ * rtc->range_min, which means the minimum seconds (rtc->range_min) of
+ * RTC hardware will be mapped to start_secs by adding one offset, so
+ * the offset seconds calculation formula should be:
+ * rtc->offset_secs = rtc->start_secs - rtc->range_min;
+ *
+ * If the start_secs is larger than the minimum seconds (rtc->range_min)
+ * supported by RTC hardware, then there is one region is overlapped
+ * between the original RTC hardware range and the new expanded range,
+ * and this overlapped region do not need to be mapped into the new
+ * expanded range due to it is valid for RTC device. So the minimum
+ * seconds of RTC hardware (rtc->range_min) should be mapped to
+ * rtc->range_max + 1, then the offset seconds formula should be:
+ * rtc->offset_secs = rtc->range_max - rtc->range_min + 1;
+ *
+ * If the start_secs is less than the minimum seconds (rtc->range_min),
+ * which is similar to case 2. So the start_secs should be mapped to
+ * start_secs + rtc->range_max - rtc->range_min + 1, then the
+ * offset seconds formula should be:
+ * rtc->offset_secs = -(rtc->range_max - rtc->range_min + 1);
+ *
+ * Otherwise the offset seconds should be 0.
+ */
+ if (rtc->start_secs > rtc->range_max ||
+ rtc->start_secs + range_secs - 1 < rtc->range_min)
+ rtc->offset_secs = rtc->start_secs - rtc->range_min;
+ else if (rtc->start_secs > rtc->range_min)
+ rtc->offset_secs = range_secs;
+ else if (rtc->start_secs < rtc->range_min)
+ rtc->offset_secs = -range_secs;
+ else
+ rtc->offset_secs = 0;
+}
+
+/**
+ * rtc_device_unregister - removes the previously registered RTC class device
+ *
+ * @rtc: the RTC class device to destroy
+ */
+static void rtc_device_unregister(struct rtc_device *rtc)
+{
+ mutex_lock(&rtc->ops_lock);
+ /*
+ * Remove innards of this RTC, then disable it, before
+ * letting any rtc_class_open() users access it again
+ */
+ rtc_proc_del_device(rtc);
+ cdev_device_del(&rtc->char_dev, &rtc->dev);
+ /* NULL ops marks the device dead for late callers holding ops_lock */
+ rtc->ops = NULL;
+ mutex_unlock(&rtc->ops_lock);
+ put_device(&rtc->dev);
+}
+
+/*
+ * devres destructor for devm_rtc_allocate_device(): tear down nvmem,
+ * then unregister (if registration completed) or just drop the ref.
+ */
+static void devm_rtc_release_device(struct device *dev, void *res)
+{
+ struct rtc_device *rtc = *(struct rtc_device **)res;
+
+ rtc_nvmem_unregister(rtc);
+
+ if (rtc->registered)
+ rtc_device_unregister(rtc);
+ else
+ put_device(&rtc->dev);
+}
+
+/*
+ * Managed allocation of an rtc_device for @dev: picks an id, allocates
+ * the device and attaches a devres destructor so it is torn down on
+ * driver detach.  Returns the device or an ERR_PTR on failure.
+ */
+struct rtc_device *devm_rtc_allocate_device(struct device *dev)
+{
+ struct rtc_device **ptr, *rtc;
+ int id, err;
+
+ id = rtc_device_get_id(dev);
+ if (id < 0)
+ return ERR_PTR(id);
+
+ ptr = devres_alloc(devm_rtc_release_device, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr) {
+ err = -ENOMEM;
+ goto exit_ida;
+ }
+
+ rtc = rtc_allocate_device();
+ if (!rtc) {
+ err = -ENOMEM;
+ goto exit_devres;
+ }
+
+ *ptr = rtc;
+ devres_add(dev, ptr);
+
+ rtc->id = id;
+ rtc->dev.parent = dev;
+ dev_set_name(&rtc->dev, "rtc%d", id);
+
+ return rtc;
+
+exit_devres:
+ devres_free(ptr);
+exit_ida:
+ ida_simple_remove(&rtc_ida, id);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(devm_rtc_allocate_device);
+
+/*
+ * Register an allocated rtc_device: requires rtc->ops to be set.
+ * Computes the range offset, restores any alarm already armed in
+ * hardware, creates the char device and procfs entry, and (when this
+ * is the hctosys device) seeds the system clock.  Returns 0 or -EINVAL.
+ */
+int __rtc_register_device(struct module *owner, struct rtc_device *rtc)
+{
+ struct rtc_wkalrm alrm;
+ int err;
+
+ if (!rtc->ops) {
+ dev_dbg(&rtc->dev, "no ops set\n");
+ return -EINVAL;
+ }
+
+ rtc->owner = owner;
+ rtc_device_get_offset(rtc);
+
+ /* Check to see if there is an ALARM already set in hw */
+ err = __rtc_read_alarm(rtc, &alrm);
+ if (!err && !rtc_valid_tm(&alrm.time))
+ rtc_initialize_alarm(rtc, &alrm);
+
+ rtc_dev_prepare(rtc);
+
+ /* char-device failure is only warned about, not fatal */
+ err = cdev_device_add(&rtc->char_dev, &rtc->dev);
+ if (err)
+ dev_warn(rtc->dev.parent, "failed to add char device %d:%d\n",
+ MAJOR(rtc->dev.devt), rtc->id);
+ else
+ dev_dbg(rtc->dev.parent, "char device (%d:%d)\n",
+ MAJOR(rtc->dev.devt), rtc->id);
+
+ rtc_proc_add_device(rtc);
+
+ rtc->registered = true;
+ dev_info(rtc->dev.parent, "registered as %s\n",
+ dev_name(&rtc->dev));
+
+#ifdef CONFIG_RTC_HCTOSYS_DEVICE
+ if (!strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE))
+ rtc_hctosys(rtc);
+#endif
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__rtc_register_device);
+
+/**
+ * devm_rtc_device_register - resource managed rtc_device_register()
+ * @dev: the device to register
+ * @name: the name of the device (unused)
+ * @ops: the rtc operations structure
+ * @owner: the module owner
+ *
+ * @return a struct rtc on success, or an ERR_PTR on error
+ *
+ * Managed rtc_device_register(). The rtc_device returned from this function
+ * are automatically freed on driver detach.
+ * This function is deprecated, use devm_rtc_allocate_device and
+ * rtc_register_device instead
+ */
+struct rtc_device *devm_rtc_device_register(struct device *dev,
+ const char *name,
+ const struct rtc_class_ops *ops,
+ struct module *owner)
+{
+ struct rtc_device *rtc;
+ int err;
+
+ rtc = devm_rtc_allocate_device(dev);
+ if (IS_ERR(rtc))
+ return rtc;
+
+ rtc->ops = ops;
+
+ err = __rtc_register_device(owner, rtc);
+ if (err)
+ return ERR_PTR(err);
+
+ /* on error the devres destructor releases the allocation */
+ return rtc;
+}
+EXPORT_SYMBOL_GPL(devm_rtc_device_register);
+
+/* Create the "rtc" class and char-dev infrastructure early in boot. */
+static int __init rtc_init(void)
+{
+ rtc_class = class_create(THIS_MODULE, "rtc");
+ if (IS_ERR(rtc_class)) {
+ pr_err("couldn't create class\n");
+ return PTR_ERR(rtc_class);
+ }
+ rtc_class->pm = RTC_CLASS_DEV_PM_OPS;
+ rtc_dev_init();
+ return 0;
+}
+subsys_initcall(rtc_init);
diff --git a/upstream/linux-5.10/drivers/soc/sc/pcu/pcu-zx297520v3.c b/upstream/linux-5.10/drivers/soc/sc/pcu/pcu-zx297520v3.c
new file mode 100755
index 0000000..66c8cf3
--- /dev/null
+++ b/upstream/linux-5.10/drivers/soc/sc/pcu/pcu-zx297520v3.c
@@ -0,0 +1,976 @@
+/*
+ *
+ * Copyright (C) 2015-2022 ZTE-TSP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/percpu.h>
+#include <linux/refcount.h>
+#include <linux/slab.h>
+#include <linux/irqchip.h>
+#include <linux/suspend.h>
+#include <linux/syscore_ops.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include <linux/soc/sc/common.h>
+#include <linux/soc/sc/spinlock.h>
+#include <linux/soc/sc/pcu.h>
+#include <linux/soc/sc/rpmsg.h>
+#include <dt-bindings/soc/zx297520v3-irq.h>
+#include <uapi/linux/sc_bsp/bsp_api.h>
+
+#include "pcu-common.h"
+
+#if 0
+
+#define pm_ram_log(fmt, args...) \
+{ \
+ pm_printk("[SLP] " fmt, ##args); \
+}
+#else
+/* sleep/wake trace output; currently routed to the kernel log */
+#define pm_ram_log(fmt, args...) \
+{ \
+ printk(KERN_INFO "[SLP] " fmt, ##args); \
+}
+
+#endif
+
+/* total irq lines handled: SPIs plus the 32 SGI/PPI ids */
+#define ZX_IRQ_NUM (IRQ_ZX297520V3_SPI_NUM + 32)
+
+#define PCU_LOCK reg_spin_lock();
+#define PCU_UNLOCK reg_spin_unlock();
+
+/*
+ * Wake-source routing table for the zx297520v3 PCU.
+ * Per entry: pcu_index = PCU-side id; gic_index = GIC interrupt number;
+ * status_index = bit position in the PCU_INT_READOUT registers;
+ * wake_index = bit in AP_INT_WAKE_DIS_REG (0xff = not a wake source);
+ * irq_type = trigger type; wl_type = PM wakelock event id (0xff = none).
+ * Note ext8..ext15 all share the combined EX8IN1_INT GIC line.
+ */
+static struct zx_pcu_int_info zx297520v3_pcu_int_info[] =
+{
+ {
+ .pcu_index = PCU_AP_TIMER1_INT,
+ .gic_index = AP_TIMER1_INT,
+ .status_index = 51,
+ .wake_index = 0,
+ .int_name = "ap_timer1",
+ .irq_type = IRQ_TYPE_EDGE_RISING,
+ .wl_type = PM_WL_EVENT_AP_TIMER1,
+ },
+ {
+ .pcu_index = PCU_AP_TIMER2_INT,
+ .gic_index = AP_TIMER2_INT,
+ .status_index = 52,
+ .wake_index = 1,
+ .int_name = "ap_timer2",
+ .irq_type = IRQ_TYPE_EDGE_RISING,
+ .wl_type = PM_WL_EVENT_AP_TIMER2,
+ },
+ {
+ .pcu_index = PCU_ICP_PS2AP_INT,
+ .gic_index = ICP_PS2AP_INT,
+ .status_index = 53,
+ .wake_index = 2,
+ .int_name = "icp_ps_ap",
+ .irq_type = IRQ_TYPE_LEVEL_HIGH,
+ .wl_type = PM_WL_EVENT_ICP_PS2AP,
+ },
+ {
+ .pcu_index = PCU_USB_POWERDWN_UP_INT,
+ .gic_index = USB_POWERDWN_UP_INT,
+ .status_index = 6,
+ .wake_index = 3,
+ .int_name = "usb_up",
+ .irq_type = IRQ_TYPE_EDGE_RISING,
+ .wl_type = PM_WL_EVENT_USB_POWERDWN_UP,
+ },
+ {
+ .pcu_index = PCU_USB_POWERDWN_DOWN_INT,
+ .gic_index = USB_POWERDWN_DOWN_INT,
+ .status_index = 7,
+ .wake_index = 4,
+ .int_name = "usb_down",
+ .irq_type = IRQ_TYPE_EDGE_FALLING,
+ .wl_type = PM_WL_EVENT_USB_POWERDWN_DOWN,
+ },
+ {
+ .pcu_index = PCU_HSIC_POWERDWN_UP_INT,
+ .gic_index = HSIC_POWERDWN_UP_INT,
+ .status_index = 8,
+ .wake_index = 5,
+ .int_name = "hsic_up",
+ .irq_type = IRQ_TYPE_EDGE_RISING,
+ .wl_type = PM_WL_EVENT_HSIC_POWERDWN_UP,
+ },
+ {
+ .pcu_index = PCU_HSIC_POWERDWN_DOWN_INT,
+ .gic_index = HSIC_POWERDWN_DOWN_INT,
+ .status_index = 9,
+ .wake_index = 6,
+ .int_name = "hsic_down",
+ .irq_type = IRQ_TYPE_EDGE_FALLING,
+ .wl_type = PM_WL_EVENT_HSIC_POWERDWN_DOWN,
+ },
+ {
+ .pcu_index = PCU_ICP_M02AP_INT,
+ .gic_index = ICP_M02AP_INT,
+ .status_index = 54,
+ .wake_index = 7,
+ .int_name = "icp_m0_ap",
+ .irq_type = IRQ_TYPE_LEVEL_HIGH,
+ .wl_type = PM_WL_EVENT_ICP_M02AP,
+ },
+ {
+ .pcu_index = PCU_RTC_ALARM_INT,
+ .gic_index = RTC_ALARM_INT,
+ .status_index = 12,
+ .wake_index = 8,
+ .int_name = "rtc_alarm",
+ .irq_type = IRQ_TYPE_LEVEL_LOW,
+ .wl_type = PM_WL_EVENT_ALARM,
+ },
+ {
+ .pcu_index = PCU_RTC_TIMER_INT,
+ .gic_index = RTC_TIMER_INT,
+ .status_index = 13,
+ .wake_index = 9,
+ .int_name = "rtc_timer",
+ .irq_type = IRQ_TYPE_LEVEL_LOW,
+ .wl_type = PM_WL_EVENT_RTC_TIMER,
+ },
+ {
+ .pcu_index = PCU_KEYPAD_INT,
+ .gic_index = KEYPAD_INT,
+ .status_index = 14,
+ .wake_index = 10,
+ .int_name = "kpd",
+ .irq_type = IRQ_TYPE_EDGE_RISING,
+ .wl_type = PM_WL_EVENT_KEYPAD,
+ },
+ {
+ .pcu_index = PCU_SD1_DATA1_INT,
+ .gic_index = SD1_DATA1_INT,
+ .status_index = 15,
+ .wake_index = 11,
+ .int_name = "sd1_d1",
+ .irq_type = IRQ_TYPE_LEVEL_LOW,
+ .wl_type = PM_WL_EVENT_SD1_DATA1,
+ },
+ {
+ .pcu_index = PCU_EX0_INT,
+ .gic_index = EX0_INT,
+ .status_index = 30,
+ .wake_index = 14,
+ .int_name = "ext0",
+ .irq_type = IRQ_TYPE_LEVEL_HIGH,
+ .wl_type = PM_WL_EVENT_EXT0,
+ },
+ {
+ .pcu_index = PCU_EX1_INT,
+ .gic_index = EX1_INT,
+ .status_index = 31,
+ .wake_index = 15,
+ .int_name = "ext1",
+ .irq_type = IRQ_TYPE_LEVEL_HIGH,
+ .wl_type = PM_WL_EVENT_EXT1,
+ },
+ {
+ .pcu_index = PCU_EX2_INT,
+ .gic_index = EX2_INT,
+ .status_index = 32,
+ .wake_index = 16,
+ .int_name = "ext2",
+ .irq_type = IRQ_TYPE_LEVEL_HIGH,
+ .wl_type = PM_WL_EVENT_EXT2,
+ },
+ {
+ .pcu_index = PCU_EX3_INT,
+ .gic_index = EX3_INT,
+ .status_index = 33,
+ .wake_index = 17,
+ .int_name = "ext3",
+ .irq_type = IRQ_TYPE_LEVEL_HIGH,
+ .wl_type = PM_WL_EVENT_EXT3,
+ },
+ {
+ .pcu_index = PCU_EX4_INT,
+ .gic_index = EX4_INT,
+ .status_index = 34,
+ .wake_index = 18,
+ .int_name = "ext4",
+ .irq_type = IRQ_TYPE_LEVEL_HIGH,
+ .wl_type = PM_WL_EVENT_EXT4,
+ },
+ {
+ .pcu_index = PCU_EX5_INT,
+ .gic_index = EX5_INT,
+ .status_index = 35,
+ .wake_index = 19,
+ .int_name = "ext5",
+ .irq_type = IRQ_TYPE_LEVEL_HIGH,
+ .wl_type = PM_WL_EVENT_EXT5,
+ },
+ {
+ .pcu_index = PCU_EX6_INT,
+ .gic_index = EX6_INT,
+ .status_index = 36,
+ .wake_index = 20,
+ .int_name = "ext6",
+ .irq_type = IRQ_TYPE_LEVEL_HIGH,
+ .wl_type = PM_WL_EVENT_EXT6,
+ },
+ {
+ .pcu_index = PCU_EX7_INT,
+ .gic_index = EX7_INT,
+ .status_index = 37,
+ .wake_index = 21,
+ .int_name = "ext7",
+ .irq_type = IRQ_TYPE_LEVEL_HIGH,
+ .wl_type = PM_WL_EVENT_EXT7,
+ },
+ {
+ .pcu_index = PCU_EX8_INT,
+ .gic_index = EX8IN1_INT,
+ .status_index = 38,
+ .wake_index = 22,
+ .int_name = "ext8",
+ .irq_type = IRQ_TYPE_LEVEL_HIGH,
+ .wl_type = PM_WL_EVENT_EXT8,
+ },
+ {
+ .pcu_index = PCU_EX9_INT,
+ .gic_index = EX8IN1_INT,
+ .status_index = 39,
+ .wake_index = 23,
+ .int_name = "ext9",
+ .irq_type = IRQ_TYPE_LEVEL_HIGH,
+ .wl_type = PM_WL_EVENT_EXT9,
+ },
+ {
+ .pcu_index = PCU_EX10_INT,
+ .gic_index = EX8IN1_INT,
+ .status_index = 40,
+ .wake_index = 24,
+ .int_name = "ext10",
+ .irq_type = IRQ_TYPE_LEVEL_HIGH,
+ .wl_type = PM_WL_EVENT_EXT10,
+ },
+ {
+ .pcu_index = PCU_EX11_INT,
+ .gic_index = EX8IN1_INT,
+ .status_index = 41,
+ .wake_index = 25,
+ .int_name = "ext11",
+ .irq_type = IRQ_TYPE_LEVEL_HIGH,
+ .wl_type = PM_WL_EVENT_EXT11,
+ },
+ {
+ .pcu_index = PCU_EX12_INT,
+ .gic_index = EX8IN1_INT,
+ .status_index = 42,
+ .wake_index = 26,
+ .int_name = "ext12",
+ .irq_type = IRQ_TYPE_LEVEL_HIGH,
+ .wl_type = PM_WL_EVENT_EXT12,
+ },
+ {
+ .pcu_index = PCU_EX13_INT,
+ .gic_index = EX8IN1_INT,
+ .status_index = 43,
+ .wake_index = 27,
+ .int_name = "ext13",
+ .irq_type = IRQ_TYPE_LEVEL_HIGH,
+ .wl_type = PM_WL_EVENT_EXT13,
+ },
+ {
+ .pcu_index = PCU_EX14_INT,
+ .gic_index = EX8IN1_INT,
+ .status_index = 44,
+ .wake_index = 28,
+ .int_name = "ext14",
+ .irq_type = IRQ_TYPE_LEVEL_HIGH,
+ .wl_type = PM_WL_EVENT_EXT14,
+ },
+ {
+ .pcu_index = PCU_EX15_INT,
+ .gic_index = EX8IN1_INT,
+ .status_index = 45,
+ .wake_index = 29,
+ .int_name = "ext15",
+ .irq_type = IRQ_TYPE_LEVEL_HIGH,
+ .wl_type = PM_WL_EVENT_EXT15,
+ },
+ {
+ .pcu_index = PCU_SD0_DATA1_INT,
+ .gic_index = SD0_DATA1_INT,
+ .status_index = 2,
+ .wake_index = 30,
+ .int_name = "sd0_d1",
+ .irq_type = IRQ_TYPE_LEVEL_LOW,
+ .wl_type = PM_WL_EVENT_SD0_DATA1,
+ },
+ {
+ .pcu_index = PCU_ICP_PHY2AP_INT,
+ .gic_index = ICP_PHY2AP_INT,
+ .status_index = 55,
+ .wake_index = 31,
+ .int_name = "icp_phy_ap",
+ .irq_type = IRQ_TYPE_LEVEL_HIGH,
+ .wl_type = PM_WL_EVENT_ICP_PHY2AP,
+ },
+ {
+ .pcu_index = PCU_GMACPHY_WAKE_INT,
+ .gic_index = GMACPHY_WAKE_INT,
+ .status_index = 60,
+ .wake_index = 0xff,
+ .int_name = "gmacphy_wake",
+ .irq_type = IRQ_TYPE_LEVEL_HIGH,
+ .wl_type = 0xff,
+ },
+ {
+ .pcu_index = PCU_UART0_RXD_INT,
+ .gic_index = UART0_RXD_INT,
+ .status_index = 59,
+ .wake_index = 42,
+ .int_name = "uart0_rxd",
+ .irq_type = IRQ_TYPE_EDGE_FALLING,
+ .wl_type = 0xff,
+
+ },
+ {
+ .pcu_index = PCU_GMAC_INT,
+ .gic_index = GMAC_INT,
+ .status_index = 16,
+ .wake_index = 0xff,
+ .int_name = "gmac",
+ .irq_type = IRQ_TYPE_LEVEL_HIGH,
+ .wl_type = 0xff,
+ },
+ {
+ .pcu_index = PCU_GMACPHY_INT,
+ .gic_index = GMACPHY_INT,
+ .status_index = 61,
+ .wake_index = 0xff,
+ .int_name = "gmacphy",
+ .irq_type = IRQ_TYPE_LEVEL_HIGH,
+ .wl_type = 0xff,
+ },
+};
+
+/* Forward irqchip-state queries to the parent (GIC) irq_data. */
+static int zx_pcu_get_irqchip_state(struct irq_data *data,
+ enum irqchip_irq_state which, bool *val)
+{
+ data = data->parent_data;
+
+ if (data->chip->irq_get_irqchip_state)
+ return data->chip->irq_get_irqchip_state(data, which, val);
+
+ return -ENOSYS;
+}
+
+/* Forward irqchip-state updates to the parent (GIC) irq_data. */
+static int zx_pcu_set_irqchip_state(struct irq_data *data,
+ enum irqchip_irq_state which, bool val)
+{
+ data = data->parent_data;
+
+ if (data->chip->irq_set_irqchip_state)
+ return data->chip->irq_set_irqchip_state(data, which, val);
+
+ return -ENOSYS;
+}
+
+/* Delegate NMI setup to the parent chip when it supports it. */
+static int zx_pcu_nmi_setup(struct irq_data *data)
+{
+ data = data->parent_data;
+
+ if (data->chip->irq_nmi_setup)
+ return data->chip->irq_nmi_setup(data);
+
+ return -ENOSYS;
+}
+
+/* Delegate NMI teardown to the parent chip when it supports it. */
+static void zx_pcu_nmi_teardown(struct irq_data *data)
+{
+ data = data->parent_data;
+
+ if (data->chip->irq_nmi_teardown)
+ data->chip->irq_nmi_teardown(data);
+}
+
+/* Enable or disable this hwirq as a PCU wake source; always succeeds. */
+static int zx_pcu_set_wake(struct irq_data *data, unsigned int on)
+{
+ pcu_set_irq_wake(data->hwirq, on);
+
+/* pr_info("irq:%d, onoff:%d", data->hwirq, on);*/
+
+ return 0;
+}
+
+/* Clear the PCU-level pending bit before EOI-ing the parent GIC. */
+static void zx_pcu_eoi_irq(struct irq_data *data)
+{
+ pcu_clr_irq_pending(data->hwirq);
+
+ irq_chip_eoi_parent(data);
+}
+
+/*
+ * Program the trigger type in the PCU; if the PCU rejects the type
+ * (pcu_set_irq_type() returns 0) the parent GIC is configured as
+ * level-high instead.
+ */
+static int zx_pcu_set_type(struct irq_data *data, unsigned int type)
+{
+ unsigned int new_type = type;
+
+ if(!pcu_set_irq_type(data->hwirq, type))
+ new_type = IRQ_TYPE_LEVEL_HIGH;
+
+ return irq_chip_set_type_parent(data, new_type);
+}
+
+/* Pass affinity straight through to the parent; a zx298501 timer
+ * special-case was considered and left disabled below. */
+static int zx_pcu_set_affinity(struct irq_data *data,
+ const struct cpumask *dest, bool force)
+{
+/*
+ if (data->hwirq == IRQ_ZX298501_AP_TIMER1)
+ return irq_chip_set_affinity_parent(data, cpumask_of(0), force); // ???
+ else
+*/ return irq_chip_set_affinity_parent(data, dest, force);
+}
+
+/* irq_chip stacked above the GIC: wake/type/eoi handled here, the rest
+ * forwarded to the parent. */
+static struct irq_chip zx_pcu_chip = {
+ .name = "PCU",
+ .irq_eoi = zx_pcu_eoi_irq,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_set_wake = zx_pcu_set_wake,
+ .irq_set_type = zx_pcu_set_type,
+
+ .irq_set_affinity = zx_pcu_set_affinity,
+ .irq_get_irqchip_state = zx_pcu_get_irqchip_state,
+ .irq_set_irqchip_state = zx_pcu_set_irqchip_state,
+ .irq_set_vcpu_affinity = irq_chip_set_vcpu_affinity_parent,
+ .irq_nmi_setup = zx_pcu_nmi_setup,
+ .irq_nmi_teardown = zx_pcu_nmi_teardown,
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SET_TYPE_MASKED,
+};
+
+/*
+ * Translate a 3-cell DT interrupt spec (type, hwirq, flags) into
+ * hwirq + trigger type.  Only SPI specs (first cell 0) are accepted.
+ */
+static int zx_pcu_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ if (is_of_node(fwspec->fwnode)) {
+ if (fwspec->param_count != 3)
+ return -EINVAL;
+
+ /* No PPI should point to this domain */
+ if (fwspec->param[0] != 0)
+ return -EINVAL;
+
+ *hwirq = fwspec->param[1];
+ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * Hierarchical alloc: attach zx_pcu_chip to each virq, then allocate
+ * the same range from the parent (GIC) domain with an identical fwspec.
+ */
+static int zx_pcu_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct irq_fwspec *fwspec = data;
+ struct irq_fwspec parent_fwspec;
+ struct zx_pcu_dev *pcu = domain->host_data;
+ irq_hw_number_t hwirq;
+ unsigned int i;
+
+ if (fwspec->param_count != 3)
+ return -EINVAL; /* Not GIC compliant */
+ if (fwspec->param[0] != GIC_SPI)
+ return -EINVAL; /* No PPI should point to this domain */
+
+ hwirq = fwspec->param[1];
+ if (hwirq >= ZX_IRQ_NUM)
+ return -EINVAL;
+
+ for (i = 0; i < nr_irqs; i++) {
+ /* chip_data carries the PCU register base for the handlers */
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &zx_pcu_chip,
+ (void __force *)pcu->top_reg_base);
+ }
+
+ parent_fwspec = *fwspec;
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+ &parent_fwspec);
+}
+
+/* Domain ops for the PCU hierarchy; free uses the common helper. */
+static const struct irq_domain_ops zx_pcu_domain_ops = {
+ .translate = zx_pcu_domain_translate,
+ .alloc = zx_pcu_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+/*
+ * Probe the "zte,zx297520v3-pcu" node: map the register bank, install
+ * the interrupt routing table, initialise the PCU hardware and stack
+ * an irq domain on top of the parent GIC domain.
+ */
+static int __init zx_pcu_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_domain *parent_domain, *domain;
+ struct zx_pcu_dev *pcu;
+
+ if (!parent) {
+ pr_err("%pOF: no parent found\n", node);
+ return -ENODEV;
+ }
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("%pOF: unable to obtain parent domain\n", node);
+ return -ENXIO;
+ }
+
+ /* single global instance shared with pcu-common helpers */
+ pcu = &pcu_dev;
+ pcu->np = node;
+ pcu->top_reg_base = of_iomap(node, 0);
+ WARN(!pcu->top_reg_base, "unable to map top pcu registers\n");
+
+ pcu->int_info = zx297520v3_pcu_int_info;
+ pcu->int_count = ARRAY_SIZE(zx297520v3_pcu_int_info);
+
+ pcu_init();
+
+ domain = irq_domain_add_hierarchy(parent_domain, 0, ZX_IRQ_NUM,
+ node, &zx_pcu_domain_ops,
+ pcu);
+ if (!domain) {
+ pr_err("%pOF: failed to allocated domain\n", node);
+ return -ENOMEM;
+ }
+
+// set_smp_cross_call();
+ pm_pcu_init();
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(zx297520v3_pcu, "zte,zx297520v3-pcu", zx_pcu_init);
+
+/* pcu debug */
+#ifdef CONFIG_PM
+/* Register offsets are word (4-byte) indexes from the mapped PCU base. */
+#define PCU_TOP (pcu_dev.top_reg_base)
+
+#define ARM_AP_CONFIG_REG (PCU_TOP + 0x0)
+#define ARM_AP_SLEEP_TIME_REG (PCU_TOP + 4*0x3C)
+#define AP_INT_WAKE_DIS_REG (PCU_TOP + 4*0xD)
+#define CORE_SWITCH_CONFIG_REG (PCU_TOP + 4*0x2b)
+
+#define M0_INT_WAKE_DIS_REG (PCU_TOP + 4*0xE)
+#define PCU_INT_READOUT_REG1 (PCU_TOP + 4*0x1EB)
+#define PCU_INT_READOUT_REG2 (PCU_TOP + 4*0x1EC)
+#define PCU_INT_READOUT_REG3 (PCU_TOP + 4*0x1ED)
+
+
+/*ARM_AP_CONFIG_REG*/
+#define PCU_SLEEP_MODE (1U << 0)
+#define PCU_POWEROFF_MODE (1U << 1)
+#define PCU_L2_CLK_GATE (1U << 2) /*1-can turn off*/
+#define PCU_SLEEP_2M0 (1U << 3)
+#define PCU_SLEEP_DONE_BYPASS (1U << 4)
+#define PCU_SW_CONFIG_MASK (1U << 5) /* NOTE(review): purpose unclear in original ("?????") - confirm with HW spec */
+
+#define PCU_MODE_MASK (0x3U << 0)
+
+/*ARM_AP_SLEEP_TIME_REG*/
+#define PCU_AP_SLEEP_TIME_DIS (1U << 31)
+
+
+/* low power function */
+extern unsigned int pm_get_wakesource(void);
+
+/**
+ * clear pcu sleep mode.
+ *
+ */
+void pm_clear_pcu(void)
+{
+ /* drop both the sleep and poweroff mode bits */
+ zx_clr_reg(ARM_AP_CONFIG_REG, PCU_MODE_MASK);
+}
+
+/* One-time PCU setup: clear any sleep mode, allow L2 clock gating and
+ * program the wake-interrupt mask. */
+void pm_pcu_init(void)
+{
+ zx_clr_reg(ARM_AP_CONFIG_REG, PCU_MODE_MASK);
+ zx_set_reg(ARM_AP_CONFIG_REG, PCU_L2_CLK_GATE);
+ /* register holds *disable* bits, hence the inverted wake mask */
+ zx_write_reg(AP_INT_WAKE_DIS_REG, ~(pm_get_wakesource()));
+}
+
+/* Toggle bit 2 (ps_clk_switch) of CORE_SWITCH_CONFIG_REG: set when
+ * @en is non-zero, cleared otherwise. */
+void zx_apmgclken_set(unsigned en)
+{
+ unsigned tmp;
+ if(en){
+ //set ps_clk_switch=1
+ tmp = zx_read_reg(CORE_SWITCH_CONFIG_REG);
+ tmp |= (0x1<<2);
+ zx_write_reg(CORE_SWITCH_CONFIG_REG, tmp);
+ } else{
+ //set ps_clk_switch=0
+ tmp = zx_read_reg(CORE_SWITCH_CONFIG_REG);
+ tmp &= ~(0x1<<2);
+ zx_write_reg(CORE_SWITCH_CONFIG_REG, tmp);
+ }
+}
+
+
+/**
+ * config pcu before poweroff
+ *
+ * @sleep_time: value written to ARM_AP_SLEEP_TIME_REG (see
+ * PCU_AP_SLEEP_TIME_DIS for the disable bit)
+ */
+void pm_set_pcu_poweroff(u32 sleep_time)
+{
+ zx_set_reg(ARM_AP_CONFIG_REG, PCU_POWEROFF_MODE);
+ zx_write_reg(ARM_AP_SLEEP_TIME_REG, sleep_time);
+}
+EXPORT_SYMBOL(pm_set_pcu_poweroff);
+
+
+/**
+ * config pcu before sleep
+ *
+ * @sleep_time: value written to ARM_AP_SLEEP_TIME_REG
+ */
+void pm_set_pcu_sleep(u32 sleep_time)
+{
+ zx_set_reg(ARM_AP_CONFIG_REG, PCU_SLEEP_MODE);
+ zx_write_reg(ARM_AP_SLEEP_TIME_REG, sleep_time);
+}
+
+/**
+ * get wakeup setting.
+ *
+ * Returns the raw AP wake-disable register (set bit = wake disabled).
+ */
+unsigned int pcu_get_wakeup_setting(void)
+{
+ return zx_read_reg(AP_INT_WAKE_DIS_REG);
+}
+/**
+ * set wakeup enable by gic.
+ *
+ * Bitmap of GIC interrupts allowed to wake; only the first word is
+ * populated (ICP, AP timer1 and the combined ext8-15 line).
+ */
+unsigned int gic_wake_enable[3]=
+{
+ (1<<ICP_PS2AP_INT) |(1<<ICP_M02AP_INT) | (1<<AP_TIMER1_INT) | (1<<EX8IN1_INT),
+ 0,
+ 0
+};
+
+extern void show_icp_state(T_RpMsg_CoreID actorID);
+/*
+ * Identify which configured wake source fired by scanning the PCU
+ * interrupt readout registers, log it, and report the matching
+ * wakelock event.  Logs "wake abnormal" when no enabled source shows
+ * as pending.
+ */
+void pm_get_wake_cause(void)
+{
+ unsigned int int_status[2];
+ int i = 0;
+ int index_found = 0xff;
+ unsigned int pcu_wake_setting[2];
+
+ /* when wake up, the level is high&the value is 0*/
+ int_status[0] = zx_read_reg(PCU_INT_READOUT_REG1);
+ int_status[1] = zx_read_reg(PCU_INT_READOUT_REG2);
+
+ pcu_wake_setting[0] = zx_read_reg(AP_INT_WAKE_DIS_REG);
+ pcu_wake_setting[1] = zx_read_reg(M0_INT_WAKE_DIS_REG);
+
+ for(i=0; i<ARRAY_SIZE(zx297520v3_pcu_int_info); i++)
+ {
+ /* not a wake-capable source */
+ if (zx297520v3_pcu_int_info[i].wake_index == 0xff)
+ continue;
+
+ /* wake disabled for this source (register stores disable bits) */
+ if(pcu_wake_setting[0]&BIT(zx297520v3_pcu_int_info[i].wake_index))
+ continue;
+
+ /* status bit set means NOT pending (active-low readout, see above) */
+ if(int_status[zx297520v3_pcu_int_info[i].status_index/32]&(1<<(zx297520v3_pcu_int_info[i].status_index%32)))
+ continue;
+
+ index_found = i;
+ break;
+ }
+
+ if(index_found != 0xff)
+ {
+ pm_ram_log(" wake: %d [%s]\n", zx297520v3_pcu_int_info[index_found].gic_index, zx297520v3_pcu_int_info[index_found].int_name);
+
+ if(zx297520v3_pcu_int_info[index_found].gic_index ==ICP_PS2AP_INT) {
+ show_icp_state(CORE_PS0);
+ }
+ pm_ram_log(" pcu int status:0x%x 0x%x\n",int_status[0], int_status[1]);
+
+ pm_wl_set_event(pcu_get_wl_index_by_gic(zx297520v3_pcu_int_info[index_found].gic_index));
+ }
+ else
+ {
+ pm_ram_log(" wake abnormal\n");
+ pm_ram_log(" pcu int status:0x%x 0x%x\n",int_status[0], int_status[1]);
+ }
+}
+
+/* wakeup source held briefly after every resume (see callback below) */
+static struct wakeup_source *zx_main_ws;
+/*
+ * PM notifier: after a suspend/resume cycle completes, hold the "zx_main"
+ * wakeup source for 1000 ms so the system does not immediately re-suspend.
+ */
+static int zx_pcu_pm_callback(struct notifier_block *nb,
+ unsigned long action, void *ptr)
+{
+ switch (action) {
+
+ case PM_POST_SUSPEND:
+ __pm_wakeup_event(zx_main_ws, 1000);
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+/* syscore suspend hook -- currently a no-op placeholder */
+static int pcu_pm_suspend(void)
+{
+ int ret = 0;
+
+ return ret;
+}
+
+/* syscore resume hook -- wake-cause reporting intentionally disabled */
+static void pcu_pm_resume(void)
+{
+// pcu_get_wake_cause();
+}
+
+static struct syscore_ops pcu_pm_syscore_ops = {
+ .suspend = pcu_pm_suspend,
+ .resume = pcu_pm_resume,
+};
+
+/*
+ * Register the "zx_main" wakeup source, the PM notifier and the syscore
+ * suspend/resume hooks. Runs at core_initcall time.
+ */
+static int pcu_pm_init(void)
+{
+ zx_main_ws = wakeup_source_register(NULL, "zx_main");
+ if (!zx_main_ws)
+ return -ENOMEM;
+
+ pm_notifier(zx_pcu_pm_callback, 0);
+
+ register_syscore_ops(&pcu_pm_syscore_ops);
+ return 0;
+}
+core_initcall(pcu_pm_init);
+#endif
+
+/* --------------------------------------------------------------------
+ * extint_8in1
+ * -------------------------------------------------------------------- */
+
+/* state for the 8-in-1 external-interrupt demultiplexer */
+struct ext8in1_info {
+	struct irq_domain *domain;	/* linear domain for hwirq 0..7 */
+	struct regmap *regmap;		/* parent syscon regmap */
+	int parent_irq;			/* muxed upstream (GIC) interrupt */
+
+};
+
+struct ext8in1_info ext8in1_dev = {0};
+
+/*
+ * return external interrupt number from ex8-ex15,
+ * return value is 0-7
+ */
+/*
+ * return external interrupt number from ex8-ex15,
+ * return value is 0-7
+ */
+static unsigned int pcu_get_8in1_int_source(void)
+{
+ /* bits [2:0] of TOP reg offset 0x12C select which of EX8..EX15 fired */
+ return zx_read_reg(pcu_dev.top_reg_base+0x12C) & 0x7;
+}
+
+
+/*external int 8-15 need extra clear*/
+/*
+ * Acknowledge a muxed EX8..EX15 interrupt. Only clears when @pcu_index is
+ * the interrupt currently reported by the mux (see the comment below on why
+ * unconditional clearing is unsafe when multiple cores share the mux).
+ */
+static void pcu_int_clear_8in1(unsigned int pcu_index)
+{
+ unsigned int vector=0;
+
+ if ( (pcu_index >= PCU_EX8_INT)&&(pcu_index <= PCU_EX15_INT) )
+ {
+ /*
+ *in 7510 platform, 8in1 interrupt would be used by different cores.
+ *when any core installs a new 8in1 interrupt, another core may be
+ * responding another 8in1 interrupt, so 8in1 interrupt shouldn't be
+ *cleared. in this case, nothing to be done. but a new problem comes,
+ * the core install new 8in1 interrupt will receive a fake interrupt.
+ */
+ vector = pcu_get_8in1_int_source();
+ if (pcu_index != (vector + PCU_EX8_INT) )
+ return;
+
+ /* write-1 to TOP reg 0x128 clears the mux latch */
+ PCU_LOCK
+ zx_write_reg(pcu_dev.top_reg_base+0x128, 0x1);
+ PCU_UNLOCK
+
+ pcu_int_clear(pcu_index);
+ }
+}
+
+/* no bus locking / masking needed for this chip: intentional no-ops */
+static void ext8in1_irq_lock(struct irq_data *data){}
+static void ext8in1_irq_sync_unlock(struct irq_data *data){}
+static void ext8in1_irq_mask(struct irq_data *data){}
+static void ext8in1_irq_unmask(struct irq_data *data){}
+/* forward wake configuration to the PCU using the absolute PCU index */
+static int ext8in1_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+ if (!data)
+ return -EINVAL;
+
+ pcu_set_irq_wake_by_pcu(data->hwirq + PCU_EX8_INT, on);
+
+ return 0;
+}
+
+/*
+ * Program the trigger type at the PCU, then clear any interrupt latched
+ * while the type was being changed (hwirq 0..7 maps to PCU_EX8..EX15).
+ */
+static int ext8in1_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ if (!data)
+ return -EINVAL;
+
+ pcu_int_set_type(data->hwirq + PCU_EX8_INT, type);
+
+ pcu_int_clear_8in1(data->hwirq + PCU_EX8_INT);
+
+ return 0;
+}
+
+/* irq_chip for the 8 demuxed lines; mask/unmask/bus ops are no-ops */
+static struct irq_chip ext8in1_irq_chip =
+{
+	.name = "ext8in1",
+
+	.irq_set_wake		= ext8in1_irq_set_wake,
+	.irq_set_type		= ext8in1_irq_set_type,
+	.irq_mask		= ext8in1_irq_mask,
+	.irq_unmask		= ext8in1_irq_unmask,
+	.irq_bus_lock		= ext8in1_irq_lock,
+	.irq_bus_sync_unlock	= ext8in1_irq_sync_unlock,
+};
+
+/*
+ * Chained handler for the muxed parent interrupt: read the pending source,
+ * ack it, dispatch its mapped virq, then re-read the mux and repeat until
+ * the reported source stops changing (i.e. nothing new became pending).
+ */
+static void ext8in1_handle_irq(struct irq_desc *desc)
+{
+ struct ext8in1_info *data = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ int virq;
+ int hwirq, new_hwirq;
+
+ chained_irq_enter(chip, desc);
+
+ hwirq = pcu_get_8in1_int_source();
+
+ while(1) {
+ pcu_int_clear_8in1(hwirq + PCU_EX8_INT);
+
+ virq = irq_find_mapping(data->domain, hwirq);
+ if (virq > 0)
+ generic_handle_irq(virq);
+
+ /* loop exits once the mux reports the same source twice in a row */
+ new_hwirq = pcu_get_8in1_int_source();
+ if (hwirq == new_hwirq)
+ break;
+ else
+ hwirq = new_hwirq;
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+extern void mask_irq(struct irq_desc *desc);
+extern void unmask_irq(struct irq_desc *desc);
+/* re-enable the muxed parent interrupt after system resume */
+static int ext8in1_irq_resume(struct device *dev)
+{
+ unmask_irq(irq_to_desc(ext8in1_dev.parent_irq));
+
+ return 0;
+}
+
+/* mask the muxed parent interrupt across system suspend */
+static int ext8in1_irq_suspend(struct device *dev)
+{
+ mask_irq(irq_to_desc(ext8in1_dev.parent_irq));
+
+ return 0;
+}
+
+/*
+ * Probe the 8-in-1 external-interrupt demux: look up the parent syscon,
+ * fetch the muxed parent interrupt, create a linear irq domain for the 8
+ * lines (EX8..EX15), install the chained handler and mark it wake-capable.
+ *
+ * Fix: of_parse_phandle() takes a reference on parent_np; it was only
+ * dropped on the regmap error path and leaked on success. Drop it
+ * unconditionally once the regmap has been resolved.
+ */
+static int zx_ext8in1_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *parent_np;
+ struct regmap *regmap;
+ struct ext8in1_info *data = &ext8in1_dev;
+ int i;
+
+ parent_np = of_parse_phandle(pdev->dev.of_node, "parent-syscon", 0);
+ if (!parent_np) {
+ dev_err(&pdev->dev, "Can't get parent-syscon\n");
+ return -EINVAL;
+ }
+
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np); /* drop phandle ref on both paths */
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+ data->regmap = regmap;
+
+ data->parent_irq = platform_get_irq(pdev, 0);
+ if (data->parent_irq <= 0)
+ return -EPROBE_DEFER;
+
+ data->domain = irq_domain_add_linear(np, 8, &irq_domain_simple_ops, NULL);
+ if (!data->domain)
+ return -ENODEV;
+
+ /* map and wire up each of the 8 demuxed lines */
+ for (i = EX8_INT; i <= EX15_INT; i++) {
+ int virq = irq_create_mapping(data->domain, i);
+
+ irq_set_chip_and_handler(virq, &ext8in1_irq_chip,
+ handle_simple_irq);
+ irq_set_chip_data(virq, data);
+ }
+
+ irq_set_chained_handler_and_data(data->parent_irq,
+ ext8in1_handle_irq, data);
+ enable_irq_wake(data->parent_irq);
+
+ pr_info("zx_ext8in1 init OK. \n");
+
+ return 0;
+}
+
+static const struct of_device_id zx_ext8in1_match[] = {
+	{ .compatible = "zte,zx297520v3-ext8in1" },
+	{ }
+};
+
+/* mask/unmask the parent interrupt around system sleep */
+static const struct dev_pm_ops ext8in1_irq_pm_ops = {
+	.resume = ext8in1_irq_resume,
+	.suspend = ext8in1_irq_suspend,
+};
+
+static struct platform_driver zx_ext8in1_driver = {
+	.probe		= zx_ext8in1_probe,
+	.driver		= {
+		.name	= "zx_ext8in1_drv",
+		.of_match_table	= zx_ext8in1_match,
+		.pm	= &ext8in1_irq_pm_ops,
+	},
+};
+
+/* registered at core_initcall so demuxed irqs exist before client drivers */
+static int __init zx_ext8in1_driver_init(void)
+{
+	return platform_driver_register(&zx_ext8in1_driver);
+}
+core_initcall(zx_ext8in1_driver_init);
+
diff --git a/upstream/linux-5.10/drivers/soc/sc/plat/plat-zx297520v3.c b/upstream/linux-5.10/drivers/soc/sc/plat/plat-zx297520v3.c
new file mode 100755
index 0000000..b2da8d9
--- /dev/null
+++ b/upstream/linux-5.10/drivers/soc/sc/plat/plat-zx297520v3.c
@@ -0,0 +1,2048 @@
+/*
+ * drivers/soc/zte/plat/plat-zx298501.c
+ *
+ * Copyright (C) 2021 ZTE-TSP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/clockchips.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/syscore_ops.h>
+#include <linux/gpio.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/io.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include <linux/reset.h>
+#include <linux/io.h>
+#include <linux/amba/serial.h>
+#include <linux/serial_reg.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/sched/clock.h>
+#include <linux/suspend.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/reboot.h>
+#include <linux/miscdevice.h>
+#include <linux/soc/sc/common.h>
+#include <linux/soc/sc/pcu.h>
+#include <linux/soc/sc/spinlock.h>
+#include <dt-bindings/soc/zx297520v3-irq.h>
+#include <uapi/linux/sc_bsp/bsp_api.h>
+
+/*
+ * we use sysfs to test&debug some system funcs
+ *
+ */
+struct kobject *zx_root_kobj;
+struct kobject *zx_test_kobj;
+struct kobject *zx_pm_kobj;
+
+extern int __init zx_clk_test_init(void);
+extern int __init zx_dma_test_init(void);
+extern int __init zx_icp_test_init(void);
+extern int __init zx_timer_test_init(void);
+
+#define DOUBLE_EINT_DBG 0
+#define EINT_THREAD_TEST 0
+
+#define CONFIG_USE_DEBUG_LED 1
+#define ZX_RESET_TEST 1
+#define ZX_CLK_TEST 1
+#define ZX_PINCTRL_TEST 1
+#define ZX_GPIO_TEST 1
+#define ZX_EINT_TEST 1
+#define ZX_PM_TEST 1
+#if ZX_PM_TEST
+#define PM_RUNTIME_AUTO_TEST 1
+#endif
+#define ZX_SPINLOCK_TEST 0
+#define ZX_PM_QOS_TEST 1
+
+
+/*
+ *
+ * some test need device probe
+ */
+/* aggregate state for the self-test driver; members compiled per feature */
+struct zx_drv_test
+{
+	struct device *dev;		/* set by probe; NULL until probed */
+#if ZX_RESET_TEST
+	struct reset_control *rst;	/* "test_rst" reset line */
+#endif
+
+#if ZX_PINCTRL_TEST
+	struct pinctrl *pctrl;
+	struct pinctrl_state *state0;	/* "state0" */
+	struct pinctrl_state *state1;	/* "state1" */
+	struct pinctrl_state *state2;	/* "ext_int5" */
+#endif
+
+#if ZX_GPIO_TEST
+	int gpio;			/* DT gpio index 0 */
+	int gpio2;			/* DT gpio index 1 (DOUBLE_EINT_DBG) */
+	int gpio3;			/* DT gpio index 2 */
+	struct gpio_desc *gd;		/* "testtt" descriptor */
+
+#endif
+
+#if ZX_EINT_TEST
+	int eint_irq;			/* DT irq index 0 */
+	int eint_irq2;			/* DT irq index 1 */
+#endif
+
+#if ZX_CLK_TEST
+	struct clk *clk;		/* "test" clock */
+#endif
+};
+
+struct zx_drv_test drv_test = {0};
+
+#if 0//ZX_RESET_TEST
+/* pulse the reset line low-high with a 10us hold (currently compiled out) */
+static void drv_reset_test(struct reset_control *rstc)
+{
+	reset_control_assert(rstc);
+	udelay(10);
+	reset_control_deassert(rstc);
+}
+#endif
+
+#if ZX_EINT_TEST
+#if EINT_THREAD_TEST
+/* hard-irq half: disable the line and defer the rest to the thread */
+static irqreturn_t test_eint_pri_isr(int irq, void *p)
+{
+ disable_irq_nosync(irq);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/*
+ * threaded half: flip the pin mux through state0, re-arm the trigger
+ * polarity to match the current pin level, restore state1, then re-enable.
+ */
+static irqreturn_t test_eint_isr(int irq, void *p)
+{
+ static int eint_cnt = 0;
+ if ( pinctrl_select_state(drv_test.pctrl, drv_test.state0) < 0) {
+ pr_err("setting state0 failed\n");
+ }
+
+ irq_set_irq_type(drv_test.eint_irq, gpio_get_value(drv_test.gpio)?IRQ_TYPE_LEVEL_LOW:IRQ_TYPE_LEVEL_HIGH);
+
+ if ( pinctrl_select_state(drv_test.pctrl, drv_test.state1) < 0) {
+ /* fixed: this branch selects state1, so report state1 (was "state0") */
+ pr_err("setting state1 failed\n");
+ }
+
+ enable_irq(irq);
+
+ pr_info("eint9 get = %d\n", ++eint_cnt);
+
+ return IRQ_HANDLED;
+}
+#else
+/* simple ISR: re-arm trigger polarity to the opposite of the pin level */
+static irqreturn_t test_eint_isr(int irq, void *p)
+{
+ static int eint_cnt = 0;
+
+ irq_set_irq_type(drv_test.eint_irq, gpio_get_value(drv_test.gpio)?IRQ_TYPE_LEVEL_LOW:IRQ_TYPE_LEVEL_HIGH);
+
+ //pr_info("eint9 get = %d\n", ++eint_cnt);
+
+ return IRQ_HANDLED;
+}
+#endif
+/* second test line (eint12); same polarity re-arming trick as above */
+static irqreturn_t test_eint_isr2(int irq, void *p)
+{
+ static int eint_cnt1 = 0;
+
+ irq_set_irq_type(drv_test.eint_irq2, gpio_get_value(drv_test.gpio2)?IRQ_TYPE_LEVEL_LOW:IRQ_TYPE_LEVEL_HIGH);
+
+ pr_info("eint12 get = %d\n", ++eint_cnt1);
+
+ return IRQ_HANDLED;
+}
+
+#endif
+
+#if ZX_GPIO_TEST
+/* count and log gpio test interrupts */
+static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
+{
+ static int irq_cnt = 0;
+
+ pr_info("gpio irq_cnt = %d\n", ++irq_cnt);
+
+ return IRQ_HANDLED;
+}
+#endif
+
+/*
+ * test led helper interface
+ *
+ */
+#if CONFIG_USE_DEBUG_LED
+/* claim the test gpio as a debug LED, initially off; no-op before probe */
+static void test_led_init(void)
+{
+	int ret;
+
+	if (!drv_test.dev)
+		return;
+
+	ret = gpio_request(drv_test.gpio, "led_test");
+	if (ret)
+	{
+		pr_info("led_test gpio request error.\n");
+		return ;
+	}
+
+	gpio_direction_output(drv_test.gpio, 0);
+}
+/* drive the debug LED gpio high */
+static void test_led_on(void)
+{
+	if (!drv_test.dev)
+		return;
+
+	gpio_direction_output(drv_test.gpio, 1);
+}
+/* drive the debug LED gpio low */
+static void test_led_off(void)
+{
+	if (!drv_test.dev)
+		return;
+
+	gpio_direction_output(drv_test.gpio, 0);
+}
+#else
+/* debug LED disabled: keep the call sites compiling */
+static void test_led_init(void){}
+static void test_led_on(void){}
+static void test_led_off(void){}
+#endif
+
+#if ZX_PM_TEST
+
+/* system resume: hold a wakeup reference on the device and trace entry */
+static int zx_drv_test_pm_resume(struct device *dev)
+{
+	pm_stay_awake(dev);
+
+	pr_info("zx_drv_test_pm_resume\n");
+	return 0;
+}
+
+/* system suspend: trace only */
+static int zx_drv_test_pm_suspend(struct device *dev)
+{
+	pr_info("zx_drv_test_pm_suspend\n");
+	return 0;
+}
+
+static int zx_drv_test_pm_runtime_resume(struct device *dev)
+{
+	/* enable clk and restore regs */
+	pr_info("zx_drv_test_pm_runtime_resume\n");
+	return 0;
+}
+
+static int zx_drv_test_pm_runtime_suspend(struct device *dev)
+{
+	/* backup regs and disable clk */
+	pr_info("zx_drv_test_pm_runtime_suspend\n");
+	return 0;
+}
+
+/* runtime-idle: trace only; returning 0 lets the core suspend the device */
+static int zx_drv_test_pm_runtime_idle(struct device *dev)
+{
+	pr_info("zx_drv_test_pm_runtime_idle\n");
+	return 0;
+}
+
+static const struct dev_pm_ops zx_drv_test_pm = {
+	.resume = zx_drv_test_pm_resume,
+	.suspend = zx_drv_test_pm_suspend,
+	.runtime_resume = zx_drv_test_pm_runtime_resume,
+	.runtime_suspend = zx_drv_test_pm_runtime_suspend,
+	.runtime_idle = zx_drv_test_pm_runtime_idle
+};
+#endif
+
+
+/*
+ * Probe for the self-test device: acquires (per compile-time switches) the
+ * reset line, test clock, test gpios, pinctrl states, wakeup capability,
+ * the eint test interrupts and runtime-PM autosuspend. Errors in optional
+ * sections skip ahead rather than failing the probe.
+ */
+static int zx_drv_test_probe(struct platform_device *pdev)
+{
+	int gpio;
+	int irq;
+	enum of_gpio_flags flags;
+	int ret;
+
+	drv_test.dev = &pdev->dev;
+
+	/* reset */
+#if ZX_RESET_TEST
+	drv_test.rst = devm_reset_control_get(&pdev->dev, "test_rst");
+#endif
+
+	/* clk */
+#if ZX_CLK_TEST
+	drv_test.clk = devm_clk_get(&pdev->dev, "test");
+	if (IS_ERR(drv_test.clk)) {
+		ret = PTR_ERR(drv_test.clk);
+		dev_err(&pdev->dev, "failed to get test_clk: %d\n", ret);
+		return ret;
+	}
+	clk_prepare_enable(drv_test.clk);
+#endif
+
+#if ZX_GPIO_TEST
+	drv_test.gd = gpiod_get_index(drv_test.dev, "testtt", 0, GPIOD_OUT_HIGH);
+
+	/* gpio test */
+	gpio = of_get_gpio_flags(pdev->dev.of_node, 0, &flags);
+	if (!gpio_is_valid(gpio)) {
+		pr_info("test gpio no found\n");
+		goto gpio_init_end;
+	}
+	/* pr_info("test gpio :%d flag=0x%x\n", gpio, flags); */
+
+	drv_test.gpio = gpio;
+
+	ret = gpio_request(drv_test.gpio, "gpio119");
+	if (ret)
+	{
+		pr_info("led_test gpio request error.\n");
+		BUG();
+		return 0;
+	}
+	gpio_direction_input(drv_test.gpio);
+
+
+#if DOUBLE_EINT_DBG
+	gpio = of_get_gpio_flags(pdev->dev.of_node, 1, &flags);
+	if (!gpio_is_valid(gpio)) {
+		pr_info("test gpio1 no found\n");
+		goto gpio_init_end;
+	}
+
+	drv_test.gpio2 = gpio;
+
+	ret = gpio_request(drv_test.gpio2, "gpio131");
+	if (ret)
+	{
+		pr_info("led_test gpio2 request error.\n");
+		BUG();
+		return 0;
+	}
+	gpio_direction_input(drv_test.gpio2);
+
+	pr_info("test gpio :%d gpio2 : %d\n", drv_test.gpio, drv_test.gpio2);
+#endif
+
+	/* NOTE(review): this reads gpio index 2 but logs "gpio1" -- message
+	 * looks copy-pasted; confirm intended index before relying on logs */
+	gpio = of_get_gpio_flags(pdev->dev.of_node, 2, &flags);
+	if (!gpio_is_valid(gpio)) {
+		pr_info("test gpio1 no found\n");
+		goto gpio_init_end;
+	}
+
+	drv_test.gpio3 = gpio;
+
+	ret = gpio_request(drv_test.gpio3, "gpio120");
+	if (ret)
+	{
+		pr_info("led_test gpio3 request error.\n");
+		BUG();
+		return 0;
+	}
+	gpio_direction_output(drv_test.gpio3, 1);
+
+
+gpio_init_end:
+
+#endif
+
+	/* pinctrl */
+#if ZX_PINCTRL_TEST
+/*
+	drv_test.pctrl = devm_pinctrl_get_select_default(&pdev->dev);
+*/
+	drv_test.pctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR(drv_test.pctrl)) {
+		dev_warn(&pdev->dev, "Failed to get test pins");
+		drv_test.pctrl = NULL;
+		goto pinctrl_init_end;
+	}
+	drv_test.state0 = pinctrl_lookup_state(drv_test.pctrl, "state0");
+	if (IS_ERR(drv_test.state0)) {
+		dev_err(&pdev->dev, "TEST: missing state0\n");
+	}
+	drv_test.state1 = pinctrl_lookup_state(drv_test.pctrl, "state1");	// int9
+	if (IS_ERR(drv_test.state1)) {
+		dev_err(&pdev->dev, "TEST: missing state1\n");
+	}
+	drv_test.state2 = pinctrl_lookup_state(drv_test.pctrl, "ext_int5");	// int12
+	if (IS_ERR(drv_test.state2)) {
+		dev_err(&pdev->dev, "TEST: missing state2\n");
+	}
+	/* NOTE(review): selects state1 but the error text says "state0" */
+	if ( pinctrl_select_state(drv_test.pctrl, drv_test.state1) < 0) {
+		dev_err(&pdev->dev, "setting state0 failed\n");
+	}
+
+#if DOUBLE_EINT_DBG
+	/* eint5 */
+	if ( pinctrl_select_state(drv_test.pctrl, drv_test.state2) < 0) {
+		dev_err(&pdev->dev, "setting eint5 failed\n");
+	}
+#endif
+
+pinctrl_init_end:
+#endif
+
+
+#if ZX_PM_TEST
+	/* just show how a device use wake source */
+	device_init_wakeup(&pdev->dev, true);
+//	pm_stay_awake(&pdev->dev);
+#endif
+
+	/* eint5 irq */
+#if ZX_EINT_TEST
+	drv_test.eint_irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	irq_set_irq_type(drv_test.eint_irq, gpio_get_value(drv_test.gpio)?IRQ_TYPE_LEVEL_LOW:IRQ_TYPE_LEVEL_HIGH);
+
+#if EINT_THREAD_TEST
+	ret = request_threaded_irq(drv_test.eint_irq, test_eint_pri_isr, test_eint_isr, IRQF_ONESHOT, "test_eint9", &drv_test);
+#else
+	ret = request_irq(drv_test.eint_irq,
+			 test_eint_isr,
+			 0,
+			 "test_eint9",
+			 &drv_test);
+#endif
+	if(ret<0)
+		BUG();
+	enable_irq_wake(drv_test.eint_irq);
+
+#if DOUBLE_EINT_DBG
+	drv_test.eint_irq2 = irq_of_parse_and_map(pdev->dev.of_node, 1);
+	irq_set_irq_type(drv_test.eint_irq2, gpio_get_value(drv_test.gpio2)?IRQ_TYPE_LEVEL_LOW:IRQ_TYPE_LEVEL_HIGH);
+	ret = request_irq(drv_test.eint_irq2,
+			 test_eint_isr2,
+			 0,
+			 "test_eint12",
+			 &drv_test);
+	if(ret<0)
+		BUG();
+	enable_irq_wake(drv_test.eint_irq2);
+#endif
+
+#endif
+
+
+#if ZX_PM_TEST
+#if PM_RUNTIME_AUTO_TEST
+	pm_runtime_set_autosuspend_delay(&pdev->dev, 3000 /*ms*/);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_get_sync(&pdev->dev);
+	if (!pm_runtime_enabled(&pdev->dev)) {
+		zx_drv_test_pm_runtime_resume(&pdev->dev);
+	}
+
+	/* put to suspend 3s later */
+	pm_runtime_mark_last_busy(&pdev->dev);
+	pm_runtime_put_sync_autosuspend(&pdev->dev);
+#else
+	if (pdev->dev.pm_domain) {
+		pm_runtime_set_active(&pdev->dev);
+		pm_runtime_enable(&pdev->dev);
+	}
+
+	if (pm_runtime_enabled(&pdev->dev))
+		pm_runtime_get_sync(&pdev->dev);
+#endif
+#endif
+
+	return 0;
+}
+
+static const struct of_device_id zx297520v3_drv_test_match[] = {
+	{ .compatible = "zte,drv-test", },
+	{ }
+};
+
+/* platform driver for the self-test device node */
+static struct platform_driver zx_test_driver = {
+	.probe		= zx_drv_test_probe,
+	.driver		= {
+		.name	= "zx297520v3_drv_test",
+#if ZX_PM_TEST
+		.pm	= &zx_drv_test_pm,
+#endif
+		.of_match_table	= zx297520v3_drv_test_match,
+	},
+};
+
+/* sysfs helper: declares a 0644 kobj_attribute named <_name>_attr wired to
+ * the <_name>_show / <_name>_store functions defined above its use */
+#define zte_attr(_name) \
+static struct kobj_attribute _name##_attr =	\
+{	\
+	.attr	=	\
+	{	\
+		.name = __stringify(_name),	\
+		.mode = 0644,	\
+	},	\
+	.show	= _name##_show,	\
+	.store	= _name##_store,	\
+}
+
+/*=============================================================================
+ *======== /sys/zte/test/os_timer ==============================================
+ *=============================================================================
+ */
+/* /sys/zte/test/os_timer read: print a usage hint */
+static ssize_t os_timer_show(struct kobject *kobj, struct kobj_attribute *attr,
+			char *buf)
+{
+	char *s = buf;
+
+	s += sprintf(s, "%s\n", "[TEST]Test will light on/off led every 5s~");
+
+	return (s - buf);
+}
+
+/*echo 1 > /sys/zte/test/os_timer*/
+static struct timer_list test_timer;		/* periodic test timer */
+static unsigned long test_timer_count = 0;	/* expirations so far */
+static unsigned int os_timer_timeout = 5*1000;	/* period in ms */
+/* timer callback: log the tick and self-rearm for the next period */
+static void test_timer_expired(struct timer_list *unused)
+{
+	mod_timer(&test_timer, jiffies + msecs_to_jiffies(os_timer_timeout));
+
+//	gpio_set_value(drv_test.gpio3, gpio_get_value(drv_test.gpio3)^1);
+
+	pr_info("[TEST]Test timer arrived:%lu \n",
+		++test_timer_count);
+
+/*
+	if(test_timer_count&1)
+		test_led_on();
+	else
+		test_led_off();
+*/
+}
+
+/*
+ * /sys/zte/test/os_timer write: "1" starts the periodic test timer,
+ * anything else stops it and resets the tick counter.
+ *
+ * Fix: the parse result was not acted on before -- on a failed sscanf the
+ * uninitialized value was still used. Reject malformed input up front.
+ */
+static ssize_t os_timer_store(struct kobject *kobj, struct kobj_attribute *attr,
+			const char *buf, size_t n)
+{
+ unsigned int temp;
+
+ if(sscanf(buf, "%u", &temp) != 1)
+ return -EINVAL;
+
+ pr_info("temp=%d", temp);
+
+ if(temp == 1)
+ {
+ mod_timer(&test_timer, jiffies + msecs_to_jiffies(os_timer_timeout));
+ }
+ else
+ {
+ del_timer(&test_timer);
+ test_timer_count = 0;
+ }
+
+ return n;
+}
+
+zte_attr(os_timer);
+
+/*=============================================================================
+ *======== /sys/zte/test/timer ==============================================
+ *=============================================================================
+ */
+/*echo 0xXXXXXXXX > /sys/zte/test/reg_read*/
+#if ZX_PM_TEST
+/* /sys/zte/test/wake read: print a fixed marker value */
+static ssize_t wake_show(struct kobject *kobj, struct kobj_attribute *attr,
+			char *buf)
+{
+	char *s = buf;
+
+	s += sprintf(s, "0x%x\n", 0xaa55aa55);
+
+	return (s - buf);
+}
+
+/*
+ * /sys/zte/test/wake write: "1" holds the device awake (pm_stay_awake),
+ * "2" releases it (pm_relax); other values are ignored.
+ *
+ * Fix: a failed sscanf previously left temp uninitialized but still used.
+ */
+static ssize_t wake_store(struct kobject *kobj, struct kobj_attribute *attr,
+			const char *buf, size_t n)
+{
+ unsigned int temp;
+
+ if(sscanf(buf, "%u", &temp) != 1)
+ return -EINVAL;
+
+ pr_info("temp=%d", temp);
+
+ if(temp == 1)
+ {
+ pm_stay_awake(drv_test.dev);
+ }
+ else if(temp == 2)
+ {
+ pm_relax(drv_test.dev);
+ }
+
+#if 0
+ if(sscanf(buf, "%08x", &addr) != 1)
+ error = -EINVAL;
+
+ reg_vir_addr = ioremap(addr, 0x1000);
+ pr_info("reg[%08x]=%08x\n", addr, ioread32((void __iomem *)reg_vir_addr));
+
+ iounmap(reg_vir_addr);
+#endif
+ return n;
+}
+
+zte_attr(wake);
+#endif
+/*=============================================================================
+ *======== /sys/zte/test/spinlock ==============================================
+ *=============================================================================
+ */
+/*echo 0xXXXXXXXX > /sys/zte/test/spinlock*/
+#if ZX_SPINLOCK_TEST
+/* NOTE: ZX_SPINLOCK_TEST is 0 above, so this whole section is compiled out */
+void hw_spin_lock(u32 hwid);
+void hw_spin_unlock(u32 hwid);
+/* /sys/zte/test/spinlock read: intentionally empty output */
+static ssize_t spinlock_show(struct kobject *kobj, struct kobj_attribute *attr,
+			char *buf)
+{
+	char *s = buf;
+
+//	s += sprintf(s, "%s\n", "[TEST]Read register[0xXXXXXXXX] value~");
+
+	return (s - buf);
+}
+
+/* /sys/zte/test/spinlock write: currently only exercises of_irq_parse_one */
+static ssize_t spinlock_store(struct kobject *kobj, struct kobj_attribute *attr,
+			const char *buf, size_t n)
+{
+	int error = 0;
+	u32 temp;
+
+	{
+#if 0
+		int irq_base ;
+
+		pr_info("current irq=%d\n", irq);
+		irq_base = irq_alloc_descs(-1, 0, 11, 0);
+		pr_info("next irq=%d\n", irq_base);
+#endif
+		struct of_phandle_args out_irq;
+		int rc;
+
+		rc = of_irq_parse_one(drv_test.dev->of_node, 0, &out_irq);
+		pr_info("pcie irq=%d\n", rc);
+
+	}
+
+	sscanf(buf, "%u", &temp);
+	pr_info("spinlock store:%d\n", temp);
+#if 0
+	/* 1--lock 2--unlock */
+	if(temp == 1)
+	{
+		hw_spin_lock(7);
+		pr_info("spinlock lock ok!\n");
+	}
+	else if(temp == 2)
+	{
+		hw_spin_unlock(7);
+		pr_info("spinlock unlock ok!\n");
+	}
+#endif
+	return error ? error : n;
+}
+
+zte_attr(spinlock);
+#endif
+
+/*=============================================================================
+ *======== /sys/zte/test/reset ==============================================
+ *=============================================================================
+ */
+/* echo 1/0 > /sys/zte/test/reset */
+#if ZX_RESET_TEST
+/* /sys/zte/test/reset read: report the reset line's current status */
+static ssize_t reset_show(struct kobject *kobj, struct kobj_attribute *attr,
+			char *buf)
+{
+	char *s = buf;
+
+	s += sprintf(s, "%s %d\n", "reset signal status:", reset_control_status(drv_test.rst));
+
+	return (s - buf);
+}
+
+/*
+ * /sys/zte/test/reset write: "1" asserts the test reset line, "0" releases
+ * it.
+ *
+ * Fixes: (1) the "assert" branch called reset_control_deassert(), so both
+ * values released the line -- it now asserts as documented and logged;
+ * (2) a failed sscanf no longer acts on an uninitialized value.
+ */
+static ssize_t reset_store(struct kobject *kobj, struct kobj_attribute *attr,
+			const char *buf, size_t n)
+{
+ u32 temp;
+
+ if(sscanf(buf, "%u", &temp) != 1)
+ return -EINVAL;
+
+ /* 1--assert 0--deassert */
+ if(temp == 1)
+ {
+ reset_control_assert(drv_test.rst);
+
+ pr_info("reset signal assert!\n");
+ }
+ else if(temp == 0)
+ {
+ reset_control_deassert(drv_test.rst);
+
+ pr_info("reset signal release!\n");
+ }
+
+ return n;
+}
+
+zte_attr(reset);
+#endif
+/*=============================================================================
+ *======== /sys/zte/test/gpio ============================================
+ *=============================================================================
+ */
+/* echo 1/0 > /sys/zte/test/gpio */
+#if ZX_PINCTRL_TEST
+/* /sys/zte/test/gpio read: intentionally empty output */
+static ssize_t gpio_show(struct kobject *kobj, struct kobj_attribute *attr,
+			char *buf)
+{
+	char *s = buf;
+
+	return (s - buf);
+}
+
+/*
+ * /sys/zte/test/gpio write: 0 = drive low, 1 = drive high, 2 = switch to
+ * input and print the sampled value. Ignored before the test device probes.
+ *
+ * Fix: the sscanf result was unchecked, so malformed input acted on an
+ * uninitialized value.
+ */
+static ssize_t gpio_store(struct kobject *kobj, struct kobj_attribute *attr,
+			const char *buf, size_t n)
+{
+ u32 temp;
+
+ if(sscanf(buf, "%u", &temp) != 1)
+ return -EINVAL;
+
+ if (!drv_test.dev)
+ return 0;
+
+ /* 0-out_l 1-out_h 2-in and get */
+ if(temp == 0) {
+ gpio_direction_output(drv_test.gpio, 0);
+ pr_info("gpio out low");
+ }
+ else if(temp == 1) {
+ gpio_direction_output(drv_test.gpio, 1);
+ pr_info("gpio out high");
+ }
+ else if(temp == 2) {
+ gpio_direction_input(drv_test.gpio);
+ pr_info("gpio get value(%d) !\n",__gpio_get_value(drv_test.gpio));
+ }
+
+ return n;
+}
+
+zte_attr(gpio);
+#endif
+
+/*=============================================================================
+ *======== /sys/zte/test/pinctrl ============================================
+ *=============================================================================
+ */
+/* echo 1/0 > /sys/zte/test/pinctrl */
+#if ZX_PINCTRL_TEST
+/* /sys/zte/test/pinctrl read: dump gpio->irq mappings for gpio 0..15 */
+static ssize_t pinctrl_show(struct kobject *kobj, struct kobj_attribute *attr,
+			char *buf)
+{
+	char *s = buf;
+	int i;
+
+	for (i=0; i<16; i++)
+		printk("gpio_%d mapped irq to %d \n", i, gpio_to_irq(i));
+
+	return (s - buf);
+}
+
+/*
+ * /sys/zte/test/pinctrl write: select pin state0/state1/state2 by value.
+ *
+ * Fix: reject malformed input instead of branching on an uninitialized
+ * value when sscanf fails (consistent with the other test stores).
+ */
+static ssize_t pinctrl_store(struct kobject *kobj, struct kobj_attribute *attr,
+			const char *buf, size_t n)
+{
+ u32 temp;
+
+ if(sscanf(buf, "%u", &temp) != 1)
+ return -EINVAL;
+
+ /* temp --> pin state */
+ if(temp == 1)
+ {
+ if ( pinctrl_select_state(drv_test.pctrl, drv_test.state1) < 0) {
+ dev_err(drv_test.dev, "setting state1 failed\n");
+ }
+
+ pr_info("setting state1 !\n");
+ }
+ else if(temp == 0)
+ {
+ if ( pinctrl_select_state(drv_test.pctrl, drv_test.state0) < 0) {
+ dev_err(drv_test.dev, "setting state0 failed\n");
+ }
+
+ pr_info("setting state0 !\n");
+ }
+ else if(temp == 2)
+ {
+ if ( pinctrl_select_state(drv_test.pctrl, drv_test.state2) < 0) {
+ dev_err(drv_test.dev, "setting state2 failed\n");
+ }
+
+ pr_info("setting state2 !\n");
+ }
+
+ return n;
+}
+
+zte_attr(pinctrl);
+#endif
+
+/*=============================================================================
+ *======== /sys/zte/test/pd ==============================================
+ *=============================================================================
+ */
+/* echo 1/0 > /sys/zte/test/pd */
+#if ZX_PM_TEST
+/* /sys/zte/test/pd read: intentionally empty output */
+static ssize_t pd_show(struct kobject *kobj, struct kobj_attribute *attr,
+			char *buf)
+{
+	char *s = buf;
+
+	return (s - buf);
+}
+
+/*
+ * /sys/zte/test/pd write: "1" takes a runtime-PM reference (power on),
+ * "0" drops it (power off, via autosuspend when enabled). -ENXIO without
+ * CONFIG_PM.
+ *
+ * Fix: reject malformed input instead of branching on an uninitialized
+ * value when sscanf fails.
+ */
+static ssize_t pd_store(struct kobject *kobj, struct kobj_attribute *attr,
+			const char *buf, size_t n)
+{
+#ifdef CONFIG_PM
+ u32 temp;
+
+ if(sscanf(buf, "%u", &temp) != 1)
+ return -EINVAL;
+
+ /* 1--on 0--off */
+ if(temp == 1)
+ {
+ pm_runtime_get_sync(drv_test.dev);
+
+ pr_info("power on!\n");
+ }
+ else if(temp == 0)
+ {
+#if PM_RUNTIME_AUTO_TEST
+ pm_runtime_mark_last_busy(drv_test.dev);
+ pm_runtime_put_sync_autosuspend(drv_test.dev);
+#else
+ pm_runtime_put_sync(drv_test.dev);
+#endif
+ pr_info("power off!\n");
+ }
+ return n;
+#else
+ return -ENXIO;
+#endif
+}
+
+zte_attr(pd);
+#endif
+
+
+/*=============================================================================
+ *======== /sys/zte/test/clk ==============================================
+ *=============================================================================
+ */
+/* echo 1/0 > /sys/zte/test/clk */
+#if ZX_CLK_TEST
+/* /sys/zte/test/clk read: report the test clock's enable state and rate */
+static ssize_t clk_show(struct kobject *kobj, struct kobj_attribute *attr,
+			char *buf)
+{
+	char *s = buf;
+
+	s += sprintf(s, "%s %d\n", "clk enable status:", __clk_is_enabled(drv_test.clk));
+	s += sprintf(s, "%s %d\n", "clk rate:", clk_get_rate(drv_test.clk));
+
+	return (s - buf);
+}
+
+/*
+ * /sys/zte/test/clk write: "1" enables the test clock, "0" disables it,
+ * any other number is treated as a target rate in Hz.
+ *
+ * Fix: the sscanf result was unchecked; garbage could previously be passed
+ * to clk_set_rate() when the input failed to parse.
+ */
+static ssize_t clk_store(struct kobject *kobj, struct kobj_attribute *attr,
+			const char *buf, size_t n)
+{
+ u32 temp;
+
+ if(sscanf(buf, "%u", &temp) != 1)
+ return -EINVAL;
+
+ /* 1--on 0--off */
+ if(temp == 1) {
+ clk_enable(drv_test.clk);
+ }
+ else if(temp == 0) {
+ clk_disable(drv_test.clk);
+ } else {
+ clk_set_rate(drv_test.clk, temp);
+ }
+
+ return n;
+}
+
+zte_attr(clk);
+#endif
+
+/*=============================================================================
+ *======== /sys/zte/test/pm_qos ==============================================
+ *=============================================================================
+ */
+/* echo 1/0 > /sys/zte/test/pm_qos */
+#if ZX_PM_QOS_TEST
+static unsigned int pm_qos_test = 0;
+static ssize_t pm_qos_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ char *s = buf;
+ const char *mode[3] = {"normal", "performance", "powersave"};
+
+ s += sprintf(s, "pm_qos mode: %s\n", (pm_qos_test<=2) ? mode[pm_qos_test] : "unknown");
+
+ return (s - buf);
+}
+
+/*
+ * /sys/zte/test/pm_qos write: 0 = normal, 1 = performance, 2 = powersave;
+ * anything else is rejected.
+ *
+ * Fix: reject malformed input instead of branching on an uninitialized
+ * value when sscanf fails.
+ */
+static ssize_t pm_qos_store(struct kobject *kobj, struct kobj_attribute *attr,
+			const char *buf, size_t n)
+{
+ u32 temp;
+
+ if(sscanf(buf, "%u", &temp) != 1)
+ return -EINVAL;
+
+ if(temp == 1) {
+ freq_performance(FREQ_OWNER_MANAGER);
+ } else if(temp == 2) {
+ freq_powersave(FREQ_OWNER_MANAGER);
+ } else if(temp == 0) {
+ freq_normal(FREQ_OWNER_MANAGER);
+ } else {
+ return -EINVAL;
+ }
+
+ pm_qos_test = temp;
+
+ return n;
+}
+
+zte_attr(pm_qos);
+#endif
+
+
+/* show is defined elsewhere in this file; forward-declared for zte_attr */
+static ssize_t rpmsg_log_show(struct kobject *kobj, struct kobj_attribute *attr,
+			char *buf);
+
+/* /sys/zte/test/rpmsg_log write: accepted but intentionally ignored */
+static ssize_t rpmsg_log_store(struct kobject *kobj, struct kobj_attribute *attr,
+			const char *buf, size_t n)
+{
+	int error = 0;
+
+	return error ? error : n;
+}
+zte_attr(rpmsg_log);
+
+/*test group: every attribute exposed under /sys/zte/test, gated by the
+ * same compile-time switches as their show/store definitions */
+static struct attribute * g[] =
+{
+	&os_timer_attr.attr,
+#if ZX_PM_TEST
+	&wake_attr.attr,
+#endif
+#if ZX_SPINLOCK_TEST
+	&spinlock_attr.attr,
+#endif
+#if ZX_RESET_TEST
+	&reset_attr.attr,
+#endif
+#if ZX_GPIO_TEST
+	&gpio_attr.attr,
+#endif
+#if ZX_PINCTRL_TEST
+	&pinctrl_attr.attr,
+#endif
+#if ZX_PM_TEST
+	&pd_attr.attr,
+#endif
+#if ZX_CLK_TEST
+	&clk_attr.attr,
+#endif
+#if ZX_PM_QOS_TEST
+	&pm_qos_attr.attr,
+#endif
+	&rpmsg_log_attr.attr,
+	NULL,
+};
+
+static struct attribute_group zte_test_attr_group =
+{
+	.attrs = g,
+};
+
+/**
+ * 1. create sysfs "/sys/zte/test" and populate the test attribute group
+ * 2. arm the periodic test timer and register the test platform driver
+ */
+static int __init zx_test_init(void)
+{
+	int ret;
+
+	zx_test_kobj = kobject_create_and_add("test", zx_root_kobj);
+	if (!zx_test_kobj)
+		return -ENOMEM;
+
+	ret = sysfs_create_group(zx_test_kobj, &zte_test_attr_group);
+	if (ret)
+	{
+		pr_info("[DEBUG] sysfs_create_group ret %d\n", ret);
+		return ret;
+	}
+
+	timer_setup(&test_timer, test_timer_expired, 0);
+
+	pr_info("[DEBUG] create test sysfs interface OK.\n");
+
+	return platform_driver_register(&zx_test_driver);
+}
+
+/*
+#define SC_LIBPM_LPMODE_CPU_HALT		(0)
+#define SC_LIBPM_LPMODE_CPU_CLKOFF		(1)
+#define SC_LIBPM_LPMODE_CPU_POWEROFF	(2)
+*/
+/* current low-power mode, default CPU_POWEROFF (2); see table above */
+static int zx_lp_mode = 2;
+/* accessor used by the suspend path; value range 0..2 */
+int pm_get_lpmode(void)
+{
+	return zx_lp_mode;
+}
+
+/* /sys/zte/power/lp_mode read: print the current low-power mode */
+static ssize_t lp_mode_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "lp_mode:%d\n", zx_lp_mode);
+}
+
+/*
+ * /sys/zte/power/lp_mode write: accept 0 (halt), 1 (clkoff) or 2
+ * (poweroff); anything else is -EINVAL.
+ *
+ * Fix: when sscanf failed, lp_mode stayed uninitialized yet could still be
+ * compared and assigned to zx_lp_mode. Parse failures now return early.
+ */
+static ssize_t lp_mode_store(struct device *dev, struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+ unsigned int lp_mode;
+
+ if(sscanf(buf, "%u", &lp_mode) != 1)
+ return -EINVAL;
+
+// pr_info("%s: lp_mode=%d\n", __func__, lp_mode);
+ if (lp_mode > 2)
+ return -EINVAL;
+
+ zx_lp_mode = lp_mode;
+
+ return count;
+}
+
+/* lp_mode attribute group, exposed under /sys/zte/power by zx_debug_init */
+static DEVICE_ATTR(lp_mode, 0600, lp_mode_show, lp_mode_store);
+static struct attribute *zx_pm_attributes[] = {
+	&dev_attr_lp_mode.attr,
+	NULL,
+};
+
+static const struct attribute_group zx_pm_attribute_group = {
+	.attrs = (struct attribute **) zx_pm_attributes,
+};
+
+#ifdef CONFIG_PM_SLEEP_DEBUG
+extern bool pm_debug_messages_on;
+#endif
+
+/**
+ * 1¡¢create sysfs "/sys/zte"
+ * 2¡¢call other debug modules
+ */
+int __init zx_dma_test_init(void);
+/*
+ * Create the /sys/zte root and /sys/zte/power kobjects, then invoke the
+ * various test initializers. Runs at late_initcall time.
+ */
+static int __init zx_debug_init(void)
+{
+	pr_info("[DEBUG] create zte sysfs interface OK.\n");
+	zx_root_kobj = kobject_create_and_add("zte", NULL);
+	if (!zx_root_kobj)
+		return -ENOMEM;
+
+	zx_pm_kobj = kobject_create_and_add("power", zx_root_kobj);
+	if (!zx_pm_kobj)
+		return -ENOMEM;
+	sysfs_create_group(zx_pm_kobj, &zx_pm_attribute_group);
+
+	zx_test_init();
+
+	zx_dma_test_init();
+
+/*	zx_clk_test_init(); */
+
+	zx_icp_test_init();
+
+	zx_timer_test_init();
+
+#ifdef CONFIG_PM_SLEEP_DEBUG
+	pm_debug_messages_on = true;
+#endif
+
+	return 0;
+}
+
+late_initcall(zx_debug_init);
+
+void __init zx29_clock_init(void);
+
+/* cached iomapped bases for the chip's shared register blocks */
+struct zx297520v3_chip_info {
+	void __iomem *stdcrm_base;	/* standby CRM */
+	void __iomem *socsys_base;	/* SoC system registers */
+	void __iomem *sflock_base;	/* hardware spinlock block */
+	void __iomem *apcrm_base;	/* AP CRM */
+};
+
+static struct zx297520v3_chip_info zx_chip_info;
+
+/* accessor for the standby CRM base (mapped in spinlock_init) */
+void __iomem *get_stdcrm_base(void)
+{
+	return zx_chip_info.stdcrm_base;
+}
+
+/* accessor for the SoC system base (mapped in socsys_init) */
+void __iomem *get_socsys_base(void)
+{
+	return zx_chip_info.socsys_base;
+}
+
+/*
+ * Map the standby-CRM and hardware-spinlock register banks from the
+ * device tree and hand both bases to zx_spinlock_init().
+ *
+ * BUG()s when either DT node is missing; the returns after BUG() are
+ * only reached if BUG() is compiled to fall through.
+ */
+static int spinlock_init(void)
+{
+	struct device_node *np;
+	void __iomem *param[2];
+
+	np = of_find_compatible_node(NULL, NULL, "zte,zx297520v3-standby");
+	if (!np)
+	{
+		BUG();
+		return -ENODEV;
+	}
+
+	zx_chip_info.stdcrm_base = of_iomap(np, 0);
+	WARN(!zx_chip_info.stdcrm_base, "unable to map stdcrm_base registers\n");
+
+
+	np = of_find_compatible_node(NULL, NULL, "zte,zx29_spinlock");
+	if (!np)
+	{
+		BUG();
+		return -ENODEV;
+	}
+
+	zx_chip_info.sflock_base = of_iomap(np, 0);
+	WARN(!zx_chip_info.sflock_base, "unable to map sflock_base registers\n");
+
+
+	/* pass {stdcrm, sflock} bases to the spinlock layer */
+	param[0] = zx_chip_info.stdcrm_base;
+	param[1] = zx_chip_info.sflock_base;
+	zx_spinlock_init(param);
+
+	return 0;
+}
+
+/*
+ * Map the SoC-system register bank from the device tree into
+ * zx_chip_info.socsys_base.  BUG()s when the DT node is missing.
+ */
+static void socsys_init(void)
+{
+	struct device_node *np;
+
+	np = of_find_compatible_node(NULL, NULL, "zte,zx297520v3-socsys");
+	if (!np)
+	{
+		BUG();
+	}
+
+	zx_chip_info.socsys_base = of_iomap(np, 0);
+	WARN(!zx_chip_info.socsys_base, "unable to map socsys_base registers\n");
+}
+
+/*---------------------------------------------------------*/
+/* AP CRM trigger-mode registers: 2 bits per interrupt, 16 per 32-bit word. */
+#define AP_INT_MODE_BASE (zx_chip_info.apcrm_base + 0x70)
+#define AP_PPI_MODE_REG (zx_chip_info.apcrm_base + 0xA0)
+
+#define INT_HIGHLEVEL (0x0) /* 00: high level */
+#define INT_LOWLEVEL (0x1) /* 01: low level */
+#define INT_POSEDGE (0x2) /* 10: raise edge */
+#define INT_NEGEDGE (0x3) /* 11: fall edge */
+
+/*
+ * Program the trigger mode of one hardware interrupt line in the AP
+ * CRM mode registers.  Returns 0 on success, -EINVAL for trigger
+ * types the hardware cannot encode.
+ */
+static int zx29_int_set_type(unsigned int hwirq, unsigned int type)
+{
+	unsigned int data_tmp=0;
+	unsigned int srctype=0;
+	unsigned int reg_index=0,pos_index=0;
+
+	switch (type) {
+	case IRQ_TYPE_LEVEL_HIGH:
+		srctype = INT_HIGHLEVEL;
+		break;
+	case IRQ_TYPE_EDGE_RISING:
+		srctype = INT_POSEDGE;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		srctype = INT_LOWLEVEL;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		srctype = INT_NEGEDGE;
+		break;
+	default:
+		return -EINVAL;
+	}
+	/* 16 lines per register, 2 mode bits per line */
+	reg_index=(hwirq)/16;
+	pos_index=((hwirq)%16)*2;
+
+	/* read-modify-write the 2-bit field for this line */
+	data_tmp=zx_read_reg(AP_INT_MODE_BASE+reg_index*4);
+	data_tmp &= ~(3<<pos_index);
+	data_tmp |= srctype<<pos_index;
+	zx_write_reg(AP_INT_MODE_BASE+reg_index*4, data_tmp);
+
+	return 0;
+}
+
+/*
+ * Program the reset-default trigger type for one SPI line: a handful
+ * of timer/RF sources are rising-edge, the MCU LCD line is low-level,
+ * and everything else defaults to high-level.
+ */
+static void int_set_type_default(unsigned int line)
+{
+	unsigned int int_type;
+
+	switch (line) {
+	case WDT_INT:
+	case AP_TIMER0_INT:
+	case GSM_RFSSCR_INT:
+	case GSM_RFSSCT_INT:
+	case AP_TIMER3_INT:
+	case AP_TIMER4_INT:
+	case SYS_COUNTER_INT:
+		int_type = IRQ_TYPE_EDGE_RISING;
+		break;
+	case MCU_LCD_INT:
+		int_type = IRQ_TYPE_LEVEL_LOW;
+		break;
+	default:
+		int_type = IRQ_TYPE_LEVEL_HIGH;
+		break;
+	}
+
+	zx29_int_set_type(line, int_type);
+}
+
+/*
+ * Map the AP CRM bank, program the PPI mode register and set the
+ * default trigger type for every SPI line.  BUG()s when the DT node
+ * is missing.
+ */
+static void apcrm_init(void)
+{
+	int i;
+	struct device_node *np;
+
+	np = of_find_compatible_node(NULL, NULL, "zte,zx297520v3-apcrm");
+	if (!np)
+	{
+		BUG();
+	}
+
+	zx_chip_info.apcrm_base = of_iomap(np, 0);
+	WARN(!zx_chip_info.apcrm_base, "unable to map apcrm_base registers\n");
+
+	/* NOTE(review): magic PPI mode pattern — meaning not visible here */
+	zx_write_reg(AP_PPI_MODE_REG, 0x55545555);
+
+	for (i=0; i<IRQ_ZX297520V3_SPI_NUM; i++)
+		int_set_type_default(i);
+
+}
+
+/* Early bring-up of spinlock, socsys and AP CRM register blocks. */
+void early_drv_init(void)
+{
+	spinlock_init();
+
+	socsys_init();
+
+	apcrm_init();
+}
+
+//early_initcall(early_drv_init);
+
+/*-------------------------------------------------------------------*/
+#define ZX_PM_MINOR (235)
+
+/* Bitmask of wake events userspace subscribed to via SC_PM_WL_SET. */
+static unsigned int pm_wl_mask = 0;
+/* Last wake event, already filtered through pm_wl_mask. */
+static unsigned int pm_wl_event = 0;
+/* Duration of the last sleep, reported to userspace via read(). */
+static u64 pm_sleep_time;
+
+static DECLARE_WAIT_QUEUE_HEAD(zx_pm_wait);
+static bool zx_pm_wake_flag = false;
+static bool zx_in_suspend = false;
+
+/*
+ * Record a wake event (as a masked bit).  NOTE(review): this
+ * overwrites rather than accumulates pm_wl_event, so only the most
+ * recent event survives until the next read — confirm intended.
+ */
+void pm_wl_set_event(unsigned int wake_event)
+{
+	if (wake_event >=PM_WL_EVENT_END)
+		return;
+
+	pm_wl_event = BIT(wake_event)&pm_wl_mask;
+}
+
+/* Return the last (already masked) wake event. */
+static unsigned int pm_wl_get_event(void)
+{
+// return pm_wl_event&pm_wl_mask;
+	return pm_wl_event;
+}
+
+/* Record the duration of the last sleep (consumed by zx_pm_read). */
+void pm_set_sleeptime(u64 sleep_time)
+{
+	pm_sleep_time = sleep_time;
+}
+
+/* Return the recorded duration of the last sleep. */
+static u64 pm_get_sleeptime(void)
+{
+	return pm_sleep_time;
+}
+
+/* /dev/sc_pm open — no per-open state to set up. */
+static int zx_pm_open(struct inode *inode, struct file *filp)
+{
+	int error = 0;
+
+	return error;
+}
+
+/* /dev/sc_pm release — nothing to tear down. */
+static int zx_pm_release(struct inode *inode, struct file *filp)
+{
+	return 0;
+}
+
+/* Mark entry into the suspend path (read back via zx_suspend_query). */
+void zx_enter_suspend(void)
+{
+	zx_in_suspend = true;
+}
+
+/* Mark exit from the suspend path. */
+void zx_exit_suspend(void)
+{
+	zx_in_suspend = false;
+}
+
+/* True between zx_enter_suspend() and zx_exit_suspend(). */
+bool zx_suspend_query(void)
+{
+	return zx_in_suspend;
+}
+
+/* Wake a reader blocked in zx_pm_read() once a masked event is pending. */
+void pm_notify_wake_event(void)
+{
+	if (pm_wl_event) {
+		zx_pm_wake_flag = true;
+		wake_up_interruptible(&zx_pm_wait);
+	}
+}
+
+/**
+ * zx_pm_read - blocking read delivering the last sleep duration and
+ * wake event to userspace as a struct sc_pm_info.
+ *
+ * Sleeps (freezable) until pm_notify_wake_event() fires.  Returns
+ * sizeof(struct sc_pm_info) on success, -EFAULT when the user buffer
+ * cannot be written.
+ */
+static ssize_t zx_pm_read(struct file *filp, char __user *buf,
+		size_t count, loff_t *offp)
+{
+	struct sc_pm_info pm_info;
+
+	zx_pm_wake_flag = false;
+	wait_event_freezable(zx_pm_wait, zx_pm_wake_flag);
+
+	pm_info.sleep_time = pm_get_sleeptime();
+	pm_info.wake_event = pm_wl_get_event();
+
+	/*
+	 * copy_to_user() returns the number of bytes NOT copied (always
+	 * >= 0), so the original "ret < 0" test could never report a
+	 * fault.  Any nonzero remainder means -EFAULT.
+	 */
+	if (copy_to_user((void __user *)buf, &pm_info, sizeof(pm_info)))
+		return -EFAULT;
+
+	return sizeof(pm_info);
+}
+
+/* Stub — event translation not implemented yet; always returns 0. */
+static unsigned int pm_event_convert(unsigned int req_event)
+{
+	return 0;
+}
+
+/*
+ * /dev/sc_pm ioctl: maintain the wake-event subscription mask.
+ *   SC_PM_WL_SET   - OR event bits into pm_wl_mask
+ *   SC_PM_WL_CLEAR - clear event bits from pm_wl_mask
+ *   SC_PM_WL_GET   - copy pm_wl_mask to userspace
+ * Returns 0 or a negative errno (-ENOTTY for bad cmd/bits).
+ */
+static long zx_pm_ioctl(struct file *filp, unsigned int cmd,
+			unsigned long arg)
+{
+	int error = 0;
+	unsigned int wl_event;
+
+	switch (cmd) {
+
+	case SC_PM_WL_SET:
+		wl_event = (unsigned int)arg;
+		if (wl_event >= BIT(PM_WL_EVENT_END)) {
+			error = -ENOTTY;
+			break;
+		}
+		pm_wl_mask |= wl_event;
+		/* pr_info("%s:pm_wl_mask(0x%x) user_set(0x%x)\n", __func__, pm_wl_mask, wl_event); */
+		break;
+
+	case SC_PM_WL_CLEAR:
+		wl_event = (unsigned int)arg;
+		if (wl_event >= BIT(PM_WL_EVENT_END)) {
+			error = -ENOTTY;
+			break;
+		}
+		pm_wl_mask &= (~wl_event);
+		/* pr_info("%s:pm_wl_mask(0x%x) user_clr(0x%x)\n", __func__, pm_wl_mask, wl_event); */
+		break;
+
+	case SC_PM_WL_GET:
+		/* pr_info("%s:pm_wl_mask(0x%x)\n", __func__, pm_wl_mask); */
+		error = put_user(pm_wl_mask, (unsigned int __user *)arg);
+		break;
+
+	default:
+		error = -ENOTTY;
+		break;
+	}
+
+	return error;
+}
+
+#ifdef CONFIG_COMPAT
+
+/*
+ * 32-bit compat ioctl: rewrite the size field of pointer-sized
+ * commands to the native pointer width, then forward to zx_pm_ioctl.
+ */
+static long
+zx_pm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	BUILD_BUG_ON(sizeof(loff_t) != sizeof(compat_loff_t));
+
+
+	if (_IOC_TYPE(cmd) != SC_PM_IOC_MAGIC)
+		return -ENOTTY;
+
+	if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
+		cmd &= ~IOCSIZE_MASK;
+		cmd |= sizeof(char *) << IOCSIZE_SHIFT;
+	}
+
+	return zx_pm_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+
+}
+
+#endif /* CONFIG_COMPAT */
+
+static const struct file_operations zx_pm_fops = {
+	.open = zx_pm_open,
+	.release = zx_pm_release,
+	.read = zx_pm_read,
+/* .write = zx_pm_write,*/
+	.llseek = no_llseek,
+	.unlocked_ioctl = zx_pm_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = zx_pm_compat_ioctl,
+#endif
+};
+
+/* /dev/sc_pm, fixed minor ZX_PM_MINOR (235). */
+static struct miscdevice zx_pm_device = {
+	.minor = ZX_PM_MINOR,
+	.name = "sc_pm",
+	.fops = &zx_pm_fops,
+};
+
+/* Register the sc_pm misc device; returns misc_register()'s result. */
+static int zx_pm_device_init(void)
+{
+	return misc_register(&zx_pm_device);
+};
+
+/*-------------------------------------------------------------------*/
+#define ZX_IRQ_MINOR (237)
+#define SC_LIBIRQ_MAX (16)
+
+/*
+ * line : request
+ * type : driver use
+ * wake_flag : for wait
+ *
+ */
+/* Per external-interrupt-line bookkeeping for the sc_irqN devices. */
+struct libirq_info {
+	unsigned int line;	/* userspace line index 0..15 */
+	unsigned int hirq;	/* hardware irq (EX0_INT.. / EX8_INT..) */
+	unsigned int virq;	/* mapped Linux virq once installed */
+	unsigned int type;	/* IRQ_TYPE_* currently programmed */
+	int wake;		/* wake-source enable state */
+	unsigned int used;	/* 1 while an irq handler is installed */
+	bool wake_flag;		/* wakes sleepers in zx_irq_wait() */
+	wait_queue_head_t wait;
+	char name[16];		/* "sc_irqN" */
+};
+
+struct libirq_context {
+	unsigned int pending;	/* bitmask of fired lines, guarded by lock */
+	spinlock_t lock;
+	struct pinctrl *pctrl;
+	struct pinctrl_state *state[SC_LIBIRQ_MAX];
+
+	struct device_node *np;		/* pcu interrupt controller */
+	struct device_node *ext8in1_np;	/* ext8in1 interrupt controller */
+	struct libirq_info info[SC_LIBIRQ_MAX];
+};
+static struct libirq_context irq_ctx = {0};
+
+#define line_used(l) irq_ctx.info[l].used
+
+/* Map a userspace line number (0..15) onto its hardware external irq. */
+static int irq_line_convert(int line)
+{
+	return (line >= 8) ? (EX8_INT + line - 8) : (EX0_INT + line);
+}
+
+/* Translate the userspace trigger encoding (0..3) to IRQ_TYPE_*. */
+static unsigned int irq_type_convert(unsigned int req_type)
+{
+	switch (req_type) {
+	case 0:
+		return IRQ_TYPE_EDGE_RISING;
+	case 1:
+		return IRQ_TYPE_EDGE_FALLING;
+	case 2:
+		return IRQ_TYPE_LEVEL_HIGH;
+	case 3:
+		return IRQ_TYPE_LEVEL_LOW;
+	default:
+		return IRQ_TYPE_NONE;
+	}
+}
+
+/* Map a line behind the ext8in1 controller (2-cell specifier). */
+static int zx_irq_map_ext8in1(int hirq, unsigned int type)
+{
+	struct of_phandle_args args;
+
+	args.args_count = 2;
+	args.args[0] = hirq;
+	args.args[1] = type;
+	args.np = irq_ctx.ext8in1_np;
+
+	return irq_create_of_mapping(&args);
+}
+
+/*
+ * Create a Linux virq for a hardware line: lines EX8..EX15 route
+ * through the ext8in1 controller, the rest through the pcu
+ * (3-cell GIC-style specifier).  Returns the virq or 0 on failure.
+ */
+static int zx_irq_map(int hirq, unsigned int type)
+{
+	struct of_phandle_args args;
+
+	if (hirq>=EX8_INT && hirq<=EX15_INT)
+		return zx_irq_map_ext8in1(hirq, type);
+
+	args.args_count = 3;
+	args.args[0] = 0;
+	args.args[1] = hirq;
+	args.args[2] = type;
+	args.np = irq_ctx.np;
+
+	return irq_create_of_mapping(&args);
+}
+
+/* Block (freezable) until the line's ISR sets its wake_flag. */
+static void zx_irq_wait(unsigned int line)
+{
+	struct libirq_info *info = &(irq_ctx.info[line]);
+
+	info->wake_flag = false;
+	wait_event_freezable(info->wait, info->wake_flag);
+}
+
+/* Release a waiter blocked in zx_irq_wait() for this line. */
+static void zx_irq_wakeup(unsigned int line)
+{
+	struct libirq_info *info = &(irq_ctx.info[line]);
+
+	info->wake_flag = true;
+	wake_up_interruptible(&info->wait);
+}
+
+/*
+ * Interrupt handler shared by all sc_irqN lines: record the line in
+ * the pending bitmask and wake any reader blocked in SC_IRQ_GET_STATUS.
+ */
+static irqreturn_t zx_irq_isr(int irq, void *p)
+{
+	struct libirq_info *info = (struct libirq_info *)p;
+	unsigned int line = info->line;
+	unsigned long flags;
+
+	if(line_used(line)) {
+		/*
+		 * irq_ctx.pending is also modified from ioctl context under
+		 * irq_ctx.lock; the original unlocked read-modify-write here
+		 * could race with SC_IRQ_CLEAR_STATUS and lose an event.
+		 */
+		spin_lock_irqsave(&irq_ctx.lock, flags);
+		irq_ctx.pending |= BIT(line);
+		spin_unlock_irqrestore(&irq_ctx.lock, flags);
+
+		zx_irq_wakeup(line);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * /dev/sc_irqN open: derive the line index from the device minor and
+ * stash its libirq_info in private_data for the ioctl handlers.
+ */
+static int zx_irq_open(struct inode *inode, struct file *filp)
+{
+	int error = 0;
+	unsigned int line;
+
+	line = iminor(inode) - ZX_IRQ_MINOR;
+
+	filp->private_data = &(irq_ctx.info[line]);
+
+	return error;
+}
+
+/*
+ * /dev/sc_irqN release: if a handler is still installed, disarm and
+ * free it so the line returns to the uninstalled state.
+ */
+static int zx_irq_release(struct inode *inode, struct file *filp)
+{
+	struct libirq_info *info;
+
+	info = (struct libirq_info *)filp->private_data;
+
+	if(line_used(info->line)) {
+		irq_set_irq_type(info->virq, IRQ_TYPE_NONE);
+		free_irq(info->virq, info);
+		line_used(info->line) = 0;
+	}
+
+	filp->private_data = NULL;
+
+	return 0;
+}
+
+/*
+ * /dev/sc_irqN ioctl dispatcher.
+ *   SC_IRQ_INSTALL      - map the hw line to a virq, select the pinctrl
+ *                         state and request the handler
+ *   SC_IRQ_UNINSTALL    - disarm and free the handler
+ *   SC_IRQ_SET_TYPE     - reprogram the trigger type
+ *   SC_IRQ_SET/GET_WAKE - configure / query wake-source behaviour
+ *   SC_IRQ_GET_STATUS   - block until the line fires, then return the
+ *                         pending bitmask
+ *   SC_IRQ_CLEAR_STATUS - clear this line's pending bit
+ * Returns 0 or a negative errno.
+ */
+static long zx_irq_ioctl(struct file *filp, unsigned int cmd,
+			unsigned long arg)
+{
+	int error = 0;
+	struct libirq_info *info;
+	int virq;
+	unsigned int type;
+	unsigned int en;
+	int ret = 0;
+	unsigned long flags;
+
+	info = (struct libirq_info *)filp->private_data;
+
+	switch (cmd) {
+
+	case SC_IRQ_INSTALL:
+
+		if(line_used(info->line))
+			return -EEXIST;
+
+		type = irq_type_convert((unsigned int)arg);
+		virq = zx_irq_map(info->hirq, type);
+		if (virq <= 0) {
+			pr_err("%s:zx_irq_map %d failed %d(%d)\n", __func__, info->line, virq, type);
+			return -ENOMEM;
+		}
+		/* pr_err("%s:zx_irq_map %d %d %d\n", __func__, info->line, virq, info->hirq);*/
+
+		/* route the pad to the interrupt function before arming it */
+		if ( pinctrl_select_state(irq_ctx.pctrl, irq_ctx.state[info->line]) < 0) {
+			pr_err("%s:setting state%d failed\n", __func__, info->line);
+			return -ENODEV;
+		}
+
+		ret = request_irq(virq, zx_irq_isr, 0, info->name, info);
+		if(ret<0) {
+			pr_err("%s:request_irq %d failed %d\n", __func__, info->line, type);
+			return ret;
+		}
+
+		info->virq = virq;
+		info->type = type;
+
+		line_used(info->line) = 1;
+
+		/* pr_info("%s:install(%d) hirq(%d) virq(%d) type(%d)\n", __func__, info->line, info->hirq, info->virq, info->type);*/
+		break;
+
+	case SC_IRQ_UNINSTALL:
+
+		if(!line_used(info->line))
+			return -ENODEV;
+
+		irq_set_irq_type(info->virq, IRQ_TYPE_NONE);
+
+		free_irq(info->virq, info);
+
+		line_used(info->line) = 0;
+
+		/* pr_info("%s:uninstall(%d)\n", __func__, info->line);*/
+		break;
+
+	case SC_IRQ_SET_TYPE:
+
+		if(!line_used(info->line))
+			return -ENODEV;
+
+		type = irq_type_convert((unsigned int)arg);
+		ret = irq_set_irq_type(info->virq, type);
+		if (ret)
+			return ret;
+
+		info->type = type;
+
+		/* pr_info("%s:set_type(%d) virq(%d) type(%d)\n", __func__, info->line, info->virq, info->type); */
+
+		break;
+
+	case SC_IRQ_SET_WAKE:
+
+		if(!line_used(info->line))
+			return -ENODEV;
+
+		en = (unsigned int)arg;
+		ret = irq_set_irq_wake(info->virq, en);
+		if (ret)
+			return ret;
+
+		info->wake = en;
+
+		/* pr_info("%s:set_wake(%d) virq(%d) wake(%d)\n", __func__, info->line, info->virq, info->wake); */
+
+		break;
+
+	case SC_IRQ_GET_WAKE:
+
+		if(!line_used(info->line))
+			return -ENODEV;
+
+		error = put_user(info->wake, (unsigned int __user *)arg);
+
+		/* pr_info("%s:get_wake(%d) virq(%d) wake(%d)\n", __func__, info->line, info->virq, info->wake); */
+
+		break;
+
+	case SC_IRQ_GET_STATUS:
+
+		if(!line_used(info->line))
+			return -ENODEV;
+
+		/* sleeps until zx_irq_isr() fires for this line */
+		zx_irq_wait(info->line);
+
+		spin_lock_irqsave(&irq_ctx.lock, flags);
+		error = put_user(irq_ctx.pending, (unsigned int __user *)arg);
+		spin_unlock_irqrestore(&irq_ctx.lock, flags);
+
+		/* pr_debug("%s:get_status(%d) virq(%d) wake(%d) pending(0x%x)\n",
+			__func__, info->line, info->virq, info->wake, irq_ctx.pending); */
+
+		break;
+
+	case SC_IRQ_CLEAR_STATUS:
+
+		if(!line_used(info->line))
+			return -ENODEV;
+
+		spin_lock_irqsave(&irq_ctx.lock, flags);
+		irq_ctx.pending &= ~(1 << info->line);
+		spin_unlock_irqrestore(&irq_ctx.lock, flags);
+
+		/* pr_info("%s:clear_status(%d)\n", __func__, info->line); */
+
+		break;
+
+	default:
+		error = -ENOTTY;
+		break;
+	}
+
+	return error;
+}
+
+#ifdef CONFIG_COMPAT
+
+/*
+ * 32-bit compat ioctl: rewrite pointer-sized command sizes to the
+ * native width, then forward to zx_irq_ioctl.
+ */
+static long
+zx_irq_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	BUILD_BUG_ON(sizeof(loff_t) != sizeof(compat_loff_t));
+
+
+	if (_IOC_TYPE(cmd) != SC_IRQ_IOC_MAGIC)
+		return -ENOTTY;
+
+	if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
+		cmd &= ~IOCSIZE_MASK;
+		cmd |= sizeof(char *) << IOCSIZE_SHIFT;
+	}
+
+	return zx_irq_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+
+#endif /* CONFIG_COMPAT */
+
+static const struct file_operations zx_irq_fops = {
+	.open = zx_irq_open,
+	.release = zx_irq_release,
+	.llseek = no_llseek,
+	.unlocked_ioctl = zx_irq_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = zx_irq_compat_ioctl,
+#endif
+};
+
+static struct miscdevice zx_irq_device[SC_LIBIRQ_MAX] = {0};
+
+/**
+ * zx_irq_device_init - register the 16 "sc_irqN" misc devices, resolve
+ * their pinctrl states and cache the pcu/ext8in1 interrupt-controller
+ * device-tree nodes.  Returns 0 on success or a negative errno.
+ */
+static int zx_irq_device_init(struct platform_device *pdev)
+{
+	int i;
+	int ret = 0;
+	struct miscdevice *misc_dev;
+	struct device_node *np;
+
+	irq_ctx.pctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR(irq_ctx.pctrl)) {
+		dev_warn(&pdev->dev, "Failed to get sc_irq pins");
+		irq_ctx.pctrl = NULL;
+		return -ENODEV;
+	}
+
+	for (i = 0; i < SC_LIBIRQ_MAX; i++) {
+		/*
+		 * Build the device name in its persistent per-line slot.
+		 * The original code pointed every misc_dev->name at the
+		 * same on-stack buffer, which was rewritten each loop
+		 * iteration and went out of scope on return, leaving the
+		 * misc core with dangling, identical name pointers.
+		 * (The original also used comma operators where semicolons
+		 * were clearly intended.)
+		 */
+		snprintf(irq_ctx.info[i].name, sizeof(irq_ctx.info[i].name),
+			 "sc_irq%d", i);
+		irq_ctx.info[i].line = i;
+		irq_ctx.info[i].hirq = irq_line_convert(i);
+		init_waitqueue_head(&irq_ctx.info[i].wait);
+
+		misc_dev = &zx_irq_device[i];
+		misc_dev->minor = ZX_IRQ_MINOR + i;
+		misc_dev->name = irq_ctx.info[i].name;
+		misc_dev->fops = &zx_irq_fops;
+
+		ret = misc_register(misc_dev);
+		if (ret) {
+			pr_err("%s:register dev(%d) failed:%d \n", __func__, i, ret);
+			return ret;
+		}
+
+		irq_ctx.state[i] = pinctrl_lookup_state(irq_ctx.pctrl,
+							irq_ctx.info[i].name);
+		if (IS_ERR(irq_ctx.state[i])) {
+			dev_err(&pdev->dev, "TEST: missing state(%s)\n",
+				irq_ctx.info[i].name);
+			return -ENODEV;
+		}
+	}
+
+	np = of_find_compatible_node(NULL, NULL, "zte,zx297520v3-pcu");
+	if (NULL == np) {
+		pr_err("Can't find interrupt-controller \n");
+		return -ENODEV;
+	}
+	irq_ctx.np = np;
+
+	np = of_find_compatible_node(NULL, NULL, "zte,zx297520v3-ext8in1");
+	if (NULL == np) {
+		pr_err("Can't find ext8in1 interrupt-controller \n");
+		return -ENODEV;
+	}
+	irq_ctx.ext8in1_np = np;
+
+	spin_lock_init(&irq_ctx.lock);
+
+	return ret;
+};
+
+/*
+ * Probe for the "sc,sc-bsp" node: bring up the sc_pm and sc_irqN misc
+ * devices and mark the platform device wakeup-capable.
+ */
+static int zx_bsp_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	ret = zx_pm_device_init();
+	if (ret)
+		return ret;
+
+	ret = zx_irq_device_init(pdev);
+
+	device_init_wakeup(&pdev->dev, true);
+
+	return ret;
+}
+
+/* Nothing to undo — devices stay registered for the system's lifetime. */
+static int zx_bsp_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static const struct of_device_id zx_bsp_match[] = {
+	{ .compatible = "sc,sc-bsp", },
+	{ }
+};
+
+static struct platform_driver zx_bsp_driver = {
+	.probe = zx_bsp_probe,
+	.remove = zx_bsp_remove,
+	.driver = {
+		.name = "sc_bsp",
+		.of_match_table = zx_bsp_match,
+	},
+};
+builtin_platform_driver(zx_bsp_driver)
+
+/*---------------------------------------------------------------*/
+/* Reset line used to reboot the SoC; set up in zx_reboot_probe(). */
+static struct reset_control *reboot_rst;
+
+/* Restart handler: assert the reboot reset line if one was found. */
+static int zx_restart(struct notifier_block *this,
+			unsigned long mode, void *cmd)
+{
+	if (reboot_rst) {
+		reset_control_assert(reboot_rst);
+	}
+
+	return NOTIFY_DONE;
+}
+
+/* Priority 129: preferred over the default (128) restart handlers. */
+static struct notifier_block zx_restart_handler = {
+	.notifier_call = zx_restart,
+	.priority = 129,
+};
+
+/*
+ * Probe for the "zte,reboot" node: fetch the reboot reset line and
+ * register the restart handler.
+ */
+static int zx_reboot_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct device_node *np = pdev->dev.of_node;
+
+	reboot_rst = of_reset_control_get_by_index(np, 0);
+	/*
+	 * of_reset_control_get_by_index() returns an ERR_PTR() on failure,
+	 * never NULL, so the original "if (!reboot_rst)" check was dead
+	 * code and zx_restart() would later dereference an error pointer.
+	 */
+	if (IS_ERR(reboot_rst)) {
+		ret = PTR_ERR(reboot_rst);
+		reboot_rst = NULL;
+		dev_err(&pdev->dev, "No reset handler found!");
+		return ret;
+	}
+
+	ret = register_restart_handler(&zx_restart_handler);
+	if (ret)
+		pr_warn("cannot register restart handler, %d\n", ret);
+
+	return 0;
+}
+
+static const struct of_device_id zx_reboot_match[] = {
+	{ .compatible = "zte,reboot", },
+	{ }
+};
+
+/* Built-in driver: registered at device_initcall time, never removed. */
+static struct platform_driver zx_reboot_driver = {
+	.probe = zx_reboot_probe,
+	.driver = {
+		.name = "zx_reboot",
+		.of_match_table = zx_reboot_match,
+	},
+};
+builtin_platform_driver(zx_reboot_driver)
+
+/*----------------------------------------------------------------*/
+#define CONFIG_RPMSG_LOG 1
+
+#ifdef CONFIG_RPMSG_LOG
+/* In-memory ring used by rpmsg_printk(); read back via rpmsg_log_show(). */
+#define RPMSG_LOG_SIZE (20*1024)
+static char rpmsg_printk_buf[RPMSG_LOG_SIZE];
+static u32 rpmsg_log_point = 0;	/* next write offset */
+static u32 rpmsg_log_turn = 0;	/* set once the buffer has wrapped */
+static u32 rpmsg_sram_inited = 0;
+//static char rpmsg_log_temp_buf[512] = {0};
+static spinlock_t rpmsg_log_lock;
+
+/*
+ * Append len bytes to the log buffer, wrapping to the start when the
+ * message (plus terminator slack) would not fit.  The buffer is kept
+ * NUL-terminated at the write point.
+ */
+static void rpmsg_sram_cpy(char *s, unsigned len)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rpmsg_log_lock, flags);
+
+	if(rpmsg_log_point + len + 2 >= RPMSG_LOG_SIZE) {
+		rpmsg_log_point = 0;
+
+		if (!rpmsg_log_turn)
+			rpmsg_log_turn = 1;
+	}
+
+	memcpy(rpmsg_printk_buf+rpmsg_log_point, s, len);
+	rpmsg_log_point += len;
+	rpmsg_printk_buf[rpmsg_log_point]=0;
+
+	spin_unlock_irqrestore(&rpmsg_log_lock, flags);
+}
+#endif
+
+/*
+ * sysfs show hook dumping the rpmsg log.  After a wrap the older half
+ * (from just past the current write point) is printed first, then the
+ * newer half from the start of the buffer.
+ */
+static ssize_t rpmsg_log_show(struct kobject *kobj, struct kobj_attribute *attr,
+			char *buf)
+{
+	char *s = buf;
+
+#ifdef CONFIG_RPMSG_LOG
+	unsigned long flags;
+
+	if (rpmsg_sram_inited) {
+		spin_lock_irqsave(&rpmsg_log_lock, flags);
+
+		if (!rpmsg_log_turn) {
+			s += sprintf(s, "%s", rpmsg_printk_buf);
+		}
+		else {
+			/* NOTE(review): the +2 skips the NUL written at the
+			 * wrap point; confirm it always lands on old text */
+			s += sprintf(s, "%s", rpmsg_printk_buf+rpmsg_log_point+2);
+			s += sprintf(s, "%s", rpmsg_printk_buf);
+		}
+
+		spin_unlock_irqrestore(&rpmsg_log_lock, flags);
+	}
+#endif
+
+	return (s - buf);
+}
+
+/**
+ * rpmsg_printk - printk-style logger appending a timestamped line to
+ * the in-memory rpmsg log ring (readable via the sysfs show hook).
+ *
+ * usage: like printk(...).  No-op until rpmsg_sram_init() has run.
+ */
+void rpmsg_printk(const char *fmt, ...)
+{
+#ifdef CONFIG_RPMSG_LOG
+	va_list args;
+	unsigned long long t;
+	unsigned long nanosec_rem;
+	int tlen, len;
+	char rpmsg_log_temp_buf[512] = {0};
+
+	if(!rpmsg_sram_inited)
+		return;
+
+	va_start(args, fmt);
+
+	/* add time stamp in the ">seconds.microseconds< " printk style */
+	t = cpu_clock(smp_processor_id());
+	nanosec_rem = do_div(t, 1000000000);
+	tlen = scnprintf(rpmsg_log_temp_buf, sizeof(rpmsg_log_temp_buf),
+			">%5lu.%06lu< ", (unsigned long) t, nanosec_rem / 1000);
+
+	/*
+	 * Bound the formatted message: the original vsprintf() could
+	 * overrun this 512-byte stack buffer for long messages.
+	 */
+	len = vscnprintf(rpmsg_log_temp_buf+tlen,
+			sizeof(rpmsg_log_temp_buf)-tlen, fmt, args);
+	len += tlen;
+
+	rpmsg_sram_cpy(rpmsg_log_temp_buf, len);
+
+	va_end(args);
+#endif
+}
+
+/* Reset the rpmsg log ring and enable rpmsg_printk(). */
+void rpmsg_sram_init(void)
+{
+#ifdef CONFIG_RPMSG_LOG
+	pr_info("[RPMSG] LOG_INIT \n");
+
+	memset(rpmsg_printk_buf, 0, RPMSG_LOG_SIZE);
+	rpmsg_log_point = 0;
+
+	spin_lock_init(&rpmsg_log_lock);
+
+	/* set last: rpmsg_printk() bails out while this is 0 */
+	rpmsg_sram_inited = 1;
+#endif
+}
diff --git a/upstream/linux-5.10/drivers/soc/sc/power/zx29-cpufreq.c b/upstream/linux-5.10/drivers/soc/sc/power/zx29-cpufreq.c
new file mode 100755
index 0000000..7eacfd7
--- /dev/null
+++ b/upstream/linux-5.10/drivers/soc/sc/power/zx29-cpufreq.c
@@ -0,0 +1,528 @@
+/*
+ * ZTE zx297510 dvfs driver
+ *
+ * Copyright (C) 2013 ZTE Ltd.
+ * by zxp
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/cpufreq.h>
+#include <linux/suspend.h>
+
+#include <linux/soc/zte/rpmsg.h>
+//#include "mach/clock.h"
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include "zx-pm.h"
+#define ZX_CPUFREQ_IOC_MAGIC 'W'
+
+/*ioctl cmd usd by device*/
+#define ZX_CPUFREQ_SET_FREQ _IOW(ZX_CPUFREQ_IOC_MAGIC, 1, char *)
+#define ZX_CPUFREQ_GET_FREQ _IOW(ZX_CPUFREQ_IOC_MAGIC, 2, char *)
+
+#define ZX_CPUFREQ_DEV "/dev/zx_cpufreq"
+
+#define PM_FREQ_TRACE 1
+#if PM_FREQ_TRACE
+
+/* Small circular trace of the last 20 frequency transitions. */
+#define FREQ_CHANGE_COUNT 20
+
+typedef struct
+{
+	volatile unsigned int old_index;
+	volatile unsigned int new_idex;	/* sic: new index */
+	volatile unsigned int time;	/* microseconds since boot */
+}freq_change_view_trace_t;
+
+static freq_change_view_trace_t freq_change_view[FREQ_CHANGE_COUNT] ;
+static unsigned int freq_change_index = 0;
+static int cpufreq_driver_inited = 0;
+
+/* Record one old->new frequency transition with a timestamp. */
+void trace_freq_change(unsigned int old_index,unsigned int new_index)
+{
+	freq_change_view[freq_change_index].old_index = old_index;
+	freq_change_view[freq_change_index].new_idex = new_index;
+	freq_change_view[freq_change_index].time = ktime_to_us(ktime_get());
+	freq_change_index++;
+	if(freq_change_index == FREQ_CHANGE_COUNT)
+	{
+		freq_change_index = 0;
+	}
+}
+#else
+void trace_freq_change(unsigned int old_index,unsigned int new_index){}
+#endif
+
+unsigned int freq_change_enabled_by_startup = 0;
+static struct delayed_work pm_freq_work;
+#define PM_FREQ_DELAY msecs_to_jiffies(25000)
+
+/* for count change time by M0 */
+#define DEBUG_CPUFREQ_TIME 1
+
+#ifdef CONFIG_DDR_FREQ
+#ifdef CONFIG_ARCH_ZX297520V2
+/* v2: DDR freq handshake through dedicated AXI mailbox registers */
+#define get_cur_ddr() pm_read_reg_16(AXI_CURRENT_FREQ)
+#define set_target_ddr(f) pm_write_reg_16(AXI_AP2M0_TARGET, f)
+#define set_ddr_req() pm_write_reg_16(AXI_AP2M0_FLAG, 1)
+#define clr_ddr_ack() pm_write_reg_16(AXI_M02AP_ACK, 0)
+
+#define wait_ddr_ack() while(!pm_read_reg_16(AXI_M02AP_ACK))
+#else
+/* v3: DDR freq handshake through a shared structure in IRAM */
+static ddr_freq_regs *ddr_regs = (ddr_freq_regs *)IRAM_CHANGE_DDR_BASE;
+#define get_cur_ddr() (ddr_regs->cur_freq)
+#define set_target_ddr(f) (ddr_regs->ap_exp_freq = f)
+#define set_ddr_req() (ddr_regs->ap_req_flag = 1)
+
+#endif
+#endif
+
+//#undef CONFIG_AXI_FREQ
+#ifdef CONFIG_AXI_FREQ
+static DEFINE_MUTEX(axifreq_lock);
+
+/* AXI-freq / voltage handshake structures shared with M0 in IRAM. */
+static axi_freq_regs *axi_regs; // = (axi_freq_regs *)IRAM_CHANGE_AXI_BASE;
+static vol_dvs_regs *vol_regs; // = (vol_dvs_regs *)IRAM_CHANGE_DVS_BASE;
+
+#define get_cur_axi() (axi_regs->cur_freq)
+#define set_target_axi_sw(f) (axi_regs->ap_exp_freq = f)
+#define set_axi_req() (axi_regs->ap_req_flag = 1)
+
+#define get_target_axi_hw(addr) (pm_read_reg(addr)&(0x7))
+
+#if 1
+#define DDR_FREQ_156M_HW (0x4e)
+#define DDR_FREQ_208M_HW (0x68)
+#define DDR_FREQ_312M_HW (0x9c)
+#define DDR_FREQ_400M_HW (0xc8)
+
+/* NOTE(review): these two expand to expressions with no write-back —
+ * they read the register and discard the result; confirm intent */
+#define set_ddr_freq_hw(addr,f) (pm_read_reg(addr)&(~0xff)|f)
+#define set_ddr_freq_sync(addr,f) (pm_read_reg(addr)&(~0x1)|f)
+#endif
+
+#define get_cur_vol() (vol_regs->cur_vol)
+#define set_target_vol(f) (vol_regs->ap_exp_vol = f)
+#define set_vol_req() (vol_regs->ap_req_flag = 1)
+
+#if 0
+#define WAIT_AXI_ACK_TIMEOUT (jiffies + msecs_to_jiffies(2)) /* wait 2 ms, we count max 200us also */
+#define wait_axi_ack(timeout) while(!pm_read_reg_16(AXI_M02AP_ACK) && time_before(jiffies, timeout))
+#else
+#define WAIT_AXI_ACK_TIMEOUT (200) /* wait 120us, we count max 200us also */
+/* Spin until M0 clears both request flags or `timeout` us elapse. */
+static void wait_axi_ack(unsigned timeout)
+{
+	ktime_t begin_time = ktime_get();
+
+	while(((vol_regs->ap_req_flag) ||(axi_regs->ap_req_flag) )&& (unsigned)ktime_to_us(ktime_sub(ktime_get(), begin_time))<timeout);
+}
+#endif
+
+/*
+ * Ping the M0 core over rpmsg channel 1 so it processes the pending
+ * AXI/voltage request flags.  Returns 0 when the full message was
+ * written, otherwise rpmsgWrite()'s (short/err) return value.
+ */
+static int send_msg_to_m0(void)
+{
+	unsigned int ap_m0_buf = AXI_VOL_CHANGE_ICP_BUF; /* the icp interface need a buffer */
+	T_RpMsg_Msg Icp_Msg;
+	int ret;
+
+	Icp_Msg.coreID = CORE_M0;
+	Icp_Msg.chID = 1;
+	Icp_Msg.flag = RPMSG_WRITE_INT; /* 1- means send an icp interrupt> */
+	Icp_Msg.buf = &ap_m0_buf;
+	Icp_Msg.len = 0x4;
+
+	ret = rpmsgWrite(&Icp_Msg);
+	if(Icp_Msg.len == ret)
+		return 0;
+	else
+		return ret;
+}
+
+/* AXI frequency changes can be vetoed via the PM mask. */
+static int axi_freq_change_allowed(void)
+{
+	if(pm_get_mask_info()&PM_NO_AXI_FREQ)
+		return false;
+
+	return true;
+}
+
+/**
+ * request_vol - post a voltage change request to M0.
+ *
+ * vol_dvs: input vol enum
+ *
+ * Writes the target voltage into the shared IRAM block and raises the
+ * request flag only when the target differs from the current voltage.
+ * Always returns 0; M0 performs the change asynchronously.
+ */
+int request_vol(zx29_vol vol_dvs)
+{
+	unsigned int current_vol = get_cur_vol();
+
+	set_target_vol(vol_dvs);
+#if DEBUG_CPUFREQ_TIME
+	pm_printk("[CPUFREQ] current_vol(%d) request_vol(%d) \n",(u32)current_vol,(u32)vol_dvs);
+#endif
+
+	if(vol_dvs != current_vol)
+	{
+		/* request freq */
+		set_vol_req();
+	}
+
+	return 0;
+}
+
+/**
+ * Pick the core voltage matching an AXI frequency: 0.9 V for 156 MHz,
+ * 0.85 V otherwise.
+ */
+static zx29_vol request_vol_by_axi(zx29_axi_freq axi_freq)
+{
+	if(axi_freq == AXI_FREQ_156M)
+		return VOL_VO_900;
+	else
+		return VOL_VO_850;
+}
+
+/**
+ * set vol .
+ *
+ * we will do this by M0.
+ */
+static int set_vol_by_axi(zx29_axi_freq axi_freq)
+{
+	zx29_vol vol_dvs= request_vol_by_axi(axi_freq);
+
+	/* set new vol*/
+	return request_vol(vol_dvs);
+}
+
+
+/**
+ * request_axi_freq - post an AXI frequency change request to M0 and
+ * (in the software path) wait for the acknowledgement.
+ *
+ * axi_freq: input freq enum
+ *
+ * SET_AXI_BY_HW path: program the mux register directly.
+ * Software path: write the target into the shared IRAM block, raise
+ * the request flag, ping M0, then poll wait_axi_ack() with a timeout.
+ * Always returns 0, even on rpmsg failure (only logged).
+ *
+ * NOTE(review): in the software path the closing brace of the else
+ * branch sits inside the DEBUG_CPUFREQ_TIME conditional — the code
+ * only compiles with DEBUG_CPUFREQ_TIME enabled; the #if/brace nesting
+ * here deserves a cleanup pass.
+ */
+int request_axi_freq(zx29_axi_freq axi_freq)
+{
+	unsigned int current_axi_freq = get_cur_axi();
+	unsigned int tmp;
+	int ret = 0;
+
+#if DEBUG_CPUFREQ_TIME
+	ktime_t begin_time, end_time;
+	s64 total_time;
+#endif
+
+	if(!axi_freq_change_allowed())
+		return 0;
+
+#ifdef SET_AXI_BY_HW
+	tmp = (pm_read_reg(PS_MATRIX_AXI_SEL)&(~0x7))|axi_freq;
+	pm_write_reg(PS_MATRIX_AXI_SEL,tmp);
+	pm_printk("[CPUFREQ] current_axi_freq(%d) request_axi_freq(%d) after_request_axi_freq(%d) after_request_vol(%d)\n",(u32)current_axi_freq,(u32)axi_freq,get_cur_axi(),get_cur_vol());
+#else
+	set_target_axi_sw(axi_freq);
+
+	if(axi_freq != current_axi_freq)
+	{
+		/* request freq */
+		set_axi_req();
+
+//		set_vol_by_axi(axi_freq);//set vol
+
+		ret = send_msg_to_m0();
+#if DEBUG_CPUFREQ_TIME
+		begin_time = ktime_get();
+#endif
+		if(!ret)
+		{
+			/* wait axi freq changed ok! we will set a timeout for safety~ */
+			wait_axi_ack(WAIT_AXI_ACK_TIMEOUT);
+		}
+		else
+		{
+			pm_printk("[CPUFREQ] request_axi_freq(%d) failed: (%d) \n",(u32)axi_freq, ret);
+		}
+
+#if DEBUG_CPUFREQ_TIME
+		end_time = ktime_get();
+		total_time = ktime_to_us(ktime_sub(end_time, begin_time));
+		pm_printk("[CPUFREQ] total axi time: %d us current_axi_freq(%d) request_axi_freq(%d) after_request_axi_freq(%d) after_request_vol(%d)\n",(u32)total_time,(u32)current_axi_freq,(u32)axi_freq,get_cur_axi(),get_cur_vol());
+	}
+	else
+	{
+		pm_printk("[CPUFREQ] current_axi_freq(%d) request_axi_freq(%d) \n",(u32)current_axi_freq,(u32)axi_freq);
+#endif
+	}
+#endif
+
+	return 0;
+}
+
+
+/**
+ * Pick the AXI frequency matching a CPU frequency given in KHz:
+ * 156 MHz at/above 600 MHz CPU, 78 MHz below.
+ */
+static zx29_axi_freq request_axi_freq_by_cpu(unsigned int freq)
+{
+	return (freq >= 600*1000) ? AXI_FREQ_156M : AXI_FREQ_78M;
+}
+
+/**
+ * set axi freq .
+ *
+ * we will do this by M0.
+ *
+ * freq is the CPU frequency in KHz; the matching AXI frequency is
+ * derived and requested from M0.
+ */
+static int set_axi_frequency_by_cpu(unsigned int freq)
+{
+	zx29_axi_freq axi_freq = request_axi_freq_by_cpu(freq);
+
+	/* set new freq */
+	return request_axi_freq(axi_freq);
+}
+
+/**
+ * zx_request_axi_freq - public wrapper converting a raw frequency in
+ * Hz to an AXI freq enum and requesting it from M0.
+ *
+ * Returns request_axi_freq()'s result, -EINVAL for the 0xff sentinel,
+ * -EPERM before zx29_cpufreq_init() has run.
+ */
+int zx_request_axi_freq(unsigned int axifreq)
+{
+	zx29_axi_freq axi_freq;
+
+	if (axifreq == 0xff)
+		return -EINVAL;
+
+	if(cpufreq_driver_inited==0)
+		return -EPERM;
+
+	/*
+	 * The original code "return"ed the enum value here, which left
+	 * axi_freq uninitialized and made the request_axi_freq() call
+	 * below unreachable — the request was never sent.
+	 */
+	if(axifreq >= 600*1000*1000)
+		axi_freq = AXI_FREQ_156M;
+	else
+		axi_freq = AXI_FREQ_78M;
+
+	return request_axi_freq(axi_freq);
+}
+
+#endif
+
+
+#ifdef CONFIG_AXI_FREQ
+/**
+ * zx_axifreq_pm_notifier - acquire axifreq in suspend-resume context
+ *
+ * @notifier
+ * @pm_event
+ * @v
+ *
+ * Drops AXI to 78 MHz before suspend and restores 156 MHz on resume,
+ * serialized by axifreq_lock.
+ */
+
+static int zx_axifreq_pm_notifier(struct notifier_block *notifier,
+				unsigned long pm_event, void *v)
+{
+	mutex_lock(&axifreq_lock);
+
+	switch (pm_event)
+	{
+	case PM_SUSPEND_PREPARE:
+		request_axi_freq(AXI_FREQ_78M);
+		break;
+
+	case PM_POST_SUSPEND:
+		request_axi_freq(AXI_FREQ_156M);
+		break;
+	}
+
+	mutex_unlock(&axifreq_lock);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block zx_axifreq_nb =
+{
+	.notifier_call = zx_axifreq_pm_notifier,
+};
+
+/* Registers the PM notifier and sets the initial AXI frequency.
+ * NOTE(review): its late_initcall is commented out below — currently
+ * nothing calls this. */
+static int __init zx29_axifreq_init(void)
+{
+
+	/* pm notify */
+	register_pm_notifier(&zx_axifreq_nb);
+//	request_vol(VOL_VO_900);
+	request_axi_freq(AXI_FREQ_156M);
+
+	return 0;
+}
+
+//late_initcall(zx29_axifreq_init);
+#endif
+
+/*=============================================================================
+ *======== zx29 DDR freq ===============================================
+ *** ap/phy request --> m0 notify --> jump to iram --> wait completely --> ***
+ *** -->jump to ddr ***====
+ *=============================================================================
+ */
+#ifdef CONFIG_DDR_FREQ
+static DEFINE_MUTEX(ddrfreq_lock);
+
+/* DDR frequency changes can be vetoed via the PM mask. */
+static int ddr_freq_change_allowed(void)
+{
+	if(pm_get_mask_info()&PM_NO_DDR_FREQ)
+		return false;
+
+	return true;
+}
+
+/*
+ * Ping the PS core over the PSM rpmsg channel so it processes the
+ * pending DDR request.  Returns 0 on a complete write, otherwise the
+ * (short/err) return of zDrvRpMsg_Write().
+ */
+static int send_msg_to_ps(void)
+{
+	unsigned int ap_m0_buf = AXI_VOL_CHANGE_ICP_BUF; /* the icp interface need a buffer */
+	T_ZDrvRpMsg_Msg Icp_Msg;
+	int ret;
+	Icp_Msg.actorID = PS_ID;
+	Icp_Msg.chID = ICP_CHANNEL_PSM;
+	Icp_Msg.flag = RPMSG_WRITE_INT; /* 1- means send an icp interrupt> */
+	Icp_Msg.buf = &ap_m0_buf;
+	Icp_Msg.len = 0x4;
+	ret = zDrvRpMsg_Write(&Icp_Msg);
+	if(Icp_Msg.len == ret)
+		return 0;
+	else
+		return ret;
+}
+
+/*
+ * Hardware FFC path: toggle the sync bit around programming the DDR
+ * frequency-select register.  Always returns 0.
+ */
+int request_ddr_freq_hw(unsigned int ddr_freq)
+{
+	if(!ddr_freq_change_allowed())
+		return 0;
+	pm_write_reg(AP_DDR_FFC_SEL_SYNC,0x0);
+	pm_write_reg(AP_DDR_FFC_SEL,ddr_freq);
+	pm_write_reg(AP_DDR_FFC_SEL_SYNC,0x1);
+	return 0;
+}
+
+/*
+ * Request a DDR frequency change.  No-op when vetoed by the PM mask
+ * or when the target equals the current frequency; otherwise either
+ * programs the hardware FFC sync (SET_DDR_BY_HW) or posts the target
+ * to the shared IRAM block and notifies the PS core.  Always returns
+ * 0 (rpmsg failures are only logged).  The large #if 0 region below
+ * is the retired v2 M0-handshake implementation kept for reference.
+ */
+int request_ddr_freq(zx29_ddr_freq ddr_freq)
+{
+	int ret = 0;
+	unsigned current_ddr_freq = get_cur_ddr();
+	if(!ddr_freq_change_allowed())
+		return 0;
+
+	if(ddr_freq == current_ddr_freq)
+		return 0;
+
+#ifdef SET_DDR_BY_HW
+	//set_ddr_freq_hw(AP_DDR_FFC_SEL, ddr_exp_freq);
+	set_ddr_freq_sync(AP_DDR_FFC_SEL_SYNC,0x1);
+#else
+	set_target_ddr(ddr_freq);
+	ret = send_msg_to_ps();
+	if(!ret)
+	{
+		printk("[DDRFREQ] ddr_freq [%d]\n",get_cur_ddr());
+	}
+	else
+	{
+		printk("[DDRFREQ] request_ddr_freq failed\n");
+	}
+#endif
+#if 0
+	unsigned current_ddr_freq = get_cur_ddr();
+	int ret = 0;
+
+#if DEBUG_CPUFREQ_TIME
+	ktime_t begin_time, end_time;
+	s64 total_time;
+#endif
+
+	if(!ddr_freq_change_allowed())
+		return 0;
+
+	set_target_ddr(ddr_freq);
+
+	if(ddr_freq != current_ddr_freq)
+	{
+		/* request freq */
+		clr_ddr_ack();
+		set_ddr_req();
+
+#if DEBUG_CPUFREQ_TIME
+		begin_time = ktime_get();
+#endif
+
+		ret = send_msg_to_m0();
+		if(!ret)
+			/* wait axi freq changed ok! we will set a timeout for safety~ */
+			wait_ddr_ack();
+		else
+			pr_info("[CPUFREQ] request_ddr_freq(%d) failed: (%d) \n",(u32)ddr_freq, ret);
+
+#if DEBUG_CPUFREQ_TIME
+		end_time = ktime_get();
+		total_time = ktime_to_us(ktime_sub(end_time, begin_time));
+		pr_info("[CPUFREQ] total ddr time: %d us\n",(u32)total_time);
+#endif
+	}
+#endif
+	return 0;
+}
+
+#ifdef CONFIG_ARCH_ZX297520V2
+/* v2: DDR DFS runs from IRAM with interrupts off. */
+static void ddr_freq_handler(void)
+{
+	local_irq_disable();
+	waiting_ddr_dfs((unsigned long)DDR_DFS_CODE_ADDR);
+	local_irq_enable();
+}
+#else
+/*
+ * v3: drop DDR to its lowest setting before suspend and restore
+ * 312 MHz (hw code 0x9c) after resume, serialized by ddrfreq_lock.
+ */
+static int zx_ddrfreq_pm_notifier(struct notifier_block *notifier,
+				unsigned long pm_event, void *v)
+{
+	mutex_lock(&ddrfreq_lock);
+	switch (pm_event)
+	{
+	case PM_SUSPEND_PREPARE:
+		request_ddr_freq_hw(0);
+		break;
+	case PM_POST_SUSPEND:
+		request_ddr_freq_hw(0x9c);
+		break;
+	}
+	mutex_unlock(&ddrfreq_lock);
+	return NOTIFY_OK;
+}
+static struct notifier_block zx_ddrfreq_nb =
+{
+	.notifier_call = zx_ddrfreq_pm_notifier,
+};
+#endif
+
+/* Register the DDR-freq suspend/resume notifier (v3 only). */
+static int __init zx29_ddrfreq_init(void)
+{
+#ifdef CONFIG_ARCH_ZX297520V2
+#else
+	register_pm_notifier(&zx_ddrfreq_nb);
+#endif
+	return 0;
+}
+
+#endif
+
+/* Stub rpmsg callback for messages from M0 — not implemented yet. */
+static void pm_m0_handler(void *buf, unsigned int len)
+{
+	/* deal msg from m0 */
+}
+
+int zx29_cpufreq_init(void)
+{
+ if(cpufreq_driver_inited)
+ return 0;
+
+ axi_regs = (axi_freq_regs *)IRAM_CHANGE_AXI_BASE;
+ vol_regs = (vol_dvs_regs *)IRAM_CHANGE_DVS_BASE;
+
+ cpufreq_driver_inited = 1;
+
+ pr_info("[CPUFREQ] zx29_cpufreq_init ok \n");
+ return 0;
+}
+
diff --git a/upstream/linux-5.10/drivers/soc/sc/rpmsg/zx29_icp.c b/upstream/linux-5.10/drivers/soc/sc/rpmsg/zx29_icp.c
new file mode 100755
index 0000000..aae42a2
--- /dev/null
+++ b/upstream/linux-5.10/drivers/soc/sc/rpmsg/zx29_icp.c
@@ -0,0 +1,490 @@
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/clockchips.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/syscore_ops.h>
+#include <linux/gpio.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/soc/sc/common.h>
+#include <linux/soc/sc/spinlock.h>
+
+#include "icp_dev.h"
+#include "zx29_icp.h"
+#include "icp_rpmsg.h"
+
+static icp_callback_fn _icp_fn;
+static T_HalIcp_Reg *icp_ap2m0_reg;
+static T_HalIcp_Reg *icp_ap2ps_reg;
+
+static inline T_HalIcp_Reg *icp_get_reg(T_ZDrvRpMsg_ActorID actor_id)
+{
+ if (CORE_M0 == actor_id )
+ return icp_ap2m0_reg;
+ else if (CORE_PS0 == actor_id )
+ return icp_ap2ps_reg;
+ else
+ BUG();
+}
+
+/*******************************************************************************
+* Function: icp_set_int
+* Description: This function is used for generating icp interrupt to inform remote cpu;
+* Parameters:
+* Input:
+ actorID: id of remote cpu
+ chID: id of channel
+* Output:None
+*
+* Returns:None
+*
+*
+* Others:
+********************************************************************************/
+static int icp_set_int(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID)
+{
+ T_HalIcp_Reg *icp_reg;
+
+ if ((actorID >= CORE_MAXID) || (chID >= CHANNEL_MAXID(actorID)))
+ return -EINVAL;
+
+ icp_reg = icp_get_reg(actorID);
+
+ if(chID<32)
+ icp_reg->control.low_word = (1<<chID);
+ else
+ icp_reg->control.high_word = (1<<(chID-32));
+
+ return 0;
+}
+
+/*******************************************************************************
+* Function: icp_clear_int
+* Description: This function is used for clear icp interrupt from remote cpu;
+* Parameters:
+* Input:
+ actorID: id of remote cpu
+ chID: id of channel
+* Output:None
+*
+* Returns:None
+*
+*
+* Others:
+********************************************************************************/
+static void icp_clear_int(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID)
+{
+ T_HalIcp_Reg *icp_reg = icp_get_reg(actorID);
+
+ if(chID<32)
+ icp_reg->clear.low_word = (1<<chID);
+ else
+ icp_reg->clear.high_word = (1<<(chID-32)) ;
+}
+
+/*******************************************************************************
+* Function: icp_get_int
+* Description: This function is used for get icp interrupt from remote cpu;
+* Parameters:
+* Input:
+* actorID: id of remote cpu
+* chID: id of channel
+* Output:None
+*
+* Returns:None
+*
+*
+* Others:
+********************************************************************************/
+T_HalIcp_Dword icp_get_int(T_ZDrvRpMsg_ActorID actorID)
+{
+ T_HalIcp_Dword IcpState;
+ T_HalIcp_Reg *icp_reg;
+
+ if (actorID >= CORE_MAXID)
+ {
+ IcpState.high_word = 0;
+ IcpState.low_word = 0;
+
+ return IcpState;
+ }
+
+ icp_reg = icp_get_reg(actorID);
+
+ IcpState.high_word = icp_reg->state.high_word;
+ IcpState.low_word = icp_reg->state.low_word;
+
+ return IcpState;
+}
+
+/*******************************************************************************
+* Function: icp_get_int_state
+* Description: This function is used for get the state of icp interruptting of remote cpu;
+* Parameters:
+* Input:
+ actorID: id of remote cpu
+ chID: id of channel
+* Output:None
+*
+* Returns:None
+*
+*
+* Others:
+********************************************************************************/
+static int icp_get_int_state(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID)
+{
+ T_HalIcp_Reg *icp_reg;
+
+ icp_reg = icp_get_reg(actorID);
+
+ if(chID<32)
+ {
+ if(icp_reg->in_state.low_word & (0x1<<chID))
+ return true;
+ }
+ else
+ {
+ if(icp_reg->in_state.high_word & (0x1<<(chID-32)))
+ return true;
+ }
+
+ return false;
+}
+
+/*******************************************************************************
+* Function: icp_mask_int
+* Description: This function is used for Mask interrupt of channel;
+* Parameters:
+* Input:
+* Output:
+*
+* Returns: NONE
+*
+*
+* Others:
+********************************************************************************/
+static int icp_mask_int(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID)
+{
+ T_HalIcp_Reg *icp_reg;
+
+ if ((actorID >= CORE_MAXID) || (chID >= CHANNEL_MAXID(actorID)))
+ return -EINVAL;
+
+ icp_reg = icp_get_reg(actorID);
+
+ if(chID<32)
+ icp_reg->mask.low_word |= (0x1<<chID);
+ else
+ icp_reg->mask.high_word |= (0x1<<(chID-32));
+
+ return 0;
+}
+
+/*******************************************************************************
+* Function: icp_unmask_int
+* Description: This function is used for unmask interrupt of channel;
+* Parameters:
+* Input:
+* Output:
+*
+* Returns:
+* NONE
+*
+*
+* Others:
+********************************************************************************/
+static int icp_unmask_int(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID)
+{
+ T_HalIcp_Reg *icp_reg;
+
+ if ((actorID >= CORE_MAXID) || (chID >= CHANNEL_MAXID(actorID)))
+ return -EINVAL;
+
+ icp_reg = icp_get_reg(actorID);
+
+ if(chID < 32)
+ icp_reg->mask.low_word &= ~(0x1<<chID);
+ else
+ icp_reg->mask.high_word &= ~(0x1<<(chID-32));
+
+ return 0;
+}
+
+int icp_int_count = 0;
+#ifdef CONFIG_ZX29_WATCHDOG
+extern void zx_wdt_icp_wake(void);
+#endif
+irqreturn_t icp_isr(int irq, void *data)
+{
+ icp_msg _icp_msg;
+ T_HalIcp_Dword IcpState;
+ unsigned int i;
+
+ _icp_msg.src_id = (unsigned int)data;
+
+ IcpState = icp_get_int(_icp_msg.src_id);
+
+ for(i=0; i<CHANNEL_MAXID(_icp_msg.src_id); i++)
+ {
+ if((((i<32)&&((IcpState.low_word>>i) & 0x1))||((i>=32)&&((IcpState.high_word>>(i-32)) & 0x1)))) {
+ _icp_msg.event_id = i;
+ #ifdef CONFIG_ZX29_WATCHDOG
+ if((CORE_M0 == _icp_msg.src_id)&&(2 == i))
+ zx_wdt_icp_wake();
+ #endif
+ if(_icp_fn)
+ _icp_fn(&_icp_msg);
+
+ icp_clear_int(_icp_msg.src_id, i);
+ }
+ }
+
+ icp_int_count ++;
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * for loopback test
+ */
+void fake_icp_isr(T_RpMsg_CoreID src_core, T_RpMsg_CoreID dest_core, T_RpMsg_ChID ch)
+{
+ icp_msg _icp_msg;
+ unsigned int i;
+
+ _icp_msg.src_id = src_core;
+ _icp_msg.dest_core = dest_core;
+ _icp_msg.event_id = ch;
+
+ if(_icp_fn)
+ _icp_fn(&_icp_msg);
+}
+
+/*
+ * for get wake state
+ */
+void icp_get_int_info(T_ZDrvRpMsg_ActorID actorID, unsigned int *high_word, unsigned int *low_word)
+{
+ T_HalIcp_Dword IcpState;
+
+ IcpState = icp_get_int(actorID);
+
+ *high_word = IcpState.high_word;
+ *low_word = IcpState.low_word;
+}
+
+static const char * const ps_channel_info[64] = {
+ [0] = "drv test",
+ [2] = "Power Management",
+ [3] = "ADB agent",
+ [4] = "USB app config",
+ [5] = "USB kernel config",
+ [6] = "audio",
+ [7] = "console switch",
+ [8] = "NV",
+ [9] = "debug",
+ [10] = "ramdump",
+ [11] = "tee common",
+ [12] = "tee RPC",
+ [13] = "ap2cap message queue",
+ [14] = "cap2ap message queue",
+ [15] = "AMT framework",
+ [16] = "APP rsvd 16",
+ [17] = "APP rsvd 17",
+ [18] = "APP rsvd 18",
+ [19] = "APP rsvd 19",
+ [20] = "zvnet 20",
+ [21] = "zvnet 21",
+ [22] = "zvnet 22",
+ [23] = "zvnet 23",
+ [24] = "zvnet 24",
+ [25] = "zvnet 25",
+ [26] = "zvnet 26",
+ [27] = "zvnet 27",
+ [28] = "zvnet 28",
+ [29] = "free skb",
+ [30] = "ttygs0",
+ [31] = "ttygs1",
+ [32] = "socket ipc",
+ [33] = "binder ipc",
+ [34] = "at channel 34",
+ [35] = "at channel 35",
+ [36] = "at channel 36",
+ [37] = "at channel 37",
+ [38] = "at channel 38",
+ [39] = "at channel 39",
+ [40] = "at channel 40",
+ [41] = "voice buffer",
+};
+
+void show_icp_state(T_ZDrvRpMsg_ActorID actorID)
+{
+ unsigned int hw, lw;
+ int i;
+
+ if (actorID != CORE_PS0)
+ return;
+
+ icp_get_int_info(actorID, &hw, &lw);
+ pr_info("[SLP] icpwake: 0x%x 0x%x\n", hw, lw);
+
+ for (i=0; i<32; i++)
+ if (lw&BIT(i))
+ pr_info("[SLP] icpwake: channel(%d) function(%s)\n", i, ps_channel_info[i] ? ps_channel_info[i] : "NA");
+
+ for (i=0; i<32; i++)
+ if (hw&BIT(i))
+ pr_info("[SLP] icpwake: channel(%d) function(%s)\n", i+32, ps_channel_info[i+32] ? ps_channel_info[i+32] : "NA");
+}
+
+static void icp_register_callback(icp_callback_fn cb)
+{
+ _icp_fn = cb;
+}
+
+static int icp_send_message(unsigned int core_id, icp_msg *icp_msg)
+{
+ if(!icp_msg || icp_msg->dest_core > CORE_MAXID )
+ return -EINVAL;
+
+ if(icp_get_int_state(icp_msg->dest_core, icp_msg->event_id)==false)
+ {
+ icp_set_int(icp_msg->dest_core, icp_msg->event_id);
+ }
+
+ return 0;
+}
+
+static t_icpdev_ops zx29_icp_ops = {
+ .register_callback = icp_register_callback,
+ .send_message = icp_send_message,
+ .mask_int = icp_mask_int,
+ .unmask_int = icp_unmask_int,
+ .set_int = icp_set_int,
+};
+
+static int icp_ap2ps_init(struct device *dev)
+{
+ void __iomem *reg_base;
+ unsigned int irq;
+ int ret;
+ struct device_node *np = dev->of_node;
+
+ reg_base = of_iomap(np, 0);
+ if ( !reg_base ){
+ pr_err("%s: [ICP]Cannot get IORESOURCE_MEM\n", __func__);
+ return -ENOENT;
+ }
+
+ icp_ap2ps_reg = (T_HalIcp_Reg *)reg_base;
+
+ irq = irq_of_parse_and_map(np, 0);
+ if( !irq ){
+ pr_err("%s: [ICP]Cannot get IORESOURCE_IRQ\n", __func__);
+ return -ENOENT;
+ }
+
+ icp_ap2ps_reg->mask.high_word = 0xffffffff;
+ icp_ap2ps_reg->mask.low_word = 0xffffffff;
+
+ ret = request_irq(irq, icp_isr, 0, "zx_icp", CORE_PS0);
+ if (ret)
+ {
+ pr_err("%s: [ICP]register irq failed\n", __func__);
+ return ret;
+ }
+
+ enable_irq_wake(irq);
+
+ icpdev_register_ops(&zx29_icp_ops);
+
+ rpmsgInit(CORE_PS0, np);
+/*
+ dev->id = CORE_PS0;
+ ret = icp_rpmsg_device_register(dev);
+*/
+ pr_info("%s: ok! irq(%d) icp_address(%llx \n", __func__, irq, reg_base );
+
+ return ret;
+}
+
+static int icp_ap2m0_init(struct device *dev)
+{
+ void __iomem *reg_base;
+ unsigned int irq;
+ int ret;
+ struct device_node *np = dev->of_node;
+
+ pr_info("%s: enter \n", __func__);
+
+ reg_base = of_iomap(np, 0);
+ if ( !reg_base ){
+ pr_err("%s: [ICP]Cannot get IORESOURCE_MEM\n", __func__);
+ return -ENOENT;
+ }
+
+ icp_ap2m0_reg = (T_HalIcp_Reg *)reg_base;
+
+ irq = irq_of_parse_and_map(np, 0);
+ if( !irq ){
+ pr_err("%s: [ICP]Cannot get IORESOURCE_IRQ\n", __func__);
+ return -ENOENT;
+ }
+
+ icp_ap2m0_reg->mask.high_word = 0xffffffff;
+ icp_ap2m0_reg->mask.low_word = 0xffffffff;
+
+ ret = request_irq(irq, icp_isr, 0, "zx_icp", CORE_M0);
+ if (ret)
+ {
+ pr_err("%s: [ICP]register irq failed\n", __func__);
+ return ret;
+ }
+
+ enable_irq_wake(irq);
+
+ icpdev_register_ops(&zx29_icp_ops);
+
+ rpmsgInit(CORE_M0, np);
+
+ pr_info("%s: ok! irq(%d) icp_address(%llx \n", __func__, irq, reg_base );
+
+ return 0;
+}
+
+static const struct of_device_id zx29_icp_dt_ids[] = {
+ { .compatible = "zte,zx29-icp-ap2m0", .data = &icp_ap2m0_init },
+ { .compatible = "zte,zx29-icp-ap2ps", .data = &icp_ap2ps_init },
+ { /* sentinel */ }
+};
+
+static int zx29_icp_probe(struct platform_device *pdev)
+{
+ int (*init_fn)(struct device *dev);
+
+ init_fn = of_device_get_match_data(&pdev->dev);
+ if (!init_fn) {
+ dev_err(&pdev->dev, "Error: No device match found\n");
+ return -ENODEV;
+ }
+
+ return init_fn(&pdev->dev);
+}
+
+static struct platform_driver zx29_icp_driver = {
+ .driver = {
+ .name = "zx29-icp",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(zx29_icp_dt_ids),
+ },
+ .probe = zx29_icp_probe,
+};
+
+builtin_platform_driver(zx29_icp_driver)
diff --git a/upstream/linux-5.10/drivers/soc/sc/spinlock/spinlock-zx297520v3.c b/upstream/linux-5.10/drivers/soc/sc/spinlock/spinlock-zx297520v3.c
new file mode 100755
index 0000000..7843e46
--- /dev/null
+++ b/upstream/linux-5.10/drivers/soc/sc/spinlock/spinlock-zx297520v3.c
@@ -0,0 +1,513 @@
+/*
+ * drivers/soc/sc/spinlock/spinlock-zx297520v3.c
+ *
+ * Copyright (C) 2015 ZTE-TSP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/soc/zte/spinlock.h>
+
+#define USE_HW_SPINLOCK 1
+
+#define SHARED_DEVICE_REG1 (hwlock_reg_base + 0x170)
+#define SHARED_DEVICE_REG2 (hwlock_reg_base + 0x174)
+#define SHARED_DEVICE_REG3 (hwlock_reg_base + 0x178)
+#define SHARED_DEVICE_REG4 (hwlock_reg_base + 0x17C)
+
+#if USE_HW_SPINLOCK
+
+#define MACH_NR_SFLOCKS SFLOCK_NUM
+#define MACH_NR_HWLOCKS HWLOCK_NUM
+
+#define SELF_CORE_ID CORE_ID_AP
+
+/* now use 8*MACH_NR_SFLOCKS bytes */
+#define SOFTLOCK_DESC_BASE (sf_base)//(SPINLOCK_SOFTLOCK_BASE)
+
+#define SPINLOCK_DEBUG 1
+
+#if SPINLOCK_DEBUG
+#define zspinlock_debug(fmt, ...) \
+ printk(KERN_INFO fmt, ##__VA_ARGS__)
+#else
+#define zspinlock_debug(fmt, ...)
+#endif
+
+#define zspinlock_assert(_EXP) BUG_ON(!_EXP)//ZDRV_ASSERT(_EXP)
+static DEFINE_MUTEX(zspinlock_mutex);
+static unsigned long s_hwSpinlockMsr[HWLOCK_NUM];
+/****************************************************************************
+* Types
+****************************************************************************/
+struct zte_softlock_desc {
+ unsigned long used;
+ unsigned long owner;
+};
+/**************************************************************************
+ * Global Variables *
+ **************************************************************************/
+static volatile struct zte_softlock_desc *softlock_desc[MACH_NR_SFLOCKS];
+static void __iomem __force *hwlock_reg_base;
+static void __iomem __force *sf_base;
+static void __iomem __force *hwlock_regs[MACH_NR_HWLOCKS] ;
+/*
+=
+{
+ SHARED_DEVICE_REG1,
+ SHARED_DEVICE_REG2,
+ SHARED_DEVICE_REG3,
+ SHARED_DEVICE_REG4
+};
+*/
+extern void msleep(unsigned int msecs);
+
+ /*******************************************************************************
+ * Function: _hw_spin_lock
+ * Description: acquire the hardware lock, id 0~3
+ * Parameters:
+ * Input:
+ *
+ * Output:
+ *
+ * Returns:
+ *
+ *
+ * Others:
+ ********************************************************************************/
+ static void _hw_spin_lock(unsigned long hwid)
+{
+ unsigned long tmp;
+ unsigned long msr;
+ local_irq_save(msr);
+ s_hwSpinlockMsr[hwid] = msr;
+
+ while(ioread32(hwlock_regs[hwid])&0x1);
+ tmp = ioread32(hwlock_regs[hwid]);
+ tmp &= 0x00ffffff;
+ tmp |= (SELF_CORE_ID&0xff)<<24;
+ iowrite32(tmp, hwlock_regs[hwid]);
+
+}
+/*******************************************************************************
+ * Function: _hw_spin_unlock
+ * Description: release the hardware lock, id 0~3
+ * Parameters:
+ * Input:
+ *
+ * Output:
+ *
+ * Returns:
+ *
+ *
+ * Others:
+ ********************************************************************************/
+static void _hw_spin_unlock(unsigned long hwid)
+{
+ unsigned long tmp;
+
+
+ if(SELF_CORE_ID != (ioread32(hwlock_regs[hwid])&0xff000000)>>24){
+ zspinlock_assert(0);
+ }
+ tmp = ioread32(hwlock_regs[hwid]);
+ tmp &= 0x00fffffe;
+ iowrite32(tmp, hwlock_regs[hwid]);
+
+ local_irq_restore(s_hwSpinlockMsr[hwid]);
+}
+/*******************************************************************************
+ * Function: hw_spin_lock
+ * Description: acquire the hardware lock, id 0~2;
+ * id 3 is reserved for the software lock and must not be used by external drivers.
+ * Parameters:
+ * Input:
+ *
+ * Output:
+ *
+ * Returns:
+ *
+ *
+ * Others:
+ ********************************************************************************/
+void hw_spin_lock(emhw_lock_id hwid)
+{
+ _hw_spin_lock(hwid);
+// zspinlock_debug("cpu %d gets %d hardware lock!/n",SELF_CORE_ID,hwid);
+}
+/*******************************************************************************
+ * Function: hw_spin_unlock
+ * Description: driver interface to release the hardware lock, id 0~2
+ * Parameters:
+ * Input:
+ *
+ * Output:
+ *
+ * Returns:
+ *
+ *
+ * Others:
+ ********************************************************************************/
+void hw_spin_unlock(emhw_lock_id hwid)
+{
+ _hw_spin_unlock(hwid);
+// zspinlock_debug("cpu %d releases %d hardware lock!/n",SELF_CORE_ID,hwid);
+}
+/*******************************************************************************
+ * Function: soft_spin_lock
+ * Description: driver interface to acquire the software lock
+ * Parameters:
+ * Input: sfid: software lock id.
+ * coreid: cpu id that currently holds software lock sfid.
+ * Output:
+ *
+ * Returns:
+ *
+ *
+ * Others:
+ ********************************************************************************/
+void soft_spin_lock(emsf_lock_id sfid)
+{
+ static unsigned long lock_count = 0;
+
+softlock_loop:
+ while(softlock_desc[sfid]->owner != SELF_CORE_ID && softlock_desc[sfid]->used)
+ {
+ lock_count++;
+ if(lock_count == 1000)
+ {
+ lock_count = 0;
+ msleep(5);
+ }
+ }
+
+ _hw_spin_lock(SOFTLOCK_HWLOCK);
+ if(softlock_desc[sfid]->owner != SELF_CORE_ID && softlock_desc[sfid]->used)
+ {
+ _hw_spin_unlock(SOFTLOCK_HWLOCK);
+ goto softlock_loop;
+ }
+ softlock_desc[sfid]->used ++;
+ softlock_desc[sfid]->owner = SELF_CORE_ID;
+ _hw_spin_unlock(SOFTLOCK_HWLOCK);
+ //zspinlock_debug("cpu %d releases %d software lock!/n",SELF_CORE_ID,sfid);
+
+}
+#if 1
+int soft_spin_lock_printf(emsf_lock_id sfid)
+{
+ static unsigned long lock_count = 0;
+softlock_loop:
+ while(softlock_desc[sfid]->owner != SELF_CORE_ID && softlock_desc[sfid]->used)
+ {
+ ndelay(1);
+ lock_count++;
+ if(lock_count >= 5000)
+ {
+ lock_count = 0;
+ return -1;
+ }
+ }
+ _hw_spin_lock(SOFTLOCK_HWLOCK);
+ if(softlock_desc[sfid]->owner != SELF_CORE_ID && softlock_desc[sfid]->used)
+ {
+ _hw_spin_unlock(SOFTLOCK_HWLOCK);
+ goto softlock_loop;
+ }
+ softlock_desc[sfid]->used ++;
+ softlock_desc[sfid]->owner = SELF_CORE_ID;
+ _hw_spin_unlock(SOFTLOCK_HWLOCK);
+ return 0;
+}
+#endif
+/*******************************************************************************
+ * Function: soft_spin_unlock
+ * Description: release interface paired with soft_spin_lock.
+ * Parameters:
+ * Input:
+ *
+ * Output:
+ *
+ * Returns:
+ *
+ *
+ * Others:
+ ********************************************************************************/
+ void soft_spin_unlock(emsf_lock_id sfid)
+{
+ if(softlock_desc[sfid]->used){
+ if(SELF_CORE_ID != softlock_desc[sfid]->owner){
+ zspinlock_assert(0);
+ }
+ _hw_spin_lock(SOFTLOCK_HWLOCK);
+ softlock_desc[sfid]->used --;
+ if(softlock_desc[sfid]->used == 0) {
+ softlock_desc[sfid]->owner = 0x0;
+ }
+ _hw_spin_unlock(SOFTLOCK_HWLOCK);
+ //zspinlock_debug("cpu %d releases %d software lock!/n",SELF_CORE_ID,sfid);
+ }
+}
+
+/*******************************************************************************
+ * Function: soft_spin_lock_psm
+ * Description: driver interface to acquire the software lock
+ * Parameters:
+ * Input: sfid: software lock id.
+ * coreid: cpu id that currently holds software lock sfid.
+ * Output:
+ *
+ * Returns:
+ *
+ *
+ * Others:
+ ********************************************************************************/
+void soft_spin_lock_psm(emsf_lock_id sfid)
+{
+softlock_loop:
+ while(softlock_desc[sfid]->owner != SELF_CORE_ID && softlock_desc[sfid]->used)
+ {
+
+ }
+
+ _hw_spin_lock(SOFTLOCK_HWLOCK);
+ if(softlock_desc[sfid]->owner != SELF_CORE_ID && softlock_desc[sfid]->used)
+ {
+ _hw_spin_unlock(SOFTLOCK_HWLOCK);
+ goto softlock_loop;
+ }
+ softlock_desc[sfid]->used ++;
+ softlock_desc[sfid]->owner = SELF_CORE_ID;
+ _hw_spin_unlock(SOFTLOCK_HWLOCK);
+ //zspinlock_debug("cpu %d releases %d software lock!/n",SELF_CORE_ID,sfid);
+
+}
+
+/*******************************************************************************
+ * Function: soft_spin_unlock_psm
+ * Description: release interface paired with soft_spin_lock_psm.
+ * Parameters:
+ * Input:
+ *
+ * Output:
+ *
+ * Returns:
+ *
+ *
+ * Others:
+ ********************************************************************************/
+void soft_spin_unlock_psm(emsf_lock_id sfid)
+{
+ soft_spin_unlock(sfid);
+}
+
+/*******************************************************************************
+ * Function: reg_spin_lock
+ * Description: driver interface to acquire the register lock
+ * Parameters:
+ * Input:
+ * Output:
+ *
+ * Returns:
+ *
+ *
+ * Others:
+ ********************************************************************************/
+void reg_spin_lock(void)
+{
+ _hw_spin_lock(REGLOCK_HWLOCK);
+ softlock_desc[REG_SFLOCK]->owner = SELF_CORE_ID;
+}
+EXPORT_SYMBOL(reg_spin_lock);
+
+/*******************************************************************************
+ * Function: reg_spin_unlock
+ * Description: release interface paired with reg_spin_lock.
+ * Parameters:
+ * Input:
+ *
+ * Output:
+ *
+ * Returns:
+ *
+ *
+ * Others:
+ ********************************************************************************/
+ void reg_spin_unlock(void)
+{
+ softlock_desc[REG_SFLOCK]->owner = 0x0;
+ _hw_spin_unlock(REGLOCK_HWLOCK);
+
+}
+EXPORT_SYMBOL(reg_spin_unlock);
+
+/*******************************************************************************
+ * Function: softspinlock_init
+ * Description: software lock initialization.
+ * Parameters:
+ * Input:
+ *
+ * Output:
+ *
+ * Returns:
+ *
+ *
+ * Others:
+ ********************************************************************************/
+int softspinlock_init(void)
+{
+ int i;
+
+ for(i = 0; i<MACH_NR_SFLOCKS; i++){
+ softlock_desc[i] =
+ (struct zte_softlock_desc *)(SOFTLOCK_DESC_BASE +i*sizeof(struct zte_softlock_desc));
+ //softlock_desc[i]->used = 0;
+ //softlock_desc[i]->owner= CORE_ID_NUM;
+ }
+ zspinlock_debug("softspinlock init success base=0x%x!",(int)SOFTLOCK_DESC_BASE);
+ return 0;
+}
+
+typedef struct _zx29_softspinlock_ser
+{
+ struct cdev cdev;
+ struct module *owner;
+ struct class *classes;
+ const struct file_operations *ops;
+}zx29_softspinlock_ser;
+
+static zx29_softspinlock_ser softspinlock_zx29 = {
+ .owner = THIS_MODULE,
+};
+
+int soft_spin_lock_get(emsf_lock_id sfid)
+{
+ if(sfid>=SFLOCK_NUM)
+ return -EFAULT;
+
+ if(softlock_desc[sfid]->owner != SELF_CORE_ID && softlock_desc[sfid]->used)
+ return 1;
+ else
+ return 0;
+}
+
+static long softspinlock_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int ret = 0;
+ char k_arg;
+
+ switch(cmd)
+ {
+ case SPINLOCK_GET_STATUS:
+ ret = copy_from_user(&k_arg, arg, sizeof(char));
+ if (ret)
+ return -EFAULT;
+
+ if(k_arg>= SFLOCK_NUM)
+ return -EFAULT;
+
+ k_arg = (char)soft_spin_lock_get(k_arg);
+ ret = copy_to_user(arg,&k_arg, sizeof(char));
+ if (ret)
+ return -EFAULT;
+ break;
+ default:
+ return -EPERM;
+ }
+
+ return ret;
+}
+
+
+static const struct file_operations softspinlock_ops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = softspinlock_ioctl,
+};
+
+
+static int __init softspinlock_dev_init(void)
+{
+ int ret = 0;
+ dev_t dev;
+
+ softspinlock_zx29.ops = &softspinlock_ops;
+
+ ret = alloc_chrdev_region(&dev, 0, 1, "softspinlock");
+ if (ret)
+ {
+ printk(KERN_ERR "%s: softspinlock failed to allocate char dev region\n",
+ __FILE__);
+ return ret;
+ }
+
+ cdev_init(&softspinlock_zx29.cdev, &softspinlock_ops);
+ softspinlock_zx29.cdev.owner = softspinlock_zx29.owner;
+
+ ret = cdev_add(&softspinlock_zx29.cdev, dev, 1);
+ if (ret)
+ {
+ unregister_chrdev_region(dev, 1);
+ printk(KERN_ERR "%s: softspinlock failed to add cdev\n",
+ __FILE__);
+ return ret;
+ }
+
+ softspinlock_zx29.classes = class_create(THIS_MODULE, "softspinlock");
+ if (IS_ERR(softspinlock_zx29.classes))
+ return PTR_ERR(softspinlock_zx29.classes);
+
+ device_create(softspinlock_zx29.classes, NULL, dev, NULL, "softspinlock");
+
+ printk("[xxx] softspinlock dev inited! \n");
+
+ return ret;
+}
+
+void zx_spinlock_init(void __iomem *spinlock_base)
+{
+ void __iomem **data = (void __iomem **)spinlock_base;
+
+ hwlock_reg_base = data[0];
+ sf_base = data[1];
+
+ hwlock_regs[0] = SHARED_DEVICE_REG1;
+ hwlock_regs[1] = SHARED_DEVICE_REG2;
+ hwlock_regs[2] = SHARED_DEVICE_REG3;
+ hwlock_regs[3] = SHARED_DEVICE_REG4;
+
+ softspinlock_init();
+}
+
+//arch_initcall(softspinlock_init);
+#else
+int softspinlock_init(void){return 0;}
+void reg_spin_lock(void){}
+void reg_spin_unlock(void){}
+void soft_spin_lock(emsf_lock_id sfid){}
+void soft_spin_unlock(emsf_lock_id sfid){}
+void soft_spin_lock_psm(emsf_lock_id sfid){}
+void soft_spin_unlock_psm(emsf_lock_id sfid){}
+void hw_spin_lock(emhw_lock_id hwid){}
+void hw_spin_unlock(emhw_lock_id hwid){}
+static int __init softspinlock_dev_init(void){return 0;}
+#endif
+
+module_init(softspinlock_dev_init);
+
diff --git a/upstream/linux-5.10/drivers/staging/Kconfig b/upstream/linux-5.10/drivers/staging/Kconfig
new file mode 100755
index 0000000..443587b
--- /dev/null
+++ b/upstream/linux-5.10/drivers/staging/Kconfig
@@ -0,0 +1,122 @@
+# SPDX-License-Identifier: GPL-2.0
+menuconfig STAGING
+ bool "Staging drivers"
+ help
+ This option allows you to select a number of drivers that are
+ not of the "normal" Linux kernel quality level. These drivers
+ are placed here in order to get a wider audience to make use of
+ them. Please note that these drivers are under heavy
+ development, may or may not work, and may contain userspace
+ interfaces that most likely will be changed in the near
+ future.
+
+ Using any of these drivers will taint your kernel which might
+ affect support options from both the community, and various
+ commercial support organizations.
+
+ If you wish to work on these drivers, to help improve them, or
+ to report problems you have with them, please see the
+ drivers/staging/<driver_name>/TODO file to see what needs to be
+ worked on, and who to contact.
+
+ If in doubt, say N here.
+
+
+if STAGING
+
+source "drivers/staging/wlan-ng/Kconfig"
+
+source "drivers/staging/comedi/Kconfig"
+
+source "drivers/staging/olpc_dcon/Kconfig"
+
+source "drivers/staging/rtl8192u/Kconfig"
+
+source "drivers/staging/rtl8192e/Kconfig"
+
+source "drivers/staging/rtl8723bs/Kconfig"
+
+source "drivers/staging/rtl8712/Kconfig"
+
+source "drivers/staging/rtl8188eu/Kconfig"
+
+source "drivers/staging/rts5208/Kconfig"
+
+source "drivers/staging/octeon/Kconfig"
+
+source "drivers/staging/octeon-usb/Kconfig"
+
+source "drivers/staging/vt6655/Kconfig"
+
+source "drivers/staging/vt6656/Kconfig"
+
+source "drivers/staging/iio/Kconfig"
+
+source "drivers/staging/sm750fb/Kconfig"
+
+source "drivers/staging/emxx_udc/Kconfig"
+
+source "drivers/staging/nvec/Kconfig"
+
+source "drivers/staging/media/Kconfig"
+
+source "drivers/staging/android/Kconfig"
+
+source "drivers/staging/board/Kconfig"
+
+source "drivers/staging/gdm724x/Kconfig"
+
+source "drivers/staging/fwserial/Kconfig"
+
+source "drivers/staging/goldfish/Kconfig"
+
+source "drivers/staging/netlogic/Kconfig"
+
+source "drivers/staging/gs_fpgaboot/Kconfig"
+
+source "drivers/staging/unisys/Kconfig"
+
+source "drivers/staging/clocking-wizard/Kconfig"
+
+source "drivers/staging/fbtft/Kconfig"
+
+source "drivers/staging/fsl-dpaa2/Kconfig"
+
+source "drivers/staging/most/Kconfig"
+
+source "drivers/staging/ks7010/Kconfig"
+
+source "drivers/staging/greybus/Kconfig"
+
+source "drivers/staging/vc04_services/Kconfig"
+
+source "drivers/staging/pi433/Kconfig"
+
+source "drivers/staging/mt7621-pci/Kconfig"
+
+source "drivers/staging/mt7621-pci-phy/Kconfig"
+
+source "drivers/staging/mt7621-pinctrl/Kconfig"
+
+source "drivers/staging/mt7621-dma/Kconfig"
+
+source "drivers/staging/ralink-gdma/Kconfig"
+
+source "drivers/staging/mt7621-dts/Kconfig"
+
+source "drivers/staging/gasket/Kconfig"
+
+source "drivers/staging/axis-fifo/Kconfig"
+
+source "drivers/staging/fieldbus/Kconfig"
+
+source "drivers/staging/kpc2000/Kconfig"
+
+source "drivers/staging/qlge/Kconfig"
+
+source "drivers/staging/wfx/Kconfig"
+
+source "drivers/staging/hikey9xx/Kconfig"
+
+source "drivers/staging/voicebufferdrv/Kconfig"
+endif # STAGING
diff --git a/upstream/linux-5.10/drivers/staging/Makefile b/upstream/linux-5.10/drivers/staging/Makefile
new file mode 100755
index 0000000..0cb5246
--- /dev/null
+++ b/upstream/linux-5.10/drivers/staging/Makefile
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for staging directory
+
+obj-y += media/
+obj-$(CONFIG_PRISM2_USB) += wlan-ng/
+obj-$(CONFIG_COMEDI) += comedi/
+obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/
+obj-$(CONFIG_RTL8192U) += rtl8192u/
+obj-$(CONFIG_RTL8192E) += rtl8192e/
+obj-$(CONFIG_RTL8723BS) += rtl8723bs/
+obj-$(CONFIG_R8712U) += rtl8712/
+obj-$(CONFIG_R8188EU) += rtl8188eu/
+obj-$(CONFIG_RTS5208) += rts5208/
+obj-$(CONFIG_NETLOGIC_XLR_NET) += netlogic/
+obj-$(CONFIG_OCTEON_ETHERNET) += octeon/
+obj-$(CONFIG_OCTEON_USB) += octeon-usb/
+obj-$(CONFIG_VT6655) += vt6655/
+obj-$(CONFIG_VT6656) += vt6656/
+obj-$(CONFIG_VME_BUS) += vme/
+obj-$(CONFIG_IIO) += iio/
+obj-$(CONFIG_FB_SM750) += sm750fb/
+obj-$(CONFIG_USB_EMXX) += emxx_udc/
+obj-$(CONFIG_MFD_NVEC) += nvec/
+obj-$(CONFIG_ANDROID) += android/
+obj-$(CONFIG_STAGING_BOARD) += board/
+obj-$(CONFIG_LTE_GDM724X) += gdm724x/
+obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/
+obj-$(CONFIG_GOLDFISH) += goldfish/
+obj-$(CONFIG_GS_FPGABOOT) += gs_fpgaboot/
+obj-$(CONFIG_UNISYSSPAR) += unisys/
+obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clocking-wizard/
+obj-$(CONFIG_FB_TFT) += fbtft/
+obj-$(CONFIG_FSL_DPAA2) += fsl-dpaa2/
+obj-$(CONFIG_MOST) += most/
+obj-$(CONFIG_KS7010) += ks7010/
+obj-$(CONFIG_GREYBUS) += greybus/
+obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/
+obj-$(CONFIG_PI433) += pi433/
+obj-$(CONFIG_PCI_MT7621) += mt7621-pci/
+obj-$(CONFIG_PCI_MT7621_PHY) += mt7621-pci-phy/
+obj-$(CONFIG_PINCTRL_RT2880) += mt7621-pinctrl/
+obj-$(CONFIG_SOC_MT7621) += mt7621-dma/
+obj-$(CONFIG_DMA_RALINK) += ralink-gdma/
+obj-$(CONFIG_SOC_MT7621) += mt7621-dts/
+obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/
+obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
+obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/
+obj-$(CONFIG_KPC2000) += kpc2000/
+obj-$(CONFIG_QLGE) += qlge/
+obj-$(CONFIG_WFX) += wfx/
+obj-y += hikey9xx/
+obj-$(CONFIG_VOICE_BUFFER_DRV) += voicebufferdrv/
diff --git a/upstream/linux-5.10/drivers/staging/voicebufferdrv/voice_buffer_dev_multicore.c b/upstream/linux-5.10/drivers/staging/voicebufferdrv/voice_buffer_dev_multicore.c
new file mode 100755
index 0000000..9c7d0c3
--- /dev/null
+++ b/upstream/linux-5.10/drivers/staging/voicebufferdrv/voice_buffer_dev_multicore.c
Binary files differ
diff --git a/upstream/linux-5.10/drivers/tty/tty_io.c b/upstream/linux-5.10/drivers/tty/tty_io.c
new file mode 100755
index 0000000..669aef7
--- /dev/null
+++ b/upstream/linux-5.10/drivers/tty/tty_io.c
@@ -0,0 +1,3602 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+/*
+ * 'tty_io.c' gives an orthogonal feeling to tty's, be they consoles
+ * or rs-channels. It also implements echoing, cooked mode etc.
+ *
+ * Kill-line thanks to John T Kohl, who also corrected VMIN = VTIME = 0.
+ *
+ * Modified by Theodore Ts'o, 9/14/92, to dynamically allocate the
+ * tty_struct and tty_queue structures. Previously there was an array
+ * of 256 tty_struct's which was statically allocated, and the
+ * tty_queue structures were allocated at boot time. Both are now
+ * dynamically allocated only when the tty is open.
+ *
+ * Also restructured routines so that there is more of a separation
+ * between the high-level tty routines (tty_io.c and tty_ioctl.c) and
+ * the low-level tty routines (serial.c, pty.c, console.c). This
+ * makes for cleaner and more compact code. -TYT, 9/17/92
+ *
+ * Modified by Fred N. van Kempen, 01/29/93, to add line disciplines
+ * which can be dynamically activated and de-activated by the line
+ * discipline handling modules (like SLIP).
+ *
+ * NOTE: pay no attention to the line discipline code (yet); its
+ * interface is still subject to change in this version...
+ * -- TYT, 1/31/92
+ *
+ * Added functionality to the OPOST tty handling. No delays, but all
+ * other bits should be there.
+ * -- Nick Holloway <alfie@dcs.warwick.ac.uk>, 27th May 1993.
+ *
+ * Rewrote canonical mode and added more termios flags.
+ * -- julian@uhunix.uhcc.hawaii.edu (J. Cowley), 13Jan94
+ *
+ * Reorganized FASYNC support so mouse code can share it.
+ * -- ctm@ardi.com, 9Sep95
+ *
+ * New TIOCLINUX variants added.
+ * -- mj@k332.feld.cvut.cz, 19-Nov-95
+ *
+ * Restrict vt switching via ioctl()
+ * -- grif@cs.ucr.edu, 5-Dec-95
+ *
+ * Move console and virtual terminal code to more appropriate files,
+ * implement CONFIG_VT and generalize console device interface.
+ * -- Marko Kohtala <Marko.Kohtala@hut.fi>, March 97
+ *
+ * Rewrote tty_init_dev and tty_release_dev to eliminate races.
+ * -- Bill Hawes <whawes@star.net>, June 97
+ *
+ * Added devfs support.
+ * -- C. Scott Ananian <cananian@alumni.princeton.edu>, 13-Jan-1998
+ *
+ * Added support for a Unix98-style ptmx device.
+ * -- C. Scott Ananian <cananian@alumni.princeton.edu>, 14-Jan-1998
+ *
+ * Reduced memory usage for older ARM systems
+ * -- Russell King <rmk@arm.linux.org.uk>
+ *
+ * Move do_SAK() into process context. Less stack use in devfs functions.
+ * alloc_tty_struct() always uses kmalloc()
+ * -- Andrew Morton <andrewm@uow.edu.eu> 17Mar01
+ */
+
+#include <linux/types.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/fcntl.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/task.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/devpts_fs.h>
+#include <linux/file.h>
+#include <linux/fdtable.h>
+#include <linux/console.h>
+#include <linux/timer.h>
+#include <linux/ctype.h>
+#include <linux/kd.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/ppp-ioctl.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/serial.h>
+#include <linux/ratelimit.h>
+#include <linux/compat.h>
+
+#include <linux/uaccess.h>
+
+#include <linux/kbd_kern.h>
+#include <linux/vt_kern.h>
+#include <linux/selection.h>
+
+#include <linux/kmod.h>
+#include <linux/nsproxy.h>
+
+#undef TTY_DEBUG_HANGUP
+#ifdef TTY_DEBUG_HANGUP
+# define tty_debug_hangup(tty, f, args...) tty_debug(tty, f, ##args)
+#else
+# define tty_debug_hangup(tty, f, args...) do { } while (0)
+#endif
+
+#define TTY_PARANOIA_CHECK 1
+#define CHECK_TTY_COUNT 1
+
+struct ktermios tty_std_termios = { /* for the benefit of tty drivers */
+ .c_iflag = ICRNL | IXON,
+ .c_oflag = OPOST | ONLCR,
+ .c_cflag = B38400 | CS8 | CREAD | HUPCL,
+ .c_lflag = ISIG | ICANON | ECHO | ECHOE | ECHOK |
+ ECHOCTL | ECHOKE | IEXTEN,
+ .c_cc = INIT_C_CC,
+ .c_ispeed = 38400,
+ .c_ospeed = 38400,
+ /* .c_line = N_TTY, */
+};
+
+EXPORT_SYMBOL(tty_std_termios);
+
+/* This list gets poked at by procfs and various bits of boot up code. This
+ could do with some rationalisation such as pulling the tty proc function
+ into this file */
+
+LIST_HEAD(tty_drivers); /* linked list of tty drivers */
+
+/* Mutex to protect creating and releasing a tty */
+DEFINE_MUTEX(tty_mutex);
+
+static ssize_t tty_read(struct kiocb *, struct iov_iter *);
+static ssize_t tty_write(struct kiocb *, struct iov_iter *);
+static __poll_t tty_poll(struct file *, poll_table *);
+static int tty_open(struct inode *, struct file *);
+#ifdef CONFIG_COMPAT
+static long tty_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+#else
+#define tty_compat_ioctl NULL
+#endif
+static int __tty_fasync(int fd, struct file *filp, int on);
+static int tty_fasync(int fd, struct file *filp, int on);
+static void release_tty(struct tty_struct *tty, int idx);
+
+/**
+ * free_tty_struct - free a disused tty
+ * @tty: tty struct to free
+ *
+ * Free the write buffers, tty queue and tty memory itself.
+ *
+ * Locking: none. Must be called after tty is definitely unused
+ */
+
+static void free_tty_struct(struct tty_struct *tty)
+{
+ tty_ldisc_deinit(tty);
+ put_device(tty->dev);
+ kfree(tty->write_buf);
+ tty->magic = 0xDEADDEAD;
+ kfree(tty);
+}
+
+static inline struct tty_struct *file_tty(struct file *file)
+{
+ return ((struct tty_file_private *)file->private_data)->tty;
+}
+
+int tty_alloc_file(struct file *file)
+{
+ struct tty_file_private *priv;
+
+ priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ file->private_data = priv;
+
+ return 0;
+}
+
+/* Associate a new file with the tty structure */
+void tty_add_file(struct tty_struct *tty, struct file *file)
+{
+ struct tty_file_private *priv = file->private_data;
+
+ priv->tty = tty;
+ priv->file = file;
+
+ spin_lock(&tty->files_lock);
+ list_add(&priv->list, &tty->tty_files);
+ spin_unlock(&tty->files_lock);
+}
+
+/**
+ * tty_free_file - free file->private_data
+ *
+ * This shall be used only for fail path handling when tty_add_file was not
+ * called yet.
+ */
+void tty_free_file(struct file *file)
+{
+ struct tty_file_private *priv = file->private_data;
+
+ file->private_data = NULL;
+ kfree(priv);
+}
+
+/* Delete file from its tty */
+static void tty_del_file(struct file *file)
+{
+ struct tty_file_private *priv = file->private_data;
+ struct tty_struct *tty = priv->tty;
+
+ spin_lock(&tty->files_lock);
+ list_del(&priv->list);
+ spin_unlock(&tty->files_lock);
+ tty_free_file(file);
+}
+
+/**
+ * tty_name - return tty naming
+ * @tty: tty structure
+ *
+ * Convert a tty structure into a name. The name reflects the kernel
+ * naming policy and if udev is in use may not reflect user space
+ *
+ * Locking: none
+ */
+
+const char *tty_name(const struct tty_struct *tty)
+{
+ if (!tty) /* Hmm. NULL pointer. That's fun. */
+ return "NULL tty";
+ return tty->name;
+}
+
+EXPORT_SYMBOL(tty_name);
+
+const char *tty_driver_name(const struct tty_struct *tty)
+{
+ if (!tty || !tty->driver)
+ return "";
+ return tty->driver->name;
+}
+
+static int tty_paranoia_check(struct tty_struct *tty, struct inode *inode,
+ const char *routine)
+{
+#ifdef TTY_PARANOIA_CHECK
+ if (!tty) {
+ pr_warn("(%d:%d): %s: NULL tty\n",
+ imajor(inode), iminor(inode), routine);
+ return 1;
+ }
+ if (tty->magic != TTY_MAGIC) {
+ pr_warn("(%d:%d): %s: bad magic number\n",
+ imajor(inode), iminor(inode), routine);
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+/* Caller must hold tty_lock */
+static int check_tty_count(struct tty_struct *tty, const char *routine)
+{
+#ifdef CHECK_TTY_COUNT
+ struct list_head *p;
+ int count = 0, kopen_count = 0;
+
+ spin_lock(&tty->files_lock);
+ list_for_each(p, &tty->tty_files) {
+ count++;
+ }
+ spin_unlock(&tty->files_lock);
+ if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+ tty->driver->subtype == PTY_TYPE_SLAVE &&
+ tty->link && tty->link->count)
+ count++;
+ if (tty_port_kopened(tty->port))
+ kopen_count++;
+ if (tty->count != (count + kopen_count)) {
+ tty_warn(tty, "%s: tty->count(%d) != (#fd's(%d) + #kopen's(%d))\n",
+ routine, tty->count, count, kopen_count);
+ return (count + kopen_count);
+ }
+#endif
+ return 0;
+}
+
+/**
+ * get_tty_driver - find device of a tty
+ * @device: device identifier
+ * @index: returns the index of the tty
+ *
+ * This routine returns a tty driver structure, given a device number
+ * and also passes back the index number.
+ *
+ * Locking: caller must hold tty_mutex
+ */
+
+static struct tty_driver *get_tty_driver(dev_t device, int *index)
+{
+ struct tty_driver *p;
+
+ list_for_each_entry(p, &tty_drivers, tty_drivers) {
+ dev_t base = MKDEV(p->major, p->minor_start);
+ if (device < base || device >= base + p->num)
+ continue;
+ *index = device - base;
+ return tty_driver_kref_get(p);
+ }
+ return NULL;
+}
+
+/**
+ * tty_dev_name_to_number - return dev_t for device name
+ * @name: user space name of device under /dev
+ * @number: pointer to dev_t that this function will populate
+ *
+ * This function converts device names like ttyS0 or ttyUSB1 into dev_t
+ * like (4, 64) or (188, 1). If no corresponding driver is registered then
+ * the function returns -ENODEV.
+ *
+ * Locking: this acquires tty_mutex to protect the tty_drivers list from
+ * being modified while we are traversing it, and makes sure to
+ * release it before exiting.
+ */
+int tty_dev_name_to_number(const char *name, dev_t *number)
+{
+ struct tty_driver *p;
+ int ret;
+ int index, prefix_length = 0;
+ const char *str;
+
+ for (str = name; *str && !isdigit(*str); str++)
+ ;
+
+ if (!*str)
+ return -EINVAL;
+
+ ret = kstrtoint(str, 10, &index);
+ if (ret)
+ return ret;
+
+ prefix_length = str - name;
+ mutex_lock(&tty_mutex);
+
+ list_for_each_entry(p, &tty_drivers, tty_drivers)
+ if (prefix_length == strlen(p->name) && strncmp(name,
+ p->name, prefix_length) == 0) {
+ if (index < p->num) {
+ *number = MKDEV(p->major, p->minor_start + index);
+ goto out;
+ }
+ }
+
+ /* if here then driver wasn't found */
+ ret = -ENODEV;
+out:
+ mutex_unlock(&tty_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tty_dev_name_to_number);
+
+#ifdef CONFIG_CONSOLE_POLL
+
+/**
+ * tty_find_polling_driver - find device of a polled tty
+ * @name: name string to match
+ * @line: pointer to resulting tty line nr
+ *
+ * This routine returns a tty driver structure, given a name
+ * and the condition that the tty driver is capable of polled
+ * operation.
+ */
+struct tty_driver *tty_find_polling_driver(char *name, int *line)
+{
+ struct tty_driver *p, *res = NULL;
+ int tty_line = 0;
+ int len;
+ char *str, *stp;
+
+ for (str = name; *str; str++)
+ if ((*str >= '0' && *str <= '9') || *str == ',')
+ break;
+ if (!*str)
+ return NULL;
+
+ len = str - name;
+ tty_line = simple_strtoul(str, &str, 10);
+
+ mutex_lock(&tty_mutex);
+ /* Search through the tty devices to look for a match */
+ list_for_each_entry(p, &tty_drivers, tty_drivers) {
+ if (!len || strncmp(name, p->name, len) != 0)
+ continue;
+ stp = str;
+ if (*stp == ',')
+ stp++;
+ if (*stp == '\0')
+ stp = NULL;
+
+ if (tty_line >= 0 && tty_line < p->num && p->ops &&
+ p->ops->poll_init && !p->ops->poll_init(p, tty_line, stp)) {
+ res = tty_driver_kref_get(p);
+ *line = tty_line;
+ break;
+ }
+ }
+ mutex_unlock(&tty_mutex);
+
+ return res;
+}
+EXPORT_SYMBOL_GPL(tty_find_polling_driver);
+#endif
+
+static ssize_t hung_up_tty_read(struct kiocb *iocb, struct iov_iter *to)
+{
+ return 0;
+}
+
+static ssize_t hung_up_tty_write(struct kiocb *iocb, struct iov_iter *from)
+{
+ return -EIO;
+}
+
+/* No kernel lock held - none needed ;) */
+static __poll_t hung_up_tty_poll(struct file *filp, poll_table *wait)
+{
+ return EPOLLIN | EPOLLOUT | EPOLLERR | EPOLLHUP | EPOLLRDNORM | EPOLLWRNORM;
+}
+
+static long hung_up_tty_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return cmd == TIOCSPGRP ? -ENOTTY : -EIO;
+}
+
+static long hung_up_tty_compat_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ return cmd == TIOCSPGRP ? -ENOTTY : -EIO;
+}
+
+static int hung_up_tty_fasync(int fd, struct file *file, int on)
+{
+ return -ENOTTY;
+}
+
+static void tty_show_fdinfo(struct seq_file *m, struct file *file)
+{
+ struct tty_struct *tty = file_tty(file);
+
+ if (tty && tty->ops && tty->ops->show_fdinfo)
+ tty->ops->show_fdinfo(tty, m);
+}
+
+static const struct file_operations tty_fops = {
+ .llseek = no_llseek,
+ .read_iter = tty_read,
+ .write_iter = tty_write,
+ .splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
+ .poll = tty_poll,
+ .unlocked_ioctl = tty_ioctl,
+ .compat_ioctl = tty_compat_ioctl,
+ .open = tty_open,
+ .release = tty_release,
+ .fasync = tty_fasync,
+ .show_fdinfo = tty_show_fdinfo,
+};
+
+static const struct file_operations console_fops = {
+ .llseek = no_llseek,
+ .read_iter = tty_read,
+ .write_iter = redirected_tty_write,
+ .splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
+ .poll = tty_poll,
+ .unlocked_ioctl = tty_ioctl,
+ .compat_ioctl = tty_compat_ioctl,
+ .open = tty_open,
+ .release = tty_release,
+ .fasync = tty_fasync,
+};
+
+static const struct file_operations hung_up_tty_fops = {
+ .llseek = no_llseek,
+ .read_iter = hung_up_tty_read,
+ .write_iter = hung_up_tty_write,
+ .poll = hung_up_tty_poll,
+ .unlocked_ioctl = hung_up_tty_ioctl,
+ .compat_ioctl = hung_up_tty_compat_ioctl,
+ .release = tty_release,
+ .fasync = hung_up_tty_fasync,
+};
+
+static DEFINE_SPINLOCK(redirect_lock);
+static struct file *redirect;
+
+extern void tty_sysctl_init(void);
+
+/**
+ * tty_wakeup - request more data
+ * @tty: terminal
+ *
+ * Internal and external helper for wakeups of tty. This function
+ * informs the line discipline if present that the driver is ready
+ * to receive more output data.
+ */
+
+void tty_wakeup(struct tty_struct *tty)
+{
+ struct tty_ldisc *ld;
+
+ if (test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) {
+ ld = tty_ldisc_ref(tty);
+ if (ld) {
+ if (ld->ops->write_wakeup)
+ ld->ops->write_wakeup(tty);
+ tty_ldisc_deref(ld);
+ }
+ }
+ wake_up_interruptible_poll(&tty->write_wait, EPOLLOUT);
+}
+
+EXPORT_SYMBOL_GPL(tty_wakeup);
+
+/**
+ * __tty_hangup - actual handler for hangup events
+ * @tty: tty device
+ *
+ * This can be called by a "kworker" kernel thread. That is process
+ * synchronous but doesn't hold any locks, so we need to make sure we
+ * have the appropriate locks for what we're doing.
+ *
+ * The hangup event clears any pending redirections onto the hung up
+ * device. It ensures future writes will error and it does the needed
+ * line discipline hangup and signal delivery. The tty object itself
+ * remains intact.
+ *
+ * Locking:
+ * BTM
+ * redirect lock for undoing redirection
+ * file list lock for manipulating list of ttys
+ * tty_ldiscs_lock from called functions
+ * termios_rwsem resetting termios data
+ * tasklist_lock to walk task list for hangup event
+ * ->siglock to protect ->signal/->sighand
+ */
+static void __tty_hangup(struct tty_struct *tty, int exit_session)
+{
+ struct file *cons_filp = NULL;
+ struct file *filp, *f = NULL;
+ struct tty_file_private *priv;
+ int closecount = 0, n;
+ int refs;
+
+ if (!tty)
+ return;
+
+
+ spin_lock(&redirect_lock);
+ if (redirect && file_tty(redirect) == tty) {
+ f = redirect;
+ redirect = NULL;
+ }
+ spin_unlock(&redirect_lock);
+
+ tty_lock(tty);
+
+ if (test_bit(TTY_HUPPED, &tty->flags)) {
+ tty_unlock(tty);
+ return;
+ }
+
+ /*
+ * Some console devices aren't actually hung up for technical and
+ * historical reasons, which can lead to indefinite interruptible
+ * sleep in n_tty_read(). The following explicitly tells
+ * n_tty_read() to abort readers.
+ */
+ set_bit(TTY_HUPPING, &tty->flags);
+
+ /* inuse_filps is protected by the single tty lock,
+ this really needs to change if we want to flush the
+ workqueue with the lock held */
+ check_tty_count(tty, "tty_hangup");
+
+ spin_lock(&tty->files_lock);
+ /* This breaks for file handles being sent over AF_UNIX sockets ? */
+ list_for_each_entry(priv, &tty->tty_files, list) {
+ filp = priv->file;
+ if (filp->f_op->write_iter == redirected_tty_write)
+ cons_filp = filp;
+ if (filp->f_op->write_iter != tty_write)
+ continue;
+ closecount++;
+ __tty_fasync(-1, filp, 0); /* can't block */
+ filp->f_op = &hung_up_tty_fops;
+ }
+ spin_unlock(&tty->files_lock);
+
+ refs = tty_signal_session_leader(tty, exit_session);
+ /* Account for the p->signal references we killed */
+ while (refs--)
+ tty_kref_put(tty);
+
+ tty_ldisc_hangup(tty, cons_filp != NULL);
+
+ spin_lock_irq(&tty->ctrl_lock);
+ clear_bit(TTY_THROTTLED, &tty->flags);
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ put_pid(tty->session);
+ put_pid(tty->pgrp);
+ tty->session = NULL;
+ tty->pgrp = NULL;
+ tty->ctrl_status = 0;
+ spin_unlock_irq(&tty->ctrl_lock);
+
+ /*
+ * If one of the devices matches a console pointer, we
+ * cannot just call hangup() because that will cause
+ * tty->count and state->count to go out of sync.
+ * So we just call close() the right number of times.
+ */
+ if (cons_filp) {
+ if (tty->ops->close)
+ for (n = 0; n < closecount; n++)
+ tty->ops->close(tty, cons_filp);
+ } else if (tty->ops->hangup)
+ tty->ops->hangup(tty);
+ /*
+ * We don't want to have driver/ldisc interactions beyond the ones
+ * we did here. The driver layer expects no calls after ->hangup()
+ * from the ldisc side, which is now guaranteed.
+ */
+ set_bit(TTY_HUPPED, &tty->flags);
+ clear_bit(TTY_HUPPING, &tty->flags);
+ tty_unlock(tty);
+
+ if (f)
+ fput(f);
+}
+
+static void do_tty_hangup(struct work_struct *work)
+{
+ struct tty_struct *tty =
+ container_of(work, struct tty_struct, hangup_work);
+
+ __tty_hangup(tty, 0);
+}
+
+/**
+ * tty_hangup - trigger a hangup event
+ * @tty: tty to hangup
+ *
+ * A carrier loss (virtual or otherwise) has occurred on this line;
+ * schedule a hangup sequence to run after this event.
+ */
+
+void tty_hangup(struct tty_struct *tty)
+{
+ tty_debug_hangup(tty, "hangup\n");
+ schedule_work(&tty->hangup_work);
+}
+
+EXPORT_SYMBOL(tty_hangup);
+
+/**
+ * tty_vhangup - process vhangup
+ * @tty: tty to hangup
+ *
+ * The user has asked via system call for the terminal to be hung up.
+ * We do this synchronously so that when the syscall returns the process
+ * is complete. That guarantee is necessary for security reasons.
+ */
+
+void tty_vhangup(struct tty_struct *tty)
+{
+ tty_debug_hangup(tty, "vhangup\n");
+ __tty_hangup(tty, 0);
+}
+
+EXPORT_SYMBOL(tty_vhangup);
+
+
+/**
+ * tty_vhangup_self - process vhangup for own ctty
+ *
+ * Perform a vhangup on the current controlling tty
+ */
+
+void tty_vhangup_self(void)
+{
+ struct tty_struct *tty;
+
+ tty = get_current_tty();
+ if (tty) {
+ tty_vhangup(tty);
+ tty_kref_put(tty);
+ }
+}
+
+/**
+ * tty_vhangup_session - hangup session leader exit
+ * @tty: tty to hangup
+ *
+ * The session leader is exiting and hanging up its controlling terminal.
+ * Every process in the foreground process group is signalled SIGHUP.
+ *
+ * We do this synchronously so that when the syscall returns the process
+ * is complete. That guarantee is necessary for security reasons.
+ */
+
+void tty_vhangup_session(struct tty_struct *tty)
+{
+ tty_debug_hangup(tty, "session hangup\n");
+ __tty_hangup(tty, 1);
+}
+
+/**
+ * tty_hung_up_p - was tty hung up
+ * @filp: file pointer of tty
+ *
+ * Return true if the tty has been subject to a vhangup or a carrier
+ * loss
+ */
+
+int tty_hung_up_p(struct file *filp)
+{
+ return (filp && filp->f_op == &hung_up_tty_fops);
+}
+
+EXPORT_SYMBOL(tty_hung_up_p);
+
+/**
+ * stop_tty - propagate flow control
+ * @tty: tty to stop
+ *
+ * Perform flow control to the driver. May be called
+ * on an already stopped device and will not re-call the driver
+ * method.
+ *
+ * This functionality is used by both the line disciplines for
+ * halting incoming flow and by the driver. It may therefore be
+ * called from any context, may be under the tty atomic_write_lock
+ * but not always.
+ *
+ * Locking:
+ * flow_lock
+ */
+
+void __stop_tty(struct tty_struct *tty)
+{
+ if (tty->stopped)
+ return;
+ tty->stopped = 1;
+ if (tty->ops->stop)
+ tty->ops->stop(tty);
+}
+
+void stop_tty(struct tty_struct *tty)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tty->flow_lock, flags);
+ __stop_tty(tty);
+ spin_unlock_irqrestore(&tty->flow_lock, flags);
+}
+EXPORT_SYMBOL(stop_tty);
+
+/**
+ * start_tty - propagate flow control
+ * @tty: tty to start
+ *
+ * Start a tty that has been stopped if at all possible. If this
+ * tty was previously stopped and is now being started, the driver
+ * start method is invoked and the line discipline woken.
+ *
+ * Locking:
+ * flow_lock
+ */
+
+void __start_tty(struct tty_struct *tty)
+{
+ if (!tty->stopped || tty->flow_stopped)
+ return;
+ tty->stopped = 0;
+ if (tty->ops->start)
+ tty->ops->start(tty);
+ tty_wakeup(tty);
+}
+
+void start_tty(struct tty_struct *tty)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tty->flow_lock, flags);
+ __start_tty(tty);
+ spin_unlock_irqrestore(&tty->flow_lock, flags);
+}
+EXPORT_SYMBOL(start_tty);
+
+static void tty_update_time(struct timespec64 *time)
+{
+ time64_t sec = ktime_get_real_seconds();
+
+ /*
+ * We only care if the two values differ in anything other than the
+ * lower three bits (i.e every 8 seconds). If so, then we can update
+ * the time of the tty device, otherwise it could be construed as a
+ * security leak to let userspace know the exact timing of the tty.
+ */
+ if ((sec ^ time->tv_sec) & ~7)
+ time->tv_sec = sec;
+}
+
+/*
+ * Iterate on the ldisc ->read() function until we've gotten all
+ * the data the ldisc has for us.
+ *
+ * The "cookie" is something that the ldisc read function can fill
+ * in to let us know that there is more data to be had.
+ *
+ * We promise to continue to call the ldisc until it stops returning
+ * data or clears the cookie. The cookie may be something that the
+ * ldisc maintains state for and needs to free.
+ */
+static int iterate_tty_read(struct tty_ldisc *ld, struct tty_struct *tty,
+ struct file *file, struct iov_iter *to)
+{
+ int retval = 0;
+ void *cookie = NULL;
+ unsigned long offset = 0;
+ char kernel_buf[64];
+ size_t count = iov_iter_count(to);
+
+ do {
+ int size, copied;
+
+ size = count > sizeof(kernel_buf) ? sizeof(kernel_buf) : count;
+ size = ld->ops->read(tty, file, kernel_buf, size, &cookie, offset);
+ if (!size)
+ break;
+
+ if (size < 0) {
+ /* Did we have an earlier error (ie -EFAULT)? */
+ if (retval)
+ break;
+ retval = size;
+
+ /*
+ * -EOVERFLOW means we didn't have enough space
+ * for a whole packet, and we shouldn't return
+ * a partial result.
+ */
+ if (retval == -EOVERFLOW)
+ offset = 0;
+ break;
+ }
+
+ copied = copy_to_iter(kernel_buf, size, to);
+ offset += copied;
+ count -= copied;
+
+ /*
+ * If the user copy failed, we still need to do another ->read()
+ * call if we had a cookie to let the ldisc clear up.
+ *
+ * But make sure size is zeroed.
+ */
+ if (unlikely(copied != size)) {
+ count = 0;
+ retval = -EFAULT;
+ }
+ } while (cookie);
+
+ /* We always clear tty buffer in case they contained passwords */
+ memzero_explicit(kernel_buf, sizeof(kernel_buf));
+ return offset ? offset : retval;
+}
+
+
+/**
+ * tty_read - read method for tty device files
+ * @file: pointer to tty file
+ * @buf: user buffer
+ * @count: size of user buffer
+ * @ppos: unused
+ *
+ * Perform the read system call function on this terminal device. Checks
+ * for hung up devices before calling the line discipline method.
+ *
+ * Locking:
+ * Locks the line discipline internally while needed. Multiple
+ * read calls may be outstanding in parallel.
+ */
+
+static ssize_t tty_read(struct kiocb *iocb, struct iov_iter *to)
+{
+ int i;
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ struct tty_struct *tty = file_tty(file);
+ struct tty_ldisc *ld;
+
+ if (tty_paranoia_check(tty, inode, "tty_read"))
+ return -EIO;
+ if (!tty || tty_io_error(tty))
+ return -EIO;
+
+ /* We want to wait for the line discipline to sort out in this
+ situation */
+ ld = tty_ldisc_ref_wait(tty);
+ if (!ld)
+ return hung_up_tty_read(iocb, to);
+ i = -EIO;
+ if (ld->ops->read)
+ i = iterate_tty_read(ld, tty, file, to);
+ tty_ldisc_deref(ld);
+
+ if (i > 0)
+ tty_update_time(&inode->i_atime);
+
+ return i;
+}
+
+static void tty_write_unlock(struct tty_struct *tty)
+{
+ mutex_unlock(&tty->atomic_write_lock);
+ wake_up_interruptible_poll(&tty->write_wait, EPOLLOUT);
+}
+
+static int tty_write_lock(struct tty_struct *tty, int ndelay)
+{
+ if (!mutex_trylock(&tty->atomic_write_lock)) {
+ if (ndelay)
+ return -EAGAIN;
+ if (mutex_lock_interruptible(&tty->atomic_write_lock))
+ return -ERESTARTSYS;
+ }
+ return 0;
+}
+
+/*
+ * Split writes up in sane blocksizes to avoid
+ * denial-of-service type attacks
+ */
+static inline ssize_t do_tty_write(
+ ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t),
+ struct tty_struct *tty,
+ struct file *file,
+ struct iov_iter *from)
+{
+ size_t count = iov_iter_count(from);
+ ssize_t ret, written = 0;
+ unsigned int chunk;
+
+ ret = tty_write_lock(tty, file->f_flags & O_NDELAY);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * We chunk up writes into a temporary buffer. This
+ * simplifies low-level drivers immensely, since they
+ * don't have locking issues and user mode accesses.
+ *
+ * But if TTY_NO_WRITE_SPLIT is set, we should use a
+ * big chunk-size..
+ *
+ * The default chunk-size is 2kB, because the NTTY
+ * layer has problems with bigger chunks. It will
+ * claim to be able to handle more characters than
+ * it actually does.
+ *
+ * FIXME: This can probably go away now except that 64K chunks
+ * are too likely to fail unless switched to vmalloc...
+ */
+ chunk = 2048;
+ if (test_bit(TTY_NO_WRITE_SPLIT, &tty->flags))
+ chunk = 65536;
+ if (count < chunk)
+ chunk = count;
+
+ /* write_buf/write_cnt is protected by the atomic_write_lock mutex */
+ if (tty->write_cnt < chunk) {
+ unsigned char *buf_chunk;
+
+ if (chunk < 1024)
+ chunk = 1024;
+
+ buf_chunk = kmalloc(chunk, GFP_KERNEL);
+ if (!buf_chunk) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ kfree(tty->write_buf);
+ tty->write_cnt = chunk;
+ tty->write_buf = buf_chunk;
+ }
+
+ /* Do the write .. */
+ for (;;) {
+ size_t size = count;
+ if (size > chunk)
+ size = chunk;
+
+ ret = -EFAULT;
+ if (copy_from_iter(tty->write_buf, size, from) != size)
+ break;
+
+ ret = write(tty, file, tty->write_buf, size);
+ if (ret <= 0)
+ break;
+
+ written += ret;
+ if (ret > size)
+ break;
+
+ /* FIXME! Have Al check this! */
+ if (ret != size)
+ iov_iter_revert(from, size-ret);
+
+ count -= ret;
+ if (!count)
+ break;
+ ret = -ERESTARTSYS;
+ if (signal_pending(current))
+ break;
+ cond_resched();
+ }
+ if (written) {
+ tty_update_time(&file_inode(file)->i_mtime);
+ ret = written;
+ }
+out:
+ tty_write_unlock(tty);
+ return ret;
+}
+
+/**
+ * tty_write_message - write a message to a certain tty, not just the console.
+ * @tty: the destination tty_struct
+ * @msg: the message to write
+ *
+ * This is used for messages that need to be redirected to a specific tty.
+ * We don't put it into the syslog queue right now maybe in the future if
+ * really needed.
+ *
+ * We must still hold the BTM and test the CLOSING flag for the moment.
+ */
+
+void tty_write_message(struct tty_struct *tty, char *msg)
+{
+ if (tty) {
+ mutex_lock(&tty->atomic_write_lock);
+ tty_lock(tty);
+ if (tty->ops->write && tty->count > 0)
+ tty->ops->write(tty, msg, strlen(msg));
+ tty_unlock(tty);
+ tty_write_unlock(tty);
+ }
+ return;
+}
+
+
+/**
+ * tty_write - write method for tty device file
+ * @file: tty file pointer
+ * @buf: user data to write
+ * @count: bytes to write
+ * @ppos: unused
+ *
+ * Write data to a tty device via the line discipline.
+ *
+ * Locking:
+ * Locks the line discipline as required
+ * Writes to the tty driver are serialized by the atomic_write_lock
+ * and are then processed in chunks to the device. The line discipline
+ * write method will not be invoked in parallel for each device.
+ */
+
+static ssize_t file_tty_write(struct file *file, struct kiocb *iocb, struct iov_iter *from)
+{
+ struct tty_struct *tty = file_tty(file);
+ struct tty_ldisc *ld;
+ ssize_t ret;
+
+ if (tty_paranoia_check(tty, file_inode(file), "tty_write"))
+ return -EIO;
+ if (!tty || !tty->ops->write || tty_io_error(tty))
+ return -EIO;
+ /* Short term debug to catch buggy drivers */
+ if (tty->ops->write_room == NULL)
+ tty_err(tty, "missing write_room method\n");
+ ld = tty_ldisc_ref_wait(tty);
+ if (!ld)
+ return hung_up_tty_write(iocb, from);
+ if (!ld->ops->write)
+ ret = -EIO;
+ else
+ ret = do_tty_write(ld->ops->write, tty, file, from);
+ tty_ldisc_deref(ld);
+ return ret;
+}
+
+static ssize_t tty_write(struct kiocb *iocb, struct iov_iter *from)
+{
+ return file_tty_write(iocb->ki_filp, iocb, from);
+}
+
+ssize_t redirected_tty_write(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct file *p = NULL;
+
+ spin_lock(&redirect_lock);
+ if (redirect)
+ p = get_file(redirect);
+ spin_unlock(&redirect_lock);
+
+ /*
+	 * We know the redirected tty is just another tty, we can
+ * call file_tty_write() directly with that file pointer.
+ */
+ if (p) {
+ ssize_t res;
+ res = file_tty_write(p, iocb, iter);
+ fput(p);
+ return res;
+ }
+ return tty_write(iocb, iter);
+}
+
+/**
+ * tty_send_xchar - send priority character
+ *
+ * Send a high priority character to the tty even if stopped
+ *
+ * Locking: none for xchar method, write ordering for write method.
+ */
+
+int tty_send_xchar(struct tty_struct *tty, char ch)
+{
+ int was_stopped = tty->stopped;
+
+ if (tty->ops->send_xchar) {
+ down_read(&tty->termios_rwsem);
+ tty->ops->send_xchar(tty, ch);
+ up_read(&tty->termios_rwsem);
+ return 0;
+ }
+
+ if (tty_write_lock(tty, 0) < 0)
+ return -ERESTARTSYS;
+
+ down_read(&tty->termios_rwsem);
+ if (was_stopped)
+ start_tty(tty);
+ tty->ops->write(tty, &ch, 1);
+ if (was_stopped)
+ stop_tty(tty);
+ up_read(&tty->termios_rwsem);
+ tty_write_unlock(tty);
+ return 0;
+}
+
+static char ptychar[] = "pqrstuvwxyzabcde";
+
+/**
+ * pty_line_name - generate name for a pty
+ * @driver: the tty driver in use
+ * @index: the minor number
+ * @p: output buffer of at least 6 bytes
+ *
+ * Generate a name from a driver reference and write it to the output
+ * buffer.
+ *
+ * Locking: None
+ */
+static void pty_line_name(struct tty_driver *driver, int index, char *p)
+{
+ int i = index + driver->name_base;
+ /* ->name is initialized to "ttyp", but "tty" is expected */
+ sprintf(p, "%s%c%x",
+ driver->subtype == PTY_TYPE_SLAVE ? "tty" : driver->name,
+ ptychar[i >> 4 & 0xf], i & 0xf);
+}
+
+/**
+ * tty_line_name - generate name for a tty
+ * @driver: the tty driver in use
+ * @index: the minor number
+ * @p: output buffer of at least 7 bytes
+ *
+ * Generate a name from a driver reference and write it to the output
+ * buffer.
+ *
+ * Locking: None
+ */
+static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p)
+{
+ if (driver->flags & TTY_DRIVER_UNNUMBERED_NODE)
+ return sprintf(p, "%s", driver->name);
+ else
+ return sprintf(p, "%s%d", driver->name,
+ index + driver->name_base);
+}
+
+/**
+ * tty_driver_lookup_tty() - find an existing tty, if any
+ * @driver: the driver for the tty
+ * @file: file object used by the driver's lookup() method (may be NULL)
+ * @idx: the minor number
+ *
+ * Return the tty, if found. If not found, return NULL or ERR_PTR() if the
+ * driver lookup() method returns an error.
+ *
+ * Locking: tty_mutex must be held. If the tty is found, bump the tty kref.
+ */
+static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver,
+		struct file *file, int idx)
+{
+	struct tty_struct *tty;
+
+	if (driver->ops->lookup) {
+		/* driver-private lookup requires the file object */
+		tty = file ? driver->ops->lookup(driver, file, idx) :
+			     ERR_PTR(-EIO);
+	} else {
+		tty = driver->ttys[idx];
+	}
+
+	/* tty_kref_get() tolerates NULL, so only errors are skipped */
+	if (!IS_ERR(tty))
+		tty_kref_get(tty);
+	return tty;
+}
+
+/**
+ * tty_init_termios - helper for termios setup
+ * @tty: the tty to set up
+ *
+ * Initialise the termios structure for this tty. This runs under
+ * the tty_mutex currently so we can be relaxed about ordering.
+ */
+
+void tty_init_termios(struct tty_struct *tty)
+{
+	struct ktermios *saved;
+
+	if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) {
+		tty->termios = tty->driver->init_termios;
+	} else {
+		/* Check for lazy saved data */
+		saved = tty->driver->termios[tty->index];
+		if (saved == NULL) {
+			tty->termios = tty->driver->init_termios;
+		} else {
+			tty->termios = *saved;
+			/* the line discipline always comes from the defaults */
+			tty->termios.c_line = tty->driver->init_termios.c_line;
+		}
+	}
+	/* Compatibility until drivers always set this */
+	tty->termios.c_ispeed = tty_termios_input_baud_rate(&tty->termios);
+	tty->termios.c_ospeed = tty_termios_baud_rate(&tty->termios);
+}
+EXPORT_SYMBOL_GPL(tty_init_termios);
+
+/**
+ * tty_standard_install - default tty installation
+ * @driver: the driver for the tty
+ * @tty: the tty being installed
+ *
+ * Initialise the termios, take a driver kref and an open count, and
+ * record @tty in the driver's per-index table. Always returns 0.
+ *
+ * Locking: caller serialisation (tty_mutex at present, as for the other
+ * install paths in this file)
+ */
+int tty_standard_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+ tty_init_termios(tty);
+ tty_driver_kref_get(driver);
+ tty->count++;
+ driver->ttys[tty->index] = tty;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tty_standard_install);
+
+/**
+ * tty_driver_install_tty() - install a tty entry in the driver
+ * @driver: the driver for the tty
+ * @tty: the tty
+ *
+ * Install a tty object into the driver tables. The tty->index field
+ * will be set by the time this is called. This method is responsible
+ * for ensuring any needed additional structures are allocated and
+ * configured.
+ *
+ * Locking: tty_mutex for now
+ */
+static int tty_driver_install_tty(struct tty_driver *driver,
+						struct tty_struct *tty)
+{
+	/* a driver-provided install hook overrides the standard one */
+	if (driver->ops->install)
+		return driver->ops->install(driver, tty);
+
+	return tty_standard_install(driver, tty);
+}
+
+/**
+ * tty_driver_remove_tty() - remove a tty from the driver tables
+ * @driver: the driver for the tty
+ * @tty: tty to remove
+ *
+ * Remove a tty object from the driver tables. The tty->index field
+ * will be set by the time this is called.
+ *
+ * Locking: tty_mutex for now
+ */
+static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *tty)
+{
+ if (driver->ops->remove)
+ driver->ops->remove(driver, tty);
+ else
+ driver->ttys[tty->index] = NULL;
+}
+
+/**
+ * tty_reopen() - fast re-open of an open tty
+ * @tty: the tty to open
+ *
+ * Return 0 on success, -errno on error.
+ * Re-opens on master ptys are not allowed and return -EIO.
+ * A tty with no remaining opens returns -EAGAIN; a TTY_EXCLUSIVE tty
+ * returns -EBUSY to openers without CAP_SYS_ADMIN.
+ *
+ * Locking: Caller must hold tty_lock
+ */
+static int tty_reopen(struct tty_struct *tty)
+{
+ struct tty_driver *driver = tty->driver;
+ struct tty_ldisc *ld;
+ int retval = 0;
+
+ if (driver->type == TTY_DRIVER_TYPE_PTY &&
+ driver->subtype == PTY_TYPE_MASTER)
+ return -EIO;
+
+ if (!tty->count)
+ return -EAGAIN;
+
+ if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
+ return -EBUSY;
+
+ ld = tty_ldisc_ref_wait(tty);
+ if (ld) {
+ tty_ldisc_deref(ld);
+ } else {
+ /* no ldisc reference available: reinstate one under the ldisc lock */
+ retval = tty_ldisc_lock(tty, 5 * HZ);
+ if (retval)
+ return retval;
+
+ if (!tty->ldisc)
+ retval = tty_ldisc_reinit(tty, tty->termios.c_line);
+ tty_ldisc_unlock(tty);
+ }
+
+ /* only count the open once everything above has succeeded */
+ if (retval == 0)
+ tty->count++;
+
+ return retval;
+}
+
+/**
+ * tty_init_dev - initialise a tty device
+ * @driver: tty driver we are opening a device on
+ * @idx: device index
+ *
+ * Prepare a tty device. This may not be a "new" clean device but
+ * could also be an active device. The pty drivers require special
+ * handling because of this.
+ *
+ * Locking:
+ * The function is called under the tty_mutex, which
+ * protects us from the tty struct or driver itself going away.
+ *
+ * On exit the tty device has the line discipline attached and
+ * a reference count of 1. If a pair was created for pty/tty use
+ * and the other was a pty master then it too has a reference count of 1.
+ *
+ * WSH 06/09/97: Rewritten to remove races and properly clean up after a
+ * failed open. The new code protects the open with a mutex, so it's
+ * really quite straightforward. The mutex locking can probably be
+ * relaxed for the (most common) case of reopening a tty.
+ *
+ * Return: returned tty structure (locked), or ERR_PTR() on failure
+ */
+
+struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
+{
+ struct tty_struct *tty;
+ int retval;
+
+ /*
+ * First time open is complex, especially for PTY devices.
+ * This code guarantees that either everything succeeds and the
+ * TTY is ready for operation, or else the table slots are vacated
+ * and the allocated memory released. (Except that the termios
+ * may be retained.)
+ */
+
+ if (!try_module_get(driver->owner))
+ return ERR_PTR(-ENODEV);
+
+ tty = alloc_tty_struct(driver, idx);
+ if (!tty) {
+ retval = -ENOMEM;
+ goto err_module_put;
+ }
+
+ tty_lock(tty);
+ retval = tty_driver_install_tty(driver, tty);
+ if (retval < 0)
+ goto err_free_tty;
+
+ /* install() may have set tty->port itself; else use the driver table */
+ if (!tty->port)
+ tty->port = driver->ports[idx];
+
+ if (WARN_RATELIMIT(!tty->port,
+ "%s: %s driver does not set tty->port. This would crash the kernel. Fix the driver!\n",
+ __func__, tty->driver->name)) {
+ retval = -EINVAL;
+ goto err_release_lock;
+ }
+
+ retval = tty_ldisc_lock(tty, 5 * HZ);
+ if (retval)
+ goto err_release_lock;
+ tty->port->itty = tty;
+
+ /*
+ * Structures all installed ... call the ldisc open routines.
+ * If we fail here just call release_tty to clean up. No need
+ * to decrement the use counts, as release_tty doesn't care.
+ */
+ retval = tty_ldisc_setup(tty, tty->link);
+ if (retval)
+ goto err_release_tty;
+ tty_ldisc_unlock(tty);
+ /* Return the tty locked so that it cannot vanish under the caller */
+ return tty;
+
+err_free_tty:
+ tty_unlock(tty);
+ free_tty_struct(tty);
+err_module_put:
+ module_put(driver->owner);
+ return ERR_PTR(retval);
+
+ /* call the tty release_tty routine to clean out this slot */
+err_release_tty:
+ tty_ldisc_unlock(tty);
+ tty_info_ratelimited(tty, "ldisc open failed (%d), clearing slot %d\n",
+ retval, idx);
+err_release_lock:
+ tty_unlock(tty);
+ release_tty(tty, idx);
+ return ERR_PTR(retval);
+}
+
+/**
+ * tty_save_termios() - save tty termios data in driver table
+ * @tty: tty whose termios data to save
+ *
+ * Stash the current termios into the driver's per-index slot so a
+ * later open can restore it (see tty_init_termios()).
+ *
+ * Locking: Caller guarantees serialisation with tty_init_termios().
+ */
+void tty_save_termios(struct tty_struct *tty)
+{
+	struct ktermios *saved;
+	int idx = tty->index;
+
+	/* If the port is going to reset then it has no termios to save */
+	if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS)
+		return;
+
+	/* Allocate the per-index save slot lazily on first use */
+	saved = tty->driver->termios[idx];
+	if (!saved) {
+		saved = kmalloc(sizeof(*saved), GFP_KERNEL);
+		if (!saved)
+			return;		/* best effort: nothing to do on OOM */
+		tty->driver->termios[idx] = saved;
+	}
+	*saved = tty->termios;
+}
+EXPORT_SYMBOL_GPL(tty_save_termios);
+
+/**
+ * tty_flush_works - flush all works of a tty/pty pair
+ * @tty: tty device to flush works for (or either end of a pty pair)
+ *
+ * Sync flush all works belonging to @tty (and the 'other' tty).
+ */
+static void tty_flush_works(struct tty_struct *tty)
+{
+	struct tty_struct *other = tty->link;
+
+	flush_work(&tty->SAK_work);
+	flush_work(&tty->hangup_work);
+
+	/* a pty pair flushes both ends */
+	if (other) {
+		flush_work(&other->SAK_work);
+		flush_work(&other->hangup_work);
+	}
+}
+
+/**
+ * release_one_tty - release tty structure memory
+ * @work: work of tty we are obliterating
+ *
+ * Releases memory associated with a tty structure, and clears out the
+ * driver table slots. This function is called when a device is no longer
+ * in use. It also gets called when setup of a device fails.
+ *
+ * Locking:
+ * takes the file list lock internally when working on the list
+ * of ttys that the driver keeps.
+ *
+ * This method gets called from a work queue so that the driver private
+ * cleanup ops can sleep (needed for USB at least)
+ */
+static void release_one_tty(struct work_struct *work)
+{
+ struct tty_struct *tty =
+ container_of(work, struct tty_struct, hangup_work);
+ struct tty_driver *driver = tty->driver;
+ struct module *owner = driver->owner;
+
+ if (tty->ops->cleanup)
+ tty->ops->cleanup(tty);
+
+ tty->magic = 0;
+ /* drop the driver and module references taken when the tty was set up */
+ tty_driver_kref_put(driver);
+ module_put(owner);
+
+ spin_lock(&tty->files_lock);
+ list_del_init(&tty->tty_files);
+ spin_unlock(&tty->files_lock);
+
+ put_pid(tty->pgrp);
+ put_pid(tty->session);
+ free_tty_struct(tty);
+}
+
+/**
+ * queue_release_one_tty - kref release callback for a tty
+ * @kref: kref embedded in the dying tty_struct
+ *
+ * Defers the actual teardown to a workqueue (release_one_tty()) so the
+ * driver cleanup ops may sleep.
+ */
+static void queue_release_one_tty(struct kref *kref)
+{
+ struct tty_struct *tty = container_of(kref, struct tty_struct, kref);
+
+ /* The hangup queue is now free so we can reuse it rather than
+ waste a chunk of memory for each port */
+ INIT_WORK(&tty->hangup_work, release_one_tty);
+ schedule_work(&tty->hangup_work);
+}
+
+/**
+ * tty_kref_put - release a tty kref
+ * @tty: tty device (NULL is a no-op)
+ *
+ * Release a reference to a tty device and if need be let the kref
+ * layer destruct the object for us.
+ */
+
+void tty_kref_put(struct tty_struct *tty)
+{
+	if (!tty)
+		return;
+
+	kref_put(&tty->kref, queue_release_one_tty);
+}
+EXPORT_SYMBOL(tty_kref_put);
+
+/**
+ * release_tty - release tty structure memory
+ * @tty: tty device to release
+ * @idx: index of the tty (expected to equal tty->index)
+ *
+ * Release both @tty and a possible linked partner (think pty pair),
+ * and decrement the refcount of the backing module.
+ *
+ * Locking:
+ * tty_mutex
+ * takes the file list lock internally when working on the list
+ * of ttys that the driver keeps.
+ *
+ */
+static void release_tty(struct tty_struct *tty, int idx)
+{
+ /* This should always be true but check for the moment */
+ WARN_ON(tty->index != idx);
+ WARN_ON(!mutex_is_locked(&tty_mutex));
+ if (tty->ops->shutdown)
+ tty->ops->shutdown(tty);
+ tty_save_termios(tty);
+ tty_driver_remove_tty(tty->driver, tty);
+ /* detach both ends of a pty pair from their ports */
+ if (tty->port)
+ tty->port->itty = NULL;
+ if (tty->link)
+ tty->link->port->itty = NULL;
+ if (tty->port)
+ tty_buffer_cancel_work(tty->port);
+ if (tty->link)
+ tty_buffer_cancel_work(tty->link->port);
+
+ tty_kref_put(tty->link);
+ tty_kref_put(tty);
+}
+
+/**
+ * tty_release_checks - check a tty before real release
+ * @tty: tty to check
+ * @idx: index of the tty
+ *
+ * Performs some paranoid checking before true release of the @tty.
+ * This is a no-op unless TTY_PARANOIA_CHECK is defined.
+ *
+ * Returns 0 when the tty looks consistent (or checking is compiled
+ * out), -1 when an inconsistency was found and logged.
+ */
+static int tty_release_checks(struct tty_struct *tty, int idx)
+{
+#ifdef TTY_PARANOIA_CHECK
+ if (idx < 0 || idx >= tty->driver->num) {
+ tty_debug(tty, "bad idx %d\n", idx);
+ return -1;
+ }
+
+ /* not much to check for devpts */
+ if (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM)
+ return 0;
+
+ if (tty != tty->driver->ttys[idx]) {
+ tty_debug(tty, "bad driver table[%d] = %p\n",
+ idx, tty->driver->ttys[idx]);
+ return -1;
+ }
+ if (tty->driver->other) {
+ struct tty_struct *o_tty = tty->link;
+
+ /* pty pairs must cross-reference each other consistently */
+ if (o_tty != tty->driver->other->ttys[idx]) {
+ tty_debug(tty, "bad other table[%d] = %p\n",
+ idx, tty->driver->other->ttys[idx]);
+ return -1;
+ }
+ if (o_tty->link != tty) {
+ tty_debug(tty, "bad link = %p\n", o_tty->link);
+ return -1;
+ }
+ }
+#endif
+ return 0;
+}
+
+/**
+ * tty_kclose - closes tty opened by tty_kopen
+ * @tty: tty device
+ *
+ * Performs the final steps to release and free a tty device. It is the
+ * same as tty_release_struct except that it also resets TTY_PORT_KOPENED
+ * flag on tty->port.
+ */
+void tty_kclose(struct tty_struct *tty)
+{
+ /*
+ * Ask the line discipline code to release its structures
+ */
+ tty_ldisc_release(tty);
+
+ /* Wait for pending work before tty destruction commences */
+ tty_flush_works(tty);
+
+ tty_debug_hangup(tty, "freeing structure\n");
+ /*
+ * The release_tty function takes care of the details of clearing
+ * the slots and preserving the termios structure.
+ */
+ mutex_lock(&tty_mutex);
+ tty_port_set_kopened(tty->port, 0);
+ release_tty(tty, tty->index);
+ mutex_unlock(&tty_mutex);
+}
+EXPORT_SYMBOL_GPL(tty_kclose);
+
+/**
+ * tty_release_struct - release a tty struct
+ * @tty: tty device
+ * @idx: index of the tty
+ *
+ * Performs the final steps to release and free a tty device. It is
+ * roughly the reverse of tty_init_dev.
+ */
+void tty_release_struct(struct tty_struct *tty, int idx)
+{
+ /*
+ * Ask the line discipline code to release its structures
+ */
+ tty_ldisc_release(tty);
+
+ /* Wait for pending work before tty destruction commences */
+ tty_flush_works(tty);
+
+ tty_debug_hangup(tty, "freeing structure\n");
+ /*
+ * The release_tty function takes care of the details of clearing
+ * the slots and preserving the termios structure.
+ */
+ mutex_lock(&tty_mutex);
+ release_tty(tty, idx);
+ mutex_unlock(&tty_mutex);
+}
+EXPORT_SYMBOL_GPL(tty_release_struct);
+
+/**
+ * tty_release - vfs callback for close
+ * @inode: inode of tty
+ * @filp: file pointer for handle to tty
+ *
+ * Called the last time each file handle is closed that references
+ * this tty. There may however be several such references.
+ *
+ * Locking:
+ * Takes bkl. See tty_release_dev
+ *
+ * Even releasing the tty structures is a tricky business.. We have
+ * to be very careful that the structures are all released at the
+ * same time, as interrupts might otherwise get the wrong pointers.
+ *
+ * WSH 09/09/97: rewritten to avoid some nasty race conditions that could
+ * lead to double frees or releasing memory still in use.
+ *
+ * Always returns 0 (close cannot usefully fail).
+ */
+
+int tty_release(struct inode *inode, struct file *filp)
+{
+ struct tty_struct *tty = file_tty(filp);
+ struct tty_struct *o_tty = NULL;
+ int do_sleep, final;
+ int idx;
+ long timeout = 0;
+ int once = 1;
+
+ if (tty_paranoia_check(tty, inode, __func__))
+ return 0;
+
+ tty_lock(tty);
+ check_tty_count(tty, __func__);
+
+ __tty_fasync(-1, filp, 0);
+
+ idx = tty->index;
+ if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+ tty->driver->subtype == PTY_TYPE_MASTER)
+ o_tty = tty->link;
+
+ if (tty_release_checks(tty, idx)) {
+ tty_unlock(tty);
+ return 0;
+ }
+
+ tty_debug_hangup(tty, "releasing (count=%d)\n", tty->count);
+
+ if (tty->ops->close)
+ tty->ops->close(tty, filp);
+
+ /* If tty is pty master, lock the slave pty (stable lock order) */
+ tty_lock_slave(o_tty);
+
+ /*
+ * Sanity check: if tty->count is going to zero, there shouldn't be
+ * any waiters on tty->read_wait or tty->write_wait. We test the
+ * wait queues and kick everyone out _before_ actually starting to
+ * close. This ensures that we won't block while releasing the tty
+ * structure.
+ *
+ * The test for the o_tty closing is necessary, since the master and
+ * slave sides may close in any order. If the slave side closes out
+ * first, its count will be one, since the master side holds an open.
+ * Thus this test wouldn't be triggered at the time the slave closed,
+ * so we do it now.
+ */
+ while (1) {
+ do_sleep = 0;
+
+ if (tty->count <= 1) {
+ if (waitqueue_active(&tty->read_wait)) {
+ wake_up_poll(&tty->read_wait, EPOLLIN);
+ do_sleep++;
+ }
+ if (waitqueue_active(&tty->write_wait)) {
+ wake_up_poll(&tty->write_wait, EPOLLOUT);
+ do_sleep++;
+ }
+ }
+ if (o_tty && o_tty->count <= 1) {
+ if (waitqueue_active(&o_tty->read_wait)) {
+ wake_up_poll(&o_tty->read_wait, EPOLLIN);
+ do_sleep++;
+ }
+ if (waitqueue_active(&o_tty->write_wait)) {
+ wake_up_poll(&o_tty->write_wait, EPOLLOUT);
+ do_sleep++;
+ }
+ }
+ if (!do_sleep)
+ break;
+
+ if (once) {
+ once = 0;
+ tty_warn(tty, "read/write wait queue active!\n");
+ }
+ /* exponential backoff: 0 ticks, then 1, 3, 7, ... capped below */
+ schedule_timeout_killable(timeout);
+ if (timeout < 120 * HZ)
+ timeout = 2 * timeout + 1;
+ else
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ }
+
+ if (o_tty) {
+ if (--o_tty->count < 0) {
+ tty_warn(tty, "bad slave count (%d)\n", o_tty->count);
+ o_tty->count = 0;
+ }
+ }
+ if (--tty->count < 0) {
+ tty_warn(tty, "bad tty->count (%d)\n", tty->count);
+ tty->count = 0;
+ }
+
+ /*
+ * We've decremented tty->count, so we need to remove this file
+ * descriptor off the tty->tty_files list; this serves two
+ * purposes:
+ * - check_tty_count sees the correct number of file descriptors
+ * associated with this tty.
+ * - do_tty_hangup no longer sees this file descriptor as
+ * something that needs to be handled for hangups.
+ */
+ tty_del_file(filp);
+
+ /*
+ * Perform some housekeeping before deciding whether to return.
+ *
+ * If _either_ side is closing, make sure there aren't any
+ * processes that still think tty or o_tty is their controlling
+ * tty.
+ */
+ if (!tty->count) {
+ read_lock(&tasklist_lock);
+ session_clear_tty(tty->session);
+ if (o_tty)
+ session_clear_tty(o_tty->session);
+ read_unlock(&tasklist_lock);
+ }
+
+ /* check whether both sides are closing ... */
+ final = !tty->count && !(o_tty && o_tty->count);
+
+ tty_unlock_slave(o_tty);
+ tty_unlock(tty);
+
+ /* At this point, the tty->count == 0 should ensure a dead tty
+ cannot be re-opened by a racing opener */
+
+ if (!final)
+ return 0;
+
+ tty_debug_hangup(tty, "final close\n");
+
+ tty_release_struct(tty, idx);
+ return 0;
+}
+
+/**
+ * tty_open_current_tty - get locked tty of current task
+ * @device: device number
+ * @filp: file pointer to tty
+ * @return: locked tty of the current task iff @device is /dev/tty
+ *
+ * Performs a re-open of the current task's controlling tty.
+ *
+ * We cannot return driver and index like for the other nodes because
+ * devpts will not work then. It expects inodes to be from devpts FS.
+ */
+static struct tty_struct *tty_open_current_tty(dev_t device, struct file *filp)
+{
+ struct tty_struct *tty;
+ int retval;
+
+ /* only MKDEV(TTYAUX_MAJOR, 0), i.e. /dev/tty, is handled here */
+ if (device != MKDEV(TTYAUX_MAJOR, 0))
+ return NULL;
+
+ tty = get_current_tty();
+ if (!tty)
+ return ERR_PTR(-ENXIO);
+
+ filp->f_flags |= O_NONBLOCK; /* Don't let /dev/tty block */
+ /* noctty = 1; */
+ tty_lock(tty);
+ tty_kref_put(tty); /* safe to drop the kref now */
+
+ /* tty_reopen() bumps tty->count on success */
+ retval = tty_reopen(tty);
+ if (retval < 0) {
+ tty_unlock(tty);
+ tty = ERR_PTR(retval);
+ }
+ return tty;
+}
+
+/**
+ * tty_lookup_driver - lookup a tty driver for a given device file
+ * @device: device number
+ * @filp: file pointer to tty (may be NULL for kernel-internal opens)
+ * @index: index for the device in the @return driver
+ * @return: driver for this inode (with increased refcount)
+ *
+ * If @return is not erroneous, the caller is responsible to decrement the
+ * refcount by tty_driver_kref_put.
+ *
+ * Locking: tty_mutex protects get_tty_driver
+ */
+static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
+ int *index)
+{
+ struct tty_driver *driver = NULL;
+
+ switch (device) {
+#ifdef CONFIG_VT
+ /* MKDEV(TTY_MAJOR, 0): the foreground virtual console (fg_console) */
+ case MKDEV(TTY_MAJOR, 0): {
+ extern struct tty_driver *console_driver;
+ driver = tty_driver_kref_get(console_driver);
+ *index = fg_console;
+ break;
+ }
+#endif
+ /* MKDEV(TTYAUX_MAJOR, 1): /dev/console, resolved via console_device() */
+ case MKDEV(TTYAUX_MAJOR, 1): {
+ struct tty_driver *console_driver = console_device(index);
+ if (console_driver) {
+ driver = tty_driver_kref_get(console_driver);
+ if (driver && filp) {
+ /* Don't let /dev/console block */
+ filp->f_flags |= O_NONBLOCK;
+ break;
+ }
+ }
+ /* drop the kref taken above before reporting failure */
+ if (driver)
+ tty_driver_kref_put(driver);
+ return ERR_PTR(-ENODEV);
+ }
+ default:
+ driver = get_tty_driver(device, index);
+ if (!driver)
+ return ERR_PTR(-ENODEV);
+ break;
+ }
+ return driver;
+}
+
+/**
+ * tty_kopen - open a tty device for kernel
+ * @device: dev_t of device to open
+ *
+ * Opens tty exclusively for kernel. Performs the driver lookup,
+ * makes sure it's not already opened and performs the first-time
+ * tty initialization.
+ *
+ * Returns the locked initialized &tty_struct, or an ERR_PTR:
+ * -EBUSY when the tty is already open, or the error from the
+ * driver lookup / first-time initialization.
+ *
+ * Claims the global tty_mutex to serialize:
+ * - concurrent first-time tty initialization
+ * - concurrent tty driver removal w/ lookup
+ * - concurrent tty removal from driver table
+ */
+struct tty_struct *tty_kopen(dev_t device)
+{
+ struct tty_struct *tty;
+ struct tty_driver *driver;
+ int index = -1;
+
+ mutex_lock(&tty_mutex);
+ driver = tty_lookup_driver(device, NULL, &index);
+ if (IS_ERR(driver)) {
+ mutex_unlock(&tty_mutex);
+ return ERR_CAST(driver);
+ }
+
+ /* check whether we're reopening an existing tty */
+ tty = tty_driver_lookup_tty(driver, NULL, index);
+ if (IS_ERR(tty))
+ goto out;
+
+ if (tty) {
+ /* drop kref from tty_driver_lookup_tty() */
+ tty_kref_put(tty);
+ /* kernel opens are exclusive: an existing tty means busy */
+ tty = ERR_PTR(-EBUSY);
+ } else { /* tty_init_dev returns tty with the tty_lock held */
+ tty = tty_init_dev(driver, index);
+ if (IS_ERR(tty))
+ goto out;
+ tty_port_set_kopened(tty->port, 1);
+ }
+out:
+ mutex_unlock(&tty_mutex);
+ tty_driver_kref_put(driver);
+ return tty;
+}
+EXPORT_SYMBOL_GPL(tty_kopen);
+
+/**
+ * tty_open_by_driver - open a tty device
+ * @device: dev_t of device to open
+ * @filp: file pointer to tty
+ *
+ * Performs the driver lookup, checks for a reopen, or otherwise
+ * performs the first-time tty initialization.
+ *
+ * Returns the locked initialized or re-opened &tty_struct
+ *
+ * Claims the global tty_mutex to serialize:
+ * - concurrent first-time tty initialization
+ * - concurrent tty driver removal w/ lookup
+ * - concurrent tty removal from driver table
+ */
+static struct tty_struct *tty_open_by_driver(dev_t device,
+ struct file *filp)
+{
+ struct tty_struct *tty;
+ struct tty_driver *driver = NULL;
+ int index = -1;
+ int retval;
+
+ mutex_lock(&tty_mutex);
+ driver = tty_lookup_driver(device, filp, &index);
+ if (IS_ERR(driver)) {
+ mutex_unlock(&tty_mutex);
+ return ERR_CAST(driver);
+ }
+
+ /* check whether we're reopening an existing tty */
+ tty = tty_driver_lookup_tty(driver, filp, index);
+ if (IS_ERR(tty)) {
+ mutex_unlock(&tty_mutex);
+ goto out;
+ }
+
+ if (tty) {
+ /* a tty opened via tty_kopen() is reserved for the kernel */
+ if (tty_port_kopened(tty->port)) {
+ tty_kref_put(tty);
+ mutex_unlock(&tty_mutex);
+ tty = ERR_PTR(-EBUSY);
+ goto out;
+ }
+ mutex_unlock(&tty_mutex);
+ retval = tty_lock_interruptible(tty);
+ tty_kref_put(tty); /* drop kref from tty_driver_lookup_tty() */
+ if (retval) {
+ if (retval == -EINTR)
+ retval = -ERESTARTSYS;
+ tty = ERR_PTR(retval);
+ goto out;
+ }
+ retval = tty_reopen(tty);
+ if (retval < 0) {
+ tty_unlock(tty);
+ tty = ERR_PTR(retval);
+ }
+ } else { /* Returns with the tty_lock held for now */
+ tty = tty_init_dev(driver, index);
+ mutex_unlock(&tty_mutex);
+ }
+out:
+ tty_driver_kref_put(driver);
+ return tty;
+}
+
+/**
+ * tty_open - open a tty device
+ * @inode: inode of device file
+ * @filp: file pointer to tty
+ *
+ * tty_open and tty_release keep up the tty count that contains the
+ * number of opens done on a tty. We cannot use the inode-count, as
+ * different inodes might point to the same tty.
+ *
+ * Open-counting is needed for pty masters, as well as for keeping
+ * track of serial lines: DTR is dropped when the last close happens.
+ * (This is not done solely through tty->count, now. - Ted 1/27/92)
+ *
+ * The termios state of a pty is reset on first open so that
+ * settings don't persist across reuse.
+ *
+ * Locking: tty_mutex protects tty, tty_lookup_driver and tty_init_dev.
+ * tty->count should protect the rest.
+ * ->siglock protects ->signal/->sighand
+ *
+ * Note: the tty_unlock/lock cases without a ref are only safe due to
+ * tty_mutex
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+
+static int tty_open(struct inode *inode, struct file *filp)
+{
+ struct tty_struct *tty;
+ int noctty, retval;
+ dev_t device = inode->i_rdev;
+ unsigned saved_flags = filp->f_flags;
+
+ nonseekable_open(inode, filp);
+
+retry_open:
+ retval = tty_alloc_file(filp);
+ if (retval)
+ return -ENOMEM;
+
+ tty = tty_open_current_tty(device, filp);
+ if (!tty)
+ tty = tty_open_by_driver(device, filp);
+
+ if (IS_ERR(tty)) {
+ tty_free_file(filp);
+ retval = PTR_ERR(tty);
+ /* -EAGAIN (close in progress): retry unless a signal is pending */
+ if (retval != -EAGAIN || signal_pending(current))
+ return retval;
+ schedule();
+ goto retry_open;
+ }
+
+ tty_add_file(tty, filp);
+
+ check_tty_count(tty, __func__);
+ tty_debug_hangup(tty, "opening (count=%d)\n", tty->count);
+
+ if (tty->ops->open)
+ retval = tty->ops->open(tty, filp);
+ else
+ retval = -ENODEV;
+ /* undo the O_NONBLOCK forced by the /dev/tty and /dev/console paths */
+ filp->f_flags = saved_flags;
+
+ if (retval) {
+ tty_debug_hangup(tty, "open error %d, releasing\n", retval);
+
+ tty_unlock(tty); /* need to call tty_release without BTM */
+ tty_release(inode, filp);
+ if (retval != -ERESTARTSYS)
+ return retval;
+
+ if (signal_pending(current))
+ return retval;
+
+ schedule();
+ /*
+ * Need to reset f_op in case a hangup happened.
+ */
+ if (tty_hung_up_p(filp))
+ filp->f_op = &tty_fops;
+ goto retry_open;
+ }
+ clear_bit(TTY_HUPPED, &tty->flags);
+
+ noctty = (filp->f_flags & O_NOCTTY) ||
+ (IS_ENABLED(CONFIG_VT) && device == MKDEV(TTY_MAJOR, 0)) ||
+ device == MKDEV(TTYAUX_MAJOR, 1) ||
+ (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+ tty->driver->subtype == PTY_TYPE_MASTER);
+ if (!noctty)
+ tty_open_proc_set_tty(filp, tty);
+ tty_unlock(tty);
+ return 0;
+}
+
+
+
+/**
+ * tty_poll - check tty status
+ * @filp: file being polled
+ * @wait: poll wait structures to update
+ *
+ * Call the line discipline polling method to obtain the poll
+ * status of the device.
+ *
+ * Locking: locks called line discipline but ldisc poll method
+ * may be re-entered freely by other callers.
+ */
+
+static __poll_t tty_poll(struct file *filp, poll_table *wait)
+{
+	struct tty_struct *tty = file_tty(filp);
+	struct tty_ldisc *ld;
+	__poll_t mask = 0;
+
+	if (tty_paranoia_check(tty, file_inode(filp), "tty_poll"))
+		return 0;
+
+	/* no ldisc reference available: report hung-up poll state */
+	ld = tty_ldisc_ref_wait(tty);
+	if (!ld)
+		return hung_up_tty_poll(filp, wait);
+
+	if (ld->ops->poll)
+		mask = ld->ops->poll(tty, filp, wait);
+	tty_ldisc_deref(ld);
+
+	return mask;
+}
+
+/*
+ * __tty_fasync - core fasync handling for ttys
+ *
+ * Registers/unregisters @filp for SIGIO delivery via fasync_helper() and,
+ * when enabling, directs the signals at the tty's foreground process group
+ * (or the current task if there is none). Caller must hold the tty lock
+ * (see tty_fasync()); also called from tty_release().
+ */
+static int __tty_fasync(int fd, struct file *filp, int on)
+{
+ struct tty_struct *tty = file_tty(filp);
+ unsigned long flags;
+ int retval = 0;
+
+ if (tty_paranoia_check(tty, file_inode(filp), "tty_fasync"))
+ goto out;
+
+ retval = fasync_helper(fd, filp, on, &tty->fasync);
+ if (retval <= 0)
+ goto out;
+
+ if (on) {
+ enum pid_type type;
+ struct pid *pid;
+
+ spin_lock_irqsave(&tty->ctrl_lock, flags);
+ if (tty->pgrp) {
+ pid = tty->pgrp;
+ type = PIDTYPE_PGID;
+ } else {
+ pid = task_pid(current);
+ type = PIDTYPE_TGID;
+ }
+ /* hold a pid ref across the unlock so __f_setown sees a live pid */
+ get_pid(pid);
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ __f_setown(filp, pid, type, 0);
+ put_pid(pid);
+ retval = 0;
+ }
+out:
+ return retval;
+}
+
+/*
+ * tty_fasync - fasync file operation for ttys
+ *
+ * Takes the tty lock, rejects hung-up ttys with -ENOTTY, and delegates
+ * the real work to __tty_fasync().
+ */
+static int tty_fasync(int fd, struct file *filp, int on)
+{
+ struct tty_struct *tty = file_tty(filp);
+ int retval = -ENOTTY;
+
+ tty_lock(tty);
+ if (!tty_hung_up_p(filp))
+ retval = __tty_fasync(fd, filp, on);
+ tty_unlock(tty);
+
+ return retval;
+}
+
+/**
+ * tiocsti - fake input character
+ * @tty: tty to fake input into
+ * @p: pointer to character
+ *
+ * Fake input to a tty device. Does the necessary locking and
+ * input management.
+ *
+ * FIXME: does not honour flow control ??
+ *
+ * Locking:
+ * Called functions take tty_ldiscs_lock
+ * current->signal->tty check is safe without locks
+ */
+
+static int tiocsti(struct tty_struct *tty, char __user *p)
+{
+ char ch, mbz = 0;
+ struct tty_ldisc *ld;
+
+ /* only the controlling tty's owner or CAP_SYS_ADMIN may inject input */
+ if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (get_user(ch, p))
+ return -EFAULT;
+ tty_audit_tiocsti(tty, ch);
+ ld = tty_ldisc_ref_wait(tty);
+ if (!ld)
+ return -EIO;
+ tty_buffer_lock_exclusive(tty->port);
+ if (ld->ops->receive_buf)
+ /* mbz supplies the (zero) flag byte accompanying ch */
+ ld->ops->receive_buf(tty, &ch, &mbz, 1);
+ tty_buffer_unlock_exclusive(tty->port);
+ tty_ldisc_deref(ld);
+ return 0;
+}
+
+/**
+ * tiocgwinsz - implement window query ioctl
+ * @tty: tty
+ * @arg: user buffer for result
+ *
+ * Copies the kernel idea of the window size into the user buffer.
+ * Returns 0 on success, -EFAULT if the copy to userspace fails.
+ *
+ * Locking: tty->winsize_mutex is taken to ensure the winsize data
+ * is consistent.
+ */
+
+static int tiocgwinsz(struct tty_struct *tty, struct winsize __user *arg)
+{
+	int failed;
+
+	mutex_lock(&tty->winsize_mutex);
+	failed = copy_to_user(arg, &tty->winsize, sizeof(*arg));
+	mutex_unlock(&tty->winsize_mutex);
+
+	if (failed)
+		return -EFAULT;
+	return 0;
+}
+
+/**
+ * tty_do_resize - resize event
+ * @tty: tty being resized
+ * @ws: new dimensions
+ *
+ * Update the termios variables and send the necessary signals to
+ * perform a terminal resize correctly.
+ */
+
+int tty_do_resize(struct tty_struct *tty, struct winsize *ws)
+{
+ struct pid *pgrp;
+
+ /* Lock the tty */
+ mutex_lock(&tty->winsize_mutex);
+ /* unchanged size: skip both the signal and the store */
+ if (!memcmp(ws, &tty->winsize, sizeof(*ws)))
+ goto done;
+
+ /* Signal the foreground process group */
+ pgrp = tty_get_pgrp(tty);
+ if (pgrp)
+ kill_pgrp(pgrp, SIGWINCH, 1);
+ put_pid(pgrp);
+
+ tty->winsize = *ws;
+done:
+ mutex_unlock(&tty->winsize_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(tty_do_resize);
+
+/**
+ * tiocswinsz - implement window size set ioctl
+ * @tty: tty side of tty
+ * @arg: user buffer for result
+ *
+ * Copies the user idea of the window size to the kernel. Traditionally
+ * this is just advisory information but for the Linux console it
+ * actually has driver level meaning and triggers a VC resize.
+ *
+ * Locking:
+ * Driver dependent. The default do_resize method takes the
+ * tty termios mutex and ctrl_lock. The console takes its own lock
+ * then calls into the default method.
+ */
+
+static int tiocswinsz(struct tty_struct *tty, struct winsize __user *arg)
+{
+	struct winsize new_ws;
+
+	if (copy_from_user(&new_ws, arg, sizeof(*arg)))
+		return -EFAULT;
+
+	/* drivers (e.g. the console) may supply their own resize hook */
+	if (!tty->ops->resize)
+		return tty_do_resize(tty, &new_ws);
+
+	return tty->ops->resize(tty, &new_ws);
+}
+
+/**
+ * tioccons - allow admin to move logical console
+ * @file: the file to become console
+ *
+ * Allow the administrator to move the redirected console device.
+ * Calling this on a file that is already the redirect target clears
+ * the redirection instead.
+ *
+ * Locking: uses redirect_lock to guard the redirect information
+ */
+
+static int tioccons(struct file *file)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ /* the file is the current redirect target: clear the redirection */
+ if (file->f_op->write_iter == redirected_tty_write) {
+ struct file *f;
+ spin_lock(&redirect_lock);
+ f = redirect;
+ redirect = NULL;
+ spin_unlock(&redirect_lock);
+ /* drop the reference outside the spinlock */
+ if (f)
+ fput(f);
+ return 0;
+ }
+ if (file->f_op->write_iter != tty_write)
+ return -ENOTTY;
+ if (!(file->f_mode & FMODE_WRITE))
+ return -EBADF;
+ if (!(file->f_mode & FMODE_CAN_WRITE))
+ return -EINVAL;
+ spin_lock(&redirect_lock);
+ if (redirect) {
+ spin_unlock(&redirect_lock);
+ return -EBUSY;
+ }
+ redirect = get_file(file);
+ spin_unlock(&redirect_lock);
+ return 0;
+}
+
+/**
+ * tiocsetd - set line discipline
+ * @tty: tty device
+ * @p: pointer to user data
+ *
+ * Set the line discipline according to user request.
+ *
+ * Locking: see tty_set_ldisc, this function is just a helper
+ */
+
+static int tiocsetd(struct tty_struct *tty, int __user *p)
+{
+	int disc;
+
+	if (get_user(disc, p))
+		return -EFAULT;
+
+	return tty_set_ldisc(tty, disc);
+}
+
+/**
+ * tiocgetd - get line discipline
+ * @tty: tty device
+ * @p: pointer to user data
+ *
+ * Retrieves the line discipline id directly from the ldisc.
+ *
+ * Locking: waits for ldisc reference (in case the line discipline
+ * is changing or the tty is being hungup)
+ */
+
+static int tiocgetd(struct tty_struct *tty, int __user *p)
+{
+	struct tty_ldisc *ld;
+	int ret = -EIO;
+
+	ld = tty_ldisc_ref_wait(tty);
+	if (ld) {
+		ret = put_user(ld->ops->num, p);
+		tty_ldisc_deref(ld);
+	}
+	return ret;
+}
+
+/**
+ * send_break - performed time break
+ * @tty: device to break on
+ * @duration: timeout in mS
+ *
+ * Perform a timed break on hardware that lacks its own driver level
+ * timed break functionality. Returns 0 if breaks are unsupported,
+ * -EINTR if interrupted by a signal, otherwise the driver's result.
+ *
+ * Locking:
+ * atomic_write_lock serializes
+ *
+ */
+
+static int send_break(struct tty_struct *tty, unsigned int duration)
+{
+ int retval;
+
+ if (tty->ops->break_ctl == NULL)
+ return 0;
+
+ /* hardware-timed break: the driver handles the duration itself */
+ if (tty->driver->flags & TTY_DRIVER_HARDWARE_BREAK)
+ retval = tty->ops->break_ctl(tty, duration);
+ else {
+ /* Do the work ourselves */
+ if (tty_write_lock(tty, 0) < 0)
+ return -EINTR;
+ retval = tty->ops->break_ctl(tty, -1);
+ if (retval)
+ goto out;
+ if (!signal_pending(current))
+ msleep_interruptible(duration);
+ /* always attempt to turn the break back off, even if interrupted */
+ retval = tty->ops->break_ctl(tty, 0);
+out:
+ tty_write_unlock(tty);
+ if (signal_pending(current))
+ retval = -EINTR;
+ }
+ return retval;
+}
+
+/**
+ * tty_tiocmget - get modem status
+ * @tty: tty device
+ * @p: pointer to result
+ *
+ * Obtain the modem status bits from the tty driver if the feature
+ * is supported. Return -ENOTTY if it is not available.
+ *
+ * Locking: none (up to the driver)
+ */
+
+static int tty_tiocmget(struct tty_struct *tty, int __user *p)
+{
+	int retval;
+
+	if (!tty->ops->tiocmget)
+		return -ENOTTY;
+
+	retval = tty->ops->tiocmget(tty);
+	/* negative values are driver errors, passed through untouched */
+	if (retval >= 0)
+		retval = put_user(retval, p);
+
+	return retval;
+}
+
+/**
+ * tty_tiocmset - set modem status
+ * @tty: tty device
+ * @cmd: command - clear bits, set bits or set all
+ * @p: pointer to desired bits
+ *
+ * Set the modem status bits from the tty driver if the feature
+ * is supported. Return -ENOTTY if it is not available.
+ *
+ * Locking: none (up to the driver)
+ */
+
+static int tty_tiocmset(struct tty_struct *tty, unsigned int cmd,
+ unsigned __user *p)
+{
+ int retval;
+ unsigned int set, clear, val;
+
+ if (tty->ops->tiocmset == NULL)
+ return -ENOTTY;
+
+ retval = get_user(val, p);
+ if (retval)
+ return retval;
+ set = clear = 0;
+ switch (cmd) {
+ case TIOCMBIS:
+ set = val;
+ break;
+ case TIOCMBIC:
+ clear = val;
+ break;
+ case TIOCMSET:
+ /* Absolute set: everything not requested is cleared. */
+ set = val;
+ clear = ~val;
+ break;
+ }
+ /* Only output-control bits may be changed from userspace. */
+ set &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP;
+ clear &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP;
+ return tty->ops->tiocmset(tty, set, clear);
+}
+
+/*
+ * tty_tiocgicount - TIOCGICOUNT handler: copy the driver's interrupt
+ * counters (struct serial_icounter_struct) to userspace.
+ * Returns -EINVAL if the driver has no get_icount method.
+ */
+static int tty_tiocgicount(struct tty_struct *tty, void __user *arg)
+{
+ int retval = -EINVAL;
+ struct serial_icounter_struct icount;
+ memset(&icount, 0, sizeof(icount)); /* avoid leaking stack to user */
+ if (tty->ops->get_icount)
+ retval = tty->ops->get_icount(tty, &icount);
+ if (retval != 0)
+ return retval;
+ if (copy_to_user(arg, &icount, sizeof(icount)))
+ return -EFAULT;
+ return 0;
+}
+
+/*
+ * tty_tiocsserial - TIOCSSERIAL handler: copy a serial_struct from
+ * userspace and hand it to the driver's set_serial method. Warns
+ * (rate-limited) when the caller passes deprecated ASYNC_* flags.
+ */
+static int tty_tiocsserial(struct tty_struct *tty, struct serial_struct __user *ss)
+{
+ static DEFINE_RATELIMIT_STATE(depr_flags,
+ DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+ char comm[TASK_COMM_LEN];
+ struct serial_struct v;
+ int flags;
+
+ if (copy_from_user(&v, ss, sizeof(*ss)))
+ return -EFAULT;
+
+ flags = v.flags & ASYNC_DEPRECATED;
+
+ if (flags && __ratelimit(&depr_flags))
+ pr_warn("%s: '%s' is using deprecated serial flags (with no effect): %.8x\n",
+ __func__, get_task_comm(comm, current), flags);
+ if (!tty->ops->set_serial)
+ return -ENOTTY;
+ return tty->ops->set_serial(tty, &v);
+}
+
+/*
+ * tty_tiocgserial - TIOCGSERIAL handler: fetch serial_struct from the
+ * driver and copy it to userspace. -ENOTTY if unsupported.
+ */
+static int tty_tiocgserial(struct tty_struct *tty, struct serial_struct __user *ss)
+{
+ struct serial_struct v;
+ int err;
+
+ memset(&v, 0, sizeof(v)); /* zero any fields the driver leaves untouched */
+ if (!tty->ops->get_serial)
+ return -ENOTTY;
+ err = tty->ops->get_serial(tty, &v);
+ if (!err && copy_to_user(ss, &v, sizeof(v)))
+ err = -EFAULT;
+ return err;
+}
+
+/*
+ * if pty, return the slave side (real_tty)
+ * otherwise, return self
+ */
+static struct tty_struct *tty_pair_get_tty(struct tty_struct *tty)
+{
+ /* Only a pty master has a meaningful ->link partner to resolve. */
+ if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+ tty->driver->subtype == PTY_TYPE_MASTER)
+ tty = tty->link;
+ return tty;
+}
+
+/*
+ * Split this up, as gcc can choke on it otherwise..
+ */
+/*
+ * tty_ioctl - main ioctl dispatcher for tty devices.
+ *
+ * Resolution order: generic tty ioctls handled here, then the driver's
+ * ->ioctl, then the line discipline's ->ioctl. -ENOIOCTLCMD from a lower
+ * layer falls through to the next one.
+ */
+long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct tty_struct *tty = file_tty(file);
+ struct tty_struct *real_tty;
+ void __user *p = (void __user *)arg;
+ int retval;
+ struct tty_ldisc *ld;
+
+ if (tty_paranoia_check(tty, file_inode(file), "tty_ioctl"))
+ return -EINVAL;
+
+ /* For a pty master, window-size etc. act on the slave side. */
+ real_tty = tty_pair_get_tty(tty);
+
+ /*
+ * Factor out some common prep work
+ */
+ switch (cmd) {
+ case TIOCSETD:
+ case TIOCSBRK:
+ case TIOCCBRK:
+ case TCSBRK:
+ case TCSBRKP:
+ retval = tty_check_change(tty);
+ if (retval)
+ return retval;
+ if (cmd != TIOCCBRK) {
+ /* drain output before ldisc change / break */
+ tty_wait_until_sent(tty, 0);
+ if (signal_pending(current))
+ return -EINTR;
+ }
+ break;
+ }
+
+ /*
+ * Now do the stuff.
+ */
+ switch (cmd) {
+ case TIOCSTI:
+ return tiocsti(tty, p);
+ case TIOCGWINSZ:
+ return tiocgwinsz(real_tty, p);
+ case TIOCSWINSZ:
+ return tiocswinsz(real_tty, p);
+ case TIOCCONS:
+ /* console redirection only makes sense on the slave side */
+ return real_tty != tty ? -EINVAL : tioccons(file);
+ case TIOCEXCL:
+ set_bit(TTY_EXCLUSIVE, &tty->flags);
+ return 0;
+ case TIOCNXCL:
+ clear_bit(TTY_EXCLUSIVE, &tty->flags);
+ return 0;
+ case TIOCGEXCL:
+ {
+ int excl = test_bit(TTY_EXCLUSIVE, &tty->flags);
+ return put_user(excl, (int __user *)p);
+ }
+ case TIOCGETD:
+ return tiocgetd(tty, p);
+ case TIOCSETD:
+ return tiocsetd(tty, p);
+ case TIOCVHANGUP:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ tty_vhangup(tty);
+ return 0;
+ case TIOCGDEV:
+ {
+ unsigned int ret = new_encode_dev(tty_devnum(real_tty));
+ return put_user(ret, (unsigned int __user *)p);
+ }
+ /*
+ * Break handling
+ */
+ case TIOCSBRK: /* Turn break on, unconditionally */
+ if (tty->ops->break_ctl)
+ return tty->ops->break_ctl(tty, -1);
+ return 0;
+ case TIOCCBRK: /* Turn break off, unconditionally */
+ if (tty->ops->break_ctl)
+ return tty->ops->break_ctl(tty, 0);
+ return 0;
+ case TCSBRK: /* SVID version: non-zero arg --> no break */
+ /* non-zero arg means wait for all output data
+ * to be sent (performed above) but don't send break.
+ * This is used by the tcdrain() termios function.
+ */
+ if (!arg)
+ return send_break(tty, 250);
+ return 0;
+ case TCSBRKP: /* support for POSIX tcsendbreak() */
+ return send_break(tty, arg ? arg*100 : 250);
+
+ case TIOCMGET:
+ return tty_tiocmget(tty, p);
+ case TIOCMSET:
+ case TIOCMBIC:
+ case TIOCMBIS:
+ return tty_tiocmset(tty, cmd, p);
+ case TIOCGICOUNT:
+ return tty_tiocgicount(tty, p);
+ case TCFLSH:
+ switch (arg) {
+ case TCIFLUSH:
+ case TCIOFLUSH:
+ /* flush tty buffer and allow ldisc to process ioctl */
+ tty_buffer_flush(tty, NULL);
+ break;
+ }
+ break; /* fall through to driver/ldisc handling below */
+ case TIOCSSERIAL:
+ return tty_tiocsserial(tty, p);
+ case TIOCGSERIAL:
+ return tty_tiocgserial(tty, p);
+ case TIOCGPTPEER:
+ /* Special because the struct file is needed */
+ return ptm_open_peer(file, tty, (int)arg);
+ default:
+ retval = tty_jobctrl_ioctl(tty, real_tty, file, cmd, arg);
+ if (retval != -ENOIOCTLCMD)
+ return retval;
+ }
+ /* Not a generic ioctl: give the driver a chance... */
+ if (tty->ops->ioctl) {
+ retval = tty->ops->ioctl(tty, cmd, arg);
+ if (retval != -ENOIOCTLCMD)
+ return retval;
+ }
+ /* ...then the line discipline. */
+ ld = tty_ldisc_ref_wait(tty);
+ if (!ld)
+ return hung_up_tty_ioctl(file, cmd, arg);
+ retval = -EINVAL;
+ if (ld->ops->ioctl) {
+ retval = ld->ops->ioctl(tty, file, cmd, arg);
+ if (retval == -ENOIOCTLCMD)
+ retval = -ENOTTY;
+ }
+ tty_ldisc_deref(ld);
+ return retval;
+}
+
+#ifdef CONFIG_COMPAT
+
+/*
+ * 32-bit layout of struct serial_struct, used to translate
+ * TIOCSSERIAL/TIOCGSERIAL for 32-bit userspace on a 64-bit kernel.
+ * Pointer-sized fields (iomem_base) shrink to compat_uint_t.
+ */
+struct serial_struct32 {
+ compat_int_t type;
+ compat_int_t line;
+ compat_uint_t port;
+ compat_int_t irq;
+ compat_int_t flags;
+ compat_int_t xmit_fifo_size;
+ compat_int_t custom_divisor;
+ compat_int_t baud_base;
+ unsigned short close_delay;
+ char io_type;
+ char reserved_char;
+ compat_int_t hub6;
+ unsigned short closing_wait; /* time to wait before closing */
+ unsigned short closing_wait2; /* no longer used... */
+ compat_uint_t iomem_base; /* 32-bit truncated pointer */
+ unsigned short iomem_reg_shift;
+ unsigned int port_high;
+ /* compat_ulong_t iomap_base FIXME */
+ compat_int_t reserved;
+};
+
+/*
+ * compat_tty_tiocsserial - 32-bit TIOCSSERIAL: widen serial_struct32
+ * into a native serial_struct and hand it to the driver, warning
+ * (rate-limited) about deprecated ASYNC_* flags.
+ */
+static int compat_tty_tiocsserial(struct tty_struct *tty,
+ struct serial_struct32 __user *ss)
+{
+ static DEFINE_RATELIMIT_STATE(depr_flags,
+ DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+ char comm[TASK_COMM_LEN];
+ struct serial_struct32 v32;
+ struct serial_struct v;
+ int flags;
+
+ if (copy_from_user(&v32, ss, sizeof(*ss)))
+ return -EFAULT;
+
+ /* leading fields are layout-identical up to iomem_base */
+ memcpy(&v, &v32, offsetof(struct serial_struct32, iomem_base));
+ v.iomem_base = compat_ptr(v32.iomem_base);
+ v.iomem_reg_shift = v32.iomem_reg_shift;
+ v.port_high = v32.port_high;
+ v.iomap_base = 0;
+
+ flags = v.flags & ASYNC_DEPRECATED;
+
+ if (flags && __ratelimit(&depr_flags))
+ pr_warn("%s: '%s' is using deprecated serial flags (with no effect): %.8x\n",
+ __func__, get_task_comm(comm, current), flags);
+ if (!tty->ops->set_serial)
+ return -ENOTTY;
+ return tty->ops->set_serial(tty, &v);
+}
+
+/*
+ * compat_tty_tiocgserial - 32-bit TIOCGSERIAL: fetch the native
+ * serial_struct from the driver and narrow it into serial_struct32.
+ */
+static int compat_tty_tiocgserial(struct tty_struct *tty,
+ struct serial_struct32 __user *ss)
+{
+ struct serial_struct32 v32;
+ struct serial_struct v;
+ int err;
+
+ memset(&v, 0, sizeof(v));
+ memset(&v32, 0, sizeof(v32));
+
+ if (!tty->ops->get_serial)
+ return -ENOTTY;
+ err = tty->ops->get_serial(tty, &v);
+ if (!err) {
+ /* leading fields are layout-identical up to iomem_base */
+ memcpy(&v32, &v, offsetof(struct serial_struct32, iomem_base));
+ /* pointer doesn't fit in 32 bits: report a sentinel value */
+ v32.iomem_base = (unsigned long)v.iomem_base >> 32 ?
+ 0xfffffff : ptr_to_compat(v.iomem_base);
+ v32.iomem_reg_shift = v.iomem_reg_shift;
+ v32.port_high = v.port_high;
+ if (copy_to_user(ss, &v32, sizeof(v32)))
+ err = -EFAULT;
+ }
+ return err;
+}
+/*
+ * tty_compat_ioctl - compat_ioctl entry point for 32-bit userspace.
+ *
+ * Commands that are layout-compatible (or take no pointer) are routed
+ * straight to tty_ioctl(), with compat_ptr() applied where the argument
+ * is a pointer. Only TIOCSSERIAL/TIOCGSERIAL need real translation.
+ */
+static long tty_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct tty_struct *tty = file_tty(file);
+ struct tty_ldisc *ld;
+ int retval = -ENOIOCTLCMD;
+
+ switch (cmd) {
+ /* pointer-argument commands: convert via compat_ptr() */
+ case TIOCOUTQ:
+ case TIOCSTI:
+ case TIOCGWINSZ:
+ case TIOCSWINSZ:
+ case TIOCGEXCL:
+ case TIOCGETD:
+ case TIOCSETD:
+ case TIOCGDEV:
+ case TIOCMGET:
+ case TIOCMSET:
+ case TIOCMBIC:
+ case TIOCMBIS:
+ case TIOCGICOUNT:
+ case TIOCGPGRP:
+ case TIOCSPGRP:
+ case TIOCGSID:
+ case TIOCSERGETLSR:
+ case TIOCGRS485:
+ case TIOCSRS485:
+#ifdef TIOCGETP
+ case TIOCGETP:
+ case TIOCSETP:
+ case TIOCSETN:
+#endif
+#ifdef TIOCGETC
+ case TIOCGETC:
+ case TIOCSETC:
+#endif
+#ifdef TIOCGLTC
+ case TIOCGLTC:
+ case TIOCSLTC:
+#endif
+ case TCSETSF:
+ case TCSETSW:
+ case TCSETS:
+ case TCGETS:
+#ifdef TCGETS2
+ case TCGETS2:
+ case TCSETSF2:
+ case TCSETSW2:
+ case TCSETS2:
+#endif
+ case TCGETA:
+ case TCSETAF:
+ case TCSETAW:
+ case TCSETA:
+ case TIOCGLCKTRMIOS:
+ case TIOCSLCKTRMIOS:
+#ifdef TCGETX
+ case TCGETX:
+ case TCSETX:
+ case TCSETXW:
+ case TCSETXF:
+#endif
+ case TIOCGSOFTCAR:
+ case TIOCSSOFTCAR:
+
+ case PPPIOCGCHAN:
+ case PPPIOCGUNIT:
+ return tty_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+ /* integer/no-argument commands: pass arg through unchanged */
+ case TIOCCONS:
+ case TIOCEXCL:
+ case TIOCNXCL:
+ case TIOCVHANGUP:
+ case TIOCSBRK:
+ case TIOCCBRK:
+ case TCSBRK:
+ case TCSBRKP:
+ case TCFLSH:
+ case TIOCGPTPEER:
+ case TIOCNOTTY:
+ case TIOCSCTTY:
+ case TCXONC:
+ case TIOCMIWAIT:
+ case TIOCSERCONFIG:
+ return tty_ioctl(file, cmd, arg);
+ }
+
+ if (tty_paranoia_check(tty, file_inode(file), "tty_ioctl"))
+ return -EINVAL;
+
+ switch (cmd) {
+ case TIOCSSERIAL:
+ return compat_tty_tiocsserial(tty, compat_ptr(arg));
+ case TIOCGSERIAL:
+ return compat_tty_tiocgserial(tty, compat_ptr(arg));
+ }
+ if (tty->ops->compat_ioctl) {
+ retval = tty->ops->compat_ioctl(tty, cmd, arg);
+ if (retval != -ENOIOCTLCMD)
+ return retval;
+ }
+
+ ld = tty_ldisc_ref_wait(tty);
+ if (!ld)
+ return hung_up_tty_compat_ioctl(file, cmd, arg);
+ if (ld->ops->compat_ioctl)
+ retval = ld->ops->compat_ioctl(tty, file, cmd, arg);
+ if (retval == -ENOIOCTLCMD && ld->ops->ioctl)
+ /*
+ * NOTE(review): cmd is run through compat_ptr() here even
+ * though it is a command number, not a pointer — looks
+ * suspicious; confirm against upstream 5.10 tty_io.c.
+ */
+ retval = ld->ops->ioctl(tty, file,
+ (unsigned long)compat_ptr(cmd), arg);
+ tty_ldisc_deref(ld);
+
+ return retval;
+}
+#endif
+
+/*
+ * this_tty - iterate_fd() callback: returns fd+1 when @file refers to
+ * the tty @t, 0 otherwise (0 means "keep iterating").
+ */
+static int this_tty(const void *t, struct file *file, unsigned fd)
+{
+ /* cheap filter: only files using the tty read path can match */
+ if (likely(file->f_op->read_iter != tty_read))
+ return 0;
+ return file_tty(file) != t ? 0 : fd + 1;
+}
+
+/*
+ * This implements the "Secure Attention Key" --- the idea is to
+ * prevent trojan horses by killing all processes associated with this
+ * tty when the user hits the "Secure Attention Key". Required for
+ * super-paranoid applications --- see the Orange Book for more details.
+ *
+ * This code could be nicer; ideally it should send a HUP, wait a few
+ * seconds, then send a INT, and then a KILL signal. But you then
+ * have to coordinate with the init process, since all processes associated
+ * with the current tty must be dead before the new getty is allowed
+ * to spawn.
+ *
+ * Now, if it would be correct ;-/ The current code has a nasty hole -
+ * it doesn't catch files in flight. We may send the descriptor to ourselves
+ * via AF_UNIX socket, close it and later fetch from socket. FIXME.
+ *
+ * Nasty bug: do_SAK is being called in interrupt context. This can
+ * deadlock. We punt it up to process context. AKPM - 16Mar2001
+ */
+void __do_SAK(struct tty_struct *tty)
+{
+#ifdef TTY_SOFT_SAK
+ /* soft variant: just hang up the tty instead of killing users */
+ tty_hangup(tty);
+#else
+ struct task_struct *g, *p;
+ struct pid *session;
+ int i;
+ unsigned long flags;
+
+ if (!tty)
+ return;
+
+ /* snapshot the session under ctrl_lock; pid ref dropped at the end */
+ spin_lock_irqsave(&tty->ctrl_lock, flags);
+ session = get_pid(tty->session);
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+
+ tty_ldisc_flush(tty);
+
+ tty_driver_flush_buffer(tty);
+
+ read_lock(&tasklist_lock);
+ /* Kill the entire session */
+ do_each_pid_task(session, PIDTYPE_SID, p) {
+ tty_notice(tty, "SAK: killed process %d (%s): by session\n",
+ task_pid_nr(p), p->comm);
+ group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_SID);
+ } while_each_pid_task(session, PIDTYPE_SID, p);
+
+ /* Now kill any processes that happen to have the tty open */
+ do_each_thread(g, p) {
+ if (p->signal->tty == tty) {
+ tty_notice(tty, "SAK: killed process %d (%s): by controlling tty\n",
+ task_pid_nr(p), p->comm);
+ group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_SID);
+ continue;
+ }
+ task_lock(p);
+ /* scan the task's fd table for an open reference to this tty */
+ i = iterate_fd(p->files, 0, this_tty, tty);
+ if (i != 0) {
+ tty_notice(tty, "SAK: killed process %d (%s): by fd#%d\n",
+ task_pid_nr(p), p->comm, i - 1);
+ group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_SID);
+ }
+ task_unlock(p);
+ } while_each_thread(g, p);
+ read_unlock(&tasklist_lock);
+ put_pid(session);
+#endif
+}
+
+/* Workqueue trampoline: run __do_SAK() in process context. */
+static void do_SAK_work(struct work_struct *work)
+{
+ struct tty_struct *tty =
+ container_of(work, struct tty_struct, SAK_work);
+ __do_SAK(tty);
+}
+
+/*
+ * The tq handling here is a little racy - tty->SAK_work may already be queued.
+ * Fortunately we don't need to worry, because if ->SAK_work is already queued,
+ * the values which we write to it will be identical to the values which it
+ * already has. --akpm
+ */
+/* Entry point safe from interrupt context: defer SAK to a workqueue. */
+void do_SAK(struct tty_struct *tty)
+{
+ if (!tty)
+ return;
+ schedule_work(&tty->SAK_work);
+}
+
+EXPORT_SYMBOL(do_SAK);
+
+/* Must put_device() after it's unused! */
+/* Look up the struct device registered for this tty's dev_t, or NULL. */
+static struct device *tty_get_device(struct tty_struct *tty)
+{
+ dev_t devt = tty_devnum(tty);
+ return class_find_device_by_devt(tty_class, devt);
+}
+
+
+/**
+ * alloc_tty_struct
+ * @driver: tty driver the new tty belongs to
+ * @idx: line index within @driver
+ *
+ * This subroutine allocates and initializes a tty structure.
+ * Returns the new tty, or NULL on allocation failure.
+ *
+ * Locking: none - tty in question is not exposed at this point
+ */
+
+struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
+{
+ struct tty_struct *tty;
+
+ tty = kzalloc(sizeof(*tty), GFP_KERNEL);
+ if (!tty)
+ return NULL;
+
+ kref_init(&tty->kref);
+ tty->magic = TTY_MAGIC;
+ if (tty_ldisc_init(tty)) {
+ /* ldisc setup failed: undo the allocation */
+ kfree(tty);
+ return NULL;
+ }
+ tty->session = NULL;
+ tty->pgrp = NULL;
+ mutex_init(&tty->legacy_mutex);
+ mutex_init(&tty->throttle_mutex);
+ init_rwsem(&tty->termios_rwsem);
+ mutex_init(&tty->winsize_mutex);
+ init_ldsem(&tty->ldisc_sem);
+ init_waitqueue_head(&tty->write_wait);
+ init_waitqueue_head(&tty->read_wait);
+ INIT_WORK(&tty->hangup_work, do_tty_hangup);
+ mutex_init(&tty->atomic_write_lock);
+ spin_lock_init(&tty->ctrl_lock);
+ spin_lock_init(&tty->flow_lock);
+ spin_lock_init(&tty->files_lock);
+ INIT_LIST_HEAD(&tty->tty_files);
+ INIT_WORK(&tty->SAK_work, do_SAK_work);
+
+ tty->driver = driver;
+ tty->ops = driver->ops;
+ tty->index = idx;
+ tty_line_name(driver, idx, tty->name);
+ tty->dev = tty_get_device(tty); /* holds a device ref; released elsewhere */
+
+ return tty;
+}
+
+/**
+ * tty_put_char - write one character to a tty
+ * @tty: tty
+ * @ch: character
+ *
+ * Write one byte to the tty using the provided put_char method
+ * if present. Returns the number of characters successfully output.
+ *
+ * Note: the specific put_char operation in the driver layer may go
+ * away soon. Don't call it directly, use this method
+ */
+
+int tty_put_char(struct tty_struct *tty, unsigned char ch)
+{
+ if (tty->ops->put_char)
+ return tty->ops->put_char(tty, ch);
+ /* fall back to the mandatory write method with a 1-byte buffer */
+ return tty->ops->write(tty, &ch, 1);
+}
+EXPORT_SYMBOL_GPL(tty_put_char);
+
+/* sysfs class all tty devices register under */
+struct class *tty_class;
+
+/*
+ * tty_cdev_add - allocate and register the character device for
+ * @count minors of @driver starting at @dev; stores it in
+ * driver->cdevs[index]. Returns 0 or a negative errno.
+ */
+static int tty_cdev_add(struct tty_driver *driver, dev_t dev,
+ unsigned int index, unsigned int count)
+{
+ int err;
+
+ /* init here, since reused cdevs cause crashes */
+ driver->cdevs[index] = cdev_alloc();
+ if (!driver->cdevs[index])
+ return -ENOMEM;
+ driver->cdevs[index]->ops = &tty_fops;
+ driver->cdevs[index]->owner = driver->owner;
+ err = cdev_add(driver->cdevs[index], dev, count);
+ if (err)
+ kobject_put(&driver->cdevs[index]->kobj); /* frees the cdev */
+ return err;
+}
+
+/**
+ * tty_register_device - register a tty device
+ * @driver: the tty driver that describes the tty device
+ * @index: the index in the tty driver for this tty device
+ * @device: a struct device that is associated with this tty device.
+ * This field is optional, if there is no known struct device
+ * for this tty device it can be set to NULL safely.
+ *
+ * Returns a pointer to the struct device for this tty device
+ * (or ERR_PTR(-EFOO) on error).
+ *
+ * This call is required to be made to register an individual tty device
+ * if the tty driver's flags have the TTY_DRIVER_DYNAMIC_DEV bit set. If
+ * that bit is not set, this function should not be called by a tty
+ * driver.
+ *
+ * Locking: ??
+ */
+
+struct device *tty_register_device(struct tty_driver *driver, unsigned index,
+ struct device *device)
+{
+ /* convenience wrapper: no driver data, no extra attributes */
+ return tty_register_device_attr(driver, index, device, NULL, NULL);
+}
+EXPORT_SYMBOL(tty_register_device);
+
+/* ->release callback for devices kzalloc'd in tty_register_device_attr() */
+static void tty_device_create_release(struct device *dev)
+{
+ dev_dbg(dev, "releasing...\n");
+ kfree(dev);
+}
+
+/**
+ * tty_register_device_attr - register a tty device
+ * @driver: the tty driver that describes the tty device
+ * @index: the index in the tty driver for this tty device
+ * @device: a struct device that is associated with this tty device.
+ * This field is optional, if there is no known struct device
+ * for this tty device it can be set to NULL safely.
+ * @drvdata: Driver data to be set to device.
+ * @attr_grp: Attribute group to be set on device.
+ *
+ * Returns a pointer to the struct device for this tty device
+ * (or ERR_PTR(-EFOO) on error).
+ *
+ * This call is required to be made to register an individual tty device
+ * if the tty driver's flags have the TTY_DRIVER_DYNAMIC_DEV bit set. If
+ * that bit is not set, this function should not be called by a tty
+ * driver.
+ *
+ * Locking: ??
+ */
+struct device *tty_register_device_attr(struct tty_driver *driver,
+ unsigned index, struct device *device,
+ void *drvdata,
+ const struct attribute_group **attr_grp)
+{
+ char name[64];
+ dev_t devt = MKDEV(driver->major, driver->minor_start) + index;
+ struct ktermios *tp;
+ struct device *dev;
+ int retval;
+
+ if (index >= driver->num) {
+ pr_err("%s: Attempt to register invalid tty line number (%d)\n",
+ driver->name, index);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (driver->type == TTY_DRIVER_TYPE_PTY)
+ pty_line_name(driver, index, name);
+ else
+ tty_line_name(driver, index, name);
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev->devt = devt;
+ dev->class = tty_class;
+ dev->parent = device;
+ dev->release = tty_device_create_release; /* frees dev on last put */
+ dev_set_name(dev, "%s", name);
+ dev->groups = attr_grp;
+ dev_set_drvdata(dev, drvdata);
+
+ /* hold back the uevent until the cdev is fully wired up below */
+ dev_set_uevent_suppress(dev, 1);
+
+ retval = device_register(dev);
+ if (retval)
+ goto err_put;
+
+ if (!(driver->flags & TTY_DRIVER_DYNAMIC_ALLOC)) {
+ /*
+ * Free any saved termios data so that the termios state is
+ * reset when reusing a minor number.
+ */
+ tp = driver->termios[index];
+ if (tp) {
+ driver->termios[index] = NULL;
+ kfree(tp);
+ }
+
+ retval = tty_cdev_add(driver, devt, index, 1);
+ if (retval)
+ goto err_del;
+ }
+
+ /* now it is safe to announce the device to userspace */
+ dev_set_uevent_suppress(dev, 0);
+ kobject_uevent(&dev->kobj, KOBJ_ADD);
+
+ return dev;
+
+err_del:
+ device_del(dev);
+err_put:
+ put_device(dev);
+
+ return ERR_PTR(retval);
+}
+EXPORT_SYMBOL_GPL(tty_register_device_attr);
+
+/**
+ * tty_unregister_device - unregister a tty device
+ * @driver: the tty driver that describes the tty device
+ * @index: the index in the tty driver for this tty device
+ *
+ * If a tty device is registered with a call to tty_register_device() then
+ * this function must be called when the tty device is gone.
+ *
+ * Locking: ??
+ */
+
+void tty_unregister_device(struct tty_driver *driver, unsigned index)
+{
+ device_destroy(tty_class,
+ MKDEV(driver->major, driver->minor_start) + index);
+ /* per-line cdevs only exist when not dynamically allocated */
+ if (!(driver->flags & TTY_DRIVER_DYNAMIC_ALLOC)) {
+ cdev_del(driver->cdevs[index]);
+ driver->cdevs[index] = NULL;
+ }
+}
+EXPORT_SYMBOL(tty_unregister_device);
+
+/**
+ * __tty_alloc_driver -- allocate tty driver
+ * @lines: count of lines this driver can handle at most
+ * @owner: module which is responsible for this driver
+ * @flags: some of TTY_DRIVER_* flags, will be set in driver->flags
+ *
+ * This should not be called directly, some of the provided macros should be
+ * used instead. Use IS_ERR and friends on @retval.
+ */
+struct tty_driver *__tty_alloc_driver(unsigned int lines, struct module *owner,
+ unsigned long flags)
+{
+ struct tty_driver *driver;
+ unsigned int cdevs = 1;
+ int err;
+
+ /* an unnumbered-node driver is by definition single-line */
+ if (!lines || (flags & TTY_DRIVER_UNNUMBERED_NODE && lines > 1))
+ return ERR_PTR(-EINVAL);
+
+ driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+ if (!driver)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&driver->kref);
+ driver->magic = TTY_DRIVER_MAGIC;
+ driver->num = lines;
+ driver->owner = owner;
+ driver->flags = flags;
+
+ /* devpts manages its own tty/termios storage */
+ if (!(flags & TTY_DRIVER_DEVPTS_MEM)) {
+ driver->ttys = kcalloc(lines, sizeof(*driver->ttys),
+ GFP_KERNEL);
+ driver->termios = kcalloc(lines, sizeof(*driver->termios),
+ GFP_KERNEL);
+ if (!driver->ttys || !driver->termios) {
+ err = -ENOMEM;
+ goto err_free_all;
+ }
+ }
+
+ if (!(flags & TTY_DRIVER_DYNAMIC_ALLOC)) {
+ driver->ports = kcalloc(lines, sizeof(*driver->ports),
+ GFP_KERNEL);
+ if (!driver->ports) {
+ err = -ENOMEM;
+ goto err_free_all;
+ }
+ /* one cdev per line in the static-allocation case */
+ cdevs = lines;
+ }
+
+ driver->cdevs = kcalloc(cdevs, sizeof(*driver->cdevs), GFP_KERNEL);
+ if (!driver->cdevs) {
+ err = -ENOMEM;
+ goto err_free_all;
+ }
+
+ return driver;
+err_free_all:
+ /* kfree(NULL) is safe, so unconditional frees are fine here */
+ kfree(driver->ports);
+ kfree(driver->ttys);
+ kfree(driver->termios);
+ kfree(driver->cdevs);
+ kfree(driver);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(__tty_alloc_driver);
+
+/*
+ * destruct_tty_driver - kref release callback: tear down everything
+ * tty_register_driver() set up, then free the driver itself.
+ */
+static void destruct_tty_driver(struct kref *kref)
+{
+ struct tty_driver *driver = container_of(kref, struct tty_driver, kref);
+ int i;
+ struct ktermios *tp;
+
+ if (driver->flags & TTY_DRIVER_INSTALLED) {
+ for (i = 0; i < driver->num; i++) {
+ /* drop any saved termios for this line */
+ tp = driver->termios[i];
+ if (tp) {
+ driver->termios[i] = NULL;
+ kfree(tp);
+ }
+ if (!(driver->flags & TTY_DRIVER_DYNAMIC_DEV))
+ tty_unregister_device(driver, i);
+ }
+ proc_tty_unregister_driver(driver);
+ if (driver->flags & TTY_DRIVER_DYNAMIC_ALLOC)
+ cdev_del(driver->cdevs[0]); /* single shared cdev */
+ }
+ kfree(driver->cdevs);
+ kfree(driver->ports);
+ kfree(driver->termios);
+ kfree(driver->ttys);
+ kfree(driver);
+}
+
+/* Drop a driver reference; last put runs destruct_tty_driver(). */
+void tty_driver_kref_put(struct tty_driver *driver)
+{
+ kref_put(&driver->kref, destruct_tty_driver);
+}
+EXPORT_SYMBOL(tty_driver_kref_put);
+
+/* Attach the operations table to a driver before registration. */
+void tty_set_operations(struct tty_driver *driver,
+ const struct tty_operations *op)
+{
+ driver->ops = op;
+}; /* NOTE(review): stray ';' after the body — harmless, matches upstream */
+EXPORT_SYMBOL(tty_set_operations);
+
+/* Legacy alias for tty_driver_kref_put(). */
+void put_tty_driver(struct tty_driver *d)
+{
+ tty_driver_kref_put(d);
+}
+EXPORT_SYMBOL(put_tty_driver);
+
+/*
+ * Called by a tty driver to register itself.
+ *
+ * Reserves the char-device region (allocating a major if driver->major
+ * is 0), adds cdevs, links the driver into tty_drivers, and unless
+ * TTY_DRIVER_DYNAMIC_DEV is set registers a device per line.
+ * Returns 0 or a negative errno; on failure everything is unwound.
+ */
+int tty_register_driver(struct tty_driver *driver)
+{
+ int error;
+ int i;
+ dev_t dev;
+ struct device *d;
+
+ if (!driver->major) {
+ /* no fixed major: let the kernel pick one */
+ error = alloc_chrdev_region(&dev, driver->minor_start,
+ driver->num, driver->name);
+ if (!error) {
+ driver->major = MAJOR(dev);
+ driver->minor_start = MINOR(dev);
+ }
+ } else {
+ dev = MKDEV(driver->major, driver->minor_start);
+ error = register_chrdev_region(dev, driver->num, driver->name);
+ }
+ if (error < 0)
+ goto err;
+
+ if (driver->flags & TTY_DRIVER_DYNAMIC_ALLOC) {
+ /* one cdev covering the whole minor range */
+ error = tty_cdev_add(driver, dev, 0, driver->num);
+ if (error)
+ goto err_unreg_char;
+ }
+
+ mutex_lock(&tty_mutex);
+ list_add(&driver->tty_drivers, &tty_drivers);
+ mutex_unlock(&tty_mutex);
+
+ if (!(driver->flags & TTY_DRIVER_DYNAMIC_DEV)) {
+ for (i = 0; i < driver->num; i++) {
+ d = tty_register_device(driver, i, NULL);
+ if (IS_ERR(d)) {
+ error = PTR_ERR(d);
+ goto err_unreg_devs;
+ }
+ }
+ }
+ proc_tty_register_driver(driver);
+ driver->flags |= TTY_DRIVER_INSTALLED;
+ return 0;
+
+err_unreg_devs:
+ /* unwind only the devices registered so far */
+ for (i--; i >= 0; i--)
+ tty_unregister_device(driver, i);
+
+ mutex_lock(&tty_mutex);
+ list_del(&driver->tty_drivers);
+ mutex_unlock(&tty_mutex);
+
+err_unreg_char:
+ unregister_chrdev_region(dev, driver->num);
+err:
+ return error;
+}
+EXPORT_SYMBOL(tty_register_driver);
+
+/*
+ * Called by a tty driver to unregister itself.
+ *
+ * Releases the char-device region and unlinks the driver from
+ * tty_drivers. Memory is freed later by tty_driver_kref_put().
+ */
+int tty_unregister_driver(struct tty_driver *driver)
+{
+#if 0
+ /* FIXME */
+ if (driver->refcount)
+ return -EBUSY;
+#endif
+ unregister_chrdev_region(MKDEV(driver->major, driver->minor_start),
+ driver->num);
+ mutex_lock(&tty_mutex);
+ list_del(&driver->tty_drivers);
+ mutex_unlock(&tty_mutex);
+ return 0; /* always succeeds (busy-check above is disabled) */
+}
+
+EXPORT_SYMBOL(tty_unregister_driver);
+
+/* Compute the dev_t for a tty from its driver's major/minor base. */
+dev_t tty_devnum(struct tty_struct *tty)
+{
+ return MKDEV(tty->driver->major, tty->driver->minor_start) + tty->index;
+}
+EXPORT_SYMBOL(tty_devnum);
+
+/* Copy the tty file_operations into a caller-provided table. */
+void tty_default_fops(struct file_operations *fops)
+{
+ *fops = tty_fops;
+}
+
+/*
+ * devnode callback for the tty class: make /dev/tty and /dev/ptmx
+ * (TTYAUX minors 0 and 2) world read/writable. Never renames nodes.
+ */
+static char *tty_devnode(struct device *dev, umode_t *mode)
+{
+ if (!mode)
+ return NULL;
+ if (dev->devt == MKDEV(TTYAUX_MAJOR, 0) ||
+ dev->devt == MKDEV(TTYAUX_MAJOR, 2))
+ *mode = 0666;
+ return NULL;
+}
+
+/* Create the "tty" sysfs class early so drivers can register against it. */
+static int __init tty_class_init(void)
+{
+ tty_class = class_create(THIS_MODULE, "tty");
+ if (IS_ERR(tty_class))
+ return PTR_ERR(tty_class);
+ tty_class->devnode = tty_devnode;
+ return 0;
+}
+
+/* must run before drivers that register tty devices */
+postcore_initcall(tty_class_init);
+
+/* 3/2004 jmc: why do these devices exist? */
+static struct cdev tty_cdev, console_cdev;
+
+/*
+ * show_cons_active - sysfs "active" attribute on /dev/console:
+ * lists the enabled, writable consoles, most-preferred first.
+ */
+static ssize_t show_cons_active(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct console *cs[16]; /* snapshot; 16 consoles is plenty */
+ int i = 0;
+ struct console *c;
+ ssize_t count = 0;
+
+ console_lock();
+ for_each_console(c) {
+ if (!c->device)
+ continue;
+ if (!c->write)
+ continue;
+ if ((c->flags & CON_ENABLED) == 0)
+ continue;
+ cs[i++] = c;
+ if (i >= ARRAY_SIZE(cs))
+ break;
+ }
+ /* emit in reverse so the preferred console comes out first */
+ while (i--) {
+ int index = cs[i]->index;
+ struct tty_driver *drv = cs[i]->device(cs[i], &index);
+
+ /* don't resolve tty0 as some programs depend on it */
+ if (drv && (cs[i]->index > 0 || drv->major != TTY_MAJOR))
+ count += tty_line_name(drv, index, buf + count);
+ else
+ count += sprintf(buf + count, "%s%d",
+ cs[i]->name, cs[i]->index);
+
+ count += sprintf(buf + count, "%c", i ? ' ':'\n');
+ }
+ console_unlock();
+
+ return count;
+}
+static DEVICE_ATTR(active, S_IRUGO, show_cons_active, NULL);
+
+/* attribute group exposing "active" on the console device */
+static struct attribute *cons_dev_attrs[] = {
+ &dev_attr_active.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(cons_dev);
+
+/* the /dev/console device; NULL if creation failed in tty_init() */
+static struct device *consdev;
+
+/* Poke pollers of the console "active" attribute when the set changes. */
+void console_sysfs_notify(void)
+{
+ if (consdev)
+ sysfs_notify(&consdev->kobj, NULL, "active");
+}
+
+/*
+ * Ok, now we can initialize the rest of the tty devices and can count
+ * on memory allocations, interrupts etc..
+ *
+ * Registers /dev/tty (TTYAUX,0) and /dev/console (TTYAUX,1); panics on
+ * failure since a system without them is unusable.
+ */
+int __init tty_init(void)
+{
+ tty_sysctl_init();
+ cdev_init(&tty_cdev, &tty_fops);
+ if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
+ register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0)
+ panic("Couldn't register /dev/tty driver\n");
+ device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 0), NULL, "tty");
+
+ cdev_init(&console_cdev, &console_fops);
+ if (cdev_add(&console_cdev, MKDEV(TTYAUX_MAJOR, 1), 1) ||
+ register_chrdev_region(MKDEV(TTYAUX_MAJOR, 1), 1, "/dev/console") < 0)
+ panic("Couldn't register /dev/console driver\n");
+ consdev = device_create_with_groups(tty_class, NULL,
+ MKDEV(TTYAUX_MAJOR, 1), NULL,
+ cons_dev_groups, "console");
+ if (IS_ERR(consdev))
+ consdev = NULL; /* non-fatal: console works without sysfs node */
+
+#ifdef CONFIG_VT
+ vty_init(&console_fops);
+#endif
+ return 0;
+}
+
diff --git a/upstream/linux-5.10/include/linux/soc/sc/spinlock.h b/upstream/linux-5.10/include/linux/soc/sc/spinlock.h
new file mode 100755
index 0000000..eb5e7b7
--- /dev/null
+++ b/upstream/linux-5.10/include/linux/soc/sc/spinlock.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2015 ZTE-TSP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * NOTE(review): the generic guard _SPINLOCK_H is also used by
+ * include/linux/soc/zte/spinlock.h in this same patch; the two headers
+ * cannot be included together — confirm this is intentional.
+ */
+#ifndef _SPINLOCK_H
+#define _SPINLOCK_H
+
+
+/****************************************************************************
+* Include files
+****************************************************************************/
+//#include <spinlock_com.h>
+
+/****************************************************************************
+* Macros
+****************************************************************************/
+
+/****************************************************************************
+* Types
+****************************************************************************/
+
+
+#define SPINLOCK_IOC_MAGIC 'S'
+
+/*ioctl cmd usd by device*/
+#define SPINLOCK_GET_STATUS _IOWR(SPINLOCK_IOC_MAGIC, 1, char *)
+
+
+
+/* core identifiers for the inter-processor spinlock protocol */
+typedef enum
+{
+ CORE_ID_PS=210,
+ CORE_ID_PHY=211,
+ CORE_ID_ZSP=212,
+ CORE_ID_M0=213,
+ CORE_ID_AP=214,
+ CORE_ID_NUM = 215
+} zte_coreid;
+/* hardware lock identifiers */
+typedef enum
+{
+ PCU_HWLOCK = 0,/*PCU*/
+ CLK_HWLOCK = 1,/*Clock*/
+ REGLOCK_HWLOCK,
+ SOFTLOCK_HWLOCK,
+ HWLOCK_NUM
+} emhw_lock_id;
+
+/* software lock identifiers */
+typedef enum
+{
+ I2C0_SFLOCK = 0,/*i2c0*/
+ I2C1_SFLOCK = 1,/*i2c1*/
+ I2C2_SFLOCK = 2,/*pmic-i2c*/
+ NAND_SFLOCK = 3,/*NAND*/
+ SD0_SFLOCK, /*for sd0*/
+ SD1_SFLOCK, /*for sd1*/
+ ADC_SFLOCK, /*for adc*/
+ UART_SFLOCK,
+ PMIC_SFLOCK,
+ SFLOCK_ID9,
+ SFLOCK_ID10,
+ SFLOCK_ID11,
+ SFLOCK_ID12,
+ SFLOCK_ID13,
+ SFLOCK_ID14,
+ SFLOCK_ID15,
+ SFLOCK_ID16,
+ SFLOCK_ID17,
+ SFLOCK_ID18,
+ SFLOCK_ID19,
+ SFLOCK_ID20,
+ SFLOCK_ID21,
+ SFLOCK_ID22,
+ SFLOCK_ID23,
+ SFLOCK_ID24,
+ SFLOCK_ID25,
+ SFLOCK_ID26,
+ SFLOCK_ID27,
+ SFLOCK_ID28,
+ SFLOCK_ID29,
+ SFLOCK_ID30,
+ REG_SFLOCK = 31,/*REG*/
+ SFLOCK_NUM
+} emsf_lock_id;
+
+
+/* cross-core spinlock API — implementations live in the SoC driver */
+void hw_spin_lock(u32 hwid);
+void hw_spin_unlock(u32 hwid);
+void soft_spin_lock(u32 sfid);
+void soft_spin_unlock(u32 sfid);
+void reg_spin_lock(void);
+void reg_spin_unlock(void);
+void soft_spin_lock_psm(emsf_lock_id sfid);
+void soft_spin_unlock_psm(emsf_lock_id sfid);
+
+void zx_spinlock_init(void __iomem *spinlock_base);
+int soft_spin_lock_printf(emsf_lock_id sfid);
+
+#endif/* _SPINLOCK_H */
+
+
diff --git a/upstream/linux-5.10/include/linux/soc/zte/spinlock.h b/upstream/linux-5.10/include/linux/soc/zte/spinlock.h
new file mode 100755
index 0000000..1181933
--- /dev/null
+++ b/upstream/linux-5.10/include/linux/soc/zte/spinlock.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2015 ZTE-TSP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SPINLOCK_H
+#define _SPINLOCK_H
+
+
+/****************************************************************************
+* Include files
+****************************************************************************/
+//#include <spinlock_com.h>
+
+/****************************************************************************
+* Macros
+****************************************************************************/
+
+/****************************************************************************
+* Types
+****************************************************************************/
+
+
+#define SPINLOCK_IOC_MAGIC 'S'
+
+/*ioctl cmd usd by device*/
+#define SPINLOCK_GET_STATUS _IOWR(SPINLOCK_IOC_MAGIC, 1, char *)
+
+
+
+typedef enum
+{
+ CORE_ID_PS=210,
+ CORE_ID_PHY=211,
+ CORE_ID_ZSP=212,
+ CORE_ID_M0=213,
+ CORE_ID_AP=214,
+ CORE_ID_NUM = 215
+} zte_coreid;
+/* Hardware lock identifiers */
+typedef enum
+{
+ PCU_HWLOCK = 0,/*PCU*/
+ CLK_HWLOCK = 1,/*Clock*/
+ REGLOCK_HWLOCK,
+ SOFTLOCK_HWLOCK,
+ HWLOCK_NUM
+} emhw_lock_id;
+
+/* Software lock identifiers */
+typedef enum
+{
+ I2C0_SFLOCK = 0,/*i2c0*/
+ I2C1_SFLOCK = 1,/*i2c1*/
+ I2C2_SFLOCK = 2,/*pmic-i2c*/
+ NAND_SFLOCK = 3,/*NAND*/
+ SD0_SFLOCK, /*for sd0*/
+ SD1_SFLOCK, /*for sd1*/
+ ADC_SFLOCK, /*for adc*/
+ UART_SFLOCK,
+ PMIC_SFLOCK,
+#ifdef CONFIG_SYSVIPC_CROSS_SHM
+ SHM_SFLOCK,
+#else
+ SFLOCK_ID9,
+#endif
+ EFUSE_SFLOCK,
+ SFLOCK_ID11,
+ SFLOCK_ID12,
+ SFLOCK_ID13,
+ SFLOCK_ID14,
+ SFLOCK_ID15,
+ SFLOCK_ID16,
+ SFLOCK_ID17,
+ SFLOCK_ID18,
+ SFLOCK_ID19,
+ SFLOCK_ID20,
+ SFLOCK_ID21,
+ SFLOCK_ID22,
+ SFLOCK_ID23,
+ SFLOCK_ID24,
+ SFLOCK_ID25,
+ SFLOCK_ID26,
+ SFLOCK_ID27,
+ SFLOCK_ID28,
+ SFLOCK_ID29,
+ SFLOCK_ID30,
+ REG_SFLOCK = 31,/*REG*/
+ SFLOCK_NUM
+} emsf_lock_id;
+
+
+void hw_spin_lock(u32 hwid);
+void hw_spin_unlock(u32 hwid);
+void soft_spin_lock(u32 sfid);
+void soft_spin_unlock(u32 sfid);
+void reg_spin_lock(void);
+void reg_spin_unlock(void);
+void soft_spin_lock_psm(emsf_lock_id sfid);
+void soft_spin_unlock_psm(emsf_lock_id sfid);
+
+void zx_spinlock_init(void __iomem *spinlock_base);
+int soft_spin_lock_printf(emsf_lock_id sfid);
+
+#endif/* _SPINLOCK_H */
+
diff --git a/upstream/linux-5.10/include/net/SI/netioc_proc.h b/upstream/linux-5.10/include/net/SI/netioc_proc.h
new file mode 100755
index 0000000..f0c8aa4
--- /dev/null
+++ b/upstream/linux-5.10/include/net/SI/netioc_proc.h
@@ -0,0 +1,492 @@
+/************************************************************************
+* Purpose: operation interfaces for the Linux networking fastnat/fastbr paths
+* Owner:
+* Backup owner:
+* Modified date:
+* Modification:
+* Version:
+************************************************************************/
+#ifndef _NETIO_PROC_H_
+#define _NETIO_PROC_H_
+
+
+#include <net/SI/net_track.h>
+#include <net/SI/ext_mem.h>
+
+
+#define PC_MAX_NUM 100
+#define DEV_NAME_LEN 50
+
+#define DEV_NAME_LEN_20 20
+
+#define ETH_ALEN 6
+
+#define SLAB_NUM 21
+
+#define HASH_ARRAY_COUNT 512
+//SKB statistics, including counters recorded at anomaly points
+enum skbinfo_type{
+ SKB_TYPE_ALL = 0, //skb½á¹¹ÌåÕýÔÚ¹¤×÷µÄ¸öÊý
+ SKB_TYPE_DATA, //SKBÖеÄslab»úÖÆµÄdataÕýÔÚ¹¤×÷µÄ¸öÊý£¬°üº¬SKB_TYPE_TOCP£¬µ«²»°üº¬SKB_TYPE_FROMCP
+ SKB_TYPE_TOCP, //·¢ÍùCPµÄskbÕýÔÚ¹¤×÷µÄ¸öÊý
+ SKB_TYPE_FROMCP, //´ÓCP½ÓÊÕµ½µÄPSBUFÕýÔÚ¹¤×÷µÄ¸öÊý
+ SKB_DATA_BYTES, //µ±Ç°ÉêÇëµÄdata×Ü×Ö½ÚÊý £¬ksize·½Ê½ÀÛ¼ÓµÄ
+
+	//Performance-related global counters; the kernel only accumulates occurrence counts and does no analysis
+ SKB_QUEUE_STOP, //xmit_stopÔì³ÉµÄ¶ª°ü¸öÊý£¬ÀÛ¼ÓÖµ
+ SKB_QUEUE_LOCK, //QUEUE_LOCKÔì³ÉµÄ¶ª°ü¸öÊý£¬ÀÛ¼ÓÖµ
+ SKB_COPY_CACHE, //net_cacheÔ´ÎļþÖнøÐÐÈ«¿½±´µÄÀÛ¼ÓÖµ£¬Ä¿Ç°½öÔÚPPPºÍ·ÖƬʱ¿½±´£¬Ó°ÏìÐÔÄÜ
+ SKB_IRQ_FREE, //ͨ¹ýÈíÖжÏÊͷŵÄskb£¬ÐÔÄÜÏà¹Ø
+ SKB_COPY, //Êý¾Ý¿½±´µÄskb£¬ÐÔÄÜÏà¹Ø
+ SKB_FLOOD, //·ººéµÄskb£¬ÐÔÄÜÏà¹Ø
+ SKB_ERRFREE, //devÔ´ÎļþÖдíÎóÊͷŵÄskb¼ÆÊý£¬°üÀ¨SKB_QUEUE_STOPºÍSKB_QUEUE_LOCKÁ½¸ö¼ÆÊýÖµ
+ SKB_FRAG, //½ÓÊÕµ½µÄ·ÖƬ±¨ÎÄ£¬ÐÔÄÜÏà¹Ø
+ SKB_OVER_MTU, //fastÖÐÊý¾Ý³¤¶È´óÓÚ³ö¿Údev MTUʱ£¬fastʧ°Ü¸öÊýͳ¼Æ
+ SKB_LOOP, //ͶµÝ¸øÇý¶¯£¬ÓÖ´ÓÇý¶¯ÊÕµ½µÄÒì³£»Ø»·¸öÊýͳ¼Æ£¬´óÓÚ0±íʾÇý¶¯Òì³£
+ SKB_ALLOC_FIAL, //ÉêÇëskbʧ°ÜµÄ¸öÊýÀÛ¼ÓÖµ
+ SKB_INFO_MAX,
+};
+
+//Network kernel runtime statistics; focus is on struct allocation points
+enum net_run_info{
+ BR_MAC_CHANGE = 0, //ÍøÇÅmacµØÖ·±»¸Ä±ä´ÎÊý
+ NEIGH_ALLOC, //neighbourÉêÇë´ÎÊý
+ NEIGH_FREE, //neighbourÊͷŵãÀÛ¼ÓÖµ
+ BR_NEIGH_VARY, //ÇŵãµÄ³ö¿ÚdevµÄMACµØÖ·±»¶à¸öPC¹²Ïí
+ CONN_ALLOC, //CONNÉêÇë´ÎÊýÀÛ¼ÓÖµ
+ CONN_FREE, //CONNÊͷŵãÀÛ¼ÓÖµ
+ BRFDB_ALLOC, //ÇŵãÉêÇë´ÎÊýÀÛ¼ÓÖµ
+ DST_ALLOC, //dst_entryÉêÇëÀÛ¼ÓÖµ
+ DST_FREE, //dst_entryÊͷŵãÀÛ¼ÓÖµ
+ HH_UPDATE, //HH¶þ²ãMACÍ·¸üÐÂÀÛ¼ÓÖµ
+ RT_CACHE_INVALID, //Çå¿Õ·ÓÉcacheµÄÀÛ¼ÓÖµ
+ RT_HASH_ADD, //ÐÂÔört_hash_table½ÚµãµÄÀÛ¼ÓÖµ
+ RT_HASH_DEL, //ɾ³ýrt_hash_table½ÚµãµÄÀÛ¼ÓÖµ
+ SSMAC_CHANGE_INDEV, //ͬһԴMACµØÖ·Èë¿Údev¸Ä±äµÄ´ÎÊý
+ NET_INFO_MAX,
+};
+enum dev_opt_state{
+ DEV_UNOPT=0, //ÈôÓû§Î´×öÈκβÙ×÷£¬³õʼֵΪ0
+ DEV_NOPRESENT, //¶ÔÓ¦!netif_device_presentÒì³££¬±íÃ÷Çý¶¯µ×²ãÉÐδ׼±¸ºÃ
+ DEV_OPENED, //dev is opened
+ DEV_OPEN_FAIL, //open fail
+ DEV_CLOSED, //dev is closed
+};
+
+enum slabinfo_file{
+ FAST_SLAB = 0,
+ SKB_SLAB,
+ BRFDB_SLAB,
+ DST_SLAB,
+ FIB_TRIE_SLAB,
+ FLOW_SLAB,
+ INETPEER_SLAB,
+ INET_HASHTABLES_SLAB,
+ INET_TIMEWAIT_SOCK_SLAB,
+ MYSOCKET_SLAB,
+ NF_CONNTRACK_CORE_SLAB,
+ NF_CONNTRACK_EXCEPT_SLAB,
+ REQUEST_SOCK_SLAB,
+ SOCK_SLAB,
+ SOCKET_SLAB,
+ XFRM6_TUNNEL_SLAB,
+ XT_HASHLIMIT_SLAB,
+ SOCK_ALLOC_PAGES, //ÓÉÓÚÎÞ·¨×¼È·ÕÒµ½Êͷŵ㣬ËùÒÔ½öÓÐ++¶¯×÷£¬--Ó¦¸ÃÓÉput_page´¥·¢
+ IP6_OUTPUT_ALLOC_PAGES, //ÓÉÓÚÎÞ·¨×¼È·ÕÒµ½Êͷŵ㣬ËùÒÔ½öÓÐ++¶¯×÷£¬--Ó¦¸ÃÓÉput_page´¥·¢
+ IP_OUTPUT_ALLOC_PAGES, //ÓÉÓÚÎÞ·¨×¼È·ÕÒµ½Êͷŵ㣬ËùÒÔ½öÓÐ++¶¯×÷£¬--Ó¦¸ÃÓÉput_page´¥·¢
+ SKB_ALLOC_PAGES, //ÓÉÓÚÎÞ·¨×¼È·ÕÒµ½Êͷŵ㣬ËùÒÔ½öÓÐ++¶¯×÷£¬--Ó¦¸ÃÓÉput_page´¥·¢
+};
+
+
+/* Local TCP statistics */
+enum tcp_stat_info
+{
+ TCP_RECV_PKTS = 0, /*½ÓÊÕµÄTCPÊýÄ¿*/
+ TCP_SEND_PKTS, /*·¢Ë͵ÄTCPÊýÄ¿*/
+ TCP_RETRANS_PKTS, /*·¢Ë͵ÄÖØ´«TCPÊýÄ¿*/
+ TCP_RECV_DROPS, /*½ÓÊն˵ÄTCP¶ª°üÊý*/
+ TCP_SEND_DROPS, /*·¢ËͶ˵ÄTCP¶ª°üÊý*/
+ TCP_RST_SEND_NUM, /*·¢Ë͵ÄRSTÊý*/
+ TCP_RST_RECV_NUM, /*½ÓÊÕµÄRSTÊý*/
+ TCP_STATS_MAX,
+};
+
+
+struct tcp_sock_stat
+{
+ unsigned long tcp_recv_num;
+ unsigned long tcp_send_num;
+ unsigned long tcp_retrans_num;
+ unsigned long tcp_recv_drops;
+ unsigned long tcp_send_drops;
+ unsigned long tcp_rst_send;
+ unsigned long tcp_rst_recv;
+};
+
+/**** Resource usage and limit info: everything the standard kernel caps must be monitored to prevent anomalies ****/
+struct net_max_check_msg
+{
+ /*nf_conntrack*/
+ unsigned long nf_conntrack_max; //nf_conntrack_max = 4832
+ unsigned long nf_conntrack_now; //net->ct.count
+ /*enqueue*/
+ int netdev_max_backlog; //netdev_max_backlog=1000
+ int input_queue_len; //²Î¿´enqueue_to_backlog½Ó¿ÚʵÏÖ
+ int rx_dropped; //ÒòΪÈë¶ÓÁÐÒÑÂúÔì³ÉµÄÈë¶ÓÁжª°üµÄÀÛ¼ÓÖµ
+ /*ÐÔÄÜÏà¹Ø*/
+ int fastnat_link_max; //nf_conntrack_max
+ int fastnat_link_now; //working_list.count
+ int fast6_link_max; //nf_conntrack_max
+ int fast6_link_now; //working_list6.count
+
+ /*ÍøÂçÄÚºËÔËÐÐʱµÄͳ¼ÆÖµ£¬ÖØµã¹Ø×¢½á¹¹ÌåµÄÉêÇëµã*/
+ unsigned long br_mac_change; //ÍøÇÅmacµØÖ·±»¸Ä±ä´ÎÊý
+ unsigned long neigh_alloc; //neighbourÉêÇë´ÎÊý
+ unsigned long neigh_free; //neighbourÊͷŵãÀÛ¼ÓÖµ
+ unsigned long br_neigh_vary; //ÇŵãµÄ³ö¿ÚdevµÄmacµØÖ·±»¶à¸öpc¹²Ïí
+ unsigned long conn_alloc; //connÉêÇë´ÎÊýÀÛ¼ÓÖµ
+ unsigned long conn_free; //connÊͷŵãÀÛ¼ÓÖµ
+ unsigned long brfdb_alloc; //ÇŵãÉêÇë´ÎÊýÀÛ¼ÓÖµ
+ unsigned long dst_alloc; //dst_entryÉêÇëÀÛ¼ÓÖµ
+ unsigned long dst_free; //dst_entryÊͷŵãÀÛ¼ÓÖµ
+ unsigned long hh_update; //hh¶þ²ãmacÍ·¸üÐÂÀÛ¼ÓÖµ
+ unsigned long rt_cache_invalid; //Çå¿Õ·ÓÉcacheµÄÀÛ¼ÓÖµ
+ unsigned long rt_hash_add; //ÐÂÔört_hash_table½ÚµãµÄÀÛ¼ÓÖµ
+ unsigned long rt_hash_del; //ɾ³ýrt_hash_table½ÚµãµÄÀÛ¼ÓÖµ
+ unsigned long ssmac_change_indev; //ͬһԴMACµØÖ·Èë¿Údev¸Ä±äµÄ´ÎÊý
+};
+
+/************************* SKB-related info, including statistics and fast-path info ***********************/
+struct skb_and_fast_msg
+{
+ int skb_num4; //½ÓÊÕµ½µÄV4Êý¾Ý°ü
+ int skb_num6; //½ÓÊÕµ½µÄV6Êý¾Ý°ü
+ int skb_big_num; //len³¤¶È³¬¹ý1000µÄÊý¾Ý°ü£¬º¬V4ºÍV6
+ int skb_small_num; //len³¤¶ÈСÓÚ100µÄÊý¾Ý°ü£¬º¬V4ºÍV6
+ int skb_bytes4; //½ÓÊÕµ½µÄV4Êý¾Ý°ü×Ö½ÚÊý
+ int skb_bytes6; //½ÓÊÕµ½µÄV6Êý¾Ý°ü×Ö½ÚÊý
+ int skb_unknown; //½ÓÊÕµ½µÄδ֪ÐÒéÊý¾Ý°ü£¬°üÀ¨ARPµÈ·ÇV4ºÍV6µÄ±¨ÎÄ
+ int skb_tcpnum; //½ÓÊÕµ½µÄtcpÊý¾Ý°ü£¬º¬V4ºÍV6£¬µ«²»º¬fastbrµÄ±¨ÎÄ
+ int skb_udpnum; //½ÓÊÕµ½µÄudpÊý¾Ý°ü£¬º¬V4ºÍV6£¬µ«²»º¬fastbrµÄ±¨ÎÄ
+ int broadcast_num4; //½ÓÊÕµ½µÄV4¹ã²¥°ü
+ int broadcast_num6; //½ÓÊÕµ½µÄV6¹ã²¥°ü
+ int multicast_num4; //½ÓÊÕµ½µÄV4×é²¥±¨
+ int multicast_num6; //½ÓÊÕµ½µÄV6×é²¥±¨
+ int fastnat_num; //fastnat³É¹¦µÄ±¨ÎÄ
+ int fast6_num; //fast6³É¹¦µÄ±¨ÎÄ
+ int fastbr_num; //fastbr³É¹¦µÄ±¨ÎÄ
+ int fastnat_level; //²Î¿´FAST_NET_DEVICE
+ int fastbr_level; //²¼¶ûÀàÐÍ
+ //ÏÂÃæ¼¸¸öֵΪÀÛ¼ÓÖµ£¬ÐÔÄÜÏà¹Ø
+ int irqfree_num; //ͨ¹ýÈíÖжÏÊͷŵÄskb£¬ÐÔÄÜÏà¹Ø
+ int skbcopy_num; //Êý¾Ý¿½±´µÄskb£¬ÐÔÄÜÏà¹Ø
+ int cache_copy; //net_cacheÔ´Îļþ½øÐп½±´µÄÀÛ»ý¼ÆÊý£¬ÐÔÄÜÏà¹Ø
+ int skbflood_num; //·ººéµÄskb£¬ÐÔÄÜÏà¹Ø
+ int errfree_num; //devÔ´ÎļþÖдíÎóÊͷŵÄskb¼ÆÊý£¬°üÀ¨SKB_QUEUE_STOPºÍSKB_QUEUE_LOCKÁ½¸ö¼ÆÊýÖµ
+ int frag_num; //½ÓÊÕµ½µÄ·ÖƬ±¨ÎÄ£¬ÐÔÄÜÏà¹Ø
+ int mtu_num; //fastÖÐÊý¾Ý³¤¶È´óÓÚ³ö¿Údev MTUʱ£¬fastʧ°Ü¸öÊýͳ¼Æ
+ int fast_loop; //ͶµÝ¸øÇý¶¯£¬ÓÖ´ÓÇý¶¯ÊÕµ½µÄÒì³£»Ø»·¸öÊýͳ¼Æ
+ int skb_alloc_fail ; //ÉêÇëskbʧ°ÜµÄ¸öÊýÀÛ¼ÓÖµ
+ int xmit_lock_num; //xmit_lock_owner±»Ëø×¡Ôì³ÉµÄ¶ª°üµÄÀÛ¼Ó¼ÆÊý£¬Çý¶¯Ôì³ÉµÄ
+ int xmit_stop_num; //!netif_xmit_stopped(txq)Ôì³ÉµÄ¶ª°üµÄÀÛ¼Ó¼ÆÊý£¬Çý¶¯Ôì³ÉµÄ
+ int br_mac_change_num; //ÍøÇÅmacµØÖ·±»¸Ä±ä´ÎÊý
+ int fast_tcpdump_num; //fast×¥°ü¸öÊý
+ int fast_switch;
+ int fast_local4_rcv_num; //±¾µØfast_local4³É¹¦½ÓÊÕ±¨ÎÄ
+ int fast_local6_rcv_num; //±¾µØfast_local6³É¹¦½ÓÊÕ±¨ÎÄ
+ int fast_local4_output_num; //±¾µØfast_local4³É¹¦·¢Ëͱ¨ÎÄ
+ int fast_local6_output_num; //±¾µØfast_local6³É¹¦·¢Ëͱ¨ÎÄ
+};
+
+struct skb_using_msg
+{
+ unsigned long skb_all;
+ unsigned long skb_tocp;
+ unsigned long skb_fromcp;
+ unsigned long skb_data_num;
+ unsigned long skb_data_size;
+ unsigned long skb_stop;
+ unsigned long skb_lock;
+ unsigned long skb_panic;
+ unsigned long skb_fail;
+};
+
+/************************* Struct definitions: DEV ***********************/
+//Added by networking: per-device TX/RX packet statistics at the network layer
+struct net_dev_skbinfo {
+ unsigned long rx_packets; //Çý¶¯·¢ËÍÀ´µÄÊý¾Ý°ü¸öÊý£¬ÔÚnetif_rxÍ·ÀÛ¼Ó
+ unsigned long tx_packets; //·¢Ë͸øÇý¶¯µÄÊý¾Ý°ü¸öÊý£¬ÔÚdev_queue_xmitÍ·ÀÛ¼Ó
+ unsigned long rx_bytes; //×Ö½ÚÊý
+ unsigned long tx_bytes; //×Ö½ÚÊý
+ unsigned long rx_dropped; //netif_rxÄÚ²¿Á÷³ÌÖÐËùÓÐÒì³£ÊÍ·ÅskbµÄÀÛ¼Ó£¬Èç´ïµ½netdev_max_backlogÈë¶ÓÁÐÉÏÏÞ¶ø¶ª°ü
+ unsigned long tx_dropped; //dev_queue_xmitÄÚ²¿Á÷³ÌÖÐËùÓÐÒì³£ÊÍ·ÅskbµÄÀÛ¼Ó£¬Èç(txq->xmit_lock_owner == cpu)Ôì³ÉµÄ¶ª°ü
+};
+
+//Added by networking: per-conn TX/RX packet statistics
+struct conn_skbinfo {
+ unsigned long packets; //Êý¾Ý°ü¸öÊý
+ unsigned long bytes; //×Ö½ÚÊý
+};
+
+/* All fields in this struct are filled in by the driver; the network stack does not set them */
+//This struct must stay consistent with net_device_stats in <linux/netdevice.h>
+struct net_dev_stats {
+ unsigned long rx_packets;
+ unsigned long tx_packets;
+ unsigned long rx_bytes;
+ unsigned long tx_bytes;
+ unsigned long rx_errors; //Ðè¼à¿Ø
+ unsigned long tx_errors; //Ðè¼à¿Ø
+ unsigned long rx_dropped; //Ðè¼à¿Ø
+ unsigned long tx_dropped; //Ðè¼à¿Ø
+ unsigned long multicast;
+ unsigned long collisions;
+ unsigned long rx_length_errors; //Ðè¼à¿Ø
+ unsigned long rx_over_errors; //Ðè¼à¿Ø
+ unsigned long rx_crc_errors; //Ðè¼à¿Ø
+ unsigned long rx_frame_errors; //Ðè¼à¿Ø
+ unsigned long rx_fifo_errors; //Ðè¼à¿Ø
+ unsigned long rx_missed_errors; //Ðè¼à¿Ø
+ unsigned long tx_aborted_errors; //Ðè¼à¿Ø
+ unsigned long tx_carrier_errors; //Ðè¼à¿Ø
+ unsigned long tx_fifo_errors; //Ðè¼à¿Ø
+ unsigned long tx_heartbeat_errors; //Ðè¼à¿Ø
+ unsigned long tx_window_errors; //Ðè¼à¿Ø
+ unsigned long rx_compressed;
+ unsigned long tx_compressed;
+};
+
+struct ioctl_dev_netstats
+{
+ char dev_name[20];
+ struct net_dev_skbinfo stats_dbg; //ÍøÂçÌí¼ÓµÄ£¬Í³¼ÆÍøÂç²ãÃæÊÕ·¢°üÇé¿ö
+ struct net_dev_stats stats; //Êý¾ÝÊÕ·¢Í³¼ÆÖµ£¬¸Ãֵȫ²¿ÓÉÇý¶¯¸³ÖµµÄ£¬ÍøÂç²»¸³Öµ
+ unsigned int flags; //IFF_UPµÈ
+ unsigned char operstate; //ĿǰûɶÓÃ
+ unsigned long state; //µ×²ãÇý¶¯×´Ì¬£¬__LINK_STATE_NOCARRIER
+ int net_flag; //Óû§ifconfigµÄ½á¹û״̬£¬ÈçDEV_OPEN_FAIL
+ unsigned long que_state; //¶ÓÁÐ״̬£¬¶ÔÓ¦ dev->_tx[0].state£¬Èç__QUEUE_STATE_DRV_XOFF
+ unsigned int num_tx_queues; //TX¶ÓÁÐÊýÁ¿£¬¶àÊýÇý¶¯Ê¹ÓÃΨһ¶ÓÁУ¬ÔòֵΪ1
+};
+
+
+/************************* Struct definitions: OTHER ***********************/
+struct pc_info
+{
+ unsigned char mac_addr[6];
+ char dev_name[DEV_NAME_LEN];
+};
+
+struct pc_node
+{
+ unsigned int num;
+ struct pc_info info[PC_MAX_NUM];
+};
+
+struct leak_info
+{
+ void *addr;
+ int user_num; //µ±Ç°data»òskb±»Ê¹ÓõļÆÊýÖµ£¬°éËæskbÖеÄusersºÍdataref¶ø±ä»¯
+ int track_num; //¼Ç¼ÒѼǼµ½Êý×é¹ì¼£µÄϱêË÷Òý
+ char func_track[10][100];//º¯Êý»ØËݹ켣
+};
+
+struct slab_info
+{
+ int num[SLAB_NUM][2];
+};
+
+
+struct hash_info
+{
+ int max_hash_size;
+ int current_hash_num;
+ int hash[HASH_ARRAY_COUNT][2];
+ int current_array_size;
+};
+
+struct ptype_info
+{
+ unsigned long ptype_all[5];
+ unsigned long ptype_base[15];
+};
+
+struct pkt_lost_stats
+{
+ unsigned int send_drops;
+ unsigned int send_drop_bytes;
+ unsigned int recv_drops;
+ unsigned int recv_drop_bytes;
+ unsigned int total_packets;
+ unsigned int total_bytes;
+};
+
+struct pkt_lost_info
+{
+ struct pkt_lost_stats stats[2];
+};
+
+typedef struct
+{
+ char usb[DEV_NAME_LEN_20];
+ char ps[DEV_NAME_LEN_20];
+ char wifi_wan[DEV_NAME_LEN_20];
+ char wifi_lan[DEV_NAME_LEN_20];
+ char eth_wan[DEV_NAME_LEN_20];
+ char eth_lan[DEV_NAME_LEN_20];
+ char ps_ext1[DEV_NAME_LEN_20];
+ char ps_ext2[DEV_NAME_LEN_20];
+ char ps_ext3[DEV_NAME_LEN_20];
+ char ps_ext4[DEV_NAME_LEN_20];
+} net_dbg_dev_info_t;
+
+struct time_list{
+ struct timeval tv;
+ struct list_head packet_list;
+};
+
+struct net_debug_packet_list{
+ struct list_head list;
+ struct list_head time;
+ int pid;
+ int tgid;
+ char pname[DEV_NAME_LEN];
+ int count;
+};
+
+/****************************** Variable declarations ***********************/
+/****************************** Variable declarations ***********************/
+/****************************** Variable declarations ***********************/
+extern int leak_set; // 1±íʾ¸ú×Ùskb¼°dataÉêÇëÊͷŵ㣻2±íʾ¸ú×Ùusers£¬ÒÔ¼ì²âΪºÎskbʼÖÕ²»ÄÜÊÍ·Å£¬ÔÝʱ¸Ã¹¦ÄÜÉв»¿ÉÓÃ
+extern int leak_list_max; // ÈÝÐí»º´æµÄ´ýÊͷŵÄskb¼°dataµÄ¸öÊý£¬¿Éµ÷Õû£»
+extern int track_max; //¹ì¼£¸ú×ÙÊý×éµÄÉÏÏÞ£¬½öµ±user++--ʱ²ÅÐèÒªÀ©´ó£¬·ñÔò2¼´¿É£»
+extern int stack_lenmax; //Õ»º¯Êý¹ì¼£µÄ×Ö·û¸öÊýÉÏÏÞ£»
+extern int leak_full_panic;
+extern unsigned long now_time; //µ±Ç°Ê±¿Ìµã
+extern spinlock_t leak_lock; //·ÀÖ¹ÔÚbhÖб»µ÷Óã¬Ê¹ÓÃbhËø
+
+
+extern struct leak_list data_leak[TRACK_END];
+extern struct leak_list data_free[TRACK_END];//·Ö±ð¶ÔÓ¦ÕýÔÚʹÓõÄÊý¾ÝºÍÒѾÊͷŵÄÊý¾ÝÁ´±í
+extern void *data_head[TRACK_END];//ÿ¸öÄÚ´æ¼à¿ØÀàÐ͵ijõʼ»¯Ê×µØÖ·£¬È·±£Á´±íµÄÊý¾ÝÇøÁ¬Ðø£¬ÒÔ±ãramdumpʱֱ½ÓËÑË÷
+extern int init_finish ;//Á´±í³õʼ»¯±êÖ¾
+
+/* Variables used by dump stk */
+extern unsigned int skb_dump_len;
+extern char skb_dump_str[];
+
+/*ºË¼äÖØ¸´Êͷżì²â¿ª¹Ø*/
+extern int set_psbufleak ;
+extern int set_extskbleak ;
+
+extern unsigned long skbinfo_dbg[SKB_INFO_MAX];
+extern unsigned long netruninfo_dbg[NET_INFO_MAX];
+extern unsigned char br_ipchange_flag; //br0 ip´Û¸Ä¶ÏÑÔ
+extern int set_tcpdump; //¶¨µã×¥°ü¿ª¹Ø
+
+extern unsigned char ignoremac[ETH_ALEN];
+
+/* Statistics for local TCP */
+extern unsigned long tcp_stats_dbg[TCP_STATS_MAX];
+
+extern char br_name[];
+extern char ps_name[];
+extern char usb_name[];
+extern char ppp_name[];
+
+
+//sqÌí¼Ó£¬ÓÃÓÚÊý¾Ý°ü½¨Ä£ÒÔ¼°ÐÔÄÜͳ¼ÆÏà¹Ø£¬net_info_numÈ«¾ÖÖÐÐÔÄÜÏà¹ØµÄͳ¼ÆÒ²ÐèÌåÏÖ
+extern int skb_num4; //½ÓÊÕµ½µÄV4Êý¾Ý°ü
+extern int skb_num6; //½ÓÊÕµ½µÄV6Êý¾Ý°ü
+extern int skb_big_num; //len³¤¶È³¬¹ý1000µÄÊý¾Ý°ü£¬º¬V4ºÍV6
+extern int skb_small_num; //len³¤¶ÈСÓÚ100µÄÊý¾Ý°ü£¬º¬V4ºÍV6
+extern int skb_bytes4; //½ÓÊÕµ½µÄV4Êý¾Ý°ü×Ö½ÚÊý
+extern int skb_bytes6; //½ÓÊÕµ½µÄV6Êý¾Ý°ü×Ö½ÚÊý
+extern int skb_unknown; //½ÓÊÕµ½µÄδ֪ÐÒéÊý¾Ý°ü£¬°üÀ¨ARP
+extern int skb_tcpnum; //½ÓÊÕµ½µÄtcpÊý¾Ý°ü£¬º¬V4ºÍV6
+extern int skb_udpnum; //½ÓÊÕµ½µÄudpÊý¾Ý°ü£¬º¬V4ºÍV6
+extern int broadcast_num4; //½ÓÊÕµ½µÄV4¹ã²¥°ü
+extern int broadcast_num6; //½ÓÊÕµ½µÄV6¹ã²¥°ü
+extern int multicast_num4; //½ÓÊÕµ½µÄV4×é²¥±¨
+extern int multicast_num6; //½ÓÊÕµ½µÄV6×é²¥±¨
+extern int fastnat_num; //fastnat³É¹¦µÄ±¨ÎÄ
+extern int fast6_num; //fast6³É¹¦µÄ±¨ÎÄ
+extern int fastbr_num; //fastbr³É¹¦µÄ±¨ÎÄ
+extern int fast_local4_rcv_num; //±¾µØfast_local4³É¹¦½ÓÊÕ±¨ÎÄ
+extern int fast_local6_rcv_num; //±¾µØfast_local6³É¹¦½ÓÊÕ±¨ÎÄ
+extern int fast_local4_output_num; //±¾µØfast_local4³É¹¦·¢Ëͱ¨ÎÄ
+extern int fast_local6_output_num; //±¾µØfast_local6³É¹¦·¢Ëͱ¨ÎÄ
+extern int fast_tcpdump_num; //fast×¥°üÊýÁ¿
+
+extern int double_mac;
+
+extern int net_debug_ping; //×ÔÑÐping°ü¼Ç¼¶ª°üʱÑÓ¹¦ÄÜ
+extern int net_debug_perf; //×ÔÑÐtcp/udp°ü¼Ç¼¶ª°üʱÑÓ¹¦ÄÜ£¬ÐèÒªserver¶ÎÄܰ²×°×ÔÑÐÓ¦ÓÃ
+
+//slabÄÚ´æÊ¹ÓÃÏà¹ØÍ³¼Æ£¬Î´¿¼ÂÇͨÓÃslabµØÖ·³Ø£¬Èçkmalloc
+extern struct slab_info slab_count;
+
+//·¢Ë͸øCP´¦ÀíµÄ£¬´ýÊͷŵÄskbÁ´±í£¬ÓÃÓÚlog¸ú×Ù£¬·ÀÖ¹ºË¼äÄÚ´æÐ¹Â©£»
+extern struct ext_list toCp_listlog[MAX_EXT_MEM_HASH];
+
+//CP·¢Ë͸øAPµÄpsbufÐÅÏ¢Á´±í£¬ÓÃÓÚlog¸ú×Ù£¬·ÀÖ¹ºË¼äÄÚ´æÐ¹Â©;
+extern struct ext_list fromCp_list[MAX_EXT_MEM_HASH];
+
+/* TCP statistics accounting */
+#define TCP_PKT_STATS_INC(_mod) tcp_stats_dbg[_mod]++
+
+
+/****************************** External interface declarations ***********************/
+/****************************** External interface declarations ***********************/
+/****************************** External interface declarations ***********************/
+extern void skbinfo_add(unsigned char *addr,unsigned int skb_type);
+extern void skbinfo_del(unsigned char *addr,unsigned int skb_type);
+extern void netruninfo_add(unsigned char *addr,unsigned int info_type);
+extern void netruninfo_del(unsigned char *addr,unsigned int info_type);
+
+extern int get_skbcnt(unsigned long arg);
+extern int get_dev_info(unsigned long arg);
+extern int get_skb_using(unsigned long arg);
+extern int network_get_pcmac(unsigned long arg);
+extern int get_kernelparam(unsigned long arg);
+extern int get_slab_info(unsigned long arg);
+extern int get_hash_info(unsigned long arg);
+
+extern int set_fastnat_level(void *arg);
+extern int set_fastbr_level(void *arg);
+extern int set_fast_debug_panic(void *arg);
+extern int set_fast_dev_xmit(void *arg);
+extern int set_ackdrop(void *arg);
+extern int set_dumpflag(void *arg);
+extern int set_skb_dump(unsigned long arg);
+extern int set_print_opt(void *arg);
+extern int set_sq_tcpdump(void *arg);
+extern int set_leak(void *arg);
+extern int set_max(unsigned long arg);
+extern int set_stacklenmax(unsigned long arg);
+extern int set_trackmax(unsigned long arg);
+extern int set_tcpdump_opt(unsigned long arg);
+extern int set_br_name(void *arg);
+extern int set_ps_name(void *arg);
+extern int set_usb_name(void *arg);
+extern int set_ppp_name(void *arg);
+extern int set_brip(unsigned long arg);
+extern int set_kernelparam(unsigned long arg);
+extern int set_errno_procname(void *arg);
+extern int get_neigh_ip(unsigned long arg);
+extern int get_skb_fast(unsigned long arg);
+extern int get_max_msg(unsigned long arg);
+extern int get_ptype(unsigned long arg);
+extern int get_process_info(void *arg);
+extern void netslab_inc(int i);
+extern void netslab_dec(int i);
+extern void track_netlink(struct sk_buff *skb,u32 group);
+//extern void record_app_atcive_net();
+
+int get_pkt_lost_info(unsigned long arg);
+
+int get_tcp_stat_info(unsigned long arg);
+
+#endif //end _NETIO_FASTINFO_H_
+
+
diff --git a/upstream/linux-5.10/include/net/netfilter/nf_conntrack.h b/upstream/linux-5.10/include/net/netfilter/nf_conntrack.h
new file mode 100755
index 0000000..04ab917
--- /dev/null
+++ b/upstream/linux-5.10/include/net/netfilter/nf_conntrack.h
@@ -0,0 +1,368 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Connection state tracking for netfilter. This is separated from,
+ * but required by, the (future) NAT layer; it can also be used by an iptables
+ * extension.
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ * - generalize L3 protocol dependent part.
+ *
+ * Derived from include/linux/netfiter_ipv4/ip_conntrack.h
+ */
+
+#ifndef _NF_CONNTRACK_H
+#define _NF_CONNTRACK_H
+
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+
+#include <linux/netfilter/nf_conntrack_common.h>
+#include <linux/netfilter/nf_conntrack_tcp.h>
+#include <linux/netfilter/nf_conntrack_dccp.h>
+#include <linux/netfilter/nf_conntrack_sctp.h>
+#include <linux/netfilter/nf_conntrack_proto_gre.h>
+#include <net/SI/netioc_proc.h>
+
+#include <net/netfilter/nf_conntrack_tuple.h>
+#ifdef CONFIG_FASTNAT_MODULE
+#include <net/SI/pkt_lost_track.h>
+#endif
+
+
+
+struct nf_ct_udp {
+ unsigned long stream_ts;
+};
+
+/* per conntrack: protocol private data */
+union nf_conntrack_proto {
+ /* insert conntrack proto private data here */
+ struct nf_ct_dccp dccp;
+ struct ip_ct_sctp sctp;
+ struct ip_ct_tcp tcp;
+ struct nf_ct_udp udp;
+ struct nf_ct_gre gre;
+ unsigned int tmpl_padto;
+};
+
+union nf_conntrack_expect_proto {
+ /* insert expect proto private data here */
+};
+
+struct nf_conntrack_net {
+ unsigned int users4;
+ unsigned int users6;
+ unsigned int users_bridge;
+};
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+
+#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
+#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
+#ifdef CONFIG_FASTNAT_MODULE
+struct fast_ct_ext{
+ union {
+ struct {
+ struct dst_entry __rcu *fast_dst[IP_CT_DIR_MAX];
+ struct net_device __rcu *fast_brport[IP_CT_DIR_MAX];
+ };
+ struct sock __rcu *sk;
+ };
+ unsigned char isFast;
+};
+#endif
+
+struct nf_conn {
+ /* Usage count in here is 1 for hash table, 1 per skb,
+ * plus 1 for any connection(s) we are `master' for
+ *
+ * Hint, SKB address this struct and refcnt via skb->_nfct and
+ * helpers nf_conntrack_get() and nf_conntrack_put().
+ * Helper nf_ct_put() equals nf_conntrack_put() by dec refcnt,
+ * beware nf_ct_get() is different and don't inc refcnt.
+ */
+ struct nf_conntrack ct_general;
+
+ spinlock_t lock;
+ /* jiffies32 when this ct is considered dead */
+ u32 timeout;
+
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+ struct nf_conntrack_zone zone;
+#endif
+ /* XXX should I move this to the tail ? - Y.K */
+ /* These are my tuples; original and reply */
+ struct nf_conntrack_tuple_hash tuplehash[IP_CT_DIR_MAX];
+
+ /* Have we seen traffic both ways yet? (bitset) */
+ unsigned long status;
+
+ u16 cpu;
+ possible_net_t ct_net;
+
+#if IS_ENABLED(CONFIG_NF_NAT)
+ struct hlist_node nat_bysource;
+#endif
+ /* all members below initialized via memset */
+ struct { } __nfct_init_offset;
+
+ /* If we were expected by an expectation, this will be it */
+ struct nf_conn *master;
+
+#if defined(CONFIG_NF_CONNTRACK_MARK)
+ u_int32_t mark;
+#endif
+
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+ u_int32_t secmark;
+#endif
+
+ /* Extensions */
+ struct nf_ct_ext *ext;
+
+ /* Storage reserved for other modules, must be the last member */
+ union nf_conntrack_proto proto;
+#ifdef CONFIG_FASTNAT_MODULE
+ struct fast_ct_ext fast_ct;
+ struct conn_seq_track conn_pktloss[IP_CT_DIR_MAX];
+#endif
+
+ struct conn_skbinfo packet_info[IP_CT_DIR_MAX];
+ struct net_device* indev[IP_CT_DIR_MAX];
+ struct net_device* outdev[IP_CT_DIR_MAX];
+};
+
+static inline struct nf_conn *
+nf_ct_tuplehash_to_ctrack(const struct nf_conntrack_tuple_hash *hash)
+{
+ return container_of(hash, struct nf_conn,
+ tuplehash[hash->tuple.dst.dir]);
+}
+
+static inline u_int16_t nf_ct_l3num(const struct nf_conn *ct)
+{
+ return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
+}
+
+static inline u_int8_t nf_ct_protonum(const struct nf_conn *ct)
+{
+ return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum;
+}
+
+#define nf_ct_tuple(ct, dir) (&(ct)->tuplehash[dir].tuple)
+
+/* get master conntrack via master expectation */
+#define master_ct(conntr) (conntr->master)
+
+extern struct net init_net;
+
+static inline struct net *nf_ct_net(const struct nf_conn *ct)
+{
+ return read_pnet(&ct->ct_net);
+}
+
+/* Alter reply tuple (maybe alter helper). */
+void nf_conntrack_alter_reply(struct nf_conn *ct,
+ const struct nf_conntrack_tuple *newreply);
+
+/* Is this tuple taken? (ignoring any belonging to the given
+ conntrack). */
+int nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
+ const struct nf_conn *ignored_conntrack);
+
+/* Return conntrack_info and tuple hash for given skb. */
+static inline struct nf_conn *
+nf_ct_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
+{
+ unsigned long nfct = skb_get_nfct(skb);
+
+ *ctinfo = nfct & NFCT_INFOMASK;
+ return (struct nf_conn *)(nfct & NFCT_PTRMASK);
+}
+
+/* decrement reference count on a conntrack */
+static inline void nf_ct_put(struct nf_conn *ct)
+{
+ WARN_ON(!ct);
+ nf_conntrack_put(&ct->ct_general);
+}
+
+/* Protocol module loading */
+int nf_ct_l3proto_try_module_get(unsigned short l3proto);
+void nf_ct_l3proto_module_put(unsigned short l3proto);
+
+/* load module; enable/disable conntrack in this namespace */
+int nf_ct_netns_get(struct net *net, u8 nfproto);
+void nf_ct_netns_put(struct net *net, u8 nfproto);
+
+/*
+ * Allocate a hashtable of hlist_head (if nulls == 0),
+ * or hlist_nulls_head (if nulls == 1)
+ */
+void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);
+
+void nf_ct_free_hashtable(void *hash, unsigned int size);
+int nf_conntrack_hash_check_insert(struct nf_conn *ct);
+bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report);
+
+bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
+ u_int16_t l3num, struct net *net,
+ struct nf_conntrack_tuple *tuple);
+
+void __nf_ct_refresh_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ const struct sk_buff *skb,
+ u32 extra_jiffies, bool do_acct);
+
+/* Refresh conntrack for this many jiffies and do accounting */
+static inline void nf_ct_refresh_acct(struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ const struct sk_buff *skb,
+ u32 extra_jiffies)
+{
+ __nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies, true);
+}
+
+/* Refresh conntrack for this many jiffies */
+static inline void nf_ct_refresh(struct nf_conn *ct,
+ const struct sk_buff *skb,
+ u32 extra_jiffies)
+{
+ __nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, false);
+}
+
+/* kill conntrack and do accounting */
+bool nf_ct_kill_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ const struct sk_buff *skb);
+
+/* kill conntrack without accounting */
+static inline bool nf_ct_kill(struct nf_conn *ct)
+{
+ return nf_ct_delete(ct, 0, 0);
+}
+
+/* Set all unconfirmed conntrack as dying */
+void nf_ct_unconfirmed_destroy(struct net *);
+
+/* Iterate over all conntracks: if iter returns true, it's deleted. */
+void nf_ct_iterate_cleanup_net(struct net *net,
+ int (*iter)(struct nf_conn *i, void *data),
+ void *data, u32 portid, int report);
+
+/* also set unconfirmed conntracks as dying. Only use in module exit path. */
+void nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data),
+ void *data);
+
+struct nf_conntrack_zone;
+
+void nf_conntrack_free(struct nf_conn *ct);
+struct nf_conn *nf_conntrack_alloc(struct net *net,
+ const struct nf_conntrack_zone *zone,
+ const struct nf_conntrack_tuple *orig,
+ const struct nf_conntrack_tuple *repl,
+ gfp_t gfp);
+
+static inline int nf_ct_is_template(const struct nf_conn *ct)
+{
+ return test_bit(IPS_TEMPLATE_BIT, &ct->status);
+}
+
+/* It's confirmed if it is, or has been in the hash table. */
+static inline int nf_ct_is_confirmed(const struct nf_conn *ct)
+{
+ return test_bit(IPS_CONFIRMED_BIT, &ct->status);
+}
+
+static inline int nf_ct_is_dying(const struct nf_conn *ct)
+{
+ return test_bit(IPS_DYING_BIT, &ct->status);
+}
+
+/* Packet is received from loopback */
+static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
+{
+ return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
+}
+
+#define nfct_time_stamp ((u32)(jiffies))
+
+/* jiffies until ct expires, 0 if already expired */
+static inline unsigned long nf_ct_expires(const struct nf_conn *ct)
+{
+ s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp;
+
+ return timeout > 0 ? timeout : 0;
+}
+
+static inline bool nf_ct_is_expired(const struct nf_conn *ct)
+{
+ return (__s32)(READ_ONCE(ct->timeout) - nfct_time_stamp) <= 0;
+}
+
+/* use after obtaining a reference count */
+static inline bool nf_ct_should_gc(const struct nf_conn *ct)
+{
+ return nf_ct_is_expired(ct) && nf_ct_is_confirmed(ct) &&
+ !nf_ct_is_dying(ct);
+}
+
+#define NF_CT_DAY (86400 * HZ)
+
+/* Set an arbitrary timeout large enough not to ever expire, this save
+ * us a check for the IPS_OFFLOAD_BIT from the packet path via
+ * nf_ct_is_expired().
+ */
+static inline void nf_ct_offload_timeout(struct nf_conn *ct)
+{
+ if (nf_ct_expires(ct) < NF_CT_DAY / 2)
+ WRITE_ONCE(ct->timeout, nfct_time_stamp + NF_CT_DAY);
+}
+
+struct kernel_param;
+
+int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp);
+int nf_conntrack_hash_resize(unsigned int hashsize);
+
+extern struct hlist_nulls_head *nf_conntrack_hash;
+extern unsigned int nf_conntrack_htable_size;
+extern seqcount_spinlock_t nf_conntrack_generation;
+extern unsigned int nf_conntrack_max;
+
+/* must be called with rcu read lock held */
+static inline void
+nf_conntrack_get_ht(struct hlist_nulls_head **hash, unsigned int *hsize)
+{
+ struct hlist_nulls_head *hptr;
+ unsigned int sequence, hsz;
+
+ do {
+ sequence = read_seqcount_begin(&nf_conntrack_generation);
+ hsz = nf_conntrack_htable_size;
+ hptr = nf_conntrack_hash;
+ } while (read_seqcount_retry(&nf_conntrack_generation, sequence));
+
+ *hash = hptr;
+ *hsize = hsz;
+}
+
+struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
+ const struct nf_conntrack_zone *zone,
+ gfp_t flags);
+void nf_ct_tmpl_free(struct nf_conn *tmpl);
+
+u32 nf_ct_get_id(const struct nf_conn *ct);
+
+static inline void
+nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info)
+{
+ skb_set_nfct(skb, (unsigned long)ct | info);
+}
+
+#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count)
+#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
+#define NF_CT_STAT_ADD_ATOMIC(net, count, v) this_cpu_add((net)->ct.stat->count, (v))
+
+#define MODULE_ALIAS_NFCT_HELPER(helper) \
+ MODULE_ALIAS("nfct-helper-" helper)
+
+#endif /* _NF_CONNTRACK_H */
diff --git a/upstream/linux-5.10/init/main.c b/upstream/linux-5.10/init/main.c
new file mode 100755
index 0000000..83d93ea
--- /dev/null
+++ b/upstream/linux-5.10/init/main.c
@@ -0,0 +1,1571 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/init/main.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * GK 2/5/95 - Changed to support mounting root fs via NFS
+ * Added initrd & change_root: Werner Almesberger & Hans Lermen, Feb '96
+ * Moan early if gcc is old, avoiding bogus kernels - Paul Gortmaker, May '96
+ * Simplified starting of init: Michael A. Griffith <grif@acm.org>
+ */
+
+#define DEBUG /* Enable initcall_debug */
+
+#include <linux/types.h>
+#include <linux/extable.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/binfmts.h>
+#include <linux/kernel.h>
+#include <linux/syscalls.h>
+#include <linux/stackprotector.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/initrd.h>
+#include <linux/memblock.h>
+#include <linux/acpi.h>
+#include <linux/bootconfig.h>
+#include <linux/console.h>
+#include <linux/nmi.h>
+#include <linux/percpu.h>
+#include <linux/kmod.h>
+#include <linux/kprobes.h>
+#include <linux/vmalloc.h>
+#include <linux/kernel_stat.h>
+#include <linux/start_kernel.h>
+#include <linux/security.h>
+#include <linux/smp.h>
+#include <linux/profile.h>
+#include <linux/rcupdate.h>
+#include <linux/moduleparam.h>
+#include <linux/kallsyms.h>
+#include <linux/writeback.h>
+#include <linux/cpu.h>
+#include <linux/cpuset.h>
+#include <linux/cgroup.h>
+#include <linux/efi.h>
+#include <linux/tick.h>
+#include <linux/sched/isolation.h>
+#include <linux/interrupt.h>
+#include <linux/taskstats_kern.h>
+#include <linux/delayacct.h>
+#include <linux/unistd.h>
+#include <linux/utsname.h>
+#include <linux/rmap.h>
+#include <linux/mempolicy.h>
+#include <linux/key.h>
+#include <linux/buffer_head.h>
+#include <linux/page_ext.h>
+#include <linux/debug_locks.h>
+#include <linux/debugobjects.h>
+#include <linux/lockdep.h>
+#include <linux/kmemleak.h>
+#include <linux/padata.h>
+#include <linux/pid_namespace.h>
+#include <linux/device/driver.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/sched/init.h>
+#include <linux/signal.h>
+#include <linux/idr.h>
+#include <linux/kgdb.h>
+#include <linux/ftrace.h>
+#include <linux/async.h>
+#include <linux/sfi.h>
+#include <linux/shmem_fs.h>
+#include <linux/slab.h>
+#include <linux/perf_event.h>
+#include <linux/ptrace.h>
+#include <linux/pti.h>
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/sched/clock.h>
+#include <linux/sched/task.h>
+#include <linux/sched/task_stack.h>
+#include <linux/context_tracking.h>
+#include <linux/random.h>
+#include <linux/list.h>
+#include <linux/integrity.h>
+#include <linux/proc_ns.h>
+#include <linux/io.h>
+#include <linux/cache.h>
+#include <linux/rodata_test.h>
+#include <linux/jump_label.h>
+#include <linux/mem_encrypt.h>
+#include <linux/kcsan.h>
+#include <linux/init_syscalls.h>
+
+#include <asm/io.h>
+#include <asm/bugs.h>
+#include <asm/setup.h>
+#include <asm/sections.h>
+#include <asm/cacheflush.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/initcall.h>
+
+#include <kunit/test.h>
+
+#ifdef CONFIG_FLAGS_UTILS
+#include <linux/reboot.h>
+#include "pub_flags.h"
+#endif
+
+static int kernel_init(void *);
+
+extern void init_IRQ(void);
+extern void radix_tree_init(void);
+
+/*
+ * Debug helper: via this flag we know that we are in 'early bootup code'
+ * where only the boot processor is running with IRQ disabled. This means
+ * two things - IRQ must not be enabled before the flag is cleared and some
+ * operations which are not allowed with IRQ disabled are allowed while the
+ * flag is set.
+ */
+bool early_boot_irqs_disabled __read_mostly;
+
+enum system_states system_state __read_mostly;
+EXPORT_SYMBOL(system_state);
+
+/*
+ * Boot command-line arguments
+ */
+#define MAX_INIT_ARGS CONFIG_INIT_ENV_ARG_LIMIT
+#define MAX_INIT_ENVS CONFIG_INIT_ENV_ARG_LIMIT
+
+extern void time_init(void);
+/* Default late time init is NULL. archs can override this later. */
+void (*__initdata late_time_init)(void);
+
+/* Untouched command line saved by arch-specific code. */
+char __initdata boot_command_line[COMMAND_LINE_SIZE];
+/* Untouched saved command line (eg. for /proc) */
+char *saved_command_line;
+/* Command line for parameter parsing */
+static char *static_command_line;
+/* Untouched extra command line */
+static char *extra_command_line;
+/* Extra init arguments */
+static char *extra_init_args;
+
+#ifdef CONFIG_BOOT_CONFIG
+/* Is bootconfig on command line? */
+static bool bootconfig_found;
+static bool initargs_found;
+#else
+# define bootconfig_found false
+# define initargs_found false
+#endif
+
+static char *execute_command;
+static char *ramdisk_execute_command = "/init";
+
+/*
+ * Used to generate warnings if static_key manipulation functions are used
+ * before jump_label_init is called.
+ */
+bool static_key_initialized __read_mostly;
+EXPORT_SYMBOL_GPL(static_key_initialized);
+
+/*
+ * If set, this is an indication to the drivers that reset the underlying
+ * device before going ahead with the initialization otherwise driver might
+ * rely on the BIOS and skip the reset operation.
+ *
+ * This is useful if kernel is booting in an unreliable environment.
+ * For ex. kdump situation where previous kernel has crashed, BIOS has been
+ * skipped and devices will be in unknown state.
+ */
+unsigned int reset_devices;
+EXPORT_SYMBOL(reset_devices);
+
+/* "reset_devices" boot option handler: see comment above reset_devices. */
+static int __init set_reset_devices(char *str)
+{
+ reset_devices = 1;
+ return 1; /* option consumed */
+}
+
+__setup("reset_devices", set_reset_devices);
+
+static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
+const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
+static const char *panic_later, *panic_param;
+
+extern const struct obs_kernel_param __setup_start[], __setup_end[];
+
+/*
+ * Match @line against the old-style __setup() table and invoke the first
+ * matching handler.  Returns true if the option was consumed, or if it
+ * was already handled as an early param.
+ */
+static bool __init obsolete_checksetup(char *line)
+{
+ const struct obs_kernel_param *p;
+ bool had_early_param = false;
+
+ p = __setup_start;
+ do {
+ int n = strlen(p->str);
+ if (parameqn(line, p->str, n)) {
+ if (p->early) {
+ /* Already done in parse_early_param?
+ * (Needs exact match on param part).
+ * Keep iterating, as we can have early
+ * params and __setups of same names 8( */
+ if (line[n] == '\0' || line[n] == '=')
+ had_early_param = true;
+ } else if (!p->setup_func) {
+ pr_warn("Parameter %s is obsolete, ignored\n",
+ p->str);
+ return true;
+ } else if (p->setup_func(line + n))
+ return true;
+ }
+ p++;
+ } while (p < __setup_end);
+
+ return had_early_param;
+}
+
+/*
+ * This should be approx 2 Bo*oMips to start (note initial shift), and will
+ * still work even if initially too large, it will just take slightly longer
+ */
+unsigned long loops_per_jiffy = (1<<12);
+EXPORT_SYMBOL(loops_per_jiffy);
+
+/* "debug" early param: force the console loglevel to DEBUG. */
+static int __init debug_kernel(char *str)
+{
+ console_loglevel = CONSOLE_LOGLEVEL_DEBUG;
+ return 0;
+}
+
+/* "quiet" early param: lower the console loglevel to QUIET. */
+static int __init quiet_kernel(char *str)
+{
+ console_loglevel = CONSOLE_LOGLEVEL_QUIET;
+ return 0;
+}
+
+early_param("debug", debug_kernel);
+early_param("quiet", quiet_kernel);
+
+/* "loglevel=N" early param: set console loglevel to a validated integer. */
+static int __init loglevel(char *str)
+{
+ int newlevel;
+
+ /*
+ * Only update loglevel value when a correct setting was passed,
+ * to prevent blind crashes (when loglevel being set to 0) that
+ * are quite hard to debug
+ */
+ if (get_option(&str, &newlevel)) {
+ console_loglevel = newlevel;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+early_param("loglevel", loglevel);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+/*
+ * Find a bootconfig blob appended to the tail of the initrd.  On success,
+ * return a pointer to the data, report its size/checksum through
+ * @_size/@_csum, and move initrd_end back so the blob is excluded from
+ * the initramfs image.  Returns NULL when no magic marker is found or
+ * the recorded size is implausible.
+ */
+static void * __init get_boot_config_from_initrd(u32 *_size, u32 *_csum)
+{
+ u32 size, csum;
+ char *data;
+ u32 *hdr;
+ int i;
+
+ if (!initrd_end)
+ return NULL;
+
+ data = (char *)initrd_end - BOOTCONFIG_MAGIC_LEN;
+ /*
+ * Since Grub may align the size of initrd to 4, we must
+ * check the preceding 3 bytes as well.
+ */
+ for (i = 0; i < 4; i++) {
+ if (!memcmp(data, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN))
+ goto found;
+ data--;
+ }
+ return NULL;
+
+found:
+ /* 8 bytes before the magic: little-endian u32 size then u32 checksum. */
+ hdr = (u32 *)(data - 8);
+ size = le32_to_cpu(hdr[0]);
+ csum = le32_to_cpu(hdr[1]);
+
+ data = ((void *)hdr) - size;
+ if ((unsigned long)data < initrd_start) {
+ pr_err("bootconfig size %d is greater than initrd size %ld\n",
+ size, initrd_end - initrd_start);
+ return NULL;
+ }
+
+ /* Remove bootconfig from initramfs/initrd */
+ initrd_end = (unsigned long)data;
+ if (_size)
+ *_size = size;
+ if (_csum)
+ *_csum = csum;
+
+ return data;
+}
+#else
+/* No initrd support configured: there can be no appended bootconfig. */
+static void * __init get_boot_config_from_initrd(u32 *_size, u32 *_csum)
+{
+ return NULL;
+}
+#endif
+
+#ifdef CONFIG_BOOT_CONFIG
+
+static char xbc_namebuf[XBC_KEYLEN_MAX] __initdata;
+
+#define rest(dst, end) ((end) > (dst) ? (end) - (dst) : 0)
+
+/*
+ * Render all key/value pairs under @root as 'key="value" ' words into
+ * @buf (at most @size bytes).  Called with buf == NULL / size == 0 it
+ * only measures: the return value is the number of bytes written (or
+ * that would be written), or a negative error from key composition.
+ */
+static int __init xbc_snprint_cmdline(char *buf, size_t size,
+ struct xbc_node *root)
+{
+ struct xbc_node *knode, *vnode;
+ char *end = buf + size;
+ const char *val;
+ int ret;
+
+ xbc_node_for_each_key_value(root, knode, val) {
+ ret = xbc_node_compose_key_after(root, knode,
+ xbc_namebuf, XBC_KEYLEN_MAX);
+ if (ret < 0)
+ return ret;
+
+ vnode = xbc_node_get_child(knode);
+ if (!vnode) {
+ /* Valueless key: emit the bare key word. */
+ ret = snprintf(buf, rest(buf, end), "%s ", xbc_namebuf);
+ if (ret < 0)
+ return ret;
+ buf += ret;
+ continue;
+ }
+ xbc_array_for_each_value(vnode, val) {
+ ret = snprintf(buf, rest(buf, end), "%s=\"%s\" ",
+ xbc_namebuf, val);
+ if (ret < 0)
+ return ret;
+ buf += ret;
+ }
+ }
+
+ /* (end - size) is the original buf, so this is total bytes emitted. */
+ return buf - (end - size);
+}
+#undef rest
+
+/* Make an extra command line under given key word */
+/* Returns a memblock-allocated NUL-terminated string, or NULL if the key
+ * does not exist, is empty, or allocation/printing fails. */
+static char * __init xbc_make_cmdline(const char *key)
+{
+ struct xbc_node *root;
+ char *new_cmdline;
+ int ret, len = 0;
+
+ root = xbc_find_node(key);
+ if (!root)
+ return NULL;
+
+ /* Count required buffer size */
+ len = xbc_snprint_cmdline(NULL, 0, root);
+ if (len <= 0)
+ return NULL;
+
+ new_cmdline = memblock_alloc(len + 1, SMP_CACHE_BYTES);
+ if (!new_cmdline) {
+ pr_err("Failed to allocate memory for extra kernel cmdline.\n");
+ return NULL;
+ }
+
+ ret = xbc_snprint_cmdline(new_cmdline, len + 1, root);
+ if (ret < 0 || ret > len) {
+ pr_err("Failed to print extra kernel cmdline.\n");
+ memblock_free(__pa(new_cmdline), len + 1);
+ return NULL;
+ }
+
+ return new_cmdline;
+}
+
+/* Simple byte-wise additive checksum over @size bytes at @p. */
+static u32 boot_config_checksum(unsigned char *p, u32 size)
+{
+ u32 ret = 0;
+
+ while (size--)
+ ret += *p++;
+
+ return ret;
+}
+
+/* parse_args() callback: record whether "bootconfig" is on the cmdline. */
+static int __init bootconfig_params(char *param, char *val,
+ const char *unused, void *arg)
+{
+ if (strcmp(param, "bootconfig") == 0) {
+ bootconfig_found = true;
+ }
+ return 0;
+}
+
+/*
+ * Locate, validate (size + checksum) and parse the bootconfig blob from
+ * the initrd, then turn its "kernel." and "init." keys into the extra
+ * cmdline / init-args strings consumed by setup_command_line().
+ * All failures are logged and non-fatal: boot simply continues without
+ * bootconfig.
+ */
+static void __init setup_boot_config(const char *cmdline)
+{
+ static char tmp_cmdline[COMMAND_LINE_SIZE] __initdata;
+ const char *msg;
+ int pos;
+ u32 size, csum;
+ char *data, *copy, *err;
+ int ret;
+
+ /* Cut out the bootconfig data even if we have no bootconfig option */
+ data = get_boot_config_from_initrd(&size, &csum);
+
+ strlcpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+ err = parse_args("bootconfig", tmp_cmdline, NULL, 0, 0, 0, NULL,
+ bootconfig_params);
+
+ if (IS_ERR(err) || !bootconfig_found)
+ return;
+
+ /* parse_args() stops at '--' and returns an address */
+ if (err)
+ initargs_found = true;
+
+ if (!data) {
+ pr_err("'bootconfig' found on command line, but no bootconfig found\n");
+ return;
+ }
+
+ if (size >= XBC_DATA_MAX) {
+ pr_err("bootconfig size %d greater than max size %d\n",
+ size, XBC_DATA_MAX);
+ return;
+ }
+
+ if (boot_config_checksum((unsigned char *)data, size) != csum) {
+ pr_err("bootconfig checksum failed\n");
+ return;
+ }
+
+ /* Copy to a NUL-terminated buffer; the parser needs a C string. */
+ copy = memblock_alloc(size + 1, SMP_CACHE_BYTES);
+ if (!copy) {
+ pr_err("Failed to allocate memory for bootconfig\n");
+ return;
+ }
+
+ memcpy(copy, data, size);
+ copy[size] = '\0';
+
+ ret = xbc_init(copy, &msg, &pos);
+ if (ret < 0) {
+ if (pos < 0)
+ pr_err("Failed to init bootconfig: %s.\n", msg);
+ else
+ pr_err("Failed to parse bootconfig: %s at %d.\n",
+ msg, pos);
+ } else {
+ pr_info("Load bootconfig: %d bytes %d nodes\n", size, ret);
+ /* keys starting with "kernel." are passed via cmdline */
+ extra_command_line = xbc_make_cmdline("kernel");
+ /* Also, "init." keys are init arguments */
+ extra_init_args = xbc_make_cmdline("init");
+ }
+ return;
+}
+
+#else
+
+/* CONFIG_BOOT_CONFIG=n: still strip any appended blob from the initrd. */
+static void __init setup_boot_config(const char *cmdline)
+{
+ /* Remove bootconfig data from initrd */
+ get_boot_config_from_initrd(NULL, NULL);
+}
+
+/* Warn (once, via early_param) if "bootconfig" was passed but unsupported. */
+static int __init warn_bootconfig(char *str)
+{
+ pr_warn("WARNING: 'bootconfig' found on the kernel command line but CONFIG_BOOT_CONFIG is not set.\n");
+ return 0;
+}
+early_param("bootconfig", warn_bootconfig);
+
+#endif
+
+/* Change NUL term back to "=", to make "param" the whole string. */
+static void __init repair_env_string(char *param, char *val)
+{
+ if (val) {
+ /* param=val or param="val"? */
+ if (val == param+strlen(param)+1)
+ val[-1] = '=';
+ else if (val == param+strlen(param)+2) {
+ /* Quoted value: restore '=' and close the gap left by '"'. */
+ val[-2] = '=';
+ memmove(val-1, val, strlen(val)+1);
+ } else
+ BUG();
+ }
+}
+
+/* Anything after -- gets handed straight to init. */
+/* parse_args() callback: append "param" to argv_init, recording a
+ * deferred panic when the MAX_INIT_ARGS budget is exceeded. */
+static int __init set_init_arg(char *param, char *val,
+ const char *unused, void *arg)
+{
+ unsigned int i;
+
+ if (panic_later)
+ return 0;
+
+ repair_env_string(param, val);
+
+ /* Find the first free argv slot. */
+ for (i = 0; argv_init[i]; i++) {
+ if (i == MAX_INIT_ARGS) {
+ panic_later = "init";
+ panic_param = param;
+ return 0;
+ }
+ }
+ argv_init[i] = param;
+ return 0;
+}
+
+/*
+ * Unknown boot options get handed to init, unless they look like
+ * unused parameters (modprobe will find them in /proc/cmdline).
+ */
+static int __init unknown_bootoption(char *param, char *val,
+ const char *unused, void *arg)
+{
+ size_t len = strlen(param);
+
+ repair_env_string(param, val);
+
+ /* Handle obsolete-style parameters */
+ if (obsolete_checksetup(param))
+ return 0;
+
+ /* Unused module parameter. */
+ if (strnchr(param, len, '.'))
+ return 0;
+
+ if (panic_later)
+ return 0;
+
+ if (val) {
+ /* Environment option */
+ unsigned int i;
+ /* Replace an existing "NAME=" entry, else append. */
+ for (i = 0; envp_init[i]; i++) {
+ if (i == MAX_INIT_ENVS) {
+ panic_later = "env";
+ panic_param = param;
+ }
+ if (!strncmp(param, envp_init[i], len+1))
+ break;
+ }
+ envp_init[i] = param;
+ } else {
+ /* Command line option */
+ unsigned int i;
+ for (i = 0; argv_init[i]; i++) {
+ if (i == MAX_INIT_ARGS) {
+ panic_later = "init";
+ panic_param = param;
+ }
+ }
+ argv_init[i] = param;
+ }
+ return 0;
+}
+
+/* "init=" handler: record the init binary and drop earlier argv entries. */
+static int __init init_setup(char *str)
+{
+ unsigned int i;
+
+ execute_command = str;
+ /*
+ * In case LILO is going to boot us with default command line,
+ * it prepends "auto" before the whole cmdline which makes
+ * the shell think it should execute a script with such name.
+ * So we ignore all arguments entered _before_ init=... [MJ]
+ */
+ for (i = 1; i < MAX_INIT_ARGS; i++)
+ argv_init[i] = NULL;
+ return 1;
+}
+__setup("init=", init_setup);
+
+/* "rdinit=" handler: like init_setup but for the ramdisk init binary. */
+static int __init rdinit_setup(char *str)
+{
+ unsigned int i;
+
+ ramdisk_execute_command = str;
+ /* See "auto" comment in init_setup */
+ for (i = 1; i < MAX_INIT_ARGS; i++)
+ argv_init[i] = NULL;
+ return 1;
+}
+__setup("rdinit=", rdinit_setup);
+
+#ifndef CONFIG_SMP
+/* Uniprocessor build: SMP setup hooks become no-ops. */
+static const unsigned int setup_max_cpus = NR_CPUS;
+static inline void setup_nr_cpu_ids(void) { }
+static inline void smp_prepare_cpus(unsigned int maxcpus) { }
+#endif
+
+/*
+ * We need to store the untouched command line for future reference.
+ * We also need to store the touched command line since the parameter
+ * parsing is performed in place, and we should allow a component to
+ * store reference of name/value for future reference.
+ */
+static void __init setup_command_line(char *command_line)
+{
+ /* xlen: bootconfig "kernel." prefix; ilen: bootconfig "init." suffix. */
+ size_t len, xlen = 0, ilen = 0;
+
+ if (extra_command_line)
+ xlen = strlen(extra_command_line);
+ if (extra_init_args)
+ ilen = strlen(extra_init_args) + 4; /* for " -- " */
+
+ len = xlen + strlen(boot_command_line) + 1;
+
+ saved_command_line = memblock_alloc(len + ilen, SMP_CACHE_BYTES);
+ if (!saved_command_line)
+ panic("%s: Failed to allocate %zu bytes\n", __func__, len + ilen);
+
+ static_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
+ if (!static_command_line)
+ panic("%s: Failed to allocate %zu bytes\n", __func__, len);
+
+ if (xlen) {
+ /*
+ * We have to put extra_command_line before boot command
+ * lines because there could be dashes (separator of init
+ * command line) in the command lines.
+ */
+ strcpy(saved_command_line, extra_command_line);
+ strcpy(static_command_line, extra_command_line);
+ }
+ strcpy(saved_command_line + xlen, boot_command_line);
+ strcpy(static_command_line + xlen, command_line);
+
+ if (ilen) {
+ /*
+ * Append supplemental init boot args to saved_command_line
+ * so that user can check what command line options passed
+ * to init.
+ */
+ len = strlen(saved_command_line);
+ if (initargs_found) {
+ saved_command_line[len++] = ' ';
+ } else {
+ strcpy(saved_command_line + len, " -- ");
+ len += 4;
+ }
+
+ strcpy(saved_command_line + len, extra_init_args);
+ }
+}
+
+/*
+ * We need to finalize in a non-__init function or else race conditions
+ * between the root thread and the init thread may cause start_kernel to
+ * be reaped by free_initmem before the root thread has proceeded to
+ * cpu_idle.
+ *
+ * gcc-3.4 accidentally inlines this function, so use noinline.
+ */
+
+static __initdata DECLARE_COMPLETION(kthreadd_done);
+
+/* Spawn PID 1 (kernel_init) and kthreadd, then turn this boot thread
+ * into the boot CPU's idle task. */
+noinline void __ref rest_init(void)
+{
+ struct task_struct *tsk;
+ int pid;
+
+ rcu_scheduler_starting();
+ /*
+ * We need to spawn init first so that it obtains pid 1, however
+ * the init task will end up wanting to create kthreads, which, if
+ * we schedule it before we create kthreadd, will OOPS.
+ */
+ pid = kernel_thread(kernel_init, NULL, CLONE_FS);
+ /*
+ * Pin init on the boot CPU. Task migration is not properly working
+ * until sched_init_smp() has been run. It will set the allowed
+ * CPUs for init to the non isolated CPUs.
+ */
+ rcu_read_lock();
+ tsk = find_task_by_pid_ns(pid, &init_pid_ns);
+ set_cpus_allowed_ptr(tsk, cpumask_of(smp_processor_id()));
+ rcu_read_unlock();
+
+ numa_default_policy();
+ pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
+ rcu_read_lock();
+ kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
+ rcu_read_unlock();
+
+ /*
+ * Enable might_sleep() and smp_processor_id() checks.
+ * They cannot be enabled earlier because with CONFIG_PREEMPTION=y
+ * kernel_thread() would trigger might_sleep() splats. With
+ * CONFIG_PREEMPT_VOLUNTARY=y the init task might have scheduled
+ * already, but it's stuck on the kthreadd_done completion.
+ */
+ system_state = SYSTEM_SCHEDULING;
+
+ complete(&kthreadd_done);
+
+ /*
+ * The boot idle thread must execute schedule()
+ * at least once to get things moving:
+ */
+ schedule_preempt_disabled();
+ /* Call into cpu_idle with preempt disabled */
+ cpu_startup_entry(CPUHP_ONLINE);
+}
+
+/* Check for early params. */
+/* parse_args() callback: run every matching early handler ("console"
+ * additionally matches "earlycon" entries); never rejects a param. */
+static int __init do_early_param(char *param, char *val,
+ const char *unused, void *arg)
+{
+ const struct obs_kernel_param *p;
+
+ for (p = __setup_start; p < __setup_end; p++) {
+ if ((p->early && parameq(param, p->str)) ||
+ (strcmp(param, "console") == 0 &&
+ strcmp(p->str, "earlycon") == 0)
+ ) {
+ if (p->setup_func(val) != 0)
+ pr_warn("Malformed early option '%s'\n", param);
+ }
+ }
+ /* We accept everything at this stage. */
+ return 0;
+}
+
+/* Run do_early_param() over @cmdline (which is modified in place). */
+void __init parse_early_options(char *cmdline)
+{
+ parse_args("early options", cmdline, NULL, 0, 0, 0, NULL,
+ do_early_param);
+}
+
+/* Arch code calls this early on, or if not, just before other parsing. */
+/* Idempotent: a static flag makes repeat calls no-ops.  Parses a copy of
+ * boot_command_line so the original stays untouched. */
+void __init parse_early_param(void)
+{
+ static int done __initdata;
+ static char tmp_cmdline[COMMAND_LINE_SIZE] __initdata;
+
+ if (done)
+ return;
+
+ /* All fall through to do_early_param. */
+ strlcpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+ parse_early_options(tmp_cmdline);
+ done = 1;
+}
+
+/* Default no-op hooks; architectures override the ones they need. */
+void __init __weak arch_post_acpi_subsys_init(void) { }
+
+void __init __weak smp_setup_processor_id(void)
+{
+}
+
+# if THREAD_SIZE >= PAGE_SIZE
+void __init __weak thread_stack_cache_init(void)
+{
+}
+#endif
+
+void __init __weak mem_encrypt_init(void) { }
+
+void __init __weak poking_init(void) { }
+
+void __init __weak pgtable_cache_init(void) { }
+
+bool initcall_debug;
+core_param(initcall_debug, initcall_debug, bool, 0644);
+
+#ifdef TRACEPOINTS_ENABLED
+static void __init initcall_debug_enable(void);
+#else
+static inline void initcall_debug_enable(void)
+{
+}
+#endif
+
+/* Report memory auto-initialization states for this boot. */
+static void __init report_meminit(void)
+{
+ const char *stack;
+
+ /* Pick the label for whichever stack-init mechanism is configured. */
+ if (IS_ENABLED(CONFIG_INIT_STACK_ALL_PATTERN))
+ stack = "all(pattern)";
+ else if (IS_ENABLED(CONFIG_INIT_STACK_ALL_ZERO))
+ stack = "all(zero)";
+ else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL))
+ stack = "byref_all(zero)";
+ else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF))
+ stack = "byref(zero)";
+ else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER))
+ stack = "__user(zero)";
+ else
+ stack = "off";
+
+ pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n",
+ stack, want_init_on_alloc(GFP_KERNEL) ? "on" : "off",
+ want_init_on_free() ? "on" : "off");
+ if (want_init_on_free())
+ pr_info("mem auto-init: clearing system memory may take some time...\n");
+}
+
+/*
+ * Set up kernel memory allocators
+ */
+static void __init mm_init(void)
+{
+ /*
+ * page_ext requires contiguous pages,
+ * bigger than MAX_ORDER unless SPARSEMEM.
+ */
+ page_ext_init_flatmem();
+ init_debug_pagealloc();
+ report_meminit();
+ mem_init();
+ /* slab/slub must come up before anything that kmallocs. */
+ kmem_cache_init();
+ kmemleak_init();
+ pgtable_init();
+ debug_objects_mem_init();
+ vmalloc_init();
+ ioremap_huge_init();
+ /* Should be run before the first non-init thread is created */
+ init_espfix_bsp();
+ /* Should be run after espfix64 is set up. */
+ pti_init();
+}
+
+/* Weak hook so an arch can wrap rest_init(); default calls it directly. */
+void __init __weak arch_call_rest_init(void)
+{
+ rest_init();
+}
+
+/* Overridable hook invoked from start_kernel() just before early_irq_init();
+ * default no-op.  NOTE(review): non-upstream addition — presumably a vendor
+ * early-driver hook; confirm against the platform code that overrides it. */
+void __weak early_drv_init(void) {}
+
+/*
+ * Architecture-independent kernel entry point, called from arch setup
+ * code with the MMU on and a valid stack.  Brings every core subsystem
+ * up in strict dependency order and never returns: it finishes by
+ * calling arch_call_rest_init() which spawns PID 1 and becomes the
+ * boot CPU idle task.
+ */
+asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
+{
+ char *command_line;
+ char *after_dashes;
+
+ set_task_stack_end_magic(&init_task);
+ smp_setup_processor_id();
+ debug_objects_early_init();
+
+ cgroup_init_early();
+
+ local_irq_disable();
+ early_boot_irqs_disabled = true;
+
+ /*
+ * Interrupts are still disabled. Do necessary setups, then
+ * enable them.
+ */
+ boot_cpu_init();
+ page_address_init();
+ pr_notice("%s", linux_banner);
+ early_security_init();
+ setup_arch(&command_line);
+ setup_boot_config(command_line);
+ setup_command_line(command_line);
+ setup_nr_cpu_ids();
+ setup_per_cpu_areas();
+ smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
+ boot_cpu_hotplug_init();
+
+ build_all_zonelists(NULL);
+ page_alloc_init();
+
+ pr_notice("Kernel command line: %s\n", saved_command_line);
+ /* parameters may set static keys */
+ jump_label_init();
+ parse_early_param();
+ after_dashes = parse_args("Booting kernel",
+ static_command_line, __start___param,
+ __stop___param - __start___param,
+ -1, -1, NULL, &unknown_bootoption);
+ /* Everything after "--" (and bootconfig "init." keys) goes to init. */
+ if (!IS_ERR_OR_NULL(after_dashes))
+ parse_args("Setting init args", after_dashes, NULL, 0, -1, -1,
+ NULL, set_init_arg);
+ if (extra_init_args)
+ parse_args("Setting extra init args", extra_init_args,
+ NULL, 0, -1, -1, NULL, set_init_arg);
+
+ /*
+ * These use large bootmem allocations and must precede
+ * kmem_cache_init()
+ */
+ setup_log_buf(0);
+ vfs_caches_init_early();
+ sort_main_extable();
+ trap_init();
+ mm_init();
+
+ ftrace_init();
+
+ /* trace_printk can be enabled here */
+ early_trace_init();
+
+ /*
+ * Set up the scheduler prior starting any interrupts (such as the
+ * timer interrupt). Full topology setup happens at smp_init()
+ * time - but meanwhile we still have a functioning scheduler.
+ */
+ sched_init();
+
+ if (WARN(!irqs_disabled(),
+ "Interrupts were enabled *very* early, fixing it\n"))
+ local_irq_disable();
+ radix_tree_init();
+
+ /*
+ * Set up housekeeping before setting up workqueues to allow the unbound
+ * workqueue to take non-housekeeping into account.
+ */
+ housekeeping_init();
+
+ /*
+ * Allow workqueue creation and work item queueing/cancelling
+ * early. Work item execution depends on kthreads and starts after
+ * workqueue_init().
+ */
+ workqueue_init_early();
+
+ rcu_init();
+
+ /* Trace events are available after this */
+ trace_init();
+
+ if (initcall_debug)
+ initcall_debug_enable();
+
+ context_tracking_init();
+
+ early_drv_init();
+
+ /* init some links before init_ISA_irqs() */
+ early_irq_init();
+ init_IRQ();
+ tick_init();
+ rcu_init_nohz();
+ init_timers();
+ hrtimers_init();
+ softirq_init();
+ timekeeping_init();
+ time_init();
+
+ /*
+ * For best initial stack canary entropy, prepare it after:
+ * - setup_arch() for any UEFI RNG entropy and boot cmdline access
+ * - timekeeping_init() for ktime entropy used in random_init()
+ * - time_init() for making random_get_entropy() work on some platforms
+ * - random_init() to initialize the RNG from early entropy sources
+ */
+ random_init(command_line);
+ boot_init_stack_canary();
+
+ perf_event_init();
+ profile_init();
+ call_function_init();
+ WARN(!irqs_disabled(), "Interrupts were enabled early\n");
+
+ early_boot_irqs_disabled = false;
+ local_irq_enable();
+
+ kmem_cache_init_late();
+
+ /*
+ * HACK ALERT! This is early. We're enabling the console before
+ * we've done PCI setups etc, and console_init() must be aware of
+ * this. But we do want output early, in case something goes wrong.
+ */
+ console_init();
+ if (panic_later)
+ panic("Too many boot %s vars at `%s'", panic_later,
+ panic_param);
+
+ lockdep_init();
+
+ /*
+ * Need to run this when irqs are enabled, because it wants
+ * to self-test [hard/soft]-irqs on/off lock inversion bugs
+ * too:
+ */
+ locking_selftest();
+
+ /*
+ * This needs to be called before any devices perform DMA
+ * operations that might use the SWIOTLB bounce buffers. It will
+ * mark the bounce buffers as decrypted so that their usage will
+ * not cause "plain-text" data to be decrypted when accessed.
+ */
+ mem_encrypt_init();
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_start && !initrd_below_start_ok &&
+ page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {
+ pr_crit("initrd overwritten (0x%08lx < 0x%08lx) - disabling it.\n",
+ page_to_pfn(virt_to_page((void *)initrd_start)),
+ min_low_pfn);
+ initrd_start = 0;
+ }
+#endif
+ setup_per_cpu_pageset();
+ numa_policy_init();
+ acpi_early_init();
+ if (late_time_init)
+ late_time_init();
+ sched_clock_init();
+ calibrate_delay();
+ pid_idr_init();
+ anon_vma_init();
+#ifdef CONFIG_X86
+ if (efi_enabled(EFI_RUNTIME_SERVICES))
+ efi_enter_virtual_mode();
+#endif
+ thread_stack_cache_init();
+ cred_init();
+ fork_init();
+ proc_caches_init();
+ uts_ns_init();
+ buffer_init();
+ key_init();
+ security_init();
+ dbg_late_init();
+ vfs_caches_init();
+ pagecache_init();
+ signals_init();
+ seq_file_init();
+ proc_root_init();
+ nsfs_init();
+ cpuset_init();
+ cgroup_init();
+ taskstats_init_early();
+ delayacct_init();
+
+ poking_init();
+ check_bugs();
+
+ acpi_subsystem_init();
+ arch_post_acpi_subsys_init();
+ sfi_init_late();
+ kcsan_init();
+
+ /* Do the rest non-__init'ed, we're now alive */
+ arch_call_rest_init();
+
+ prevent_tail_call_optimization();
+}
+
+/* Call all constructor functions linked into the kernel. */
+static void __init do_ctors(void)
+{
+#ifdef CONFIG_CONSTRUCTORS
+ ctor_fn_t *fn = (ctor_fn_t *) __ctors_start;
+
+ /* __ctors_start..__ctors_end is a linker-built table of function ptrs. */
+ for (; fn < (ctor_fn_t *) __ctors_end; fn++)
+ (*fn)();
+#endif
+}
+
+#ifdef CONFIG_KALLSYMS
+/* One "initcall_blacklist=" entry: list node plus the function name. */
+struct blacklist_entry {
+ struct list_head next;
+ char *buf;
+};
+
+static __initdata_or_module LIST_HEAD(blacklisted_initcalls);
+
+/* "initcall_blacklist=" handler: split the comma-separated function list
+ * into blacklisted_initcalls entries (memblock-allocated; boot is this
+ * early).  Allocation failure panics. */
+static int __init initcall_blacklist(char *str)
+{
+ char *str_entry;
+ struct blacklist_entry *entry;
+
+ /* str argument is a comma-separated list of functions */
+ do {
+ str_entry = strsep(&str, ",");
+ if (str_entry) {
+ pr_debug("blacklisting initcall %s\n", str_entry);
+ entry = memblock_alloc(sizeof(*entry),
+ SMP_CACHE_BYTES);
+ if (!entry)
+ panic("%s: Failed to allocate %zu bytes\n",
+ __func__, sizeof(*entry));
+ entry->buf = memblock_alloc(strlen(str_entry) + 1,
+ SMP_CACHE_BYTES);
+ if (!entry->buf)
+ panic("%s: Failed to allocate %zu bytes\n",
+ __func__, strlen(str_entry) + 1);
+ strcpy(entry->buf, str_entry);
+ list_add(&entry->next, &blacklisted_initcalls);
+ }
+ } while (str_entry);
+
+ return 1;
+}
+
+/* Return true if @fn's symbol name is on the boot-time blacklist. */
+static bool __init_or_module initcall_blacklisted(initcall_t fn)
+{
+ struct blacklist_entry *entry;
+ char fn_name[KSYM_SYMBOL_LEN];
+ unsigned long addr;
+
+ if (list_empty(&blacklisted_initcalls))
+ return false;
+
+ addr = (unsigned long) dereference_function_descriptor(fn);
+ sprint_symbol_no_offset(fn_name, addr);
+
+ /*
+ * fn will be "function_name [module_name]" where [module_name] is not
+ * displayed for built-in init functions. Strip off the [module_name].
+ */
+ strreplace(fn_name, ' ', '\0');
+
+ list_for_each_entry(entry, &blacklisted_initcalls, next) {
+ if (!strcmp(fn_name, entry->buf)) {
+ pr_debug("initcall %s blacklisted\n", fn_name);
+ return true;
+ }
+ }
+
+ return false;
+}
+#else
+/* Without CONFIG_KALLSYMS we cannot resolve names, so blacklisting is
+ * unsupported: warn on use and never blacklist anything. */
+static int __init initcall_blacklist(char *str)
+{
+ pr_warn("initcall_blacklist requires CONFIG_KALLSYMS\n");
+ return 0;
+}
+
+static bool __init_or_module initcall_blacklisted(initcall_t fn)
+{
+ return false;
+}
+#endif
+__setup("initcall_blacklist=", initcall_blacklist);
+
+/* initcall_debug: log the call and stash the start time in *data. */
+static __init_or_module void
+trace_initcall_start_cb(void *data, initcall_t fn)
+{
+ ktime_t *calltime = (ktime_t *)data;
+
+ printk(KERN_DEBUG "calling %pS @ %i\n", fn, task_pid_nr(current));
+ *calltime = ktime_get();
+}
+
+/* initcall_debug: log return value and elapsed time since the start cb. */
+static __init_or_module void
+trace_initcall_finish_cb(void *data, initcall_t fn, int ret)
+{
+ ktime_t *calltime = (ktime_t *)data;
+ ktime_t delta, rettime;
+ unsigned long long duration;
+
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, *calltime);
+ duration = (unsigned long long) ktime_to_ns(delta) >> 10; /* ns -> ~us */
+ printk(KERN_DEBUG "initcall %pS returned %d after %lld usecs\n",
+ fn, ret, duration);
+}
+
+static ktime_t initcall_calltime;
+
+#ifdef TRACEPOINTS_ENABLED
+/* With tracepoints: hook the debug callbacks onto the initcall events. */
+static void __init initcall_debug_enable(void)
+{
+ int ret;
+
+ ret = register_trace_initcall_start(trace_initcall_start_cb,
+ &initcall_calltime);
+ ret |= register_trace_initcall_finish(trace_initcall_finish_cb,
+ &initcall_calltime);
+ WARN(ret, "Failed to register initcall tracepoints\n");
+}
+# define do_trace_initcall_start trace_initcall_start
+# define do_trace_initcall_finish trace_initcall_finish
+#else
+/* Without tracepoints: call the debug callbacks directly when enabled. */
+static inline void do_trace_initcall_start(initcall_t fn)
+{
+ if (!initcall_debug)
+ return;
+ trace_initcall_start_cb(&initcall_calltime, fn);
+}
+static inline void do_trace_initcall_finish(initcall_t fn, int ret)
+{
+ if (!initcall_debug)
+ return;
+ trace_initcall_finish_cb(&initcall_calltime, fn, ret);
+}
+#endif /* !TRACEPOINTS_ENABLED */
+
+/* Run one initcall (unless blacklisted), with optional timing trace, and
+ * warn about and repair preempt-count / IRQ-state imbalances it leaves
+ * behind.  Returns the initcall's return value, or -EPERM if skipped. */
+int __init_or_module do_one_initcall(initcall_t fn)
+{
+ int count = preempt_count();
+ char msgbuf[64];
+ int ret;
+
+ if (initcall_blacklisted(fn))
+ return -EPERM;
+
+ do_trace_initcall_start(fn);
+ ret = fn();
+ do_trace_initcall_finish(fn, ret);
+
+ msgbuf[0] = 0;
+
+ if (preempt_count() != count) {
+ sprintf(msgbuf, "preemption imbalance ");
+ preempt_count_set(count);
+ }
+ if (irqs_disabled()) {
+ strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
+ local_irq_enable();
+ }
+ WARN(msgbuf[0], "initcall %pS returned with %s\n", fn, msgbuf);
+
+ add_latent_entropy();
+ return ret;
+}
+
+
+extern initcall_entry_t __initcall_start[];
+extern initcall_entry_t __initcall0_start[];
+extern initcall_entry_t __initcall1_start[];
+extern initcall_entry_t __initcall2_start[];
+extern initcall_entry_t __initcall3_start[];
+extern initcall_entry_t __initcall4_start[];
+extern initcall_entry_t __initcall5_start[];
+extern initcall_entry_t __initcall6_start[];
+extern initcall_entry_t __initcall7_start[];
+extern initcall_entry_t __initcall_end[];
+
+/*
+ * Start markers for each initcall level; __initcall_end acts as the
+ * sentinel so level N spans [initcall_levels[N], initcall_levels[N+1]).
+ */
+static initcall_entry_t *initcall_levels[] __initdata = {
+ __initcall0_start,
+ __initcall1_start,
+ __initcall2_start,
+ __initcall3_start,
+ __initcall4_start,
+ __initcall5_start,
+ __initcall6_start,
+ __initcall7_start,
+ __initcall_end,
+};
+
+/* Keep these in sync with initcalls in include/linux/init.h */
+/* Human-readable names for levels 0-7, used for tracing and parse_args(). */
+static const char *initcall_level_names[] __initdata = {
+ "pure",
+ "core",
+ "postcore",
+ "arch",
+ "subsys",
+ "fs",
+ "device",
+ "late",
+};
+
+/* parse_args() callback that silently accepts unknown boot options. */
+static int __init ignore_unknown_bootoption(char *param, char *val,
+ const char *unused, void *arg)
+{
+ return 0;
+}
+
+/*
+ * Re-parse the command line for options scoped to this level, then run
+ * every initcall registered at that level in link order.
+ */
+static void __init do_initcall_level(int level, char *command_line)
+{
+ initcall_entry_t *fn;
+
+ parse_args(initcall_level_names[level],
+ command_line, __start___param,
+ __stop___param - __start___param,
+ level, level,
+ NULL, ignore_unknown_bootoption);
+
+ trace_initcall_level(initcall_level_names[level]);
+ for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
+ do_one_initcall(initcall_from_entry(fn));
+}
+
+/*
+ * Run all initcall levels 0..7. A private copy of the command line is
+ * used because parse_args() modifies it in place.
+ */
+static void __init do_initcalls(void)
+{
+ int level;
+ size_t len = strlen(saved_command_line) + 1;
+ char *command_line;
+
+ command_line = kzalloc(len, GFP_KERNEL);
+ if (!command_line)
+ panic("%s: Failed to allocate %zu bytes\n", __func__, len);
+
+ for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++) {
+ /* Parser modifies command_line, restore it each time */
+ strcpy(command_line, saved_command_line);
+ do_initcall_level(level, command_line);
+ }
+
+ kfree(command_line);
+}
+
+/*
+ * Ok, the machine is now initialized. None of the devices
+ * have been touched yet, but the CPU subsystem is up and
+ * running, and memory and process management works.
+ *
+ * Now we can finally start doing some real work..
+ */
+/* Bring up the driver model and run all ordinary (non-early) initcalls. */
+static void __init do_basic_setup(void)
+{
+ cpuset_init_smp();
+ driver_init();
+ init_irq_proc();
+ do_ctors();
+ usermodehelper_enable();
+ do_initcalls();
+}
+
+/* Run the "early" initcalls, i.e. those registered before level 0. */
+static void __init do_pre_smp_initcalls(void)
+{
+ initcall_entry_t *fn;
+
+ trace_initcall_level("early");
+ for (fn = __initcall_start; fn < __initcall0_start; fn++)
+ do_one_initcall(initcall_from_entry(fn));
+}
+
+/*
+ * Exec the given path as PID 1 with the boot-time argv/envp.
+ * Returns 0 only if the exec succeeded (in which case we never return
+ * to the caller's code path in the new image); negative errno otherwise.
+ */
+static int run_init_process(const char *init_filename)
+{
+ const char *const *p;
+
+ argv_init[0] = init_filename;
+ pr_info("Run %s as init process\n", init_filename);
+ pr_debug(" with arguments:\n");
+ for (p = argv_init; *p; p++)
+ pr_debug(" %s\n", *p);
+ pr_debug(" with environment:\n");
+ for (p = envp_init; *p; p++)
+ pr_debug(" %s\n", *p);
+ return kernel_execve(init_filename, argv_init, envp_init);
+}
+
+/*
+ * Attempt to exec init_filename; log loudly when the file exists but
+ * failed to execute (any error other than -ENOENT). Returns the
+ * run_init_process() result unchanged.
+ */
+static int try_to_run_init_process(const char *init_filename)
+{
+ int rc = run_init_process(init_filename);
+
+ if (rc && rc != -ENOENT)
+ pr_err("Starting init: %s exists but couldn't execute it (error %d)\n",
+ init_filename, rc);
+
+ return rc;
+}
+
+static noinline void __init kernel_init_freeable(void);
+
+#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX)
+/* rodata protection defaults on; "rodata=" boot option can disable it. */
+bool rodata_enabled __ro_after_init = true;
+static int __init set_debug_rodata(char *str)
+{
+ if (strtobool(str, &rodata_enabled))
+ pr_warn("Invalid option string for rodata: '%s'\n", str);
+ return 1;
+}
+__setup("rodata=", set_debug_rodata);
+#endif
+
+#ifdef CONFIG_STRICT_KERNEL_RWX
+/*
+ * Write-protect kernel text/rodata once init is done, if both the config
+ * and the "rodata=" option allow it. The two fallback variants below only
+ * warn, for configs/arches without strict kernel RWX support.
+ */
+static void mark_readonly(void)
+{
+ if (rodata_enabled) {
+ /*
+ * load_module() results in W+X mappings, which are cleaned
+ * up with call_rcu(). Let's make sure that queued work is
+ * flushed so that we don't hit false positives looking for
+ * insecure pages which are W+X.
+ */
+ rcu_barrier();
+ mark_rodata_ro();
+ rodata_test();
+ } else
+ pr_info("Kernel memory protection disabled.\n");
+}
+#elif defined(CONFIG_ARCH_HAS_STRICT_KERNEL_RWX)
+static inline void mark_readonly(void)
+{
+ pr_warn("Kernel memory protection not selected by kernel config.\n");
+}
+#else
+static inline void mark_readonly(void)
+{
+ pr_warn("This architecture does not have kernel memory protection.\n");
+}
+#endif
+
+/* Default (weak) implementation: poison and free the .init sections. */
+void __weak free_initmem(void)
+{
+ free_initmem_default(POISON_FREE_INITMEM);
+}
+
+/*
+ * PID 1 kernel thread: finish boot, free init memory, then exec userspace
+ * init. Only returns 0 (becoming the new init image) or panics.
+ * The CONFIG_FLAGS_UTILS branch is a vendor addition that tries to switch
+ * to an alternate system image before giving up.
+ */
+static int __ref kernel_init(void *unused)
+{
+ int ret;
+
+ kernel_init_freeable();
+ /* need to finish all async __init code before freeing the memory */
+ async_synchronize_full();
+ kprobe_free_init_mem();
+ ftrace_free_init_mem();
+ kgdb_free_init_mem();
+ free_initmem();
+ mark_readonly();
+
+ /*
+ * Kernel mappings are now finalized - update the userspace page-table
+ * to finalize PTI.
+ */
+ pti_finalize();
+
+ system_state = SYSTEM_RUNNING;
+ numa_default_policy();
+
+ rcu_end_inkernel_boot();
+
+ do_sysctl_args();
+
+ if (ramdisk_execute_command) {
+ ret = run_init_process(ramdisk_execute_command);
+ if (!ret)
+ return 0;
+ pr_err("Failed to execute %s (error %d)\n",
+ ramdisk_execute_command, ret);
+ }
+
+ /*
+ * We try each of these until one succeeds.
+ *
+ * The Bourne shell can be used instead of init if we are
+ * trying to recover a really broken machine.
+ */
+ if (execute_command) {
+ ret = run_init_process(execute_command);
+ if (!ret)
+ return 0;
+ panic("Requested init %s failed (error %d).",
+ execute_command, ret);
+ }
+
+ if (CONFIG_DEFAULT_INIT[0] != '\0') {
+ ret = run_init_process(CONFIG_DEFAULT_INIT);
+ if (ret)
+ pr_err("Default init %s failed (error %d)\n",
+ CONFIG_DEFAULT_INIT, ret);
+ else
+ return 0;
+ }
+
+ if (!try_to_run_init_process("/sbin/init") ||
+ !try_to_run_init_process("/etc/init") ||
+ !try_to_run_init_process("/bin/init") ||
+ !try_to_run_init_process("/bin/sh"))
+ return 0;
+#ifdef CONFIG_FLAGS_UTILS
+{
+ /* Vendor fallback: switch boot flags and restart into another image. */
+ extern int flags_sys_switch(void);
+ int ret; /* NOTE(review): shadows outer ret; harmless but confusing */
+ printk(KERN_EMERG "No working init found. Try passing init= option to kernel. "
+ "See Linux Documentation/admin-guide/init.rst for guidance.");
+ ret = flags_sys_switch();
+ if (ret < 0)
+ panic("VFS: flags_sys_switch fail");
+ else
+ kernel_restart("VFS: Switch to another system, please reset machine");
+}
+#endif
+ panic("No working init found. Try passing init= option to kernel. "
+ "See Linux Documentation/admin-guide/init.rst for guidance.");
+}
+
+/* Open /dev/console, for stdin/stdout/stderr, this should never fail */
+void __init console_on_rootfs(void)
+{
+ struct file *file = filp_open("/dev/console", O_RDWR, 0);
+
+ if (IS_ERR(file)) {
+ pr_err("Warning: unable to open an initial console.\n");
+ return;
+ }
+ /* duplicate the console onto fds 0, 1 and 2 for the init process */
+ init_dup(file);
+ init_dup(file);
+ init_dup(file);
+ fput(file);
+}
+
+/*
+ * The freeable part of kernel_init(): bring up SMP, run all initcalls,
+ * mount the root filesystem and decide between initramfs /init and a
+ * conventional root. Lives in .init memory, hence the split from
+ * kernel_init() which must survive free_initmem().
+ */
+static noinline void __init kernel_init_freeable(void)
+{
+ /*
+ * Wait until kthreadd is all set-up.
+ */
+ wait_for_completion(&kthreadd_done);
+
+ /* Now the scheduler is fully set up and can do blocking allocations */
+ gfp_allowed_mask = __GFP_BITS_MASK;
+
+ /*
+ * init can allocate pages on any node
+ */
+ set_mems_allowed(node_states[N_MEMORY]);
+
+ cad_pid = get_pid(task_pid(current));
+
+ smp_prepare_cpus(setup_max_cpus);
+
+ workqueue_init();
+
+ init_mm_internals();
+
+ rcu_init_tasks_generic();
+ do_pre_smp_initcalls();
+ lockup_detector_init();
+
+ smp_init();
+ sched_init_smp();
+
+ padata_init();
+ page_alloc_init_late();
+ /* Initialize page ext after all struct pages are initialized. */
+ page_ext_init();
+
+ do_basic_setup();
+
+ kunit_run_all_tests();
+
+ console_on_rootfs();
+
+ /*
+ * check if there is an early userspace init. If yes, let it do all
+ * the work
+ */
+ if (init_eaccess(ramdisk_execute_command) != 0) {
+ ramdisk_execute_command = NULL;
+ prepare_namespace();
+ }
+
+ /*
+ * Ok, we have completed the initial bootup, and
+ * we're essentially up and running. Get rid of the
+ * initmem segments and start the user-mode stuff..
+ *
+ * rootfs is available now, try loading the public keys
+ * and default modules
+ */
+
+ integrity_load_keys();
+}
diff --git a/upstream/linux-5.10/kernel/ramdump/ramdump_device_trans.c b/upstream/linux-5.10/kernel/ramdump/ramdump_device_trans.c
new file mode 100755
index 0000000..40c920d
--- /dev/null
+++ b/upstream/linux-5.10/kernel/ramdump/ramdump_device_trans.c
@@ -0,0 +1,753 @@
+/**
+ * @file oss_ramdump_osa.c
+ * @brief Implementation of Ramdump os adapt
+ *
+ * Copyright (C) 2017 Sanechips Technology Co., Ltd.
+ * @author Qing Wang <wang.qing@sanechips.com.cn>
+ * @ingroup si_ap_oss_ramdump_id
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/*******************************************************************************
+ * Include header files *
+ ******************************************************************************/
+#include "ramdump.h"
+#include <linux/lzo.h>
+#include "ramdump_compress.h"
+#ifdef CONFIG_RAMDUMP_EMMC
+#include "ramdump_emmc.h"
+#endif
+#ifdef CONFIG_MTD_SPI_NAND
+#include "ramdump_spinand.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*******************************************************************************
+* Extern function declarations *
+*******************************************************************************/
+extern unsigned char *ramdump_phy_to_vir(unsigned long phy, unsigned long size);
+extern int dump_printk_text(char *buffer, unsigned long len);
+
+/*******************************************************************************
+* Extern variable declarations *
+*******************************************************************************/
+extern unsigned int ramdump_compress_flag;
+extern unsigned char *ramdump_log_buf;
+extern unsigned int ramdump_export_mode;
+
+/*******************************************************************************
+ * Macro definitions *
+ ******************************************************************************/
+/* command frame length */
+#define RAMDUMP_INTERACTIVE_DATA_LEN 40
+#define RAMDUMP_INTERACTIVE_ARRAY_LEN 10
+
+/* command conventions for the ramdump <-> shared-memory interaction */
+/* sync request */
+#define RAMDUMP_PC_INTERACTIVE_REQ 1
+/* sync response: carries the number of ramdump files */
+#define RAMDUMP_TRANS_SERVER_INTERACTIVE_RSP 2
+/* request the file info for a given file id */
+#define RAMDUMP_PC_FILE_INFO_READ_REQ 3
+/* file-info response: carries file name and size */
+#define RAMDUMP_TRANS_SERVER_FILE_INFO_READ_RSP 4
+/* request the file data for a given file id */
+#define RAMDUMP_PC_FILE_DATA_TRANS_REQ 5
+/* file-data response: carries the file contents */
+#define RAMDUMP_TRANS_SERVER_FILE_DATA_TRANS_RSP 6
+/* transfer complete */
+#define RAMDUMP_PC_FILE_TRANS_DONE_REQ 7
+/* transfer-complete response */
+#define RAMDUMP_TRANS_SERVER_FILE_TRANS_DONE_RSP 8
+
+/* error types */
+/* bad command */
+#define RAMDUMP_INTERACTIVE_CMD_ERROR 9
+/* requested file id out of range */
+#define RAMDUMP_FILE_NUMBER_ERROR 10
+/* requested file offset/size invalid */
+#define RAMDUMP_FILE_SIZE_ERROR 11
+
+#define RAMDUMP_DELAY_MS_COUNT (2500)
+
+/*******************************************************************************
+ * Type definitions *
+ ******************************************************************************/
+/*
+ * struct TRANS WITH AP
+ */
+
+/* trans_server rsp pc, interactive msg struct */
+typedef struct
+{
+ unsigned int cmd;
+ unsigned int file_num;
+} ramdump_trans_server_interactive_req;
+
+/* trans_server rsp pc, file info msg struct */
+typedef struct
+{
+ unsigned int cmd;
+ char file_name[RAMDUMP_RAMCONF_FILENAME_MAXLEN];
+ unsigned int file_size;
+} ramdump_trans_server_file_info_req;
+
+/* pc req trans_server, file info msg struct */
+typedef struct
+{
+ unsigned int cmd;
+ unsigned int file_id;
+} ramdump_pc_file_info_rsp;
+
+/* trans_server rsp pc, trans data msg struct */
+typedef struct
+{
+ unsigned int cmd;
+ unsigned int buf_addr;
+ unsigned int buf_left_size;
+} ramdump_trans_server_data_trans_req;
+
+/* pc req trans_server, trans data msg struct */
+typedef struct
+{
+ unsigned int cmd;
+ unsigned int file_id; /* file id */
+ unsigned int offset; /* data offset */
+ unsigned int length; /* data length */
+} ramdump_pc_trans_data_rsp;
+
+/*******************************************************************************
+ * Local function declarations *
+ ******************************************************************************/
+
+/*******************************************************************************
+ * Local variable definitions *
+ ******************************************************************************/
+/* kernel printk log buffer address/length, filled from log_buf_*_get() */
+char *ramdump_log_buf_region = NULL;
+unsigned int ramdump_log_buf_len = 0;
+
+/*******************************************************************************
+ * Global variable definitions *
+ ******************************************************************************/
+unsigned char *ramdump_shared_mem_base = NULL;
+unsigned char *ramdump_export_flag_base = NULL;
+int ramdump_file_num = 0;
+ramdump_file_t ramdump_device_fp = {0};
+ramdump_file_t ramdump_spinand_fp = {0};
+ramdump_file_t *g_ramdump_dev_fp;
+unsigned int ramdump_device_file_cnt = 0;
+unsigned char *ramdump_log_buf = NULL; /* reuses 128KB of the ramdump store (offset 16KB) */
+
+/*******************************************************************************
+ * Inline function implementations *
+ ******************************************************************************/
+/*
+ * Crude busy-wait used while polling the shared-memory handshake flags.
+ * NOTE(review): the 'ms' argument is ignored — the spin is a fixed 10000
+ * iterations regardless; callers currently pass 0, so do not "fix" this
+ * to a real millisecond delay without checking them.
+ */
+static inline void ramdump_wait_delay( unsigned long ms)
+{
+ volatile int j = 0;
+ for (j = 0; j < 10000; j++);
+}
+/*******************************************************************************
+ * extern function implementations *
+ ******************************************************************************/
+
+/*******************************************************************************
+* Description: ramdump_oss_data_trans_write
+* Parameters:
+* (in) buffer - data to publish to the PC side
+* (in) size - number of bytes in buffer
+* Return: number of bytes written, or -1 if size exceeds the shared buffer
+* Notes: This function is used for ramdump to trans dump data to PC.
+* Spins until the peer hands the shared buffer back (core_flag==1,
+* rw_flag==1), then copies the payload and flips ownership.
+*******************************************************************************/
+int ramdump_oss_data_trans_write(unsigned char *buffer, unsigned int size)
+{
+ int ret;
+ ramdump_shmem_t *msg = (ramdump_shmem_t *)ramdump_shared_mem_base;
+
+ /*
+ * BUGFIX: the original had a stray ';' after this if, so the bounds
+ * check was a no-op and an oversized request fell through to memcpy,
+ * overflowing the shared-memory buffer. Reject it up front instead.
+ */
+ if (size > (RAMDUMP_SHARED_MEM_LEN - roundup(sizeof(ramdump_shmem_t), RAMDUMP_SHMEM_ALIGN_SIZE)))
+ return -1;
+
+ while(1){
+ if ((msg->core_flag == 1) && (msg->rw_flag == 1)){
+ memcpy(msg->buf, buffer, size);
+ msg->size = size;
+ msg->core_flag = 0; /* hand the buffer to the peer */
+ msg->rw_flag = 2; /* 2 = data ready for reading */
+ ret = size;
+ break;
+ }
+ else
+ ramdump_wait_delay(0);
+ }
+ return ret;
+}
+
+/*******************************************************************************
+* Description: ramdump_oss_data_trans_read
+* Parameters:
+* (in) buffer - destination for the incoming payload
+* (in) size - capacity of buffer in bytes
+* Return: number of bytes accepted, or -1 on a size error
+* Notes: This function is used for ramdump to receive data from PC.
+* Spins until the peer publishes data (core_flag==1, rw_flag==2),
+* copies it out and returns ownership of the shared buffer.
+*******************************************************************************/
+int ramdump_oss_data_trans_read(unsigned char *buffer, unsigned int size)
+{
+ int ret;
+ ramdump_shmem_t *msg = (ramdump_shmem_t *)ramdump_shared_mem_base;
+
+ /*
+ * BUGFIX: the original set ret = -1 here but kept going, so the
+ * oversize check had no effect. Fail fast instead.
+ */
+ if (size > (RAMDUMP_SHARED_MEM_LEN - roundup(sizeof(ramdump_shmem_t), RAMDUMP_SHMEM_ALIGN_SIZE)))
+ return -1;
+
+ while(1){
+ if ((msg->core_flag == 1) && (msg->rw_flag == 2)){
+ if (size < msg->size)
+ return -1; /* caller's buffer too small for the payload */
+ memcpy(buffer, msg->buf, msg->size);
+ /* NOTE(review): size (not msg->size) is stored back, as before */
+ msg->size = size;
+ msg->core_flag = 1;
+ msg->rw_flag = 1; /* 1 = buffer free for the next write */
+ ret = size;
+ break;
+ }
+ else
+ ramdump_wait_delay(0);
+ }
+ return ret;
+}
+
+/*******************************************************************************
+* Description: ramdump_oss_data_trans_init
+* Parameters:
+* (in) void
+* (out) void
+* Return: void
+* Notes: Resets the shared-memory message header and marks the buffer as
+* owned by this core and free for writing (core_flag=1, rw_flag=1).
+*******************************************************************************/
+void ramdump_oss_data_trans_init(void)
+{
+ ramdump_shmem_t *msg = (ramdump_shmem_t *)ramdump_shared_mem_base;
+
+ memset(msg, 0, sizeof(ramdump_shmem_t));
+ msg->core_flag = 1;
+ msg->rw_flag = 1;
+}
+
+/*******************************************************************************
+* Description: ramdump_device_init
+* Parameters:
+* (in) void
+* (out) void
+* Return: 0 on success, negative on backend init failure
+* Notes: Initializes the LZO compressor and the export backend selected by
+* ramdump_export_mode, and points g_ramdump_dev_fp at the matching
+* file-table. NOTE(review): if ramdump_export_mode is neither EMMC
+* nor SPINAND, g_ramdump_dev_fp stays NULL — callers must tolerate it.
+*******************************************************************************/
+int ramdump_device_init(void)
+{
+ int ret = 0;
+
+ ramdump_lzo_init();
+ if(ramdump_export_mode == RAMDUMP_MODE_EMMC)
+ {
+#ifdef CONFIG_RAMDUMP_EMMC
+ ret = ramdump_emmc_init(&ramdump_device_fp);
+#endif
+ g_ramdump_dev_fp = &ramdump_device_fp;
+ }
+ else if(ramdump_export_mode == RAMDUMP_MODE_SPINAND)
+ {
+#ifdef CONFIG_MTD_SPI_NAND
+ ret = ramdump_spinand_init(&ramdump_spinand_fp);
+#endif
+ g_ramdump_dev_fp = &ramdump_spinand_fp;
+ }
+ return ret;
+}
+
+/*******************************************************************************
+* Description: ramdump_device_close
+* Parameters:
+* (in) void
+* (out) void
+* Return: void
+* Notes: Closes whichever export backend (eMMC or SPI-NAND) is active.
+*******************************************************************************/
+void ramdump_device_close(void)
+{
+ if(ramdump_export_mode == RAMDUMP_MODE_EMMC)
+ {
+#ifdef CONFIG_RAMDUMP_EMMC
+ ramdump_emmc_close(&ramdump_device_fp);
+#endif
+ }
+ else if (ramdump_export_mode == RAMDUMP_MODE_SPINAND)
+ {
+#ifdef CONFIG_MTD_SPI_NAND
+ ramdump_spinand_close(&ramdump_spinand_fp);
+#endif
+ }
+}
+
+/*******************************************************************************
+* Description: ramdump_fill_header
+* Parameters:
+* (in) file_name - name to record for the current file slot
+* (in) file_size - size to record
+* (in) fp - file-table to fill (eMMC or SPI-NAND variant)
+* (in) offset - device offset to record
+* Return: 0 on success, -1 when the file table is full
+* Notes: Fills the header entry indexed by ramdump_device_file_cnt.
+*******************************************************************************/
+int ramdump_fill_header(char *file_name, unsigned int file_size, ramdump_file_t *fp, unsigned int offset)
+{
+ if (ramdump_device_file_cnt >= RAMDUMP_FILE_NUM_MAX)
+ return -1;
+
+ fp->file_fp[ramdump_device_file_cnt].magic = 0x3A3A3A3A;
+ strncpy(fp->file_fp[ramdump_device_file_cnt].file_name, file_name, RAMDUMP_RAMCONF_FILENAME_MAXLEN - 1);
+ /*
+ * BUGFIX: strncpy() does not NUL-terminate when the source is at least
+ * MAXLEN-1 chars; terminate explicitly so the name is always a valid
+ * C string even if the slot held a longer stale name.
+ */
+ fp->file_fp[ramdump_device_file_cnt].file_name[RAMDUMP_RAMCONF_FILENAME_MAXLEN - 1] = '\0';
+ fp->file_fp[ramdump_device_file_cnt].offset = offset;
+ fp->file_fp[ramdump_device_file_cnt].size = file_size;
+ return 0;
+}
+
+/*******************************************************************************
+* Description: ramdump_device_write_file
+* Parameters:
+* (in) server_to_cap - file-info message (name + size) from the server
+* (out) void
+* Return: 0 on success, -1 when the device region is exhausted or no
+* backend is active
+* Notes: Records the next file's header at the current device offset.
+*******************************************************************************/
+int ramdump_device_write_file(ramdump_trans_server_file_info_req *server_to_cap)
+{
+ int ret = -1;
+
+ if(ramdump_export_mode == RAMDUMP_MODE_EMMC)
+ {
+#ifdef CONFIG_RAMDUMP_EMMC
+ if (ramdump_emmc_offset >= RAMDUMP_TRANS_EMMC_LEN)
+ return -1;
+
+ ret = ramdump_fill_header(server_to_cap->file_name,
+ server_to_cap->file_size,
+ &ramdump_device_fp,
+ ramdump_emmc_offset);
+#endif
+ }
+ else if(ramdump_export_mode == RAMDUMP_MODE_SPINAND)
+ {
+#ifdef CONFIG_MTD_SPI_NAND
+ if (ramdump_spinand_offset >= RAMDUMP_SPINAND_LEN)
+ return -1;
+
+ ret = ramdump_fill_header(server_to_cap->file_name,
+ server_to_cap->file_size,
+ &ramdump_spinand_fp,
+ ramdump_spinand_offset);
+#endif
+ }
+ return ret;
+}
+
+/*******************************************************************************
+* Description: ramdump_device_modify_file_size
+* Parameters:
+* (in) file_size - actual (e.g. post-compression) size of the file
+* (out) void
+* Return: 0 on success, -1 if no device file-table is active
+* Notes: Patches the size field of the entry currently indexed by
+* ramdump_device_file_cnt in the active file-table.
+*******************************************************************************/
+int ramdump_device_modify_file_size(ssize_t file_size)
+{
+ int ret = -1;
+ ramdump_file_t *fp = g_ramdump_dev_fp;
+
+ if(fp)
+ {
+ fp->file_fp[ramdump_device_file_cnt].size = file_size;
+ return 0;
+ }
+ return ret;
+}
+
+/*******************************************************************************
+* Description: ramdump_device_write_file_head
+* Parameters:
+* (in) void
+* (out) void
+* Return: backend result, or -1 if no backend is active/compiled in
+* Notes: Flushes the in-memory file-table header to the active device.
+*******************************************************************************/
+int ramdump_device_write_file_head(void)
+{
+ int ret = -1;
+
+ if(ramdump_export_mode == RAMDUMP_MODE_EMMC)
+ {
+#ifdef CONFIG_RAMDUMP_EMMC
+ ret = ramdump_emmc_write_file_head(&ramdump_device_fp);
+#endif
+ }
+ else if(ramdump_export_mode == RAMDUMP_MODE_SPINAND)
+ {
+#ifdef CONFIG_MTD_SPI_NAND
+ ret = ramdump_spinand_write_file_head(&ramdump_spinand_fp);
+#endif
+ }
+ return ret;
+}
+
+/*
+ * Dump the formatted printk text ("cap_log_buf.txt") to the device behind
+ * fp, LZO-compressing it into the shared-memory scratch buffer when
+ * ramdump_compress_flag is set; on compression failure the raw text is
+ * written instead. Advances the device offset and file counter.
+ * Returns the backend write result, or negative on failure.
+ */
+int ramdump_do_write_log_txt(ramdump_file_t *fp)
+{
+ int ret = -1;
+ size_t dst_len = 0;
+ size_t send_len = 0;
+ ramdump_shmem_t *msg = (ramdump_shmem_t *)ramdump_shared_mem_base;
+ char *buf = NULL;
+
+ memset(ramdump_log_buf, 0, RAMDUMP_LOG_BUF);
+ ret = dump_printk_text(ramdump_log_buf, RAMDUMP_LOG_BUF);
+ if(ret < 0){
+ printk("ramdump printk log buf failed!!\n");
+ return ret;
+ }
+ if (ramdump_compress_flag == 1){
+ ret = ramdump_lzo_compress(ramdump_log_buf, RAMDUMP_LOG_BUF, msg->buf, &dst_len);
+ buf = msg->buf;
+ }
+ /* fall back to the uncompressed buffer when compression is off/failed */
+ if (ret != LZO_E_OK){
+ dst_len = RAMDUMP_LOG_BUF;
+ buf = ramdump_log_buf;
+ }
+ fp->file_num += 1;
+ fp->file_fp[ramdump_device_file_cnt].magic = 0x3A3A3A3A;
+ strncpy(fp->file_fp[ramdump_device_file_cnt].file_name, "cap_log_buf.txt", RAMDUMP_RAMCONF_FILENAME_MAXLEN - 1);
+
+ if (fp == &ramdump_device_fp)
+ {
+#ifdef CONFIG_RAMDUMP_EMMC
+ fp->file_fp[ramdump_device_file_cnt].size = roundup(dst_len, RAMDUMP_EMMC_ALIGN_SIZE);
+ fp->file_fp[ramdump_device_file_cnt].offset = ramdump_emmc_offset;
+ ret = mmc_bwrite(RAMDUMP_EMMC_ADDR + ramdump_emmc_offset, dst_len, buf);
+ ramdump_emmc_write_file_head(fp);
+ ramdump_emmc_offset = ramdump_emmc_offset + roundup(dst_len, RAMDUMP_EMMC_ALIGN_SIZE);
+#endif
+ }
+ else if (fp == &ramdump_spinand_fp)
+ {
+#ifdef CONFIG_MTD_SPI_NAND
+ send_len = roundup(dst_len, RAMDUMP_FLASH_ALIGN_SIZE);
+ fp->file_fp[ramdump_device_file_cnt].size = send_len;
+ fp->file_fp[ramdump_device_file_cnt].offset = ramdump_spinand_offset;
+ ret = write_data(RAMDUMP_SPINAND_ADDR + ramdump_spinand_offset, send_len, buf);
+ ramdump_spinand_offset = ramdump_spinand_offset + send_len;
+#endif
+ }
+ else
+ {
+ printk("ramdump_do_write_logbuf error fp!\n");
+ return -1;
+ }
+ ramdump_device_file_cnt += 1;
+ return ret;
+}
+
+/*
+ * Dump the raw printk ring buffer ("cap_log_buf.bin") to the device behind
+ * fp (optionally LZO-compressed), then chain to ramdump_do_write_log_txt()
+ * for the formatted text variant. Returns the backend write result for the
+ * binary dump, or -1 on a bad fp.
+ */
+int ramdump_do_write_logbuf(ramdump_file_t *fp)
+{
+ char *buf = NULL;
+ int ret = -1;
+ size_t dst_len = 0;
+ size_t send_len = 0;
+ ramdump_shmem_t *msg = (ramdump_shmem_t *)ramdump_shared_mem_base;
+
+ if(!fp)
+ {
+ printk("ramdump_do_write_logbuf error: fp is Null\n");
+ return -1;
+ }
+
+ ramdump_log_buf_region = log_buf_addr_get();
+ ramdump_log_buf_len = log_buf_len_get();
+
+ if (ramdump_compress_flag == 1){
+ ret = ramdump_lzo_compress(ramdump_log_buf_region, ramdump_log_buf_len, msg->buf, &dst_len);
+ buf = msg->buf;
+ }
+ /* fall back to the uncompressed ring buffer when compression is off/failed */
+ if (ret != LZO_E_OK){
+ dst_len = ramdump_log_buf_len;
+ buf = ramdump_log_buf_region;
+ }
+
+ fp->file_num += 1;
+ fp->file_fp[ramdump_device_file_cnt].magic = 0x3A3A3A3A;
+ strncpy(fp->file_fp[ramdump_device_file_cnt].file_name, "cap_log_buf.bin", RAMDUMP_RAMCONF_FILENAME_MAXLEN - 1);
+
+ if (fp == &ramdump_device_fp)
+ {
+#ifdef CONFIG_RAMDUMP_EMMC
+ fp->file_fp[ramdump_device_file_cnt].size = roundup(dst_len, RAMDUMP_EMMC_ALIGN_SIZE);
+ ret = mmc_bwrite(RAMDUMP_EMMC_ADDR + ramdump_emmc_offset, dst_len, buf);
+ fp->file_fp[ramdump_device_file_cnt].offset = ramdump_emmc_offset;
+ ramdump_emmc_write_file_head(fp);
+ ramdump_emmc_offset = ramdump_emmc_offset + roundup(dst_len, RAMDUMP_EMMC_ALIGN_SIZE);
+#endif
+ }
+ else if (fp == &ramdump_spinand_fp)
+ {
+#ifdef CONFIG_MTD_SPI_NAND
+ send_len = roundup(dst_len, RAMDUMP_FLASH_ALIGN_SIZE);
+ fp->file_fp[ramdump_device_file_cnt].size = send_len;
+ fp->file_fp[ramdump_device_file_cnt].offset = ramdump_spinand_offset;
+ ret = write_data(RAMDUMP_SPINAND_ADDR + ramdump_spinand_offset, send_len, buf);
+ ramdump_spinand_offset = ramdump_spinand_offset + send_len;
+#endif
+ }
+ else
+ {
+ printk("ramdump_do_write_logbuf error fp!\n");
+ return -1;
+ }
+
+ ramdump_device_file_cnt += 1;
+ ramdump_do_write_log_txt(fp);
+
+ return ret;
+}
+
+/*******************************************************************************
+* Description: ramdump_device_write_logbuf
+* Parameters:
+* (in) void
+* (out) void
+* Return: result of ramdump_do_write_logbuf() (negative on failure)
+* Notes: Writes the captured printk log buffer to the active device and
+* reports success or failure on the ramdump console.
+*******************************************************************************/
+int ramdump_device_write_logbuf(void)
+{
+ int ret = ramdump_do_write_logbuf(g_ramdump_dev_fp);
+
+ ramdump_printf(ret < 0 ?
+ "device memory trans file:cap_log_buf error!!!\n" :
+ "device memory trans file:cap_log_buf success!!!\n");
+ return ret;
+}
+
+/*******************************************************************************
+* Description: ramdump_device_write_data
+* Parameters:
+* (in) msg - shared-memory message carrying the payload
+* (in) size - remaining bytes of the current file
+* (out) dstlen - running total of bytes written to the device for this
+* file (reset to 0 on a write error)
+* Return: backend write result; 0 when no backend is active
+* Notes: eMMC accumulates in EMMC-aligned chunks, SPI-NAND in raw bytes.
+*******************************************************************************/
+int ramdump_device_write_data(ramdump_shmem_t *msg, unsigned int size, ssize_t *dstlen)
+{
+ int ret = 0;
+
+ if(ramdump_export_mode == RAMDUMP_MODE_EMMC)
+ {
+#ifdef CONFIG_RAMDUMP_EMMC
+ ret = ramdump_emmc_write_data(msg, &ramdump_device_fp, size);
+ if(ret < 0)
+ *dstlen = 0;
+ else
+ *dstlen += roundup(ret, RAMDUMP_EMMC_ALIGN_SIZE);
+#endif
+ }
+ else if(ramdump_export_mode == RAMDUMP_MODE_SPINAND)
+ {
+#ifdef CONFIG_MTD_SPI_NAND
+ ret = ramdump_spinand_write_data(msg, &ramdump_spinand_fp, size);
+ if(ret < 0)
+ *dstlen = 0;
+ else
+ *dstlen += ret;
+#endif
+ }
+ else
+ return 0;
+ return ret;
+}
+
+/*******************************************************************************
+ * Global function implementations *
+ ******************************************************************************/
+/*
+ * Map the ramdump shared-memory window and export-mode flag into kernel
+ * virtual space; the log scratch buffer lives 0x4000 (16KB) into the
+ * shared region. Also pre-allocates the flash transfer buffer.
+ */
+void ramdump_shared_mem_init(void)
+{
+ ramdump_shared_mem_base = ramdump_phy_to_vir((unsigned long)RAMDUMP_SHARED_MEM_BASE, (unsigned long)RAMDUMP_MEM_LEN);
+ ramdump_export_flag_base = ramdump_phy_to_vir((unsigned long)IRAM_BASE_ADDR_RAMDUMP_MODE, sizeof(unsigned long));
+ ramdump_log_buf = ramdump_shared_mem_base + 0x4000;
+ ramdump_flash_alloc_transbuf();
+}
+
+/*******************************************************************************
+* Description: ramdump_data_transfer_to_device
+* Parameters:
+* (in) void
+* (out) void
+* Return: void
+* Notes: This function is used for ramdump to trans dump data to ap.
+* Protocol state machine: send INTERACTIVE_REQ, then for each file
+* request its info (name/size) and pull its data in chunks of at
+* most data_trans_max bytes, writing each chunk to the device.
+* Terminates on TRANS_DONE_RSP after appending the log buffers.
+*******************************************************************************/
+void ramdump_data_transfer_to_device(void)
+{
+ int data_trans_max;
+ int file_cnt = 0;
+ int file_size = 0;
+ int file_offset = 0;
+ int file_left_size = 0;
+ int file_trans_size = 0;
+ int error_cmd = 0;
+ int ret = 0;
+ ssize_t file_dstlen = 0;
+
+ unsigned int req_buf[RAMDUMP_INTERACTIVE_ARRAY_LEN] = {0};
+ ramdump_trans_server_interactive_req cap_to_server_msg = {0};
+
+ /* interactive begin */
+ if(ramdump_device_init() < 0)
+ return;
+ /* payload space per chunk left after the header and compress scratch */
+ data_trans_max = RAMDUMP_SHARED_MEM_LEN - roundup(sizeof(ramdump_shmem_t), RAMDUMP_SHMEM_ALIGN_SIZE) - RAMDUMP_COMPRESS_OUT_LEN;
+ cap_to_server_msg.cmd = RAMDUMP_PC_INTERACTIVE_REQ;
+ ramdump_oss_data_trans_write((unsigned char*)(&cap_to_server_msg), sizeof(cap_to_server_msg));
+
+ for(;;)
+ {
+ ramdump_oss_data_trans_read((unsigned char *)req_buf, RAMDUMP_INTERACTIVE_DATA_LEN);
+ /* first word of every message is the command id */
+ switch (*(unsigned int *)req_buf)
+ {
+ case RAMDUMP_TRANS_SERVER_INTERACTIVE_RSP:
+ {
+ ramdump_pc_file_info_rsp cap_to_server_msg ={0};
+ ramdump_trans_server_interactive_req *server_to_cap_msg = (ramdump_trans_server_interactive_req *)req_buf;
+ /* data from server to cap */
+ ramdump_file_num = server_to_cap_msg->file_num;
+ ramdump_device_fp.file_num = ramdump_file_num;
+ ramdump_spinand_fp.file_num = ramdump_file_num;
+
+ /* data from cap to server */
+ cap_to_server_msg.cmd = RAMDUMP_PC_FILE_INFO_READ_REQ;
+ cap_to_server_msg.file_id = file_cnt;
+
+ ramdump_oss_data_trans_write(
+ (unsigned char*)(&cap_to_server_msg),
+ sizeof(cap_to_server_msg));
+
+ break;
+ }
+ case RAMDUMP_TRANS_SERVER_FILE_INFO_READ_RSP:
+ {
+ ramdump_pc_trans_data_rsp cap_to_server_msg = {0};
+ ramdump_trans_server_file_info_req *server_to_cap_msg = (ramdump_trans_server_file_info_req *)req_buf;
+ /* data from server to cap */
+ /*device memory file create*/
+ if(ramdump_device_write_file(server_to_cap_msg) == -1){
+ cap_to_server_msg.cmd = RAMDUMP_PC_FILE_TRANS_DONE_REQ;
+ ramdump_device_write_file_head();/* make sure files written before the error keep a valid header */
+ ramdump_printf("ramdump write emmc file error!\n");
+ }
+ file_size = server_to_cap_msg->file_size;
+ file_offset = 0;
+ file_left_size = file_size;
+ /* data from cap to server: request the first chunk */
+ cap_to_server_msg.cmd = RAMDUMP_PC_FILE_DATA_TRANS_REQ;
+ cap_to_server_msg.file_id = file_cnt;
+ cap_to_server_msg.offset = file_offset;
+ if (file_size >= data_trans_max)
+ cap_to_server_msg.length = data_trans_max;
+ else
+ cap_to_server_msg.length = file_size;
+ file_trans_size = cap_to_server_msg.length;
+ file_left_size = file_left_size - cap_to_server_msg.length;
+ file_offset = file_offset + cap_to_server_msg.length;
+ printk("device memory trans file:%s !!!\n", server_to_cap_msg->file_name);
+ /* interactive data trans */
+ ramdump_oss_data_trans_write(
+ (unsigned char*)(&cap_to_server_msg),
+ sizeof(cap_to_server_msg));
+
+ break;
+ }
+ case RAMDUMP_TRANS_SERVER_FILE_DATA_TRANS_RSP:
+ {
+ int write_len = 0;
+ ramdump_pc_trans_data_rsp cap_to_server_msg = {0};
+ /* data from server to cap */
+ ramdump_shmem_t *server_to_cap_msg = (ramdump_shmem_t *)ramdump_shared_mem_base;
+ server_to_cap_msg->core_flag = 0;
+ /*data from cap to emmc*/
+
+ write_len = ramdump_device_write_data(server_to_cap_msg, file_left_size, &file_dstlen);
+ if(write_len < 0)
+ {
+ ramdump_printf("ramdump write emmc data error!\n");
+ }
+
+ /* check remaining size */
+ if (file_left_size == 0)
+ {
+ /* file complete: either finish or move to the next file id */
+ file_cnt++;
+ if (file_cnt == ramdump_file_num)
+ {
+ cap_to_server_msg.cmd = RAMDUMP_PC_FILE_TRANS_DONE_REQ;
+ }
+ else
+ {
+ cap_to_server_msg.cmd = RAMDUMP_PC_FILE_INFO_READ_REQ;
+ cap_to_server_msg.file_id = file_cnt;
+ }
+ ramdump_device_modify_file_size(file_dstlen);
+ file_dstlen = 0;
+ ramdump_device_file_cnt++;
+ }
+ else
+ {
+ /* data from cap to server: request the next chunk */
+ if (file_left_size >= data_trans_max)
+ cap_to_server_msg.length = data_trans_max;
+ else
+ cap_to_server_msg.length = file_left_size;
+ cap_to_server_msg.cmd = RAMDUMP_PC_FILE_DATA_TRANS_REQ;
+ cap_to_server_msg.file_id = file_cnt;
+ cap_to_server_msg.offset = file_offset;
+ file_left_size = file_left_size - cap_to_server_msg.length;
+ file_offset= file_offset + cap_to_server_msg.length;
+ }
+
+ ramdump_oss_data_trans_write((unsigned char *)(&cap_to_server_msg), sizeof(cap_to_server_msg));
+ continue;
+ }
+ case RAMDUMP_TRANS_SERVER_FILE_TRANS_DONE_RSP:
+ {
+ ramdump_device_write_logbuf();
+ ramdump_device_close();
+ return;
+ }
+ default:
+ {
+ error_cmd = RAMDUMP_INTERACTIVE_CMD_ERROR;
+ ramdump_printf("ramdump trans emmc error:%d!\n", error_cmd);
+ /* interactive data trans */
+ break;
+ }
+ }
+ }
+}
+
+#ifdef __cplusplus
+}
+#endif
+
diff --git a/upstream/linux-5.10/net/8021q/vlan_dev.c b/upstream/linux-5.10/net/8021q/vlan_dev.c
new file mode 100755
index 0000000..86a1c99
--- /dev/null
+++ b/upstream/linux-5.10/net/8021q/vlan_dev.c
@@ -0,0 +1,842 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* -*- linux-c -*-
+ * INET 802.1Q VLAN
+ * Ethernet-type device handling.
+ *
+ * Authors: Ben Greear <greearb@candelatech.com>
+ * Please send support related email to: netdev@vger.kernel.org
+ * VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
+ *
+ * Fixes: Mar 22 2001: Martin Bokaemper <mbokaemper@unispherenetworks.com>
+ * - reset skb->pkt_type on incoming packets when MAC was changed
+ * - see that changed MAC is saddr for outgoing packets
+ * Oct 20, 2001: Ard van Breeman:
+ * - Fix MC-list, finally.
+ * - Flush MC-list on VLAN destroy.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <net/arp.h>
+
+#include "vlan.h"
+#include "vlanproc.h"
+#include <linux/if_vlan.h>
+#include <linux/netpoll.h>
+
+/*
+ * Create the VLAN header for an arbitrary protocol layer
+ *
+ * saddr=NULL means use device source address
+ * daddr=NULL means leave destination address (eg unresolved arp)
+ *
+ * This is called when the SKB is moving down the stack towards the
+ * physical devices.
+ */
+static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type,
+ const void *daddr, const void *saddr,
+ unsigned int len)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ struct vlan_hdr *vhdr;
+ unsigned int vhdrlen = 0;
+ u16 vlan_tci = 0;
+ int rc;
+
+ /* No REORDER_HDR: insert the 4-byte 802.1Q tag into the frame here */
+ if (!(vlan->flags & VLAN_FLAG_REORDER_HDR)) {
+ vhdr = skb_push(skb, VLAN_HLEN);
+
+ /* TCI = VLAN id | egress priority mapped from skb->priority */
+ vlan_tci = vlan->vlan_id;
+ vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
+ vhdr->h_vlan_TCI = htons(vlan_tci);
+
+ /*
+ * Set the protocol type. For a packet of type ETH_P_802_3/2 we
+ * put the length in here instead.
+ */
+ if (type != ETH_P_802_3 && type != ETH_P_802_2)
+ vhdr->h_vlan_encapsulated_proto = htons(type);
+ else
+ vhdr->h_vlan_encapsulated_proto = htons(len);
+
+ /* Outer ethertype becomes the VLAN protocol (802.1Q/802.1ad) */
+ skb->protocol = vlan->vlan_proto;
+ type = ntohs(vlan->vlan_proto);
+ vhdrlen = VLAN_HLEN;
+ }
+
+ /* Before delegating work to the lower layer, enter our MAC-address */
+ if (saddr == NULL)
+ saddr = dev->dev_addr;
+
+ /* Now make the underlying real hard header */
+ dev = vlan->real_dev;
+ rc = dev_hard_header(skb, dev, type, daddr, saddr, len + vhdrlen);
+ if (rc > 0)
+ rc += vhdrlen;
+ return rc;
+}
+
+/* Transmit via netpoll when netpoll is active; must never be reached
+ * when CONFIG_NET_POLL_CONTROLLER is off (hence the BUG()).
+ */
+static inline netdev_tx_t vlan_netpoll_send_skb(struct vlan_dev_priv *vlan, struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ return netpoll_send_skb(vlan->netpoll, skb);
+#else
+ BUG();
+ return NETDEV_TX_OK;
+#endif
+}
+
+/* ndo_start_xmit: tag the frame (in skb metadata) if needed and hand it
+ * to the real device; per-CPU tx stats are updated on success.
+ */
+static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
+ unsigned int len;
+ int ret;
+
+ /* Handle non-VLAN frames if they are sent to us, for example by DHCP.
+ *
+ * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
+ * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
+ */
+ if (veth->h_vlan_proto != vlan->vlan_proto ||
+ vlan->flags & VLAN_FLAG_REORDER_HDR) {
+ u16 vlan_tci;
+ vlan_tci = vlan->vlan_id;
+ vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
+ /* store tag out-of-band for HW-accelerated insertion */
+ __vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci);
+ }
+
+ skb->dev = vlan->real_dev;
+ len = skb->len;		/* sample before xmit may consume skb */
+ if (unlikely(netpoll_tx_running(dev)))
+ return vlan_netpoll_send_skb(vlan, skb);
+
+ ret = dev_queue_xmit(skb);
+
+ if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
+ struct vlan_pcpu_stats *stats;
+
+ stats = this_cpu_ptr(vlan->vlan_pcpu_stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += len;
+ u64_stats_update_end(&stats->syncp);
+ } else {
+ this_cpu_inc(vlan->vlan_pcpu_stats->tx_dropped);
+ }
+
+ return ret;
+}
+
+/* ndo_change_mtu: cap the VLAN MTU at the real device's MTU, minus
+ * VLAN_HLEN when the lower device cannot carry the extra tag bytes.
+ */
+static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ unsigned int max_mtu = real_dev->mtu;
+
+ if (netif_reduces_vlan_mtu(real_dev))
+ max_mtu -= VLAN_HLEN;
+ if (max_mtu < new_mtu)
+ return -ERANGE;
+
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+/* Map an incoming 802.1p priority (0-7) to an skb->priority value and
+ * keep nr_ingress_mappings counting the non-zero entries.
+ */
+void vlan_dev_set_ingress_priority(const struct net_device *dev,
+ u32 skb_prio, u16 vlan_prio)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+
+ /* track transitions between zero and non-zero mappings */
+ if (vlan->ingress_priority_map[vlan_prio & 0x7] && !skb_prio)
+ vlan->nr_ingress_mappings--;
+ else if (!vlan->ingress_priority_map[vlan_prio & 0x7] && skb_prio)
+ vlan->nr_ingress_mappings++;
+
+ vlan->ingress_priority_map[vlan_prio & 0x7] = skb_prio;
+}
+
+/* Map skb->priority to an egress 802.1p priority. Entries live in a
+ * 16-bucket hash of singly-linked lists keyed by skb_prio & 0xF.
+ * Returns 0 on success, -ENOBUFS if a new mapping cannot be allocated.
+ */
+int vlan_dev_set_egress_priority(const struct net_device *dev,
+ u32 skb_prio, u16 vlan_prio)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ struct vlan_priority_tci_mapping *mp = NULL;
+ struct vlan_priority_tci_mapping *np;
+ u32 vlan_qos = (vlan_prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
+
+ /* See if a priority mapping exists.. */
+ mp = vlan->egress_priority_map[skb_prio & 0xF];
+ while (mp) {
+ if (mp->priority == skb_prio) {
+ if (mp->vlan_qos && !vlan_qos)
+ vlan->nr_egress_mappings--;
+ else if (!mp->vlan_qos && vlan_qos)
+ vlan->nr_egress_mappings++;
+ mp->vlan_qos = vlan_qos;
+ return 0;
+ }
+ mp = mp->next;
+ }
+
+ /* Create a new mapping then. */
+ mp = vlan->egress_priority_map[skb_prio & 0xF];
+ np = kmalloc(sizeof(struct vlan_priority_tci_mapping), GFP_KERNEL);
+ if (!np)
+ return -ENOBUFS;
+
+ np->next = mp;
+ np->priority = skb_prio;
+ np->vlan_qos = vlan_qos;
+ /* Before inserting this element in hash table, make sure all its fields
+ * are committed to memory.
+ * coupled with smp_rmb() in vlan_dev_get_egress_qos_mask()
+ */
+ smp_wmb();
+ vlan->egress_priority_map[skb_prio & 0xF] = np;
+ if (vlan_qos)
+ vlan->nr_egress_mappings++;
+ return 0;
+}
+
+/* Flags are defined in the vlan_flags enum in
+ * include/uapi/linux/if_vlan.h file.
+ *
+ * Update the VLAN flags under @mask and issue GVRP/MVRP join/leave
+ * requests when those flags toggle on a running device.
+ * Returns -EINVAL if @mask contains unsupported flag bits.
+ */
+int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ u32 old_flags = vlan->flags;
+
+ if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
+ VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP |
+ VLAN_FLAG_BRIDGE_BINDING))
+ return -EINVAL;
+
+ vlan->flags = (old_flags & ~mask) | (flags & mask);
+
+ /* react only to bits that actually changed (XOR with old flags) */
+ if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_GVRP) {
+ if (vlan->flags & VLAN_FLAG_GVRP)
+ vlan_gvrp_request_join(dev);
+ else
+ vlan_gvrp_request_leave(dev);
+ }
+
+ if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_MVRP) {
+ if (vlan->flags & VLAN_FLAG_MVRP)
+ vlan_mvrp_request_join(dev);
+ else
+ vlan_mvrp_request_leave(dev);
+ }
+ return 0;
+}
+
+/* Copy the underlying real device's name into @result (at most 23
+ * bytes; NOTE(review): result is not guaranteed NUL-terminated when the
+ * name is 23+ chars -- callers must size/terminate accordingly).
+ */
+void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)
+{
+ strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23);
+}
+
+/* If the VLAN device never had its own MAC (NET_ADDR_STOLEN), inherit
+ * the real device's address and notify listeners. Returns true when
+ * the address was inherited.
+ */
+bool vlan_dev_inherit_address(struct net_device *dev,
+ struct net_device *real_dev)
+{
+ if (dev->addr_assign_type != NET_ADDR_STOLEN)
+ return false;
+
+ ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ return true;
+}
+
+/* ndo_open: bring the VLAN device up. Registers our unicast address
+ * with the lower device when the MACs differ, propagates allmulti and
+ * promisc counts, and requests GVRP/MVRP joins. Errors unwind in
+ * reverse order via the labels at the bottom.
+ */
+static int vlan_dev_open(struct net_device *dev)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ struct net_device *real_dev = vlan->real_dev;
+ int err;
+
+ /* lower device must be up unless loose binding is requested */
+ if (!(real_dev->flags & IFF_UP) &&
+ !(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
+ return -ENETDOWN;
+
+ if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr) &&
+ !vlan_dev_inherit_address(dev, real_dev)) {
+ err = dev_uc_add(real_dev, dev->dev_addr);
+ if (err < 0)
+ goto out;
+ }
+
+ if (dev->flags & IFF_ALLMULTI) {
+ err = dev_set_allmulti(real_dev, 1);
+ if (err < 0)
+ goto del_unicast;
+ }
+ if (dev->flags & IFF_PROMISC) {
+ err = dev_set_promiscuity(real_dev, 1);
+ if (err < 0)
+ goto clear_allmulti;
+ }
+
+ /* remember lower MAC so later address changes can be detected */
+ ether_addr_copy(vlan->real_dev_addr, real_dev->dev_addr);
+
+ if (vlan->flags & VLAN_FLAG_GVRP)
+ vlan_gvrp_request_join(dev);
+
+ if (vlan->flags & VLAN_FLAG_MVRP)
+ vlan_mvrp_request_join(dev);
+
+ if (netif_carrier_ok(real_dev) &&
+ !(vlan->flags & VLAN_FLAG_BRIDGE_BINDING))
+ netif_carrier_on(dev);
+ return 0;
+
+clear_allmulti:
+ if (dev->flags & IFF_ALLMULTI)
+ dev_set_allmulti(real_dev, -1);
+del_unicast:
+ if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
+ dev_uc_del(real_dev, dev->dev_addr);
+out:
+ netif_carrier_off(dev);
+ return err;
+}
+
+/* ndo_stop: undo everything vlan_dev_open() did on the lower device --
+ * address lists, allmulti/promisc counts -- and drop carrier.
+ */
+static int vlan_dev_stop(struct net_device *dev)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ struct net_device *real_dev = vlan->real_dev;
+
+ dev_mc_unsync(real_dev, dev);
+ dev_uc_unsync(real_dev, dev);
+ if (dev->flags & IFF_ALLMULTI)
+ dev_set_allmulti(real_dev, -1);
+ if (dev->flags & IFF_PROMISC)
+ dev_set_promiscuity(real_dev, -1);
+
+ if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
+ dev_uc_del(real_dev, dev->dev_addr);
+
+ if (!(vlan->flags & VLAN_FLAG_BRIDGE_BINDING))
+ netif_carrier_off(dev);
+ return 0;
+}
+
+/* ndo_set_mac_address: change the VLAN device MAC. While the device is
+ * up, the new address is registered with (and the old one removed from)
+ * the lower device's unicast filter; when down, only dev_addr changes.
+ */
+static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
+{
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ struct sockaddr *addr = p;
+ int err;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (!(dev->flags & IFF_UP))
+ goto out;
+
+ /* add new filter entry first so traffic is never dropped */
+ if (!ether_addr_equal(addr->sa_data, real_dev->dev_addr)) {
+ err = dev_uc_add(real_dev, addr->sa_data);
+ if (err < 0)
+ return err;
+ }
+
+ if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
+ dev_uc_del(real_dev, dev->dev_addr);
+
+out:
+ ether_addr_copy(dev->dev_addr, addr->sa_data);
+ return 0;
+}
+
+/* ndo_do_ioctl: forward a whitelist of MII/timestamping ioctls to the
+ * real device, rewriting ifr_name so the lower driver sees its own
+ * interface. Unsupported commands return -EOPNOTSUPP.
+ */
+static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ struct ifreq ifrr;
+ int err = -EOPNOTSUPP;
+
+ strncpy(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
+ ifrr.ifr_ifru = ifr->ifr_ifru;
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ /* HW timestamp setup is only allowed in the init namespace */
+ if (!net_eq(dev_net(dev), &init_net))
+ break;
+ fallthrough;
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ case SIOCGHWTSTAMP:
+ if (netif_device_present(real_dev) && ops->ndo_do_ioctl)
+ err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd);
+ break;
+ }
+
+ /* copy results back to the caller's ifreq on success */
+ if (!err)
+ ifr->ifr_ifru = ifrr.ifr_ifru;
+
+ return err;
+}
+
+/* ndo_neigh_setup: delegate neighbour parameter setup to the real
+ * device's handler when it provides one.
+ */
+static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
+{
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ int err = 0;
+
+ if (netif_device_present(real_dev) && ops->ndo_neigh_setup)
+ err = ops->ndo_neigh_setup(real_dev, pa);
+
+ return err;
+}
+
+#if IS_ENABLED(CONFIG_FCOE)
+/* FCoE pass-through ops: each simply forwards the call to the real
+ * device's corresponding ndo_fcoe_* handler when it exists.
+ */
+static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc)
+{
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ int rc = 0;
+
+ if (ops->ndo_fcoe_ddp_setup)
+ rc = ops->ndo_fcoe_ddp_setup(real_dev, xid, sgl, sgc);
+
+ return rc;
+}
+
+static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid)
+{
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ int len = 0;
+
+ if (ops->ndo_fcoe_ddp_done)
+ len = ops->ndo_fcoe_ddp_done(real_dev, xid);
+
+ return len;
+}
+
+static int vlan_dev_fcoe_enable(struct net_device *dev)
+{
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ int rc = -EINVAL;
+
+ if (ops->ndo_fcoe_enable)
+ rc = ops->ndo_fcoe_enable(real_dev);
+ return rc;
+}
+
+static int vlan_dev_fcoe_disable(struct net_device *dev)
+{
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ int rc = -EINVAL;
+
+ if (ops->ndo_fcoe_disable)
+ rc = ops->ndo_fcoe_disable(real_dev);
+ return rc;
+}
+
+static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc)
+{
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ int rc = 0;
+
+ if (ops->ndo_fcoe_ddp_target)
+ rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc);
+
+ return rc;
+}
+#endif
+
+#ifdef NETDEV_FCOE_WWNN
+/* FCoE WWN query: forwarded to the real device's handler. */
+static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
+{
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ int rc = -EINVAL;
+
+ if (ops->ndo_fcoe_get_wwn)
+ rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
+ return rc;
+}
+#endif
+
+/* ndo_change_rx_flags: mirror ALLMULTI/PROMISC toggles of the VLAN
+ * device onto the lower device's reference counts (only while up).
+ */
+static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
+{
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+
+ if (dev->flags & IFF_UP) {
+ if (change & IFF_ALLMULTI)
+ dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+ if (change & IFF_PROMISC)
+ dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1);
+ }
+}
+
+/* ndo_set_rx_mode: sync our multicast/unicast lists down to the real
+ * device.
+ */
+static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
+{
+ dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+ dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+}
+
+/*
+ * vlan network devices have devices nesting below it, and are a special
+ * "super class" of normal network devices; split their locks off into a
+ * separate class since they always nest.
+ */
+/* Dedicated lockdep classes: VLAN devices nest below their real device,
+ * so their xmit/addr-list locks need their own class to avoid false
+ * lockdep recursion reports.
+ */
+static struct lock_class_key vlan_netdev_xmit_lock_key;
+static struct lock_class_key vlan_netdev_addr_lock_key;
+
+/* Per-tx-queue callback: assign the VLAN xmit lock class. */
+static void vlan_dev_set_lockdep_one(struct net_device *dev,
+ struct netdev_queue *txq,
+ void *unused)
+{
+ lockdep_set_class(&txq->_xmit_lock, &vlan_netdev_xmit_lock_key);
+}
+
+/* Apply the VLAN lock classes to the device and all its tx queues. */
+static void vlan_dev_set_lockdep_class(struct net_device *dev)
+{
+ lockdep_set_class(&dev->addr_list_lock,
+ &vlan_netdev_addr_lock_key);
+ netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, NULL);
+}
+
+/* header_ops used when the lower device lacks HW tag offload for our
+ * protocol (see vlan_dev_init): the tag is built in software.
+ */
+static const struct header_ops vlan_header_ops = {
+ .create = vlan_dev_hard_header,
+ .parse = eth_header_parse,
+};
+
+/* Hard-header create for the HW-offload case: no in-line tag is built,
+ * the frame is handed straight to the real device's header routine.
+ */
+static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type,
+ const void *daddr, const void *saddr,
+ unsigned int len)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ struct net_device *real_dev = vlan->real_dev;
+
+ if (saddr == NULL)
+ saddr = dev->dev_addr;
+
+ return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
+}
+
+/* header_ops for lower devices with HW VLAN tag insertion (see
+ * vlan_dev_init).
+ */
+static const struct header_ops vlan_passthru_header_ops = {
+ .create = vlan_passthru_hard_header,
+ .parse = eth_header_parse,
+};
+
+/* sysfs device type name for VLAN interfaces. */
+static struct device_type vlan_type = {
+ .name = "vlan",
+};
+
+/* forward declaration; the table is defined after the ndo functions */
+static const struct net_device_ops vlan_netdev_ops;
+
+/* ndo_init: finish setting up a newly registered VLAN device from its
+ * real device -- flags, features, MAC, header ops and per-CPU stats.
+ * Takes a reference on real_dev that vlan_dev_free() releases.
+ */
+static int vlan_dev_init(struct net_device *dev)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ struct net_device *real_dev = vlan->real_dev;
+
+ netif_carrier_off(dev);
+
+ /* IFF_BROADCAST|IFF_MULTICAST; ??? */
+ dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
+ IFF_MASTER | IFF_SLAVE);
+ dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
+ (1<<__LINK_STATE_DORMANT))) |
+ (1<<__LINK_STATE_PRESENT);
+
+ if (vlan->flags & VLAN_FLAG_BRIDGE_BINDING)
+ dev->state |= (1 << __LINK_STATE_NOCARRIER);
+
+ dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG |
+ NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE |
+ NETIF_F_GSO_ENCAP_ALL |
+ NETIF_F_HIGHDMA | NETIF_F_SCTP_CRC |
+ NETIF_F_ALL_FCOE;
+
+ dev->features |= dev->hw_features | NETIF_F_LLTX;
+ dev->gso_max_size = real_dev->gso_max_size;
+ dev->gso_max_segs = real_dev->gso_max_segs;
+ if (dev->features & NETIF_F_VLAN_FEATURES)
+ netdev_warn(real_dev, "VLAN features are set incorrectly. Q-in-Q configurations may not work correctly.\n");
+
+ dev->vlan_features = real_dev->vlan_features & ~NETIF_F_ALL_FCOE;
+ dev->hw_enc_features = vlan_tnl_features(real_dev);
+ dev->mpls_features = real_dev->mpls_features;
+
+ /* ipv6 shared card related stuff */
+ dev->dev_id = real_dev->dev_id;
+
+ /* no explicit MAC given: borrow the real device's address */
+ if (is_zero_ether_addr(dev->dev_addr)) {
+ ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+ dev->addr_assign_type = NET_ADDR_STOLEN;
+ }
+ if (is_zero_ether_addr(dev->broadcast))
+ memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
+
+#if IS_ENABLED(CONFIG_FCOE)
+ dev->fcoe_ddp_xid = real_dev->fcoe_ddp_xid;
+#endif
+
+ dev->needed_headroom = real_dev->needed_headroom;
+ /* pick software vs HW-offload header ops (see vlan_header_ops) */
+ if (vlan_hw_offload_capable(real_dev->features, vlan->vlan_proto)) {
+ dev->header_ops = &vlan_passthru_header_ops;
+ dev->hard_header_len = real_dev->hard_header_len;
+ } else {
+ dev->header_ops = &vlan_header_ops;
+ dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
+ }
+
+ dev->netdev_ops = &vlan_netdev_ops;
+
+ SET_NETDEV_DEVTYPE(dev, &vlan_type);
+
+ vlan_dev_set_lockdep_class(dev);
+
+ vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
+ if (!vlan->vlan_pcpu_stats)
+ return -ENOMEM;
+
+ /* Get vlan's reference to real_dev */
+ dev_hold(real_dev);
+
+ return 0;
+}
+
+/* Note: this function might be called multiple times for the same device.
+ * Frees every egress priority mapping list; must therefore leave each
+ * bucket NULL so a repeat call is a no-op.
+ */
+void vlan_dev_uninit(struct net_device *dev)
+{
+ struct vlan_priority_tci_mapping *pm;
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
+ while ((pm = vlan->egress_priority_map[i]) != NULL) {
+ vlan->egress_priority_map[i] = pm->next;
+ kfree(pm);
+ }
+ }
+}
+
+/* ndo_fix_features: constrain requested features to what the lower
+ * device can provide, while preserving software features and LLTX.
+ */
+static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ netdev_features_t old_features = features;
+ netdev_features_t lower_features;
+
+ lower_features = netdev_intersect_features((real_dev->vlan_features |
+ NETIF_F_RXCSUM),
+ real_dev->features);
+
+ /* Add HW_CSUM setting to preserve user ability to control
+ * checksum offload on the vlan device.
+ */
+ if (lower_features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
+ lower_features |= NETIF_F_HW_CSUM;
+ features = netdev_intersect_features(features, lower_features);
+ features |= old_features & (NETIF_F_SOFT_FEATURES | NETIF_F_GSO_SOFTWARE);
+ features |= NETIF_F_LLTX;
+
+ return features;
+}
+
+/* ethtool get_link_ksettings: report the real device's link settings. */
+static int vlan_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+
+ return __ethtool_get_link_ksettings(vlan->real_dev, cmd);
+}
+
+/* ethtool get_drvinfo: report the 8021q module's name/version. */
+static void vlan_ethtool_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, vlan_fullname, sizeof(info->driver));
+ strlcpy(info->version, vlan_version, sizeof(info->version));
+ strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
+}
+
+/* ethtool get_ts_info: prefer the real device's PHY timestamp info,
+ * then its ethtool op; fall back to software-only timestamping.
+ */
+static int vlan_ethtool_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ const struct ethtool_ops *ops = vlan->real_dev->ethtool_ops;
+ struct phy_device *phydev = vlan->real_dev->phydev;
+
+ if (phy_has_tsinfo(phydev)) {
+ return phy_ts_info(phydev, info);
+ } else if (ops->get_ts_info) {
+ return ops->get_ts_info(vlan->real_dev, info);
+ } else {
+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ info->phc_index = -1;
+ }
+
+ return 0;
+}
+
+/* ndo_get_stats64: sum the per-CPU stats into @stats, using the u64
+ * seqcount to read each CPU's counters consistently.
+ */
+static void vlan_dev_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct vlan_pcpu_stats *p;
+ u32 rx_errors = 0, tx_dropped = 0;
+ int i;
+
+ for_each_possible_cpu(i) {
+ u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
+ unsigned int start;
+
+ p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
+ do {
+ start = u64_stats_fetch_begin_irq(&p->syncp);
+ rxpackets = p->rx_packets;
+ rxbytes = p->rx_bytes;
+ rxmulticast = p->rx_multicast;
+ txpackets = p->tx_packets;
+ txbytes = p->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+
+ stats->rx_packets += rxpackets;
+ stats->rx_bytes += rxbytes;
+ stats->multicast += rxmulticast;
+ stats->tx_packets += txpackets;
+ stats->tx_bytes += txbytes;
+ /* rx_errors & tx_dropped are u32 */
+ rx_errors += p->rx_errors;
+ tx_dropped += p->tx_dropped;
+ }
+ stats->rx_errors = rx_errors;
+ stats->tx_dropped = tx_dropped;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* ndo_poll_controller: nothing to do; netpoll tx goes through
+ * vlan_netpoll_send_skb() on the real device.
+ */
+static void vlan_dev_poll_controller(struct net_device *dev)
+{
+ return;
+}
+
+/* ndo_netpoll_setup: allocate a netpoll instance bound to the real
+ * device and stash it in vlan->netpoll. Returns 0 or -errno.
+ */
+static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ struct net_device *real_dev = vlan->real_dev;
+ struct netpoll *netpoll;
+ int err = 0;
+
+ netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!netpoll)
+ goto out;
+
+ err = __netpoll_setup(netpoll, real_dev);
+ if (err) {
+ kfree(netpoll);
+ goto out;
+ }
+
+ vlan->netpoll = netpoll;
+
+out:
+ return err;
+}
+
+/* ndo_netpoll_cleanup: detach and free our netpoll instance, if any. */
+static void vlan_dev_netpoll_cleanup(struct net_device *dev)
+{
+ struct vlan_dev_priv *vlan= vlan_dev_priv(dev);
+ struct netpoll *netpoll = vlan->netpoll;
+
+ if (!netpoll)
+ return;
+
+ vlan->netpoll = NULL;
+ __netpoll_free(netpoll);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+/* ndo_get_iflink: the link index is the real device's ifindex. */
+static int vlan_dev_get_iflink(const struct net_device *dev)
+{
+ struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+
+ return real_dev->ifindex;
+}
+
+/* ethtool ops installed on VLAN devices by vlan_setup(). */
+static const struct ethtool_ops vlan_ethtool_ops = {
+ .get_link_ksettings = vlan_ethtool_get_link_ksettings,
+ .get_drvinfo = vlan_ethtool_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = vlan_ethtool_get_ts_info,
+};
+
+/* netdev ops for VLAN devices; mostly thin delegations to real_dev. */
+static const struct net_device_ops vlan_netdev_ops = {
+ .ndo_change_mtu = vlan_dev_change_mtu,
+ .ndo_init = vlan_dev_init,
+ .ndo_uninit = vlan_dev_uninit,
+ .ndo_open = vlan_dev_open,
+ .ndo_stop = vlan_dev_stop,
+ .ndo_start_xmit = vlan_dev_hard_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = vlan_dev_set_mac_address,
+ .ndo_set_rx_mode = vlan_dev_set_rx_mode,
+ .ndo_change_rx_flags = vlan_dev_change_rx_flags,
+ .ndo_do_ioctl = vlan_dev_ioctl,
+ .ndo_neigh_setup = vlan_dev_neigh_setup,
+ .ndo_get_stats64 = vlan_dev_get_stats64,
+#if IS_ENABLED(CONFIG_FCOE)
+ .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
+ .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
+ .ndo_fcoe_enable = vlan_dev_fcoe_enable,
+ .ndo_fcoe_disable = vlan_dev_fcoe_disable,
+ .ndo_fcoe_ddp_target = vlan_dev_fcoe_ddp_target,
+#endif
+#ifdef NETDEV_FCOE_WWNN
+ .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = vlan_dev_poll_controller,
+ .ndo_netpoll_setup = vlan_dev_netpoll_setup,
+ .ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup,
+#endif
+ .ndo_fix_features = vlan_dev_fix_features,
+ .ndo_get_iflink = vlan_dev_get_iflink,
+};
+
+/* priv_destructor: release per-CPU stats and the real_dev reference
+ * taken in vlan_dev_init().
+ */
+static void vlan_dev_free(struct net_device *dev)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+
+ free_percpu(vlan->vlan_pcpu_stats);
+ vlan->vlan_pcpu_stats = NULL;
+
+ /* Get rid of the vlan's reference to real_dev */
+ dev_put(vlan->real_dev);
+}
+
+/* rtnl_link setup callback: initialise a fresh VLAN net_device before
+ * registration (ethernet defaults, ops tables, flags, MTU bounds).
+ */
+void vlan_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+
+ dev->priv_flags |= IFF_802_1Q_VLAN | IFF_NO_QUEUE;
+ dev->priv_flags |= IFF_UNICAST_FLT;
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ netif_keep_dst(dev);
+
+ dev->netdev_ops = &vlan_netdev_ops;
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = vlan_dev_free;
+ dev->ethtool_ops = &vlan_ethtool_ops;
+
+ dev->min_mtu = 0;
+ dev->max_mtu = ETH_MAX_MTU;
+
+ /* broadcast address is inherited from real_dev in vlan_dev_init() */
+ eth_zero_addr(dev->broadcast);
+}
diff --git a/upstream/linux-5.10/net/bridge/br_fdb.c b/upstream/linux-5.10/net/bridge/br_fdb.c
new file mode 100755
index 0000000..65a6054
--- /dev/null
+++ b/upstream/linux-5.10/net/bridge/br_fdb.c
@@ -0,0 +1,1611 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Forwarding database
+ * Linux ethernet bridge
+ *
+ * Authors:
+ * Lennert Buytenhek <buytenh@gnu.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/rculist.h>
+#include <linux/spinlock.h>
+#include <linux/times.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/jhash.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <asm/unaligned.h>
+#include <linux/if_vlan.h>
+#include <net/switchdev.h>
+#include <trace/events/bridge.h>
+#include "br_private.h"
+#ifdef CONFIG_FASTNAT_MODULE
+#include <net/SI/fast_common.h>
+#include <net/SI/net_track.h>
+#include <net/SI/netioc_proc.h>
+#endif
+
+/* rhashtable layout for FDB entries: keyed by (MAC, vlan_id) via
+ * struct net_bridge_fdb_key embedded in each entry.
+ */
+static const struct rhashtable_params br_fdb_rht_params = {
+ .head_offset = offsetof(struct net_bridge_fdb_entry, rhnode),
+ .key_offset = offsetof(struct net_bridge_fdb_entry, key),
+ .key_len = sizeof(struct net_bridge_fdb_key),
+ .automatic_shrinking = true,
+};
+
+/* slab cache for struct net_bridge_fdb_entry (created in br_fdb_init) */
+static struct kmem_cache *br_fdb_cache __read_mostly;
+/* forward declarations; definitions appear later in this file */
+static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
+ const unsigned char *addr, u16 vid);
+static void fdb_notify(struct net_bridge *br,
+ const struct net_bridge_fdb_entry *, int, bool);
+
+/* Module init: create the FDB entry slab cache. Returns 0 or -ENOMEM. */
+int __init br_fdb_init(void)
+{
+ br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
+ sizeof(struct net_bridge_fdb_entry),
+ 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!br_fdb_cache)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/* Module exit: destroy the FDB entry slab cache. */
+void br_fdb_fini(void)
+{
+ kmem_cache_destroy(br_fdb_cache);
+}
+
+/* Per-bridge init: set up the FDB rhashtable. */
+int br_fdb_hash_init(struct net_bridge *br)
+{
+ return rhashtable_init(&br->fdb_hash_tbl, &br_fdb_rht_params);
+}
+
+/* Per-bridge teardown: destroy the FDB rhashtable. */
+void br_fdb_hash_fini(struct net_bridge *br)
+{
+ rhashtable_destroy(&br->fdb_hash_tbl);
+}
+
+/* if topology_changing then use forward_delay (default 15 sec)
+ * otherwise keep longer (default 5 minutes)
+ */
+static inline unsigned long hold_time(const struct net_bridge *br)
+{
+ return br->topology_change ? br->forward_delay : br->ageing_time;
+}
+
+/* A dynamic (non-static, non-externally-learned) entry has expired when
+ * its last update is older than hold_time().
+ */
+static inline int has_expired(const struct net_bridge *br,
+ const struct net_bridge_fdb_entry *fdb)
+{
+ return !test_bit(BR_FDB_STATIC, &fdb->flags) &&
+ !test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags) &&
+ time_before_eq(fdb->updated + hold_time(br), jiffies);
+}
+
+/* RCU callback: return an FDB entry to the slab after the grace period. */
+static void fdb_rcu_free(struct rcu_head *head)
+{
+ struct net_bridge_fdb_entry *ent
+ = container_of(head, struct net_bridge_fdb_entry, rcu);
+ kmem_cache_free(br_fdb_cache, ent);
+}
+
+/* Look up an entry by (addr, vid) under the RCU read lock; returns NULL
+ * if no entry exists.
+ */
+static struct net_bridge_fdb_entry *fdb_find_rcu(struct rhashtable *tbl,
+ const unsigned char *addr,
+ __u16 vid)
+{
+ struct net_bridge_fdb_key key;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ key.vlan_id = vid;
+ memcpy(key.addr.addr, addr, sizeof(key.addr.addr));
+
+ return rhashtable_lookup(tbl, &key, br_fdb_rht_params);
+}
+
+/* requires bridge hash_lock */
+/* Writer-side lookup: caller holds br->hash_lock (lockdep-asserted),
+ * which keeps the returned entry stable after rcu_read_unlock().
+ */
+static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br,
+ const unsigned char *addr,
+ __u16 vid)
+{
+ struct net_bridge_fdb_entry *fdb;
+
+ lockdep_assert_held_once(&br->hash_lock);
+
+ rcu_read_lock();
+ fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
+ rcu_read_unlock();
+
+ return fdb;
+}
+
+/* Resolve (addr, vid) on bridge @br_dev to the port's net_device, or
+ * NULL if @br_dev is not a bridge master or no entry/port exists.
+ * Caller must hold RTNL.
+ */
+struct net_device *br_fdb_find_port(const struct net_device *br_dev,
+ const unsigned char *addr,
+ __u16 vid)
+{
+ struct net_bridge_fdb_entry *f;
+ struct net_device *dev = NULL;
+ struct net_bridge *br;
+
+ ASSERT_RTNL();
+
+ if (!netif_is_bridge_master(br_dev))
+ return NULL;
+
+ br = netdev_priv(br_dev);
+ rcu_read_lock();
+ f = br_fdb_find_rcu(br, addr, vid);
+ if (f && f->dst)
+ dev = f->dst->dev;
+ rcu_read_unlock();
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(br_fdb_find_port);
+
+/* Public RCU lookup wrapper; caller must be in an RCU read section. */
+struct net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br,
+ const unsigned char *addr,
+ __u16 vid)
+{
+ return fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
+}
+
+/* When a static FDB entry is added, the mac address from the entry is
+ * added to the bridge private HW address list and all required ports
+ * are then updated with the new information.
+ * Called under RTNL.
+ */
+static void fdb_add_hw_addr(struct net_bridge *br, const unsigned char *addr)
+{
+ int err;
+ struct net_bridge_port *p;
+
+ ASSERT_RTNL();
+
+ /* only non-promiscuous ports need explicit filter entries */
+ list_for_each_entry(p, &br->port_list, list) {
+ if (!br_promisc_port(p)) {
+ err = dev_uc_add(p->dev, addr);
+ if (err)
+ goto undo;
+ }
+ }
+
+ return;
+undo:
+ /* roll back the ports updated before the failure */
+ list_for_each_entry_continue_reverse(p, &br->port_list, list) {
+ if (!br_promisc_port(p))
+ dev_uc_del(p->dev, addr);
+ }
+}
+
+/* When a static FDB entry is deleted, the HW address from that entry is
+ * also removed from the bridge private HW address list and updates all
+ * the ports with needed information.
+ * Called under RTNL.
+ */
+static void fdb_del_hw_addr(struct net_bridge *br, const unsigned char *addr)
+{
+ struct net_bridge_port *p;
+
+ ASSERT_RTNL();
+
+ list_for_each_entry(p, &br->port_list, list) {
+ if (!br_promisc_port(p))
+ dev_uc_del(p->dev, addr);
+ }
+}
+
+/* Remove an entry from the hash and list, notify userspace (and
+ * optionally switchdev), then free it after an RCU grace period.
+ * Caller holds br->hash_lock.
+ */
+static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f,
+ bool swdev_notify)
+{
+ trace_fdb_delete(br, f);
+
+ if (test_bit(BR_FDB_STATIC, &f->flags))
+ fdb_del_hw_addr(br, f->key.addr.addr);
+
+ hlist_del_init_rcu(&f->fdb_node);
+ rhashtable_remove_fast(&br->fdb_hash_tbl, &f->rhnode,
+ br_fdb_rht_params);
+ fdb_notify(br, f, RTM_DELNEIGH, swdev_notify);
+ call_rcu(&f->rcu, fdb_rcu_free);
+}
+
+/* Delete a local entry if no other port had the same address. */
+static void fdb_delete_local(struct net_bridge *br,
+ const struct net_bridge_port *p,
+ struct net_bridge_fdb_entry *f)
+{
+ const unsigned char *addr = f->key.addr.addr;
+ struct net_bridge_vlan_group *vg;
+ const struct net_bridge_vlan *v;
+ struct net_bridge_port *op;
+ u16 vid = f->key.vlan_id;
+
+ /* Maybe another port has same hw addr? */
+ list_for_each_entry(op, &br->port_list, list) {
+ vg = nbp_vlan_group(op);
+ if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
+ (!vid || br_vlan_find(vg, vid))) {
+ /* re-point instead of deleting */
+ f->dst = op;
+ clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
+ return;
+ }
+ }
+
+ vg = br_vlan_group(br);
+ v = br_vlan_find(vg, vid);
+ /* Maybe bridge device has same hw addr? */
+ if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
+ (!vid || (v && br_vlan_should_use(v)))) {
+ f->dst = NULL;
+ clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
+ return;
+ }
+
+ /* nobody else owns this address: really delete it */
+ fdb_delete(br, f, true);
+}
+
+/* Find the local, non-user-added entry for (addr, vid) on port @p and
+ * delete it (or re-point it) via fdb_delete_local().
+ */
+void br_fdb_find_delete_local(struct net_bridge *br,
+ const struct net_bridge_port *p,
+ const unsigned char *addr, u16 vid)
+{
+ struct net_bridge_fdb_entry *f;
+
+ spin_lock_bh(&br->hash_lock);
+ f = br_fdb_find(br, addr, vid);
+ if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
+ !test_bit(BR_FDB_ADDED_BY_USER, &f->flags) && f->dst == p)
+ fdb_delete_local(br, p, f);
+ spin_unlock_bh(&br->hash_lock);
+}
+
+/* Port MAC address changed: drop the port's old local entries and
+ * insert @newaddr for the default VLAN and every VLAN on the port.
+ * Runs under RTNL.
+ */
+void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
+{
+ struct net_bridge_vlan_group *vg;
+ struct net_bridge_fdb_entry *f;
+ struct net_bridge *br = p->br;
+ struct net_bridge_vlan *v;
+
+ spin_lock_bh(&br->hash_lock);
+ vg = nbp_vlan_group(p);
+ hlist_for_each_entry(f, &br->fdb_list, fdb_node) {
+ if (f->dst == p && test_bit(BR_FDB_LOCAL, &f->flags) &&
+ !test_bit(BR_FDB_ADDED_BY_USER, &f->flags)) {
+ /* delete old one */
+ fdb_delete_local(br, p, f);
+
+ /* if this port has no vlan information
+ * configured, we can safely be done at
+ * this point.
+ */
+ if (!vg || !vg->num_vlans)
+ goto insert;
+ }
+ }
+
+insert:
+ /* insert new address, may fail if invalid address or dup. */
+ fdb_insert(br, p, newaddr, 0);
+
+ if (!vg || !vg->num_vlans)
+ goto done;
+
+ /* Now add entries for every VLAN configured on the port.
+ * This function runs under RTNL so the bitmap will not change
+ * from under us.
+ */
+ list_for_each_entry(v, &vg->vlan_list, vlist)
+ fdb_insert(br, p, newaddr, v->vid);
+
+done:
+ spin_unlock_bh(&br->hash_lock);
+}
+
+/* Bridge device MAC changed: replace the bridge's own local entries
+ * (port-less, non-user-added) with @newaddr for VLAN 0 and every
+ * VLAN configured on the bridge. Runs under RTNL.
+ */
+void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
+{
+ struct net_bridge_vlan_group *vg;
+ struct net_bridge_fdb_entry *f;
+ struct net_bridge_vlan *v;
+
+ spin_lock_bh(&br->hash_lock);
+
+ /* If old entry was unassociated with any port, then delete it. */
+ f = br_fdb_find(br, br->dev->dev_addr, 0);
+ if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
+ !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
+ fdb_delete_local(br, NULL, f);
+
+ fdb_insert(br, NULL, newaddr, 0);
+ vg = br_vlan_group(br);
+ if (!vg || !vg->num_vlans)
+ goto out;
+ /* Now remove and add entries for every VLAN configured on the
+ * bridge. This function runs under RTNL so the bitmap will not
+ * change from under us.
+ */
+ list_for_each_entry(v, &vg->vlan_list, vlist) {
+ if (!br_vlan_should_use(v))
+ continue;
+ f = br_fdb_find(br, br->dev->dev_addr, v->vid);
+ if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
+ !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
+ fdb_delete_local(br, NULL, f);
+ fdb_insert(br, NULL, newaddr, v->vid);
+ }
+out:
+ spin_unlock_bh(&br->hash_lock);
+}
+
+/* Periodic garbage collector for dynamic fdb entries (delayed work).
+ * Expires aged entries, emits inactivity notifications for tracked
+ * static/ext-learned entries, and re-arms itself for the next deadline.
+ */
+void br_fdb_cleanup(struct work_struct *work)
+{
+ struct net_bridge *br = container_of(work, struct net_bridge,
+ gc_work.work);
+ struct net_bridge_fdb_entry *f = NULL;
+ unsigned long delay = hold_time(br);
+ unsigned long work_delay = delay;
+ unsigned long now = jiffies;
+
+ /* this part is tricky, in order to avoid blocking learning and
+ * consequently forwarding, we rely on rcu to delete objects with
+ * delayed freeing allowing us to continue traversing
+ */
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+ unsigned long this_timer = f->updated + delay;
+
+ /* static / externally learned entries never age out; they may
+ * still need an "became inactive" netlink notification */
+ if (test_bit(BR_FDB_STATIC, &f->flags) ||
+ test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags)) {
+ if (test_bit(BR_FDB_NOTIFY, &f->flags)) {
+ if (time_after(this_timer, now))
+ work_delay = min(work_delay,
+ this_timer - now);
+ else if (!test_and_set_bit(BR_FDB_NOTIFY_INACTIVE,
+ &f->flags))
+ fdb_notify(br, f, RTM_NEWNEIGH, false);
+ }
+ continue;
+ }
+
+ if (time_after(this_timer, now)) {
+ work_delay = min(work_delay, this_timer - now);
+ } else {
+ /* take the lock only for actual deletions; recheck the
+ * entry is still hashed since we dropped into locked mode */
+ spin_lock_bh(&br->hash_lock);
+ if (!hlist_unhashed(&f->fdb_node))
+ fdb_delete(br, f, true);
+ spin_unlock_bh(&br->hash_lock);
+ }
+ }
+ rcu_read_unlock();
+
+ /* Cleanup minimum 10 milliseconds apart */
+ work_delay = max_t(unsigned long, work_delay, msecs_to_jiffies(10));
+ mod_delayed_work(system_long_wq, &br->gc_work, work_delay);
+}
+
+/* Completely flush all dynamic entries in forwarding database.*/
+/* Static entries (including local ones, which carry BR_FDB_STATIC) survive. */
+void br_fdb_flush(struct net_bridge *br)
+{
+ struct net_bridge_fdb_entry *f;
+ struct hlist_node *tmp;
+
+ spin_lock_bh(&br->hash_lock);
+ hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
+ if (!test_bit(BR_FDB_STATIC, &f->flags))
+ fdb_delete(br, f, true);
+ }
+ spin_unlock_bh(&br->hash_lock);
+}
+
+/* Flush all entries referring to a specific port.
+ * if do_all is set also flush static entries
+ * if vid is set delete all entries that match the vlan_id
+ */
+void br_fdb_delete_by_port(struct net_bridge *br,
+ const struct net_bridge_port *p,
+ u16 vid,
+ int do_all)
+{
+ struct net_bridge_fdb_entry *f;
+ struct hlist_node *tmp;
+
+ spin_lock_bh(&br->hash_lock);
+ hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
+ if (f->dst != p)
+ continue;
+
+ /* in the non-do_all case keep static entries and entries the
+ * hardware still owns (ext-learned but not offloaded) */
+ if (!do_all)
+ if (test_bit(BR_FDB_STATIC, &f->flags) ||
+ (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags) &&
+ !test_bit(BR_FDB_OFFLOADED, &f->flags)) ||
+ (vid && f->key.vlan_id != vid))
+ continue;
+
+ if (test_bit(BR_FDB_LOCAL, &f->flags))
+ fdb_delete_local(br, p, f);
+ else
+ fdb_delete(br, f, true);
+ }
+ spin_unlock_bh(&br->hash_lock);
+}
+
+#if IS_ENABLED(CONFIG_ATM_LANE)
+/* Interface used by ATM LANE hook to test
+ * if an addr is on some other bridge port */
+/* Returns nonzero when addr is known behind a *different* forwarding port. */
+int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
+{
+ struct net_bridge_fdb_entry *fdb;
+ struct net_bridge_port *port;
+ int ret;
+
+ rcu_read_lock();
+ port = br_port_get_rcu(dev);
+ if (!port)
+ ret = 0;
+ else {
+ fdb = br_fdb_find_rcu(port->br, addr, 0);
+ ret = fdb && fdb->dst && fdb->dst->dev != dev &&
+ fdb->dst->state == BR_STATE_FORWARDING;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+#endif /* CONFIG_ATM_LANE */
+
+/*
+ * Fill buffer with forwarding table records in
+ * the API format.
+ */
+/* Legacy ioctl dump helper: copies up to maxnum __fdb_entry records after
+ * skipping 'skip' eligible ones; returns the number written. */
+int br_fdb_fillbuf(struct net_bridge *br, void *buf,
+ unsigned long maxnum, unsigned long skip)
+{
+ struct net_bridge_fdb_entry *f;
+ struct __fdb_entry *fe = buf;
+ int num = 0;
+
+ memset(buf, 0, maxnum*sizeof(struct __fdb_entry));
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+ if (num >= maxnum)
+ break;
+
+ if (has_expired(br, f))
+ continue;
+
+ /* ignore pseudo entry for local MAC address */
+ if (!f->dst)
+ continue;
+
+ if (skip) {
+ --skip;
+ continue;
+ }
+
+ /* convert from internal format to API */
+ memcpy(fe->mac_addr, f->key.addr.addr, ETH_ALEN);
+
+ /* due to ABI compat need to split into hi/lo */
+ fe->port_no = f->dst->port_no;
+ fe->port_hi = f->dst->port_no >> 8;
+
+ fe->is_local = test_bit(BR_FDB_LOCAL, &f->flags);
+ if (!test_bit(BR_FDB_STATIC, &f->flags))
+ fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
+ ++fe;
+ ++num;
+ }
+ rcu_read_unlock();
+
+ return num;
+}
+
+/* Allocate and insert a new fdb entry keyed on (addr, vid).
+ * Returns NULL on allocation failure or when an entry with the same key
+ * already exists in the rhashtable (insert_fast rejects duplicates).
+ * Caller must hold br->hash_lock.
+ */
+static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br,
+ struct net_bridge_port *source,
+ const unsigned char *addr,
+ __u16 vid,
+ unsigned long flags)
+{
+ struct net_bridge_fdb_entry *fdb;
+
+ fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
+ if (fdb) {
+ memcpy(fdb->key.addr.addr, addr, ETH_ALEN);
+ fdb->dst = source;
+ fdb->key.vlan_id = vid;
+ fdb->flags = flags;
+ fdb->updated = fdb->used = jiffies;
+ if (rhashtable_lookup_insert_fast(&br->fdb_hash_tbl,
+ &fdb->rhnode,
+ br_fdb_rht_params)) {
+ kmem_cache_free(br_fdb_cache, fdb);
+ fdb = NULL;
+ } else {
+ hlist_add_head_rcu(&fdb->fdb_node, &br->fdb_list);
+ }
+ }
+ return fdb;
+}
+
+/* Insert a LOCAL|STATIC entry for one of the bridge's own addresses,
+ * replacing a conflicting learned entry if present.
+ * Caller must hold br->hash_lock.
+ */
+static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
+ const unsigned char *addr, u16 vid)
+{
+ struct net_bridge_fdb_entry *fdb;
+
+ if (!is_valid_ether_addr(addr))
+ return -EINVAL;
+
+ fdb = br_fdb_find(br, addr, vid);
+ if (fdb) {
+ /* it is okay to have multiple ports with same
+ * address, just use the first one.
+ */
+ if (test_bit(BR_FDB_LOCAL, &fdb->flags))
+ return 0;
+ br_warn(br, "adding interface %s with same address as a received packet (addr:%pM, vlan:%u)\n",
+ source ? source->dev->name : br->dev->name, addr, vid);
+ fdb_delete(br, fdb, true);
+ }
+
+ fdb = fdb_create(br, source, addr, vid,
+ BIT(BR_FDB_LOCAL) | BIT(BR_FDB_STATIC));
+ if (!fdb)
+ return -ENOMEM;
+
+ fdb_add_hw_addr(br, addr);
+ fdb_notify(br, fdb, RTM_NEWNEIGH, true);
+ return 0;
+}
+
+/* Add entry for local address of interface */
+/* Locked wrapper around fdb_insert(); returns 0 or a negative errno. */
+int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
+ const unsigned char *addr, u16 vid)
+{
+ int ret;
+
+ spin_lock_bh(&br->hash_lock);
+ ret = fdb_insert(br, source, addr, vid);
+ spin_unlock_bh(&br->hash_lock);
+ return ret;
+}
+
+/* If the entry is currently marked notify-inactive, clear that mark.
+ * Returns true when the entry was actually modified.
+ */
+static bool __fdb_mark_active(struct net_bridge_fdb_entry *fdb)
+{
+ if (!test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
+ return false;
+
+ return test_and_clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags);
+}
+
+/* Learning fast path: refresh or create the entry for (addr, vid) seen on
+ * 'source'. Runs lockless (RCU) for the update case; only takes hash_lock
+ * when a new entry must be created.
+ */
+void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
+ const unsigned char *addr, u16 vid, unsigned long flags)
+{
+ struct net_bridge_fdb_entry *fdb;
+
+ /* some users want to always flood. */
+ if (hold_time(br) == 0)
+ return;
+
+ fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
+ if (likely(fdb)) {
+ /* attempt to update an entry for a local interface */
+ if (unlikely(test_bit(BR_FDB_LOCAL, &fdb->flags))) {
+ if (net_ratelimit())
+ br_warn(br, "received packet on %s with own address as source address (addr:%pM, vlan:%u)\n",
+ source->dev->name, addr, vid);
+ } else {
+ unsigned long now = jiffies;
+ bool fdb_modified = false;
+
+ if (now != fdb->updated) {
+ fdb->updated = now;
+ fdb_modified = __fdb_mark_active(fdb);
+ }
+
+ /* fastpath: update of existing entry */
+ if (unlikely(source != fdb->dst &&
+ !test_bit(BR_FDB_STICKY, &fdb->flags))) {
+ fdb->dst = source;
+ fdb_modified = true;
+ /* Take over HW learned entry */
+ if (unlikely(test_bit(BR_FDB_ADDED_BY_EXT_LEARN,
+ &fdb->flags)))
+ clear_bit(BR_FDB_ADDED_BY_EXT_LEARN,
+ &fdb->flags);
+ }
+
+ if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags)))
+ set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
+ if (unlikely(fdb_modified)) {
+ trace_br_fdb_update(br, source, addr, vid, flags);
+ fdb_notify(br, fdb, RTM_NEWNEIGH, true);
+ }
+ }
+ } else {
+ spin_lock(&br->hash_lock);
+ fdb = fdb_create(br, source, addr, vid, flags);
+ if (fdb) {
+ trace_br_fdb_update(br, source, addr, vid, flags);
+ fdb_notify(br, fdb, RTM_NEWNEIGH, true);
+ }
+ /* else we lose race and someone else inserts
+ * it first, don't bother updating
+ */
+ spin_unlock(&br->hash_lock);
+ }
+}
+
+/* Map an fdb entry onto the neighbour (NUD) state reported over netlink.
+ * Precedence: local -> permanent, static -> noarp, aged-out -> stale,
+ * otherwise reachable.
+ */
+static int fdb_to_nud(const struct net_bridge *br,
+ const struct net_bridge_fdb_entry *fdb)
+{
+ if (test_bit(BR_FDB_LOCAL, &fdb->flags))
+ return NUD_PERMANENT;
+ if (test_bit(BR_FDB_STATIC, &fdb->flags))
+ return NUD_NOARP;
+
+ return has_expired(br, fdb) ? NUD_STALE : NUD_REACHABLE;
+}
+
+/* Serialize one fdb entry into a netlink RTM_*NEIGH message on skb.
+ * Returns 0 or -EMSGSIZE when the skb has no room (message is cancelled).
+ */
+static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
+ const struct net_bridge_fdb_entry *fdb,
+ u32 portid, u32 seq, int type, unsigned int flags)
+{
+ unsigned long now = jiffies;
+ struct nda_cacheinfo ci;
+ struct nlmsghdr *nlh;
+ struct ndmsg *ndm;
+
+ nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
+ if (nlh == NULL)
+ return -EMSGSIZE;
+
+ ndm = nlmsg_data(nlh);
+ ndm->ndm_family = AF_BRIDGE;
+ ndm->ndm_pad1 = 0;
+ ndm->ndm_pad2 = 0;
+ ndm->ndm_flags = 0;
+ ndm->ndm_type = 0;
+ /* port-less entries belong to the bridge device itself */
+ ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex;
+ ndm->ndm_state = fdb_to_nud(br, fdb);
+
+ if (test_bit(BR_FDB_OFFLOADED, &fdb->flags))
+ ndm->ndm_flags |= NTF_OFFLOADED;
+ if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
+ ndm->ndm_flags |= NTF_EXT_LEARNED;
+ if (test_bit(BR_FDB_STICKY, &fdb->flags))
+ ndm->ndm_flags |= NTF_STICKY;
+
+ if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex))
+ goto nla_put_failure;
+ ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
+ ci.ndm_confirmed = 0;
+ ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
+ ci.ndm_refcnt = 0;
+ if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
+ goto nla_put_failure;
+
+ if (fdb->key.vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16),
+ &fdb->key.vlan_id))
+ goto nla_put_failure;
+
+ /* activity-notification state goes in a nested NDA_FDB_EXT_ATTRS */
+ if (test_bit(BR_FDB_NOTIFY, &fdb->flags)) {
+ struct nlattr *nest = nla_nest_start(skb, NDA_FDB_EXT_ATTRS);
+ u8 notify_bits = FDB_NOTIFY_BIT;
+
+ if (!nest)
+ goto nla_put_failure;
+ if (test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
+ notify_bits |= FDB_NOTIFY_INACTIVE_BIT;
+
+ if (nla_put_u8(skb, NFEA_ACTIVITY_NOTIFY, notify_bits)) {
+ nla_nest_cancel(skb, nest);
+ goto nla_put_failure;
+ }
+
+ nla_nest_end(skb, nest);
+ }
+
+ nlmsg_end(skb, nlh);
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
+
+/* Upper bound on the netlink message size fdb_fill_info() can emit;
+ * must cover every attribute that function may add.
+ */
+static inline size_t fdb_nlmsg_size(void)
+{
+ return NLMSG_ALIGN(sizeof(struct ndmsg))
+ + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
+ + nla_total_size(sizeof(u32)) /* NDA_MASTER */
+ + nla_total_size(sizeof(u16)) /* NDA_VLAN */
+ + nla_total_size(sizeof(struct nda_cacheinfo))
+ + nla_total_size(0) /* NDA_FDB_EXT_ATTRS */
+ + nla_total_size(sizeof(u8)); /* NFEA_ACTIVITY_NOTIFY */
+}
+
+/* Broadcast an RTM_NEWNEIGH/RTM_DELNEIGH event for fdb to RTNLGRP_NEIGH
+ * listeners, optionally notifying switchdev first.
+ */
+static void fdb_notify(struct net_bridge *br,
+ const struct net_bridge_fdb_entry *fdb, int type,
+ bool swdev_notify)
+{
+ struct net *net = dev_net(br->dev);
+ struct sk_buff *skb;
+ int err = -ENOBUFS;
+
+ if (swdev_notify)
+ br_switchdev_fdb_notify(fdb, type);
+
+ skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
+ if (skb == NULL)
+ goto errout;
+
+ err = fdb_fill_info(skb, br, fdb, 0, 0, type, 0);
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in fdb_nlmsg_size() */
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(skb);
+ goto errout;
+ }
+ rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
+ return;
+errout:
+ rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
+}
+
+/* Dump information about entries, in response to GETNEIGH */
+/* ndo_fdb_dump implementation: walks br->fdb_list under RCU, resuming at
+ * cb->args[2]; *idx counts every visited entry (including skipped ones) so
+ * multi-message dumps restart in the right place.
+ */
+int br_fdb_dump(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct net_device *dev,
+ struct net_device *filter_dev,
+ int *idx)
+{
+ struct net_bridge *br = netdev_priv(dev);
+ struct net_bridge_fdb_entry *f;
+ int err = 0;
+
+ if (!(dev->priv_flags & IFF_EBRIDGE))
+ return err;
+
+ if (!filter_dev) {
+ err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
+ if (err < 0)
+ return err;
+ }
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+ if (*idx < cb->args[2])
+ goto skip;
+ if (filter_dev && (!f->dst || f->dst->dev != filter_dev)) {
+ if (filter_dev != dev)
+ goto skip;
+ /* !f->dst is a special case for bridge
+ * It means the MAC belongs to the bridge
+ * Therefore need a little more filtering
+ * we only want to dump the !f->dst case
+ */
+ if (f->dst)
+ goto skip;
+ }
+ if (!filter_dev && f->dst)
+ goto skip;
+
+ err = fdb_fill_info(skb, br, f,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWNEIGH,
+ NLM_F_MULTI);
+ if (err < 0)
+ break;
+skip:
+ *idx += 1;
+ }
+ rcu_read_unlock();
+
+ return err;
+}
+
+/* ndo_fdb_get implementation: look up a single (addr, vid) entry under RCU
+ * and serialize it into skb. Returns -ENOENT when no entry exists.
+ */
+int br_fdb_get(struct sk_buff *skb,
+ struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr,
+ u16 vid, u32 portid, u32 seq,
+ struct netlink_ext_ack *extack)
+{
+ struct net_bridge *br = netdev_priv(dev);
+ struct net_bridge_fdb_entry *f;
+ int err = 0;
+
+ rcu_read_lock();
+ f = br_fdb_find_rcu(br, addr, vid);
+ if (!f) {
+ NL_SET_ERR_MSG(extack, "Fdb entry not found");
+ err = -ENOENT;
+ goto errout;
+ }
+
+ err = fdb_fill_info(skb, br, f, portid, seq,
+ RTM_NEWNEIGH, 0);
+errout:
+ rcu_read_unlock();
+ return err;
+}
+
+/* returns true if the fdb is modified */
+/* Apply user-requested activity-notification bits (FDB_NOTIFY_BIT,
+ * FDB_NOTIFY_INACTIVE_BIT) to the entry's flags.
+ */
+static bool fdb_handle_notify(struct net_bridge_fdb_entry *fdb, u8 notify)
+{
+ bool modified = false;
+
+ /* allow to mark an entry as inactive, usually done on creation */
+ if ((notify & FDB_NOTIFY_INACTIVE_BIT) &&
+ !test_and_set_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
+ modified = true;
+
+ if ((notify & FDB_NOTIFY_BIT) &&
+ !test_and_set_bit(BR_FDB_NOTIFY, &fdb->flags)) {
+ /* enabled activity tracking */
+ modified = true;
+ } else if (!(notify & FDB_NOTIFY_BIT) &&
+ test_and_clear_bit(BR_FDB_NOTIFY, &fdb->flags)) {
+ /* disabled activity tracking, clear notify state */
+ clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags);
+ modified = true;
+ }
+
+ return modified;
+}
+
+/* Update (create or replace) forwarding database entry */
+/* Netlink-driven add/replace for one (addr, vid). Caller holds hash_lock.
+ * 'flags' are NLM_F_* request flags; 'state' is the requested NUD state.
+ */
+static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
+ const u8 *addr, struct ndmsg *ndm, u16 flags, u16 vid,
+ struct nlattr *nfea_tb[])
+{
+ bool is_sticky = !!(ndm->ndm_flags & NTF_STICKY);
+ bool refresh = !nfea_tb[NFEA_DONT_REFRESH];
+ struct net_bridge_fdb_entry *fdb;
+ u16 state = ndm->ndm_state;
+ bool modified = false;
+ u8 notify = 0;
+
+ /* If the port cannot learn allow only local and static entries */
+ if (source && !(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
+ !(source->state == BR_STATE_LEARNING ||
+ source->state == BR_STATE_FORWARDING))
+ return -EPERM;
+
+ /* entries pointing at the bridge itself must be permanent */
+ if (!source && !(state & NUD_PERMANENT)) {
+ pr_info("bridge: RTM_NEWNEIGH %s without NUD_PERMANENT\n",
+ br->dev->name);
+ return -EINVAL;
+ }
+
+ /* sticky only makes sense for learned (non-permanent) entries */
+ if (is_sticky && (state & NUD_PERMANENT))
+ return -EINVAL;
+
+ if (nfea_tb[NFEA_ACTIVITY_NOTIFY]) {
+ notify = nla_get_u8(nfea_tb[NFEA_ACTIVITY_NOTIFY]);
+ if ((notify & ~BR_FDB_NOTIFY_SETTABLE_BITS) ||
+ (notify & BR_FDB_NOTIFY_SETTABLE_BITS) == FDB_NOTIFY_INACTIVE_BIT)
+ return -EINVAL;
+ }
+
+ fdb = br_fdb_find(br, addr, vid);
+ if (fdb == NULL) {
+ if (!(flags & NLM_F_CREATE))
+ return -ENOENT;
+
+ fdb = fdb_create(br, source, addr, vid, 0);
+ if (!fdb)
+ return -ENOMEM;
+
+ modified = true;
+ } else {
+ if (flags & NLM_F_EXCL)
+ return -EEXIST;
+
+ if (fdb->dst != source) {
+ fdb->dst = source;
+ modified = true;
+ }
+ }
+
+ /* reconcile LOCAL/STATIC flags (and the hw address filter) with the
+ * requested NUD state */
+ if (fdb_to_nud(br, fdb) != state) {
+ if (state & NUD_PERMANENT) {
+ set_bit(BR_FDB_LOCAL, &fdb->flags);
+ if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
+ fdb_add_hw_addr(br, addr);
+ } else if (state & NUD_NOARP) {
+ clear_bit(BR_FDB_LOCAL, &fdb->flags);
+ if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
+ fdb_add_hw_addr(br, addr);
+ } else {
+ clear_bit(BR_FDB_LOCAL, &fdb->flags);
+ if (test_and_clear_bit(BR_FDB_STATIC, &fdb->flags))
+ fdb_del_hw_addr(br, addr);
+ }
+
+ modified = true;
+ }
+
+ if (is_sticky != test_bit(BR_FDB_STICKY, &fdb->flags)) {
+ change_bit(BR_FDB_STICKY, &fdb->flags);
+ modified = true;
+ }
+
+ if (fdb_handle_notify(fdb, notify))
+ modified = true;
+
+ set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
+
+ fdb->used = jiffies;
+ if (modified) {
+ if (refresh)
+ fdb->updated = jiffies;
+ fdb_notify(br, fdb, RTM_NEWNEIGH, true);
+ }
+
+ return 0;
+}
+
+/* Dispatch one RTM_NEWNEIGH request: NTF_USE refreshes via the learning
+ * path, NTF_EXT_LEARNED goes through the external-learn path, everything
+ * else is a locked fdb_add_entry().
+ */
+static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
+ struct net_bridge_port *p, const unsigned char *addr,
+ u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[],
+ struct netlink_ext_ack *extack)
+{
+ int err = 0;
+
+ if (ndm->ndm_flags & NTF_USE) {
+ if (!p) {
+ pr_info("bridge: RTM_NEWNEIGH %s with NTF_USE is not supported\n",
+ br->dev->name);
+ return -EINVAL;
+ }
+ if (!nbp_state_should_learn(p))
+ return 0;
+
+ /* mimic softirq context so br_fdb_update's locking is valid */
+ local_bh_disable();
+ rcu_read_lock();
+ br_fdb_update(br, p, addr, vid, BIT(BR_FDB_ADDED_BY_USER));
+ rcu_read_unlock();
+ local_bh_enable();
+ } else if (ndm->ndm_flags & NTF_EXT_LEARNED) {
+ if (!p && !(ndm->ndm_state & NUD_PERMANENT)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "FDB entry towards bridge must be permanent");
+ return -EINVAL;
+ }
+ err = br_fdb_external_learn_add(br, p, addr, vid, true);
+ } else {
+ spin_lock_bh(&br->hash_lock);
+ err = fdb_add_entry(br, p, addr, ndm, nlh_flags, vid, nfea_tb);
+ spin_unlock_bh(&br->hash_lock);
+ }
+
+ return err;
+}
+
+/* Netlink policy for the nested NDA_FDB_EXT_ATTRS attributes. */
+static const struct nla_policy br_nda_fdb_pol[NFEA_MAX + 1] = {
+ [NFEA_ACTIVITY_NOTIFY] = { .type = NLA_U8 },
+ [NFEA_DONT_REFRESH] = { .type = NLA_FLAG },
+};
+
+/* Add new permanent fdb entry with RTM_NEWNEIGH */
+/* ndo_fdb_add entry point. Validates the request, resolves the bridge and
+ * optional port, then adds for the given VLAN — or, when no VLAN was given,
+ * for VLAN 0 plus every VLAN configured on the port/bridge.
+ */
+int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 vid, u16 nlh_flags,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *nfea_tb[NFEA_MAX + 1], *attr;
+ struct net_bridge_vlan_group *vg;
+ struct net_bridge_port *p = NULL;
+ struct net_bridge_vlan *v;
+ struct net_bridge *br = NULL;
+ int err = 0;
+
+ trace_br_fdb_add(ndm, dev, addr, vid, nlh_flags);
+
+ if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
+ pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
+ return -EINVAL;
+ }
+
+ if (is_zero_ether_addr(addr)) {
+ pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n");
+ return -EINVAL;
+ }
+
+ /* dev may be the bridge itself or one of its ports */
+ if (dev->priv_flags & IFF_EBRIDGE) {
+ br = netdev_priv(dev);
+ vg = br_vlan_group(br);
+ } else {
+ p = br_port_get_rtnl(dev);
+ if (!p) {
+ pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
+ dev->name);
+ return -EINVAL;
+ }
+ br = p->br;
+ vg = nbp_vlan_group(p);
+ }
+
+ if (tb[NDA_FDB_EXT_ATTRS]) {
+ attr = tb[NDA_FDB_EXT_ATTRS];
+ err = nla_parse_nested(nfea_tb, NFEA_MAX, attr,
+ br_nda_fdb_pol, extack);
+ if (err)
+ return err;
+ } else {
+ memset(nfea_tb, 0, sizeof(struct nlattr *) * (NFEA_MAX + 1));
+ }
+
+ if (vid) {
+ v = br_vlan_find(vg, vid);
+ if (!v || !br_vlan_should_use(v)) {
+ pr_info("bridge: RTM_NEWNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
+ return -EINVAL;
+ }
+
+ /* VID was specified, so use it. */
+ err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb,
+ extack);
+ } else {
+ err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb,
+ extack);
+ if (err || !vg || !vg->num_vlans)
+ goto out;
+
+ /* We have vlans configured on this port and user didn't
+ * specify a VLAN. To be nice, add/update entry for every
+ * vlan on this port.
+ */
+ list_for_each_entry(v, &vg->vlan_list, vlist) {
+ if (!br_vlan_should_use(v))
+ continue;
+ err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid,
+ nfea_tb, extack);
+ if (err)
+ goto out;
+ }
+ }
+
+out:
+ return err;
+}
+
+/* Delete the fdb entry for (addr, vlan) only if it currently points at
+ * port p. Returns 0 on success, -ENOENT otherwise.
+ * Caller must hold br->hash_lock.
+ */
+static int fdb_delete_by_addr_and_port(struct net_bridge *br,
+ const struct net_bridge_port *p,
+ const u8 *addr, u16 vlan)
+{
+ struct net_bridge_fdb_entry *entry = br_fdb_find(br, addr, vlan);
+
+ if (entry && entry->dst == p) {
+ fdb_delete(br, entry, true);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+/* Locked wrapper around fdb_delete_by_addr_and_port(). */
+static int __br_fdb_delete(struct net_bridge *br,
+ const struct net_bridge_port *p,
+ const unsigned char *addr, u16 vid)
+{
+ int err;
+
+ spin_lock_bh(&br->hash_lock);
+ err = fdb_delete_by_addr_and_port(br, p, addr, vid);
+ spin_unlock_bh(&br->hash_lock);
+
+ return err;
+}
+
+/* Remove neighbor entry with RTM_DELNEIGH */
+/* ndo_fdb_del entry point. When no VLAN is given, deletes the entry for
+ * VLAN 0 and every configured VLAN; the bitwise &= accumulates errors so
+ * the result is "success if any deletion succeeded".
+ */
+int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct net_bridge_vlan_group *vg;
+ struct net_bridge_port *p = NULL;
+ struct net_bridge_vlan *v;
+ struct net_bridge *br;
+ int err;
+
+ if (dev->priv_flags & IFF_EBRIDGE) {
+ br = netdev_priv(dev);
+ vg = br_vlan_group(br);
+ } else {
+ p = br_port_get_rtnl(dev);
+ if (!p) {
+ pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
+ dev->name);
+ return -EINVAL;
+ }
+ vg = nbp_vlan_group(p);
+ br = p->br;
+ }
+
+ if (vid) {
+ v = br_vlan_find(vg, vid);
+ if (!v) {
+ pr_info("bridge: RTM_DELNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
+ return -EINVAL;
+ }
+
+ err = __br_fdb_delete(br, p, addr, vid);
+ } else {
+ err = -ENOENT;
+ err &= __br_fdb_delete(br, p, addr, 0);
+ if (!vg || !vg->num_vlans)
+ return err;
+
+ list_for_each_entry(v, &vg->vlan_list, vlist) {
+ if (!br_vlan_should_use(v))
+ continue;
+ err &= __br_fdb_delete(br, p, addr, v->vid);
+ }
+ }
+
+ return err;
+}
+
+/* Push all static fdb addresses into port p's unicast filter.
+ * On failure, undoes the additions made so far (rollback walks the list
+ * again up to the failing entry) and returns the error.
+ */
+int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p)
+{
+ struct net_bridge_fdb_entry *f, *tmp;
+ int err = 0;
+
+ ASSERT_RTNL();
+
+ /* the key here is that static entries change only under rtnl */
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+ /* We only care for static entries */
+ if (!test_bit(BR_FDB_STATIC, &f->flags))
+ continue;
+ err = dev_uc_add(p->dev, f->key.addr.addr);
+ if (err)
+ goto rollback;
+ }
+done:
+ rcu_read_unlock();
+
+ return err;
+
+rollback:
+ hlist_for_each_entry_rcu(tmp, &br->fdb_list, fdb_node) {
+ /* We only care for static entries */
+ if (!test_bit(BR_FDB_STATIC, &tmp->flags))
+ continue;
+ /* stop at the entry whose dev_uc_add() failed */
+ if (tmp == f)
+ break;
+ dev_uc_del(p->dev, tmp->key.addr.addr);
+ }
+
+ goto done;
+}
+
+/* Remove all static fdb addresses from port p's unicast filter
+ * (inverse of br_fdb_sync_static()).
+ */
+void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
+{
+ struct net_bridge_fdb_entry *f;
+
+ ASSERT_RTNL();
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+ /* We only care for static entries */
+ if (!test_bit(BR_FDB_STATIC, &f->flags))
+ continue;
+
+ dev_uc_del(p->dev, f->key.addr.addr);
+ }
+ rcu_read_unlock();
+}
+
+/* Add or refresh an entry learned externally (switchdev/driver).
+ * swdev_notify distinguishes user-initiated adds (notify hardware) from
+ * hardware-initiated ones (don't echo the event back).
+ */
+int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
+ const unsigned char *addr, u16 vid,
+ bool swdev_notify)
+{
+ struct net_bridge_fdb_entry *fdb;
+ bool modified = false;
+ int err = 0;
+
+ trace_br_fdb_external_learn_add(br, p, addr, vid);
+
+ spin_lock_bh(&br->hash_lock);
+
+ fdb = br_fdb_find(br, addr, vid);
+ if (!fdb) {
+ unsigned long flags = BIT(BR_FDB_ADDED_BY_EXT_LEARN);
+
+ if (swdev_notify)
+ flags |= BIT(BR_FDB_ADDED_BY_USER);
+
+ /* no port means the address belongs to the bridge itself */
+ if (!p)
+ flags |= BIT(BR_FDB_LOCAL);
+
+ fdb = fdb_create(br, p, addr, vid, flags);
+ if (!fdb) {
+ err = -ENOMEM;
+ goto err_unlock;
+ }
+ fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
+ } else {
+ fdb->updated = jiffies;
+
+ if (fdb->dst != p) {
+ fdb->dst = p;
+ modified = true;
+ }
+
+ if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
+ /* Refresh entry */
+ fdb->used = jiffies;
+ } else if (!test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags)) {
+ /* Take over SW learned entry */
+ set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags);
+ modified = true;
+ }
+
+ if (swdev_notify)
+ set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
+
+ if (!p)
+ set_bit(BR_FDB_LOCAL, &fdb->flags);
+
+ if (modified)
+ fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
+ }
+
+err_unlock:
+ spin_unlock_bh(&br->hash_lock);
+
+ return err;
+}
+
+/* Delete an externally learned entry for (addr, vid).
+ * Returns -ENOENT if the entry doesn't exist or wasn't ext-learned.
+ */
+int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
+ const unsigned char *addr, u16 vid,
+ bool swdev_notify)
+{
+ struct net_bridge_fdb_entry *fdb;
+ int err = 0;
+
+ spin_lock_bh(&br->hash_lock);
+
+ fdb = br_fdb_find(br, addr, vid);
+ if (fdb && test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
+ fdb_delete(br, fdb, swdev_notify);
+ else
+ err = -ENOENT;
+
+ spin_unlock_bh(&br->hash_lock);
+
+ return err;
+}
+
+/* Set or clear the OFFLOADED flag on the entry for (addr, vid), if any. */
+void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
+ const unsigned char *addr, u16 vid, bool offloaded)
+{
+ struct net_bridge_fdb_entry *fdb;
+
+ spin_lock_bh(&br->hash_lock);
+
+ fdb = br_fdb_find(br, addr, vid);
+ /* change_bit only when the stored flag differs from 'offloaded' */
+ if (fdb && offloaded != test_bit(BR_FDB_OFFLOADED, &fdb->flags))
+ change_bit(BR_FDB_OFFLOADED, &fdb->flags);
+
+ spin_unlock_bh(&br->hash_lock);
+}
+#ifdef CONFIG_FASTNAT_MODULE
+/*
+ * fast_br - fast-path layer-2 forward of an skb using the bridge FDB.
+ *
+ * Returns 1 when the skb was consumed (forwarded via
+ * br_dev_queue_push_xmit() or dropped as a loop), 0 when the caller must
+ * fall back to the normal bridge input path.
+ *
+ * NOTE(review): runs in the receive path but uses br_port_get_rtnl();
+ * presumably the caller guarantees the port cannot vanish here - confirm.
+ * Fix: dropped the unused locals 'head' and 'ntl_port_id'.
+ */
+int fast_br(struct sk_buff *skb)
+{
+ const unsigned char *dest = NULL;
+ struct net_bridge_fdb_entry *fdb;
+ struct net_bridge_port *p;
+ struct net_bridge *br;
+ u16 vid = 0;
+
+ if(!skb->dev)
+ {
+ return 0;
+ }
+
+ dest = eth_hdr(skb)->h_dest;
+
+ p = br_port_get_rtnl(skb->dev);
+ if (p == NULL || p->br == NULL)
+ {
+ return 0;
+ }
+
+ br = p->br;
+ /* resolves the VLAN id for the lookup; return value intentionally
+ * ignored - only vid is needed here */
+ br_should_learn(p, skb, &vid);
+
+ if((fdb = fdb_find_rcu(&(br->fdb_hash_tbl), dest,vid)) != NULL)
+ {
+ /* known unicast behind another forwarding port (or hairpin):
+ * forward directly */
+ if((!(test_bit(BR_FDB_LOCAL, &fdb->flags))) && fdb->dst && fdb->dst->dev &&
+ (((fdb->dst->flags & BR_HAIRPIN_MODE) || skb->dev != fdb->dst->dev) &&fdb->dst->state == BR_STATE_FORWARDING))
+ {
+ fast_tcpdump(skb);
+ if(fastnat_level == FAST_NET_DEVICE){
+ skb->dev->stats.rx_packets++;
+ skb->dev->stats.rx_bytes += skb->len;
+ }
+ skb->dev = fdb->dst->dev;
+ skb->isFastbr = 1;
+ fdb->updated = jiffies;
+ skb->now_location |= FASTBR_SUCC;
+ skb_rest_data_byproto(skb);
+
+ br_dev_queue_push_xmit(NULL, NULL, skb);
+ return 1;
+ }
+
+ /* destination learned on the ingress port itself: loop, drop */
+ if((!(test_bit(BR_FDB_LOCAL, &fdb->flags))) && fdb->dst && fdb->dst->dev &&
+ (skb->dev == fdb->dst->dev) && fdb->dst->state == BR_STATE_FORWARDING)
+ {
+ skbinfo_add(NULL,SKB_LOOP);
+ skb->dev->stats.rx_dropped++;
+ kfree_skb(skb);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(fast_br);
+
+
+
+/*
+ * getbrport_bydst - resolve the bridge port device behind MAC 'dest'.
+ *
+ * Looks 'dest' up in the bridge's FDB using the bridge's PVID. Returns
+ * the learned forwarding port's net_device, or 'dev' itself when 'dev'
+ * is not a bridge or no usable entry exists.
+ *
+ * Fix: removed the unused local 'p' (declared but never used).
+ */
+struct net_device *getbrport_bydst(struct net_device *dev,unsigned char *dest)
+{
+ struct net_bridge_fdb_entry *fdb;
+ struct net_bridge *br;
+ struct net_bridge_vlan_group *vg;
+ __u16 vid;
+#if FASTNAT_DEBUG
+ printk("getbrport_bydst() begine");
+#endif
+
+ if (dev == NULL || !(dev->priv_flags & IFF_EBRIDGE))
+ return dev;
+#if FASTNAT_DEBUG
+ printk("getbrport_bydst() 1");
+#endif
+ br = netdev_priv(dev);
+ vg = br_vlan_group_rcu(br);
+ vid = br_get_pvid(vg);
+#if FASTNAT_DEBUG
+ printk("getbrport_bydst() 2");
+#endif
+
+#if FASTNAT_DEBUG
+ printk("getbrport_bydst() 3");
+#endif
+ if((fdb = fdb_find_rcu(&(br->fdb_hash_tbl), dest,vid)) != NULL)
+ {
+ /* only use non-local entries on a forwarding port */
+ if((!(test_bit(BR_FDB_LOCAL, &fdb->flags))) && fdb->dst && fdb->dst->dev &&
+ (fdb->dst->state == BR_STATE_FORWARDING))
+ {
+ return fdb->dst->dev;
+ }
+ }
+ return dev;
+}
+
+extern void fast_tcpdump(struct sk_buff *skb);
+extern struct neigh_table arp_tbl;
+extern char default_route_name[IFNAMSIZ];
+/* Name of the bridge most recently seen on the LAN side; updated lazily
+ * by fast_fwd_ip4addr_conflict(). */
+char default_br_name[IFNAMSIZ] = {0};
+/* Fast-path relay between the default WAN device and the default bridge
+ * when LAN and WAN share/conflict on IPv4 addresses: packets destined to
+ * the bridge/WAN address are rewritten and pushed out the WAN device;
+ * replies from the WAN are resolved via the ARP table back to a bridge
+ * port. Returns 1 when the skb was consumed, 0 to fall back.
+ */
+int fast_fwd_ip4addr_conflict(struct sk_buff *skb)
+{
+ struct iphdr *iph = ip_hdr(skb);
+ __be32 saddr,daddr,wan_ip,br_ip=0,br_bcast=0;
+ struct net_device* in_dev = NULL;
+ struct net_device* out_dev = NULL;
+ struct ethhdr *eth;
+ struct net_bridge_port *p;
+ struct net_bridge *br = NULL;
+ struct net_device *default_route_dev;
+ struct net_device *default_br_dev;
+ struct in_device *ip_ptr;
+
+ if(iph->version != 4 || skb->indev == NULL)
+ {
+ return 0;
+ }
+ /* dev_get_by_name() takes a reference; every exit below must dev_put */
+ default_route_dev = dev_get_by_name(&init_net, default_route_name);
+ if(default_route_dev == NULL)
+ {
+ return 0;
+ }
+ ip_ptr = __in_dev_get_rtnl(default_route_dev);
+ if(ip_ptr && ip_ptr->ifa_list)
+ {
+ wan_ip = ip_ptr->ifa_list->ifa_local;
+ }
+ else
+ {
+ default_br_name[0] = 0;
+ dev_put(default_route_dev);
+ return 0;
+ }
+ in_dev = skb->indev;
+ saddr = iph->saddr;
+ daddr = iph->daddr;
+ /* remember which bridge the ingress port belongs to */
+ p = br_port_get_rtnl(in_dev);
+ if (p != NULL)
+ {
+ br = p->br;
+ if (br && br->dev && strncmp(br->dev->name, default_br_name, IFNAMSIZ-1))
+ {
+ strncpy(default_br_name, br->dev->name, IFNAMSIZ-1);
+ }
+ }
+ default_br_dev = dev_get_by_name(&init_net, default_br_name);
+ if(default_br_dev)
+ {
+ ip_ptr = __in_dev_get_rtnl(default_br_dev);
+ if(ip_ptr && ip_ptr->ifa_list)
+ {
+ br_ip = ip_ptr->ifa_list->ifa_local;
+ br_bcast = ip_ptr->ifa_list->ifa_broadcast;
+ }
+ }
+ else
+ {
+ dev_put(default_route_dev);
+ return 0;
+ }
+ /* LAN -> WAN direction: destination is the bridge/WAN address */
+ if(br && ((daddr == br_ip) || (daddr == br_bcast) || (daddr == wan_ip)))
+ {
+ if (IPPROTO_UDP == iph->protocol)
+ {
+ struct udphdr *udph = (struct udphdr *)(skb->data + iph->ihl * 4);
+ /* 0x4300/0x4400 are ports 67/68 (DHCP) as __be16 read on a
+ * little-endian CPU - NOTE(review): not endian-safe */
+ if(udph->source == 0x4300 || udph->source == 0x4400
+ || udph->dest == 0x4300 || udph->dest == 0x4400)
+ {
+ dev_put(default_route_dev);
+ dev_put(default_br_dev);
+ return 0;
+ }
+ }
+ out_dev = default_route_dev;
+ skb_push(skb, ETH_HLEN);
+ eth = (struct ethhdr*)(skb->data);
+ memcpy(eth->h_source, in_dev->dev_addr, ETH_ALEN);
+ memcpy(eth->h_dest, out_dev->dev_addr, ETH_ALEN);
+ fast_tcpdump(skb);
+ skb->dev = out_dev;
+ }
+ /* WAN -> LAN direction: resolve the LAN MAC via ARP, pick the port */
+ else if(in_dev == default_route_dev && ((saddr == br_ip) || (saddr == br_bcast) || (saddr == wan_ip)))
+ {
+ struct neighbour *neigh = neigh_lookup(&arp_tbl, &daddr, default_br_dev);
+ if(neigh)
+ {
+ out_dev = getbrport_bydst(default_br_dev,neigh->ha);
+ if(out_dev)
+ {
+ skb_push(skb, ETH_HLEN);
+ eth = (struct ethhdr*)(skb->data);
+ memcpy(eth->h_source, out_dev->dev_addr, ETH_ALEN);
+ memcpy(eth->h_dest, neigh->ha, ETH_ALEN);
+ }
+ neigh_release(neigh);
+ }
+ if(out_dev == NULL)
+ {
+ printk("@!@dev: br port not found\n");
+ dev_put(default_route_dev);
+ dev_put(default_br_dev);
+ return 0;
+ }
+ fast_tcpdump(skb);
+ skb->dev = out_dev;
+ }
+ else
+ {
+ dev_put(default_route_dev);
+ dev_put(default_br_dev);
+ return 0;
+ }
+
+ eth->h_proto = htons(ETH_P_IP);
+ skb->now_location |= FASTNAT_SUCC;
+ dev_queue_xmit(skb);
+
+ dev_put(default_route_dev);
+ dev_put(default_br_dev);
+ return 1;
+}
+
+/*
+ * fast_for_multicast - fast-path transmit of an IPv4 multicast skb that
+ * arrived on the default route device, out of bridge port 1.
+ *
+ * Builds an Ethernet header with the IP-multicast-mapped destination MAC
+ * and queues the skb on the port device. Returns 1 when the skb was
+ * queued, 0 when the caller must fall back to the normal path.
+ *
+ * Fix: dev_get_by_name() takes a reference; the early-exit path for a
+ * device that exists but is not a bridge leaked it (missing dev_put()).
+ */
+int fast_for_multicast(struct sk_buff *skb)
+{
+ if (skb->indev && !strncmp(skb->indev->name, default_route_name, IFNAMSIZ-1))
+ {
+ struct net_device* dev = NULL;
+ struct net_bridge *br;
+ struct net_bridge_port *p;
+
+ dev = dev_get_by_name(&init_net, default_br_name);
+ if (dev == NULL || !(dev->priv_flags & IFF_EBRIDGE))
+ {
+ printk("@!@dev: br not found\n");
+ /* release the reference when the lookup succeeded but the
+ * device is not a bridge */
+ if (dev)
+ dev_put(dev);
+ return 0;
+ }
+ br = (struct net_bridge *)netdev_priv(dev);
+ p = br_get_port(br, 1);
+ if(p && p->dev)
+ {
+ struct ethhdr *eth;
+ struct iphdr *iph = ip_hdr(skb);
+
+ skb_push(skb, ETH_HLEN);
+ eth = (struct ethhdr *)skb->data;
+ memcpy(eth->h_source, p->dev->dev_addr, ETH_ALEN);
+ ip_eth_mc_map(iph->daddr, eth->h_dest);
+ eth->h_proto = htons(ETH_P_IP);
+ skb->dev = p->dev;
+ skb->now_location |= FASTNAT_SUCC;
+ dev_queue_xmit(skb);
+ dev_put(dev);
+ return 1;
+ }
+ dev_put(dev);
+ }
+ return 0;
+}
+#endif
+
+/* Clear the OFFLOADED flag on every fdb entry learned on 'dev' for 'vid'
+ * (e.g. when the hardware offload for that port/VLAN is torn down).
+ */
+void br_fdb_clear_offload(const struct net_device *dev, u16 vid)
+{
+ struct net_bridge_fdb_entry *f;
+ struct net_bridge_port *p;
+
+ ASSERT_RTNL();
+
+ p = br_port_get_rtnl(dev);
+ if (!p)
+ return;
+
+ spin_lock_bh(&p->br->hash_lock);
+ hlist_for_each_entry(f, &p->br->fdb_list, fdb_node) {
+ if (f->dst == p && f->key.vlan_id == vid)
+ clear_bit(BR_FDB_OFFLOADED, &f->flags);
+ }
+ spin_unlock_bh(&p->br->hash_lock);
+}
+EXPORT_SYMBOL_GPL(br_fdb_clear_offload);
diff --git a/upstream/linux-5.10/net/core/SI/net_other.c b/upstream/linux-5.10/net/core/SI/net_other.c
new file mode 100755
index 0000000..a6748c7
--- /dev/null
+++ b/upstream/linux-5.10/net/core/SI/net_other.c
@@ -0,0 +1,1222 @@
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <asm/uaccess.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <linux/device.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <linux/jhash.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/icmp.h>
+#include <net/ip.h>
+#include "../../bridge/br_private.h"
+#include <net/arp.h>
+#include <net/SI/netioc_proc.h>
+#include <net/SI/netioctl.h>
+#include <net/SI/errno_track.h>
+#include <net/ipv6.h>
+#include <net/SI/net_other.h>
+#include <linux/if_arp.h>
+
+#ifdef NETLINK_UC
+#include "../../../../drivers/net/fast6/fast6.h"
+#include "../../../../drivers/net/fastnat/fastnat.h"
+#else
+#include <net/SI/fastnat.h>
+#include <net/SI/fast6.h>
+#endif
+#ifdef CONFIG_SPEED_OPT
+extern size_t skb_sys_pool_size(const void *ptr);
+#endif
+/* bitmask selecting which packet types check_packet_type() monitors */
+unsigned long check_pkt = 0;
+EXPORT_SYMBOL(check_pkt);
+int set_print_pkt = 0; /* switch: dump packet contents in net_print_packet() */
+
+/* records of packets entering/leaving the queue (filled elsewhere) */
+struct check_pkt_info skb_insert_info = {0};
+EXPORT_SYMBOL(skb_insert_info);
+struct check_pkt_info skb_unlink_info = {0};
+EXPORT_SYMBOL(skb_unlink_info);
+
+/* Skip the L2 header of @skb (incl. 802.1Q VLAN tags and PPPoE session
+ * headers) and return a pointer to the network-layer header; *protocol is
+ * set to the resolved ethertype in host order (ETH_P_IP or ETH_P_ARP).
+ * Returns NULL for any other protocol. The skb itself is not modified. */
+static unsigned char* skip_mac_header(struct sk_buff *skb, unsigned short *protocol)
+{
+ __be16 next_pro;
+ unsigned char *curr_ptr = NULL;
+
+ if(skb_mac_header_was_set(skb))
+ {
+ curr_ptr = skb_mac_header(skb);
+ curr_ptr += ETH_HLEN;
+ next_pro = *(__be16 *)(curr_ptr - 2); /* h_proto = last 2 bytes of the Ethernet header */
+ }
+ else
+ {
+ /* assumes skb->data points at the Ethernet header here -- TODO confirm */
+ curr_ptr = skb->data + ETH_HLEN;
+ next_pro = *(__be16 *)(curr_ptr - 2);
+ }
+
+again:
+ if (htons(ETH_P_IP) == next_pro || htons(ETH_P_ARP) == next_pro)
+ {
+ *protocol = ntohs(next_pro);
+ return curr_ptr;
+ }
+ //vlan: step over the 4-byte tag and re-read the inner ethertype
+ else if (next_pro == cpu_to_be16(ETH_P_8021Q))
+ {
+ curr_ptr += VLAN_HLEN;
+ next_pro = *((__be16 *)(curr_ptr - 2));
+ goto again;
+ }
+ //pppoe session: bytes 6..7 hold the PPP protocol (0x0021 IPv4 / 0x0057 IPv6)
+ else if (next_pro == htons(ETH_P_PPP_SES))
+ {
+ if (*(curr_ptr + 6) == 0x00 && (*(curr_ptr + 7) == 0x21 || *(curr_ptr + 7) == 0x57))
+ {
+ /* NOTE(review): 0x0057 (IPv6 over PPP) is also mapped to ETH_P_IP here -- confirm intended */
+ next_pro = htons(ETH_P_IP);
+ curr_ptr += PPPOE_HEADER_LEN;
+ goto again;
+ }
+ }
+ return NULL;
+}
+
+
+/*
+ * Walk the DHCP options in @data (@data_len bytes) and return a pointer to
+ * the value of option @code, or NULL when the option is absent or the
+ * option list is malformed/truncated. DHCP "option overload" (option 52)
+ * is NOT followed.
+ */
+unsigned char *dhcp_option_get(unsigned char *data, int data_len, int code)
+{
+ unsigned char *opt_ptr;
+ int len;
+
+ opt_ptr = data;
+ while (1) {
+ if (data_len <= 0)
+ return NULL;
+
+ /* PAD is a single byte with no length field */
+ if (opt_ptr[0] == DHCP_PADDING)
+ {
+ data_len--;
+ opt_ptr++;
+ continue;
+ }
+ if (opt_ptr[0] == DHCP_END)
+ return NULL;
+
+ /* need at least the 2-byte code+length header before reading opt_ptr[1] */
+ if (data_len < 2)
+ return NULL;
+
+ /* TLV: 1 byte code + 1 byte length + payload */
+ len = 2 + opt_ptr[1];
+ data_len -= len;
+ if (data_len < 0)
+ return NULL; /* truncated option: do not read past the buffer */
+
+ if (opt_ptr[0] == code)
+ return opt_ptr + 2;
+
+ opt_ptr += len;
+ }
+
+ return NULL;
+}
+
+
+/* Print the recorded per-packet log (protocol name, message type, jiffies). */
+void print_check_pkt_info(struct check_pkt_info *pkt_info, int num)
+{
+ int i = 0;
+
+ num = num > MAX_PKT_NUM ? MAX_PKT_NUM : num;
+ printk("\n%10s %10s %10s\n", "Protocol", "MsgType", "Time");
+ for(i = 0; i < num; i++)
+ {
+ printk("%10s %10d %10lu\n",
+ proto_str[pkt_info->info[i].proto_type],
+ pkt_info->info[i].msg_type,
+ pkt_info->info[i].time);
+ }
+}
+EXPORT_SYMBOL(print_check_pkt_info);
+
+/* Check whether the packet is one of the monitored types (ARP / DHCP /
+ * PING, selected by bits of check_pkt) and record the time. */
+/* On match *pkt_info receives the parsed protocol info and 1 is returned. */
+int check_packet_type(struct sk_buff *skb, struct pkt_info *pkt_info)
+{
+ struct iphdr *ip_hdr = NULL;
+ struct icmphdr *icmphdr = NULL;
+ struct udphdr *udp_hdr = NULL;
+ struct arphdr *arp_hdr = NULL;
+ unsigned char *data_ptr = NULL;
+ unsigned char *opt_ptr = NULL;
+ unsigned short data_len = 0;
+ unsigned short protocol = 0;
+
+ if(0 == check_pkt)
+ return 0;
+
+ memset(pkt_info, 0, sizeof(struct pkt_info));
+
+ /* skip the MAC header to reach the ARP/IPv4 header */
+ data_ptr = skip_mac_header(skb, &protocol);
+ if(NULL == data_ptr)
+ return 0;
+
+ if(ETH_P_ARP == protocol)
+ {
+ if(test_bit(PKT_TYPE_ARP_BIT, &check_pkt))
+ {
+ arp_hdr = (struct arphdr *)data_ptr;
+ pkt_info->proto_type = PROTO_TYPE_ARP;
+ pkt_info->msg_type = ntohs(arp_hdr->ar_op);
+ pkt_info->time = jiffies;
+ return 1;
+ }
+ }
+ else if(ETH_P_IP == protocol)
+ {
+ ip_hdr = (struct iphdr *)data_ptr;
+
+ /* for fragmented packets only the first fragment is inspected */
+ if(ntohs(ip_hdr->frag_off) & IP_OFFSET)
+ {
+ return 0;
+ }
+
+ data_len = ntohs(ip_hdr->tot_len);
+
+ switch(ip_hdr->protocol)
+ {
+ case IPPROTO_UDP:
+ udp_hdr = (struct udphdr *)((unsigned char *)ip_hdr + ip_hdr->ihl * 4);
+ if(test_bit(PKT_TYPE_DHCP_BIT, &check_pkt))
+ {
+ if((DHCP_CLIENT_PORT == ntohs(udp_hdr->source) && DHCP_SERVER_PORT == ntohs(udp_hdr->dest)) ||
+ (DHCP_CLIENT_PORT == ntohs(udp_hdr->dest) && DHCP_SERVER_PORT == ntohs(udp_hdr->source)))
+ {
+ /* skip the UDP header */
+ data_ptr = (unsigned char *)udp_hdr + 8;
+ /* skip the fixed 236-byte DHCP header */
+ data_ptr += 236;
+ /* skip the magic cookie */
+ data_ptr += 4;
+ data_len = data_len - ip_hdr->ihl * 4 - 8 - 236 - 4;
+
+ /* fetch the DHCP message-type option */
+ opt_ptr = dhcp_option_get(data_ptr, data_len, DHCP_MSG_TYPE);
+ if(opt_ptr)
+ pkt_info->msg_type = opt_ptr[0];
+
+ pkt_info->proto_type = PROTO_TYPE_DHCP;
+ pkt_info->time = jiffies;
+
+ return 1;
+ }
+ }
+ break;
+
+ case IPPROTO_TCP:
+ break;
+ case IPPROTO_ICMP:
+ icmphdr = (struct icmphdr *)((unsigned char *)ip_hdr + ip_hdr->ihl * 4);
+ if(test_bit(PKT_TYPE_PING_BIT, &check_pkt))
+ {
+ if(ICMP_ECHOREPLY == icmphdr->type || ICMP_ECHO == icmphdr->type)
+ {
+ pkt_info->proto_type = PROTO_TYPE_PING;
+ pkt_info->msg_type = icmphdr->type;
+ pkt_info->time = jiffies;
+ return 1;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(check_packet_type);
+
+
+#ifdef CONFIG_NETCTL
+
+/* Hex-dump a packet when set_print_pkt is enabled (rate limited).
+ * @flag: 0 = receive path, 1 = send path, other = generic dump.
+ * NOTE(review): per-byte printk without KERN_CONT may wrap lines on 5.10 -- confirm acceptable. */
+void net_print_packet(unsigned char *data, unsigned int len, int flag)
+{
+ int i = 0;
+
+ if(set_print_pkt && net_ratelimit())
+ {
+ if(0 == flag)
+ printk("\nrecv packet:\n");
+ else if(1 == flag)
+ printk("\nsend packet:\n");
+ else
+ printk("\nprint packet:\n");
+
+ for(i = 0; i < len; i++)
+ {
+ if(i % 16 == 0)
+ printk("\n"); /* 16 bytes per row */
+ printk("%2x ", data[i]);
+ }
+ }
+}
+
+
+/* Trace rtnetlink notifications: log netlink message type and group. */
+void track_netlink(struct sk_buff *skb,u32 group)
+{
+ struct nlmsghdr *nlh;
+
+ nlh = (struct nlmsghdr*)(skb->data);
+ net_run_track(PRT_RTNL_SEND,"rtnetlink_send,msg_type =%d;group = %d",nlh->nlmsg_type,group);
+}
+
+
+/* Compare @ha against the MAC of every net_device and panic on a match.
+ * Scenarios that trigger this:
+ * 1. looped-back traffic
+ * 2. normal traffic where two CPEs share the same MAC address
+ */
+void check_macaddr_only(unsigned char *ha, unsigned char ha_len)
+{
+ struct net_device *dev;
+ unsigned char addr_len = 0;
+ unsigned char addr[ETH_ALEN] = {0};
+
+ if(0 == addr_check)
+ {
+ return; /* feature disabled */
+ }
+
+ read_lock(&dev_base_lock);
+
+ for_each_netdev(&init_net, dev)
+ {
+ if(dev->addr_len != ha_len)
+ {
+ //reason: sit0 defaults to addr_len=4 with an all-zero dev_addr; when another
+ //device's mac[0:3] is also all zero this would falsely panic, so skip it.
+ continue;
+ }
+ //addr_len = dev->addr_len > ha_len ? ha_len : dev->addr_len;
+ addr_len = ha_len;
+ if((addr_len > 0) && !memcmp(dev->dev_addr, ha, addr_len))
+ {
+ addr_len = addr_len > ETH_ALEN ? ETH_ALEN : addr_len;
+ memcpy(addr, ha, addr_len);
+
+ /* panic() does not return; the unlock below is unreachable by design */
+ panic("check_macaddr_only: mac address of pc is same as the device, dev name: %s, mac %x:%x:%x:%x:%x:%x\n",
+ dev->name, addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+ read_unlock(&dev_base_lock);
+ return;
+ }
+ }
+
+ read_unlock(&dev_base_lock);
+
+ return;
+}
+
+
+/* Account one object of @skbinfo_type; for data buffers also add the byte
+ * count. May trigger the resource monitor / panic when tracking is armed. */
+void skbinfo_add(unsigned char *addr,unsigned int skbinfo_type)
+{
+ struct sk_buff *skb;
+ skbinfo_dbg[skbinfo_type] ++;
+/*
+ if(skb_max_panic && skbinfo_type == SKB_TYPE_ALL)
+ {
+ //not protected by a spinlock here; locking must be added before enabling this
+ if(skbinfo_dbg[SKB_TYPE_ALL] > skb_max_panic)
+ panic("too much skb is alloced,pleasw check data_leak ");
+ }
+*/
+ if(skbinfo_type == SKB_TYPE_DATA)
+ {
+ skb = (struct sk_buff *)addr;
+ //if(skb->isExtern == 0)
+ // skbinfo_dbg[SKB_DATA_BYTES] += ksize(skb->head);
+ //else
+ skbinfo_dbg[SKB_DATA_BYTES] += skb->data - skb->head + skb->len;
+ }
+ /* low 7 bits of skb_info_track select the monitored type; bit 7 = panic */
+ if(skbinfo_type==(skb_info_track&0X7F))
+ {
+ printk("net resource monitor!!!");
+ dump_stack();
+ if(skb_info_track&0X80)
+ panic("net team dbg panic !!!");
+ }
+
+}
+
+
+/* Release accounting for one object of @skbinfo_type. */
+void skbinfo_del(unsigned char *addr,unsigned int skbinfo_type)
+{
+ struct sk_buff *skb;
+ skbinfo_dbg[skbinfo_type] --;
+
+ if(skbinfo_type == SKB_TYPE_DATA)
+ {
+ skb = (struct sk_buff *)addr;
+ //if(skb->isExtern == 0)
+ // skbinfo_dbg[SKB_DATA_BYTES] -= ksize(skb->head);
+ //else
+ skbinfo_dbg[SKB_DATA_BYTES] -= skb->data - skb->head + skb->len;
+ }
+
+}
+
+/* Account one runtime object of @info_type; may trigger the monitor. */
+void netruninfo_add(unsigned char *addr,unsigned int info_type)
+{
+ netruninfo_dbg[info_type] ++;
+ if(info_type==(net_info_track&0X7F))
+ {
+ printk("net resource monitor!!!");
+ dump_stack();
+ if(net_info_track&0X80)
+ panic("net team dbg panic !!!");
+ }
+}
+
+/* Release accounting for one runtime object of @info_type. */
+void netruninfo_del(unsigned char *addr,unsigned int info_type)
+{
+ netruninfo_dbg[info_type] --;
+}
+/* Scan the first ~100 int-aligned offsets of @start1 for the @len-byte
+ * pattern at @start2. Returns 0 on match, -1 otherwise.
+ * NOTE(review): no bound on start1's size is enforced -- caller must
+ * guarantee at least 100*sizeof(int)+len readable bytes. */
+static int filter(void *start1, void *start2, unsigned int len)
+{
+ int i = 100, ret = -1;
+ int *p = start1, *q = start2;
+
+ do {
+ if (!memcmp(p, q, len)) {
+ ret = 0;
+ break;
+ }
+ ++p;
+ }while(i--);
+
+ return ret;
+}
+
+//Validate skb->network_header consistency and return a pointer to a usable
+//IP header (or NULL). *mark encodes which header pointers were consistent;
+//negative values flag suspicious layouts, -4 = only the transport header is valid.
+unsigned char * check_skb_for_dump(struct sk_buff *skb, int *mark)
+{
+ unsigned char * mac_head, *net_head, *tsp_head;
+ //dri->net 0; net->dri 1;
+ mac_head = skb_mac_head(skb);
+ net_head = skb_network_head(skb);
+ tsp_head = skb_transport_head(skb);
+ *mark = 0;
+ if (!skb->dev){
+ // printk("skb->dev = NULL err in %s.\n", __func__);
+ return NULL;
+ }
+ if (skb->data == NULL){
+ printk("skb->data = NULL err in %s.\n", __func__);
+ return NULL;
+ }
+ if (skb->data == mac_head){
+ /* data at L2: IP header sits hard_header_len past the MAC header */
+ if(net_head && net_head < (mac_head + skb->dev->hard_header_len )){
+ *mark = -2;
+ return mac_head + skb->dev->hard_header_len;
+ }
+ *mark = 2;
+ return mac_head + skb->dev->hard_header_len;
+ }
+ else if(skb->data == net_head){
+ if(mac_head && net_head < (mac_head + skb->dev->hard_header_len )){
+ *mark = -23;
+ return mac_head + skb->dev->hard_header_len;
+ }
+ /* 20 = minimum IPv4 header length expected between net and transport */
+ if(tsp_head && net_head < (tsp_head - 20)){
+ *mark = -3;
+ return net_head;
+ }
+ *mark = 3;
+ return net_head;
+ }
+ else if(skb->data == tsp_head ){
+ if((!net_head) || (net_head && net_head < (tsp_head - 20))){
+ *mark = -4;
+ return NULL;
+ }
+ *mark = 4;
+ return net_head;
+ }
+ else{
+ // printk("unexpected err in %s\n", __func__);
+ return NULL;
+ }
+
+}
+
+extern struct nf_conntrack_tuple tuple_info;
+extern int getconn_type;
+
+//At skb_release_data time, match the skb contents against the user-configured
+//conntrack tuple filter (tuple_info) and dump the freeing call stack on a hit.
+//Only active when getconn_type is 8 or 10.
+void dump_net_stack(struct sk_buff *skb, unsigned int offset)
+{
+ int mark = 0;
+ struct iphdr *iphv4;
+ struct ipv6hdr *iphv6;
+ unsigned char *tsp_start = NULL;
+ unsigned char *iph;
+
+ /* declarations moved above this check: the original had declarations
+ * after a statement, which breaks -Wdeclaration-after-statement builds */
+ if(getconn_type != 8 && getconn_type != 10)
+ return;
+
+ iph = check_skb_for_dump(skb, &mark);
+
+ if(getconn_type == 10 && iph){
+ if (skb->now_location & FASTNAT_SUCC){
+ /* assumes a 20-byte IPv4 header without options -- TODO confirm */
+ struct tcphdr *th = (struct tcphdr *)(iph + 20);
+ printk("skb->len = %d now_location = %d\n", skb->len, skb->now_location);
+ /* fix: u32/u16 values were printed with %lu/%ld */
+ printk("th->seq=%u, th->sport=%u, th->dport=%u\n", ntohl(th->seq), ntohs(th->source), ntohs(th->dest));
+ goto out;
+ }
+ return;
+ }
+ if(iph){
+ if((iph[0] & 0xf0) == 0x40){
+ iphv4 = (struct iphdr*)iph;
+ if(tuple_info.dst.protonum && tuple_info.dst.protonum != iphv4->protocol){
+ return;
+ }
+ if(tuple_info.dst.u3.ip && memcmp(&tuple_info.dst.u3.ip, &iphv4->daddr, 4) != 0){
+ return;
+ }
+ if(tuple_info.src.u3.ip && memcmp(&tuple_info.src.u3.ip, &iphv4->saddr, 4) != 0){
+ return;
+ }
+ tsp_start = (unsigned char*)iphv4 + (iphv4->ihl << 2);
+ }
+ else if((iph[0] & 0xf0) == 0x60){
+ iphv6 = (struct ipv6hdr*)iph;
+ if(tuple_info.dst.protonum && tuple_info.dst.protonum != iphv6->nexthdr){
+ return;
+ }
+ if(tuple_info.dst.u3.ip && memcmp(&tuple_info.dst.u3.in6, &iphv6->daddr, 16) != 0){
+ return;
+ }
+ if(tuple_info.src.u3.ip && memcmp(&tuple_info.src.u3.in6, &iphv6->saddr, 16) != 0){
+ return;
+ }
+ tsp_start = (unsigned char*)iphv6 + 40;
+ }
+ }
+ //when only the transport header is valid at free time, compare ports only
+ if(mark == -4){
+ tsp_start = skb->data;
+ }
+ if(tsp_start == NULL)
+ return;
+
+ if(tuple_info.src.u.all && memcmp(&tuple_info.src.u.all, tsp_start, 2) != 0){
+ return;
+ }
+ if(tuple_info.dst.u.all && memcmp(&tuple_info.dst.u.all, tsp_start + 2, 2) != 0) {
+ return;
+ }
+
+ printk("free skb match mark = %d:\n", mark);
+ if(tuple_info.dst.protonum && mark != -4)
+ printk("protonum = %d ",tuple_info.dst.protonum);
+ if(tuple_info.src.u3.ip && mark != -4){
+ if(iph && (iph[0] & 0xf0) == 0x40){
+ printk("sip: %08x ", ntohl(tuple_info.src.u3.ip));
+ }else if(iph && (iph[0] & 0xf0) == 0x60){
+ printk("sip: %x:%x:%x:%x:%x:%x:%x:%x ", ntohs(tuple_info.src.u3.in6.s6_addr16[0]), ntohs(tuple_info.src.u3.in6.s6_addr16[1]), ntohs(tuple_info.src.u3.in6.s6_addr16[2]), ntohs(tuple_info.src.u3.in6.s6_addr16[3]),
+ ntohs(tuple_info.src.u3.in6.s6_addr16[4]), ntohs(tuple_info.src.u3.in6.s6_addr16[5]), ntohs(tuple_info.src.u3.in6.s6_addr16[6]), ntohs(tuple_info.src.u3.in6.s6_addr16[7]));
+ }
+ }
+ if(tuple_info.src.u.all){
+ printk("sport : %d ", ntohs(tuple_info.src.u.all));
+ }
+ if(tuple_info.dst.u3.ip && mark != -4){
+ if(iph && (iph[0] & 0xf0) == 0x40){
+ /* fix: "%dip: %08x " had two conversions but one argument */
+ printk("dip: %08x ", ntohl(tuple_info.dst.u3.ip));
+ }else if(iph && (iph[0] & 0xf0) == 0x60){
+ printk("dip: %x:%x:%x:%x:%x:%x:%x:%x ", ntohs(tuple_info.dst.u3.in6.s6_addr16[0]), ntohs(tuple_info.dst.u3.in6.s6_addr16[1]), ntohs(tuple_info.dst.u3.in6.s6_addr16[2]), ntohs(tuple_info.dst.u3.in6.s6_addr16[3]),
+ ntohs(tuple_info.dst.u3.in6.s6_addr16[4]), ntohs(tuple_info.dst.u3.in6.s6_addr16[5]), ntohs(tuple_info.dst.u3.in6.s6_addr16[6]), ntohs(tuple_info.dst.u3.in6.s6_addr16[7]));
+ }
+ }
+ if(tuple_info.dst.u.all) {
+ printk("dport : %d ", ntohs(tuple_info.dst.u.all));
+ }
+ printk("\n");
+ // if (skb_dump_len)
+ // if(!filter((skb->head + offset), skb_dump_str, skb_dump_len))
+out:
+ dump_stack();
+}
+
+
+
+/***********************************************************************************************************/
+/*ÒÔÏÂÎªÍøÂç×éÐÂÔöµÄ½Ó¿Ú£¬ÓÉÓÚñîºÏÇ¿£¬²»ÄܶÀÁ¢³ÉÔ´Îļþ£¬µ«Ðè×¢ÊÍÇå³þ*/
+/***********************************************************************************************************/
+
+extern int set_tcpdump;
+extern char br_name[];
+
+//Return the IP header length implied by the first payload byte after the
+//Ethernet header: 20 for IPv4, 40 for IPv6, 20 as the default.
+//Requires skb->mac_header to be valid already.
+unsigned int get_network_head_len(struct sk_buff *skb)
+{
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+ unsigned char *buf = (unsigned char *)skb->head + skb->mac_header + ETH_HLEN;
+#else
+ unsigned char *buf = (unsigned char *)skb->mac_header + ETH_HLEN;
+#endif
+
+ if ((((unsigned)buf[0]) & 0xF0) == 0x40)
+ return 20; //IPv4: fixed 20-byte header assumed (options not counted)
+
+ if ((((unsigned)buf[0]) & 0xF0) == 0x60)
+ return 40; //IPv6: fixed 40-byte header
+
+ return 20; //default to IPv4 handling
+}
+
+//Modelled on __netif_receive_skb: deliver the skb to all taps (tcpdump) on
+//the receive path so custom paths (e.g. fastnat ingress) can be captured.
+//Use tcpdump normally; this costs performance, hence the set_tcpdump proc gate.
+void tcpdumpin_sq(struct sk_buff *skb)
+{
+ unsigned char *data_priv;
+ struct packet_type *ptype;
+ __be16 type;
+ unsigned int len_priv;
+ int dev_flag = 0;
+
+ if (!(set_tcpdump & 1))
+ return; /* bit 0 enables ingress capture */
+
+ if (list_empty(&ptype_all))
+ {
+ return; /* no taps registered */
+ }
+
+ //save the current frame state; restored after capture
+ data_priv = skb->data;
+ len_priv = skb->len;
+
+ //point len/data at the MAC header, adjusting to whichever headers are set
+ if (skb->mac_header == 0 || skb->mac_header == ~0U)
+ {
+ skb_reset_mac_header(skb);
+ skb->network_header = skb->mac_header + ETH_HLEN;
+ skb->transport_header = skb->network_header + get_network_head_len(skb);
+ }
+ else if (skb->network_header == 0 || skb->network_header == ~0U)
+ {
+ skb->network_header = skb->mac_header + ETH_HLEN;
+ skb->transport_header = skb->network_header + get_network_head_len(skb);
+ skb_reset_data_bymachd(skb);
+ skb_push(skb, ETH_HLEN);
+ }
+ else if (skb->transport_header == 0 || skb->transport_header == ~0U)
+ {
+ skb->network_header = skb->mac_header + ETH_HLEN;
+ skb->transport_header = skb->network_header + get_network_head_len(skb);
+ skb_reset_data_bymachd(skb);
+ skb_push(skb, ETH_HLEN);
+ }
+
+ //when dev is NULL, remember to clear it again so every tap sees the packet
+ if (skb->dev == NULL)
+ {
+ dev_flag = 1;
+ }
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ptype, &ptype_all, list) {
+ //ingress skb is a complete MAC frame; no offset adjustment is needed
+ if ((!ptype->dev || !skb->dev || ptype->dev == skb->dev)) // && (ptype->func == packet_rcv)
+ {
+ //packet_rcv requires skb->dev != NULL, so fake one here; extra packets may be captured
+ if(skb->dev == NULL && ptype->dev)
+ skb->dev = ptype->dev;
+ else if(skb->dev == NULL)
+ skb->dev = __dev_get_by_name(&init_net, br_name);
+
+ atomic_inc(&skb->users);
+ //track_add(skb, 0, USER_INFO, 0);
+ ptype->func(skb, skb->dev, ptype, skb->dev);
+ if(dev_flag == 1)
+ skb->dev = NULL;
+ }
+ }
+ rcu_read_unlock();
+
+ //restore the skb's initial state
+ skb->data = data_priv;
+ skb->len = len_priv;
+ if(dev_flag == 1)
+ skb->dev = NULL;
+}
+
+//Modelled on dev_queue_xmit_nit: deliver a clone of the skb to all taps on
+//the transmit path so custom paths (e.g. fastnat egress) can be captured.
+//Use tcpdump normally; this costs performance, hence the set_tcpdump proc gate.
+void tcpdumpout_sq(struct sk_buff *skb)
+{
+ struct packet_type *ptype;
+ struct sk_buff *skb2 = NULL;
+ int dev_flag = 0;
+ sk_buff_data_t transport_header;
+ sk_buff_data_t network_header;
+ sk_buff_data_t mac_header;
+ unsigned char *data_priv;
+ unsigned int len_priv;
+
+ if (!(set_tcpdump & 2))
+ return; /* bit 1 enables egress capture */
+
+ if (list_empty(&ptype_all))
+ {
+ return; /* no taps registered */
+ }
+
+ //save the current frame state; restored after capture
+ data_priv = skb->data;
+ len_priv = skb->len;
+ transport_header = skb->transport_header;
+ network_header = skb->network_header;
+ mac_header = skb->mac_header;
+
+ //point len/data at the MAC header, adjusting to whichever headers are set
+ if (skb->mac_header == 0 || skb->mac_header == ~0U)
+ {
+ skb_reset_mac_header(skb);
+ skb->network_header = skb->mac_header + ETH_HLEN;
+ skb->transport_header = skb->network_header + get_network_head_len(skb);
+ }
+ else if (skb->network_header == 0 || skb->network_header == ~0U)
+ {
+ skb->network_header = skb->mac_header + ETH_HLEN;
+ skb->transport_header = skb->network_header + get_network_head_len(skb);
+ skb_reset_data_bymachd(skb);
+ skb_push(skb, ETH_HLEN);
+ }
+ else if (skb->transport_header == 0 || skb->transport_header == ~0U)
+ {
+ skb->transport_header = skb->network_header + get_network_head_len(skb);
+ skb_reset_data_bymachd(skb);
+ skb_push(skb, ETH_HLEN);
+ }
+
+ //at the TCP layer the egress dev is not assigned yet and is forced to br0
+ //below, so capturing egress packets requires an unrestricted (any-dev) tap
+ if(skb->dev == NULL)
+ {
+ dev_flag = 1;
+ }
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ptype, &ptype_all, list) {
+ //at the TCP/IP layers data does not point at the MAC header, but the
+ //offsets above make the full frame visible; PPPoE frames remain partial
+ if ((ptype->dev == skb->dev || !ptype->dev || !skb->dev) && (ptype->af_packet_priv != NULL) &&
+ (struct sock *)ptype->af_packet_priv != skb->sk) // && (ptype->func == packet_rcv)
+ {
+ //packet_rcv requires skb->dev != NULL, so fake one here; extra packets may be captured
+ if(skb->dev == NULL && ptype->dev)
+ skb->dev = ptype->dev;
+ else if(skb->dev == NULL)
+ skb->dev = __dev_get_by_name(&init_net, br_name);
+
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (!skb2)
+ break;
+ ptype->func(skb2, skb->dev, ptype, skb->dev);
+
+ if (dev_flag == 1)
+ skb->dev = NULL;
+ }
+ }
+ rcu_read_unlock();
+
+ //restore the skb's initial state
+ if(dev_flag == 1)
+ skb->dev = NULL;
+ skb->transport_header = transport_header;
+ skb->network_header = network_header;
+ skb->mac_header = mac_header;
+ skb->data = data_priv;
+ skb->len = len_priv;
+}
+
+
+
+
+/* Collect up to 20 neighbour entries from @tbl (arp/nd table) whose output
+ * device is @dev, filling @neigh_info with IP/MAC pairs. */
+void get_neigh_bydev(struct neigh_table *tbl, struct net_device *dev, struct dev_neigh_info *neigh_info)
+{
+ int i;
+ int len;
+ unsigned int neigh_num = 0;
+ struct neigh_hash_table *nht;
+
+ if(tbl->family != AF_INET && tbl->family != AF_INET6)
+ return;
+
+ rcu_read_lock_bh();
+ nht = rcu_dereference_bh(tbl->nht);
+
+ for(i = 0; i < (1 << nht->hash_shift); i++)
+ {
+ struct neighbour *neigh;
+
+ for(neigh = rcu_dereference_bh(nht->hash_buckets[i]); neigh != NULL; neigh = rcu_dereference_bh(neigh->next))
+ {
+ if(neigh->dev == dev)
+ {
+ /* clamp key length to the info struct's address buffer */
+ len = tbl->key_len > MAX_IPADDR_LEN ? MAX_IPADDR_LEN : tbl->key_len;
+
+ memcpy(neigh_info->neigh_nod[neigh_num].ip_addr, neigh->primary_key, len);
+ neigh_info->neigh_nod[neigh_num].ip_len = len;
+ memcpy(neigh_info->neigh_nod[neigh_num].mac_addr, neigh->ha, MAX_MACADDR_LEN);
+ neigh_num++;
+ if(neigh_num >= 20) /* capacity of neigh_nod[] -- presumably 20; verify */
+ goto end;
+ }
+ }
+ }
+
+end:
+ neigh_info->num = neigh_num;
+
+ rcu_read_unlock_bh();
+
+ return;
+}
+
+/* Look up the neighbour with hardware address @addr in @tbl and copy its
+ * IP and MAC into @info. First match wins.
+ * NOTE(review): unlike get_neigh_bydev() this dereferences tbl->nht without
+ * taking rcu_read_lock_bh(); callers appear to rely on their own locking --
+ * confirm this is safe at every call site. */
+void get_neigh_bymac(struct neigh_table *tbl,mac_addr *addr, struct neigh_info *info)
+{
+ int i;
+ int len;
+ struct neigh_hash_table *nht;
+
+ if(tbl->family != AF_INET && tbl->family != AF_INET6)
+ return;
+
+ nht = rcu_dereference_bh(tbl->nht);
+
+ for(i = 0; i < (1 << nht->hash_shift); i++)
+ {
+ struct neighbour *neigh;
+
+ for(neigh = rcu_dereference_bh(nht->hash_buckets[i]); neigh != NULL; neigh = rcu_dereference_bh(neigh->next))
+ {
+ if(!compare_ether_addr(neigh->ha,addr->addr))
+ {
+ /* clamp key length to the info struct's address buffer */
+ len = tbl->key_len > MAX_IPADDR_LEN ? MAX_IPADDR_LEN : tbl->key_len;
+
+ memcpy(info->ip_addr, neigh->primary_key, len);
+ info->ip_len = len;
+ memcpy(info->mac_addr, neigh->ha, MAX_MACADDR_LEN);
+ return;
+ }
+ }
+ }
+}
+
+//»ñȡij2²ãÇŵãÉ豸µÄÁÚ¾ÓÁбíÐÅÏ¢£¬Ïȸù¾Ý³ö¿ÚdevÕÒµ½ËùÓеÄÔ¶³ÌÁÚ¾ÓMACµØÖ·£¬ÔÙ¸ù¾ÝMACµØÖ·ÕÒµ½arp_tblÖеÄÁÚ¾ÓIPµØÖ·µÈÐÅÏ¢
+void getneigh_ofdev(struct net_device *dst_dev,struct dev_neigh_info *neigh_info)
+{
+ int i;
+ int neigh_num = 0;
+ struct net_device *br_dev;
+ struct net_bridge *br;
+
+ br_dev = dev_get_by_name(&init_net, br_name);
+ br = netdev_priv(br_dev);
+
+ spin_lock_bh(&br->hash_lock);
+ for (i = 0; i < BR_HASH_SIZE; i++) {
+ struct hlist_node *h;
+ hlist_for_each(h, &br->hash[i]) {
+ struct net_bridge_fdb_entry *f;
+
+ f = hlist_entry(h, struct net_bridge_fdb_entry, hlist);
+ if (f->dst && f->dst->dev == dst_dev && !(test_bit(0, &f->flags))) {//BR_FDB_LOCAL==0
+ get_neigh_bymac(&arp_tbl, &f->addr,neigh_info->neigh_nod+neigh_num);
+ neigh_num++;
+
+ }
+ }
+ }
+ spin_unlock_bh(&br->hash_lock);
+ neigh_info->num = neigh_num;
+}
+
+/* For every bridge device in @dev_info, re-attribute its neighbour entries
+ * to the concrete bridge port (found via the FDB) that each neighbour MAC
+ * sits behind, appending them to that port's own neighbour list. */
+void update_brport_info(struct devlist_info *dev_info){
+ int i = 0, j = 0, k = 0;
+ struct net_device *br_dev;
+ struct net_device *temp_dev;
+ struct dev_neigh_info *temp_neigh;
+ int temp_count ;
+ for(i = 0; i < dev_info->num; i++){
+ if(dev_info->info[i].dev_layer == BR_DEV){
+ br_dev = dev_get_by_name(&init_net, dev_info->info[i].name);
+ if(!br_dev){
+ /* fix: br_dev was previously dereferenced before any NULL check
+ * (device may have vanished between enumeration and lookup) */
+ continue;
+ }
+ for(j = 0; j < dev_info->info[i].dev_neigh.num; j++){
+ temp_dev = getbrport_bydst(br_dev, dev_info->info[i].dev_neigh.neigh_nod[j].mac_addr);
+ if(!temp_dev || temp_dev->ifindex == br_dev->ifindex){
+ printk("temp_dev error!!!\n");
+ continue;
+ }
+ /* find the matching entry for the port device and append */
+ for(k = 0 ; k < dev_info->num; k++){
+ if(strcmp(dev_info->info[k].name, temp_dev->name) == 0){
+ temp_neigh = &(dev_info->info[k].dev_neigh);
+ if(temp_neigh->num >= 20){
+ printk("dev=%s , neigh info is full!\n", temp_dev->name);
+ break;
+ }
+ temp_count = temp_neigh->num;
+ memcpy(temp_neigh->neigh_nod[temp_count].ip_addr, dev_info->info[i].dev_neigh.neigh_nod[j].ip_addr, dev_info->info[i].dev_neigh.neigh_nod[j].ip_len);
+ temp_neigh->neigh_nod[temp_count].ip_len = dev_info->info[i].dev_neigh.neigh_nod[j].ip_len;
+ memcpy(temp_neigh->neigh_nod[temp_count].mac_addr, dev_info->info[i].dev_neigh.neigh_nod[j].mac_addr, MAX_MACADDR_LEN);
+ temp_count ++;
+ temp_neigh->num = temp_count;
+ break;
+ }
+ }
+ }
+ dev_put(br_dev);
+ }
+ }
+}
+
+/* Gather information about all UP devices in init_net -- name, IPv4 address,
+ * MAC (with sanity classification), layer type and neighbour list -- and
+ * copy the result to the userspace buffer at @arg (ioctl helper). */
+int get_devlist_info(unsigned long arg)
+{
+ struct devlist_info *dev_info;
+ struct net_device *dev;
+ struct net_device *temp_dev;
+ struct dev_neigh_info *temp_neigh;
+ unsigned int temp_count = 0;
+ unsigned int dev_num = 0;
+
+ dev_info=(struct devlist_info*)kzalloc(sizeof(struct devlist_info), GFP_KERNEL);
+ if(!dev_info)
+ return -EFAULT;
+
+ read_lock(&dev_base_lock);
+
+ for_each_netdev(&init_net, dev)
+ {
+ if(dev->flags & IFF_UP && strcmp(dev->name, "lo") != 0)
+ {
+ //record this local interface's identity
+ strcpy(dev_info->info[dev_num].name, dev->name);
+ if(dev->ip_ptr && dev->ip_ptr->ifa_list)
+ dev_info->info[dev_num].ipv4_addr = dev->ip_ptr->ifa_list->ifa_address;
+ if(dev->header_ops && dev->dev_addr){
+ memcpy(dev_info->info[dev_num].mac_addr, dev->dev_addr, dev->addr_len);
+ if(is_zero_ether_addr(dev->dev_addr))
+ dev_info->info[dev_num].mac_errtype = ZERO_ADDRERR;
+ else if(is_broadcast_ether_addr(dev->dev_addr))
+ dev_info->info[dev_num].mac_errtype = BROADCAST_ADDRERR;
+ else if(is_multicast_ether_addr(dev->dev_addr))
+ dev_info->info[dev_num].mac_errtype = MULTICAST_ADDRERR;
+ }
+
+ //below: refresh the remote-neighbour list attached to this interface
+ //for a bridge port: remote MACs come from the bridge FDB, then arp_tbl maps MAC -> IP
+ if(dev->priv_flags & IFF_BRIDGE_PORT){
+ dev_info->info[dev_num].dev_layer = L2_DEV;
+ dev_info->info[dev_num].dev_neigh.num = 0;
+ // getneigh_ofdev(dev,&(dev_info->info[dev_num].dev_neigh));
+ }
+ //br0 is the special gateway device: take its neighbour list directly
+ else if(dev->priv_flags & IFF_EBRIDGE){
+ dev_info->info[dev_num].dev_layer = BR_DEV;
+ get_neigh_bydev(&arp_tbl, dev,&(dev_info->info[dev_num].dev_neigh));
+ }
+ //ordinary L3 device: take its neighbour list from arp_tbl
+ else{
+ //skip interfaces that do not do ARP
+ if(!(dev->flags & IFF_NOARP))
+ get_neigh_bydev(&arp_tbl, dev,&(dev_info->info[dev_num].dev_neigh));
+ dev_info->info[dev_num].dev_layer = L3_DEV;
+ }
+ dev_num++;
+ if(dev_num >= MAX_DEV_NUM)
+ {
+ break; /* info[] is full */
+ }
+ }
+ }
+ dev_info->num = dev_num;
+ update_brport_info(dev_info);
+ read_unlock(&dev_base_lock);
+
+ if (copy_to_user((char *)arg, dev_info, sizeof(struct devlist_info)))
+ {
+ kfree(dev_info);
+ return -EFAULT;
+ }
+ kfree(dev_info);
+
+ return 0;
+}
+
+extern wait_queue_head_t skb_wait_queue;
+//extern atomic_t skb_used;
+extern atomic_t skb_tops;
+extern atomic_t skb_fromps;
+//track allocation of an skb struct, including externally supplied data
+void skb_alloc_track(struct sk_buff *skb)
+{
+#ifndef CONFIG_SPEED_OPT
+ netslab_inc(SKB_SLAB);
+ //track_add(skb, 0, SKB_INFO, skb->truesize);
+ skbinfo_add(skb,SKB_TYPE_ALL);
+#endif
+// atomic_inc(&skb_used);
+}
+//track free of an skb struct, including externally supplied data
+extern wait_queue_head_t skb_wait_queue;
+void skb_free_track(struct sk_buff *skb)
+{
+#ifndef CONFIG_SPEED_OPT
+ track_del(skb, 0, SKB_INFO);
+ skbinfo_del(skb,SKB_TYPE_ALL);
+ netslab_dec(SKB_SLAB);
+#endif
+ //2017.6.3 add by linxu set a limit for skb
+// atomic_dec(&skb_used);
+ /* wake anyone throttled waiting for an skb to be freed */
+ if(waitqueue_active(&skb_wait_queue))
+ {
+ wake_up(&skb_wait_queue);
+ }
+}
+//track allocation of slab-backed skb->data, excluding external PSBUF
+void skbdata_alloc_track(struct sk_buff *skb)
+{
+#ifndef CONFIG_SPEED_OPT
+ skbinfo_add((unsigned char *)skb,SKB_TYPE_DATA);
+ //track_add(skb->head, 0, DATA_INFO, skb->len);
+#endif
+}
+//track free of slab-backed skb->data, excluding external PSBUF
+void skbdata_free_track(struct sk_buff *skb)
+{
+#ifndef CONFIG_SPEED_OPT
+ track_del(skb->head, 0, DATA_INFO);
+ skbinfo_del(skb,SKB_TYPE_DATA);
+#endif
+}
+
+//track allocation of data handed in from the external (CP) side
+void fromext_alloc_track(struct sk_buff *skb)
+{
+#ifndef CONFIG_SPEED_OPT
+ //track_add(skb, 0, DATA_INFO, skb->len);
+ skbinfo_add((unsigned char *)skb,SKB_TYPE_FROMCP);
+#endif
+ atomic_inc(&skb_fromps);
+}
+//track free of data handed in from the external (CP) side
+void fromext_free_track(struct sk_buff *skb)
+{
+#ifndef CONFIG_SPEED_OPT
+ track_del(skb->head, 0, DATA_INFO);
+ skbinfo_del(skb,SKB_TYPE_FROMCP);
+#endif
+ atomic_dec(&skb_fromps);
+}
+//track buffers queued towards the external (CP) side
+void toext_alloc_track(struct sk_buff *skb)
+{
+#ifndef CONFIG_SPEED_OPT
+ skbinfo_add((unsigned char *)skb,SKB_TYPE_TOCP);
+#endif
+ atomic_inc(&skb_tops);
+}
+//track release of buffers queued towards the external (CP) side
+void toext_free_track(struct sk_buff *skb)
+{
+#ifndef CONFIG_SPEED_OPT
+ skbinfo_del(skb,SKB_TYPE_TOCP);
+#endif
+ atomic_dec(&skb_tops);
+}
+#else
+
+
+
+int set_print = 0; //custom network print switch
+EXPORT_SYMBOL(set_print);
+
+int set_tcpdump = 0; //capture-at-any-point switch
+
+//current skb statistics, including some anomaly counters
+unsigned long skbinfo_dbg[SKB_INFO_MAX]= {0};
+
+//counters of network runtime resources, esp. key struct allocations, for study and debugging
+unsigned long netruninfo_dbg[NET_INFO_MAX]= {0};
+
+//skb allocation upper-limit configuration, currently unused
+unsigned long skb_max_panic = 0; //total skb limit; exceeding it panics
+unsigned long skb_num_limit = 6000; //total skb limit; exceeding it makes allocation return NULL
+
+//pattern used to stack-trace specific skbs at their free point
+char skb_dump_str[NIOCTL_MAX_MSGLEN] = {0};
+unsigned int skb_dump_len = 0;
+
+/* statistics for local TCP */
+unsigned long tcp_stats_dbg[TCP_STATS_MAX] = {0};
+
+
+//packet-model statistics below, to characterize the traffic mix
+int skb_num4 = 0; //received IPv4 packets
+int skb_num6 = 0; //received IPv6 packets
+int skb_big_num; //packets with len > 1000, both v4 and v6
+int skb_small_num; //packets with len < 100, both v4 and v6
+int skb_bytes4 = 0; //received IPv4 bytes
+int skb_bytes6 = 0; //received IPv6 bytes
+int skb_unknown = 0; //received packets of unknown protocol (ARP and other non-v4/v6)
+int skb_tcpnum = 0; //received TCP packets, v4+v6, excluding fastbr traffic
+int skb_udpnum = 0; //received UDP packets, v4+v6, excluding fastbr traffic
+int broadcast_num4 = 0; //received IPv4 broadcast packets
+int broadcast_num6 = 0; //received IPv6 broadcast packets
+int multicast_num4 = 0; //received IPv4 multicast packets
+int multicast_num6 = 0; //received IPv6 multicast packets
+int fastnat_num = 0; //packets forwarded successfully by fastnat
+int fast6_num = 0; //packets forwarded successfully by fast6
+int fastbr_num = 0; //packets forwarded successfully by fastbr
+int fast_local4_rcv_num = 0; //packets received successfully by local fast_local4
+int fast_local6_rcv_num = 0; //packets received successfully by local fast_local6
+int fast_local4_output_num = 0; //packets sent successfully by local fast_local4
+int fast_local6_output_num = 0; //packets sent successfully by local fast_local6
+int fast_tcpdump_num = 0; //packets captured on the fast path
+
+
+int double_mac = 0; //MAC duplication check switch
+//slab memory usage statistics; generic slab pools (e.g. kmalloc) are not counted
+struct slab_info slab_count = {0};
+
+/* bump one TCP statistics counter */
+#define TCP_PKT_STATS_INC(_mod) tcp_stats_dbg[_mod]++
+
+/* ---- CONFIG_NETCTL disabled: the debug/track hooks below are no-op stubs ---- */
+void dump_net_stack(struct sk_buff *skb, unsigned int offset)
+{
+
+}
+
+
+void check_macaddr_only(unsigned char *ha, unsigned char ha_len)
+{
+
+}
+
+//track allocation of an skb struct, including externally supplied data
+void skb_alloc_track(struct sk_buff *skb)
+{
+
+}
+//track free of an skb struct, including externally supplied data
+void skb_free_track(struct sk_buff *skb)
+{
+
+}
+//track allocation of slab-backed skb->data, excluding external PSBUF
+void skbdata_alloc_track(struct sk_buff *skb)
+{
+
+}
+//track free of slab-backed skb->data, excluding external PSBUF
+void skbdata_free_track(struct sk_buff *skb)
+{
+
+}
+
+//track allocation of data handed in from the external (CP) side
+void fromext_alloc_track(struct sk_buff *skb)
+{
+
+
+}
+//track free of data handed in from the external (CP) side
+void fromext_free_track(struct sk_buff *skb)
+{
+
+}
+//track buffers queued towards the external (CP) side
+void toext_alloc_track(struct sk_buff *skb)
+{
+
+}
+//track release of buffers queued towards the external (CP) side
+void toext_free_track(struct sk_buff *skb)
+{
+
+}
+
+void net_print_packet(unsigned char *data, unsigned int len, int flag)
+{
+
+}
+
+
+void tcpdumpin_sq(struct sk_buff *skb)
+{
+
+}
+
+void track_netlink(struct sk_buff *skb,u32 group)
+{
+
+}
+
+void netslab_inc(int i)
+{
+
+}
+
+void netslab_dec(int i)
+{
+
+}
+
+void netruninfo_add(unsigned char *addr,unsigned int info_type)
+{
+}
+
+void netruninfo_del(unsigned char *addr,unsigned int info_type)
+{
+}
+
+void skbinfo_add(unsigned char *addr,unsigned int skbinfo_type)
+{
+}
+
+void skbinfo_del(unsigned char *addr,unsigned int skbinfo_type)
+{
+}
+
+/* debug-packet bookkeeping globals (unused in this configuration) */
+int net_debug_packet = 0;
+struct timeval net_debug_packet_tv = {0, 0};
+struct list_head net_debug_packet_list_head;
+int net_debug_packet_sec = 0;
+
+#if 0
+//record application transmit activity
+void record_app_atcive_net()
+{
+
+}
+#endif
+int get_tcp_stat_info(unsigned long arg)
+{
+ return 0;
+}
+#endif
+
+#ifdef _USE_TestHarness
+int *vir_addr_ddrnet = 0;
+
+/* test-harness build: releasing a PS buffer is a no-op */
+void psnet_freepsbuf(void *head)
+{
+}
+#endif
\ No newline at end of file
diff --git a/upstream/linux-5.10/net/core/fastproc/fast4_fw.c b/upstream/linux-5.10/net/core/fastproc/fast4_fw.c
new file mode 100755
index 0000000..0ef32e4
--- /dev/null
+++ b/upstream/linux-5.10/net/core/fastproc/fast4_fw.c
@@ -0,0 +1,829 @@
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/icmp.h>
+#include <net/ip.h>
+#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_arp.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter/xt_multiport.h>
+#include <linux/netfilter/xt_iprange.h>
+#include <linux/netfilter/nf_conntrack_tcp.h>
+#include <net/checksum.h>
+#include <net/dsfield.h>
+#include <net/route.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <net/SI/fast_common.h>
+#include <net/inet_hashtables.h>
+#include <linux/igmp.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
+#include <net/netfilter/nf_conntrack_timeout.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/SI/net_track.h>
+#include <net/SI/net_other.h>
+#include <net/SI/netioc_proc.h>
+#include <net/SI/net_cache.h>
+#include <net/SI/print_sun.h>
+
+#include <linux/netfilter/xt_tcpudp.h>
+
+MODULE_LICENSE("GPL");
+
+
+static struct task_struct *ct_iptables_syn;     /* kthread that periodically syncs ct stats to iptables */
+static struct tasklet_struct ct_iptables_bh;    /* bottom half doing the actual sync walk */
+unsigned int ct_iptables_syn_sw;                /* sync on/off switch (non-zero = enabled) */
+/* Index of each iptables table handled by ct_iptables_syn_handle(). */
+enum table_index {
+	IPTABLE_RAW,
+	IPTABLE_MANGLE,
+	IPTABLE_NAT,
+	IPTABLE_FILTER
+};
+
+
+/* ***************** ipv4 fast-forward related variables ******************** */
+#ifndef CONFIG_PREEMPT_RT_FULL
+extern int *vir_addr_ddrnet;
+#endif
+/* ******************************** declarations **************************** */
+
+
+/* ******************************* implementations ************************** */
+int dst_expire_count = 0;       /* count of flows dropped because the cached route expired */
+extern int no_neighbour;        /* count of packets dropped for missing neighbour entry */
+
+/* True when @rth's generation id no longer matches its netns, i.e. the
+ * cached route is stale and must not be used for fast forwarding. */
+static inline int rt_is_expired(struct rtable *rth)
+{
+	return rth->rt_genid != atomic_read(&(dev_net(rth->dst.dev))->ipv4.rt_genid);
+}
+
+/* Flush the data cache for [addr, addr+len). Stub function; how it will be
+ * used is to be decided later (original note). */
+void __flush_dcache_area(void *addr, size_t len)
+{
+	//stub function, see later how to use it
+}
+
+
+extern void ntl_ct_set_iw(struct sk_buff *skb, struct nf_conn *ct, int ct_dir);
+/*
+ * fast4_fw_recv - IPv4 fast-forward path for an established conntrack.
+ *
+ * Validates the cached route / bridge port and egress MTU, replays NAT
+ * (address + port rewrite with incremental checksum fixup) from the ct
+ * tuples, updates per-ct and per-device counters, builds the Ethernet
+ * header from the neighbour entry and queues the skb on the egress device.
+ *
+ * Returns 1 when the packet was consumed (transmitted or dropped),
+ * 0 when the caller must fall back to the normal slow path.
+ * NOTE(review): appears to run under fast_fw_spinlock - it is dropped and
+ * re-taken around dev_queue_xmit() below; confirm against callers.
+ */
+int fast4_fw_recv(struct nf_conn *tmpl,
+                  struct sk_buff *skb,
+                  struct nf_conn *ct,
+                  struct nf_conntrack_l4proto *l4proto,
+                  unsigned int dataoff,
+                  int dir,
+                  u_int8_t protonum)
+{
+	struct iphdr *iph = ip_hdr(skb);
+	struct udphdr *udph = NULL;
+	struct tcphdr *tcph = NULL;
+	__sum16 *cksum = NULL;
+	__be32 *oldip = NULL;
+	__be16 *oldport = 0;
+	struct net_device *dev = NULL;
+	u_int32_t skip_nat = 0;         /* set when the ct carries no NAT bits */
+
+	enum ip_conntrack_info ctinfo;
+	int ret;
+	int rdir;                       /* reverse direction of @dir */
+	int type;                       /* FN_TYPE_SRC / FN_TYPE_DST rewrite kind */
+	u_int32_t nat_addr;
+	u_int16_t nat_port;
+	struct ethhdr * eth;
+	struct dst_entry *dst_dir = NULL, *dst_rdir = NULL;
+	struct neighbour *_neighbour = NULL;
+
+	/* preserve caller VLAN fields; restored just before transmit */
+	__be16 vlan_proto_raw = skb->vlan_proto;
+	__u16 vlan_tci_raw = skb->vlan_tci;
+
+	/* modeled on tcf_ipt_act() */
+	struct nf_hook_state state = {
+		.hook = NF_INET_PRE_ROUTING,
+		.net = &init_net,
+		.in = skb->dev,
+		.pf = NFPROTO_IPV4,
+	};
+
+
+	rdir = (IP_CT_DIR_ORIGINAL == dir) ? IP_CT_DIR_REPLY: IP_CT_DIR_ORIGINAL;
+	dst_dir = dst_get_by_ct(ct, dir);
+
+	if (!dst_dir)
+	{
+		goto err_out;
+	}
+
+	/* stale cached route: tear the fast connection down, go slow path */
+	if (rt_is_expired((struct rtable*)dst_dir))
+	{
+		dst_expire_count++;
+		fast_fw_conn_release(ct);
+		goto err_out;
+	}
+
+	/* if a bridge port was recorded, send straight to the bridge port */
+	if (ct->fast_ct.fast_brport[dir])
+	{
+		rcu_read_lock();
+		dev = rcu_dereference_protected(ct->fast_ct.fast_brport[dir], 1);
+		rcu_read_unlock();
+	}
+	else {
+		dev = dst_dir->dev;
+	}
+
+	/* check whether the packet exceeds the egress device MTU */
+
+	if (!dev || (skb->len > dev->mtu))
+	{
+		skbinfo_add(NULL, SKB_OVER_MTU);
+		goto err_out;
+	}
+
+	/* looped-back packets are freed directly and counted */
+	if (strcmp(skb->dev->name, dev->name) == 0)
+	{
+		skbinfo_add(NULL, SKB_LOOP);
+
+		kfree_skb(skb);
+		goto drop_packet;
+	}
+
+	/* derive ctinfo; modeled on resolve_normal_ct() */
+	if (dir == 1) {
+		ctinfo = IP_CT_ESTABLISHED_REPLY;
+	} else {
+		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
+			ctinfo = IP_CT_ESTABLISHED;
+		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
+			ctinfo = IP_CT_RELATED;
+		} else {
+			ctinfo = IP_CT_NEW;
+		}
+	}
+
+
+	ret = nf_conntrack_handle_packet_fast(ct, skb, dataoff, ctinfo, &state);
+	if (ret <= 0) {
+		skb->_nfct = 0;
+		goto err_out; /* nothing in skb may be modified before fast succeeds, else failure would need rollback */
+	}
+	/* ensure egress dev headroom is sufficient; expand when it is not */
+	if (!(skb = fast_expand_headroom(skb, dev))) {
+		goto drop_packet;
+	}
+
+	fast_tcpdump(skb);
+
+	/* if capture matched, the data was cloned; fast success must modify data, so make a private copy */
+	if (skb_cloned(skb))
+	{
+		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+		{
+			print_sun(SUN_DBG, "fast4_fw_recv clone copy failed !!!\n");
+			kfree_skb(skb);
+			goto drop_packet;
+		}
+		clean_cache(skb->data,skb->len);
+	}
+
+	iph = ip_hdr(skb);
+
+	/* only once fastnat succeeded may the IP header start be recorded, for later cache flushing */
+	skb_reset_network_header(skb);
+	skb->isFastnat = 1;
+	nf_ct_set(skb, (struct nf_conn *)&ct->ct_general, ctinfo);
+
+	/* pick NAT rewrite target from the reverse tuple */
+	if (test_bit(IPS_SRC_NAT_BIT, &ct->status))
+	{
+		if(IP_CT_DIR_ORIGINAL == dir)
+		{
+			nat_addr = ct->tuplehash[rdir].tuple.dst.u3.ip;
+			nat_port = ct->tuplehash[rdir].tuple.dst.u.all;
+			type = FN_TYPE_SRC;
+		}
+		else
+		{
+			nat_addr = ct->tuplehash[rdir].tuple.src.u3.ip;
+			nat_port = ct->tuplehash[rdir].tuple.src.u.all;
+			type = FN_TYPE_DST;
+		}
+	}
+	else if (test_bit(IPS_DST_NAT_BIT, &ct->status))
+	{
+		if (IP_CT_DIR_ORIGINAL == dir)
+		{
+			nat_addr = ct->tuplehash[rdir].tuple.src.u3.ip;
+			nat_port = ct->tuplehash[rdir].tuple.src.u.all;
+			type = FN_TYPE_DST;
+		}
+		else
+		{
+			nat_addr = ct->tuplehash[rdir].tuple.dst.u3.ip;
+			nat_port = ct->tuplehash[rdir].tuple.dst.u.all;
+			type = FN_TYPE_SRC;
+		}
+	}
+	else
+	{
+		skip_nat = 1;
+	}
+
+	if (!skip_nat)
+	{
+		/* perform the NAT translation */
+		if (IPPROTO_TCP == iph->protocol)
+		{
+			tcph = (struct tcphdr *)(skb->data + iph->ihl * 4);
+			cksum = &tcph->check;
+			oldport = (FN_TYPE_SRC == type)? (&tcph->source): (&tcph->dest);
+		}
+		else if (IPPROTO_UDP == iph->protocol)
+		{
+			udph = (struct udphdr *)(skb->data + iph->ihl * 4);
+			cksum = &udph->check;
+			oldport = (FN_TYPE_SRC == type)? (&udph->source): (&udph->dest);
+		}
+
+		oldip = (FN_TYPE_SRC == type)? (&iph->saddr) : (&iph->daddr);
+
+		/* UDP checksum 0 means "no checksum" and must stay 0; TCP always fixed up */
+		if (cksum != NULL && (0!=*cksum || IPPROTO_TCP == iph->protocol))
+		{
+			inet_proto_csum_replace4(cksum, skb, *oldip, nat_addr, 0);
+			inet_proto_csum_replace2(cksum, skb, *oldport, nat_port, 0);
+		}
+		csum_replace4(&iph->check, *oldip, nat_addr);
+		if(oldport)
+			*oldport = nat_port;
+		*oldip = nat_addr;
+	}
+	else
+	{
+		if (IPPROTO_TCP == iph->protocol)
+		{
+			tcph = (struct tcphdr *)(skb->data + iph->ihl * 4);
+		}
+	}
+
+	/* per-connection traffic accounting */
+	ct->packet_info[dir].packets++;
+	ct->packet_info[dir].bytes += skb->len;
+	/* per-netdevice accounting --- like native linux drivers, counts IP packet length */
+	if (fastnat_level == FAST_NET_DEVICE)
+	{
+		skb->dev->stats.rx_packets++;
+		skb->dev->stats.rx_bytes += skb->len;
+	}
+
+
+	if (dev->flags & IFF_UP)
+	{
+		if (!(dev->flags & IFF_POINTOPOINT)) {
+			skb_push(skb, ETH_HLEN);
+			skb_reset_mac_header(skb);
+			if(skb->isvlan == 1)
+			{
+				struct vlan_ethhdr *vlan_eth = (struct vlan_ethhdr*)(skb->data - VLAN_HLEN);
+				skb->vlan_proto = vlan_eth->h_vlan_proto;
+				skb->vlan_tci = ntohs(vlan_eth->h_vlan_TCI);
+			}
+			eth = (struct ethhdr *)skb->data;
+			_neighbour = dst_neigh_lookup_skb(dst_dir, skb);
+			/* egress dev MAC becomes the packet's source MAC */
+			memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
+			if (_neighbour)
+			{
+				memcpy(eth->h_dest, _neighbour->ha, ETH_ALEN);
+				neigh_release(_neighbour);
+			}
+			else {
+				/* no next-hop MAC known: drop and count */
+				__flush_dcache_area(skb->data, skb->len);
+				kfree_skb(skb);
+				no_neighbour++;
+				goto drop_packet;
+			}
+			eth->h_proto = htons(ETH_P_IP);
+		}
+		skb->dev = dev;
+		skb->now_location |= FASTNAT_SUCC;
+
+		if(ct->indev[dir] == NULL && skb->indev != NULL)
+		{
+			ct->indev[dir] = skb->indev;
+		}
+
+		if(ct->outdev[dir] == NULL && skb->dev != NULL)
+		{
+			ct->outdev[dir] = skb->dev;
+		}
+
+		/* restore caller's VLAN fields saved at entry */
+		skb->vlan_proto = vlan_proto_raw;
+		skb->vlan_tci = vlan_tci_raw;
+		__flush_dcache_area(skb->data, skb->len);
+		/* drop the lock across the driver transmit, then re-take it */
+		spin_unlock_bh(&fast_fw_spinlock);
+		dev_queue_xmit(skb);
+		spin_lock_bh(&fast_fw_spinlock);
+	}
+	else
+	{
+		print_sun(SUN_DBG, "ERR &&&&&& %s DOWN, kfree_skb!!!!!!!! \n", skb->dev->name);
+		kfree_skb(skb);
+	}
+
+
+	print_sun(SUN_DBG, "skb : 0x%x, new fastnat succ--------", skb);
+
+succ_out: /* NOTE(review): label is never jumped to; falls through into drop_packet */
+drop_packet:
+	if (tmpl)
+		nf_conntrack_put(&tmpl->ct_general);
+	dst_release(dst_dir);
+	return 1;
+
+err_out :
+	dst_release(dst_dir);
+	nf_conntrack_put(&ct->ct_general);
+	print_sun(SUN_DBG, "skb : 0x%x, new fastnat FAIL!!!!!!!!!!", skb);
+	if (tmpl) {
+		nf_ct_set(skb, (struct nf_conn *)&tmpl->ct_general, IP_CT_NEW);
+	}
+	else {
+		skb->_nfct = 0;
+	}
+	return 0; /* not fast nat */
+}
+
+/*
+ * napt_handle4_fw - netfilter POST_ROUTING hook that arms IPv4 fast
+ * forwarding: once a TCP/UDP conntrack is established it caches the
+ * route (and optional bridge port) on the ct so fast4_fw_recv() can
+ * bypass the slow path for subsequent packets.
+ * Always returns NF_ACCEPT; it only records state, never filters.
+ */
+unsigned int napt_handle4_fw(void *priv,
+	       struct sk_buff *skb,
+	       const struct nf_hook_state *state)
+{
+	struct nf_conn *ct;
+	enum ip_conntrack_info ctinfo;
+	u_int8_t protocol;
+	enum ip_conntrack_dir dir, rdir;
+	struct dst_entry *dst = skb_dst(skb);
+#ifdef CONFIG_ATHRS_HW_NAT
+	u_int32_t mask =0;
+#endif
+	struct neighbour *_neighbour = NULL;
+	struct net_device *out = state->out;
+
+
+	/* fast-forward master switch */
+	if (fastnat_level == FAST_CLOSE || fastnat_level == FAST_CLOSE_KEEP_LINK)
+	{
+		return NF_ACCEPT;
+	}
+
+	/* only TCP/UDP support fast; a ct found via ICMP may belong to TCP/UDP
+	 * (e.g. port unreachable), so the protocol must be checked explicitly */
+	if (ip_hdr(skb)->protocol != IPPROTO_TCP && ip_hdr(skb)->protocol != IPPROTO_UDP)
+	{
+		return NF_ACCEPT;
+	}
+	/* fast-forward sub-feature bitmap switches */
+
+	if (!test_bit(FAST_TYPE_VERSION_BIT, &fast_switch)
+		|| !test_bit(FAST_TYPE_FW4_BIT, &fast_switch) )
+	{
+		return NF_ACCEPT;
+	}
+
+	if (!out)
+	{
+		return NF_ACCEPT;
+	}
+
+	/* no fast link for broadcast/multicast */
+	if (ipv4_is_multicast(ip_hdr(skb)->daddr) || ipv4_is_lbcast(ip_hdr(skb)->daddr))
+	{
+		return NF_ACCEPT;
+	}
+
+	if(!dst)
+	{
+		return NF_ACCEPT;
+	}
+
+	_neighbour = dst_neigh_lookup_skb(dst, skb);
+	if (!_neighbour)
+	{
+		return NF_ACCEPT;
+	}
+	/* all-zero next-hop MAC: not resolved yet (point-to-point needs none) */
+	if (memcmp(_neighbour->ha, zeromac, ETH_ALEN) == 0)
+	{
+		if (!(skb->dev->flags & IFF_POINTOPOINT))
+			goto accept;
+	}
+
+	/* check whether the packet exceeds the egress device MTU */
+	if (dst->dev && (skb->len > dst->dev->mtu))
+	{
+		goto accept;
+	}
+
+	ct = nf_ct_get(skb, &ctinfo);
+
+	if (!ct)
+	{
+		goto accept;
+	}
+
+	protocol = nf_ct_protonum(ct);
+
+	if (ct->master == NULL)
+	{
+		struct nf_conn_help *temp_help = nfct_help(ct);
+		/* connections with helper hooks must stay on the standard linux
+		 * path, otherwise the kernel cannot see the related packets */
+		if(temp_help!=NULL)
+		{
+			goto accept;
+		}
+	}
+
+	/* only forward */
+	if (!skb->skb_iif)
+	{
+		goto accept;
+	}
+
+	/* skip protocols that must not be fastnat'ed, filtered by port */
+	if (check_skip_ports(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all))
+	{
+		goto accept;
+	}
+
+	dir = CTINFO2DIR(ctinfo);
+
+	rdir = (IP_CT_DIR_ORIGINAL == dir) ? IP_CT_DIR_REPLY: IP_CT_DIR_ORIGINAL;
+
+	/* fastnat only for TCP/UDP */
+	if (IPPROTO_TCP == protocol)
+	{
+		/* TCP three-way handshake must be complete */
+		if(!test_bit(IPS_ASSURED_BIT, &ct->status))
+		{
+			goto accept;
+		}
+	}
+	else if (IPPROTO_UDP != protocol)
+	{
+		goto accept;
+	}
+
+	spin_lock_bh(&fast_fw_spinlock);
+	/* route changed since we cached it: release the old fast state */
+	if (ct->fast_ct.fast_dst[dir] && (ct->fast_ct.fast_dst[dir] != dst))
+	{
+		fast_fw_conn_release(ct);
+	}
+
+	if (!ct->fast_ct.fast_dst[dir])
+	{
+		rcu_assign_pointer(ct->fast_ct.fast_dst[dir], dst);
+		ct->fast_ct.fast_brport[dir] = getBridgePort(_neighbour, out);
+		fast_dst_add_ct(dst, ct);
+	}
+
+	ct->fast_ct.isFast = FAST_CT_FW4;
+	spin_unlock_bh(&fast_fw_spinlock);
+
+accept:
+
+	neigh_release(_neighbour);
+	return NF_ACCEPT;
+}
+
+/* POST_ROUTING hook registration: runs napt_handle4_fw after every other
+ * IPv4 POST_ROUTING hook (NF_IP_PRI_LAST). */
+static struct nf_hook_ops fast4_fw_hook = {
+	.hook = napt_handle4_fw,
+	//.owner = THIS_MODULE,
+	.pf = PF_INET,
+	.hooknum = NF_INET_POST_ROUTING,
+	.priority = NF_IP_PRI_LAST,
+};
+
+/*
+ * port_match - does @port fall inside [@min, @max]?
+ * @invert flips the sense of the answer (iptables "!" semantics).
+ */
+static inline bool
+port_match(u_int16_t min, u_int16_t max, u_int16_t port, bool invert)
+{
+	bool in_range = (min <= port) && (port <= max);
+
+	return in_range != invert;
+}
+
+/*
+ * ctable_mt - replay an xt_tcp port rule against a conntrack tuple.
+ * Returns false as soon as the source- or destination-port rule rejects
+ * the tuple in direction @dir.  Whenever a rule is actually selective
+ * (non-zero port range, or inverted match), *match_filter is raised to 1
+ * so the caller knows this rule constrains traffic.
+ */
+static bool ctable_mt(struct nf_conn* ct, struct xt_action_param *par,int dir, int* match_filter)
+{
+	const struct xt_tcp *rule = par->matchinfo;
+	bool inv_src = !!(rule->invflags & XT_TCP_INV_SRCPT);
+	bool inv_dst = !!(rule->invflags & XT_TCP_INV_DSTPT);
+	u_int16_t src_port = ntohs(ct->tuplehash[dir].tuple.src.u.tcp.port);
+	u_int16_t dst_port = ntohs(ct->tuplehash[dir].tuple.dst.u.tcp.port);
+
+	if (!port_match(rule->spts[0], rule->spts[1], src_port, inv_src))
+		return false;
+	if ((rule->spts[0] || rule->spts[1]) ^ inv_src)
+		*match_filter = 1;
+
+	if (!port_match(rule->dpts[0], rule->dpts[1], dst_port, inv_dst))
+		return false;
+	if ((rule->dpts[0] || rule->dpts[1]) ^ inv_dst)
+		*match_filter = 1;
+
+	return true;
+}
+
+/*
+ * ip_packet_match - replay an iptables rule's IP-level matches against a
+ * conntrack tuple in its normal orientation (src<->src, dst<->dst,
+ * indev<->iniface, outdev<->outiface).  *match_filter is raised when the
+ * rule actually constrains addresses or interfaces.
+ *
+ * Fix: 'ret' used to be read by NF_INVF() while uninitialized whenever
+ * ct->indev[dir] / ct->outdev[dir] was NULL, and the iniface compare
+ * result leaked into the outiface check.  'ret' is now reset to 0
+ * ("interface matches") before each comparison.
+ */
+static inline bool
+ip_packet_match(const struct ipt_ip *ipinfo,struct nf_conn* ct,int dir, int* match_filter)
+{
+	unsigned long ret = 0;
+
+	/* source/destination address match, with optional inversion */
+	if (NF_INVF(ipinfo, IPT_INV_SRCIP,
+		    (ct->tuplehash[dir].tuple.src.u3.ip & ipinfo->smsk.s_addr) != ipinfo->src.s_addr) ||
+	    NF_INVF(ipinfo, IPT_INV_DSTIP,
+		    (ct->tuplehash[dir].tuple.dst.u3.ip & ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr))
+		return false;
+	if(ipinfo->src.s_addr || ipinfo->dst.s_addr)
+		*match_filter = 1;
+
+	if(ct->indev[dir] != NULL)
+	{
+		ret = ifname_compare_aligned(ct->indev[dir]->name, ipinfo->iniface, ipinfo->iniface_mask);
+	}
+
+	if(ipinfo->iniface[0] != '\0')
+		*match_filter = 1;
+
+	if (NF_INVF(ipinfo, IPT_INV_VIA_IN, ret != 0))
+		return false;
+
+	ret = 0; /* do not let the iniface result leak into the outiface check */
+	if(ct->outdev[dir] != NULL)
+	{
+		ret = ifname_compare_aligned(ct->outdev[dir]->name, ipinfo->outiface, ipinfo->outiface_mask);
+	}
+
+	if(ipinfo->outiface[0] != '\0')
+		*match_filter = 1;
+
+	if (NF_INVF(ipinfo, IPT_INV_VIA_OUT, ret != 0))
+		return false;
+
+	/* L4 protocol match */
+	if (ipinfo->proto &&
+	    NF_INVF(ipinfo, IPT_INV_PROTO, ct->tuplehash[dir].tuple.dst.protonum != ipinfo->proto))
+		return false;
+
+	return true;
+}
+
+/*
+ * ip_packet_match_neg - like ip_packet_match(), but replays the rule
+ * against the tuple with roles swapped (rule src vs tuple dst, iniface vs
+ * outdev, ...), i.e. matching the reverse direction of the flow.
+ *
+ * Fix: 'ret' used to be read by NF_INVF() while uninitialized whenever
+ * ct->outdev[dir] / ct->indev[dir] was NULL, and the first interface
+ * compare result leaked into the second.  'ret' is now reset to 0
+ * ("interface matches") before each comparison.
+ */
+static inline bool
+ip_packet_match_neg(const struct ipt_ip *ipinfo,struct nf_conn* ct,int dir, int* match_filter)
+{
+	unsigned long ret = 0;
+
+	/* swapped address match: rule src against tuple dst and vice versa */
+	if (NF_INVF(ipinfo, IPT_INV_SRCIP,
+		    (ct->tuplehash[dir].tuple.dst.u3.ip & ipinfo->smsk.s_addr) != ipinfo->src.s_addr) ||
+	    NF_INVF(ipinfo, IPT_INV_DSTIP,
+		    (ct->tuplehash[dir].tuple.src.u3.ip & ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr))
+		return false;
+	if(ipinfo->src.s_addr || ipinfo->dst.s_addr)
+		*match_filter = 1;
+
+	if(ct->outdev[dir] != NULL)
+	{
+		ret = ifname_compare_aligned(ct->outdev[dir]->name, ipinfo->iniface, ipinfo->iniface_mask);
+	}
+
+	if(ipinfo->iniface[0] != '\0')
+		*match_filter = 1;
+
+	if (NF_INVF(ipinfo, IPT_INV_VIA_IN, ret != 0))
+		return false;
+
+	ret = 0; /* do not let the first interface result leak into the second check */
+	if(ct->indev[dir] != NULL)
+	{
+		ret = ifname_compare_aligned(ct->indev[dir]->name, ipinfo->outiface, ipinfo->outiface_mask);
+	}
+
+	if(ipinfo->outiface[0] != '\0')
+		*match_filter = 1;
+
+	if (NF_INVF(ipinfo, IPT_INV_VIA_OUT, ret != 0))
+		return false;
+
+	/* L4 protocol match */
+	if (ipinfo->proto &&
+	    NF_INVF(ipinfo, IPT_INV_PROTO, ct->tuplehash[dir].tuple.dst.protonum != ipinfo->proto))
+		return false;
+
+	return true;
+}
+
+/* Return the ipt_entry located @offset bytes into the table blob @base. */
+static inline struct ipt_entry *
+get_entry(const void *base, unsigned int offset)
+{
+	const char *table_start = base;
+
+	return (struct ipt_entry *)(table_start + offset);
+}
+
+/* Step to the rule that follows @entry (entries are laid out back to back,
+ * each carrying its own next_offset). */
+static inline
+struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
+{
+	const char *cursor = (const char *)entry;
+
+	return (struct ipt_entry *)(cursor + entry->next_offset);
+}
+
+
+
+/*
+ * ct_iptables_syn_thread - kthread that syncs ct statistics to iptables
+ * counters roughly once per second (by scheduling ct_iptables_bh) while
+ * ct_iptables_syn_sw is enabled.
+ *
+ * Fix: the loop used to be `while (1)` and never checked
+ * kthread_should_stop(), so kthread_stop() in fast4_fw_cleanup() would
+ * block forever.  The thread now exits when asked to stop (worst case
+ * one msleep period late).
+ */
+static int ct_iptables_syn_thread(void *param)
+{
+	while (!kthread_should_stop())
+	{
+		if(ct_iptables_syn_sw)
+		{
+			rcu_read_lock();
+			tasklet_schedule(&ct_iptables_bh);
+			rcu_read_unlock();
+			/* sync interval: 1 second */
+		}
+		msleep(1*1000);
+	}
+	return 0;
+}
+
+/*
+ * ct_iptables_syn_handle - fold one connection's fast-path byte/packet
+ * counts into the matching rule counters of one iptables table.
+ *
+ * Walks every entry of @private (starting at PRE_ROUTING for
+ * raw/mangle/nat, LOCAL_IN for filter), replays the rule's IP and
+ * tcp/udp-port matches against the ct tuples in both directions, and
+ * ADD_COUNTERs the ct's accumulated stats onto each selective rule that
+ * matches.
+ *
+ * Fix: the switch's default branch used to `break`, leaving 'e'
+ * uninitialized for an out-of-range @table_id; it now returns early.
+ * NOTE(review): 'acpar' is only partially initialized (matchinfo) -
+ * ctable_mt() reads nothing else, so this is currently safe.
+ */
+void ct_iptables_syn_handle(struct nf_conn *ct,struct xt_table_info *private,int table_id)
+{
+	void *table_base;
+	struct ipt_entry *e;
+	const struct xt_entry_match *ematch;
+	struct xt_action_param acpar;
+	struct xt_counters *counter;
+	int match_flag = 0;     /* a port match rejected the tuple */
+	int match_filter = 0;   /* the rule actually constrains traffic */
+	int num = 0;
+
+
+	table_base = private->entries;
+	num = private->number;
+	switch(table_id)
+	{
+	case 0:
+	case 1:
+	case 2:
+		e = get_entry(table_base, private->hook_entry[NF_INET_PRE_ROUTING]);
+		break;
+	case 3:
+		e = get_entry(table_base, private->hook_entry[NF_INET_LOCAL_IN]);
+		break;
+	default:
+		/* unknown table id: 'e' would be left uninitialized below */
+		return;
+	}
+	while(num--)
+	{
+		match_flag = 0;
+		match_filter = 0;
+		/* pass 1: ORIGINAL direction (plus reverse-role match) */
+		if(!ip_packet_match(&e->ip, ct, IP_CT_DIR_ORIGINAL,&match_filter) &&
+		   !ip_packet_match_neg(&e->ip, ct, IP_CT_DIR_REPLY,&match_filter))
+		{
+			/* IP-level mismatch: fall through to the REPLY pass */
+		}
+		else
+		{
+			xt_ematch_foreach(ematch, e)
+			{
+				acpar.matchinfo = ematch->data;
+				if (!ctable_mt(ct, &acpar, IP_CT_DIR_ORIGINAL,&match_filter))
+				{
+					match_flag = 1;
+					break;
+				}
+				else
+				{
+				}
+			}
+			if(!match_flag)
+			{
+				if(match_filter)
+				{
+					counter = xt_get_this_cpu_counter(&e->counters);
+					ADD_COUNTER(*counter, ct->packet_info[IP_CT_DIR_ORIGINAL].bytes,ct->packet_info[IP_CT_DIR_ORIGINAL].packets);
+				}
+				e = ipt_next_entry(e);
+				continue;
+			}
+			match_flag = 0;
+			match_filter = 0;
+		}
+
+		/* pass 2: REPLY direction (plus reverse-role match) */
+		if (!ip_packet_match(&e->ip, ct, IP_CT_DIR_REPLY,&match_filter) &&
+		    !ip_packet_match_neg(&e->ip, ct, IP_CT_DIR_ORIGINAL,&match_filter))
+		{
+			e = ipt_next_entry(e);
+			continue;
+		}
+		else
+		{
+			xt_ematch_foreach(ematch, e)
+			{
+				acpar.matchinfo = ematch->data;
+				if (!ctable_mt(ct, &acpar, IP_CT_DIR_REPLY,&match_filter))
+				{
+					match_flag = 1;
+					break;
+				}
+			}
+			if(!match_flag)
+			{
+				if(match_filter)
+				{
+					counter = xt_get_this_cpu_counter(&e->counters);
+					ADD_COUNTER(*counter, ct->packet_info[IP_CT_DIR_REPLY].bytes,ct->packet_info[IP_CT_DIR_REPLY].packets);
+				}
+				e = ipt_next_entry(e);
+				continue;
+			}
+		}
+	}
+
+}
+
+/*
+ * ct_iptables_bhfunc - tasklet body: walk the whole conntrack hash and,
+ * for every fast-path connection, sync its accumulated byte/packet
+ * counts into the raw/mangle/nat/filter iptables tables, then zero the
+ * per-ct counters under fast_fw_spinlock.
+ * Runs with BHs disabled inside an xt_write_recseq section so iptables
+ * counter readers see consistent values.
+ * NOTE(review): hlist_nulls_for_each_entry_rcu is used without an
+ * explicit rcu_read_lock(); local_bh_disable() implies an RCU-sched
+ * critical section here - confirm this is the intended protection.
+ */
+static void ct_iptables_bhfunc(unsigned long param)
+{
+	int hash = 0;
+	struct nf_conntrack_tuple_hash *h;
+	struct hlist_nulls_node *n;
+	struct nf_conn *ct;
+	struct xt_table_info *private;
+	struct net * net;
+	unsigned int addend;
+	local_bh_disable();
+	addend = xt_write_recseq_begin();
+	for(hash = 0; hash < nf_conntrack_htable_size; hash++)
+	{
+		hlist_nulls_for_each_entry_rcu(h,n,&nf_conntrack_hash[hash],hnnode)
+		{
+			if(h)
+			{
+				ct = nf_ct_tuplehash_to_ctrack(h);
+				if(ct->fast_ct.isFast)
+				{
+					net = nf_ct_net(ct);
+					private = READ_ONCE(net->ipv4.iptable_raw->private);
+					ct_iptables_syn_handle(ct,private,IPTABLE_RAW);
+					private = READ_ONCE(net->ipv4.iptable_mangle->private);
+					ct_iptables_syn_handle(ct,private,IPTABLE_MANGLE);
+					private = READ_ONCE(net->ipv4.nat_table->private);
+					ct_iptables_syn_handle(ct,private,IPTABLE_NAT);
+					private = READ_ONCE(net->ipv4.iptable_filter->private);
+					ct_iptables_syn_handle(ct,private,IPTABLE_FILTER);
+				}
+				else
+					continue;
+				/* counters were folded into iptables; restart them at zero */
+				spin_lock_bh(&fast_fw_spinlock);
+				ct->packet_info[IP_CT_DIR_ORIGINAL].bytes = 0;
+				ct->packet_info[IP_CT_DIR_ORIGINAL].packets = 0;
+				ct->packet_info[IP_CT_DIR_REPLY].bytes = 0;
+				ct->packet_info[IP_CT_DIR_REPLY].packets = 0;
+				spin_unlock_bh(&fast_fw_spinlock);
+			}
+		}
+	}
+	xt_write_recseq_end(addend);
+	local_bh_enable();
+}
+
+
+/*
+ * fast4_fw_init - register the IPv4 fast-forward POST_ROUTING hook and
+ * start the ct->iptables statistics sync thread + tasklet.
+ * Returns 0 on success, -EINVAL when the netfilter hook cannot be
+ * registered.  The sync thread is best-effort: its failure does not fail
+ * init.
+ *
+ * Fix: on kthread_create() failure, ct_iptables_syn kept an ERR_PTR;
+ * fast4_fw_cleanup()'s `if (ct_iptables_syn)` test passes for ERR_PTRs
+ * and kthread_stop() would then dereference it.  Reset it to NULL.
+ */
+int fast4_fw_init(void)
+{
+	int ret = 0;
+
+	ret = nf_register_net_hook(&init_net, &fast4_fw_hook);
+	if (ret != 0)
+	{
+		print_sun(SUN_ERR,"init fast4_fw_init failed\n");
+		return -EINVAL;
+	}
+	print_sun(SUN_DBG,"init fast4_fw_init done\n");
+
+	ct_iptables_bh.func = ct_iptables_bhfunc;
+	ct_iptables_syn = kthread_create(ct_iptables_syn_thread, (void *)0, "ct_iptables_syn" );
+	if (!IS_ERR(ct_iptables_syn))
+	{
+		printk("ntl_syn_task thread's init is succ");
+		wake_up_process(ct_iptables_syn);
+	}
+	else
+	{
+		/* keep NULL so cleanup never kthread_stop()s an ERR_PTR */
+		ct_iptables_syn = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * fast4_fw_cleanup - undo fast4_fw_init(): drop all cached fast routes,
+ * unregister the POST_ROUTING hook, stop the sync kthread and kill the
+ * sync tasklet.  Always returns 0.
+ * NOTE(review): kthread_stop() only returns once the thread exits; the
+ * thread sleeps in 1s msleep() periods, so cleanup may block up to ~1s.
+ */
+int fast4_fw_cleanup(void)
+{
+	fast_release_all(RELEASE_ALL_DST);
+	nf_unregister_net_hook(&init_net, &fast4_fw_hook);
+	if (ct_iptables_syn)
+	{
+		kthread_stop(ct_iptables_syn);
+		ct_iptables_syn = NULL;
+	}
+	tasklet_kill(&ct_iptables_bh);
+	return 0;
+}
+
diff --git a/upstream/linux-5.10/net/core/fastproc/fast6.c b/upstream/linux-5.10/net/core/fastproc/fast6.c
new file mode 100755
index 0000000..d5fab9c
--- /dev/null
+++ b/upstream/linux-5.10/net/core/fastproc/fast6.c
@@ -0,0 +1,626 @@
+/* * Copyright (c) 2011 Qualcomm Atheros, Inc. * */
+
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/icmp.h>
+#include <net/ip.h>
+#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter_arp.h>
+#include <linux/netfilter/xt_multiport.h>
+#include <linux/netfilter/xt_iprange.h>
+#include <linux/netfilter/nf_conntrack_tcp.h>
+#include <net/checksum.h>
+#include <net/dsfield.h>
+#include <net/route.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <net/SI/fast6.h>
+#include <net/SI/netioc_proc.h>
+#include <net/SI/print_sun.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+#include <net/SI/net_track.h>
+
+
+MODULE_LICENSE("GPL");
+
+/* ***************** ipv6 fast-forward related variables ******************** */
+spinlock_t fast6_spinlock;                /* spinlock protecting list operations */
+fast_list_t working_list6 = {0};          /* all active ipv6 fast-forward entries */
+struct hlist_nulls_head *working_hash6;   /* tuple hash over working_list6 (RCU-protected) */
+
+/* ******************************** declarations **************************** */
+
+
+/* ******************************* implementations ************************** */
+/* IPv6 extension-header check: does this header type imply the packet
+ * carries no L4 header we can parse?  Returns 1 for such types, else 0.
+ * Only ESP is known so far (from debugging); extend after studying more
+ * protocols (original note). */
+static int ip6nol4head(int type)
+{
+	static const int no_l4_types[] = {IPPROTO_ESP};
+	int idx;
+
+	for (idx = 0; idx < (int)(sizeof(no_l4_types) / sizeof(no_l4_types[0])); idx++) {
+		if (no_l4_types[idx] == type)
+			return 1;
+	}
+	return 0;
+}
+
+/* Is @type an IPv6 extension-header protocol number?  Returns the number
+ * itself when it is, 0 otherwise.
+ * NOTE(review): IPPROTO_HOPOPTS is 0, so a hop-by-hop header is
+ * indistinguishable from "not an extension header" to callers - same as
+ * the original behavior. */
+static int ip6option(int type)
+{
+	static const int ext_hdrs[8] = {IPPROTO_HOPOPTS, IPPROTO_IPV6, IPPROTO_ROUTING, IPPROTO_FRAGMENT,
+					IPPROTO_ESP, IPPROTO_AH, IPPROTO_DSTOPTS, IPPROTO_NONE};
+	int idx;
+
+	for (idx = 0; idx < 8; idx++) {
+		if (ext_hdrs[idx] == type)
+			return ext_hdrs[idx];
+	}
+	return 0;
+}
+
+//skb->data must already point at the IPv6 header
+/*
+ * getipv6uppkg - skip the IPv6 header (and, in the disabled code, its
+ * extension headers) and return a pointer to the L4 header.
+ * On success *protocol receives the L4 protocol number and *uppkglen the
+ * L4 payload length (both optional).  Returns NULL when the packet
+ * carries any extension header: the extension-walking code below is
+ * compiled out (#if 0), so such packets are simply rejected to the slow
+ * path.
+ * NOTE(review): ip6option() returns 0 for IPPROTO_HOPOPTS, so a
+ * hop-by-hop header would NOT be rejected here - confirm intended.
+ */
+unsigned char *getipv6uppkg(unsigned char *ippkg, unsigned char *protocol, int *uppkglen)
+{
+	unsigned char *ippkgpos = ippkg + 40;   /* 40 = fixed IPv6 header size */
+	struct ip6_hdr *hdr = (struct ip6_hdr *)ippkg;
+	struct ip6_opthdr *opthdr;              /* only used by the disabled walker below */
+	int ip6hdrlen;
+	int proto = 0;
+
+	proto = ip6option(hdr->ip6_nxt);
+	if (proto)
+	{
+		return NULL;
+#if 0
+		if (ip6nol4head(proto))
+			return NULL;
+
+		opthdr =(struct ip6_opthdr *)ippkgpos;
+		while (proto = ip6option(opthdr->nxt))
+		{
+			if (ip6nol4head(proto))
+				return NULL;
+			ippkgpos += (opthdr->len + 1) << 3;
+			opthdr = (struct ip6_opthdr *)ippkgpos;
+		}
+		if (protocol)
+			*protocol = opthdr->nxt;
+		ippkgpos += (opthdr->len + 1) << 3;
+#endif
+	}
+	else
+		if (protocol)
+			*protocol = hdr->ip6_nxt;
+
+	ip6hdrlen = ippkgpos - ippkg;
+
+	if (uppkglen)
+		*uppkglen = ntohs(hdr->ip6_plen) + 40 - ip6hdrlen;
+
+	return (ippkgpos);
+}
+
+/*
+ * fast6_get_tuple - extract the IPv6 5-tuple from @skb into @tuple.
+ * Currently only TCP/UDP/ICMPv6 packets are handled; anything else (or a
+ * malformed/short packet) returns -1, 0 on success.  skb->data must point
+ * at the IPv6 header.
+ *
+ * Fix: the length check was `skb->len - sizeof(struct ipv6hdr) >= 0`,
+ * an unsigned expression that is ALWAYS true (underflow wraps), so short
+ * packets were never rejected.  Rewritten as a plain comparison.
+ */
+int fast6_get_tuple(struct sk_buff *skb, struct nf_conntrack_tuple *tuple)
+{
+	if (!skb || !tuple)
+	{
+		return -1;
+	}
+	__u8 next_hdr;
+	unsigned char *l4head;
+	struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+	struct udphdr *udph = NULL;
+	struct tcphdr *tcph = NULL;
+	struct icmp6hdr *icmph = NULL;
+
+
+	/* only IPv6 packets */
+	if (htons(ETH_P_IPV6) != skb->protocol)
+	{
+		return -1;
+	}
+
+	/* must at least contain a full IPv6 header (unsigned-safe compare) */
+	if (skb->len >= sizeof(struct ipv6hdr))
+	{
+		l4head = getipv6uppkg(skb->data, &next_hdr, NULL);
+		if (l4head == NULL)
+			return -1;
+	}
+	else
+		return -1;
+
+	memset(tuple, 0, sizeof(struct nf_conntrack_tuple));
+
+	/* only tcp/udp (and icmpv6 below) */
+	if (NEXTHDR_UDP == next_hdr)
+	{
+		udph = (struct udphdr *)l4head;
+		tuple->src.u.udp.port = udph->source;
+		tuple->dst.u.udp.port = udph->dest;
+		skb_udpnum++;
+	}
+	else if (NEXTHDR_TCP == next_hdr)
+	{
+		tcph = (struct tcphdr *)l4head;
+		tuple->src.u.tcp.port = tcph->source;
+		tuple->dst.u.tcp.port = tcph->dest;
+		skb_tcpnum++;
+	}
+	else if (NEXTHDR_ICMP == next_hdr)
+	{
+		icmph = (struct icmp6hdr *)l4head; /* point to ICMPv6 header */
+		tuple->src.u.icmp.id = icmph->icmp6_identifier;
+		tuple->dst.u.icmp.type = icmph->icmp6_type;
+		tuple->dst.u.icmp.code = icmph->icmp6_code;
+	}
+	else
+	{
+		return -1;
+	}
+
+	tuple->src.l3num = AF_INET6;
+	tuple->src.u3.in6 = iph->saddr;
+	tuple->dst.u3.in6 = iph->daddr;
+	tuple->dst.protonum = next_hdr;
+	tuple->dst.dir = IP_CT_DIR_ORIGINAL;
+
+	return 0;
+}
+
+//must stay consistent with dev_xmit_complete()
+//an inline cannot be hoisted into a shared file here, so fastnat and fast6 each keep a copy
+static inline bool start_xmit_complete(int rc)
+{
+	/*
+	 * Positive cases with an skb consumed by a driver:
+	 * - successful transmission (rc == NETDEV_TX_OK)
+	 * - error while transmitting (rc < 0)
+	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
+	 */
+	return likely(rc < NET_XMIT_MASK);
+}
+
+/*
+ * fast6_recv - IPv6 fast-forward receive path.  The hash is protected by
+ * RCU; the global connection list by fast6_spinlock.
+ *
+ * Looks up a fast entry by 5-tuple, validates MTU / loopback /
+ * bidirectional state, refreshes connection timeouts, updates accounting
+ * and transmits directly on the cached egress device with the cached MAC
+ * header.  Returns 1 when the packet was consumed (sent or dropped),
+ * 0 to fall back to the normal stack.
+ */
+int fast6_recv(struct sk_buff *skb)
+{
+	struct nf_conntrack_tuple tuple;
+	fast_entry_data_t *fast6_entry_data = NULL;
+	fast_entry_t *fast6_entry = NULL;
+	struct tcphdr *tcph = NULL;
+	struct net_device *dev = NULL;
+	__u8 next_hdr = 0;
+	unsigned char *l4head;
+	struct ipv6hdr *ip6;
+
+	print_sun(SUN_DBG, "enter fast_6_recv \n");
+
+	if (fastnat_level == FAST_CLOSE)
+	{
+		return 0;
+	}
+
+	if (fast6_get_tuple(skb, &tuple) < 0)
+	{
+		print_sun(SUN_DBG, "fast_6_recv get tuple err \n");
+		return 0;
+	}
+
+	/* only TCP/UDP go fast (tuple may also be ICMPv6) */
+	ip6 = ipv6_hdr(skb);
+	if (ip6->nexthdr != IPPROTO_TCP && ip6->nexthdr != IPPROTO_UDP)
+		return 0;
+
+	rcu_read_lock();
+
+	fast6_entry_data = fast_find_entry_data(working_hash6, &tuple);
+	if (fast6_entry_data == NULL)
+	{
+		rcu_read_unlock();
+
+		print_sun(SUN_DBG, "fast_6_recv fast_6_find null \n");
+		return 0;
+	}
+
+	/* check whether the packet exceeds the egress device MTU */
+	dev = fast6_entry_data->outdev;
+	if (!dev || (skb->len > dev->mtu))
+	{
+		skbinfo_add(NULL, SKB_OVER_MTU);
+		rcu_read_unlock();
+
+		print_sun(SUN_DBG, "fast_6_recv outdev err \n");
+		return 0;
+	}
+
+	/* looped-back packets are freed directly and counted */
+	if (strcmp(skb->dev->name, dev->name) == 0)
+	{
+		skbinfo_add(NULL, SKB_LOOP);
+		rcu_read_unlock();
+
+		kfree_skb(skb);
+		printk("loopback skb, free skb\n");
+		return 1;
+	}
+
+	fast6_entry = fast_data_to_entry(fast6_entry_data);
+	if (!fast6_entry)
+	{
+		rcu_read_unlock();
+
+		print_sun(SUN_DBG, "fast_6_recv fast6_entry is null \n");
+		return 0;
+	}
+
+	/* only when both directions are established does the packet go FASTNAT,
+	 * otherwise the standard path is used */
+	/* udp does not need a bidirectional link */
+	if ((fast6_entry->flags != FAST_ALL_DIR) && (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP))
+	{
+		rcu_read_unlock();
+
+		print_sun(SUN_DBG, "fast_6_recv flags is not FAST_ALL_DIR \n");
+		return 0;
+	}
+
+	/* skip the IPv6 header, get the L4 header pointer */
+	l4head = getipv6uppkg(skb->data, &next_hdr, NULL);
+	if (l4head == NULL)
+	{
+		rcu_read_unlock();
+
+		print_sun(SUN_DBG, "fast_6_recv l4head is null \n");
+		return 0;
+	}
+
+
+
+	if (!(skb = fast_expand_headroom_v6(skb, dev))){
+		rcu_read_unlock();
+		return 1;
+	}
+
+	fast_tcpdump(skb);
+
+	/* if capture matched, the data was cloned; fast success must modify
+	 * data, so make a private copy */
+	if (skb_cloned(skb))
+	{
+		print_sun(SUN_DBG, "fast6_recv clone \n");
+		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+		{
+			rcu_read_unlock();
+
+			print_sun(SUN_DBG, "fast6_recv clone copy failed !!!\n");
+			printk("pskb_expand_head skb failed, free skb\n");
+			kfree_skb(skb);
+			return 1;
+		}
+	}
+
+	/* only once fast6 succeeded may the IP header start be recorded, for
+	 * later cache flushing */
+	skb_reset_network_header(skb);
+	skb->isFastnat = 1;
+	skb->priority = fast6_entry_data->priority;
+	skb->mark = fast6_entry_data->mark;
+
+
+	/* kernel's native per-connection accounting */
+	struct nf_conn_counter *acct = (struct nf_conn_counter*)nf_conn_acct_find(fast6_entry->ct);
+	if (acct) {
+		enum ip_conntrack_info ctinfo;
+		if (fast6_entry_data->tuplehash.tuple.dst.dir == IP_CT_DIR_ORIGINAL)
+			ctinfo = IP_CT_ESTABLISHED;
+		else
+			ctinfo = IP_CT_ESTABLISHED_REPLY;
+
+		atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets);
+		atomic64_add(skb->len, &acct[CTINFO2DIR(ctinfo)].bytes);
+	}
+
+	/* custom feature: with one-way UDP flooding the indev is unknown for
+	 * accounting, so learn it from the first packet seen */
+	if ((fast6_entry_data->indev == NULL) && skb->dev)
+	{
+		fast6_entry_data->indev = skb->dev;
+	}
+
+	/* count rx on the ingress device --- like native linux drivers, counts IP packet length */
+	if (fast6_entry_data->indev && (fastnat_level == FAST_NET_DEVICE))
+	{
+		fast6_entry_data->indev->stats.rx_packets++;
+		fast6_entry_data->indev->stats.rx_bytes += skb->len;
+	}
+
+	skb->dev = dev;
+
+	/* prepend the cached MAC header recorded at link setup */
+	skb_push(skb, ETH_HLEN);
+
+	memcpy(skb->data, fast6_entry_data->hh_data, ETH_HLEN);
+	/* refresh connection timeouts */
+	if (IPPROTO_TCP == tuple.dst.protonum)
+	{
+		mod_timer(&fast6_entry->timeout, jiffies + tcp_timeouts[fast6_entry->ct->proto.tcp.state]);
+		tcph = (struct tcphdr *)l4head;
+		update_tcp_timeout(fast6_entry, fast6_entry_data, tcph);
+		fast6_entry->ct->timeout = jiffies + tcp_timeouts[fast6_entry->ct->proto.tcp.state];
+	}
+	else if (IPPROTO_UDP == tuple.dst.protonum)
+	{
+		/* udp: longer timeout once replies were seen (stream) */
+		if (test_bit(IPS_SEEN_REPLY_BIT, &fast6_entry->ct->status))
+		{
+			mod_timer(&fast6_entry->timeout, jiffies + fast_udp_timeout_stream);
+			fast6_entry->ct->timeout = jiffies + fast_udp_timeout_stream;
+		}
+		else
+		{
+			mod_timer(&fast6_entry->timeout, jiffies + fast_udp_timeout);
+			fast6_entry->ct->timeout = jiffies + fast_udp_timeout;
+		}
+	}
+
+	if (skb->dev->flags & IFF_UP)
+	{
+		/* ppp carries bare IP packets: strip the MAC header again */
+		if (strncmp(skb->dev->name, ppp_name, strlen(ppp_name)) == 0)
+		{
+			skb_pull(skb, ETH_HLEN);
+		}
+
+		skb->now_location |= FAST6_SUCC;
+		if (fastnat_level == FAST_NET_DEVICE)
+		{
+			print_sun(SUN_DBG, "fastnat-2 dev_queue_xmit, send to:%s !!!!!!!! \n", skb->dev->name);
+			dev_queue_xmit(skb);
+		}
+		/* special apps like traffic control must go the standard fastnat
+		 * flow, otherwise their plugins cannot run */
+		else if (fastnat_level == FAST_NET_CORE)
+		{
+			dev_queue_xmit(skb);
+		}
+		/*add by jiangjing*/
+		fast6_entry_data->packet_num++;
+
+	}
+	else
+	{
+		print_sun(SUN_DBG, "fast6_recv ERR &&&&&& %s DOWN, kfree_skb!!!!!!!! \n", skb->dev->name);
+		kfree_skb(skb);
+	}
+
+	rcu_read_unlock();
+
+	print_sun(SUN_DBG, "fast_6_recv okokok \n");
+	return 1;
+}
+
+/* POST_ROUTING hook registration for IPv6 fast-link setup; runs after all
+ * other IPv6 POST_ROUTING hooks (NF_IP6_PRI_LAST).
+ * NOTE(review): napt6_handle is defined below without a visible forward
+ * declaration - presumably prototyped in a header; confirm. */
+static struct nf_hook_ops fast6_hook = {
+	.hook = napt6_handle,
+	//.owner = THIS_MODULE,
+	.pf = PF_INET6,
+	.hooknum = NF_INET_POST_ROUTING,
+	.priority = NF_IP6_PRI_LAST,
+};
+
+/*
+ * napt6_handle - POST_ROUTING hook that builds the IPv6 fast link:
+ * for established TCP (or any UDP) conntracks it allocates/updates a
+ * fast entry for the packet's direction, caches the MAC header, egress
+ * device, priority and mark, and inserts the entry into working_hash6.
+ * Always returns NF_ACCEPT; it only records state.
+ */
+unsigned int napt6_handle(void* priv,
+	       struct sk_buff *skb,
+	       const struct nf_hook_state *state)
+{
+	struct nf_conn *ct;
+	enum ip_conntrack_info ctinfo;
+	u_int8_t protocol;
+	fast_entry_t *fast6_entry;
+	fast_entry_data_t *fast6_entry_data;
+	enum ip_conntrack_dir dir, rdir;
+	struct dst_entry *dst = skb_dst(skb);
+#ifdef CONFIG_ATHRS_HW_NAT
+	u_int32_t mask =0;
+#endif
+	struct neighbour *_neighbour = NULL;
+	struct net_device *out = state->out;
+
+
+	/* fast-forward master switch */
+	if (fastnat_level == FAST_CLOSE || fastnat_level == FAST_CLOSE_KEEP_LINK)
+	{
+		return NF_ACCEPT;
+	}
+
+	/* NOTE(review): this bails out when FAST_TYPE_VERSION_BIT is SET,
+	 * the inverse of the IPv4 hook's test - confirm intended */
+	if (test_bit(FAST_TYPE_VERSION_BIT, &fast_switch))
+	{
+		return NF_ACCEPT;
+	}
+
+	if (!out)
+	{
+		return NF_ACCEPT;
+	}
+
+	if (ipv6_hdr(skb)->nexthdr != IPPROTO_TCP && ipv6_hdr(skb)->nexthdr != IPPROTO_UDP)
+		return NF_ACCEPT;
+
+	/* no fast link for multicast */
+	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))
+	{
+		return NF_ACCEPT;
+	}
+
+	/* watch whether the fast link table fills up often; if it does,
+	 * consider overwriting the oldest entry (original note) */
+	if (working_list6.count > nf_conntrack_max)
+	{
+		return NF_ACCEPT;
+	}
+	/* check whether there is a next hop */
+	if (!dst)
+	{
+		return NF_ACCEPT;
+	}
+	_neighbour = dst_neigh_lookup_skb(dst, skb);
+	if(!_neighbour)
+	{
+		return NF_ACCEPT;
+	}
+
+	/* all-zero next-hop MAC: neighbour not resolved yet */
+	if (memcmp(_neighbour->ha, zeromac, ETH_ALEN) == 0)
+	{
+		goto accept;
+	}
+
+	if (!(ct = nf_ct_get(skb, &ctinfo)))
+	{
+		goto accept;
+	}
+	protocol = nf_ct_protonum(ct);
+
+
+	/* only forward */
+	if (!skb->skb_iif)
+	{
+		goto accept;
+	}
+
+
+	/* skip protocols that must not be fastnat'ed */
+	if (check_skip_ports(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all))
+	{
+		goto accept;
+	}
+
+	dir = CTINFO2DIR(ctinfo);
+
+	rdir = (IP_CT_DIR_ORIGINAL == dir) ? IP_CT_DIR_REPLY: IP_CT_DIR_ORIGINAL;
+
+	if (IPPROTO_TCP == protocol)
+	{
+		/* only established */
+		if(!test_bit(IPS_ASSURED_BIT, &ct->status))
+		{
+			goto accept;
+		}
+	}
+	else if (IPPROTO_UDP != protocol)
+	{
+		goto accept;
+	}
+
+	spin_lock_bh(&fast6_spinlock);
+	if (!(fast6_entry = fast_get_entry(&working_list6, ct, dir)))
+	{
+		spin_unlock_bh(&fast6_spinlock);
+		goto accept;
+	}
+	fast6_entry->fast_spinlock = &fast6_spinlock;
+
+	/* first link setup: take a ct refcount and adopt its timeout;
+	 * duplicate packets of the first setup must not repeat this */
+	if (!(fast6_entry->flags & FAST_ALL_DIR))
+	{
+		nf_conntrack_get(&ct->ct_general);
+		//del_timer(&ct->timeout);
+		ct->timeout = fast6_entry->timeout.expires;
+	}
+
+	fast6_entry_data = &fast6_entry->data[dir];
+	fast6_entry_data->tuplehash.tuple = ct->tuplehash[dir].tuple;
+
+	memcpy(fast6_entry_data->dmac, _neighbour->ha, ETH_ALEN);
+	fast6_entry_data->priority = skb->priority;
+	fast6_entry_data->mark = skb->mark;
+	fast6_entry_data->outdev = out;
+
+	if (!record_MAC_header(working_hash6, ct, fast6_entry, fast6_entry_data, _neighbour, out, htons(ETH_P_IPV6)))
+	{
+		spin_unlock_bh(&fast6_spinlock);
+		goto accept;
+	}
+
+	/* guarantees the two direction flag bits never conflict */
+	fast6_entry->flags = fast6_entry->flags | (1 << dir);
+
+	fast_add_entry(working_hash6, fast6_entry_data);
+
+	/* both directions known: each direction's indev is the other's outdev */
+	if (fast6_entry->flags == FAST_ALL_DIR)
+	{
+		fast6_entry->data[0].indev = fast6_entry->data[1].outdev;
+		fast6_entry->data[1].indev = fast6_entry->data[0].outdev;
+	}
+
+	spin_unlock_bh(&fast6_spinlock);
+
+	ct->fast_ct.isFast = FAST_CT_WND6;
+
+accept:
+
+	neigh_release(_neighbour);
+	return NF_ACCEPT;
+}
+
+/* Handle notifier-chain events: run @cmd over the ipv6 fast list under
+ * fast6_spinlock. Always returns 0. */
+int fast6_event(traverse_command_t *cmd)
+{
+	spin_lock_bh(&fast6_spinlock);
+	traverse_process(&working_list6, cmd);
+	spin_unlock_bh(&fast6_spinlock);
+	return 0;
+}
+
+/* fastnat_level was switched off: clear all ipv6 fast-forward state under
+ * fast6_spinlock. */
+void fast6_cleanup_links(void)
+{
+	spin_lock_bh(&fast6_spinlock);
+	fast_cleanup_links(&working_list6);
+	spin_unlock_bh(&fast6_spinlock);
+}
+
+/*
+ * tsp_fast6_init - allocate the ipv6 fast-forward hash, init its lock and
+ * register the POST_ROUTING hook.  Returns 0 on success, -EINVAL on
+ * allocation or hook-registration failure (hash freed again in the
+ * latter case).
+ */
+int tsp_fast6_init(void)
+{
+	int ret;
+
+	print_sun(SUN_DBG,"start init fast6\n");
+
+	working_hash6 = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, /*&fast6hash_vmalloc,*/ 1);
+	if (!working_hash6)
+	{
+		print_sun(SUN_DBG, "Unable to create working_hash6\n");
+		return -EINVAL;
+	}
+
+	spin_lock_init(&fast6_spinlock);
+
+	ret = nf_register_net_hook(&init_net, &fast6_hook);
+	if (ret != 0)
+	{
+		print_sun(SUN_DBG,"init fast6 failed\n");
+		goto err;
+	}
+
+	print_sun(SUN_DBG,"init fast6 done\n");
+	return 0;
+
+err:
+	nf_ct_free_hashtable(working_hash6, /*fast6_hash_vmalloc, */nf_conntrack_htable_size);
+	return -EINVAL;
+}
+
+/* Undo tsp_fast6_init(): unregister the hook and free the hash.
+ * Always returns 0. */
+int tsp_fast6_cleanup(void)
+{
+	nf_unregister_net_hook(&init_net, &fast6_hook);
+	nf_ct_free_hashtable(working_hash6, /*fast6_hash_vmalloc,*/ nf_conntrack_htable_size);
+
+	print_sun(SUN_DBG,"fast6 cleanup done\n");
+	return 0;
+}
+
diff --git a/upstream/linux-5.10/net/core/fastproc/fast6_fw.c b/upstream/linux-5.10/net/core/fastproc/fast6_fw.c
new file mode 100755
index 0000000..322175b
--- /dev/null
+++ b/upstream/linux-5.10/net/core/fastproc/fast6_fw.c
@@ -0,0 +1,395 @@
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/icmp.h>
+#include <net/ip.h>
+#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter_arp.h>
+#include <linux/netfilter/xt_multiport.h>
+#include <linux/netfilter/xt_iprange.h>
+#include <linux/netfilter/nf_conntrack_tcp.h>
+#include <net/checksum.h>
+#include <net/dsfield.h>
+#include <net/route.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <net/SI/fast6.h>
+#include <net/SI/fast_common.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
+#include <net/netfilter/nf_conntrack_timeout.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/ip6_fib.h>
+#include <net/SI/net_track.h>
+#include <net/SI/net_other.h>
+#include <net/SI/netioc_proc.h>
+#include <net/SI/net_cache.h>
+#include <net/SI/print_sun.h>
+
+MODULE_LICENSE("GPL");
+
+/* ***************** ipv6 fast-forwarding related variables ************************* */
+
+
+/* **************************** function declarations ************************ */
+
+
+/* **************************** function implementations ************************ */
+extern u32 rt6_peer_genid(void);
+
+int dst_expire_count_v6 = 0;
+extern int no_neighbour;
+extern void ntl_ct_set_iw(struct sk_buff *skb, struct nf_conn *ct, int ct_dir);
+/*
+ * fast6_fw_recv - IPv6 fast-forward transmit path for an already-learned
+ * conntrack. Looks up the cached dst/bridge port for this direction,
+ * re-runs the conntrack L4 state machine via
+ * nf_conntrack_handle_packet_fast(), builds the Ethernet header from the
+ * neighbour entry, and transmits directly with dev_queue_xmit() —
+ * bypassing the normal IP stack.
+ *
+ * Caller is expected to hold fast_fw_spinlock (it is dropped around
+ * dev_queue_xmit() below and re-taken) — TODO confirm against callers.
+ *
+ * Returns 1 when the packet was consumed by the fast path (sent or
+ * dropped), 0 when the packet must continue through the slow path.
+ */
+int fast6_fw_recv(struct nf_conn *tmpl,
+ struct sk_buff *skb,
+ struct nf_conn *ct,
+ struct nf_conntrack_l4proto *l4proto,
+ unsigned int dataoff,
+ int dir,
+ u_int8_t protonum)
+{
+ struct net_device *dev = NULL;
+ enum ip_conntrack_info ctinfo;
+ int ret;
+ int rdir;
+ struct ethhdr * eth;
+ __u8 next_hdr = 0;
+ unsigned char *l4head;
+ struct dst_entry *dst_dir = NULL;
+ struct neighbour *_neighbour = NULL;
+
+ /* Preserve the VLAN fields; they are restored just before xmit. */
+ __be16 vlan_proto_raw = skb->vlan_proto;
+ __u16 vlan_tci_raw = skb->vlan_tci;
+
+ rdir = (IP_CT_DIR_ORIGINAL == dir) ? IP_CT_DIR_REPLY: IP_CT_DIR_ORIGINAL;
+ dst_dir = dst_get_by_ct(ct, dir);
+
+ struct nf_hook_state state = {
+ .hook = NF_INET_PRE_ROUTING,
+ .net = &init_net,
+ .in = skb->dev,
+ .pf = NFPROTO_IPV6,
+ };
+
+
+ /* TCP needs the link learned in both directions; UDP needs one.
+ * No cached dst for this direction -> fall back to the slow path. */
+ if (!dst_dir)
+ {
+ goto err_out;
+ }
+
+ /* If a bridge port was recorded, send straight to that port. */
+ if (ct->fast_ct.fast_brport[dir])
+ {
+ rcu_read_lock();
+ dev = rcu_dereference_protected(ct->fast_ct.fast_brport[dir], 1);
+ rcu_read_unlock();
+ }
+ else {
+ dev = dst_dir->dev;
+ }
+
+ /* Check that the packet does not exceed the egress device MTU. */
+ if (!dev || (skb->len > dev->mtu))
+ {
+ skbinfo_add(NULL, SKB_OVER_MTU);
+ goto err_out;
+ }
+
+ /* Packet would loop back out of its ingress device: free and count. */
+ if (strcmp(skb->dev->name, dev->name) == 0)
+ {
+ skbinfo_add(NULL, SKB_LOOP);
+ //nf_conntrack_put(&ct->ct_general);
+ kfree_skb(skb);
+ goto drop_packet;
+ }
+
+ /* Reconstruct ctinfo from direction and conntrack status bits. */
+ if (dir == 1) {
+ ctinfo = IP_CT_ESTABLISHED_REPLY;
+ } else {
+ if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
+ ctinfo = IP_CT_ESTABLISHED;
+ } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
+ ctinfo = IP_CT_RELATED;
+ } else {
+ ctinfo = IP_CT_NEW;
+ }
+ }
+
+
+
+ ret = nf_conntrack_handle_packet_fast(ct, skb, dataoff, ctinfo, &state);
+ if (ret <= 0) {
+ skb->_nfct = 0;
+ goto err_out; /* skb must stay unmodified until fast succeeds,
+ * otherwise failure would require a rollback. */
+ }
+ /* Ensure the egress device's required headroom; expand if short. */
+ if (!(skb = fast_expand_headroom_v6(skb, dev))) {
+ //nf_conntrack_put(&ct->ct_general);
+ goto drop_packet;
+ }
+
+ fast_tcpdump(skb);
+
+ /* If a capture matched, the skb was cloned; the fast path rewrites
+ * the payload in place, so take a private copy first. */
+ if (skb_cloned(skb))
+ {
+ if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ {
+ print_sun(SUN_DBG, "fast6_fw_recv clone copy failed !!!\n");
+ kfree_skb(skb);
+ goto drop_packet;
+ }
+ clean_cache(skb->data,skb->len);
+ }
+
+ /* Only once fastnat succeeded: fix the network-header offset so the
+ * later cache flush covers the right range. */
+ skb_reset_network_header(skb);
+ skb->isFastnat = 1;
+ nf_ct_set(skb, (struct nf_conn *)&ct->ct_general, ctinfo);
+
+
+ /* Per-interface traffic accounting — like native Linux drivers,
+ * counts the IP packet length. */
+ if (fastnat_level == FAST_NET_DEVICE)
+ {
+ skb->dev->stats.rx_packets++;
+ skb->dev->stats.rx_bytes += skb->len;
+ }
+
+ if (dev->flags & IFF_UP)
+ {
+ if (!(dev->flags & IFF_POINTOPOINT)) {
+ /* Only non-PPP ports get an Ethernet header prepended;
+ * otherwise the frame starts at the IP header. */
+ skb_push(skb, ETH_HLEN);
+ skb_reset_mac_header(skb);
+ if(skb->isvlan == 1)
+ {
+ struct vlan_ethhdr *vlan_eth = (struct vlan_ethhdr*)(skb->data - VLAN_HLEN);
+ skb->vlan_proto = vlan_eth->h_vlan_proto;
+ skb->vlan_tci = ntohs(vlan_eth->h_vlan_TCI);
+ }
+ eth = (struct ethhdr *)skb->data;
+ /* Egress device MAC becomes the frame's source MAC. */
+ memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
+ _neighbour = dst_neigh_lookup_skb(dst_dir, skb);
+ if (_neighbour)
+ {
+ memcpy(eth->h_dest, _neighbour->ha, ETH_ALEN);
+ neigh_release(_neighbour);
+ }
+ else {
+ /* No neighbour: drop and count (no ARP/ND from here). */
+ __flush_dcache_area(skb->data, skb->len);
+ kfree_skb(skb);
+ no_neighbour++;
+ goto drop_packet;
+ }
+
+ eth->h_proto = htons(ETH_P_IPV6);
+ }
+ skb->dev = dev;
+ skb->now_location |= FASTNAT_SUCC;
+ skb->vlan_proto = vlan_proto_raw;
+ skb->vlan_tci = vlan_tci_raw;
+ __flush_dcache_area(skb->data, skb->len);
+ /* Drop the fast lock around xmit to avoid holding it across the
+ * driver; re-taken afterwards for the caller's benefit. */
+ spin_unlock_bh(&fast_fw_spinlock);
+ dev_queue_xmit(skb);
+ spin_lock_bh(&fast_fw_spinlock);
+ }
+ else
+ {
+ print_sun(SUN_DBG, "ERR &&&&&& %s DOWN, kfree_skb!!!!!!!! \n", skb->dev->name);
+ kfree_skb(skb);
+ }
+
+
+ print_sun(SUN_DBG, "skb : 0x%x, fast6_fw succ--------", skb);
+
+succ_out:
+drop_packet:
+ /* Packet consumed (sent or freed): release template ct and dst. */
+ if (tmpl)
+ nf_conntrack_put(&tmpl->ct_general);
+ dst_release(dst_dir);
+ return 1;
+
+err_out :
+ /* Fast path declined: restore the skb's conntrack to the template (or
+ * clear it) so the slow path can process the packet normally. */
+ dst_release(dst_dir);
+
+ nf_conntrack_put(&ct->ct_general);
+ print_sun(SUN_DBG, "skb : 0x%x, fast6_fw fail!!!!!!!!!!", skb);
+ if (tmpl) {
+ nf_ct_set(skb, (struct nf_conn *)&tmpl->ct_general, IP_CT_NEW);
+ }
+ else {
+ skb->_nfct = 0;
+ }
+ return 0; /* not fast nat */
+}
+
+/*
+ * napt_handle6_fw - netfilter hook at NF_INET_POST_ROUTING that learns
+ * IPv6 forward-path fast links: records the skb's dst (and bridge port)
+ * on the conntrack so later packets can take fast6_fw_recv().
+ * Always returns NF_ACCEPT — it only observes, never drops.
+ */
+unsigned int napt_handle6_fw(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+ u_int8_t protocol;
+ enum ip_conntrack_dir dir;
+ struct dst_entry *dst = skb_dst(skb);
+#ifdef CONFIG_ATHRS_HW_NAT
+ u_int32_t mask =0;
+#endif
+ struct neighbour *_neighbour = NULL;
+ struct net_device *out = state->out;
+
+ /* Global fast-forward master switch. */
+ if (fastnat_level == FAST_CLOSE || fastnat_level == FAST_CLOSE_KEEP_LINK)
+ {
+ return NF_ACCEPT;
+ }
+
+ /* Per-feature fast-forward bitmap switch. */
+ if (!test_bit(FAST_TYPE_VERSION_BIT, &fast_switch)
+ || !test_bit(FAST_TYPE_FW6_BIT, &fast_switch))
+ {
+ return NF_ACCEPT;
+ }
+
+ if (!out)
+ {
+ return NF_ACCEPT;
+ }
+
+ /* Only TCP/UDP support fast. An ICMPv6 packet may resolve to a
+ * TCP/UDP ct (e.g. port unreachable), so check the header explicitly.
+ * NOTE(review): this reads nexthdr directly, so flows with IPv6
+ * extension headers are skipped here — confirm that is intended. */
+ if (ipv6_hdr(skb)->nexthdr != IPPROTO_TCP && ipv6_hdr(skb)->nexthdr != IPPROTO_UDP)
+ return NF_ACCEPT;
+
+ /* No fast link for multicast destinations. */
+ if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))
+ {
+ return NF_ACCEPT;
+ }
+
+ /* Require a route (next hop). */
+ if(!dst)
+ {
+ return NF_ACCEPT;
+ }
+
+ _neighbour = dst_neigh_lookup_skb(dst, skb);
+ if(!_neighbour)
+ {
+ return NF_ACCEPT;
+ }
+
+ /* Unresolved neighbour (all-zero MAC): don't learn yet. */
+ if (memcmp(_neighbour->ha, zeromac, ETH_ALEN) == 0)
+ {
+ goto accept;
+ }
+
+ /* Check that the packet does not exceed the egress device MTU. */
+ if (dst->dev && (skb->len > dst->dev->mtu))
+ {
+ goto accept;
+ }
+ if (!(ct = nf_ct_get(skb, &ctinfo)))
+ {
+ goto accept;
+ }
+
+ protocol = nf_ct_protonum(ct);
+
+ /* only forward */
+ if (!skb->skb_iif)
+ {
+ goto accept;
+ }
+
+ /* Filter out protocols/ports that must not take the fastnat path. */
+ if (check_skip_ports(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all))
+ {
+ goto accept;
+ }
+
+ dir = CTINFO2DIR(ctinfo);
+
+ /* Fastnat applies to TCP/UDP only. */
+ if (IPPROTO_TCP == protocol)
+ {
+ /* TCP: wait for the three-way handshake to complete. */
+ if(!test_bit(IPS_ASSURED_BIT, &ct->status))
+ {
+ goto accept;
+ }
+ }
+ else if (IPPROTO_UDP != protocol)
+ {
+ goto accept;
+ }
+
+ spin_lock_bh(&fast_fw_spinlock);
+ /* Route changed for this direction: drop the stale fast link first. */
+ if (ct->fast_ct.fast_dst[dir] && (ct->fast_ct.fast_dst[dir] != dst))
+ {
+ fast_fw_conn_release(ct);
+ }
+
+ if (!ct->fast_ct.fast_dst[dir])
+ {
+ rcu_assign_pointer(ct->fast_ct.fast_dst[dir], dst);
+ ct->fast_ct.fast_brport[dir] = getBridgePort(_neighbour, out);
+ fast_dst_add_ct(dst, ct);
+ }
+
+ ct->fast_ct.isFast = FAST_CT_FW6;
+ spin_unlock_bh(&fast_fw_spinlock);
+accept:
+
+ neigh_release(_neighbour);
+ return NF_ACCEPT;
+}
+
+/* Hook registration for the IPv6 forward-path learner: runs last at
+ * POST_ROUTING so NAT/mangle have already finished with the packet. */
+static struct nf_hook_ops fast6_fw_hook = {
+ .hook = napt_handle6_fw,
+ //.owner = THIS_MODULE,
+ .pf = PF_INET6,
+ .hooknum = NF_INET_POST_ROUTING,
+ .priority = NF_IP6_PRI_LAST,
+};
+
+
+/* Register the IPv6 forward-path learning hook on init_net.
+ * Returns 0 on success, -EINVAL on registration failure. */
+int fast6_fw_init(void)
+{
+ int ret = 0;
+
+ ret = nf_register_net_hook(&init_net, &fast6_fw_hook);
+ if (ret != 0)
+ {
+ print_sun(SUN_ERR,"init fast6_fw_init failed\n");
+ return -EINVAL;
+ }
+ print_sun(SUN_DBG,"init fast6_fw_init done\n");
+
+ return 0;
+}
+
+/* Release all learned dst references, then unregister the hook.
+ * Always returns 0. Counterpart of fast6_fw_init(). */
+int fast6_fw_cleanup(void)
+{
+ fast_release_all(RELEASE_ALL_DST);
+ nf_unregister_net_hook(&init_net, &fast6_fw_hook);
+ return 0;
+}
+
diff --git a/upstream/linux-5.10/net/core/fastproc/fast_common.c b/upstream/linux-5.10/net/core/fastproc/fast_common.c
new file mode 100755
index 0000000..69f3761
--- /dev/null
+++ b/upstream/linux-5.10/net/core/fastproc/fast_common.c
@@ -0,0 +1,2113 @@
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/icmp.h>
+#include <net/ip.h>
+#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_arp.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter/xt_multiport.h>
+#include <linux/netfilter/xt_iprange.h>
+#include <linux/netfilter/nf_conntrack_tcp.h>
+#include <net/checksum.h>
+#include <net/dsfield.h>
+#include <net/route.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <net/SI/fast_common.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
+#include <net/netfilter/nf_conntrack_timeout.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/SI/net_track.h>
+#include <net/SI/net_other.h>
+#include <net/SI/netioc_proc.h>
+#include <net/SI/net_cache.h>
+#include <net/SI/print_sun.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+
+
+MODULE_LICENSE("GPL");
+
+/* ************************** variables shared by the fast-forward paths ************************** */
+struct kmem_cache *fast_head_cache;
+
+spinlock_t fast_fw_spinlock; /* spinlock protecting forward-path fast data */
+spinlock_t fastlocal_spinlock; /* spinlock protecting local-path fast data */
+
+/*
+* 0: fastnat off, use the standard Linux path; existing links may be torn down and re-learned
+* 1: standard fastnat at the IP layer, scheduled via softirq; slightly lower performance but supports e.g. traffic control
+* 2: driver-to-driver forwarding at the net_device layer, no softirq
+* 5: fastnat off, use the standard Linux path, but keep existing links (do not re-learn)
+* The value can be changed through proc
+*/
+int fastnat_level = FAST_NET_DEVICE;/*FAST_NET_DEVICE; modify by zdd, close fastnat*/
+
+/* λͼ·½Ê½ --- ¸÷×Ó¹¦ÄÜ¿ìËÙת·¢¿ª¹Ø£¬²Î¿¼fast_common.hÖж¨Òå*/
+
+//unsigned long fast_switch = 0x67;
+unsigned long fast_switch = 0x0;
+
+/* ************************ ×ÓÍø¼ä ¿ìËÙת·¢Ïà¹Ø±äÁ¿ ************************ */
+int fastbr_level = 1; //ÊÇ·ñ´ò¿ªfastbr¹¦ÄÜ
+
+/* **************************** ƽ̨»¯Ïà¹Ø±äÁ¿ **************************** */
+/*
+ *ÏÂÃæbr_nameµÈ¼¸¸ö×Ö·û´®Êý×éͨ¹ýproc½ÚµãÉèÖã¬
+ *´Ë´¦½öÊǶ¨ÒåºÍ¸³³õÖµ¡£
+ */
+char br_name[MAX_NET_DEVICE_NAME_LEN + 1] = "br0";
+char ps_name[MAX_NET_DEVICE_NAME_LEN + 1] = "wan1";
+char usb_name[MAX_NET_DEVICE_NAME_LEN + 1] = "usblan0";
+char ppp_name[MAX_NET_DEVICE_NAME_LEN + 1] = "ppp";
+//cp:µ¥ºË£¬Ä£¿éÐÎ̬ÐèҪ·Óɶ¨ÖÆ
+//ap:Ë«ºË£¬Ä£¿éÐÎ̬¿É¼òµ¥ÇŽÓ
+char need_jilian[MAX_NET_DEVICE_NAME_LEN + 1] = "0";
+
+//ÒÔÏÂÈý¸öÓÅÏȼ¶ÅäÖã¬Ö»×¼ÍøÂç×é¹Ç¸Éµ÷Õû£¬·ñÔòÑÏÖØÓ°Ïì¸÷¸ö·½ÏòµÄÐÔÄÜ
+int fast_br_level = 1;
+int fast_fwd_level = 2;
+int fast_local_level = 0;
+unsigned char zeromac[ETH_ALEN] = "";
+
+/* ÄÚºËÁ´Â·×´Ì¬µÈµÄ±äÁ¿¡¢½Ó¿ÚÌ壬À´×ÔÄںˣ¬ÒÆÖ²ÐÂÄÚºËʱÐèÒª¸üÐÂ*/
+#define sNO TCP_CONNTRACK_NONE
+#define sSS TCP_CONNTRACK_SYN_SENT
+#define sSR TCP_CONNTRACK_SYN_RECV
+#define sES TCP_CONNTRACK_ESTABLISHED
+#define sFW TCP_CONNTRACK_FIN_WAIT
+#define sCW TCP_CONNTRACK_CLOSE_WAIT
+#define sLA TCP_CONNTRACK_LAST_ACK
+#define sTW TCP_CONNTRACK_TIME_WAIT
+#define sCL TCP_CONNTRACK_CLOSE
+#define sS2 TCP_CONNTRACK_SYN_SENT2
+#define sIV TCP_CONNTRACK_MAX
+#define sIG TCP_CONNTRACK_IGNORE
+
+/* What TCP flags are set from RST/SYN/FIN/ACK. */
+enum tcp_bit_set {
+ TCP_SYN_SET,
+ TCP_SYNACK_SET,
+ TCP_FIN_SET,
+ TCP_ACK_SET,
+ TCP_RST_SET,
+ TCP_NONE_SET,
+};
+
+//À´×Ônf_conntrack_proto_tcp.c
+static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
+ {
+ /* ORIGINAL */
+ /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
+ /*syn*/ { sSS, sSS, sIG, sIG, sIG, sIG, sIG, sSS, sSS, sS2 },
+ /*
+ * sNO -> sSS Initialize a new connection
+ * sSS -> sSS Retransmitted SYN
+ * sS2 -> sS2 Late retransmitted SYN
+ * sSR -> sIG
+ * sES -> sIG Error: SYNs in window outside the SYN_SENT state
+ * are errors. Receiver will reply with RST
+ * and close the connection.
+ * Or we are not in sync and hold a dead connection.
+ * sFW -> sIG
+ * sCW -> sIG
+ * sLA -> sIG
+ * sTW -> sSS Reopened connection (RFC 1122).
+ * sCL -> sSS
+ */
+ /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
+ /*synack*/ { sIV, sIV, sSR, sIV, sIV, sIV, sIV, sIV, sIV, sSR },
+ /*
+ * sNO -> sIV Too late and no reason to do anything
+ * sSS -> sIV Client can't send SYN and then SYN/ACK
+ * sS2 -> sSR SYN/ACK sent to SYN2 in simultaneous open
+ * sSR -> sSR Late retransmitted SYN/ACK in simultaneous open
+ * sES -> sIV Invalid SYN/ACK packets sent by the client
+ * sFW -> sIV
+ * sCW -> sIV
+ * sLA -> sIV
+ * sTW -> sIV
+ * sCL -> sIV
+ */
+ /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
+ /*fin*/ { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
+ /*
+ * sNO -> sIV Too late and no reason to do anything...
+ * sSS -> sIV Client migth not send FIN in this state:
+ * we enforce waiting for a SYN/ACK reply first.
+ * sS2 -> sIV
+ * sSR -> sFW Close started.
+ * sES -> sFW
+ * sFW -> sLA FIN seen in both directions, waiting for
+ * the last ACK.
+ * Migth be a retransmitted FIN as well...
+ * sCW -> sLA
+ * sLA -> sLA Retransmitted FIN. Remain in the same state.
+ * sTW -> sTW
+ * sCL -> sCL
+ */
+ /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
+ /*ack*/ { sES, sIV, sES, sES, sCW, sCW, sTW, sTW, sCL, sIV },
+ /*
+ * sNO -> sES Assumed.
+ * sSS -> sIV ACK is invalid: we haven't seen a SYN/ACK yet.
+ * sS2 -> sIV
+ * sSR -> sES Established state is reached.
+ * sES -> sES :-)
+ * sFW -> sCW Normal close request answered by ACK.
+ * sCW -> sCW
+ * sLA -> sTW Last ACK detected.
+ * sTW -> sTW Retransmitted last ACK. Remain in the same state.
+ * sCL -> sCL
+ */
+ /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
+ /*rst*/ { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
+ /*none*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
+ },
+ {
+ /* REPLY */
+ /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
+ /*syn*/ { sIV, sS2, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sS2 },
+ /*
+ * sNO -> sIV Never reached.
+ * sSS -> sS2 Simultaneous open
+ * sS2 -> sS2 Retransmitted simultaneous SYN
+ * sSR -> sIV Invalid SYN packets sent by the server
+ * sES -> sIV
+ * sFW -> sIV
+ * sCW -> sIV
+ * sLA -> sIV
+ * sTW -> sIV Reopened connection, but server may not do it.
+ * sCL -> sIV
+ */
+ /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
+ /*synack*/ { sIV, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
+ /*
+ * sSS -> sSR Standard open.
+ * sS2 -> sSR Simultaneous open
+ * sSR -> sIG Retransmitted SYN/ACK, ignore it.
+ * sES -> sIG Late retransmitted SYN/ACK?
+ * sFW -> sIG Might be SYN/ACK answering ignored SYN
+ * sCW -> sIG
+ * sLA -> sIG
+ * sTW -> sIG
+ * sCL -> sIG
+ */
+ /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
+ /*fin*/ { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
+ /*
+ * sSS -> sIV Server might not send FIN in this state.
+ * sS2 -> sIV
+ * sSR -> sFW Close started.
+ * sES -> sFW
+ * sFW -> sLA FIN seen in both directions.
+ * sCW -> sLA
+ * sLA -> sLA Retransmitted FIN.
+ * sTW -> sTW
+ * sCL -> sCL
+ */
+ /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
+ /*ack*/ { sIV, sIG, sSR, sES, sCW, sCW, sTW, sTW, sCL, sIG },
+ /*
+ * sSS -> sIG Might be a half-open connection.
+ * sS2 -> sIG
+ * sSR -> sSR Might answer late resent SYN.
+ * sES -> sES :-)
+ * sFW -> sCW Normal close request answered by ACK.
+ * sCW -> sCW
+ * sLA -> sTW Last ACK detected.
+ * sTW -> sTW Retransmitted last ACK.
+ * sCL -> sCL
+ */
+ /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
+ /*rst*/ { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
+ /*none*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
+ }
+};
+
+#define SECS * HZ
+#define MINS * 60 SECS
+#define HOURS * 60 MINS
+#define DAYS * 24 HOURS
+
+//À´×Ônf_conntrack_proto_tcp.c
+unsigned int tcp_timeouts[TCP_CONNTRACK_TIMEOUT_MAX] __read_mostly = {
+ [TCP_CONNTRACK_SYN_SENT] = 2 MINS,
+ [TCP_CONNTRACK_SYN_RECV] = 5 MINS, //60 SECS,
+ [TCP_CONNTRACK_ESTABLISHED] = 2 HOURS, //5 DAYS
+ [TCP_CONNTRACK_FIN_WAIT] = 2 MINS,
+ [TCP_CONNTRACK_CLOSE_WAIT] = 60 SECS,
+ [TCP_CONNTRACK_LAST_ACK] = 30 SECS,
+ [TCP_CONNTRACK_TIME_WAIT] = 2 MINS,
+ [TCP_CONNTRACK_CLOSE] = 120 SECS, /*normal is 10SEC*/
+ [TCP_CONNTRACK_SYN_SENT2] = 2 MINS,
+ /* RFC1122 says the R2 limit should be at least 100 seconds.
+ Linux uses 15 packets as limit, which corresponds
+ to ~13-30min depending on RTO. */
+ //[TCP_CONNTRACK_MAX] = 2 MINS,
+ //[TCP_CONNTRACK_IGNORE] = 2 MINS,
+ [TCP_CONNTRACK_RETRANS] = 5 MINS,
+ [TCP_CONNTRACK_UNACK] = 5 MINS,
+};
+
+unsigned int fast_udp_timeout_stream = 180*HZ;
+unsigned int fast_udp_timeout = 120*HZ; /*normal is 30*HZ*/
+
+// Protocol types that do not support fastnat.
+// Deprecated: now passed in dynamically via proc as nofast_proto; kept for a while so developers can see the port/protocol numbers.
+unsigned int nofast_port[NOFAST_PROTO_MAX] = {
+ 21, // FTP¶Ë¿Ú£¬ÓÐʱ±»Îļþ·þÎñÐÒé (FSP)ʹÓÃ
+ 22, // ssh °²È«Shell(SSH)·þÎñ
+ 23, // telnet Telnet ·þÎñ
+ 25, // smtp ¼òµ¥Óʼþ´«ÊäÐÒé(SMTP)
+ 53, // domain ÓòÃû·þÎñ(Èç BIND)
+ 67, // server¶Ëdhcp·þÎñ¶Ë¿Ú
+ 68, // client¶Ëdhcp·þÎñ¶Ë¿Ú
+ 69, // tftp СÎļþ´«ÊäÐÒé(TFTP)
+ 110, // ÓʾÖÐÒé°æ±¾3
+ 115, // sftp °²È«Îļþ´«ÊäÐÒé(SFTP)·þÎñ
+ 123, // ntp ÍøÂçʱ¼äÐÒé(NTP)
+ 443, // https °²È«³¬Îı¾´«ÊäÐÒé(HTTP)
+ 500, // isakmp »¥ÁªÍø°²È«¹ØÁªºÍÔ¿³×¹ÜÀíÐÒé(ISAKMP)
+ 1352, // Lotus Notes
+ 1723, // PPTP TCP
+ 1990, // stun-p1 cisco STUN Priority 1 port
+ 1991, // stun-p2 cisco STUN Priority 2 port
+ 1992, // stun-p3 cisco STUN Priority 3 port,ipsendmsg IPsendmsg
+ 1993, // snmp-tcp-port cisco SNMP TCP port
+ 1994, // stun-port cisco serial tunnel portTCP
+ 1995, // perf-port cisco perf portTCP
+ 1996, // tr-rsrb-port cisco Remote SRB portTCP
+ 1997, // gdp-port Cisco Íø¹Ø·¢ÏÖÐÒé(GDP)
+ 1998, // x25-svc-port cisco X.25 service
+ 4500, // NAT-T UDP
+ 5060 // ¶Ë¿Ú¶Ë¿Ú:5060/udpÃèÊö:SessionInitiationProtocol(SIP»Ø»°·¢ÆðÐÒé)
+};
+
+/* ******************************* º¯ÊýÉêÃ÷ ******************************* */
+int (*fast_nat4_proc)(struct sk_buff *skb);
+int (*fast_nat6_proc)(struct sk_buff *skb);
+int (*fast_fw4_proc)(struct nf_conn *tmpl,
+ struct sk_buff *skb,
+ struct nf_conn *ct,
+ struct nf_conntrack_l4proto *l4proto,
+ unsigned int dataoff,
+ int dir,
+ u_int8_t protonum);
+int (*fast_fw6_proc)(struct nf_conn *tmpl,
+ struct sk_buff *skb,
+ struct nf_conn *ct,
+ struct nf_conntrack_l4proto *l4proto,
+ unsigned int dataoff,
+ int dir,
+ u_int8_t protonum);
+
+int (*fast_local4_proc)(struct nf_conn *tmpl,
+ struct sk_buff *skb,
+ struct nf_conn *ct,
+ struct nf_conntrack_l4proto *l4proto,
+ unsigned int dataoff,
+ int dir,
+ u_int8_t protonum);
+int (*fast_local6_proc)(struct nf_conn *tmpl,
+ struct sk_buff *skb,
+ struct nf_conn *ct,
+ struct nf_conntrack_l4proto *l4proto,
+ unsigned int dataoff,
+ int dir,
+ u_int8_t protonum);
+int (*fast_local4_output_proc)(struct sk_buff *skb);
+int (*fast_local6_output_proc)(struct sk_buff *skb);
+
+int (*fast_br_proc)(struct sk_buff *skb);
+
+extern int fast_nat_recv(struct sk_buff *skb);
+extern int fast6_recv(struct sk_buff *skb);
+
+unsigned long iphdr_err_num =0;
+unsigned long ip6hdr_err_num =0;
+unsigned long tcphdr_err_num =0;
+unsigned long tcp6hdr_err_num =0;
+
+extern int fast4_fw_recv(struct nf_conn *tmpl,
+ struct sk_buff *skb,
+ struct nf_conn *ct,
+ struct nf_conntrack_l4proto *l4proto,
+ unsigned int dataoff,
+ int dir,
+ u_int8_t protonum);
+extern int fast6_fw_recv(struct nf_conn *tmpl,
+ struct sk_buff *skb,
+ struct nf_conn *ct,
+ struct nf_conntrack_l4proto *l4proto,
+ unsigned int dataoff,
+ int dir,
+ u_int8_t protonum);
+
+//extern int fast_br(struct sk_buff *skb);
+//extern struct net_device *getbrport_bydst(struct net_device *dev,unsigned char *dest);
+extern struct nf_conntrack_tuple_hash *
+nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
+ const struct nf_conntrack_tuple *tuple);
+
+
+extern int (*fast_from_softirq) (struct sk_buff *skb);
+extern int (*fast_from_driver)(struct sk_buff *skb, struct net_device* dev);
+
+extern void fastnat_cleanup_links(void);
+extern void fast6_cleanup_links(void);
+
+extern fast_entry_t *cur_timeout_entry;
+extern int tcpack_timeout(fast_entry_t *entry, unsigned long *next_schedule, int *set_next);
+extern int tcpack_rel(fast_entry_t *entry);
+
+extern int tsp_fastnat_init(void);
+extern int tsp_fastnat_cleanup(void);
+
+extern int fast4_fw_init(void);
+extern int fast6_fw_init(void);
+
+extern int fast4_fw_cleanup(void);
+extern int fast6_fw_cleanup(void);
+
+extern int tsp_fast6_init(void);
+extern int tsp_fast6_cleanup(void);
+
+extern int fastnat_event(traverse_command_t *cmd);
+extern int fast6_event(traverse_command_t *cmd);
+
+//¿ìËÙת·¢procÎļþµÄ³õʼ»¯
+extern int fast_conntrack_init_proc(void );
+
+//ÄÚºËÆ½Ì¨»¯procÎļþµÄ³õʼ»¯
+extern int net_adapter_init_proc(void );
+
+unsigned int (*tsp_mirror_handle)(struct sk_buff *skb);
+
+extern void net_dbg_perf_dev_recv(char * packet_addr,char* node_str);
+extern void net_dbg_perf_clear_last_item(struct sk_buff *skb);
+
+
+/* ******************************* function implementations ******************************* */
+/*
+ * fast_iphdr_check - minimal sanity check of the L3 header before the
+ * fast path touches the packet (mirrors ip_rcv()/ipv6_rcv() checks).
+ * proto is ETH_P_IP or ETH_P_IPV6; any other value passes unchecked.
+ * Returns 1 if the header looks valid, 0 otherwise.
+ */
+static int fast_iphdr_check(struct sk_buff *skb, int proto)
+{
+ const struct iphdr *iph;
+ const struct ipv6hdr *ip6h;
+ u32 len;
+
+ if (proto == ETH_P_IP)
+ {
+ iph = ip_hdr(skb);
+
+ /* IHL must be >= 5 words and version must be 4. */
+ if (iph->ihl < 5 || iph->version != 4)
+ return 0;
+
+ /* tot_len must fit inside the skb and cover the header. */
+ len = ntohs(iph->tot_len);
+ if (skb->len < len) {
+ return 0;
+ }
+ if (len < (iph->ihl*4))
+ return 0;
+ }
+ else if(proto == ETH_P_IPV6)
+ {
+ ip6h = ipv6_hdr(skb);
+ if (ip6h->version != 6)
+ return 0;
+
+ /* payload_len == 0 with a hop-by-hop header may be a jumbogram,
+ * so only enforce the length bound otherwise (as in ipv6_rcv). */
+ len = ntohs(ip6h->payload_len);
+ if (len || ip6h->nexthdr != NEXTHDR_HOP) {
+ if (len + sizeof(struct ipv6hdr) > skb->len) {
+ return 0;
+ }
+ }
+
+ }
+
+ return 1;
+}
+
+/*
+ * Based on ipv6_skip_exthdr() in net/ipv6/exthdr.c
+ *
+ * This function parses (probably truncated) exthdr set "hdr"
+ * of length "len". "nexthdrp" initially points to some place,
+ * where type of the first header can be found.
+ *
+ * It skips all well-known exthdrs, and returns pointer to the start
+ * of unparsable area i.e. the first header with unknown type.
+ * if success, *nexthdr is updated by type/protocol of this header.
+ *
+ * NOTES: - it may return pointer pointing beyond end of packet,
+ * if the last recognized header is truncated in the middle.
+ * - if packet is truncated, so that all parsed headers are skipped,
+ * it returns -1.
+ * - if packet is fragmented, return pointer of the fragment header.
+ * - ESP is unparsable for now and considered like
+ * normal payload protocol.
+ * - Note also special handling of AUTH header. Thanks to IPsec wizards.
+ */
+
+/* Skip well-known IPv6 extension headers starting at offset 'start'.
+ * On return *nexthdrp holds the first non-extension protocol number and
+ * the returned offset points at its header (may exceed the packet if a
+ * recognized header is truncated); returns -1 if the header chain is cut
+ * short. Stops early at NEXTHDR_NONE and NEXTHDR_FRAGMENT. */
+static int nf_ct_ipv6_skip_exthdr(const struct sk_buff *skb, int start,
+ u8 *nexthdrp, int len)
+{
+ u8 nexthdr = *nexthdrp;
+
+ while (ipv6_ext_hdr(nexthdr)) {
+ struct ipv6_opt_hdr hdr;
+ int hdrlen;
+
+ if (len < (int)sizeof(struct ipv6_opt_hdr))
+ return -1;
+ if (nexthdr == NEXTHDR_NONE)
+ break;
+ if (nexthdr == NEXTHDR_FRAGMENT)
+ break;
+ if (skb_copy_bits(skb, start, &hdr, sizeof(hdr)))
+ BUG();
+ /* AUTH header length is in 4-byte units +2; others use optlen. */
+ if (nexthdr == NEXTHDR_AUTH)
+ hdrlen = (hdr.hdrlen+2)<<2;
+ else
+ hdrlen = ipv6_optlen(&hdr);
+
+ nexthdr = hdr.nexthdr;
+ len -= hdrlen;
+ start += hdrlen;
+ }
+
+ *nexthdrp = nexthdr;
+ return start;
+}
+
+
+/*
+ * fast_tcphdr_check - validate the TCP header length/offset fields for
+ * an IPv4 or IPv6 packet before the fast path reads them.
+ * For IPv6, extension headers are skipped first; non-TCP IPv6 payloads
+ * pass trivially (return 1). Returns 1 if OK, 0 if malformed.
+ */
+static int fast_tcphdr_check(struct sk_buff *skb, int proto)
+{
+ const struct iphdr *iph = NULL;
+ const struct ipv6hdr *ip6h = NULL;
+ struct tcphdr *tcph = NULL;
+ unsigned int iphdr_len = 0;
+ unsigned int ip6hdr_len = 0;
+ unsigned int tcphdr_len = 0;
+ unsigned char *l4head = NULL;
+ __u8 protonum;
+ int extoff = 0;
+
+
+
+ if (proto == ETH_P_IP)
+ {
+ iph = ip_hdr(skb);
+ iphdr_len = iph->ihl * 4;
+ tcph = (struct tcphdr *)((unsigned char*)iph + iphdr_len);
+ tcphdr_len = sizeof(struct tcphdr);
+
+ /* Basic TCP header must fit in the remaining payload. */
+ if (tcphdr_len > skb->len - iphdr_len)
+ return 0;
+
+ /* doff must cover at least the fixed TCP header... */
+ if (tcph->doff < tcphdr_len/4)
+ return 0;
+
+ /* ...and the full header (incl. options) must fit in the skb. */
+ if (tcph->doff*4 > skb->len - iphdr_len)
+ return 0;
+ }
+ else if(proto == ETH_P_IPV6)
+ {
+ ip6h = ipv6_hdr(skb);
+ ip6hdr_len = sizeof(struct ipv6hdr);
+ tcphdr_len = sizeof(struct tcphdr);
+
+ /* Extract the L4 protocol number (cf. ipv6_get_l4proto). */
+ extoff = skb_network_offset(skb) + ip6hdr_len;
+ protonum = 0;
+ if (skb_copy_bits(skb, skb_network_offset(skb) + offsetof(struct ipv6hdr, nexthdr),
+ &protonum, sizeof(protonum)) != 0) {
+ return 0;
+ }
+ extoff = nf_ct_ipv6_skip_exthdr(skb, extoff, &protonum, skb->len - extoff);
+
+ /* Non-TCP payloads have nothing to validate here. */
+ if(protonum != NEXTHDR_TCP)
+ return 1;
+
+ tcph = (struct tcphdr *)((unsigned char*)ip6h + extoff);
+ if (tcphdr_len > skb->len - extoff)
+ return 0;
+
+ /* doff must cover at least the fixed TCP header... */
+ if (tcph->doff < tcphdr_len/4)
+ return 0;
+
+ /* ...and the full header (incl. options) must fit in the skb. */
+ if (tcph->doff*4 > skb->len - extoff)
+ return 0;
+ }
+
+
+ return 1;
+}
+/* Hand the skb to one packet_type handler, taking an extra reference so
+ * the handler's consumption does not free it (mirrors net/core/dev.c).
+ * NOTE(review): bumps skb->users.refs directly rather than via
+ * refcount_inc() — confirm this matches the kernel's refcount config. */
+static inline int deliver_skb(struct sk_buff *skb,
+ struct packet_type *pt_prev,
+ struct net_device *orig_dev)
+{
+ atomic_inc(&skb->users.refs);
+ return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+}
+
+/* Grab the skb's conntrack for AP-side use, taking TWO references
+ * (deliberately paired with the two puts in put_ct_for_ap()).
+ * Returns the ct_general pointer, or NULL if skb/ct is absent. */
+void *get_ct_for_ap(struct sk_buff *skb)
+{
+ if(skb){
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn * ct = nf_ct_get(skb, &ctinfo);
+ if(ct){
+ nf_conntrack_get(&ct->ct_general);
+ nf_conntrack_get(&ct->ct_general);
+ return &ct->ct_general;
+ }
+ }
+ return NULL;
+}
+
+/* Drop the two conntrack references taken by get_ct_for_ap().
+ * NOTE(review): no NULL check on pct — callers must not pass NULL. */
+void put_ct_for_ap(void *pct)
+{
+ struct nf_conn *ct = (struct nf_conn *)pct;
+ nf_conntrack_put((struct nf_conntrack *)ct);
+ nf_conntrack_put((struct nf_conntrack *)ct);
+}
+
+/* Deliver the skb to RAW_PACKET (ptype_all) sockets so capture tools see
+ * it before the fast path rewrites the payload — in the normal
+ * __netif_receive_skb() path the capture would otherwise observe
+ * already-modified data. */
+void fast_tcpdump(struct sk_buff *skb)
+{
+ struct packet_type *ptype = NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ptype, &ptype_all, list)
+ {
+ /* Wildcard taps (no dev) and taps bound to this device both match. */
+ if (!ptype->dev || ptype->dev == skb->dev)
+ {
+ skbinfo_add(NULL, SKB_IRQ_FREE);
+ deliver_skb(skb, ptype, skb->dev);
+ }
+ }
+ rcu_read_unlock();
+}
+
+/* Ensure the skb has enough headroom for dev's link-layer header
+ * (cf. ip_finish_output2). If short, reallocate: the ORIGINAL skb is
+ * always freed and the new one returned (NULL if allocation failed —
+ * the packet is then lost). Otherwise returns skb unchanged. */
+struct sk_buff *fast_expand_headroom(struct sk_buff *skb, struct net_device *dev) {
+ unsigned int hh_len = LL_RESERVED_SPACE(dev);
+ struct sk_buff *skb2 = NULL;
+ if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
+ skb2 = skb_realloc_headroom(skb, max(hh_len, NET_SKB_PAD));
+ if(skb2)
+ clean_cache(skb2->data,skb2->len);
+ kfree_skb(skb);
+ return skb2;
+ }
+ return skb;
+}
+
+/* IPv6 variant of fast_expand_headroom (cf. ip6_xmit): also reserves
+ * room for an ipv6hdr and expands regardless of dev->header_ops.
+ * Frees the original skb on reallocation; returns the (possibly new)
+ * skb, or NULL on allocation failure. */
+struct sk_buff *fast_expand_headroom_v6(struct sk_buff *skb, struct net_device *dev) {
+ unsigned int hh_len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr);
+ struct sk_buff *skb2 = NULL;
+ if (unlikely(skb_headroom(skb) < hh_len)) {
+ skb2 = skb_realloc_headroom(skb, max(hh_len, NET_SKB_PAD));
+ if(skb2)
+ clean_cache(skb2->data,skb2->len);
+ kfree_skb(skb);
+ return skb2;
+ }
+ return skb;
+}
+
+
+/* Unlink 'entry' from the singly-linked fast list and free it back to
+ * fast_head_cache. Always returns NULL (return value is unused by
+ * design). NOTE(review): the entry is freed even if it was NOT found on
+ * the list — confirm callers never pass an entry from another list. */
+fast_entry_t *fn_list_del(fast_list_t *list_head, fast_entry_t *entry)
+{
+ fast_entry_t *ret_entry = NULL, **pprev = NULL;
+
+ if (!entry)
+ {
+ return NULL;
+ }
+
+ /* Walk with a pointer-to-link so unlinking needs no prev node. */
+ pprev = &list_head->next;
+ for (ret_entry = list_head->next; ret_entry; ret_entry = ret_entry->next)
+ {
+ if (ret_entry == entry)
+ {
+ *pprev = ret_entry->next;
+ list_head->count--;
+ break;
+ }
+ pprev = &ret_entry->next;
+ }
+ kmem_cache_free(fast_head_cache, entry);
+ netslab_dec(FAST_SLAB);
+ return NULL;
+}
+
+/* Push 'entry' at the head of the fast list and bump its count.
+ * Caller must hold the list's protecting spinlock. */
+void fn_list_add(fast_list_t *list_head, fast_entry_t *entry)
+{
+ entry->next = list_head->next;
+ list_head->next = entry;
+ list_head->count++;
+}
+
+/* Look up a fast entry by conntrack tuple in the given hash table.
+ * Returns the containing fast_entry_data_t, or NULL if no bucket entry
+ * matches. Traversal is RCU-safe (hlist_nulls). */
+fast_entry_data_t *fast_find_entry_data(const struct hlist_nulls_head *working_hash, const struct nf_conntrack_tuple *tuple)
+{
+ struct nf_conntrack_tuple_hash *h;
+ struct hlist_nulls_node *n;
+ unsigned int hash;
+
+ hash = hash_conntrack_fast(tuple);
+ hlist_nulls_for_each_entry_rcu(h, n, &working_hash[hash], hnnode)
+ {
+ if (nf_ct_tuple_equal(tuple, &h->tuple))
+ {
+ return fast_hash_to_data(h);
+ }
+ }
+
+ return NULL;
+}
+
+/* Insert entry_data into the fast hash keyed by its tuple; no-op if an
+ * entry with the same tuple already exists. Takes a reference on the
+ * egress device. Always returns 0. */
+int fast_add_entry(struct hlist_nulls_head *working_hash, fast_entry_data_t *entry_data)
+{
+ unsigned int hash;
+
+ hash = hash_conntrack_fast(&entry_data->tuplehash.tuple);
+ if (fast_find_entry_data(working_hash, &entry_data->tuplehash.tuple))
+ {
+ return 0;
+ }
+
+ hlist_nulls_add_head_rcu(&entry_data->tuplehash.hnnode, &working_hash[hash]);
+ /* The caller's flow already holds a reference on outdev, so taking
+ * another here without rcu_read_lock() is considered acceptable. */
+ dev_hold(entry_data->outdev);
+
+ return 0;
+}
+
+/* Remove both directions of 'entry' from the fast hash (only directions
+ * whose flag bit is set AND whose hash node was actually linked), and
+ * drop the device reference taken in fast_add_entry(). */
+static void workinghash_del_node(fast_entry_t *entry)
+{
+ int i = 0;
+
+ for (i = 0; i < IP_CT_DIR_MAX; i++)
+ {
+ //if (entry->flags & (1 << i))
+ if ((entry->flags & (1 << i))&&(0 != entry->data[i].tuplehash.hnnode.next))
+ {
+ hlist_nulls_del_rcu(&entry->data[i].tuplehash.hnnode);
+ //hlist_nulls_del(&entry->data[i].tuplehash.hnnode);
+ dev_put(entry->data[i].outdev);
+ }
+ }
+}
+
+/* Delete a fast link: release its tcpack state, drop the conntrack
+ * reference held by the entry, and free it from the list.
+ * NOTE(review): decrements ct_general.use directly instead of
+ * nf_ct_put() — this skips destruction if it hits zero; confirm the ct
+ * is guaranteed to outlive the entry here. */
+static void fastlist_del_entry(fast_list_t *list_head, fast_entry_t *entry)
+{
+ tcpack_rel(entry);
+ //nf_ct_put(entry->ct);
+ atomic_dec(&(entry->ct->ct_general.use));
+ fn_list_del(list_head, entry);
+}
+
+/*fast³¬Ê±´¦Àí£¬É¾³ýÁ¬½Ó*/
+/*jiangjing, ÐÞ¸ÄÈë¿Ú²ÎÊýÀàÐÍΪunsigned long*/
+extern spinlock_t fast6_spinlock;
+extern spinlock_t fastnat_spinlock;
+/* Timer callback: expire a fast entry. The entry is recovered from
+ * ptimer->data (set in fast_get_entry). It is removed from its hash and
+ * list only if it is still present on its list, under the appropriate
+ * per-family spinlock; the lock-identity check guards against a stale
+ * or corrupted entry whose lock pointer no longer matches a known lock. */
+static void fast_timeout(struct timer_list *ptimer)
+{
+ fast_entry_t *entry = (fast_entry_t *)(ptimer->data);
+ fast_entry_t *ret_entry = NULL;
+ struct fast_list_s *list_head = entry->list_head;
+ spinlock_t *fast_spinlock = entry->fast_spinlock;
+ if(fast_spinlock == &fast6_spinlock || fast_spinlock == &fastnat_spinlock)
+ {
+ spin_lock_bh(fast_spinlock);
+
+ /* Re-find the entry: it may already have been torn down. */
+ for (ret_entry = list_head->next; ret_entry; ret_entry = ret_entry->next)
+ {
+ if (ret_entry == entry)
+ {
+ workinghash_del_node(entry);
+ fastlist_del_entry(entry->list_head, entry);
+ spin_unlock_bh(fast_spinlock);
+ return;
+ }
+ }
+ spin_unlock_bh(fast_spinlock);
+ }
+}
+
+/*
+ * fast_get_entry - find the fast entry for 'ct' on list_head, refreshing
+ * its expiry timer; if absent, create one (ORIGINAL direction only).
+ * Timeout selection: TCP uses tcp_timeouts[] indexed by the ct's TCP
+ * state; UDP uses the stream timeout once a reply has been seen, else
+ * the plain UDP timeout. Returns the entry, or NULL (wrong direction /
+ * allocation failure). Caller must hold the list's spinlock.
+ */
+fast_entry_t *fast_get_entry(fast_list_t *list_head, struct nf_conn *ct, char dir)
+{
+ fast_entry_t *ret = NULL;
+ u_int8_t protocol;
+ unsigned long expires;
+
+ /* Existing entry: just refresh its timer. */
+ for (ret = list_head->next; ret; ret = ret->next)
+ {
+ if (ret->ct == ct)
+ {
+ protocol = nf_ct_protonum(ct);
+ if (IPPROTO_TCP == protocol)
+ {
+ /* TCP: timeout tracks the connection state. */
+ expires = jiffies + tcp_timeouts[ct->proto.tcp.state];
+ }
+ else
+ {
+ /* UDP: longer timeout for bidirectional streams. */
+ if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status))
+ {
+ expires = jiffies + fast_udp_timeout_stream;
+ }
+ else
+ {
+ expires = jiffies + fast_udp_timeout;
+ }
+
+
+ }
+ mod_timer(&ret->timeout, expires);
+ return ret;
+ }
+ }
+
+ /* Only create entries for the ORIGINAL direction. */
+ if (IP_CT_DIR_ORIGINAL != dir)
+ {
+ return NULL;
+ }
+
+ /* Dedicated slab rather than generic kmalloc slab. */
+ ret = kmem_cache_alloc(fast_head_cache, GFP_ATOMIC);
+ if (ret == NULL)
+ {
+ print_sun(SUN_ERR,"fast_get_entry: kmalloc fail!\n");
+ return NULL;
+ }
+ netslab_inc(FAST_SLAB);
+ memset(ret, 0, sizeof(fast_entry_t));
+ ret->ct = ct;
+ ret->list_head = list_head;
+
+ /* Arm the expiry timer (legacy data/function fields; the callback
+ * recovers the entry from timeout.data). */
+ __init_timer(&ret->timeout, NULL, 0);
+
+ protocol = nf_ct_protonum(ct);
+ if (IPPROTO_TCP == protocol)
+ {
+ /* TCP: timeout tracks the connection state. */
+ ret->timeout.expires = jiffies + tcp_timeouts[ct->proto.tcp.state];
+ }
+ else
+ {
+ /* UDP: longer timeout for bidirectional streams. */
+ if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status))
+ {
+ ret->timeout.expires = jiffies + fast_udp_timeout_stream;
+ }
+ else
+ {
+ ret->timeout.expires = jiffies + fast_udp_timeout;
+ }
+ }
+
+ ret->timeout.data = (unsigned long)ret;
+ ret->timeout.function = fast_timeout;
+ add_timer(&ret->timeout);
+
+ fn_list_add(list_head, ret);
+
+ return ret;
+}
+
+unsigned int get_conntrack_index(const struct tcphdr *tcph) /* map TCP flags to the conntrack state-table index */
+{
+ if (tcph->rst) return TCP_RST_SET; /* RST takes precedence over all other flags */
+ else if (tcph->syn) return (tcph->ack ? TCP_SYNACK_SET : TCP_SYN_SET);
+ else if (tcph->fin) return TCP_FIN_SET;
+ else if (tcph->ack) return TCP_ACK_SET;
+ else return TCP_NONE_SET; /* no tracked flag set */
+}
+
+/* Advance the conntrack TCP state from the packet's flags and rearm the entry timeout. */
+void update_tcp_timeout(fast_entry_t *entry, fast_entry_data_t *entry_data, struct tcphdr *tcph)
+{
+ enum tcp_conntrack new_state, old_state;
+ unsigned int dir, index;
+
+ old_state = entry->ct->proto.tcp.state;
+ dir = entry_data->tuplehash.tuple.dst.dir;
+
+ if (tcph == NULL || old_state >=TCP_CONNTRACK_MAX)
+ {
+ print_sun(SUN_ERR,"update_tcp_timeout tcph is null! \n");
+ return;
+ }
+ index = get_conntrack_index(tcph);
+
+ /* update the tracked TCP connection state */
+ new_state = tcp_conntracks[dir][index][old_state];
+ if(old_state != new_state)
+ {
+ //see tcp_packet() in nf_conntrack_proto_tcp.c for these special states
+ if (new_state == TCP_CONNTRACK_IGNORE)
+ new_state = TCP_CONNTRACK_SYN_RECV;
+ else if (new_state == TCP_CONNTRACK_MAX)
+ return;
+
+ entry->ct->proto.tcp.state = new_state;
+ //only rearm when the effective timeout changes, reducing timer/list churn
+ mod_timer(&entry->timeout, jiffies + tcp_timeouts[new_state]);
+ }
+}
+
+/* Record the egress device and (for Ethernet) prebuild the MAC header; returns 1 on success, 0 on failure. */
+int record_MAC_header(const struct hlist_nulls_head *working_hash, struct nf_conn *ct,
+ fast_entry_t *entry, fast_entry_data_t *entry_data,
+ struct neighbour *neigh, const struct net_device *out, int proto)
+{
+ struct ethhdr *eth;
+ struct net_device *dst_out = NULL;
+ int i;
+
+ if (out == NULL)
+ goto REL;
+
+ //Prefill the MAC header only for Ethernet-type devices; PPP/PPPoE headers vary and cannot be prefilled (see alloc_netdev/alloc_etherdev)
+ if (out->type != ARPHRD_ETHER)
+ return 1;
+
+ //When egress is a bridge, resolve the L2 port device directly (e.g. usb0 or wifi0)
+ if (out->priv_flags & IFF_EBRIDGE)
+ {
+ if(out->dev_addr == NULL)
+ goto REL;
+
+ if (fastbr_level == 1)
+ {
+ dst_out = getbrport_bydst(out, neigh->ha);
+ if (dst_out == NULL)
+ {
+ print_sun(SUN_DBG,"!!!!! getbrport_bydst fail \n");
+ goto REL;
+ }
+ entry_data->outdev = dst_out;
+ }
+ else
+ {
+ entry_data->outdev = out;
+ }
+ entry_data->hh_flag = 1;
+ eth = (struct ethhdr *)entry_data->hh_data;
+ eth->h_proto = proto;
+ memcpy(eth->h_source, out->dev_addr, ETH_ALEN);
+ memcpy(eth->h_dest, neigh->ha, ETH_ALEN);
+ }
+ //PPP forwarding: only the bare IP packet is sent, no MAC header
+ else if (strncmp(out->name, ppp_name, strlen(ppp_name)) == 0)
+ {
+ if(out->dev_addr == NULL)
+ goto REL;
+
+ entry_data->outdev = out;
+ entry_data->hh_flag = 0;
+ }
+ //plain Ethernet forwarding
+ else
+ {
+ if(out->dev_addr == NULL)
+ goto REL;
+
+ //For wifi station / RJ45 / USB etc., fill in the MAC header
+ entry_data->outdev = out;
+ entry_data->hh_flag = 1;
+ eth = (struct ethhdr *)entry_data->hh_data;
+ eth->h_proto = proto;
+ memcpy(eth->h_source, out->dev_addr, ETH_ALEN);
+ memcpy(eth->h_dest, neigh->ha, ETH_ALEN);
+ }
+ return 1;
+
+REL:
+ //The entry may already have been (partially) created; release all of its resources
+ for (i = 0; i < IP_CT_DIR_MAX; i++)
+ {
+ if (entry->flags & (1 << i))
+ {
+ hlist_nulls_del(&entry->data[i].tuplehash.hnnode);
+ dev_put(entry->data[i].outdev);
+ }
+ }
+ //restore the ct's own timeout handling
+ //add_timer(&ct->timeout);
+ //nf_ct_put(ct);
+ atomic_dec(&(ct->ct_general.use)); /* drop the ref taken when the entry was created */
+ del_timer(&entry->timeout);
+ fn_list_del(entry->list_head, entry);
+ return 0;
+}
+
+/* Find the bridge port for a destination MAC on bridge 'out'; returns the port device or NULL. */
+struct net_device *getBridgePort(struct neighbour *neigh, const struct net_device *out)
+{
+ struct net_device *dst_out = NULL;
+
+ if (!test_bit(FAST_TYPE_BR_LOCAL_BIT, &fast_switch))
+ return NULL;
+
+ if (!out || !neigh)
+ return NULL;
+
+ //only Ethernet-type devices can have a prebuilt MAC header
+ if (out->type != ARPHRD_ETHER)
+ return NULL;
+
+ //when egress is a bridge, resolve the L2 port device directly
+ if (out->priv_flags & IFF_EBRIDGE)
+ {
+ if (out->dev_addr == NULL)
+ return NULL;
+
+ //look up the bridge port by destination MAC
+ dst_out = getbrport_bydst(out, neigh->ha);
+ if (dst_out && dst_out != out)
+ return dst_out;
+
+ print_sun(SUN_DBG, "!!!!! getbrport_bydst fail \n");
+ }
+ return NULL;
+}
+
+//Return 1 if the (network-order) destination port must bypass fastnat, 0 otherwise.
+int check_skip_ports(unsigned int net_dst_port)
+{
+ int i = 0;
+ unsigned int dst_port = ntohs(net_dst_port); /* network -> host order (was htons: same byte-swap, wrong intent) */
+
+ if (!dst_port)
+ return 0;
+
+ for (i = 0; i < sizeof(nofast_port)/sizeof(nofast_port[0]); i++)
+ {
+ if (dst_port == nofast_port[i])
+ {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+//List walker: applies a traverse command (currently device-down) to every entry, deleting matches.
+void traverse_process(fast_list_t *list_head, unsigned long param)
+{
+ fast_entry_t *entry, *next;
+ traverse_command_t *cmd;
+ int i, need_del;
+
+ cmd = (traverse_command_t *)param;
+ if (!cmd)
+ {
+ return;
+ }
+
+ for(entry = list_head->next; entry; entry = next)
+ {
+ next = entry->next; /* fetch next before possible deletion */
+ need_del = 0;
+
+ if (cmd->cmd == TRAVERSE_CMD_DEV_DOWN)
+ {
+ for (i = 0; i < IP_CT_DIR_MAX; i++)
+ {
+ if (entry->flags & (1 << i))
+ {
+ const struct nf_conn_nat *nat = nfct_nat(entry->ct);
+ if ((entry->data[i].outdev && entry->data[i].outdev->ifindex == cmd->arg)
+ || (nat && nat->masq_index == cmd->arg))
+ {
+ need_del = FAST_ALL_DIR; /* device went down: drop both directions */
+ break;
+ }
+ }
+ }
+ }
+
+ if (need_del)
+ {
+ del_timer(&entry->timeout);
+ workinghash_del_node(entry);
+ if (need_del == FAST_ALL_DIR)
+ {
+ fastlist_del_entry(entry->list_head, entry);
+ }
+ }
+ }
+}
+
+//fastnat level turned off: clear all IPv4/IPv6 fast-forward entries but keep the underlying ct connections.
+void fast_cleanup_links(fast_list_t *list_head)
+{
+ fast_entry_t *entry, *next;
+
+ for (entry = list_head->next; entry; entry = next)
+ {
+ next = entry->next; /* fetch next before unlinking */
+ //delete the entry's private timeout timer
+ del_timer(&entry->timeout);
+
+ workinghash_del_node(entry);
+
+ //restore the ct's own timeout handling
+ //add_timer(&entry->ct->timeout);
+ fn_list_del(list_head, entry);
+
+ }
+}
+
+void athr_fast_dump(int ctl) /* debug dump hook: intentionally a no-op stub */
+{
+
+}
+/* Built-in default zone used e.g. by modules. */
+const struct nf_conntrack_zone nf_ct_zone_dflt_fast = {
+ .id = NF_CT_DEFAULT_ZONE_ID,
+ .dir = NF_CT_DEFAULT_ZONE_DIR,
+};
+
+/* ***************** unified fast-forward handling ***************************/
+/* Look up a conntrack entry eligible for fast forwarding --- modeled on nf_conntrack_in. */
+struct nf_conn *skb_get_ct(struct nf_conn **tmpl,
+ struct sk_buff *skb,
+ struct nf_conntrack_l4proto **l4proto,
+ unsigned int *dataoff,
+ u_int8_t pf,
+ unsigned int hooknum,
+ int *dir,
+ u_int8_t *protonum)
+{
+ int ret; /* unused; kept to preserve layout */
+ struct nf_conntrack_tuple tuple;
+ struct nf_conntrack_tuple_hash *h;
+ struct nf_conn *ct;
+ struct nf_conntrack_zone * zone;
+ enum ip_conntrack_info ctinfo;
+ struct nf_conntrack_zone tmp;
+ u32 hash;
+
+
+ //query an existing ct, following the nf_conntrack_in logic
+ *tmpl = nf_ct_get(skb, &ctinfo);
+ if (*tmpl || ctinfo == IP_CT_UNTRACKED) {
+ /* Previously seen (loopback or untracked)? Ignore. */
+ if ((*tmpl && !nf_ct_is_template(*tmpl)) ||
+ ctinfo == IP_CT_UNTRACKED) {
+ goto err_out;
+ }
+ skb->_nfct = 0;
+ }
+
+ *dataoff = get_l4proto_fast(skb, skb_network_offset(skb), pf, protonum);
+
+ if (*dataoff <= 0) {
+ goto err_out;
+ }
+
+
+ *l4proto = nf_ct_l4proto_find(*protonum);
+
+
+ if (*protonum != IPPROTO_TCP && *protonum != IPPROTO_UDP)
+ goto err_out;
+
+ if (!nf_ct_get_tuple_fast(skb, skb_network_offset(skb),
+ *dataoff, pf, *protonum, &init_net, &tuple)) {
+ goto err_out;
+ }
+
+
+ zone = nf_ct_zone_tmpl(*tmpl, skb, &tmp);
+ hash = hash_conntrack_raw_fast(&tuple, &init_net);
+
+ h = nf_conntrack_find_fast(&init_net, zone, &tuple, hash);
+
+ if (!h || IS_ERR(h))
+ goto err_out;
+
+ ct = nf_ct_tuplehash_to_ctrack(h);
+
+
+ if (!ct || IS_ERR(ct)) {
+ goto err_out;
+ }
+
+ if (test_bit(IPS_DYING_BIT, &ct->status) || test_bit(IPS_UNTRACKED_BIT, &ct->status))
+ {
+ nf_conntrack_put(&ct->ct_general);
+ goto err_out;
+ }
+
+ if (*tmpl && *tmpl == ct)
+ {
+ nf_conntrack_put(&ct->ct_general);
+ goto err_out;
+ }
+
+ //TCP must be established in both directions before taking the fast path
+ if (IPPROTO_TCP == *protonum && !test_bit(IPS_ASSURED_BIT, &ct->status))
+ {
+ nf_conntrack_put(&ct->ct_general);
+ goto err_out;
+ }
+
+ if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
+ *dir = 1;
+ } else {
+ *dir = 0;
+ }
+ return ct;
+
+err_out :
+ print_sun(SUN_DBG, "skb : %p, skb_get_ct fail!!!!!!!!!!", skb); /* %p: was 0x%x, which truncates/misprints a pointer */
+ if (*tmpl) {
+ nf_ct_set(skb, (struct nf_conn *)&((*tmpl)->ct_general), ctinfo);
+ }
+ else {
+ skb->_nfct = 0;
+ }
+ return NULL;
+}
+
+//New fast-mode dispatch: classify the ct and hand the skb to fw/local/bridge fast handlers.
+int fast_for_ip_new(struct sk_buff *skb,
+ int(*fast_fw)(struct nf_conn *, struct sk_buff *, struct nf_conn *,
+ struct nf_conntrack_l4proto *, unsigned int, int, u_int8_t),
+ int(*fast_local_proc)(struct nf_conn *, struct sk_buff *, struct nf_conn *,
+ struct nf_conntrack_l4proto *, unsigned int, int, u_int8_t),
+ int proto)
+{
+ struct nf_conn *ct = NULL, *tmpl = NULL;
+ struct nf_conntrack_l4proto *l4proto;
+ unsigned int dataoff;
+ u_int8_t protonum;
+ int dir = 0;
+ int ret = 0;
+
+ if (proto == ETH_P_IP)
+ ct = skb_get_ct(&tmpl, skb, &l4proto, &dataoff, PF_INET, NF_INET_PRE_ROUTING, &dir, &protonum);
+ else if (proto == ETH_P_IPV6)
+ ct = skb_get_ct(&tmpl, skb, &l4proto, &dataoff, PF_INET6, NF_INET_PRE_ROUTING, &dir, &protonum);
+ if (!ct)
+ {
+ if (fast_br_proc && fast_br_proc(skb)) /* no ct: still try the bridge fast path */
+ {
+ fastbr_num++;
+ return 1;
+ }
+ return 0;
+ }
+
+ spin_lock_bh(&fast_fw_spinlock);
+ /* TCP requires a completed handshake and fast links set up in both directions */
+ if (IPPROTO_TCP == protonum || NEXTHDR_TCP == protonum)
+ {
+ int rdir = (IP_CT_DIR_ORIGINAL == dir) ? IP_CT_DIR_REPLY: IP_CT_DIR_ORIGINAL;
+ if (!test_bit(IPS_ASSURED_BIT, &ct->status)) {
+ nf_conntrack_put(&ct->ct_general);
+ spin_unlock_bh(&fast_fw_spinlock);
+ return 0;
+ }
+ if (!(ct->fast_ct.fast_dst[dir] && ct->fast_ct.fast_dst[rdir])) {
+ nf_conntrack_put(&ct->ct_general);
+ spin_unlock_bh(&fast_fw_spinlock);
+ return 0;
+ }
+ }
+ switch(ct->fast_ct.isFast)
+ {
+ case FAST_CT_FW4:
+ case FAST_CT_FW6:
+ if (fast_fw && fast_fw(tmpl, skb, ct, l4proto, dataoff, dir, protonum)) {
+ if (proto == ETH_P_IP)
+ {
+ fastnat_num++;
+ }
+
+ else if (proto == ETH_P_IPV6)
+ fast6_num++;
+ ret = 1;
+ break;
+ }
+ ret = 0;
+ break;
+ case FAST_CT_LOCAL4:
+ case FAST_CT_LOCAL6:
+ if (fast_local_proc && fast_local_proc(tmpl, skb, ct, l4proto, dataoff, dir, protonum)) {
+ if (proto == ETH_P_IP)
+ fast_local4_rcv_num++;
+ else if (proto == ETH_P_IPV6)
+ fast_local6_rcv_num++;
+ ret = 1;
+ break;
+ }
+ ret = 0;
+ break;
+ default:
+ nf_conntrack_put(&ct->ct_general); /* not a fast ct: release and try the bridge path */
+ if (fast_br_proc && fast_br_proc(skb)) {
+ fastbr_num++;
+ ret = 1;
+ break;
+ }
+ ret = 0;
+ break;
+ }
+
+ spin_unlock_bh(&fast_fw_spinlock);
+ return ret;
+}
+
+//Old fast-mode dispatch: try forward/bridge/local handlers in the configured priority order.
+int fast_for_ip(struct sk_buff *skb, int(*fast_fw)(struct sk_buff *),
+ int(* fast_local_proc)(struct nf_conn *, struct sk_buff *, struct nf_conn *,
+ struct nf_conntrack_l4proto *, unsigned int, int, u_int8_t),
+ int proto)
+{
+ struct nf_conn *ct = NULL, *tmpl = NULL;
+ struct nf_conntrack_l4proto *l4proto;
+ unsigned int dataoff;
+ u_int8_t protonum;
+ int dir = 0;
+ //order: forward, bridge, local
+ if (fast_fwd_level > fast_br_level && fast_br_level > fast_local_level)
+ {
+ if (fast_fw && fast_fw(skb))
+ {
+ if (proto == ETH_P_IP)
+ {
+ fastnat_num++;
+ }
+ else if (proto == ETH_P_IPV6)
+ fast6_num++;
+ return 1;
+ }
+ else if (fast_br_proc && fast_br_proc(skb) == 1)
+ {
+ fastbr_num++;
+ return 1;
+ }
+ }
+ //order: forward, local, bridge
+ else if (fast_fwd_level > fast_local_level && fast_local_level > fast_br_level)
+ {
+ if (fast_fw && fast_fw(skb))
+ {
+ if (proto == ETH_P_IP)
+ fastnat_num++;
+ else if (proto == ETH_P_IPV6)
+ fast6_num++;
+ return 1;
+ }
+ else if (fast_br_proc && fast_br_proc(skb) == 1)
+ {
+ fastbr_num++;
+ return 1;
+ }
+ }
+ //order: bridge, forward, local
+ else if (fast_br_level > fast_fwd_level && fast_fwd_level > fast_local_level)
+ {
+ if (fast_br_proc && fast_br_proc(skb) == 1)
+ {
+ fastbr_num++;
+ return 1;
+ }
+ else if (fast_fw && fast_fw(skb))
+ {
+ if (proto == ETH_P_IP)
+ fastnat_num++;
+ else if (proto == ETH_P_IPV6)
+ fast6_num++;
+ return 1;
+ }
+ }
+ //order: bridge, local, forward
+ else if(fast_br_level > fast_local_level && fast_local_level > fast_fwd_level)
+ {
+ if (fast_br_proc && fast_br_proc(skb) == 1)
+ {
+ fastbr_num++;
+ return 1;
+ }
+ else if (fast_local_proc)
+ {
+ if (proto == ETH_P_IP)
+ ct = skb_get_ct(&tmpl, skb, &l4proto, &dataoff, PF_INET, NF_INET_PRE_ROUTING, &dir, &protonum);
+ else if (proto == ETH_P_IPV6)
+ ct = skb_get_ct(&tmpl, skb, &l4proto, &dataoff, PF_INET6, NF_INET_PRE_ROUTING, &dir, &protonum);
+
+ if(!ct)
+ return 0;
+
+ if (fast_local_proc(tmpl, skb, ct, l4proto, dataoff, dir, protonum)) {
+ if (proto == ETH_P_IP)
+ fast_local4_rcv_num++;
+ else if (proto == ETH_P_IPV6)
+ fast_local6_rcv_num++;
+ return 1;
+ }
+ }
+ else if (fast_fw && fast_fw(skb))
+ {
+ if (proto == ETH_P_IP)
+ fastnat_num ++;
+ else if (proto == ETH_P_IPV6)
+ fast6_num++;
+ return 1;
+ }
+ }
+ //order: local, forward, bridge
+ else if(fast_local_level > fast_fwd_level && fast_fwd_level > fast_br_level)
+ {
+ if (fast_fw && fast_fw(skb))
+ {
+ if (proto == ETH_P_IP)
+ fastnat_num++;
+ else if (proto == ETH_P_IPV6)
+ fast6_num++;
+ return 1;
+ }
+ else if (fast_br_proc && fast_br_proc(skb) == 1) /* BUGFIX: was "fast_br_proc == 1" (function pointer compared to 1), making this branch dead */
+ {
+ fastbr_num++;
+ return 1;
+ }
+ }
+ //order: local, bridge, forward
+ else if(fast_local_level > fast_br_level && fast_br_level > fast_fwd_level)
+ {
+ if (fast_br_proc && fast_br_proc(skb) == 1)
+ {
+ fastbr_num++;
+ return 1;
+ }
+ else if (fast_fw && fast_fw(skb))
+ {
+ if (proto == ETH_P_IP)
+ fastnat_num++;
+ else if (proto == ETH_P_IPV6)
+ fast6_num++;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+int btrunk_fw = 0; /* module param: enables addr-conflict / multicast fast-forward helpers below */
+module_param(btrunk_fw, int, 0644);
+//extern int fast_fwd_ip4addr_conflict(struct sk_buff *skb);
+//extern int fast_for_multicast(struct sk_buff *skb);
+/* A MAC header may be absent (e.g. PS interfaces), so skb->protocol must already be derived from the IP header. */
+int fast_for_ipdata(struct sk_buff *skb)
+{
+
+ struct iphdr *iph;
+
+ if (skb->len > 1000)
+ skb_big_num++;
+ else if (skb->len < 100)
+ skb_small_num++;
+
+ if (skb->dev == NULL)
+ return 0;
+ if (skb->protocol == htons(ETH_P_IP)) //ipv4
+ {
+ skb_num4++;
+ skb_bytes4 += skb->len;
+
+ if(btrunk_fw && fast_fwd_ip4addr_conflict(skb) == 1)
+ {
+ return 1;
+ }
+ //multicast fast forwarding not implemented yet; count only (optional trunk helper)
+ if (ipv4_is_multicast(ip_hdr(skb)->daddr))
+ {
+ multicast_num4++;
+ if(btrunk_fw && fast_for_multicast(skb) == 1)
+ {
+ return 1;
+ }
+ return 0;
+ }
+ //broadcast is not fast-forwarded; statistics only
+ else if (ipv4_is_lbcast(ip_hdr(skb)->daddr)) {
+ broadcast_num4++;
+ return 0;
+ }
+
+ if (ip_is_fragment(ip_hdr(skb)))
+ {
+ skbinfo_add(NULL, SKB_FRAG);
+ return 0;
+ }
+
+ //only TCP and UDP are fast-forwarded
+ if (ip_hdr(skb)->protocol != IPPROTO_TCP && ip_hdr(skb)->protocol != IPPROTO_UDP)
+ {
+ return 0;
+ }
+
+ if(!fast_iphdr_check(skb, ETH_P_IP))
+ {
+ iphdr_err_num++;
+ kfree_skb(skb); /* malformed header: drop here, report consumed */
+ return 1;
+ }
+
+ /* strip any tail padding beyond tot_len, as ip_rcv does */
+ skb_trim(skb, ntohs(ip_hdr(skb)->tot_len));
+
+ /* TCP header validation */
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP) {
+ if (!fast_tcphdr_check(skb, ETH_P_IP)) {
+ tcphdr_err_num++;
+ //kfree_skb(skb);
+ return 0;
+ }
+ }
+ if (!test_bit(FAST_TYPE_VERSION_BIT, &fast_switch))
+ return fast_for_ip(skb, fast_nat4_proc, fast_local4_proc, ETH_P_IP);
+ else
+ return fast_for_ip_new(skb, fast_fw4_proc, fast_local4_proc, ETH_P_IP);
+
+ }
+ else if (skb->protocol == htons(ETH_P_IPV6)) //ipv6
+ {
+ skb_num6++;
+ skb_bytes6 += skb->len;
+
+ //IPv6 multicast fast forwarding not implemented; report failure after counting
+ if(ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))
+ {
+ multicast_num6++;
+ return 0;
+ }
+
+ if(!fast_iphdr_check(skb, ETH_P_IPV6))
+ {
+ ip6hdr_err_num++;
+ kfree_skb(skb);
+ return 1;
+ }
+ /* strip any tail padding beyond payload_len, as ipv6_rcv does */
+ skb_trim(skb, ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr));
+ /* TCP header validation */
+ if (!fast_tcphdr_check(skb, ETH_P_IPV6)) {
+ tcp6hdr_err_num++;
+ //kfree_skb(skb);
+ return 0;
+ }
+ if (!test_bit(FAST_TYPE_VERSION_BIT, &fast_switch))
+ return fast_for_ip(skb, fast_nat6_proc, fast_local6_proc, ETH_P_IPV6);
+ else
+ return fast_for_ip_new(skb, fast_fw6_proc, fast_local6_proc, ETH_P_IPV6);
+ }
+ else
+ skb_unknown++;
+ return 0;
+}
+
+/* Advance skb->data to the IP header, unwrapping VLAN and PPPoE encapsulations; returns 1 on success. */
+static int set_skbdata_toip(struct sk_buff *skb)
+{
+ __be16 next_pro = skb->protocol;
+again:
+ if (next_pro == htons(ETH_P_IP) || next_pro == htons(ETH_P_IPV6))
+ {
+ skb_set_network_header(skb, 0);
+ skb_reset_mac_len(skb);
+ skb->protocol = next_pro;
+ return 1;
+ }
+ //VLAN: strip one 802.1Q tag and re-dispatch on the inner ethertype
+ else if (next_pro == cpu_to_be16(ETH_P_8021Q))
+ {
+ skb->isvlan = 1;
+ skb_pull(skb, VLAN_HLEN);
+ next_pro = *((__be16 *)(skb->data - 2)); /* inner ethertype sits just before the new data pointer */
+ goto again;
+ }
+
+ //PPPoE session: PPP protocol 0x0021 = IPv4, 0x0057 = IPv6
+ else if (next_pro == htons(ETH_P_PPP_SES))
+ {
+ if (*(skb->data + 6) == 0x00 && *(skb->data + 7) == 0x21)
+ {
+ next_pro = htons(ETH_P_IP);
+ __skb_pull(skb, PPPOE_HEADER_LEN);
+ goto again;
+ }
+ else if(*(skb->data+ 6) == 0x00 && *(skb->data + 7) == 0x57)
+ {
+ next_pro = htons(ETH_P_IPV6);
+ __skb_pull(skb, PPPOE_HEADER_LEN);
+ goto again;
+ }
+ }
+ return 0;
+}
+
+/* Fast-process packets from the kernel net core (dev.c): IP, PPP, etc.; returns 1 if consumed. */
+static int try_fast_for_netcoredata(struct sk_buff *skb)
+{
+ __be16 old_pro = skb->protocol;
+ unsigned int old_len = skb->len;
+ unsigned char * old_data = skb->data;
+ __be16 old_netheader = skb->network_header;
+
+ //IP header 4-byte alignment check (debug only)
+ //if (((unsigned long)skb->data)%4 != 0)
+ //panic("ERR: fast from dev skb->data%4 != 0");
+
+ if (skb->indev == NULL)
+ skb->indev = skb->dev;
+ //locate the IP header precisely, possibly skipping ppp/mac/pppoe L2 headers
+ if (set_skbdata_toip(skb) == 1 && fast_for_ipdata(skb))
+ return 1;
+
+ //fast path failed: restore the original skb fields and let the standard Linux stack handle it
+ skb->protocol = old_pro;
+ skb->data = old_data;
+ skb->len = old_len;
+ skb->network_header = old_netheader;
+ return 0;
+}
+
+/* Fast-process driver-delivered skbs whose data points at a MAC frame; usable by all Ethernet-style devices. */
+static int try_fast_for_macdata(struct sk_buff *skb, struct net_device *dev)
+{
+ /*
+ struct ethhdr *eth;
+ if (!(skb->network_header == 0 || skb->network_header == ~0U))
+ panic("network_header ERR!!!!!!!!!!\n");
+ skb->dev = dev;
+ if (skb->indev == NULL)
+ skb->indev = dev;
+ skb_reset_mac_header(skb);
+ eth = eth_hdr(skb);
+ skb->protocol = eth->h_proto;
+ skb_pull(skb, ETH_HLEN);
+ */
+
+ //if (tsp_mirror_handle)
+ //tsp_mirror_handle(skb);
+
+ //In-house tcp/udp loss/latency instrumentation (needs custom server app); cleared on fast-path failure
+ //net_dbg_perf_dev_recv((char *)skb, skb->dev->name);
+ if (try_fast_for_netcoredata(skb))
+ {
+ return 1;
+ }
+ //In-house tcp/udp loss/latency instrumentation (needs custom server app)
+ //net_dbg_perf_clear_last_item(skb);
+
+ //skb_push(skb, ETH_HLEN);
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(tsp_mirror_handle);
+
+/* Handle notifier-chain events: on device down, purge matching v4/v6 fast entries. */
+static int fast_event(struct notifier_block *this, unsigned long event, struct net_device *dev)
+{
+ traverse_command_t cmd;
+
+ switch (event) {
+ case NETDEV_DOWN:
+ if (dev)
+ {
+ cmd.cmd = TRAVERSE_CMD_DEV_DOWN;
+ cmd.arg = dev->ifindex;
+
+ fastnat_event(&cmd);
+ fast6_event(&cmd);
+ }
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static int fast_device_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+ struct net_device *dev = (struct net_device *)ptr; /* netdevice notifier passes the device directly */
+
+ return fast_event(this, event, dev);
+}
+
+static int fast_inet_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+ struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev; /* inetaddr notifier passes an in_ifaddr */
+
+ return fast_event(this, event, dev);
+}
+
+/*priority should be higher than masquerade, otherwise kernel will hang*/
+static struct notifier_block fast_dev_notifier = {
+ .notifier_call = fast_device_event,
+ .priority = 1, /* must run before masquerade's notifier */
+};
+
+/*priority should be higher than masquerade, otherwise kernel will hang*/
+static struct notifier_block fast_inet_notifier = {
+ .notifier_call = fast_inet_event,
+ .priority = 1, /* must run before masquerade's notifier */
+};
+
+void fast_device_down_event_by_name(char *dev_name) /* simulate a NETDEV_DOWN for the named device */
+{
+ struct net_device *dev = NULL;
+
+ if (!dev_name)
+ {
+ print_sun(SUN_ERR,"fast_device_down_event_by_name dev_name is null \n");
+ return;
+ }
+
+ dev = dev_get_by_name(&init_net, dev_name);
+ if (!dev)
+ {
+ print_sun(SUN_ERR,"fast_device_down_event_by_name dev not found \n");
+ return;
+ }
+
+ fast_event(NULL, NETDEV_DOWN, dev);
+
+ /*add by jiangjing: drop the ref taken by dev_get_by_name*/
+ dev_put(dev);
+}
+
+/**** Functions used by the new fast mode ****/
+extern void fast_local_conn_release(struct nf_conn *ct);
+extern void fast_local_sock_release(struct sock *sk);
+
+
+/* Link a fast-forward ct onto the dst_entry's connection list (no-op if already present). */
+void fast_dst_add_ct(struct dst_entry *dst, struct nf_conn *ct)
+{
+ struct conn_list *entry;
+ int conn_flag = 0;
+
+ list_for_each_entry_rcu(entry, &dst->conn_head, list)
+ {
+ if (entry->nfct == ct)
+ {
+ conn_flag = 1;
+ break;
+ }
+ }
+
+ if (conn_flag == 0)
+ {
+ struct conn_list *conn_list_node =(struct conn_list*)kzalloc(sizeof(struct conn_list), GFP_KERNEL); /* NOTE(review): GFP_KERNEL -- confirm no caller holds a spinlock/BH context */
+ if(conn_list_node) {
+ rcu_assign_pointer(conn_list_node->nfct, ct);
+ list_add_rcu(&conn_list_node->list, &dst->conn_head);
+ }
+ }
+}
+
+/* On net_device release: clear every ct's cached bridge-port pointer that names this device. */
+void fast_fw_conn_release_by_dev(struct net_device* dev)
+{
+ int hash = 0;
+ struct nf_conntrack_tuple_hash *h;
+ struct hlist_nulls_node *n;
+ struct nf_conn *ct;
+ int dir;
+ struct net_device *net;
+
+ if(fastnat_level == FAST_CLOSE)
+ return ;
+
+ rcu_read_lock();
+ for (hash = 0; hash < nf_conntrack_htable_size; hash++)
+ {
+ local_bh_disable();
+ hlist_nulls_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnnode)
+ {
+ if (h)
+ {
+ ct = nf_ct_tuplehash_to_ctrack(h);
+
+ //clear this connection's cached fast-forward egress port
+ spin_lock_bh(&fast_fw_spinlock);
+ for (dir = IP_CT_DIR_ORIGINAL; dir < IP_CT_DIR_MAX; dir++)
+ {
+ net = ct->fast_ct.fast_brport[dir];
+ if (net != NULL)
+ {
+ if(!strcmp(dev->name, net->name))
+ {
+ ct->fast_ct.fast_brport[dir] = NULL;
+ }
+ }
+ }
+ spin_unlock_bh(&fast_fw_spinlock);
+ }
+ }
+ local_bh_enable();
+ }
+ rcu_read_unlock();
+}
+/* On ct release: detach the ct from its cached dst entries and clear its fast-forward state. */
+void fast_fw_conn_release(struct nf_conn *ct)
+{
+ struct dst_entry *dst;
+ struct conn_list *entry;
+ int dir;
+
+ for (dir = IP_CT_DIR_ORIGINAL; dir < IP_CT_DIR_MAX; dir++)
+ {
+ if (!(dst = dst_get_by_ct(ct, dir)))
+ continue;
+
+ list_for_each_entry_rcu(entry, &dst->conn_head, list)
+ {
+ if (entry->nfct == ct)
+ {
+ entry->nfct = NULL;
+ __list_del_entry(&entry->list);
+ kfree(entry);
+ break;
+ }
+ }
+ //dst_get_by_ct took a hold, so release it here
+ dst_release(dst);
+ rcu_assign_pointer(ct->fast_ct.fast_dst[dir], NULL);
+ ct->fast_ct.fast_brport[dir] = NULL;
+ }
+ ct->fast_ct.isFast = 0;
+}
+
+//On dst_entry release: unlink every ct referencing this dst and clear its fast-forward state.
+void fast_fw_dst_entry_release(struct dst_entry *dst)
+{
+ struct conn_list *entry = NULL;
+ struct conn_list *entry_tmp = NULL;
+ struct nf_conn *ct;
+ struct list_head *tmp; /* unused */
+
+ list_for_each_entry_safe(entry, entry_tmp, &dst->conn_head, list) {
+
+ rcu_assign_pointer(ct, entry->nfct); /* NOTE(review): reads via rcu_assign_pointer; rcu_dereference is the intended accessor -- confirm */
+ if (!ct)
+ continue;
+
+ if (ct->fast_ct.fast_dst[IP_CT_DIR_ORIGINAL] && ct->fast_ct.fast_dst[IP_CT_DIR_ORIGINAL] == dst) {
+ rcu_assign_pointer(ct->fast_ct.fast_dst[IP_CT_DIR_ORIGINAL], NULL);
+ ct->fast_ct.fast_brport[IP_CT_DIR_ORIGINAL] = NULL;
+ }
+ else if (ct->fast_ct.fast_dst[IP_CT_DIR_REPLY] && ct->fast_ct.fast_dst[IP_CT_DIR_REPLY] == dst) {
+ rcu_assign_pointer(ct->fast_ct.fast_dst[IP_CT_DIR_REPLY], NULL);
+ ct->fast_ct.fast_brport[IP_CT_DIR_REPLY] = NULL;
+ }
+ else
+ print_sun(SUN_ERR,"fast_fw_dst_entry_release \n");
+
+ if (!ct->fast_ct.fast_brport[IP_CT_DIR_ORIGINAL] && !ct->fast_ct.fast_brport[IP_CT_DIR_REPLY])
+ ct->fast_ct.isFast = 0;
+ entry->nfct = NULL;
+ __list_del_entry(&entry->list);
+ kfree(entry);
+ }
+}
+
+/* ct release notification: tell the fast layer to drop its per-ct state. */
+void fast_conn_release(struct nf_conn *ct, int mark)
+{
+ spin_lock_bh(&fast_fw_spinlock);
+ if ((ct->fast_ct.isFast == FAST_CT_FW4 || ct->fast_ct.isFast == FAST_CT_FW6) && (mark & RELEASE_ALL_DST))
+ {
+ fast_fw_conn_release(ct);
+ }
+ spin_unlock_bh(&fast_fw_spinlock);
+}
+
+/* dst_entry release notification: tell the fast layer to drop state referencing it. */
+void fast_dst_entry_release(struct dst_entry * dst)
+{
+ spin_lock_bh(&fast_fw_spinlock);
+ fast_fw_dst_entry_release(dst);
+ spin_unlock_bh(&fast_fw_spinlock);
+}
+
+/* Release all new-mode fast-forward state. Only ct links both dst and sk, so walk the ct table. */
+void fast_release_all(int mark)
+{
+ int hash = 0;
+ struct nf_conntrack_tuple_hash *h;
+ struct hlist_nulls_node *n;
+ struct nf_conn *ct;
+
+ rcu_read_lock();
+ for (hash = 0; hash < nf_conntrack_htable_size; hash++) {
+ local_bh_disable();
+ hlist_nulls_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnnode) {
+ if (h)
+ {
+ ct = nf_ct_tuplehash_to_ctrack(h);
+ if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
+ continue;
+
+ //release this connection's fast-forward state
+ fast_conn_release(ct, mark);
+
+ nf_ct_put(ct);
+ }
+ }
+ local_bh_enable();
+ }
+ rcu_read_unlock();
+}
+
+/**** Functions shared by the old and new fast modes ****/
+
+//Install hook functions per priority level. Policy: whenever a higher-level hook is set, the
+//lower-level hook is also set, so devices lacking the higher hook still work; only cost is extra misses.
+void set_fast_level_cb(int param)
+{
+ //install the externally-called function pointers for the given fast level
+ if (param == FAST_CLOSE || param == FAST_CLOSE_KEEP_LINK) //fast forwarding disabled
+ {
+ fast_from_softirq = NULL;
+ fast_from_driver = NULL;
+ }
+ else if (param == FAST_NET_CORE) //net-core level; FAST_NEW only supports the softirq entry
+ {
+ fast_from_softirq = try_fast_for_netcoredata;
+ fast_from_driver = NULL;
+ }
+ //net_device to net_device, bypassing the IP softirq
+ else if (param == FAST_NET_DEVICE)
+ {
+ fast_from_softirq = try_fast_for_netcoredata;
+ fast_from_driver = try_fast_for_macdata;
+ }
+ else
+ print_sun(SUN_ERR,"fastnat_level error, shoud be 0~2!\n");
+}
+
+/* ÉèÖø÷×Ó¹¦ÄܵĿìËÙת·¢µ÷Óú¯Êý */
+void set_fast_switch_cb(unsigned long param)
+{
+ //¸ù¾ÝоɿìËÙת·¢ÉèÖÃʹÓõIJ»Í¬ipv4¡¢ipv6½Ó¿Ú
+ if (test_bit(FAST_TYPE_VERSION_BIT, ¶m)) //еÄfastģʽ
+ {
+ //еÄfastģʽÏÂ: ÉèÖÃIPv4±¾µØ¿ìËÙת·¢»Øµ÷º¯Êý
+ if (test_bit(FAST_TYPE_FW4_BIT, &fast_switch))
+ fast_fw4_proc = fast4_fw_recv;
+ else
+ fast_fw4_proc = NULL;
+
+ //еÄfastģʽÏÂ: ÉèÖÃIPv6±¾µØ¿ìËÙת·¢»Øµ÷º¯Êý
+ if (test_bit(FAST_TYPE_FW6_BIT, &fast_switch))
+ fast_fw6_proc = fast6_fw_recv;
+ else
+ fast_fw6_proc = NULL;
+
+ fast_local4_proc = NULL;
+ fast_local4_output_proc = NULL;
+
+ fast_local6_proc = NULL;
+ fast_local6_output_proc = NULL;
+
+ //ÉèÖÃ×ÓÍø¼ä¿ìËÙת·¢»Øµ÷º¯Êý
+ if (test_bit(FAST_TYPE_BR_BIT, &fast_switch))
+ fast_br_proc = fast_br;
+ else
+ fast_br_proc = NULL;
+
+ fast_nat4_proc = NULL;
+ fast_nat6_proc = NULL;
+ }
+ else //ÀϵÄfastģʽ
+ {
+ //ÀϵÄfastģʽÏÂ: ÉèÖÃIPv4±¾µØ¿ìËÙת·¢»Øµ÷º¯Êý
+ fast_nat4_proc = fast_nat_recv;
+
+ //ÀϵÄfastģʽÏÂ: ÉèÖÃIPv6±¾µØ¿ìËÙת·¢»Øµ÷º¯Êý
+ fast_nat6_proc = fast6_recv;
+
+ fast_br_proc = fast_br;
+ fast_local4_proc = NULL;
+ fast_local4_output_proc = NULL;
+ fast_local6_proc = NULL;
+ fast_local6_output_proc = NULL;
+ fast_fw4_proc = NULL;
+ fast_fw6_proc = NULL;
+ }
+}
+
+void fast_level_change(int new_level) /* apply a new fastnat level; cleans up when turning off */
+{
+ int old_level = 0;
+
+ old_level = fastnat_level;
+
+ if (old_level == new_level)
+ return;
+
+ fastnat_level = new_level;
+
+ //re-install callbacks for the latest fastnat level
+ set_fast_level_cb(fastnat_level);
+
+ //fastnat disabled: clear all fast-forward state
+ if (fastnat_level == FAST_CLOSE || fastnat_level == FAST_CLOSE_KEEP_LINK)
+ {
+ if (!test_bit(FAST_TYPE_VERSION_BIT, &fast_switch))
+ {
+ fastnat_cleanup_links();
+ fast6_cleanup_links();
+ }
+ else
+ {
+ fast_release_all(RELEASE_ALL_DST | RELEASE_ALL_SK);
+ }
+ }
+}
+
+void fast_switch_change(unsigned long new_switch) /* apply a new subfeature switch bitmap */
+{
+ unsigned long old_switch = fast_switch;
+
+ if (old_switch == new_switch)
+ return;
+
+ fast_switch = new_switch;
+
+ //install per-subfeature callbacks from the bitmap
+ set_fast_switch_cb(fast_switch);
+
+ //subfeatures turned off must have their cached fast-forward state cleared;
+ //switching from old fast to new fast must clear the old mode's cached data
+ if (!test_bit(FAST_TYPE_VERSION_BIT, &old_switch) && test_bit(FAST_TYPE_VERSION_BIT, &new_switch))
+ {
+ //the old fast mode has no separate IPv4/IPv6 control
+ fastnat_cleanup_links();
+
+ fast6_cleanup_links();
+ }
+ //switching from new fast to old fast must clear the new mode's cached data
+ else if (test_bit(FAST_TYPE_VERSION_BIT, &old_switch) && !test_bit(FAST_TYPE_VERSION_BIT, &new_switch))
+ {
+ fast_release_all(RELEASE_ALL_DST | RELEASE_ALL_SK);
+ }
+
+ if (test_bit(FAST_TYPE_VERSION_BIT, &new_switch) && (!test_bit(FAST_TYPE_FW4_BIT, &new_switch) || !test_bit(FAST_TYPE_FW6_BIT, &new_switch))) {
+ fast_release_all(RELEASE_ALL_DST);
+ }
+ if (test_bit(FAST_TYPE_VERSION_BIT, &new_switch) && (!test_bit(FAST_TYPE_LOCAL4_BIT, &new_switch) || !test_bit(FAST_TYPE_LOCAL6_BIT, &new_switch))) {
+ fast_release_all(RELEASE_ALL_SK);
+ }
+}
+
+//counts occurrences of dsts carrying the DST_NOCACHE flag
+int no_cache = 0;
+//counts uses of a dst whose dst->neighbour was NULL
+int no_neighbour = 0;
+struct dst_entry * dst_get_by_ct(struct nf_conn * ct, int dir) /* get+hold the ct's cached dst for a direction, or NULL */
+{
+ struct dst_entry *dst;
+ struct neighbour *_neighbour = NULL;
+
+ rcu_read_lock();
+ dst = rcu_dereference_protected(ct->fast_ct.fast_dst[dir], 1);
+
+ if(dst)
+ dst_hold_and_use(dst, jiffies);
+ else {
+ dst = NULL; /* redundant, kept for clarity */
+ }
+ rcu_read_unlock();
+ return dst;
+}
+
+/* Fast-path module init: slab cache, v4/v6 sub-inits, notifiers, callbacks and proc files. */
+static int __init
+tsp_fast_init(void)
+{
+ int ret4 = 0, ret6 = 0;
+
+ memset(zeromac, 0, sizeof(zeromac));
+
+ spin_lock_init(&fast_fw_spinlock);
+ //use a dedicated slab cache for fast entries
+ fast_head_cache = kmem_cache_create("fast_head_cache",
+ sizeof(struct fast_entry_s),
+ 0,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+ NULL);
+
+ //run the ipv4 and ipv6 sub-initializers
+ ret4 = tsp_fastnat_init();
+ ret6 = tsp_fast6_init();
+ fast4_fw_init();
+ fast6_fw_init();
+
+ if ((ret4 != 0) && (ret6 != 0))
+ return -EINVAL;
+
+ /* register notifier chains */
+ register_netdevice_notifier(&fast_dev_notifier);
+ register_inetaddr_notifier(&fast_inet_notifier);
+
+ //fast-forward and platform proc file initialization
+ set_fast_level_cb(fastnat_level);
+ set_fast_switch_cb(fast_switch);
+ fast_conntrack_init_proc();
+
+ net_adapter_init_proc();
+ return 0;
+}
+
+static void __exit
+tsp_fast_cleanup(void) /* module exit: disable callbacks, unregister notifiers, tear down v4/v6 */
+{
+ set_fast_level_cb(FAST_CLOSE);
+ set_fast_switch_cb(0);
+ unregister_netdevice_notifier(&fast_dev_notifier);
+ unregister_inetaddr_notifier(&fast_inet_notifier);
+
+ //run the ipv4 and ipv6 sub-cleanups
+ tsp_fastnat_cleanup();
+ tsp_fast6_cleanup();
+ fast4_fw_cleanup();
+ fast6_fw_cleanup();
+}
+
+late_initcall(tsp_fast_init); /* after net core and conntrack are up */
+module_exit(tsp_fast_cleanup);
+
diff --git a/upstream/linux-5.10/net/core/fastproc/fast_track.c b/upstream/linux-5.10/net/core/fastproc/fast_track.c
new file mode 100755
index 0000000..37f3cfc
--- /dev/null
+++ b/upstream/linux-5.10/net/core/fastproc/fast_track.c
@@ -0,0 +1,1203 @@
+#include <linux/types.h>
+#include <linux/netfilter.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h> /* Necessary because we use proc fs */
+#include <linux/skbuff.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/percpu.h>
+#include <linux/netdevice.h>
+#include <linux/security.h>
+#include <net/net_namespace.h>
+#ifdef CONFIG_SYSCTL
+#include <linux/sysctl.h>
+#endif
+#include <linux/rculist_nulls.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
+#include <net/SI/fastnat.h>
+#include <net/SI/fast6.h>
+#include <net/SI/fast_common.h>
+#include <net/SI/netioc_proc.h>
+
+
+#define PORT_LEN 10
+extern int fastnat_ack_param;
+extern int ackdrop_maxnum;
+extern unsigned int ct_iptables_syn_sw;
+
+
+static unsigned int seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir) /* print per-direction packet/byte counters; always returns 0 */
+{
+ struct nf_conn_acct *acct;
+ struct nf_conn_counter *counter;
+
+ acct = nf_conn_acct_find(ct);
+ if (!acct)
+ return 0; /* accounting extension not attached to this conntrack */
+
+ counter = acct->counter;
+ seq_printf(s, "packets=%llu bytes=%llu ",
+ (unsigned long long)atomic64_read(&counter[dir].packets),
+ (unsigned long long)atomic64_read(&counter[dir].bytes));
+
+ return 0;
+}
+
+static void *fastnat_level_seq_start(struct seq_file *seq, loff_t *pos) /* single-record sequence for /proc/net/fastnat_level */
+__acquires(RCU)
+{
+ if (*pos >= 1)
+ return NULL;
+ return 1; /* any non-NULL token; NOTE(review): int-to-pointer cast */
+}
+
+static void *fastnat_level_seq_next(struct seq_file *s, void *v, loff_t *pos) /* only one record: always terminate */
+{
+ (*pos)++;
+ return NULL;
+}
+
+static void fastnat_level_seq_stop(struct seq_file *s, void *v) /* nothing to release */
+__releases(RCU)
+{
+ return;
+}
+
+/* return 0 on success, 1 in case of error */
+static int fastnat_level_seq_show(struct seq_file *s, void *v) /* print the current layer-3 fast-forwarding level */
+{
+ seq_printf(s, "fastnat_level: %d\n", fastnat_level);
+ return 0;
+}
+
+static const struct seq_operations fastnat_level_seq_ops = {
+ .start = fastnat_level_seq_start,
+ .next = fastnat_level_seq_next,
+ .stop = fastnat_level_seq_stop,
+ .show = fastnat_level_seq_show
+};
+
+static int fastnat_level_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &fastnat_level_seq_ops);
+}
+
+//user write command handler; sets the layer-3 fast-forwarding level
+static ssize_t fastnat_level_set(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
+{
+ size_t size; /* NOTE(review): unused local */
+ char char_fastnat[5] = {0};
+ int level = 0;
+
+ //count includes the trailing terminator; only a single character is accepted
+ if (count != 2)
+ return -EINVAL;
+
+ if (copy_from_user(char_fastnat, buffer, 1))
+ return -EFAULT;
+
+ if ((char_fastnat[0] < '0' || char_fastnat[0] > '2') && (char_fastnat[0] != '5')) /* accepted levels: 0, 1, 2 or 5 */
+ return -EINVAL;
+
+ level = (int)(char_fastnat[0] - '0');
+
+ //re-install the fast-forwarding hook functions for the new level
+ fast_level_change(level);
+ return count;
+}
+
+static void *fast_switch_seq_start(struct seq_file *seq, loff_t *pos) /* single-record sequence for /proc/net/fast_switch */
+__acquires(RCU)
+{
+ if (*pos >= 1)
+ return NULL;
+ return 1; /* any non-NULL token; NOTE(review): int-to-pointer cast */
+}
+
+static void *fast_switch_seq_next(struct seq_file *s, void *v, loff_t *pos) /* only one record: always terminate */
+{
+ (*pos)++;
+ return NULL;
+}
+
+static void fast_switch_seq_stop(struct seq_file *s, void *v) /* nothing to release */
+__releases(RCU)
+{
+ return;
+}
+
+/* return 0 on success, 1 in case of error */
+static int fast_switch_seq_show(struct seq_file *s, void *v) /* print the fast-forwarding switch bitmask */
+{
+ seq_printf(s, "fast_switch: 0x%x\n", (unsigned int)fast_switch);
+ return 0;
+}
+
+static const struct seq_operations fast_switch_seq_ops = {
+ .start = fast_switch_seq_start,
+ .next = fast_switch_seq_next,
+ .stop = fast_switch_seq_stop,
+ .show = fast_switch_seq_show
+};
+
+static int fast_switch_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &fast_switch_seq_ops);
+}
+
+//user write command handler: parse a 1-4 digit decimal switch bitmask and apply it
+static ssize_t fast_switch_set(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
+{
+ size_t size; /* NOTE(review): unused local, kept for layout stability */
+ char char_fastnat[5] = {0};
+ int level = 0, i = 0;
+
+ //count includes the trailing terminator; require 1-4 digits (also keeps count-1 below from wrapping)
+ if (count < 2 || count > 5)
+ return -EINVAL;
+
+ memset(char_fastnat, 0, 5);
+ if (copy_from_user(char_fastnat, buffer, count - 1)) /* copy only the digits actually written, not a fixed 5 bytes */
+ return -EFAULT;
+
+ for(i = 0; i < count - 1; i++) {
+ if(char_fastnat[i] < '0' || char_fastnat[i] > '9')
+ return -EINVAL;
+ level = (int)(char_fastnat[i] - '0') + level*10;
+ }
+
+ //re-install the fast-forwarding hook functions for the new switch mask
+ fast_switch_change(level);
+ return count;
+}
+
+static void *fastbr_level_seq_start(struct seq_file *seq, loff_t *pos) /* single-record sequence for /proc/net/fastbr_level */
+__acquires(RCU)
+{
+ if (*pos >= 1)
+ return NULL;
+ return 1; /* any non-NULL token; NOTE(review): int-to-pointer cast */
+}
+
+static void *fastbr_level_seq_next(struct seq_file *s, void *v, loff_t *pos) /* only one record: always terminate */
+{
+ (*pos)++;
+ return NULL;
+}
+
+static void fastbr_level_seq_stop(struct seq_file *s, void *v) /* nothing to release */
+__releases(RCU)
+{
+ return;
+}
+
+/* return 0 on success, 1 in case of error */
+static int fastbr_level_seq_show(struct seq_file *s, void *v) /* print the layer-2 (bridge) fast-forwarding level */
+{
+ seq_printf(s, "fastbr_level: %d\n", fastbr_level);
+ return 0;
+}
+
+static const struct seq_operations fastbr_level_seq_ops = {
+ .start = fastbr_level_seq_start,
+ .next = fastbr_level_seq_next,
+ .stop = fastbr_level_seq_stop,
+ .show = fastbr_level_seq_show
+};
+
+static int fastbr_level_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &fastbr_level_seq_ops);
+}
+
+//user write command handler: set the layer-2 (bridge) fast-forwarding level
+static ssize_t fastbr_level_set(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
+{
+ size_t size; /* NOTE(review): unused local */
+ char char_fastbr[5] = {0};
+
+ //count includes the trailing terminator; only '0' or '1' is accepted
+ if (count != 2)
+ return -EINVAL;
+
+ if (copy_from_user(char_fastbr, buffer, 1))
+ return -EFAULT;
+
+ if (char_fastbr[0] < '0' || char_fastbr[0] > '1')
+ return -EINVAL;
+
+ fastbr_level = (int)(char_fastbr[0] - '0');
+
+ return count;
+}
+
+static void *fastnat_seq_start(struct seq_file *seq, loff_t *pos) /* iterate the ipv4 fast-path hash buckets; prints summary on first call */
+__acquires(RCU)
+{
+ spin_lock_bh(&fastnat_spinlock); /* held across the whole dump until fastnat_seq_stop() */
+ if (*pos >= nf_conntrack_htable_size)
+ return NULL;
+ else
+ {
+ if (*pos == 0)
+ {
+ seq_printf(seq, "fastnat have %d conn!!!\nskb_num4:%d,fastnat_num:%d\n",
+ working_list.count, skb_num4, fastnat_num);
+ seq_printf(seq, "fastbr_sum:%d,fastbr_num:%d\n",
+ skb_num4 + skb_num6 + skb_unknown - fastnat_num - fast6_num, fastbr_num);
+
+ if ((fastnat_ack_param == 1) && (ackdrop_maxnum >= 1)) /* ACK-delay feature enabled */
+ {
+ seq_printf(seq, "fastnat ack_delay_stats : total_count = %u, forword_count = %u, drop_count = %u, "
+ "timeout_xmit_count = %u, timeout_drop_count = %u\n",
+ (unsigned int)ack_delay_stats.total_count, (unsigned int)ack_delay_stats.forword_count,
+ (unsigned int)ack_delay_stats.drop_count, (unsigned int)ack_delay_stats.timeout_xmit_count,
+ (unsigned int)ack_delay_stats.timeout_drop_count);
+ }
+ }
+ return &working_hash[*pos];
+ }
+
+}
+
+static void *fastnat_seq_next(struct seq_file *s, void *v, loff_t *pos) /* advance to the next hash bucket */
+{
+ (*pos)++;
+ //return fastnat_get_next(s, v);
+ if (*pos >= nf_conntrack_htable_size)
+ return NULL;
+ else
+ return &working_hash[*pos];
+}
+
+
+static void fastnat_seq_stop(struct seq_file *s, void *v) /* release the lock taken in fastnat_seq_start() */
+__releases(RCU)
+{
+ spin_unlock_bh(&fastnat_spinlock);
+}
+
+/* return 0 on success, 1 in case of error */
+static int fastnat_seq_show(struct seq_file *s, void *v) /* dump every ORIGINAL-direction entry in one ipv4 fast-path hash bucket */
+{
+ struct hlist_nulls_head *head = (struct hlist_nulls_head *) v;
+ struct nf_conntrack_tuple_hash *h;
+ struct hlist_nulls_node *n;
+ fast_entry_data_t *nat_entry_data;
+ fast_entry_t *nat_entry = NULL;
+ const struct nf_conntrack_l3proto *l3proto; /* NOTE(review): unused local */
+ const struct nf_conntrack_l4proto *l4proto;
+ int ret = 0; /* NOTE(review): assigned but never returned */
+
+ hlist_nulls_for_each_entry(h, n, head, hnnode)
+ {
+ nat_entry_data = fast_hash_to_data(h);
+ nat_entry = fast_data_to_entry(nat_entry_data);
+
+ if (unlikely(!atomic_inc_not_zero(&nat_entry->ct->ct_general.use))) /* ct is being destroyed: abort this bucket */
+ return 0;
+
+ /* we only want to print DIR_ORIGINAL */
+ if (NF_CT_DIRECTION(h))
+ {
+ nf_ct_put(nat_entry->ct);
+ continue;
+ }
+
+ l4proto = nf_ct_l4proto_find(nf_ct_protonum(nat_entry->ct));
+
+
+ ret = -ENOSPC;
+ seq_printf(s, "l3proto: %u l4proto: %u %lu %lu %lu ",
+ nf_ct_l3num(nat_entry->ct), nf_ct_protonum(nat_entry->ct),
+ (unsigned long)(nat_entry->timeout.expires/HZ),
+ (unsigned long)(tcp_timeouts[nat_entry->ct->proto.tcp.state]/HZ), (unsigned long)(jiffies/HZ));
+ //tcp_conntrack_names only defines names up to TCP_CONNTRACK_MAX, but 'state' may hold values beyond it
+ if (nat_entry->ct->proto.tcp.state < TCP_CONNTRACK_MAX)
+ {
+#ifdef CONFIG_NF_CONNTRACK_PROCFS
+ if (l4proto->print_conntrack)
+ {
+ l4proto->print_conntrack(s, nat_entry->ct);
+ }
+#endif
+ }
+
+ print_tuple(s, &nat_entry->ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+ l4proto);
+
+ if (seq_print_acct(s, nat_entry->ct, IP_CT_DIR_ORIGINAL))
+ {
+ nf_ct_put(nat_entry->ct);
+ continue;
+ }
+
+ if (!(test_bit(IPS_SEEN_REPLY_BIT, &nat_entry->ct->status)))
+ seq_printf(s, "[UNREPLIED] ");
+
+
+ print_tuple(s, &nat_entry->ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+ l4proto);
+
+ if (seq_print_acct(s, nat_entry->ct, IP_CT_DIR_REPLY))
+ {
+ nf_ct_put(nat_entry->ct);
+ continue;
+ }
+
+ if (test_bit(IPS_ASSURED_BIT, &nat_entry->ct->status))
+ seq_printf(s, "[ASSURED] ");
+
+
+ seq_printf(s, "NAT_ip=%pI4 NAT_port==%hu \n",&nat_entry->data[IP_CT_DIR_ORIGINAL].nat_addr,ntohs(nat_entry->data[IP_CT_DIR_ORIGINAL].nat_port));
+ nf_ct_put(nat_entry->ct);
+ }
+
+ return 0;
+}
+
+
+static const struct seq_operations fastnat_seq_ops = {
+ .start = fastnat_seq_start,
+ .next = fastnat_seq_next,
+ .stop = fastnat_seq_stop,
+ .show = fastnat_seq_show
+};
+
+static int fastnat_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &fastnat_seq_ops);
+}
+
+static void *fast6_seq_start(struct seq_file *seq, loff_t *pos) /* iterate the ipv6 fast-path hash buckets; prints summary on first call */
+__acquires(RCU)
+{
+ spin_lock_bh(&fast6_spinlock); /* held across the whole dump until fast6_seq_stop() */
+ if (*pos >= nf_conntrack_htable_size)
+ return NULL;
+ else
+ {
+ if (*pos == 0)
+ {
+#if 0
+ seq_printf(seq, "fastnat ack_delay_stats : total_count = %d, forword_count = %d, drop_count = %d, "
+ "timeout_xmit_count = %d, timeout_drop_count = %d\n",
+ ack_delay_stats.total_count, ack_delay_stats.forword_count, ack_delay_stats.drop_count,
+ ack_delay_stats.timeout_xmit_count, ack_delay_stats.timeout_drop_count);
+ seq_printf(seq, "fastnat have %d conn!!!\nfastnat_recv_count:%d,fastnat_real_count:%d\n",
+ working_list.count,fastnat_recv_count,fastnat_real_count);
+ seq_printf(seq, "send_2_ps_failed:%u, send_2_usb_failed:%u\n", send_2_ps_failed, send_2_usb_failed);
+#endif
+ seq_printf(seq, "fast6 have %d conn!!!\nskb_num6:%d,fast6_num:%d\n",
+ working_list6.count, skb_num6, fast6_num);
+ }
+ return &working_hash6[*pos];
+ }
+
+}
+
+static void *fast6_seq_next(struct seq_file *s, void *v, loff_t *pos) /* advance to the next hash bucket */
+{
+ (*pos)++;
+ if (*pos >= nf_conntrack_htable_size)
+ return NULL;
+ else
+ return &working_hash6[*pos];
+}
+
+
+static void fast6_seq_stop(struct seq_file *s, void *v) /* release the lock taken in fast6_seq_start() */
+__releases(RCU)
+{
+ spin_unlock_bh(&fast6_spinlock);
+}
+
+/* return 0 on success, 1 in case of error */
+static int fast6_seq_show(struct seq_file *s, void *v) /* dump every ORIGINAL-direction entry in one ipv6 fast-path hash bucket */
+{
+ struct hlist_nulls_head *head = (struct hlist_nulls_head *) v;
+ struct nf_conntrack_tuple_hash *h;
+ struct hlist_nulls_node *n;
+ fast_entry_data_t *fast6_entry_data;
+ fast_entry_t *fast6_entry = NULL;
+ const struct nf_conntrack_l3proto *l3proto; /* NOTE(review): unused local */
+ const struct nf_conntrack_l4proto *l4proto;
+ int ret = 0; /* NOTE(review): assigned but never returned */
+
+ hlist_nulls_for_each_entry(h, n, head, hnnode)
+ {
+ fast6_entry_data = fast_hash_to_data(h);
+ fast6_entry = fast_data_to_entry(fast6_entry_data);
+
+ if (unlikely(!atomic_inc_not_zero(&fast6_entry->ct->ct_general.use))) /* ct is being destroyed: abort this bucket */
+ return 0;
+
+ /* we only want to print DIR_ORIGINAL */
+ if (NF_CT_DIRECTION(h))
+ {
+ nf_ct_put(fast6_entry->ct);
+ continue;
+ }
+
+ l4proto = nf_ct_l4proto_find(nf_ct_protonum(fast6_entry->ct));
+
+ ret = -ENOSPC;
+ seq_printf(s, "l3proto: %u l4proto: %u %lu %lu %lu ",
+ nf_ct_l3num(fast6_entry->ct), nf_ct_protonum(fast6_entry->ct),
+ (unsigned long)(fast6_entry->timeout.expires/HZ),
+ (unsigned long)(tcp_timeouts[fast6_entry->ct->proto.tcp.state]/HZ), (unsigned long)(jiffies/HZ));
+
+ //tcp_conntrack_names only defines names up to TCP_CONNTRACK_MAX, but 'state' may hold values beyond it
+ if (fast6_entry->ct->proto.tcp.state < TCP_CONNTRACK_MAX)
+ {
+#ifdef CONFIG_NF_CONNTRACK_PROCFS
+ if (l4proto->print_conntrack)
+ {
+ l4proto->print_conntrack(s, fast6_entry->ct);
+ }
+#endif
+ }
+
+ print_tuple(s, &fast6_entry->ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+ l4proto);
+
+ if (seq_print_acct(s, fast6_entry->ct, IP_CT_DIR_ORIGINAL))
+ {
+ nf_ct_put(fast6_entry->ct);
+ continue;
+ }
+
+ if (!(test_bit(IPS_SEEN_REPLY_BIT, &fast6_entry->ct->status)))
+ seq_printf(s, "[UNREPLIED] ");
+
+ print_tuple(s, &fast6_entry->ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+ l4proto);
+
+ if (seq_print_acct(s, fast6_entry->ct, IP_CT_DIR_REPLY))
+ {
+ nf_ct_put(fast6_entry->ct);
+ continue;
+ }
+
+ if (test_bit(IPS_ASSURED_BIT, &fast6_entry->ct->status))
+ seq_printf(s, "[ASSURED] ");
+
+
+ seq_printf(s, "\n");
+ nf_ct_put(fast6_entry->ct);
+ }
+
+ return 0;
+}
+
+static const struct seq_operations fast6_seq_ops = {
+ .start = fast6_seq_start,
+ .next = fast6_seq_next,
+ .stop = fast6_seq_stop,
+ .show = fast6_seq_show
+};
+
+static int fast6_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &fast6_seq_ops);
+}
+
+//user write command handler: tear down fast links for the named net device
+static ssize_t dev_down_set(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
+{
+ size_t size;
+ char dev_name[MAX_NET_DEVICE_NAME_LEN + 1] = {0};
+
+ //count includes the trailing terminator; NOTE(review): count==0 makes count-1 wrap — confirm callers guarantee count>=1
+ size = min(count - 1, MAX_NET_DEVICE_NAME_LEN);
+ if (copy_from_user(dev_name, buffer, size))
+ return -EFAULT;
+
+ //delete the ipv4/ipv6 fast links associated with this net device
+ fast_device_down_event_by_name(dev_name);
+
+ return count;
+}
+
+static void *nofast_port_seq_start(struct seq_file *seq, loff_t *pos) /* single-record sequence for /proc/net/nofast_port */
+__acquires(RCU)
+{
+ if (*pos >= 1)
+ return NULL;
+ return 1; /* any non-NULL token; NOTE(review): int-to-pointer cast */
+}
+
+static void *nofast_port_seq_next(struct seq_file *s, void *v, loff_t *pos) /* only one record: always terminate */
+{
+ (*pos)++;
+ return NULL;
+}
+
+static void nofast_port_seq_stop(struct seq_file *s, void *v) /* nothing to release */
+__releases(RCU)
+{
+ return;
+}
+
+static int nofast_port_seq_show(struct seq_file *s, void *v) /* print the '+'-separated list of ports excluded from fast path */
+{
+ int i = 0;
+
+ if (nofast_port[0] == 0)
+ {
+ seq_printf(s, "All ports support fast! \n");
+ }
+ else
+ {
+ seq_printf(s, "Not supported ports include:\n%d", nofast_port[0]);
+
+ for (i = 1; i < NOFAST_PROTO_MAX; i++)
+ {
+ //the port list is terminated by the first 0 entry
+ if (nofast_port[i] == 0)
+ break;
+ seq_printf(s, "+%d", nofast_port[i]);
+ }
+ seq_printf(s, "\n\n");
+ }
+ return 0;
+}
+
+static const struct seq_operations nofast_port_seq_ops = {
+ .start = nofast_port_seq_start,
+ .next = nofast_port_seq_next,
+ .stop = nofast_port_seq_stop,
+ .show = nofast_port_seq_show
+};
+
+static int nofast_port_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &nofast_port_seq_ops);
+}
+
+struct nf_conntrack_tuple tuple_info; /* filter tuple written via /proc conn_datainfo; zeroed fields match everything */
+/*
+1~6 query fast link information (see enum conn_fast_type)
+8 query skb release-point information
+9 query socket tracking information
+*/
+int getconn_type = 0; /* fast link type filter; 0 matches all types */
+
+static void *conn_datainfo_seq_start(struct seq_file *seq, loff_t *pos) /* single-record sequence; prints fast4/fast6 counter summary first */
+__acquires(RCU)
+{
+ if (*pos >= 1)
+ return NULL;
+ seq_printf(seq, "start fast4 count:%ld, fw:%ld, local4_recv:%ld, local4_output:%ld\n",
+ (long)skb_num4, (long)fastnat_num, (long)fast_local4_rcv_num, (long)fast_local4_output_num);
+ seq_printf(seq, "start fast6 count:%ld, fw:%ld, local6_recv:%ld, local6_output:%ld\n",
+ (long)skb_num6, (long)fast6_num, (long)fast_local6_rcv_num, (long)fast_local6_output_num);
+ return 1; /* any non-NULL token; NOTE(review): int-to-pointer cast */
+}
+
+static void *conn_datainfo_seq_next(struct seq_file *s, void *v, loff_t *pos) /* only one record: always terminate */
+{
+ (*pos)++;
+ return NULL;
+}
+
+static void conn_datainfo_seq_stop(struct seq_file *s, void *v) /* nothing to release */
+__releases(RCU)
+{
+ return;
+}
+
+static int conn_datainfo_seq_show(struct seq_file *s, void *v) /* walk nf_conntrack_hash and print conns matching tuple_info/getconn_type filters; NOTE(review): iterates hash without rcu_read_lock — confirm fast_fw_spinlock is sufficient here */
+{
+ int i = 0, j = 0; /* NOTE(review): j unused */
+ struct nf_conntrack_tuple_hash *h;
+ struct nf_conntrack_tuple_hash *h_rdir;
+ struct nf_conn * ct;
+ struct hlist_nulls_node *n;
+
+ for(i = 0; i < nf_conntrack_htable_size; i++) {
+ hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[i], hnnode) {
+ if(h->tuple.dst.dir != IP_CT_DIR_ORIGINAL) /* print each conntrack once */
+ continue;
+ if(tuple_info.dst.protonum && tuple_info.dst.protonum != h->tuple.dst.protonum)
+ continue;
+ if(tuple_info.dst.u3.ip && memcmp(&tuple_info.dst.u3.ip6, h->tuple.dst.u3.ip6, 16) != 0)
+ continue;
+ if(tuple_info.src.u3.ip && memcmp(&tuple_info.src.u3.ip6, h->tuple.src.u3.ip6, 16) != 0)
+ continue;
+ if(tuple_info.dst.u.all && tuple_info.dst.u.all != h->tuple.dst.u.all) {
+ continue;
+ }
+ if(tuple_info.src.u.all && tuple_info.src.u.all != h->tuple.src.u.all) {
+ continue;
+ }
+
+ ct = container_of(h, struct nf_conn, tuplehash[h->tuple.dst.dir]);
+
+ spin_lock_bh(&fast_fw_spinlock); /* protects fast_ct fields read below */
+ if(getconn_type && getconn_type != ct->fast_ct.isFast)
+ {
+ spin_unlock_bh(&fast_fw_spinlock);
+ continue;
+ }
+ if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use))) /* ct being destroyed: skip */
+ {
+ spin_unlock_bh(&fast_fw_spinlock);
+ continue;
+ }
+
+ h_rdir = &ct->tuplehash[IP_CT_DIR_REPLY];
+ if(h->tuple.src.l3num == AF_INET) {
+ /*seq_printf(s, "ctinfo protonum: %d Original sip: %08x, sport: %d, dip: %08x, dport: %d, packets: %lu , bytes: %lu;",
+ h->tuple.dst.protonum, ntohl(h->tuple.src.u3.ip), ntohs(h->tuple.src.u.all), ntohl(h->tuple.dst.u3.ip), ntohs(h->tuple.dst.u.all),
+ ct->packet_info[IP_CT_DIR_ORIGINAL].packets, ct->packet_info[IP_CT_DIR_ORIGINAL].bytes);
+ seq_printf(s, " reply sip: %08x, sport: %d, dip: %08x, dport: %d, packets: %lu , bytes: %lu\n",
+ ntohl(h_rdir->tuple.src.u3.ip), ntohs(h_rdir->tuple.src.u.all), ntohl(h_rdir->tuple.dst.u3.ip), ntohs(h_rdir->tuple.dst.u.all),
+ ct->packet_info[IP_CT_DIR_REPLY].packets, ct->packet_info[IP_CT_DIR_REPLY].bytes);*/
+ seq_printf(s, "ctinfo protonum: %d Original sip: %08x, sport: %d, dip: %08x, dport: %d;",
+ h->tuple.dst.protonum, ntohl(h->tuple.src.u3.ip), ntohs(h->tuple.src.u.all), ntohl(h->tuple.dst.u3.ip), ntohs(h->tuple.dst.u.all));
+ seq_printf(s, " reply sip: %08x, sport: %d, dip: %08x, dport: %d\n",
+ ntohl(h_rdir->tuple.src.u3.ip), ntohs(h_rdir->tuple.src.u.all), ntohl(h_rdir->tuple.dst.u3.ip), ntohs(h_rdir->tuple.dst.u.all));
+ }
+ else if(h->tuple.src.l3num == AF_INET6) {
+ /*seq_printf(s, "ctinfo protonum: %d Original sip: %x:%x:%x:%x:%x:%x:%x:%x sport: %d, dip: %x:%x:%x:%x:%x:%x:%x:%x, dport: %d, packets: %lu , bytes: %lu;",
+ h->tuple.dst.protonum, ntohs(h->tuple.src.u3.in6.s6_addr16[0]), ntohs(h->tuple.src.u3.in6.s6_addr16[1]), ntohs(h->tuple.src.u3.in6.s6_addr16[2]), ntohs(h->tuple.src.u3.in6.s6_addr16[3]),
+ ntohs(h->tuple.src.u3.in6.s6_addr16[4]), ntohs(h->tuple.src.u3.in6.s6_addr16[5]), ntohs(h->tuple.src.u3.in6.s6_addr16[6]), ntohs(h->tuple.src.u3.in6.s6_addr16[7]), ntohs(h->tuple.src.u.all),
+ ntohs(h->tuple.dst.u3.in6.s6_addr16[0]), ntohs(h->tuple.dst.u3.in6.s6_addr16[1]), ntohs(h->tuple.dst.u3.in6.s6_addr16[2]), ntohs(h->tuple.dst.u3.in6.s6_addr16[3]),
+ ntohs(h->tuple.dst.u3.in6.s6_addr16[4]), ntohs(h->tuple.dst.u3.in6.s6_addr16[5]), ntohs(h->tuple.dst.u3.in6.s6_addr16[6]), ntohs(h->tuple.dst.u3.in6.s6_addr16[7]), ntohs(h->tuple.dst.u.all),
+ ct->packet_info[IP_CT_DIR_ORIGINAL].packets, ct->packet_info[IP_CT_DIR_ORIGINAL].bytes);
+ seq_printf(s, " Reply sip: %x:%x:%x:%x:%x:%x:%x:%x sport: %d, dip: %x:%x:%x:%x:%x:%x:%x:%x, dport: %d, packets: %lu , bytes: %lu\n",
+ ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[0]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[1]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[2]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[3]),
+ ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[4]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[5]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[6]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[7]), ntohs(h_rdir->tuple.src.u.all),
+ ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[0]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[1]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[2]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[3]),
+ ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[4]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[5]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[6]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[7]), ntohs(h_rdir->tuple.dst.u.all),
+ ct->packet_info[IP_CT_DIR_REPLY].packets, ct->packet_info[IP_CT_DIR_REPLY].bytes);*/
+ seq_printf(s, "ctinfo protonum: %d Original sip: %x:%x:%x:%x:%x:%x:%x:%x sport: %d, dip: %x:%x:%x:%x:%x:%x:%x:%x, dport: %d, packets: xx , bytes: xx;",
+ h->tuple.dst.protonum, ntohs(h->tuple.src.u3.in6.s6_addr16[0]), ntohs(h->tuple.src.u3.in6.s6_addr16[1]), ntohs(h->tuple.src.u3.in6.s6_addr16[2]), ntohs(h->tuple.src.u3.in6.s6_addr16[3]),
+ ntohs(h->tuple.src.u3.in6.s6_addr16[4]), ntohs(h->tuple.src.u3.in6.s6_addr16[5]), ntohs(h->tuple.src.u3.in6.s6_addr16[6]), ntohs(h->tuple.src.u3.in6.s6_addr16[7]), ntohs(h->tuple.src.u.all),
+ ntohs(h->tuple.dst.u3.in6.s6_addr16[0]), ntohs(h->tuple.dst.u3.in6.s6_addr16[1]), ntohs(h->tuple.dst.u3.in6.s6_addr16[2]), ntohs(h->tuple.dst.u3.in6.s6_addr16[3]),
+ ntohs(h->tuple.dst.u3.in6.s6_addr16[4]), ntohs(h->tuple.dst.u3.in6.s6_addr16[5]), ntohs(h->tuple.dst.u3.in6.s6_addr16[6]), ntohs(h->tuple.dst.u3.in6.s6_addr16[7]), ntohs(h->tuple.dst.u.all));
+ seq_printf(s, " Reply sip: %x:%x:%x:%x:%x:%x:%x:%x sport: %d, dip: %x:%x:%x:%x:%x:%x:%x:%x, dport: %d, packets: xx , bytes: xx\n",
+ ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[0]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[1]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[2]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[3]),
+ ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[4]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[5]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[6]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[7]), ntohs(h_rdir->tuple.src.u.all),
+ ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[0]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[1]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[2]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[3]),
+ ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[4]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[5]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[6]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[7]), ntohs(h_rdir->tuple.dst.u.all));
+ }
+ if(ct->fast_ct.isFast == FAST_CT_LOCAL6 || ct->fast_ct.isFast == FAST_CT_LOCAL4) { /* local (socket) fast path */
+ seq_printf(s, "ctinfo ->ISFAST: %d, sk: %#llx\n", ct->fast_ct.isFast, (UINT64)ct->fast_ct.sk);
+ } else if(ct->fast_ct.isFast == FAST_CT_FW6 || ct->fast_ct.isFast == FAST_CT_FW4) { /* forwarded fast path */
+ seq_printf(s, "ctinfo ->ISFAST: %d", ct->fast_ct.isFast);
+ if(ct->fast_ct.fast_dst[IP_CT_DIR_ORIGINAL])
+ seq_printf(s, " Original fast_dst: %#llx", (UINT64)ct->fast_ct.fast_dst[IP_CT_DIR_ORIGINAL]);
+ if(ct->fast_ct.fast_dst[IP_CT_DIR_REPLY])
+ seq_printf(s, " Reply fast_dst: %#llx", (UINT64)ct->fast_ct.fast_dst[IP_CT_DIR_REPLY]);
+ if(ct->fast_ct.fast_brport[IP_CT_DIR_ORIGINAL])
+ seq_printf(s, " Original fast_brport: %#llx", (UINT64)ct->fast_ct.fast_brport[IP_CT_DIR_ORIGINAL]);
+ if(ct->fast_ct.fast_brport[IP_CT_DIR_REPLY])
+ seq_printf(s, " Reply fast_brport: %#llx", (UINT64)ct->fast_ct.fast_brport[IP_CT_DIR_REPLY]);
+ seq_printf(s, "\n");
+ }
+
+ spin_unlock_bh(&fast_fw_spinlock);
+ nf_ct_put(ct);
+ }
+ }
+ return 0;
+}
+
+static const struct seq_operations conn_datainfo_seq_ops= {
+ .start = conn_datainfo_seq_start,
+ .next = conn_datainfo_seq_next,
+ .stop = conn_datainfo_seq_stop,
+ .show = conn_datainfo_seq_show
+
+};
+
+static int conn_datainfo_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &conn_datainfo_seq_ops);
+}
+
+//convert a decimal string (optional leading spaces and a single leading minus sign) to an int
+static int str2int(char *str)
+{
+ int i = 0, value = 0, negative = 1;
+ int len = strlen(str);
+
+ for (i = 0; i < len; i++)
+ {
+ //skip leading spaces
+ if ((value == 0) && (str[i] == ' '))
+ continue;
+
+ //honour a minus sign only before any digit has been read (was accepted mid-number, e.g. "12-3" parsed as -123)
+ if ((value == 0) && (negative == 1) && (str[i] == '-'))
+ {
+ negative = -1;
+ continue;
+ }
+
+ //stop at the first non-decimal character
+ if (str[i] < '0' || str[i] > '9')
+ break;
+ value = value * 10 + (str[i] - '0');
+ }
+ return value * negative;
+}
+
+static void parse_nofast_port(const char *str, char split) /* rebuild nofast_port[] from a 'split'-separated list of decimal ports */
+{
+ char *p = NULL;
+ char *pre = str; /* NOTE(review): discards the const qualifier of 'str' */
+ char portStr[PORT_LEN] = {0}; //a protocol port number is at most 65535
+ int count = 0, port = 0, len = 0;
+
+ memset(nofast_port, 0, NOFAST_PROTO_MAX * sizeof(nofast_port[0]));
+
+ for (; (p = strchr(pre, split)) != NULL; pre = p + 1)
+ {
+ //the very first character is the separator itself: empty token
+ if (p == pre)
+ continue;
+
+ memset(portStr, 0, PORT_LEN);
+ len = min(p - pre, PORT_LEN - 1);
+ snprintf(portStr,len+1,"%s",pre);
+ port = str2int(portStr);
+ if (port <= 0 || port > 65535) //port number is at most 65535
+ {
+ continue;
+ }
+ nofast_port[count++] = port;
+ if (count == NOFAST_PROTO_MAX)
+ return;
+ }
+
+ if (*pre != '\0') //trailing token: the string did not end with a separator
+ {
+ memset(portStr, 0, PORT_LEN);
+ len = min(str + strlen(str) - pre, PORT_LEN - 1);
+ snprintf(portStr,len+1,"%s",pre);
+ port = str2int(portStr);
+ if (port <= 0 || port > 65535) //port number is at most 65535
+ {
+ return;
+ }
+ nofast_port[count++] = port;
+ }
+}
+
+//user write command handler: configure the '+'-separated list of ports excluded from the fast path
+static ssize_t nofast_port_set(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
+{
+ size_t size;
+ char proto[1024] = {0};
+
+ //count includes the trailing terminator; cap the copy so proto[] always stays NUL-terminated (was min(count-1, 1024), which could fill all 1024 bytes and wrap when count==0)
+ size = count ? min(count - 1, sizeof(proto) - 1) : 0;
+ if (copy_from_user(proto, buffer, size))
+ return -EFAULT;
+
+ //parse the '+'-separated port list
+ parse_nofast_port(proto, '+');
+
+ return count;
+}
+
+extern int in4_pton(const char *src, int srclen,
+ u8 *dst,
+ int delim, const char **end);
+extern int in6_pton(const char *src, int srclen,
+ u8 *dst,
+ int delim, const char **end);
+
+
+static void conn_datainfo_get_str(char *str, char *start, char *end) { /* copy [start,end) into str and NUL-terminate; str must be writable and large enough */
+ strncat(str, start, end - start);
+ *(str + (unsigned long)end - (unsigned long)start) = '\0';
+}
+
+/***************************************
+input format: src-ip+src-port+dst-ip+dst-port+l4-protocol+fast-link-type (see enum conn_fast_type)
+example: 192.168.0.100+1111+192.168.30.102+2222+6+4
+note: an omitted field acts as a wildcard and matches everything
+ "+++++" means: dump information for all links
+***************************************/
+static ssize_t conn_datainfo_set(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
+{
+ char tuple[1024] = "";
+ int i = 0;
+ char *split[5];
+ char sip[40] = "";
+ char sport[6] = "";
+ char dip[40] = "";
+ char dport[6] = "";
+ char protonum[6] = "";
+ char conn_type[6] = "";
+ const char *end;
+
+ tuple[1023] = '\0';
+ if (strncpy_from_user(tuple, (char *)buffer, min_t(size_t, count, sizeof(tuple) - 1)) <= 0) { /* bounded copy: an unbounded 'count' could overflow tuple[] */
+ return -EFAULT;
+ }
+ memset(&tuple_info, 0,sizeof(struct nf_conntrack_tuple));
+ for(i = 0; i < 5; i++) {
+ if(i == 0)
+ split[i] = strchr(tuple, '+');
+ else
+ split[i] = strchr(split[i-1] + 1, '+');
+ if(!split[i])
+ goto err_out;
+ switch(i) {
+ case 0:
+ conn_datainfo_get_str(sip, tuple, split[i]);
+ break;
+ case 1:
+ conn_datainfo_get_str(sport, split[i-1] + 1, split[i]);
+ break;
+ case 2:
+ conn_datainfo_get_str(dip, split[i-1] + 1, split[i]);
+ break;
+ case 3:
+ conn_datainfo_get_str(dport, split[i-1] + 1, split[i]);
+ break;
+ case 4:
+ conn_datainfo_get_str(protonum, split[i-1] + 1, split[i]);
+ break;
+ default:
+ goto err_out;
+ }
+ }
+ strncat(conn_type, split[i-1] + 1, sizeof(conn_type)-strlen(conn_type)-1);
+ if(strlen(sip) > 0) {
+ if(strchr(sip,'.') != NULL && in4_pton(sip, strlen(sip), (u8 *)&tuple_info.src.u3.in, -1, &end) != 1)
+ goto err_out;
+ else if(strchr(sip,':') != NULL && in6_pton(sip, strlen(sip), (u8 *)&tuple_info.src.u3.in6, -1, &end) != 1)
+ goto err_out;
+ }
+ if(strlen(sport) > 0) {
+ for(i = 0; i < strlen(sport); i++) {
+ if(sport[i] < '0' || sport[i] > '9')
+ goto err_out;
+ tuple_info.src.u.all = sport[i] - '0' + tuple_info.src.u.all*10;
+ }
+ if(tuple_info.src.u.all > 65535)
+ goto err_out;
+ tuple_info.src.u.all = htons(tuple_info.src.u.all);
+ }
+ if(strlen(dip) > 0) {
+ if(strchr(dip,'.') != NULL && in4_pton(dip, strlen(dip), (u8 *)&tuple_info.dst.u3.in, -1, &end) != 1)
+ goto err_out;
+ else if(strchr(dip,':') != NULL && in6_pton(dip, strlen(dip), (u8 *)&tuple_info.dst.u3.in6, -1, &end) != 1)
+ goto err_out;
+ }
+ if(strlen(dport) > 0) {
+ for(i = 0; i < strlen(dport); i++) {
+ if(dport[i] < '0' || dport[i] > '9')
+ goto err_out;
+ tuple_info.dst.u.all = dport[i] - '0' + tuple_info.dst.u.all*10;
+ }
+ if(tuple_info.dst.u.all > 65535)
+ goto err_out;
+ tuple_info.dst.u.all = htons(tuple_info.dst.u.all);
+ }
+ if(strlen(protonum) > 0) {
+ for(i = 0; i < strlen(protonum); i++) {
+ if(protonum[i] < '0' || protonum[i] > '9')
+ goto err_out;
+ tuple_info.dst.protonum = protonum[i] - '0' + tuple_info.dst.protonum*10;
+ }
+ }
+ if(strlen(conn_type) > 0) {
+ getconn_type = 0;
+ for(i = 0; i < strlen(conn_type) - 1; i++) {
+ if(conn_type[i] < '0' || conn_type[i] > '9')
+ goto err_out;
+ getconn_type = conn_type[i] - '0' + getconn_type*10;
+ }
+ }
+ return count;
+err_out:
+ memset(&tuple_info, 0,sizeof(struct nf_conntrack_tuple));
+ getconn_type = 0;
+ return -EFAULT;
+}
+
+extern int pkt_lost_track;
+static void *pkt_lostinfo_seq_start(struct seq_file *seq, loff_t *pos) /* single-record sequence; show is currently a stub */
+__acquires(RCU)
+{
+ if (*pos >= 1)
+ return NULL;
+ return 1; /* any non-NULL token; NOTE(review): int-to-pointer cast */
+}
+
+static void *pkt_lostinfo_seq_next(struct seq_file *s, void *v, loff_t *pos) /* only one record: always terminate */
+{
+ (*pos)++;
+ return NULL;
+}
+
+static void pkt_lostinfo_seq_stop(struct seq_file *s, void *v) /* nothing to release */
+__releases(RCU)
+{
+ return;
+}
+
+static int pkt_lostinfo_seq_show(struct seq_file *s, void *v) /* stub: prints nothing */
+{
+ return 0;
+
+}
+
+static const struct seq_operations pkt_lostinfo_seq_ops= {
+ .start = pkt_lostinfo_seq_start,
+ .next = pkt_lostinfo_seq_next,
+ .stop = pkt_lostinfo_seq_stop,
+ .show = pkt_lostinfo_seq_show,
+};
+
+static int pkt_lostinfo_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &pkt_lostinfo_seq_ops);
+}
+
+static ssize_t pkt_lostinfo_set(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos) /* validate a '0'/'1' write; currently a stub */
+{
+ size_t size; /* NOTE(review): unused local */
+ char temp[5] = {0};
+
+ //count includes the trailing terminator; only '0' or '1' is accepted
+ if (count != 2)
+ return -EINVAL;
+
+ if (copy_from_user(temp, buffer, 1))
+ return -EFAULT;
+
+ if (temp[0] < '0' || temp[0] > '1')
+ return -EINVAL;
+
+ /* NOTE(review): the validated value is discarded and pkt_lost_track is never updated -- confirm this stub is intentional */
+
+ return count;
+}
+
+
+static ssize_t dev_reset_set(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos) /* zero the stats of the net device whose name is written */
+{
+ struct net_device *dev = NULL;
+ size_t size;
+ char dev_name[MAX_NET_DEVICE_NAME_LEN + 1] = {0};
+
+ //count includes the trailing terminator; NOTE(review): count==0 makes count-1 wrap — confirm callers guarantee count>=1
+ size = min(count - 1, MAX_NET_DEVICE_NAME_LEN);
+ if (copy_from_user(dev_name, buffer, size))
+ return -EFAULT;
+
+ //reset the stats of this net device
+ dev = dev_get_by_name(&init_net, dev_name);
+ if (dev){
+ memset(&dev->stats, 0, sizeof(struct net_device_stats));
+ atomic_long_set(&dev->rx_dropped, 0);
+ dev_put(dev); /* drop the reference taken by dev_get_by_name() */
+ }else
+ printk("dev_reset_set %s not find\n", dev_name);
+ return count;
+}
+
+
+static void *ct_iptables_syn_seq_start(struct seq_file *seq, loff_t *pos) /* single-record sequence for /proc/net/ct_iptables_syn */
+__acquires(RCU)
+{
+ if (*pos >= 1)
+ return NULL;
+ return 1; /* any non-NULL token; NOTE(review): int-to-pointer cast */
+}
+
+static void *ct_iptables_syn_seq_next(struct seq_file *s, void *v, loff_t *pos) /* only one record: always terminate */
+{
+ (*pos)++;
+ return NULL;
+}
+
+static void ct_iptables_syn_seq_stop(struct seq_file *s, void *v) /* nothing to release */
+__releases(RCU)
+{
+ return;
+}
+
+static int ct_iptables_syn_seq_show(struct seq_file *s, void *v) /* print the current switch value */
+{
+ seq_printf(s, "ct_iptables_syn_sw: %u\n", ct_iptables_syn_sw);
+ return 0;
+
+}
+
+static const struct seq_operations ct_iptables_syn_seq_ops= {
+ .start = ct_iptables_syn_seq_start,
+ .next = ct_iptables_syn_seq_next,
+ .stop = ct_iptables_syn_seq_stop,
+ .show = ct_iptables_syn_seq_show,
+};
+
+static int ct_iptables_syn_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &ct_iptables_syn_seq_ops);
+}
+
+static ssize_t ct_iptables_syn_set(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos) /* toggle the conntrack/iptables SYN synchronisation switch */
+{
+ size_t size; /* NOTE(review): unused local */
+ char temp[5] = {0};
+
+ //count includes the trailing terminator; only '0' or '1' is accepted
+ if (count != 2)
+ return -EINVAL;
+
+ if (copy_from_user(temp, buffer, 1))
+ return -EFAULT;
+
+ if (temp[0] < '0' || temp[0] > '1')
+ return -EINVAL;
+
+ ct_iptables_syn_sw = (unsigned int)(temp[0] - '0');
+
+ return count;
+}
+
+static const struct proc_ops fastnat_level_file_ops = {
+ .proc_open = fastnat_level_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release, //seq_release_private also appears to work, but single_release and seq_release_net oops on a NULL pointer
+ .proc_write = fastnat_level_set,
+};
+
+static const struct proc_ops fast_switch_file_ops = {
+ .proc_open = fast_switch_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release, //seq_release_private also appears to work, but single_release and seq_release_net oops on a NULL pointer
+ .proc_write = fast_switch_set,
+};
+
+static const struct proc_ops fastbr_level_file_ops = {
+ .proc_open = fastbr_level_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release,
+ .proc_write = fastbr_level_set,
+};
+
+static const struct proc_ops fastnat_file_ops = {
+ .proc_open = fastnat_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release,
+};
+
+static const struct proc_ops fast6_file_ops = {
+ .proc_open = fast6_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release,
+};
+
+static const struct proc_ops dev_down_file_ops = {
+ .proc_write = dev_down_set, /* write-only proc entry */
+};
+
+static const struct proc_ops nofast_port_file_ops = {
+ .proc_open = nofast_port_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release,
+ .proc_write = nofast_port_set,
+};
+
+static const struct proc_ops conn_datainfo_file_ops = {
+ .proc_open = conn_datainfo_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release,
+ .proc_write = conn_datainfo_set
+};
+
+static const struct proc_ops pkt_lostinfo_file_ops = {
+ .proc_open = pkt_lostinfo_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release,
+ .proc_write = pkt_lostinfo_set,
+};
+
+static const struct proc_ops dev_reset_file_ops = {
+ .proc_write = dev_reset_set, /* write-only proc entry */
+};
+
+static const struct proc_ops ct_iptables_syn_file_ops = {
+ .proc_open = ct_iptables_syn_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release,
+ .proc_write = ct_iptables_syn_set,
+};
+
+
+//initialise the fast-forwarding proc files under /proc/net; always returns 1
+int fast_conntrack_init_proc(void)
+{
+ //layer-3 fast-forwarding master level
+ proc_create("fastnat_level", 0440, init_net.proc_net, &fastnat_level_file_ops);
+
+ //layer-3 fast-forwarding new/old switch and per-type switch bits
+ proc_create("fast_switch", 0440, init_net.proc_net, &fast_switch_file_ops);
+
+ //layer-2 fast-forwarding level 0-1
+ proc_create("fastbr_level", 0440, init_net.proc_net, &fastbr_level_file_ops);
+
+ //ipv4 fast-forwarding statistics
+ proc_create("fastnat", 0440, init_net.proc_net, &fastnat_file_ops);
+
+ //ipv6 fast-forwarding statistics
+ proc_create("fast6", 0440, init_net.proc_net, &fast6_file_ops);
+
+ //some devices cannot be downed, but their links must still be removed after deactivation
+ proc_create("dev_down", 0440, init_net.proc_net, &dev_down_file_ops);
+
+ //protocol ports excluded from fastnat, dynamically configurable
+ proc_create("nofast_port", 0440, init_net.proc_net, &nofast_port_file_ops);
+
+ //read link information; NOTE(review): mode 0440 has no write bit although a proc_write handler is installed -- confirm intended
+ proc_create("conn_datainfo", 0440, init_net.proc_net, &conn_datainfo_file_ops);
+
+ //read link packet-loss information (currently disabled)
+ //proc_create("pkt_lostinfo", 0440, init_net.proc_net, &pkt_lostinfo_file_ops);
+
+ //reset dev stats
+ proc_create("dev_reset_stats", 0440, init_net.proc_net, &dev_reset_file_ops);
+
+ proc_create("ct_iptables_syn", 0440, init_net.proc_net, &ct_iptables_syn_file_ops);
+ return 1;
+}
+
+EXPORT_SYMBOL(fast_conntrack_init_proc);
+
+
diff --git a/upstream/linux-5.10/net/core/fastproc/fastnat.c b/upstream/linux-5.10/net/core/fastproc/fastnat.c
new file mode 100755
index 0000000..717454c
--- /dev/null
+++ b/upstream/linux-5.10/net/core/fastproc/fastnat.c
@@ -0,0 +1,687 @@
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/icmp.h>
+#include <net/ip.h>
+#include <linux/if_arp.h>
+#include <linux/timer.h>
+#include <linux/inetdevice.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_arp.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter/xt_multiport.h>
+#include <linux/netfilter/xt_iprange.h>
+#include <linux/netfilter/nf_conntrack_tcp.h>
+#include <net/checksum.h>
+#include <net/dsfield.h>
+#include <net/route.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <net/SI/fastnat.h>
+#include <net/SI/net_other.h>
+#include <net/SI/netioc_proc.h>
+#include <net/SI/print_sun.h>
+#include <net/SI/net_track.h>
+#include <linux/netfilter.h>
+#include <net/SI/fast_common.h>
+
+MODULE_LICENSE("GPL");
+
+/* ***************** ipv4 fast-forwarding state ************************* */
+spinlock_t fastnat_spinlock; //spinlock protecting operations on the connection list
+fast_list_t working_list = {0};
+struct hlist_nulls_head *working_hash; //per-tuple lookup hash, RCU-protected (see fast_nat_recv)
+
+/* **************************** forward declarations ************************ */
+
+
+/* **************************** implementation ************************ */
+
+/* Extract the 5-tuple from an skb whose data already points at an IPv4
+ * header (caller's responsibility). Only TCP and UDP are accepted.
+ * Returns 0 on success, -1 for any other protocol.
+ * Side effect: bumps the global skb_udpnum/skb_tcpnum counters.
+ */
+static inline int fast_nat_get_tuple(struct sk_buff *skb, struct nf_conntrack_tuple *tuple)
+{
+ struct iphdr *iph;
+ struct udphdr *udph;
+ struct tcphdr *tcph;
+#if 0
+ if (!skb || !tuple)
+ {
+ return -1;
+ }
+
+ /* only IP packets */
+ if (htons(ETH_P_IP) != skb->protocol)
+ {
+ return -1;
+ }
+#endif
+ iph = (struct iphdr *)skb->data;
+#if 0
+ /* not deal with fragment packets now */
+ if (ntohs(iph->frag_off) & (IP_MF | IP_OFFSET))
+ {
+ skbinfo_add(NULL,SKB_FRAG);
+ return -1;
+ }
+
+ if (iph->ttl <= 1)
+ {
+ return -1;
+ }
+#endif
+ memset(tuple, 0, sizeof(struct nf_conntrack_tuple));
+
+ /* only tcp/udp */
+ if (IPPROTO_UDP == iph->protocol)
+ {
+ udph = (struct udphdr *)(skb->data + iph->ihl * 4);
+ tuple->src.u.udp.port = udph->source;
+ tuple->dst.u.udp.port = udph->dest;
+ skb_udpnum++;
+ }
+ else if (IPPROTO_TCP == iph->protocol)
+ {
+ tcph = (struct tcphdr *)(skb->data + iph->ihl * 4);
+ tuple->src.u.tcp.port = tcph->source;
+ tuple->dst.u.tcp.port = tcph->dest;
+ skb_tcpnum++;
+ }
+ else
+ {
+ return -1;
+ }
+
+ tuple->src.l3num = AF_INET;
+ tuple->src.u3.ip = iph->saddr;
+ tuple->dst.u3.ip = iph->daddr;
+ tuple->dst.protonum = iph->protocol;
+ tuple->dst.dir = IP_CT_DIR_ORIGINAL;
+
+ return 0;
+}
+
+//Must stay consistent with dev_xmit_complete(); being "static inline" it
+//cannot be hoisted into a shared file, so fastnat and fast6 each carry a
+//private copy.
+static inline bool start_xmit_complete(int rc)
+{
+ /*
+ * Positive cases with an skb consumed by a driver:
+ * - successful transmission (rc == NETDEV_TX_OK)
+ * - error while transmitting (rc < 0)
+ * - error while queueing to a different device (rc & NET_XMIT_MASK)
+ */
+ return likely(rc < NET_XMIT_MASK);
+}
+#ifndef CONFIG_PREEMPT_RT_FULL
+extern int *vir_addr_ddrnet;
+#endif
+
+/* Fast-path processing of an ipv4 packet. The per-tuple hash is protected
+ * by RCU; the overall connection list is protected by the spinlock.
+ * Returns 1 when the skb has been consumed (transmitted or freed), 0 when
+ * the packet must continue through the standard stack.
+ */
+int fast_nat_recv(struct sk_buff *skb)
+{
+ struct nf_conntrack_tuple tuple;
+ fast_entry_data_t *nat_entry_data = NULL;
+ fast_entry_t *nat_entry = NULL;
+ struct iphdr *iph = NULL;
+ struct udphdr *udph = NULL;
+ struct tcphdr *tcph = NULL;
+ __sum16 *cksum = NULL;
+ __be32 *oldip = NULL;
+ __be16 *oldport = NULL;
+ struct net_device *dev = NULL;
+ //u_int32_t skip_nat = 0;
+ struct sk_buff *skb2 = NULL;
+
+ iph = (struct iphdr *)skb->data;
+ //if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)
+ //goto err_out;
+
+ if (fast_nat_get_tuple(skb, &tuple) < 0)
+ {
+ print_sun(SUN_DBG, "fast_nat_get_tuple ERR !!!\n");
+ goto err_out;
+ }
+
+ rcu_read_lock();
+ nat_entry_data = fast_find_entry_data(working_hash, &tuple);
+ if (unlikely(nat_entry_data == NULL))
+ {
+ rcu_read_unlock();
+ print_sun(SUN_DBG, "fast_nat_find ERR !!!\n");
+ goto err_out;
+ }
+
+ dev = nat_entry_data->outdev;
+ if (unlikely(!dev))
+ {
+ rcu_read_unlock();
+ goto err_out;
+ }
+
+ /* check whether the packet length exceeds the egress device MTU */
+ if (unlikely(skb->len > dev->mtu))
+ {
+ skbinfo_add(NULL, SKB_OVER_MTU);
+ rcu_read_unlock();
+ print_sun(SUN_DBG, "fast_nat_recv outdev mtu ERR !!!\n");
+ goto err_out;
+ }
+
+ //looped-back packet (egress == ingress device): free it and count
+ if (unlikely(skb->dev == dev))
+ {
+ skbinfo_add(NULL, SKB_LOOP);
+ rcu_read_unlock();
+ kfree_skb(skb);
+ return 1;
+ }
+
+ nat_entry = fast_data_to_entry(nat_entry_data);
+ if (unlikely(!nat_entry))
+ {
+ rcu_read_unlock();
+ print_sun(SUN_DBG, "fast_nat_recv fast_nat_data_to_entry null !!!\n");
+ goto err_out;
+ }
+
+ /* For TCP, only take FASTNAT once the fast link is established in both
+ * directions; otherwise fall back to the standard path. */
+ if ((nat_entry->flags != FAST_ALL_DIR) && (IPPROTO_UDP != iph->protocol))
+ {
+ rcu_read_unlock();
+ print_sun(SUN_DBG, "fast_nat_recv flags is not FAST_ALL_DIR !!!\n");
+ goto err_out;
+ }
+
+ if (unlikely(!(skb2 = fast_expand_headroom(skb, dev)))) {
+ rcu_read_unlock();
+ return 1;
+ }
+
+ if (unlikely(skb2 != skb))
+ {
+ iph = (struct iphdr *)skb2->data;
+ skb = skb2;
+ }
+
+ fast_tcpdump(skb);
+
+ //if packet capture matched, the skb was cloned; fastnat modifies header
+ //bytes in place, so take a private copy first
+ if (skb_cloned(skb))
+ {
+ print_sun(SUN_DBG, "fast_nat_recv clone \n");
+ if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ {
+ rcu_read_unlock();
+ print_sun(SUN_DBG, "fast_nat_recv clone copy failed !!!\n");
+ kfree_skb(skb);
+ return 1;
+ }
+ }
+
+
+ //only once fastnat is certain do we record the IP header start,
+ //for later cache flushing
+ skb_reset_network_header(skb);
+ skb->isFastnat = 1;
+ if (likely(skb_get_nfct(skb) == 0)){
+ skb_set_nfct(skb, (unsigned long)nat_entry->ct);
+ nf_conntrack_get(&nat_entry->ct->ct_general);
+ }
+ //no NAT on this link: source/destination address and ports are left
+ //untouched and the packet is passed straight through
+ //if (nat_entry_data->is_not_nat)
+ //skip_nat = 1;
+ if (!nat_entry_data->is_not_nat)//(!skip_nat)
+ {
+ /* apply the NAT rewrite */
+ if (IPPROTO_TCP == iph->protocol)
+ {
+ tcph = (struct tcphdr *)(skb->data + iph->ihl * 4);
+ cksum = &tcph->check;
+ oldport = (FN_TYPE_SRC == nat_entry_data->type)? (&tcph->source): (&tcph->dest);
+ }
+ else if (IPPROTO_UDP == iph->protocol)
+ {
+ udph = (struct udphdr *)(skb->data + iph->ihl * 4);
+ cksum = &udph->check;
+ oldport = (FN_TYPE_SRC == nat_entry_data->type)? (&udph->source): (&udph->dest);
+ }
+
+ oldip = (FN_TYPE_SRC == nat_entry_data->type)? (&iph->saddr) : (&iph->daddr);
+
+ /* skip the transport checksum fixup for UDP with checksum 0 (optional) */
+ if (cksum != NULL && (0!=*cksum || IPPROTO_TCP == iph->protocol))
+ {
+ inet_proto_csum_replace4(cksum, skb, *oldip, nat_entry_data->nat_addr, 0);
+ inet_proto_csum_replace2(cksum, skb, *oldport, nat_entry_data->nat_port, 0);
+ }
+ csum_replace4(&iph->check, *oldip, nat_entry_data->nat_addr);
+ if(oldport)
+ *oldport = nat_entry_data->nat_port;
+ *oldip = nat_entry_data->nat_addr;
+ }
+ else
+ {
+ if (IPPROTO_TCP == iph->protocol)
+ {
+ tcph = (struct tcphdr *)(skb->data + iph->ihl * 4);
+ }
+ }
+
+ skb->priority = nat_entry_data->priority;
+ skb->mark = nat_entry_data->mark;
+
+ //ct-based traffic accounting --- counts IP packets, not MAC frames
+ if (nat_entry_data->tuplehash.tuple.dst.dir == IP_CT_DIR_ORIGINAL){
+ nat_entry->ct->packet_info[IP_CT_DIR_ORIGINAL].packets++;
+ nat_entry->ct->packet_info[IP_CT_DIR_ORIGINAL].bytes += skb->len;
+ if(unlikely(nat_entry->ct->indev[IP_CT_DIR_ORIGINAL] == NULL))
+ nat_entry->ct->indev[IP_CT_DIR_ORIGINAL] = skb->indev;
+ if(unlikely(nat_entry->ct->outdev[IP_CT_DIR_ORIGINAL] == NULL))
+ nat_entry->ct->outdev[IP_CT_DIR_ORIGINAL] = dev;
+ } else if (nat_entry_data->tuplehash.tuple.dst.dir == IP_CT_DIR_REPLY){
+ nat_entry->ct->packet_info[IP_CT_DIR_REPLY].packets++;
+ nat_entry->ct->packet_info[IP_CT_DIR_REPLY].bytes += skb->len;
+ if(unlikely(nat_entry->ct->indev[IP_CT_DIR_REPLY] == NULL))
+ nat_entry->ct->indev[IP_CT_DIR_REPLY] = skb->indev;
+ if(unlikely(nat_entry->ct->outdev[IP_CT_DIR_REPLY] == NULL))
+ nat_entry->ct->outdev[IP_CT_DIR_REPLY] = dev;
+ } else {
+ printk("fastnat packet error\n");
+ }
+
+ //kernel's native per-connection accounting
+ struct nf_conn_counter *acct = (struct nf_conn_counter *)nf_conn_acct_find((const struct nf_conn *)nat_entry->ct);
+ if (acct) {
+ enum ip_conntrack_info ctinfo;
+ if (nat_entry_data->tuplehash.tuple.dst.dir == IP_CT_DIR_ORIGINAL)
+ ctinfo = IP_CT_ESTABLISHED;
+ else
+ ctinfo = IP_CT_ESTABLISHED_REPLY;
+
+ atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets);
+ atomic64_add(skb->len, &acct[CTINFO2DIR(ctinfo)].bytes);
+ }
+
+ /* custom feature: under a unidirectional UDP flood the indev is otherwise
+ * unknown for traffic accounting, so latch it from the first packet */
+ if (unlikely(nat_entry_data->indev == NULL))
+ {
+ nat_entry_data->indev = skb->dev;
+ }
+
+ // count rx on the ingress device --- as native linux drivers do,
+ // using the IP packet length
+ if (likely(fastnat_level == FAST_NET_DEVICE))
+ {
+ nat_entry_data->indev->stats.rx_packets++;
+ nat_entry_data->indev->stats.rx_bytes += skb->len;
+ }
+
+ skb->dev = dev;
+
+ //only copy when a MAC header was prefilled; otherwise the data
+ //starts at the IP header
+ skb_push(skb, ETH_HLEN);
+ if (likely(nat_entry_data->hh_flag))
+ {
+ memcpy(skb->data, nat_entry_data->hh_data, ETH_HLEN);
+ }
+
+ /* refresh the connection timeout */
+ if (IPPROTO_TCP == iph->protocol)
+ {
+ mod_timer(&nat_entry->timeout, jiffies + tcp_timeouts[nat_entry->ct->proto.tcp.state]);
+ update_tcp_timeout(nat_entry, nat_entry_data, tcph);
+ nat_entry->ct->timeout = jiffies + tcp_timeouts[nat_entry->ct->proto.tcp.state];
+
+ if(ackfilter(skb, nat_entry, &working_list) == 1)
+ {
+ rcu_read_unlock();
+ //spin_unlock_bh(&fastnat_spinlock);
+ return 1;
+ }
+ }
+ else if (IPPROTO_UDP == iph->protocol)
+ {
+ /*udp*/
+ if (test_bit(IPS_SEEN_REPLY_BIT, &nat_entry->ct->status))
+ {
+ mod_timer(&nat_entry->timeout, jiffies + fast_udp_timeout_stream);
+ nat_entry->ct->timeout = jiffies + fast_udp_timeout_stream;
+ }
+ else
+ {
+ mod_timer(&nat_entry->timeout, jiffies + fast_udp_timeout);
+ nat_entry->ct->timeout = jiffies + fast_udp_timeout;
+ }
+ }
+
+ if (likely(skb->dev->flags & IFF_UP))
+ {
+ //ppp transmits the bare IP packet
+ if (unlikely(skb->dev->type == ARPHRD_PPP))//(strncmp(skb->dev->name, ppp_name, strlen(ppp_name)) == 0)
+ {
+ skb_pull(skb, ETH_HLEN);
+ }
+
+ skb->now_location |= FASTNAT_SUCC;
+ if (likely(fastnat_level == FAST_NET_DEVICE))
+ {
+ //print_sun(SUN_DBG, "fastnat-2 dev_queue_xmit, send to:%s iph->id=0x%02x!!!!!!!! \n", skb->dev->name, iph->id);
+ if (skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev) >= NET_XMIT_MASK) {
+ skb->dev->stats.tx_dropped++;
+ kfree_skb(skb);
+ }
+ }
+ //special applications such as traffic control must go through the
+ //standard fastnat path, otherwise their hooks never run
+ else if (fastnat_level == FAST_NET_CORE)
+ {
+ //print_sun(SUN_DBG, "fastnat ok-1, send to:%s !!!!!!!! \n", skb->dev->name);
+
+ dev_queue_xmit(skb);
+ }
+ else
+ print_sun(SUN_DBG,"fastnat_level:%d is not supported !!!!!!!! \n", fastnat_level);
+
+ nat_entry_data->packet_num++;
+ }
+ else
+ {
+ print_sun(SUN_DBG, "ERR &&&&&& %s DOWN, kfree_skb!!!!!!!! \n", skb->dev->name);
+ kfree_skb(skb);
+ }
+
+ //print_sun(SUN_DBG, "skb : 0x%x, fastnat succ--------", skb);
+ //nf_ct_dump_tuple(&tuple);
+ rcu_read_unlock();
+
+ return 1;
+
+err_out :
+ print_sun(SUN_DBG, "skb : 0x%x, fastnat FAIL!!!!!!!!!!", skb);
+ return 0; /* not fast nat */
+}
+
+/* POST_ROUTING hook descriptor: napt_handle() records established
+ * connections into the fast-forwarding tables. Registered at
+ * NF_IP_PRI_LAST so NAT/mangle have already finalised the packet.
+ * NOTE(review): napt_handle is defined below this initializer -
+ * presumably declared in a shared header (fast_common.h); confirm.
+ */
+static struct nf_hook_ops nat_hook = {
+ .hook = napt_handle,
+// .owner = THIS_MODULE,
+ .pf = PF_INET,
+ .hooknum = NF_INET_POST_ROUTING,
+ .priority = NF_IP_PRI_LAST,
+};
+
+/* Runs at the POST_ROUTING node: fills in the fast link for an
+ * established connection and inserts it into the tuple hash.
+ * Always returns NF_ACCEPT; the packet itself is never modified here.
+ */
+unsigned int napt_handle(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+ u_int8_t protocol;
+ fast_entry_t *nat_entry;
+ fast_entry_data_t *entry_data;
+ enum ip_conntrack_dir dir, rdir;
+ struct dst_entry *dst = skb_dst(skb);
+ struct net_device *out = state->out;
+#ifdef CONFIG_ATHRS_HW_NAT
+ u_int32_t mask =0;
+#endif
+ struct neighbour *_neighbour = NULL;
+
+ if (fastnat_level == FAST_CLOSE || fastnat_level == FAST_CLOSE_KEEP_LINK)
+ {
+ return NF_ACCEPT;
+ }
+
+ if (test_bit(FAST_TYPE_VERSION_BIT, &fast_switch))
+ {
+ return NF_ACCEPT;
+ }
+
+ if (ip_hdr(skb)->protocol != IPPROTO_TCP && ip_hdr(skb)->protocol != IPPROTO_UDP)
+ return NF_ACCEPT;
+
+ if (!out)
+ {
+ return NF_ACCEPT;
+ }
+
+ //no fast link for broadcast/multicast
+ if (ipv4_is_multicast(ip_hdr(skb)->daddr) || ipv4_is_lbcast(ip_hdr(skb)->daddr))
+ {
+ return NF_ACCEPT;
+ }
+
+ //watch whether the fast table fills up frequently here; if it does,
+ //consider evicting the oldest entry instead
+ if (working_list.count > nf_conntrack_max)
+ {
+ return NF_ACCEPT;
+ }
+
+ if (!dst)
+ {
+ return NF_ACCEPT;
+ }
+
+ /* from here on _neighbour holds a reference; all exits go via "accept" */
+ _neighbour = dst_neigh_lookup_skb(dst, skb);
+ if (!_neighbour)
+ {
+ print_sun(SUN_DBG,"napt_handle() _neighbour = null\n");
+ return NF_ACCEPT;
+ }
+
+ if (memcmp(_neighbour->ha, zeromac, ETH_ALEN) == 0)
+ {
+ if (strncmp(out->name, ppp_name, strlen(ppp_name)) != 0)
+ {
+ goto accept;
+ }
+ }
+
+ if (!(ct = nf_ct_get(skb, &ctinfo)))
+ {
+ print_sun(SUN_DBG,"napt_handle() ct = null\n");
+ goto accept;
+ }
+
+ protocol = nf_ct_protonum(ct);
+ print_sun(SUN_DBG,"napt_handle() protocol = %d\n", protocol);
+
+ if (ct->master == NULL)
+ {
+ //const struct nf_conntrack_helper *helper;
+ struct nf_conn_help *temp_help = nfct_help(ct);
+ //connections with a helper hook must stay on the standard kernel
+ //path, otherwise the kernel misses the packets it needs to inspect
+ if(temp_help!=NULL)
+ {
+ //helper = rcu_dereference(temp_help->helper);
+ //if(!(helper->tuple.src.u.all == htons(21)&&helper->tuple.dst.protonum == IPPROTO_TCP)) {
+ goto accept;
+ // }
+ }
+ }
+
+
+ /* only forward */
+ if (!skb->skb_iif)
+ {
+ goto accept;
+ }
+
+
+ //filter protocol packets that must not use fastnat, keyed by port
+ if (check_skip_ports(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all))
+ {
+ goto accept;
+ }
+
+ dir = CTINFO2DIR(ctinfo);
+
+ rdir = (IP_CT_DIR_ORIGINAL == dir) ? IP_CT_DIR_REPLY: IP_CT_DIR_ORIGINAL;
+ print_sun(SUN_DBG,"napt_handle() dir=%d, rdir=%d\n", dir, rdir);
+ /* fastnat only for TCP/UDP */
+ if (IPPROTO_TCP == protocol)
+ {
+ /* only established */
+ /* TCP three-way handshake completed */
+ if(!test_bit(IPS_ASSURED_BIT, &ct->status))
+ {
+ goto accept;
+ }
+ }
+ else if (IPPROTO_UDP != protocol)
+ {
+ goto accept;
+ }
+
+ spin_lock_bh(&fastnat_spinlock);
+ if (!(nat_entry = fast_get_entry(&working_list, ct, dir)))
+ {
+ print_sun(SUN_DBG,"napt_handle() nat_entry=%p\n", nat_entry);
+ spin_unlock_bh(&fastnat_spinlock);
+ goto accept;
+ }
+ nat_entry->fast_spinlock = &fastnat_spinlock;
+
+ //first creation of this link: take a ct reference and park the ct
+ //timer; a duplicate packet during creation must not repeat this
+ if (!(nat_entry->flags & FAST_ALL_DIR))
+ {
+ nf_conntrack_get(&ct->ct_general);
+ //del_timer(&ct->timeout);
+ ct->timeout = nat_entry->timeout.expires;
+
+ }
+
+ entry_data = &nat_entry->data[dir];
+ entry_data->tuplehash.tuple = ct->tuplehash[dir].tuple;
+ memcpy(entry_data->dmac, _neighbour->ha, ETH_ALEN);
+ entry_data->priority = skb->priority;
+ entry_data->mark = skb->mark;
+ entry_data->outdev = out;
+
+ /* record the MAC header into entry_data->hh_data */
+ if (!record_MAC_header(working_hash, ct, nat_entry, entry_data, _neighbour, out, htons(ETH_P_IP)))
+ {
+ spin_unlock_bh(&fastnat_spinlock);
+ goto accept;
+ }
+ print_sun(SUN_DBG,"napt_handle() ct->status=0x%x\n", ct->status);
+ /* fetch the IP and port needed for the NAT rewrite */
+ if (test_bit(IPS_SRC_NAT_BIT, &ct->status))
+ {
+ if(IP_CT_DIR_ORIGINAL == dir)
+ {
+ entry_data->nat_addr = ct->tuplehash[rdir].tuple.dst.u3.ip;
+ entry_data->nat_port = ct->tuplehash[rdir].tuple.dst.u.all;
+ entry_data->type = FN_TYPE_SRC;
+ }
+ else
+ {
+ entry_data->nat_addr = ct->tuplehash[rdir].tuple.src.u3.ip;
+ entry_data->nat_port = ct->tuplehash[rdir].tuple.src.u.all;
+ entry_data->type = FN_TYPE_DST;
+ }
+ }
+ else if (test_bit(IPS_DST_NAT_BIT, &ct->status))
+ {
+ if (IP_CT_DIR_ORIGINAL == dir)
+ {
+ entry_data->nat_addr = ct->tuplehash[rdir].tuple.src.u3.ip;
+ entry_data->nat_port = ct->tuplehash[rdir].tuple.src.u.all;
+ entry_data->type = FN_TYPE_DST;
+ }
+ else
+ {
+ entry_data->nat_addr = ct->tuplehash[rdir].tuple.dst.u3.ip;
+ entry_data->nat_port = ct->tuplehash[rdir].tuple.dst.u.all;
+ entry_data->type = FN_TYPE_SRC;
+ }
+ }
+ else //scenario without NAT
+ {
+ //addresses and ports stay untouched; the packet passes straight through
+ entry_data->is_not_nat = 1;
+ }
+
+ //guarantee the two direction flag bits never clash
+ nat_entry->flags = nat_entry->flags | (1 << dir);
+
+ //insert the hash node
+ fast_add_entry(working_hash, entry_data);
+ if (nat_entry->flags == FAST_ALL_DIR)
+ {
+ nat_entry->data[0].indev = nat_entry->data[1].outdev;
+ nat_entry->data[1].indev = nat_entry->data[0].outdev;
+ }
+
+ spin_lock_bh(&fast_fw_spinlock);
+ ct->fast_ct.isFast = FAST_CT_WND4;
+ spin_unlock_bh(&fast_fw_spinlock);
+
+ spin_unlock_bh(&fastnat_spinlock);
+
+accept:
+ neigh_release(_neighbour);
+ return NF_ACCEPT;
+}
+
+/* Handle a notifier-chain event: walk the ipv4 fast list under the
+ * spinlock and apply @cmd. Always returns 0.
+ */
+int fastnat_event(traverse_command_t *cmd)
+{
+ spin_lock_bh(&fastnat_spinlock);
+ traverse_process(&working_list, cmd);
+ spin_unlock_bh(&fastnat_spinlock);
+ return 0;
+}
+
+//fastnat_level switched off: clear all ipv4 fast-forwarding state.
+void fastnat_cleanup_links(void)
+{
+ spin_lock_bh(&fastnat_spinlock);
+ fast_cleanup_links(&working_list);
+ spin_unlock_bh(&fastnat_spinlock);
+}
+
+/* fastnat initialisation: allocate the tuple hash and register the
+ * POST_ROUTING hook. Returns 0 on success, -EINVAL on failure.
+ * NOTE(review): vanilla 5.10 no longer exports nf_ct_free_hashtable() -
+ * presumably re-added in this tree; confirm against local headers.
+ */
+int tsp_fastnat_init(void)
+{
+ int ret;
+
+ print_sun(SUN_DBG,"start init fastnat\n");
+
+ working_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, /*&fastnat_hash_vmalloc,*/ 1);
+ if (!working_hash)
+ {
+ print_sun(SUN_ERR, "Unable to create working_hash\n");
+ return -EINVAL;
+ }
+
+ spin_lock_init(&fastnat_spinlock);
+
+ ret = nf_register_net_hook(&init_net, &nat_hook);
+ if (ret != 0)
+ {
+ print_sun(SUN_ERR,"init fastnat failed\n");
+ goto err;
+ }
+ print_sun(SUN_DBG,"init fastnat done\n");
+
+ return 0;
+
+err:
+ nf_ct_free_hashtable(working_hash, /*fastnat_hash_vmalloc, */nf_conntrack_htable_size);
+ return -EINVAL;
+}
+
+/* Tear down fastnat: unregister the hook and free the tuple hash. */
+int tsp_fastnat_cleanup(void)
+{
+ nf_unregister_net_hook(&init_net, &nat_hook);
+ nf_ct_free_hashtable(working_hash, /*fastnat_hash_vmalloc,*/ nf_conntrack_htable_size);
+
+ print_sun(SUN_DBG,"fastnat cleanup done\n");
+ return 0;
+}
+
diff --git a/upstream/linux-5.10/net/netfilter/nf_conntrack_core.c b/upstream/linux-5.10/net/netfilter/nf_conntrack_core.c
new file mode 100755
index 0000000..e92413e
--- /dev/null
+++ b/upstream/linux-5.10/net/netfilter/nf_conntrack_core.c
@@ -0,0 +1,2837 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Connection state tracking for netfilter. This is separated from,
+ but required by, the NAT layer; it can also be used by an iptables
+ extension. */
+
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
+ * (C) 2005-2012 Patrick McHardy <kaber@trash.net>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/netfilter.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/proc_fs.h>
+#include <linux/vmalloc.h>
+#include <linux/stddef.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/jhash.h>
+#include <linux/siphash.h>
+#include <linux/err.h>
+#include <linux/percpu.h>
+#include <linux/moduleparam.h>
+#include <linux/notifier.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/socket.h>
+#include <linux/mm.h>
+#include <linux/nsproxy.h>
+#include <linux/rculist_nulls.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
+#include <net/netfilter/nf_conntrack_timeout.h>
+#include <net/netfilter/nf_conntrack_labels.h>
+#include <net/netfilter/nf_conntrack_synproxy.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_helper.h>
+#include <net/netns/hash.h>
+#include <net/ip.h>
+
+#include "nf_internals.h"
+
+#ifdef CONFIG_FASTNAT_MODULE
+#include <net/SI/fast_common.h>
+#endif
+
+__cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
+EXPORT_SYMBOL_GPL(nf_conntrack_locks);
+
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
+EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);
+
+struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
+EXPORT_SYMBOL_GPL(nf_conntrack_hash);
+
+/* State for the deferred garbage-collection worker. */
+struct conntrack_gc_work {
+ struct delayed_work dwork;
+ u32 next_bucket; /* bucket to resume the incremental scan from */
+ bool exiting;
+ bool early_drop;
+};
+
+static __read_mostly struct kmem_cache *nf_conntrack_cachep;
+static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
+static __read_mostly bool nf_conntrack_locks_all;
+
+/* serialize hash resizes and nf_ct_iterate_cleanup */
+static DEFINE_MUTEX(nf_conntrack_mutex);
+
+#define GC_SCAN_INTERVAL (120u * HZ)
+#define GC_SCAN_MAX_DURATION msecs_to_jiffies(10)
+
+static struct conntrack_gc_work conntrack_gc_work;
+
+/* Take one bucket lock while honouring a concurrent
+ * nf_conntrack_all_lock(): when the global flag is set, serialise on
+ * nf_conntrack_locks_all_lock before retaking the bucket lock.
+ */
+void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
+{
+ /* 1) Acquire the lock */
+ spin_lock(lock);
+
+ /* 2) read nf_conntrack_locks_all, with ACQUIRE semantics
+ * It pairs with the smp_store_release() in nf_conntrack_all_unlock()
+ */
+ if (likely(smp_load_acquire(&nf_conntrack_locks_all) == false))
+ return;
+
+ /* fast path failed, unlock */
+ spin_unlock(lock);
+
+ /* Slow path 1) get global lock */
+ spin_lock(&nf_conntrack_locks_all_lock);
+
+ /* Slow path 2) get the lock we want */
+ spin_lock(lock);
+
+ /* Slow path 3) release the global lock */
+ spin_unlock(&nf_conntrack_locks_all_lock);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_lock);
+
+/* Drop the (one or two) bucket locks taken by nf_conntrack_double_lock(). */
+static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
+{
+ h1 %= CONNTRACK_LOCKS;
+ h2 %= CONNTRACK_LOCKS;
+ spin_unlock(&nf_conntrack_locks[h1]);
+ if (h1 != h2)
+ spin_unlock(&nf_conntrack_locks[h2]);
+}
+
+/* return true if we need to recompute hashes (in case hash table was resized) */
+static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
+     unsigned int h2, unsigned int sequence)
+{
+ h1 %= CONNTRACK_LOCKS;
+ h2 %= CONNTRACK_LOCKS;
+ /* always lock the lower-indexed bucket first for a stable lock order */
+ if (h1 <= h2) {
+ nf_conntrack_lock(&nf_conntrack_locks[h1]);
+ if (h1 != h2)
+ spin_lock_nested(&nf_conntrack_locks[h2],
+    SINGLE_DEPTH_NESTING);
+ } else {
+ nf_conntrack_lock(&nf_conntrack_locks[h2]);
+ spin_lock_nested(&nf_conntrack_locks[h1],
+    SINGLE_DEPTH_NESTING);
+ }
+ if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
+ nf_conntrack_double_unlock(h1, h2);
+ return true;
+ }
+ return false;
+}
+
+/* Exclude all bucket-lock holders: raise the global flag, then cycle
+ * through every bucket lock so current holders drain out.
+ */
+static void nf_conntrack_all_lock(void)
+ __acquires(&nf_conntrack_locks_all_lock)
+{
+ int i;
+
+ spin_lock(&nf_conntrack_locks_all_lock);
+
+ nf_conntrack_locks_all = true;
+
+ for (i = 0; i < CONNTRACK_LOCKS; i++) {
+ spin_lock(&nf_conntrack_locks[i]);
+
+ /* This spin_unlock provides the "release" to ensure that
+ * nf_conntrack_locks_all==true is visible to everyone that
+ * acquired spin_lock(&nf_conntrack_locks[]).
+ */
+ spin_unlock(&nf_conntrack_locks[i]);
+ }
+}
+
+/* Release the global exclusion taken by nf_conntrack_all_lock(). */
+static void nf_conntrack_all_unlock(void)
+ __releases(&nf_conntrack_locks_all_lock)
+{
+ /* All prior stores must be complete before we clear
+ * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
+ * might observe the false value but not the entire
+ * critical section.
+ * It pairs with the smp_load_acquire() in nf_conntrack_lock()
+ */
+ smp_store_release(&nf_conntrack_locks_all, false);
+ spin_unlock(&nf_conntrack_locks_all_lock);
+}
+
+unsigned int nf_conntrack_htable_size __read_mostly;
+EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
+
+unsigned int nf_conntrack_max __read_mostly;
+EXPORT_SYMBOL_GPL(nf_conntrack_max);
+seqcount_spinlock_t nf_conntrack_generation __read_mostly;
+static unsigned int nf_conntrack_hash_rnd __read_mostly;
+
+/* Raw 32-bit tuple hash, seeded per boot and per netns; hashes the struct
+ * prefix up to the ports and folds ports/protocol into the seed so both
+ * directions of the layout are covered.
+ */
+static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
+         const struct net *net)
+{
+ unsigned int n;
+ u32 seed;
+
+ get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));
+
+ /* The direction must be ignored, so we hash everything up to the
+ * destination ports (which is a multiple of 4) and treat the last
+ * three bytes manually.
+ */
+ seed = nf_conntrack_hash_rnd ^ net_hash_mix(net);
+ n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
+ return jhash2((u32 *)tuple, n, seed ^
+        (((__force __u16)tuple->dst.u.all << 16) |
+        tuple->dst.protonum));
+}
+
+#ifdef CONFIG_FASTNAT_MODULE
+/* Exported thin wrapper so the fastnat module can reuse the static
+ * hash_conntrack_raw() without duplicating the hashing details.
+ */
+u32 hash_conntrack_raw_fast(const struct nf_conntrack_tuple *tuple,
+       const struct net *net)
+{
+ u32 raw = hash_conntrack_raw(tuple, net);
+
+ return raw;
+}
+#endif
+
+/* Map a raw 32-bit hash onto a bucket index in
+ * [0, nf_conntrack_htable_size).
+ */
+static u32 scale_hash(u32 hash)
+{
+ const u32 bucket = reciprocal_scale(hash, nf_conntrack_htable_size);
+
+ return bucket;
+}
+
+/* Hash @tuple into a table of @size buckets for namespace @net. */
+static u32 __hash_conntrack(const struct net *net,
+       const struct nf_conntrack_tuple *tuple,
+       unsigned int size)
+{
+ u32 raw = hash_conntrack_raw(tuple, net);
+
+ return reciprocal_scale(raw, size);
+}
+
+/* Bucket index of @tuple in the main conntrack hash table. */
+static u32 hash_conntrack(const struct net *net,
+     const struct nf_conntrack_tuple *tuple)
+{
+ u32 raw;
+
+ raw = hash_conntrack_raw(tuple, net);
+ return scale_hash(raw);
+}
+
+#ifdef CONFIG_FASTNAT_MODULE
+/* Hash a tuple for the fastnat module.
+ * NOTE(review): scales into a fixed 32 buckets rather than
+ * nf_conntrack_htable_size, yet working_hash in fastnat.c is allocated
+ * with nf_conntrack_htable_size - confirm the 32 is intentional and not
+ * an under-utilised hash.
+ */
+u_int32_t hash_conntrack_fast(const struct nf_conntrack_tuple *tuple)
+{
+ return __hash_conntrack(&init_net, tuple, 32);
+}
+#endif
+
+/* Fill the source/destination ports of @tuple from the transport header
+ * at @dataoff; only the first 4 bytes (sport, dport) are read.
+ * Returns false when those bytes cannot be linearized.
+ */
+static bool nf_ct_get_tuple_ports(const struct sk_buff *skb,
+      unsigned int dataoff,
+      struct nf_conntrack_tuple *tuple)
+{
+ struct {
+  __be16 sport;
+  __be16 dport;
+ } _inet_hdr, *inet_hdr;
+
+ /* Actually only need first 4 bytes to get ports. */
+ inet_hdr = skb_header_pointer(skb, dataoff, sizeof(_inet_hdr), &_inet_hdr);
+ if (inet_hdr == NULL)
+  return false;
+
+ tuple->src.u.udp.port = inet_hdr->sport;
+ tuple->dst.u.udp.port = inet_hdr->dport;
+ return true;
+}
+
+/* Fill *tuple from the packet: L3 addresses from the network header at
+ * @nhoff, then protocol-specific port/id extraction at @dataoff.
+ * Returns false only when required header bytes are unavailable.
+ */
+static bool
+nf_ct_get_tuple(const struct sk_buff *skb,
+  unsigned int nhoff,
+  unsigned int dataoff,
+  u_int16_t l3num,
+  u_int8_t protonum,
+  struct net *net,
+  struct nf_conntrack_tuple *tuple)
+{
+ unsigned int size;
+ const __be32 *ap;
+ __be32 _addrs[8];
+
+ memset(tuple, 0, sizeof(*tuple));
+
+ tuple->src.l3num = l3num;
+ switch (l3num) {
+ case NFPROTO_IPV4:
+ nhoff += offsetof(struct iphdr, saddr);
+ size = 2 * sizeof(__be32);
+ break;
+ case NFPROTO_IPV6:
+ nhoff += offsetof(struct ipv6hdr, saddr);
+ size = sizeof(_addrs);
+ break;
+ default:
+ /* unsupported family: addresses stay zeroed, still "success" */
+ return true;
+ }
+
+ ap = skb_header_pointer(skb, nhoff, size, _addrs);
+ if (!ap)
+ return false;
+
+ switch (l3num) {
+ case NFPROTO_IPV4:
+ tuple->src.u3.ip = ap[0];
+ tuple->dst.u3.ip = ap[1];
+ break;
+ case NFPROTO_IPV6:
+ memcpy(tuple->src.u3.ip6, ap, sizeof(tuple->src.u3.ip6));
+ memcpy(tuple->dst.u3.ip6, ap + 4, sizeof(tuple->dst.u3.ip6));
+ break;
+ }
+
+ tuple->dst.protonum = protonum;
+ tuple->dst.dir = IP_CT_DIR_ORIGINAL;
+
+ switch (protonum) {
+#if IS_ENABLED(CONFIG_IPV6)
+ case IPPROTO_ICMPV6:
+ return icmpv6_pkt_to_tuple(skb, dataoff, net, tuple);
+#endif
+ case IPPROTO_ICMP:
+ return icmp_pkt_to_tuple(skb, dataoff, net, tuple);
+#ifdef CONFIG_NF_CT_PROTO_GRE
+ case IPPROTO_GRE:
+ return gre_pkt_to_tuple(skb, dataoff, net, tuple);
+#endif
+ case IPPROTO_TCP:
+ case IPPROTO_UDP: /* fallthrough */
+ return nf_ct_get_tuple_ports(skb, dataoff, tuple);
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE
+ case IPPROTO_UDPLITE:
+ return nf_ct_get_tuple_ports(skb, dataoff, tuple);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+ case IPPROTO_SCTP:
+ return nf_ct_get_tuple_ports(skb, dataoff, tuple);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+ case IPPROTO_DCCP:
+ return nf_ct_get_tuple_ports(skb, dataoff, tuple);
+#endif
+ default:
+ break;
+ }
+
+ return true;
+}
+
+#ifdef CONFIG_FASTNAT_MODULE
+/* Exported wrapper around the static nf_ct_get_tuple() for the fastnat
+ * module; identical contract.
+ */
+bool nf_ct_get_tuple_fast(const struct sk_buff *skb,
+       unsigned int nhoff,
+       unsigned int dataoff,
+       u_int16_t l3num,
+       u_int8_t protonum,
+       struct net *net,
+       struct nf_conntrack_tuple *tuple)
+{
+ bool ok;
+
+ ok = nf_ct_get_tuple(skb, nhoff, dataoff, l3num, protonum, net, tuple);
+ return ok;
+}
+#endif
+
+/* Locate the L4 header of an IPv4 packet whose network header sits at
+ * @nhoff. On success stores the protocol in *@protonum and returns the
+ * transport-header offset; returns -1 for unreadable, fragmented or
+ * bogus headers.
+ */
+static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
+       u_int8_t *protonum)
+{
+ struct iphdr _iph;
+ const struct iphdr *iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
+ int dataoff;
+
+ if (iph == NULL)
+  return -1;
+
+ /* Conntrack defragments packets, we might still see fragments
+ * inside ICMP packets though.
+ */
+ if (iph->frag_off & htons(IP_OFFSET))
+  return -1;
+
+ dataoff = nhoff + (iph->ihl << 2);
+ *protonum = iph->protocol;
+
+ /* Check bogus IP headers */
+ if (dataoff > skb->len) {
+  pr_debug("bogus IPv4 packet: nhoff %u, ihl %u, skblen %u\n",
+    nhoff, iph->ihl << 2, skb->len);
+  return -1;
+ }
+ return dataoff;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+/* Locate the upper-layer header of an IPv6 packet at @nhoff, skipping
+ * extension headers. Stores the protocol in *@protonum and returns its
+ * offset, or -1 when it cannot be determined (e.g. non-first fragments).
+ */
+static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
+       u8 *protonum)
+{
+ unsigned int extoff = nhoff + sizeof(struct ipv6hdr);
+ __be16 frag_off;
+ int protoff;
+ u8 nexthdr;
+
+ if (skb_copy_bits(skb, nhoff + offsetof(struct ipv6hdr, nexthdr),
+     &nexthdr, sizeof(nexthdr)) != 0) {
+  pr_debug("can't get nexthdr\n");
+  return -1;
+ }
+
+ protoff = ipv6_skip_exthdr(skb, extoff, &nexthdr, &frag_off);
+ /*
+ * (protoff == skb->len) means the packet has not data, just
+ * IPv6 and possibly extensions headers, but it is tracked anyway
+ */
+ if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
+  pr_debug("can't find proto in pkt\n");
+  return -1;
+ }
+
+ *protonum = nexthdr;
+ return protoff;
+}
+#endif
+
+/* Dispatch to the per-family L4 locator; returns the transport-header
+ * offset, or -1 (with *l4num zeroed for unsupported families).
+ */
+static int get_l4proto(const struct sk_buff *skb,
+           unsigned int nhoff, u8 pf, u8 *l4num)
+{
+ if (pf == NFPROTO_IPV4)
+  return ipv4_get_l4proto(skb, nhoff, l4num);
+#if IS_ENABLED(CONFIG_IPV6)
+ if (pf == NFPROTO_IPV6)
+  return ipv6_get_l4proto(skb, nhoff, l4num);
+#endif
+ *l4num = 0;
+ return -1;
+}
+
+#ifdef CONFIG_FASTNAT_MODULE
+/* Exported wrapper around the static get_l4proto() for the fastnat
+ * module; identical contract.
+ */
+int get_l4proto_fast(const struct sk_buff *skb,
+     unsigned int nhoff, u8 pf, u8 *l4num)
+{
+ int off;
+
+ off = get_l4proto(skb, nhoff, pf, l4num);
+ return off;
+}
+#endif
+
+/* Public tuple extraction: find the L4 protocol first, then build the
+ * full tuple. Returns false when either step fails.
+ */
+bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
+           u_int16_t l3num,
+           struct net *net, struct nf_conntrack_tuple *tuple)
+{
+ u8 l4num;
+ int l4off = get_l4proto(skb, nhoff, l3num, &l4num);
+
+ if (l4off <= 0)
+  return false;
+
+ return nf_ct_get_tuple(skb, nhoff, l4off, l3num, l4num, net, tuple);
+}
+EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
+
+/* Build the reply-direction tuple for @orig: swap src/dst addresses and
+ * ports; ICMP types are inverted by the protocol helpers.
+ */
+bool
+nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
+     const struct nf_conntrack_tuple *orig)
+{
+ memset(inverse, 0, sizeof(*inverse));
+
+ inverse->src.l3num = orig->src.l3num;
+
+ switch (orig->src.l3num) {
+ case NFPROTO_IPV4:
+ inverse->src.u3.ip = orig->dst.u3.ip;
+ inverse->dst.u3.ip = orig->src.u3.ip;
+ break;
+ case NFPROTO_IPV6:
+ inverse->src.u3.in6 = orig->dst.u3.in6;
+ inverse->dst.u3.in6 = orig->src.u3.in6;
+ break;
+ default:
+ break;
+ }
+
+ inverse->dst.dir = !orig->dst.dir;
+
+ inverse->dst.protonum = orig->dst.protonum;
+
+ /* ICMP/ICMPv6 carry type/code, not ports; delegate their inversion */
+ switch (orig->dst.protonum) {
+ case IPPROTO_ICMP:
+ return nf_conntrack_invert_icmp_tuple(inverse, orig);
+#if IS_ENABLED(CONFIG_IPV6)
+ case IPPROTO_ICMPV6:
+ return nf_conntrack_invert_icmpv6_tuple(inverse, orig);
+#endif
+ }
+
+ inverse->src.u.all = orig->dst.u.all;
+ inverse->dst.u.all = orig->src.u.all;
+ return true;
+}
+EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
+
+/* Generate a almost-unique pseudo-id for a given conntrack.
+ *
+ * intentionally doesn't re-use any of the seeds used for hash
+ * table location, we assume id gets exposed to userspace.
+ *
+ * Following nf_conn items do not change throughout lifetime
+ * of the nf_conn:
+ *
+ * 1. nf_conn address
+ * 2. nf_conn->master address (normally NULL)
+ * 3. the associated net namespace
+ * 4. the original direction tuple
+ */
+u32 nf_ct_get_id(const struct nf_conn *ct)
+{
+ static __read_mostly siphash_key_t ct_id_seed;
+ unsigned long a, b, c, d;
+
+ net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
+
+ a = (unsigned long)ct;
+ b = (unsigned long)ct->master;
+ c = (unsigned long)nf_ct_net(ct);
+ d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+       sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
+       &ct_id_seed);
+ /* fold the four inputs with siphash; key width follows the arch */
+#ifdef CONFIG_64BIT
+ return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
+#else
+ return siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &ct_id_seed);
+#endif
+}
+EXPORT_SYMBOL_GPL(nf_ct_get_id);
+
+/* Unlink @ct from both hash chains and drop its pending expectations.
+ * Caller must hold the relevant conntrack bucket locks.
+ */
+static void
+clean_from_lists(struct nf_conn *ct)
+{
+	pr_debug("clean_from_lists(%p)\n", ct);
+	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
+	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
+
+	/* Destroy all pending expectations */
+	nf_ct_remove_expectations(ct);
+}
+
+/* must be called with local_bh_disable */
+static void nf_ct_add_to_dying_list(struct nf_conn *ct)
+{
+	struct ct_pcpu *pcpu;
+
+	/* add this conntrack to the (per cpu) dying list */
+	ct->cpu = smp_processor_id();
+	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
+
+	spin_lock(&pcpu->lock);
+	/* the ORIGINAL tuplehash node is overloaded as the pcpu-list link */
+	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
+			     &pcpu->dying);
+	spin_unlock(&pcpu->lock);
+}
+
+/* must be called with local_bh_disable */
+static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
+{
+	struct ct_pcpu *pcpu;
+
+	/* add this conntrack to the (per cpu) unconfirmed list */
+	ct->cpu = smp_processor_id();
+	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
+
+	spin_lock(&pcpu->lock);
+	/* the ORIGINAL tuplehash node is overloaded as the pcpu-list link */
+	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
+			     &pcpu->unconfirmed);
+	spin_unlock(&pcpu->lock);
+}
+
+/* must be called with local_bh_disable */
+static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
+{
+	struct ct_pcpu *pcpu;
+
+	/* We overload first tuple to link into unconfirmed or dying list.*/
+	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
+
+	spin_lock(&pcpu->lock);
+	/* entry must currently be on one of the two pcpu lists */
+	BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
+	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
+	spin_unlock(&pcpu->lock);
+}
+
+/* Round @len up so the low NFCT_INFOMASK bits are free for skb->_nfct flags. */
+#define NFCT_ALIGN(len)	(((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK)
+
+/* Released via destroy_conntrack() */
+/* Allocate a conntrack template (IPS_TEMPLATE, refcount 0).
+ * When kmalloc's minimum alignment does not already guarantee that the
+ * NFCT_INFOMASK bits of the pointer are clear, over-allocate and align
+ * manually, recording the shift in proto.tmpl_padto so the free path
+ * can recover the original pointer.
+ */
+struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
+				 const struct nf_conntrack_zone *zone,
+				 gfp_t flags)
+{
+	struct nf_conn *tmpl, *p;
+
+	if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK) {
+		tmpl = kzalloc(sizeof(*tmpl) + NFCT_INFOMASK, flags);
+		if (!tmpl)
+			return NULL;
+
+		p = tmpl;
+		tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
+		if (tmpl != p) {
+			/* NOTE(review): this re-align repeats the line above and
+			 * looks redundant (tmpl is already aligned); it matches
+			 * upstream, so left as-is.
+			 */
+			tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
+			tmpl->proto.tmpl_padto = (char *)tmpl - (char *)p;
+		}
+	} else {
+		tmpl = kzalloc(sizeof(*tmpl), flags);
+		if (!tmpl)
+			return NULL;
+	}
+
+	tmpl->status = IPS_TEMPLATE;
+	write_pnet(&tmpl->ct_net, net);
+	nf_ct_zone_add(tmpl, zone);
+	/* templates are never hashed; refcount starts at 0 */
+	atomic_set(&tmpl->ct_general.use, 0);
+
+	return tmpl;
+}
+EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
+
+/* Free a template allocated by nf_ct_tmpl_alloc(), undoing the manual
+ * alignment shift (tmpl_padto) when one was applied.
+ */
+void nf_ct_tmpl_free(struct nf_conn *tmpl)
+{
+	nf_ct_ext_destroy(tmpl);
+
+	if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK)
+		kfree((char *)tmpl - tmpl->proto.tmpl_padto);
+	else
+		kfree(tmpl);
+}
+EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
+
+/* Tear down the GRE keymap held by this connection's master, if any.
+ * No-op unless CONFIG_NF_CT_PROTO_GRE is enabled.
+ */
+static void destroy_gre_conntrack(struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CT_PROTO_GRE
+	struct nf_conn *master = ct->master;
+
+	if (!master)
+		return;
+
+	nf_ct_gre_keymap_destroy(master);
+#endif
+}
+
+/* Final destructor, invoked when the refcount hits zero.  Templates
+ * short-circuit to nf_ct_tmpl_free(); regular entries are removed from
+ * the pcpu dying/unconfirmed list and returned to the slab.
+ */
+static void
+destroy_conntrack(struct nf_conntrack *nfct)
+{
+	struct nf_conn *ct = (struct nf_conn *)nfct;
+
+	pr_debug("destroy_conntrack(%p)\n", ct);
+	WARN_ON(atomic_read(&nfct->use) != 0);
+
+	if (unlikely(nf_ct_is_template(ct))) {
+		nf_ct_tmpl_free(ct);
+		return;
+	}
+
+	if (unlikely(nf_ct_protonum(ct) == IPPROTO_GRE))
+		destroy_gre_conntrack(ct);
+
+	local_bh_disable();
+	/* Expectations will have been removed in clean_from_lists,
+	 * except TFTP can create an expectation on the first packet,
+	 * before connection is in the list, so we need to clean here,
+	 * too.
+	 */
+	nf_ct_remove_expectations(ct);
+
+	nf_ct_del_from_dying_or_unconfirmed_list(ct);
+
+	local_bh_enable();
+
+	/* drop the reference taken on the master when we were expected */
+	if (ct->master)
+		nf_ct_put(ct->master);
+
+	pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
+	nf_conntrack_free(ct);
+}
+
+/* Move @ct from the hash table to the pcpu dying list.  The seqcount
+ * retry loop guards against a concurrent hash-table resize between
+ * computing the bucket indices and taking the bucket locks.
+ */
+static void nf_ct_delete_from_lists(struct nf_conn *ct)
+{
+	struct net *net = nf_ct_net(ct);
+	unsigned int hash, reply_hash;
+	unsigned int sequence;
+
+	nf_ct_helper_destroy(ct);
+
+	local_bh_disable();
+	do {
+		sequence = read_seqcount_begin(&nf_conntrack_generation);
+		hash = hash_conntrack(net,
+				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+		reply_hash = hash_conntrack(net,
+					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
+
+	clean_from_lists(ct);
+	nf_conntrack_double_unlock(hash, reply_hash);
+
+	nf_ct_add_to_dying_list(ct);
+
+	local_bh_enable();
+}
+
+/* Kill @ct: mark it dying, stamp the stop time and emit the DESTROY
+ * event.  Returns false if the entry was already dying or if the event
+ * could not be delivered (the ecache worker then owns the final put).
+ */
+bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
+{
+	struct nf_conn_tstamp *tstamp;
+
+	/* only one caller wins the right to tear the entry down */
+	if (test_and_set_bit(IPS_DYING_BIT, &ct->status))
+		return false;
+
+	tstamp = nf_conn_tstamp_find(ct);
+	if (tstamp) {
+		s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp;
+
+		tstamp->stop = ktime_get_real_ns();
+		/* already expired: back-date the stop time to the expiry */
+		if (timeout < 0)
+			tstamp->stop -= jiffies_to_nsecs(-timeout);
+	}
+
+	if (nf_conntrack_event_report(IPCT_DESTROY, ct,
+				      portid, report) < 0) {
+		/* destroy event was not delivered. nf_ct_put will
+		 * be done by event cache worker on redelivery.
+		 */
+		nf_ct_delete_from_lists(ct);
+		nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
+		return false;
+	}
+
+	nf_conntrack_ecache_work(nf_ct_net(ct));
+	nf_ct_delete_from_lists(ct);
+	nf_ct_put(ct);
+	return true;
+}
+EXPORT_SYMBOL_GPL(nf_ct_delete);
+
+/* True when hash entry @h matches (@tuple, @zone, @net) and is
+ * confirmed.  The confirmed check matters because SLAB_TYPESAFE_BY_RCU
+ * can recycle a conntrack with an equal tuple under us.
+ */
+static inline bool
+nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
+		const struct nf_conntrack_tuple *tuple,
+		const struct nf_conntrack_zone *zone,
+		const struct net *net)
+{
+	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+
+	if (!nf_ct_tuple_equal(tuple, &h->tuple))
+		return false;
+	if (!nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)))
+		return false;
+	if (!nf_ct_is_confirmed(ct))
+		return false;
+
+	return net_eq(net, nf_ct_net(ct));
+}
+
+/* True when @ct1 and @ct2 describe the same flow: equal tuples in both
+ * directions, same zone in both directions, same net namespace.
+ */
+static inline bool
+nf_ct_match(const struct nf_conn *ct1, const struct nf_conn *ct2)
+{
+	if (!nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+			       &ct2->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
+		return false;
+	if (!nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_REPLY].tuple,
+			       &ct2->tuplehash[IP_CT_DIR_REPLY].tuple))
+		return false;
+	if (!nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_ORIGINAL))
+		return false;
+	if (!nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_REPLY))
+		return false;
+
+	return net_eq(nf_ct_net(ct1), nf_ct_net(ct2));
+}
+
+/* caller must hold rcu readlock and none of the nf_conntrack_locks */
+/* Reap an expired entry found during a lockless walk.  The
+ * inc_not_zero guards against racing with the final put.
+ */
+static void nf_ct_gc_expired(struct nf_conn *ct)
+{
+	if (!atomic_inc_not_zero(&ct->ct_general.use))
+		return;
+
+	/* re-check under our reference; someone may have refreshed it */
+	if (nf_ct_should_gc(ct))
+		nf_ct_kill(ct);
+
+	nf_ct_put(ct);
+}
+
+/*
+ * Warning :
+ * - Caller must take a reference on returned object
+ *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
+ */
+/* Lockless hash lookup.  Expired entries met on the way are reaped
+ * opportunistically.  Uses hlist_nulls: if the terminating nulls value
+ * does not name our bucket, an entry was moved mid-walk and the whole
+ * lookup restarts.
+ */
+static struct nf_conntrack_tuple_hash *
+____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
+		      const struct nf_conntrack_tuple *tuple, u32 hash)
+{
+	struct nf_conntrack_tuple_hash *h;
+	struct hlist_nulls_head *ct_hash;
+	struct hlist_nulls_node *n;
+	unsigned int bucket, hsize;
+
+begin:
+	/* re-read table pointer/size each pass: it may be resized */
+	nf_conntrack_get_ht(&ct_hash, &hsize);
+	bucket = reciprocal_scale(hash, hsize);
+
+	hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
+		struct nf_conn *ct;
+
+		ct = nf_ct_tuplehash_to_ctrack(h);
+		if (nf_ct_is_expired(ct)) {
+			nf_ct_gc_expired(ct);
+			continue;
+		}
+
+		if (nf_ct_key_equal(h, tuple, zone, net))
+			return h;
+	}
+	/*
+	 * if the nulls value we got at the end of this lookup is
+	 * not the expected one, we must restart lookup.
+	 * We probably met an item that was moved to another chain.
+	 */
+	if (get_nulls_value(n) != bucket) {
+		NF_CT_STAT_INC_ATOMIC(net, search_restart);
+		goto begin;
+	}
+
+	return NULL;
+}
+
+/* Find a connection corresponding to a tuple. */
+/* Returns the matching tuplehash with a reference held, or NULL.
+ * After taking the reference the key is re-checked: the slab is
+ * SLAB_TYPESAFE_BY_RCU, so the object may have been recycled between
+ * lookup and refcount bump.
+ */
+static struct nf_conntrack_tuple_hash *
+__nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
+			const struct nf_conntrack_tuple *tuple, u32 hash)
+{
+	struct nf_conntrack_tuple_hash *h;
+	struct nf_conn *ct;
+
+	rcu_read_lock();
+
+	h = ____nf_conntrack_find(net, zone, tuple, hash);
+	if (h) {
+		/* We have a candidate that matches the tuple we're interested
+		 * in, try to obtain a reference and re-check tuple
+		 */
+		ct = nf_ct_tuplehash_to_ctrack(h);
+		if (likely(atomic_inc_not_zero(&ct->ct_general.use))) {
+			if (likely(nf_ct_key_equal(h, tuple, zone, net)))
+				goto found;
+
+			/* TYPESAFE_BY_RCU recycled the candidate */
+			nf_ct_put(ct);
+		}
+
+		h = NULL;
+	}
+found:
+	rcu_read_unlock();
+
+	return h;
+}
+
+#ifdef CONFIG_FASTNAT_MODULE
+/* Fastnat entry point: lookup with a precomputed hash; reference
+ * semantics are those of __nf_conntrack_find_get().
+ */
+struct nf_conntrack_tuple_hash *nf_conntrack_find_fast(struct net *net, const struct nf_conntrack_zone *zone,
+							const struct nf_conntrack_tuple *tuple, u32 hash)
+{
+	return __nf_conntrack_find_get(net, zone, tuple, hash);
+}
+#endif
+
+/* Public lookup: computes the (seeded) hash, then defers to
+ * __nf_conntrack_find_get().  Caller receives a referenced entry.
+ */
+struct nf_conntrack_tuple_hash *
+nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
+		      const struct nf_conntrack_tuple *tuple)
+{
+	return __nf_conntrack_find_get(net, zone, tuple,
+				       hash_conntrack_raw(tuple, net));
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
+
+/* Link both direction nodes into their buckets.  Caller holds the
+ * bucket locks for @hash and @reply_hash.
+ */
+static void __nf_conntrack_hash_insert(struct nf_conn *ct,
+				       unsigned int hash,
+				       unsigned int reply_hash)
+{
+	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
+				 &nf_conntrack_hash[hash]);
+	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
+				 &nf_conntrack_hash[reply_hash]);
+}
+
+/* Insert @ct into the hash table unless an equal entry (either
+ * direction) already exists.  Used by ctnetlink-style insertion where
+ * the caller already holds a reference.  Returns 0 or -EEXIST.
+ */
+int
+nf_conntrack_hash_check_insert(struct nf_conn *ct)
+{
+	const struct nf_conntrack_zone *zone;
+	struct net *net = nf_ct_net(ct);
+	unsigned int hash, reply_hash;
+	struct nf_conntrack_tuple_hash *h;
+	struct hlist_nulls_node *n;
+	unsigned int sequence;
+
+	zone = nf_ct_zone(ct);
+
+	local_bh_disable();
+	/* retry if the table was resized under us */
+	do {
+		sequence = read_seqcount_begin(&nf_conntrack_generation);
+		hash = hash_conntrack(net,
+				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+		reply_hash = hash_conntrack(net,
+					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
+
+	/* See if there's one in the list already, including reverse */
+	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
+		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+				    zone, net))
+			goto out;
+
+	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
+		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+				    zone, net))
+			goto out;
+
+	smp_wmb();
+	/* The caller holds a reference to this object */
+	atomic_set(&ct->ct_general.use, 2);
+	__nf_conntrack_hash_insert(ct, hash, reply_hash);
+	nf_conntrack_double_unlock(hash, reply_hash);
+	NF_CT_STAT_INC(net, insert);
+	local_bh_enable();
+	return 0;
+
+out:
+	nf_conntrack_double_unlock(hash, reply_hash);
+	local_bh_enable();
+	return -EEXIST;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
+
+/* Credit @packets and @bytes to direction @dir of @ct's accounting
+ * extension.  No-op when accounting is not enabled for this entry.
+ */
+void nf_ct_acct_add(struct nf_conn *ct, u32 dir, unsigned int packets,
+		    unsigned int bytes)
+{
+	struct nf_conn_acct *acct = nf_conn_acct_find(ct);
+	struct nf_conn_counter *counter;
+
+	if (!acct)
+		return;
+
+	counter = acct->counter;
+	atomic64_add(packets, &counter[dir].packets);
+	atomic64_add(bytes, &counter[dir].bytes);
+}
+EXPORT_SYMBOL_GPL(nf_ct_acct_add);
+
+/* Fold the byte count of clash loser @loser_ct into winner @ct for the
+ * packet's direction.  No-op when the loser has no accounting ext.
+ */
+static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+			     const struct nf_conn *loser_ct)
+{
+	struct nf_conn_acct *acct = nf_conn_acct_find(loser_ct);
+	unsigned int bytes;
+
+	if (!acct)
+		return;
+
+	/* u32 should be fine since we must have seen one packet. */
+	bytes = atomic64_read(&acct->counter[CTINFO2DIR(ctinfo)].bytes);
+	nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), bytes);
+}
+
+/* Take the hash-table reference, mark the entry CONFIRMED and stamp
+ * the start time.  Called just before linking into the hash table.
+ */
+static void __nf_conntrack_insert_prepare(struct nf_conn *ct)
+{
+	struct nf_conn_tstamp *tstamp;
+
+	atomic_inc(&ct->ct_general.use);
+	ct->status |= IPS_CONFIRMED;
+
+	/* set conntrack timestamp, if enabled. */
+	tstamp = nf_conn_tstamp_find(ct);
+	if (tstamp)
+		tstamp->start = ktime_get_real_ns();
+}
+
+/* caller must hold locks to prevent concurrent changes */
+/* Try to adopt the already-inserted winner @h for @skb: allowed when
+ * the winner had no NAT applied or both entries match exactly.  On
+ * success the skb's nfct is switched to the winner and the loser is
+ * parked on the dying list.
+ */
+static int __nf_ct_resolve_clash(struct sk_buff *skb,
+				 struct nf_conntrack_tuple_hash *h)
+{
+	/* This is the conntrack entry already in hashes that won race. */
+	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn *loser_ct;
+
+	loser_ct = nf_ct_get(skb, &ctinfo);
+
+	if (nf_ct_is_dying(ct))
+		return NF_DROP;
+
+	if (((ct->status & IPS_NAT_DONE_MASK) == 0) ||
+	    nf_ct_match(ct, loser_ct)) {
+		struct net *net = nf_ct_net(ct);
+
+		/* reference for the skb's new nfct pointer */
+		nf_conntrack_get(&ct->ct_general);
+
+		nf_ct_acct_merge(ct, ctinfo, loser_ct);
+		nf_ct_add_to_dying_list(loser_ct);
+		/* drop the skb's reference on the loser */
+		nf_conntrack_put(&loser_ct->ct_general);
+		nf_ct_set(skb, ct, ctinfo);
+
+		NF_CT_STAT_INC(net, clash_resolve);
+		return NF_ACCEPT;
+	}
+
+	return NF_DROP;
+}
+
+/**
+ * nf_ct_resolve_clash_harder - attempt to insert clashing conntrack entry
+ *
+ * @skb: skb that causes the collision
+ * @repl_idx: hash slot for reply direction
+ *
+ * Called when origin or reply direction had a clash.
+ * The skb can be handled without packet drop provided the reply direction
+ * is unique or there the existing entry has the identical tuple in both
+ * directions.
+ *
+ * Caller must hold conntrack table locks to prevent concurrent updates.
+ *
+ * Returns NF_DROP if the clash could not be handled.
+ */
+static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx)
+{
+	struct nf_conn *loser_ct = (struct nf_conn *)skb_nfct(skb);
+	const struct nf_conntrack_zone *zone;
+	struct nf_conntrack_tuple_hash *h;
+	struct hlist_nulls_node *n;
+	struct net *net;
+
+	zone = nf_ct_zone(loser_ct);
+	net = nf_ct_net(loser_ct);
+
+	/* Reply direction must never result in a clash, unless both origin
+	 * and reply tuples are identical.
+	 */
+	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[repl_idx], hnnode) {
+		if (nf_ct_key_equal(h,
+				    &loser_ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+				    zone, net))
+			return __nf_ct_resolve_clash(skb, h);
+	}
+
+	/* We want the clashing entry to go away real soon: 1 second timeout. */
+	WRITE_ONCE(loser_ct->timeout, nfct_time_stamp + HZ);
+
+	/* IPS_NAT_CLASH removes the entry automatically on the first
+	 * reply.  Also prevents UDP tracker from moving the entry to
+	 * ASSURED state, i.e. the entry can always be evicted under
+	 * pressure.
+	 */
+	loser_ct->status |= IPS_FIXED_TIMEOUT | IPS_NAT_CLASH;
+
+	__nf_conntrack_insert_prepare(loser_ct);
+
+	/* fake add for ORIGINAL dir: we want lookups to only find the entry
+	 * already in the table.  This also hides the clashing entry from
+	 * ctnetlink iteration, i.e. conntrack -L won't show them.
+	 */
+	hlist_nulls_add_fake(&loser_ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
+
+	/* only the REPLY direction becomes visible in the table */
+	hlist_nulls_add_head_rcu(&loser_ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
+				 &nf_conntrack_hash[repl_idx]);
+
+	NF_CT_STAT_INC(net, clash_resolve);
+	return NF_ACCEPT;
+}
+
+/**
+ * nf_ct_resolve_clash - attempt to handle clash without packet drop
+ *
+ * @skb: skb that causes the clash
+ * @h: tuplehash of the clashing entry already in table
+ * @reply_hash: hash slot for reply direction
+ *
+ * A conntrack entry can be inserted to the connection tracking table
+ * if there is no existing entry with an identical tuple.
+ *
+ * If there is one, @skb (and the assocated, unconfirmed conntrack) has
+ * to be dropped.  In case @skb is retransmitted, next conntrack lookup
+ * will find the already-existing entry.
+ *
+ * The major problem with such packet drop is the extra delay added by
+ * the packet loss -- it will take some time for a retransmit to occur
+ * (or the sender to time out when waiting for a reply).
+ *
+ * This function attempts to handle the situation without packet drop.
+ *
+ * If @skb has no NAT transformation or if the colliding entries are
+ * exactly the same, only the to-be-confirmed conntrack entry is discarded
+ * and @skb is associated with the conntrack entry already in the table.
+ *
+ * Failing that, the new, unconfirmed conntrack is still added to the table
+ * provided that the collision only occurs in the ORIGINAL direction.
+ * The new entry will be added only in the non-clashing REPLY direction,
+ * so packets in the ORIGINAL direction will continue to match the existing
+ * entry.  The new entry will also have a fixed timeout so it expires --
+ * due to the collision, it will only see reply traffic.
+ *
+ * Returns NF_DROP if the clash could not be resolved.
+ */
+static __cold noinline int
+nf_ct_resolve_clash(struct sk_buff *skb, struct nf_conntrack_tuple_hash *h,
+		    u32 reply_hash)
+{
+	/* This is the conntrack entry already in hashes that won race. */
+	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+	const struct nf_conntrack_l4proto *l4proto;
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn *loser_ct;
+	struct net *net;
+	int ret;
+
+	loser_ct = nf_ct_get(skb, &ctinfo);
+	net = nf_ct_net(loser_ct);
+
+	/* only trackers that opt in (e.g. UDP) may resolve clashes */
+	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
+	if (!l4proto->allow_clash)
+		goto drop;
+
+	ret = __nf_ct_resolve_clash(skb, h);
+	if (ret == NF_ACCEPT)
+		return ret;
+
+	ret = nf_ct_resolve_clash_harder(skb, reply_hash);
+	if (ret == NF_ACCEPT)
+		return ret;
+
+drop:
+	nf_ct_add_to_dying_list(loser_ct);
+	NF_CT_STAT_INC(net, drop);
+	NF_CT_STAT_INC(net, insert_failed);
+	return NF_DROP;
+}
+
+/* Confirm a connection given skb; places it in hash table */
+/* Move the skb's unconfirmed conntrack from the pcpu list into the
+ * hash table, resolving tuple clashes on the way.  Returns an NF_*
+ * verdict.  Runs with bottom halves disabled and the two bucket locks
+ * held across the critical section.
+ */
+int
+__nf_conntrack_confirm(struct sk_buff *skb)
+{
+	const struct nf_conntrack_zone *zone;
+	unsigned int hash, reply_hash;
+	struct nf_conntrack_tuple_hash *h;
+	struct nf_conn *ct;
+	struct nf_conn_help *help;
+	struct hlist_nulls_node *n;
+	enum ip_conntrack_info ctinfo;
+	struct net *net;
+	unsigned int sequence;
+	int ret = NF_DROP;
+
+	ct = nf_ct_get(skb, &ctinfo);
+	net = nf_ct_net(ct);
+
+	/* ipt_REJECT uses nf_conntrack_attach to attach related
+	   ICMP/TCP RST packets in other direction.  Actual packet
+	   which created connection will be IP_CT_NEW or for an
+	   expected connection, IP_CT_RELATED. */
+	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
+		return NF_ACCEPT;
+
+	zone = nf_ct_zone(ct);
+	local_bh_disable();
+
+	do {
+		sequence = read_seqcount_begin(&nf_conntrack_generation);
+		/* reuse the hash saved before */
+		hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
+		hash = scale_hash(hash);
+		reply_hash = hash_conntrack(net,
+					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+
+	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
+
+	/* We're not in hash table, and we refuse to set up related
+	 * connections for unconfirmed conns.  But packet copies and
+	 * REJECT will give spurious warnings here.
+	 */
+
+	/* Another skb with the same unconfirmed conntrack may
+	 * win the race.  This may happen for bridge(br_flood)
+	 * or broadcast/multicast packets do skb_clone with
+	 * unconfirmed conntrack.
+	 */
+	if (unlikely(nf_ct_is_confirmed(ct))) {
+		WARN_ON_ONCE(1);
+		nf_conntrack_double_unlock(hash, reply_hash);
+		local_bh_enable();
+		return NF_DROP;
+	}
+
+	pr_debug("Confirming conntrack %p\n", ct);
+	/* We have to check the DYING flag after unlink to prevent
+	 * a race against nf_ct_get_next_corpse() possibly called from
+	 * user context, else we insert an already 'dead' hash, blocking
+	 * further use of that particular connection -JM.
+	 */
+	nf_ct_del_from_dying_or_unconfirmed_list(ct);
+
+	if (unlikely(nf_ct_is_dying(ct))) {
+		nf_ct_add_to_dying_list(ct);
+		NF_CT_STAT_INC(net, insert_failed);
+		goto dying;
+	}
+
+	/* See if there's one in the list already, including reverse:
+	   NAT could have grabbed it without realizing, since we're
+	   not in the hash.  If there is, we lost race. */
+	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
+		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+				    zone, net))
+			goto out;
+
+	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
+		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+				    zone, net))
+			goto out;
+
+	/* Timer relative to confirmation time, not original
+	   setting time, otherwise we'd get timer wrap in
+	   weird delay cases. */
+	ct->timeout += nfct_time_stamp;
+
+	__nf_conntrack_insert_prepare(ct);
+
+	/* Since the lookup is lockless, hash insertion must be done after
+	 * starting the timer and setting the CONFIRMED bit.  The RCU barriers
+	 * guarantee that no other CPU can find the conntrack before the above
+	 * stores are visible.
+	 */
+	__nf_conntrack_hash_insert(ct, hash, reply_hash);
+	nf_conntrack_double_unlock(hash, reply_hash);
+	local_bh_enable();
+
+	help = nfct_help(ct);
+	if (help && help->helper)
+		nf_conntrack_event_cache(IPCT_HELPER, ct);
+
+	nf_conntrack_event_cache(master_ct(ct) ?
+				 IPCT_RELATED : IPCT_NEW, ct);
+	return NF_ACCEPT;
+
+out:
+	ret = nf_ct_resolve_clash(skb, h, reply_hash);
+dying:
+	nf_conntrack_double_unlock(hash, reply_hash);
+	local_bh_enable();
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
+
+/* Returns true if a connection correspondings to the tuple (required
+   for NAT). */
+/* Lockless scan of the bucket for @tuple, skipping @ignored_conntrack.
+ * Uses the same nulls-restart discipline as ____nf_conntrack_find().
+ */
+int
+nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
+			 const struct nf_conn *ignored_conntrack)
+{
+	struct net *net = nf_ct_net(ignored_conntrack);
+	const struct nf_conntrack_zone *zone;
+	struct nf_conntrack_tuple_hash *h;
+	struct hlist_nulls_head *ct_hash;
+	unsigned int hash, hsize;
+	struct hlist_nulls_node *n;
+	struct nf_conn *ct;
+
+	zone = nf_ct_zone(ignored_conntrack);
+
+	rcu_read_lock();
+ begin:
+	nf_conntrack_get_ht(&ct_hash, &hsize);
+	hash = __hash_conntrack(net, tuple, hsize);
+
+	hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
+		ct = nf_ct_tuplehash_to_ctrack(h);
+
+		if (ct == ignored_conntrack)
+			continue;
+
+		if (nf_ct_is_expired(ct)) {
+			nf_ct_gc_expired(ct);
+			continue;
+		}
+
+		if (nf_ct_key_equal(h, tuple, zone, net)) {
+			/* Tuple is taken already, so caller will need to find
+			 * a new source port to use.
+			 *
+			 * Only exception:
+			 * If the *original tuples* are identical, then both
+			 * conntracks refer to the same flow.
+			 * This is a rare situation, it can occur e.g. when
+			 * more than one UDP packet is sent from same socket
+			 * in different threads.
+			 *
+			 * Let nf_ct_resolve_clash() deal with this later.
+			 */
+			if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+					      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
+			    nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL))
+				continue;
+
+			NF_CT_STAT_INC_ATOMIC(net, found);
+			rcu_read_unlock();
+			return 1;
+		}
+	}
+
+	/* chain was spliced mid-walk: restart */
+	if (get_nulls_value(n) != hash) {
+		NF_CT_STAT_INC_ATOMIC(net, search_restart);
+		goto begin;
+	}
+
+	rcu_read_unlock();
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
+
+/* number of consecutive buckets early_drop() will scan */
+#define NF_CT_EVICTION_RANGE	8
+
+/* There's a small race here where we may free a just-assured
+   connection.  Too bad: we're in trouble anyway. */
+/* Walk one bucket and kill unassured, non-dying entries belonging to
+ * @net.  Returns how many entries were deleted.
+ */
+static unsigned int early_drop_list(struct net *net,
+				    struct hlist_nulls_head *head)
+{
+	struct nf_conntrack_tuple_hash *h;
+	struct hlist_nulls_node *n;
+	unsigned int drops = 0;
+	struct nf_conn *tmp;
+
+	hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) {
+		tmp = nf_ct_tuplehash_to_ctrack(h);
+
+		/* offloaded flows are managed by the flowtable, skip */
+		if (test_bit(IPS_OFFLOAD_BIT, &tmp->status))
+			continue;
+
+		if (nf_ct_is_expired(tmp)) {
+			nf_ct_gc_expired(tmp);
+			continue;
+		}
+
+		if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
+		    !net_eq(nf_ct_net(tmp), net) ||
+		    nf_ct_is_dying(tmp))
+			continue;
+
+		if (!atomic_inc_not_zero(&tmp->ct_general.use))
+			continue;
+
+		/* kill only if still in same netns -- might have moved due to
+		 * SLAB_TYPESAFE_BY_RCU rules.
+		 *
+		 * We steal the timer reference.  If that fails timer has
+		 * already fired or someone else deleted it.  Just drop ref
+		 * and move to next entry.
+		 */
+		if (net_eq(nf_ct_net(tmp), net) &&
+		    nf_ct_is_confirmed(tmp) &&
+		    nf_ct_delete(tmp, 0, 0))
+			drops++;
+
+		nf_ct_put(tmp);
+	}
+
+	return drops;
+}
+
+/* Table-full fallback: scan up to NF_CT_EVICTION_RANGE buckets starting
+ * at @hash's bucket and evict droppable entries.  Returns true as soon
+ * as at least one entry was dropped, false if nothing could be freed.
+ */
+static noinline int early_drop(struct net *net, unsigned int hash)
+{
+	unsigned int i, bucket;
+
+	for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
+		struct hlist_nulls_head *ct_hash;
+		unsigned int hsize, drops;
+
+		rcu_read_lock();
+		nf_conntrack_get_ht(&ct_hash, &hsize);
+		if (!i)
+			bucket = reciprocal_scale(hash, hsize);
+		else
+			bucket = (bucket + 1) % hsize;
+
+		drops = early_drop_list(net, &ct_hash[bucket]);
+		rcu_read_unlock();
+
+		if (drops) {
+			NF_CT_STAT_ADD_ATOMIC(net, early_drop, drops);
+			return true;
+		}
+	}
+
+	return false;
+}
+
+/* Entries the gc worker must leave alone: unconfirmed or already dying. */
+static bool gc_worker_skip_ct(const struct nf_conn *ct)
+{
+	if (!nf_ct_is_confirmed(ct))
+		return true;
+
+	return nf_ct_is_dying(ct);
+}
+
+/* An entry may be early-dropped unless it is ASSURED; ASSURED entries
+ * are droppable only when the l4 tracker's can_early_drop hook agrees.
+ */
+static bool gc_worker_can_early_drop(const struct nf_conn *ct)
+{
+	const struct nf_conntrack_l4proto *l4proto;
+
+	if (!test_bit(IPS_ASSURED_BIT, &ct->status))
+		return true;
+
+	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
+
+	return l4proto->can_early_drop && l4proto->can_early_drop(ct);
+}
+
+/* Periodic garbage collector.  Scans hash buckets for a bounded wall
+ * time (GC_SCAN_MAX_DURATION), reaping expired entries and — when the
+ * table is near capacity — early-dropping droppable ones.  Resumes
+ * from next_bucket on the following run if it ran out of time.
+ */
+static void gc_worker(struct work_struct *work)
+{
+	unsigned long end_time = jiffies + GC_SCAN_MAX_DURATION;
+	unsigned int i, hashsz, nf_conntrack_max95 = 0;
+	unsigned long next_run = GC_SCAN_INTERVAL;
+	struct conntrack_gc_work *gc_work;
+	gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
+
+	i = gc_work->next_bucket;
+	if (gc_work->early_drop)
+		nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;
+
+	do {
+		struct nf_conntrack_tuple_hash *h;
+		struct hlist_nulls_head *ct_hash;
+		struct hlist_nulls_node *n;
+		struct nf_conn *tmp;
+
+		rcu_read_lock();
+
+		nf_conntrack_get_ht(&ct_hash, &hashsz);
+		if (i >= hashsz) {
+			rcu_read_unlock();
+			break;
+		}
+
+		hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
+			struct net *net;
+
+			tmp = nf_ct_tuplehash_to_ctrack(h);
+
+			/* offloaded: just refresh the timeout */
+			if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
+				nf_ct_offload_timeout(tmp);
+				continue;
+			}
+
+			if (nf_ct_is_expired(tmp)) {
+				nf_ct_gc_expired(tmp);
+				continue;
+			}
+
+			/* early-drop pass only when table is >= 95% full */
+			if (nf_conntrack_max95 == 0 || gc_worker_skip_ct(tmp))
+				continue;
+
+			net = nf_ct_net(tmp);
+			if (atomic_read(&net->ct.count) < nf_conntrack_max95)
+				continue;
+
+			/* need to take reference to avoid possible races */
+			if (!atomic_inc_not_zero(&tmp->ct_general.use))
+				continue;
+
+			/* re-check under our reference */
+			if (gc_worker_skip_ct(tmp)) {
+				nf_ct_put(tmp);
+				continue;
+			}
+
+			if (gc_worker_can_early_drop(tmp))
+				nf_ct_kill(tmp);
+
+			nf_ct_put(tmp);
+		}
+
+		/* could check get_nulls_value() here and restart if ct
+		 * was moved to another chain.  But given gc is best-effort
+		 * we will just continue with next hash slot.
+		 */
+		rcu_read_unlock();
+		cond_resched();
+		i++;
+
+		/* out of budget: remember where to resume, run again asap */
+		if (time_after(jiffies, end_time) && i < hashsz) {
+			gc_work->next_bucket = i;
+			next_run = 0;
+			break;
+		}
+	} while (i < hashsz);
+
+	if (gc_work->exiting)
+		return;
+
+	/*
+	 * Eviction will normally happen from the packet path, and not
+	 * from this gc worker.
+	 *
+	 * This worker is only here to reap expired entries when system went
+	 * idle after a busy period.
+	 */
+	if (next_run) {
+		gc_work->early_drop = false;
+		gc_work->next_bucket = 0;
+	}
+	queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
+}
+
+/* Initialise the deferrable gc work item; it is queued elsewhere. */
+static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
+{
+	INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker);
+	gc_work->exiting = false;
+}
+
+/* Allocate and minimally initialise a conntrack entry.  Enforces
+ * nf_conntrack_max (attempting early_drop on overflow) and follows the
+ * SLAB_TYPESAFE_BY_RCU rules: the refcount is set to 0 last, after all
+ * other fields a concurrent lookup might inspect are initialised.
+ * Returns ERR_PTR(-ENOMEM) on failure.
+ */
+static struct nf_conn *
+__nf_conntrack_alloc(struct net *net,
+		     const struct nf_conntrack_zone *zone,
+		     const struct nf_conntrack_tuple *orig,
+		     const struct nf_conntrack_tuple *repl,
+		     gfp_t gfp, u32 hash)
+{
+	struct nf_conn *ct;
+
+	/* We don't want any race condition at early drop stage */
+	atomic_inc(&net->ct.count);
+
+	if (nf_conntrack_max &&
+	    unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
+		if (!early_drop(net, hash)) {
+			/* ask the gc worker to start early-dropping too */
+			if (!conntrack_gc_work.early_drop)
+				conntrack_gc_work.early_drop = true;
+			atomic_dec(&net->ct.count);
+			net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	/*
+	 * Do not use kmem_cache_zalloc(), as this cache uses
+	 * SLAB_TYPESAFE_BY_RCU.
+	 */
+	ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
+	if (ct == NULL)
+		goto out;
+
+	spin_lock_init(&ct->lock);
+	ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
+	ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
+	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
+	/* save hash for reusing when confirming */
+	*(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
+	ct->status = 0;
+	WRITE_ONCE(ct->timeout, 0);
+	write_pnet(&ct->ct_net, net);
+	/* zero everything from __nfct_init_offset up to proto */
+	memset(&ct->__nfct_init_offset, 0,
+	       offsetof(struct nf_conn, proto) -
+	       offsetof(struct nf_conn, __nfct_init_offset));
+
+	nf_ct_zone_add(ct, zone);
+
+	/* Because we use RCU lookups, we set ct_general.use to zero before
+	 * this is inserted in any list.
+	 */
+	atomic_set(&ct->ct_general.use, 0);
+	return ct;
+out:
+	atomic_dec(&net->ct.count);
+	return ERR_PTR(-ENOMEM);
+}
+
+/* Public allocator: as __nf_conntrack_alloc() but without a
+ * precomputed hash (early_drop then starts from bucket 0).
+ */
+struct nf_conn *nf_conntrack_alloc(struct net *net,
+				   const struct nf_conntrack_zone *zone,
+				   const struct nf_conntrack_tuple *orig,
+				   const struct nf_conntrack_tuple *repl,
+				   gfp_t gfp)
+{
+	return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
+
+/* Return @ct to the slab and drop it from the per-netns count. */
+void nf_conntrack_free(struct nf_conn *ct)
+{
+	struct net *net = nf_ct_net(ct);
+
+	/* A freed object has refcnt == 0, that's
+	 * the golden rule for SLAB_TYPESAFE_BY_RCU
+	 */
+	WARN_ON(atomic_read(&ct->ct_general.use) != 0);
+
+	nf_ct_ext_destroy(ct);
+	kmem_cache_free(nf_conntrack_cachep, ct);
+	/* order the free before the count decrement */
+	smp_mb__before_atomic();
+	atomic_dec(&net->ct.count);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_free);
+
+
+/* Allocate a new conntrack: we return -ENOMEM if classification
+ failed due to stress. Otherwise it really is unclassifiable. */
+static noinline struct nf_conntrack_tuple_hash *
+init_conntrack(struct net *net, struct nf_conn *tmpl,
+ const struct nf_conntrack_tuple *tuple,
+ struct sk_buff *skb,
+ unsigned int dataoff, u32 hash)
+{
+ struct nf_conn *ct;
+ struct nf_conn_help *help;
+ struct nf_conntrack_tuple repl_tuple;
+ struct nf_conntrack_ecache *ecache;
+ struct nf_conntrack_expect *exp = NULL;
+ const struct nf_conntrack_zone *zone;
+ struct nf_conn_timeout *timeout_ext;
+ struct nf_conntrack_zone tmp;
+ int dir = 0;
+
+ if (!nf_ct_invert_tuple(&repl_tuple, tuple)) {
+ pr_debug("Can't invert tuple.\n");
+ return NULL;
+ }
+
+ zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
+ ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
+ hash);
+ if (IS_ERR(ct))
+ return (struct nf_conntrack_tuple_hash *)ct;
+
+#ifdef CONFIG_FASTNAT_MODULE
+ RCU_INIT_POINTER(ct->fast_ct.fast_dst[IP_CT_DIR_ORIGINAL], NULL);
+ RCU_INIT_POINTER(ct->fast_ct.fast_dst[IP_CT_DIR_REPLY], NULL);
+ ct->fast_ct.fast_brport[IP_CT_DIR_ORIGINAL] = NULL;
+ ct->fast_ct.fast_brport[IP_CT_DIR_REPLY] = NULL;
+ ct->fast_ct.isFast = 0; //CT_FAST_NOT
+ RCU_INIT_POINTER(ct->fast_ct.sk, NULL);
+#endif
+ ct->packet_info[IP_CT_DIR_ORIGINAL].packets = 0;
+ ct->packet_info[IP_CT_DIR_ORIGINAL].bytes = 0;
+ ct->packet_info[IP_CT_DIR_REPLY].packets = 0;
+ ct->packet_info[IP_CT_DIR_REPLY].bytes = 0;
+ for(dir = IP_CT_DIR_ORIGINAL; dir < IP_CT_DIR_MAX; dir++)
+ {
+ ct->indev[dir] = NULL;
+ ct->outdev[dir] = NULL;
+ }
+ if (!nf_ct_add_synproxy(ct, tmpl)) {
+ nf_conntrack_free(ct);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
+
+ if (timeout_ext)
+ nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout),
+ GFP_ATOMIC);
+
+ nf_ct_acct_ext_add(ct, GFP_ATOMIC);
+ nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
+ nf_ct_labels_ext_add(ct);
+
+ ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
+ nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
+ ecache ? ecache->expmask : 0,
+ GFP_ATOMIC);
+
+ local_bh_disable();
+ if (net->ct.expect_count) {
+ spin_lock(&nf_conntrack_expect_lock);
+ exp = nf_ct_find_expectation(net, zone, tuple);
+ if (exp) {
+ pr_debug("expectation arrives ct=%p exp=%p\n",
+ ct, exp);
+ /* Welcome, Mr. Bond. We've been expecting you... */
+ __set_bit(IPS_EXPECTED_BIT, &ct->status);
+ /* exp->master safe, refcnt bumped in nf_ct_find_expectation */
+ ct->master = exp->master;
+ if (exp->helper) {
+ help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
+ if (help)
+ rcu_assign_pointer(help->helper, exp->helper);
+ }
+
+#ifdef CONFIG_NF_CONNTRACK_MARK
+ ct->mark = exp->master->mark;
+#endif
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+ ct->secmark = exp->master->secmark;
+#endif
+ NF_CT_STAT_INC(net, expect_new);
+ }
+ spin_unlock(&nf_conntrack_expect_lock);
+ }
+ if (!exp)
+ __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
+
+ /* Now it is inserted into the unconfirmed list, bump refcount */
+ nf_conntrack_get(&ct->ct_general);
+ nf_ct_add_to_unconfirmed_list(ct);
+
+ local_bh_enable();
+
+ if (exp) {
+ if (exp->expectfn)
+ exp->expectfn(ct, exp);
+ nf_ct_expect_put(exp);
+ }
+
+ return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
+}
+
+/* On success, returns 0, sets skb->_nfct | ctinfo */
+/*
+ * Extract the connection tuple from the packet, look it up in the
+ * conntrack hash (creating a fresh unconfirmed entry via init_conntrack()
+ * on a miss) and store the resulting ct pointer plus ctinfo state in
+ * skb->_nfct.  Returns 0 on success (including "could not track"), or a
+ * negative errno propagated from init_conntrack().
+ */
+static int
+resolve_normal_ct(struct nf_conn *tmpl,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ u_int8_t protonum,
+ const struct nf_hook_state *state)
+{
+ const struct nf_conntrack_zone *zone;
+ struct nf_conntrack_tuple tuple;
+ struct nf_conntrack_tuple_hash *h;
+ enum ip_conntrack_info ctinfo;
+ struct nf_conntrack_zone tmp;
+ struct nf_conn *ct;
+ u32 hash;
+
+ if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
+ dataoff, state->pf, protonum, state->net,
+ &tuple)) {
+ pr_debug("Can't get tuple\n");
+ return 0;
+ }
+
+ /* look for tuple match */
+ zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
+ hash = hash_conntrack_raw(&tuple, state->net);
+ h = __nf_conntrack_find_get(state->net, zone, &tuple, hash);
+ if (!h) {
+ /* Miss: allocate a new (unconfirmed) conntrack entry. */
+ h = init_conntrack(state->net, tmpl, &tuple,
+ skb, dataoff, hash);
+ if (!h)
+ return 0;
+ if (IS_ERR(h))
+ return PTR_ERR(h);
+ }
+ ct = nf_ct_tuplehash_to_ctrack(h);
+
+ /* It exists; we have (non-exclusive) reference. */
+ if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
+ ctinfo = IP_CT_ESTABLISHED_REPLY;
+ } else {
+ /* Once we've had two way comms, always ESTABLISHED. */
+ if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
+ pr_debug("normal packet for %p\n", ct);
+ ctinfo = IP_CT_ESTABLISHED;
+ } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
+ pr_debug("related packet for %p\n", ct);
+ ctinfo = IP_CT_RELATED;
+ } else {
+ pr_debug("new packet for %p\n", ct);
+ ctinfo = IP_CT_NEW;
+ }
+ }
+ nf_ct_set(skb, ct, ctinfo);
+ return 0;
+}
+
+/*
+ * icmp packets need special treatment to handle error messages that are
+ * related to a connection.
+ *
+ * Callers need to check if skb has a conntrack assigned when this
+ * helper returns; in such case skb belongs to an already known connection.
+ *
+ * Return: the ICMP tracker's verdict; values <= 0 bump the per-net
+ * "error" statistic before being returned to the caller.
+ */
+static unsigned int __cold
+nf_conntrack_handle_icmp(struct nf_conn *tmpl,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ u8 protonum,
+ const struct nf_hook_state *state)
+{
+ int ret;
+
+ /* Dispatch to the v4 or v6 ICMP error handler; anything else is
+ * not ICMP and is simply accepted here.
+ */
+ if (state->pf == NFPROTO_IPV4 && protonum == IPPROTO_ICMP)
+ ret = nf_conntrack_icmpv4_error(tmpl, skb, dataoff, state);
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (state->pf == NFPROTO_IPV6 && protonum == IPPROTO_ICMPV6)
+ ret = nf_conntrack_icmpv6_error(tmpl, skb, dataoff, state);
+#endif
+ else
+ return NF_ACCEPT;
+
+ if (ret <= 0)
+ NF_CT_STAT_INC_ATOMIC(state->net, error);
+
+ return ret;
+}
+
+/* Fallback L4 handler for protocols without a dedicated tracker:
+ * refresh the connection with the generic timeout and accept.
+ */
+static int generic_packet(struct nf_conn *ct, struct sk_buff *skb,
+ enum ip_conntrack_info ctinfo)
+{
+ const unsigned int *timeout = nf_ct_timeout_lookup(ct);
+
+ /* No per-ct timeout policy attached: use the per-net generic one. */
+ if (!timeout)
+ timeout = &nf_generic_pernet(nf_ct_net(ct))->timeout;
+
+ nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
+ return NF_ACCEPT;
+}
+
+/* Returns verdict for packet, or -1 for invalid. */
+/*
+ * Run the per-protocol conntrack state machine for this packet.
+ * Dispatches on the conntrack's L4 protocol number; protocols without
+ * a compiled-in tracker fall through to generic_packet().
+ */
+static int nf_conntrack_handle_packet(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
+{
+ switch (nf_ct_protonum(ct)) {
+ case IPPROTO_TCP:
+ return nf_conntrack_tcp_packet(ct, skb, dataoff,
+ ctinfo, state);
+ case IPPROTO_UDP:
+ return nf_conntrack_udp_packet(ct, skb, dataoff,
+ ctinfo, state);
+ case IPPROTO_ICMP:
+ return nf_conntrack_icmp_packet(ct, skb, ctinfo, state);
+#if IS_ENABLED(CONFIG_IPV6)
+ case IPPROTO_ICMPV6:
+ return nf_conntrack_icmpv6_packet(ct, skb, ctinfo, state);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE
+ case IPPROTO_UDPLITE:
+ return nf_conntrack_udplite_packet(ct, skb, dataoff,
+ ctinfo, state);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+ case IPPROTO_SCTP:
+ return nf_conntrack_sctp_packet(ct, skb, dataoff,
+ ctinfo, state);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+ case IPPROTO_DCCP:
+ return nf_conntrack_dccp_packet(ct, skb, dataoff,
+ ctinfo, state);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_GRE
+ case IPPROTO_GRE:
+ return nf_conntrack_gre_packet(ct, skb, dataoff,
+ ctinfo, state);
+#endif
+ }
+
+ /* Unknown L4 protocol: generic timeout handling only. */
+ return generic_packet(ct, skb, ctinfo);
+}
+
+#ifdef CONFIG_FASTNAT_MODULE
+/* Vendor fastnat hook: exported wrapper so the fastnat module can run
+ * the normal per-protocol conntrack state machine on a packet it
+ * processed on the fast path.
+ */
+int nf_conntrack_handle_packet_fast(struct nf_conn *ct,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state)
+{
+ return nf_conntrack_handle_packet(ct, skb, dataoff, ctinfo, state);
+}
+
+/* Drop one conntrack reference; on the final put, release the fastnat
+ * cached dst/sk state before destroying the conntrack itself.
+ * NOTE(review): this replaces the inline nf_conntrack_put() from
+ * <linux/netfilter/nf_conntrack.h> when fastnat is enabled — presumably
+ * the inline is compiled out in that configuration; verify against the
+ * vendor header.
+ */
+void nf_conntrack_put(struct nf_conntrack *nfct)
+{
+ if (nfct && atomic_dec_and_test(&nfct->use)){
+ fast_conn_release((struct nf_conn *)nfct, RELEASE_ALL_DST | RELEASE_ALL_SK);
+ nf_conntrack_destroy(nfct);
+ }
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_put);
+#endif
+
+
+
+/*
+ * Main conntrack entry point, called from the netfilter hooks.
+ * Resolves (or creates) the conntrack for the packet, runs the
+ * per-protocol state machine, and returns a netfilter verdict.
+ * A template conntrack attached to the skb (by e.g. the CT target)
+ * steers zone/helper/timeout selection and is released on exit.
+ */
+unsigned int
+nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state)
+{
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct, *tmpl;
+ u_int8_t protonum;
+ int dataoff, ret;
+
+ tmpl = nf_ct_get(skb, &ctinfo);
+ if (tmpl || ctinfo == IP_CT_UNTRACKED) {
+ /* Previously seen (loopback or untracked)? Ignore. */
+ if ((tmpl && !nf_ct_is_template(tmpl)) ||
+ ctinfo == IP_CT_UNTRACKED)
+ return NF_ACCEPT;
+ /* Template: detach it from the skb while we track. */
+ skb->_nfct = 0;
+ }
+
+ /* rcu_read_lock()ed by nf_hook_thresh */
+ dataoff = get_l4proto(skb, skb_network_offset(skb), state->pf, &protonum);
+ if (dataoff <= 0) {
+ pr_debug("not prepared to track yet or error occurred\n");
+ NF_CT_STAT_INC_ATOMIC(state->net, invalid);
+ ret = NF_ACCEPT;
+ goto out;
+ }
+
+ if (protonum == IPPROTO_ICMP || protonum == IPPROTO_ICMPV6) {
+ ret = nf_conntrack_handle_icmp(tmpl, skb, dataoff,
+ protonum, state);
+ if (ret <= 0) {
+ ret = -ret;
+ goto out;
+ }
+ /* ICMP[v6] protocol trackers may assign one conntrack. */
+ if (skb->_nfct)
+ goto out;
+ }
+repeat:
+ ret = resolve_normal_ct(tmpl, skb, dataoff,
+ protonum, state);
+ if (ret < 0) {
+ /* Too stressed to deal. */
+ NF_CT_STAT_INC_ATOMIC(state->net, drop);
+ ret = NF_DROP;
+ goto out;
+ }
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (!ct) {
+ /* Not valid part of a connection */
+ NF_CT_STAT_INC_ATOMIC(state->net, invalid);
+ ret = NF_ACCEPT;
+ goto out;
+ }
+
+ ret = nf_conntrack_handle_packet(ct, skb, dataoff, ctinfo, state);
+ if (ret <= 0) {
+ /* Invalid: inverse of the return code tells
+ * the netfilter core what to do */
+ pr_debug("nf_conntrack_in: Can't track with proto module\n");
+ nf_conntrack_put(&ct->ct_general);
+ skb->_nfct = 0;
+ /* Special case: TCP tracker reports an attempt to reopen a
+ * closed/aborted connection. We have to go back and create a
+ * fresh conntrack.
+ */
+ if (ret == -NF_REPEAT)
+ goto repeat;
+
+ NF_CT_STAT_INC_ATOMIC(state->net, invalid);
+ if (ret == -NF_DROP)
+ NF_CT_STAT_INC_ATOMIC(state->net, drop);
+
+ ret = -ret;
+ goto out;
+ }
+
+ /* First packet seen in the reply direction: raise the event once. */
+ if (ctinfo == IP_CT_ESTABLISHED_REPLY &&
+ !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
+ nf_conntrack_event_cache(IPCT_REPLY, ct);
+out:
+ if (tmpl)
+ nf_ct_put(tmpl);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_in);
+
+/* Alter reply tuple (maybe alter helper). This is for NAT, and is
+ implicitly racy: see __nf_conntrack_confirm */
+void nf_conntrack_alter_reply(struct nf_conn *ct,
+ const struct nf_conntrack_tuple *newreply)
+{
+ struct nf_conn_help *help = nfct_help(ct);
+
+ /* Should be unconfirmed, so not in hash table yet */
+ WARN_ON(nf_ct_is_confirmed(ct));
+
+ pr_debug("Altering reply tuple of %p to ", ct);
+ nf_ct_dump_tuple(newreply);
+
+ ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
+ /* Keep an already-chosen helper (expected connection or one with
+ * pending expectations); otherwise re-run helper selection since
+ * the NAT'ed tuple may now match a different helper.
+ */
+ if (ct->master || (help && !hlist_empty(&help->expectations)))
+ return;
+
+ rcu_read_lock();
+ __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
+
+/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
+/*
+ * @ct:            connection to refresh
+ * @ctinfo:        packet direction info (used only for accounting)
+ * @skb:           packet whose length is accounted
+ * @extra_jiffies: relative timeout to (re)arm
+ * @do_acct:       update byte/packet counters when true
+ */
+void __nf_ct_refresh_acct(struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ const struct sk_buff *skb,
+ u32 extra_jiffies,
+ bool do_acct)
+{
+ /* Only update if this is not a fixed timeout */
+ if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
+ goto acct;
+
+ /* If not in hash table, timer will not be active yet */
+ if (nf_ct_is_confirmed(ct))
+ extra_jiffies += nfct_time_stamp;
+
+ /* Avoid dirtying the cacheline when the value is unchanged. */
+ if (READ_ONCE(ct->timeout) != extra_jiffies)
+ WRITE_ONCE(ct->timeout, extra_jiffies);
+acct:
+ if (do_acct)
+ nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), skb->len);
+}
+EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
+
+/* Account this final packet, then kill the connection.
+ * Returns the result of nf_ct_delete() (true if the ct was deleted).
+ */
+bool nf_ct_kill_acct(struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ const struct sk_buff *skb)
+{
+ nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), skb->len);
+
+ return nf_ct_delete(ct, 0, 0);
+}
+EXPORT_SYMBOL_GPL(nf_ct_kill_acct);
+
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
+
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_conntrack.h>
+#include <linux/mutex.h>
+
+/* Generic function for tcp/udp/sctp/dccp and alike. */
+/* Serialize the tuple's source/destination ports into netlink attributes.
+ * Returns 0 on success, -1 if the skb ran out of room.
+ */
+int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
+ const struct nf_conntrack_tuple *tuple)
+{
+ if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
+ nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);
+
+/* Netlink attribute policy for the generic port-based tuple attributes. */
+const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
+ [CTA_PROTO_SRC_PORT] = { .type = NLA_U16 },
+ [CTA_PROTO_DST_PORT] = { .type = NLA_U16 },
+};
+EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);
+
+/* Fill the tuple's ports from netlink attributes.
+ * Each port is only read when the corresponding CTA_FILTER_FLAG bit is
+ * set in @flags; a set flag with a missing attribute is -EINVAL.
+ */
+int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
+ struct nf_conntrack_tuple *t,
+ u_int32_t flags)
+{
+ if (flags & CTA_FILTER_FLAG(CTA_PROTO_SRC_PORT)) {
+ if (!tb[CTA_PROTO_SRC_PORT])
+ return -EINVAL;
+
+ t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
+ }
+
+ if (flags & CTA_FILTER_FLAG(CTA_PROTO_DST_PORT)) {
+ if (!tb[CTA_PROTO_DST_PORT])
+ return -EINVAL;
+
+ t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
+
+/* Size of the port tuple attributes; computed once and cached. */
+unsigned int nf_ct_port_nlattr_tuple_size(void)
+{
+ static unsigned int size __read_mostly;
+
+ if (!size)
+ size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
+
+ return size;
+}
+EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
+#endif
+
+/* Used by ipt_REJECT and ip6t_REJECT. */
+/* Attach @skb's conntrack to the locally generated reply @nskb,
+ * marking it RELATED in the opposite direction, and take a reference.
+ */
+static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
+{
+ struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+
+ /* This ICMP is in reverse direction to the packet which caused it */
+ ct = nf_ct_get(skb, &ctinfo);
+ if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
+ ctinfo = IP_CT_RELATED_REPLY;
+ else
+ ctinfo = IP_CT_RELATED;
+
+ /* Attach to new skbuff, and increment count */
+ nf_ct_set(nskb, ct, ctinfo);
+ nf_conntrack_get(skb_nfct(nskb));
+}
+
+/* Re-resolve the conntrack for a packet re-injected from userspace
+ * (nf_queue): the original unconfirmed ct may have lost a clash race,
+ * so look up the (possibly NAT-reversed) tuple again, switch the skb to
+ * the winning ct, and re-apply NAT manglings recorded in the old status.
+ * Returns 0 on success or no-op, -1 on error (bad packet / NAT drop).
+ */
+static int __nf_conntrack_update(struct net *net, struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo)
+{
+ struct nf_conntrack_tuple_hash *h;
+ struct nf_conntrack_tuple tuple;
+ struct nf_nat_hook *nat_hook;
+ unsigned int status;
+ int dataoff;
+ u16 l3num;
+ u8 l4num;
+
+ l3num = nf_ct_l3num(ct);
+
+ dataoff = get_l4proto(skb, skb_network_offset(skb), l3num, &l4num);
+ if (dataoff <= 0)
+ return -1;
+
+ if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num,
+ l4num, net, &tuple))
+ return -1;
+
+ /* Undo any NAT already applied to the packet so the lookup uses
+ * the pre-NAT (original-direction) addresses/ports.
+ */
+ if (ct->status & IPS_SRC_NAT) {
+ memcpy(tuple.src.u3.all,
+ ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.all,
+ sizeof(tuple.src.u3.all));
+ tuple.src.u.all =
+ ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all;
+ }
+
+ if (ct->status & IPS_DST_NAT) {
+ memcpy(tuple.dst.u3.all,
+ ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.all,
+ sizeof(tuple.dst.u3.all));
+ tuple.dst.u.all =
+ ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all;
+ }
+
+ h = nf_conntrack_find_get(net, nf_ct_zone(ct), &tuple);
+ if (!h)
+ return 0;
+
+ /* Store status bits of the conntrack that is clashing to re-do NAT
+ * mangling according to what it has been done already to this packet.
+ */
+ status = ct->status;
+
+ /* Switch the skb over to the conntrack found in the hash. */
+ nf_ct_put(ct);
+ ct = nf_ct_tuplehash_to_ctrack(h);
+ nf_ct_set(skb, ct, ctinfo);
+
+ nat_hook = rcu_dereference(nf_nat_hook);
+ if (!nat_hook)
+ return 0;
+
+ if (status & IPS_SRC_NAT &&
+ nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_SRC,
+ IP_CT_DIR_ORIGINAL) == NF_DROP)
+ return -1;
+
+ if (status & IPS_DST_NAT &&
+ nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_DST,
+ IP_CT_DIR_ORIGINAL) == NF_DROP)
+ return -1;
+
+ return 0;
+}
+
+/* This packet is coming from userspace via nf_queue, complete the packet
+ * processing after the helper invocation in nf_confirm().
+ *
+ * Applies pending TCP sequence adjustments and confirms the conntrack.
+ * Returns 0 on success / nothing to do, -1 when the packet must be
+ * dropped.
+ */
+static int nf_confirm_cthelper(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo)
+{
+ const struct nf_conntrack_helper *helper;
+ const struct nf_conn_help *help;
+ int protoff;
+
+ help = nfct_help(ct);
+ if (!help)
+ return 0;
+
+ /* The help extension may exist without a helper currently assigned
+ * (e.g. helper not yet bound, or unregistered); rcu_dereference()
+ * can legitimately return NULL here, so check before dereferencing
+ * helper->flags (matches the upstream stable fix).
+ */
+ helper = rcu_dereference(help->helper);
+ if (!helper)
+ return 0;
+
+ if (!(helper->flags & NF_CT_HELPER_F_USERSPACE))
+ return 0;
+
+ /* Locate the L4 header so seqadj can rewrite TCP sequence numbers. */
+ switch (nf_ct_l3num(ct)) {
+ case NFPROTO_IPV4:
+ protoff = skb_network_offset(skb) + ip_hdrlen(skb);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case NFPROTO_IPV6: {
+ __be16 frag_off;
+ u8 pnum;
+
+ pnum = ipv6_hdr(skb)->nexthdr;
+ protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum,
+ &frag_off);
+ /* Skip non-first fragments: no L4 header to adjust. */
+ if (protoff < 0 || (frag_off & htons(~0x7)) != 0)
+ return 0;
+ break;
+ }
+#endif
+ default:
+ return 0;
+ }
+
+ if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
+ !nf_is_loopback_packet(skb)) {
+ if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) {
+ NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
+ return -1;
+ }
+ }
+
+ /* We've seen it coming out the other side: confirm it */
+ return nf_conntrack_confirm(skb) == NF_DROP ? -1 : 0;
+}
+
+/* nf_ct_hook->update entry point for packets re-injected via nf_queue:
+ * fix up an unconfirmed conntrack that may have lost a clash race, then
+ * run the post-helper confirmation step.  Returns 0 or a negative error.
+ */
+static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
+{
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct;
+ int err;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (!ct)
+ return 0;
+
+ if (!nf_ct_is_confirmed(ct)) {
+ err = __nf_conntrack_update(net, skb, ct, ctinfo);
+ if (err < 0)
+ return err;
+
+ /* __nf_conntrack_update() may have switched skb to another ct. */
+ ct = nf_ct_get(skb, &ctinfo);
+ }
+
+ return nf_confirm_cthelper(skb, ct, ctinfo);
+}
+
+/* nf_ct_hook->get_tuple_skb: copy the conntrack tuple for @skb into
+ * @dst_tuple.  Uses the ct already attached to the skb when present;
+ * otherwise parses the packet (IPv4 only here) and looks the tuple up
+ * in the default zone.  Returns true on success.
+ */
+static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+ const struct sk_buff *skb)
+{
+ const struct nf_conntrack_tuple *src_tuple;
+ const struct nf_conntrack_tuple_hash *hash;
+ struct nf_conntrack_tuple srctuple;
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (ct) {
+ src_tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
+ memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
+ return true;
+ }
+
+ if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
+ NFPROTO_IPV4, dev_net(skb->dev),
+ &srctuple))
+ return false;
+
+ hash = nf_conntrack_find_get(dev_net(skb->dev),
+ &nf_ct_zone_dflt,
+ &srctuple);
+ if (!hash)
+ return false;
+
+ /* Return the tuple of the opposite direction to the one we matched. */
+ ct = nf_ct_tuplehash_to_ctrack(hash);
+ src_tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
+ memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
+ nf_ct_put(ct);
+
+ return true;
+}
+
+/* Bring out ya dead! */
+/* Walk the conntrack hash from *bucket onward and return the first ct
+ * for which @iter returns nonzero, with its refcount bumped; *bucket is
+ * updated so the caller can resume the scan.  Returns NULL when the
+ * table is exhausted.  Takes the per-bucket spinlock with BHs disabled.
+ */
+static struct nf_conn *
+get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
+ void *data, unsigned int *bucket)
+{
+ struct nf_conntrack_tuple_hash *h;
+ struct nf_conn *ct;
+ struct hlist_nulls_node *n;
+ spinlock_t *lockp;
+
+ for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
+ struct hlist_nulls_head *hslot = &nf_conntrack_hash[*bucket];
+
+ if (hlist_nulls_empty(hslot))
+ continue;
+
+ lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
+ local_bh_disable();
+ nf_conntrack_lock(lockp);
+ hlist_nulls_for_each_entry(h, n, hslot, hnnode) {
+ if (NF_CT_DIRECTION(h) != IP_CT_DIR_REPLY)
+ continue;
+ /* All nf_conn objects are added to hash table twice, one
+ * for original direction tuple, once for the reply tuple.
+ *
+ * Exception: In the IPS_NAT_CLASH case, only the reply
+ * tuple is added (the original tuple already existed for
+ * a different object).
+ *
+ * We only need to call the iterator once for each
+ * conntrack, so we just use the 'reply' direction
+ * tuple while iterating.
+ */
+ ct = nf_ct_tuplehash_to_ctrack(h);
+ if (iter(ct, data))
+ goto found;
+ }
+ spin_unlock(lockp);
+ local_bh_enable();
+ cond_resched();
+ }
+
+ return NULL;
+found:
+ /* Hold a reference while still under the bucket lock. */
+ atomic_inc(&ct->ct_general.use);
+ spin_unlock(lockp);
+ local_bh_enable();
+ return ct;
+}
+
+/* Delete every conntrack in the hash for which @iter returns nonzero,
+ * reporting each deletion with @portid/@report.  Sleeps; serialized by
+ * nf_conntrack_mutex against concurrent resize/cleanup.
+ */
+static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
+ void *data, u32 portid, int report)
+{
+ unsigned int bucket = 0;
+ struct nf_conn *ct;
+
+ might_sleep();
+
+ mutex_lock(&nf_conntrack_mutex);
+ while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
+ /* Time to push up daises... */
+
+ nf_ct_delete(ct, portid, report);
+ nf_ct_put(ct);
+ cond_resched();
+ }
+ mutex_unlock(&nf_conntrack_mutex);
+}
+
+/* Bundles a user iterator with the netns it should be restricted to. */
+struct iter_data {
+ int (*iter)(struct nf_conn *i, void *data);
+ void *data;
+ struct net *net;
+};
+
+/* Iterator adapter: invoke the wrapped iterator only for conntracks
+ * belonging to the requested network namespace.
+ */
+static int iter_net_only(struct nf_conn *i, void *data)
+{
+ struct iter_data *d = data;
+
+ if (!net_eq(d->net, nf_ct_net(i)))
+ return 0;
+
+ return d->iter(i, d->data);
+}
+
+/* Mark every conntrack on each CPU's unconfirmed list as dying so it
+ * will not be inserted into the main hash table when confirmed.
+ */
+static void
+__nf_ct_unconfirmed_destroy(struct net *net)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct nf_conntrack_tuple_hash *h;
+ struct hlist_nulls_node *n;
+ struct ct_pcpu *pcpu;
+
+ pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
+
+ spin_lock_bh(&pcpu->lock);
+ hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
+ struct nf_conn *ct;
+
+ ct = nf_ct_tuplehash_to_ctrack(h);
+
+ /* we cannot call iter() on unconfirmed list, the
+ * owning cpu can reallocate ct->ext at any time.
+ */
+ set_bit(IPS_DYING_BIT, &ct->status);
+ }
+ spin_unlock_bh(&pcpu->lock);
+ cond_resched();
+ }
+}
+
+/* Public wrapper: mark unconfirmed conntracks of @net as dying, drop
+ * packets parked in nf_queue, and wait a grace period so in-flight
+ * packets see the dying bit.  No-op when the netns has no conntracks.
+ */
+void nf_ct_unconfirmed_destroy(struct net *net)
+{
+ might_sleep();
+
+ if (atomic_read(&net->ct.count) > 0) {
+ __nf_ct_unconfirmed_destroy(net);
+ nf_queue_nf_hook_drop(net);
+ synchronize_net();
+ }
+}
+EXPORT_SYMBOL_GPL(nf_ct_unconfirmed_destroy);
+
+/* Delete all conntracks of @net for which @iter returns nonzero.
+ * Wraps nf_ct_iterate_cleanup() with a netns filter; @portid/@report
+ * are forwarded to the deletion events.
+ */
+void nf_ct_iterate_cleanup_net(struct net *net,
+ int (*iter)(struct nf_conn *i, void *data),
+ void *data, u32 portid, int report)
+{
+ struct iter_data d;
+
+ might_sleep();
+
+ if (atomic_read(&net->ct.count) == 0)
+ return;
+
+ d.iter = iter;
+ d.data = data;
+ d.net = net;
+
+ nf_ct_iterate_cleanup(iter_net_only, &d, portid, report);
+}
+EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup_net);
+
+/**
+ * nf_ct_iterate_destroy - destroy unconfirmed conntracks and iterate table
+ * @iter: callback to invoke for each conntrack
+ * @data: data to pass to @iter
+ *
+ * Like nf_ct_iterate_cleanup, but first marks conntracks on the
+ * unconfirmed list as dying (so they will not be inserted into
+ * main table).
+ *
+ * Can only be called in module exit path.
+ */
+void
+nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data)
+{
+ struct net *net;
+
+ /* Phase 1: neutralize unconfirmed entries in every namespace. */
+ down_read(&net_rwsem);
+ for_each_net(net) {
+ if (atomic_read(&net->ct.count) == 0)
+ continue;
+ __nf_ct_unconfirmed_destroy(net);
+ nf_queue_nf_hook_drop(net);
+ }
+ up_read(&net_rwsem);
+
+ /* Need to wait for netns cleanup worker to finish, if its
+ * running -- it might have deleted a net namespace from
+ * the global list, so our __nf_ct_unconfirmed_destroy() might
+ * not have affected all namespaces.
+ */
+ net_ns_barrier();
+
+ /* a conntrack could have been unlinked from unconfirmed list
+ * before we grabbed pcpu lock in __nf_ct_unconfirmed_destroy().
+ * This makes sure its inserted into conntrack table.
+ */
+ synchronize_net();
+
+ /* Phase 2: sweep the main hash table. */
+ nf_ct_iterate_cleanup(iter, data, 0, 0);
+}
+EXPORT_SYMBOL_GPL(nf_ct_iterate_destroy);
+
+/* Iterator: match every conntrack that belongs to the given netns. */
+static int kill_all(struct nf_conn *i, void *data)
+{
+ return net_eq(nf_ct_net(i), data);
+}
+
+/* Free a conntrack hash table.
+ *
+ * nf_ct_alloc_hashtable() in this file obtains the table with
+ * kvcalloc(), i.e. the memory comes from either the slab allocator or
+ * vmalloc.  The previous free_pages() fallback was only valid for
+ * page-allocator memory and would corrupt memory when handed a
+ * kmalloc'ed table; kvfree() correctly handles both cases (and is what
+ * the rest of this file already uses to free the table).
+ *
+ * @size is retained for API compatibility with existing callers; it is
+ * no longer needed because kvfree() does not require the size.
+ */
+void nf_ct_free_hashtable(void *hash, unsigned int size)
+{
+ (void)size;
+ kvfree(hash);
+}
+EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
+
+/* Begin global conntrack teardown: tell the GC worker to exit and
+ * detach the REJECT-target attach hook.
+ */
+void nf_conntrack_cleanup_start(void)
+{
+ conntrack_gc_work.exiting = true;
+ RCU_INIT_POINTER(ip_ct_attach, NULL);
+}
+
+/* Final global teardown: stop the GC worker, free the hash table and
+ * shut down every extension subsystem in reverse init order.
+ */
+void nf_conntrack_cleanup_end(void)
+{
+ RCU_INIT_POINTER(nf_ct_hook, NULL);
+ cancel_delayed_work_sync(&conntrack_gc_work.dwork);
+ kvfree(nf_conntrack_hash);
+
+ nf_conntrack_proto_fini();
+ nf_conntrack_seqadj_fini();
+ nf_conntrack_labels_fini();
+ nf_conntrack_helper_fini();
+ nf_conntrack_timeout_fini();
+ nf_conntrack_ecache_fini();
+ nf_conntrack_tstamp_fini();
+ nf_conntrack_acct_fini();
+ nf_conntrack_expect_fini();
+
+ kmem_cache_destroy(nf_conntrack_cachep);
+}
+
+/*
+ * Mishearing the voices in his head, our hero wonders how he's
+ * supposed to kill the mall.
+ */
+/* Per-netns teardown for a single namespace: delegate to the batched
+ * list variant with a one-element list.
+ */
+void nf_conntrack_cleanup_net(struct net *net)
+{
+ LIST_HEAD(single);
+
+ list_add(&net->exit_list, &single);
+ nf_conntrack_cleanup_net_list(&single);
+}
+
+/* Batched per-netns teardown: kill every conntrack in each exiting
+ * namespace (retrying until the counts drop to zero), then release the
+ * per-netns subsystems and per-cpu data.
+ */
+void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
+{
+ int busy;
+ struct net *net;
+
+ /*
+ * This makes sure all current packets have passed through
+ * netfilter framework. Roll on, two-stage module
+ * delete...
+ */
+ synchronize_net();
+i_see_dead_people:
+ busy = 0;
+ list_for_each_entry(net, net_exit_list, exit_list) {
+ nf_ct_iterate_cleanup(kill_all, net, 0, 0);
+ if (atomic_read(&net->ct.count) != 0)
+ busy = 1;
+ }
+ /* Some conntracks still hold references: yield and retry. */
+ if (busy) {
+ schedule();
+ goto i_see_dead_people;
+ }
+
+ list_for_each_entry(net, net_exit_list, exit_list) {
+ nf_conntrack_proto_pernet_fini(net);
+ nf_conntrack_ecache_pernet_fini(net);
+ nf_conntrack_expect_pernet_fini(net);
+ free_percpu(net->ct.stat);
+ free_percpu(net->ct.pcpu_lists);
+ }
+}
+
+/* Allocate a nulls hash table.  *sizep is rounded up to a whole number
+ * of pages worth of buckets and updated in place; when @nulls is set,
+ * each bucket is initialized as an hlist_nulls head carrying its index.
+ * Returns NULL on overflow or allocation failure.
+ */
+void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
+{
+ struct hlist_nulls_head *hash;
+ unsigned int nr_slots, i;
+
+ /* Reject sizes whose byte count would overflow unsigned int. */
+ if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
+ return NULL;
+
+ BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
+ nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
+
+ hash = kvcalloc(nr_slots, sizeof(struct hlist_nulls_head), GFP_KERNEL);
+
+ if (hash && nulls)
+ for (i = 0; i < nr_slots; i++)
+ INIT_HLIST_NULLS_HEAD(&hash[i], i);
+
+ return hash;
+}
+EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
+
+/* Resize the global conntrack hash table to @hashsize buckets: allocate
+ * the new table, then rehash every entry under all bucket locks with
+ * the generation seqcount held so lookups can detect the swap.
+ * Returns 0 on success (including "already that size") or -errno.
+ */
+int nf_conntrack_hash_resize(unsigned int hashsize)
+{
+ int i, bucket;
+ unsigned int old_size;
+ struct hlist_nulls_head *hash, *old_hash;
+ struct nf_conntrack_tuple_hash *h;
+ struct nf_conn *ct;
+
+ if (!hashsize)
+ return -EINVAL;
+
+ hash = nf_ct_alloc_hashtable(&hashsize, 1);
+ if (!hash)
+ return -ENOMEM;
+
+ mutex_lock(&nf_conntrack_mutex);
+ old_size = nf_conntrack_htable_size;
+ if (old_size == hashsize) {
+ mutex_unlock(&nf_conntrack_mutex);
+ kvfree(hash);
+ return 0;
+ }
+
+ local_bh_disable();
+ nf_conntrack_all_lock();
+ write_seqcount_begin(&nf_conntrack_generation);
+
+ /* Lookups in the old hash might happen in parallel, which means we
+ * might get false negatives during connection lookup. New connections
+ * created because of a false negative won't make it into the hash
+ * though since that required taking the locks.
+ */
+
+ for (i = 0; i < nf_conntrack_htable_size; i++) {
+ while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {
+ h = hlist_nulls_entry(nf_conntrack_hash[i].first,
+ struct nf_conntrack_tuple_hash, hnnode);
+ ct = nf_ct_tuplehash_to_ctrack(h);
+ hlist_nulls_del_rcu(&h->hnnode);
+ /* Rehash with the new table size. */
+ bucket = __hash_conntrack(nf_ct_net(ct),
+ &h->tuple, hashsize);
+ hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
+ }
+ }
+ old_size = nf_conntrack_htable_size;
+ old_hash = nf_conntrack_hash;
+
+ nf_conntrack_hash = hash;
+ nf_conntrack_htable_size = hashsize;
+
+ write_seqcount_end(&nf_conntrack_generation);
+ nf_conntrack_all_unlock();
+ local_bh_enable();
+
+ mutex_unlock(&nf_conntrack_mutex);
+
+ /* Wait for readers still walking the old table before freeing it. */
+ synchronize_net();
+ kvfree(old_hash);
+ return 0;
+}
+
+/* module_param setter for the hashsize parameter.  Only permitted from
+ * the initial netns; before the table exists (boot time) it just stores
+ * the value, afterwards it triggers a live resize.
+ */
+int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp)
+{
+ unsigned int hashsize;
+ int rc;
+
+ if (current->nsproxy->net_ns != &init_net)
+ return -EOPNOTSUPP;
+
+ /* On boot, we can set this without any fancy locking. */
+ if (!nf_conntrack_hash)
+ return param_set_uint(val, kp);
+
+ rc = kstrtouint(val, 0, &hashsize);
+ if (rc)
+ return rc;
+
+ return nf_conntrack_hash_resize(hashsize);
+}
+
+/* Worst-case combined size of all conntrack extensions; used only for
+ * the build-time check that extension offsets fit in the u8 fields of
+ * struct nf_ct_ext (see nf_conntrack_init_start()).
+ */
+static __always_inline unsigned int total_extension_size(void)
+{
+ /* remember to add new extensions below */
+ BUILD_BUG_ON(NF_CT_EXT_NUM > 9);
+
+ return sizeof(struct nf_ct_ext) +
+ sizeof(struct nf_conn_help)
+#if IS_ENABLED(CONFIG_NF_NAT)
+ + sizeof(struct nf_conn_nat)
+#endif
+ + sizeof(struct nf_conn_seqadj)
+ + sizeof(struct nf_conn_acct)
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ + sizeof(struct nf_conntrack_ecache)
+#endif
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ + sizeof(struct nf_conn_tstamp)
+#endif
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+ + sizeof(struct nf_conn_timeout)
+#endif
+#ifdef CONFIG_NF_CONNTRACK_LABELS
+ + sizeof(struct nf_conn_labels)
+#endif
+#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
+ + sizeof(struct nf_conn_synproxy)
+#endif
+ ;
+}
+
+/* Global (non-pernet) conntrack initialization: size and allocate the
+ * hash table, create the nf_conn slab cache, bring up every extension
+ * subsystem, and start the garbage-collection worker.  Returns 0 or
+ * -errno with everything unwound on failure.
+ */
+int nf_conntrack_init_start(void)
+{
+ unsigned long nr_pages = totalram_pages();
+ int max_factor = 8;
+ int ret = -ENOMEM;
+ int i;
+
+ /* struct nf_ct_ext uses u8 to store offsets/size */
+ BUILD_BUG_ON(total_extension_size() > 255u);
+
+ seqcount_spinlock_init(&nf_conntrack_generation,
+ &nf_conntrack_locks_all_lock);
+
+ for (i = 0; i < CONNTRACK_LOCKS; i++)
+ spin_lock_init(&nf_conntrack_locks[i]);
+
+ if (!nf_conntrack_htable_size) {
+ /* Idea from tcp.c: use 1/16384 of memory.
+ * On i386: 32MB machine has 512 buckets.
+ * >= 1GB machines have 16384 buckets.
+ * >= 4GB machines have 65536 buckets.
+ */
+ nf_conntrack_htable_size
+ = (((nr_pages << PAGE_SHIFT) / 16384)
+ / sizeof(struct hlist_head));
+ if (nr_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
+ nf_conntrack_htable_size = 65536;
+ else if (nr_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
+ nf_conntrack_htable_size = 16384;
+ if (nf_conntrack_htable_size < 32)
+ nf_conntrack_htable_size = 32;
+
+ /* Use a max. factor of four by default to get the same max as
+ * with the old struct list_heads. When a table size is given
+ * we use the old value of 8 to avoid reducing the max.
+ * entries. */
+ max_factor = 4;
+ }
+
+ nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1);
+ if (!nf_conntrack_hash)
+ return -ENOMEM;
+
+ nf_conntrack_max = max_factor * nf_conntrack_htable_size;
+
+ /* SLAB_TYPESAFE_BY_RCU: nf_conn objects may be recycled under RCU. */
+ nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
+ sizeof(struct nf_conn),
+ NFCT_INFOMASK + 1,
+ SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
+ if (!nf_conntrack_cachep)
+ goto err_cachep;
+
+ ret = nf_conntrack_expect_init();
+ if (ret < 0)
+ goto err_expect;
+
+ ret = nf_conntrack_acct_init();
+ if (ret < 0)
+ goto err_acct;
+
+ ret = nf_conntrack_tstamp_init();
+ if (ret < 0)
+ goto err_tstamp;
+
+ ret = nf_conntrack_ecache_init();
+ if (ret < 0)
+ goto err_ecache;
+
+ ret = nf_conntrack_timeout_init();
+ if (ret < 0)
+ goto err_timeout;
+
+ ret = nf_conntrack_helper_init();
+ if (ret < 0)
+ goto err_helper;
+
+ ret = nf_conntrack_labels_init();
+ if (ret < 0)
+ goto err_labels;
+
+ ret = nf_conntrack_seqadj_init();
+ if (ret < 0)
+ goto err_seqadj;
+
+ ret = nf_conntrack_proto_init();
+ if (ret < 0)
+ goto err_proto;
+
+ conntrack_gc_work_init(&conntrack_gc_work);
+ queue_delayed_work(system_power_efficient_wq, &conntrack_gc_work.dwork, HZ);
+
+ return 0;
+
+ /* Error unwind: tear down in reverse order of initialization. */
+err_proto:
+ nf_conntrack_seqadj_fini();
+err_seqadj:
+ nf_conntrack_labels_fini();
+err_labels:
+ nf_conntrack_helper_fini();
+err_helper:
+ nf_conntrack_timeout_fini();
+err_timeout:
+ nf_conntrack_ecache_fini();
+err_ecache:
+ nf_conntrack_tstamp_fini();
+err_tstamp:
+ nf_conntrack_acct_fini();
+err_acct:
+ nf_conntrack_expect_fini();
+err_expect:
+ kmem_cache_destroy(nf_conntrack_cachep);
+err_cachep:
+ kvfree(nf_conntrack_hash);
+ return ret;
+}
+
+/* Ops published via the nf_ct_hook RCU pointer for the netfilter core. */
+static struct nf_ct_hook nf_conntrack_hook = {
+ .update = nf_conntrack_update,
+ .destroy = destroy_conntrack,
+ .get_tuple_skb = nf_conntrack_get_tuple_skb,
+};
+
+/* Publish the conntrack hooks once initialization has fully succeeded. */
+void nf_conntrack_init_end(void)
+{
+ /* For use by REJECT target */
+ RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
+ RCU_INIT_POINTER(nf_ct_hook, &nf_conntrack_hook);
+}
+
+/*
+ * We need to use special "null" values, not used in hash table
+ */
+#define UNCONFIRMED_NULLS_VAL ((1<<30)+0)
+#define DYING_NULLS_VAL ((1<<30)+1)
+
+/* Per-netns conntrack initialization: per-cpu unconfirmed/dying lists,
+ * statistics, and the per-netns extension subsystems.  Returns 0 or
+ * -errno with partial state freed on failure.
+ */
+int nf_conntrack_init_net(struct net *net)
+{
+ int ret = -ENOMEM;
+ int cpu;
+
+ BUILD_BUG_ON(IP_CT_UNTRACKED == IP_CT_NUMBER);
+ BUILD_BUG_ON_NOT_POWER_OF_2(CONNTRACK_LOCKS);
+ atomic_set(&net->ct.count, 0);
+
+ net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
+ if (!net->ct.pcpu_lists)
+ goto err_stat;
+
+ for_each_possible_cpu(cpu) {
+ struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
+
+ spin_lock_init(&pcpu->lock);
+ INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
+ INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
+ }
+
+ net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
+ if (!net->ct.stat)
+ goto err_pcpu_lists;
+
+ ret = nf_conntrack_expect_pernet_init(net);
+ if (ret < 0)
+ goto err_expect;
+
+ nf_conntrack_acct_pernet_init(net);
+ nf_conntrack_tstamp_pernet_init(net);
+ nf_conntrack_ecache_pernet_init(net);
+ nf_conntrack_helper_pernet_init(net);
+ nf_conntrack_proto_pernet_init(net);
+
+ return 0;
+
+err_expect:
+ free_percpu(net->ct.stat);
+err_pcpu_lists:
+ free_percpu(net->ct.pcpu_lists);
+err_stat:
+ return ret;
+}
diff --git a/upstream/linux-5.10/net/netfilter/xt_DSCP.c b/upstream/linux-5.10/net/netfilter/xt_DSCP.c
new file mode 100755
index 0000000..eababc3
--- /dev/null
+++ b/upstream/linux-5.10/net/netfilter/xt_DSCP.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* x_tables module for setting the IPv4/IPv6 DSCP field, Version 1.8
+ *
+ * (C) 2002 by Harald Welte <laforge@netfilter.org>
+ * based on ipt_FTOS.c (C) 2000 by Matthew G. Marsh <mgm@paktronix.com>
+ *
+ * See RFC2474 for a description of the DSCP field within the IP Header.
+*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/dsfield.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_DSCP.h>
+
+MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
+MODULE_DESCRIPTION("Xtables: DSCP/TOS field modification");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_DSCP");
+MODULE_ALIAS("ip6t_DSCP");
+MODULE_ALIAS("ipt_TOS");
+MODULE_ALIAS("ip6t_TOS");
+
+static unsigned int
+dscp_tg(struct sk_buff *skb, const struct xt_action_param *par) /* DSCP target, IPv4: set the DSCP field to dinfo->dscp */
+{
+ const struct xt_DSCP_info *dinfo = par->targinfo;
+ u_int8_t dscp = ipv4_get_dsfield(ip_hdr(skb)) >> XT_DSCP_SHIFT; /* current DSCP value of the packet */
+
+ if (dscp != dinfo->dscp) { /* only touch the packet when the value actually changes */
+ if (skb_ensure_writable(skb, sizeof(struct iphdr))) /* unshare/make header writable; drop on failure */
+ return NF_DROP;
+
+ ipv4_change_dsfield(ip_hdr(skb), /* re-read header: skb data may have moved */
+ (__force __u8)(~XT_DSCP_MASK),
+ dinfo->dscp << XT_DSCP_SHIFT);
+
+ }
+ return XT_CONTINUE;
+}
+
+static unsigned int
+dscp_tg6(struct sk_buff *skb, const struct xt_action_param *par) /* DSCP target, IPv6: set the DSCP field to dinfo->dscp */
+{
+ const struct xt_DSCP_info *dinfo = par->targinfo;
+ u_int8_t dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> XT_DSCP_SHIFT; /* current DSCP value of the packet */
+
+ if (dscp != dinfo->dscp) { /* only touch the packet when the value actually changes */
+ if (skb_ensure_writable(skb, sizeof(struct ipv6hdr))) /* unshare/make header writable; drop on failure */
+ return NF_DROP;
+
+ ipv6_change_dsfield(ipv6_hdr(skb), /* re-read header: skb data may have moved */
+ (__force __u8)(~XT_DSCP_MASK),
+ dinfo->dscp << XT_DSCP_SHIFT);
+ }
+ return XT_CONTINUE;
+}
+
+static int dscp_tg_check(const struct xt_tgchk_param *par) /* checkentry: validate the user-supplied DSCP value */
+{
+ const struct xt_DSCP_info *info = par->targinfo;
+
+ if (info->dscp > XT_DSCP_MAX) /* DSCP value out of range */
+ return -EDOM;
+ return 0;
+}
+
+static unsigned int
+tos_tg(struct sk_buff *skb, const struct xt_action_param *par) /* TOS target, IPv4: tos = (tos & ~mask) ^ value */
+{
+ const struct xt_tos_target_info *info = par->targinfo;
+ struct iphdr *iph = ip_hdr(skb);
+ u_int8_t orig, nv;
+
+ orig = ipv4_get_dsfield(iph);
+ nv = (orig & ~info->tos_mask) ^ info->tos_value; /* clear masked bits, then XOR in the configured value */
+
+ if (orig != nv) { /* rewrite only when the result differs */
+ if (skb_ensure_writable(skb, sizeof(struct iphdr)))
+ return NF_DROP;
+ iph = ip_hdr(skb); /* header may have moved after skb_ensure_writable() */
+ ipv4_change_dsfield(iph, 0, nv); /* mask 0: replace the whole dsfield with nv */
+ }
+
+ return XT_CONTINUE;
+}
+
+static unsigned int
+tos_tg6(struct sk_buff *skb, const struct xt_action_param *par) /* TOS target, IPv6: tc = (tc & ~mask) ^ value */
+{
+ const struct xt_tos_target_info *info = par->targinfo;
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+ u_int8_t orig, nv;
+
+ orig = ipv6_get_dsfield(iph);
+ nv = (orig & ~info->tos_mask) ^ info->tos_value; /* clear masked bits, then XOR in the configured value */
+
+ if (orig != nv) { /* rewrite only when the result differs */
+ if (skb_ensure_writable(skb, sizeof(struct iphdr))) /* NOTE(review): checks sizeof(struct iphdr), not ipv6hdr - matches upstream xt_DSCP.c but looks inconsistent; confirm before changing */
+ return NF_DROP;
+ iph = ipv6_hdr(skb); /* header may have moved after skb_ensure_writable() */
+ ipv6_change_dsfield(iph, 0, nv); /* mask 0: replace the whole dsfield with nv */
+ }
+
+ return XT_CONTINUE;
+}
+
+static struct xt_target dscp_tg_reg[] __read_mostly = { /* DSCP (rev 0) and TOS (rev 1) targets, IPv4 + IPv6, mangle table only */
+ {
+ .name = "DSCP",
+ .family = NFPROTO_IPV4,
+ .checkentry = dscp_tg_check,
+ .target = dscp_tg,
+ .targetsize = sizeof(struct xt_DSCP_info),
+ .table = "mangle",
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "DSCP",
+ .family = NFPROTO_IPV6,
+ .checkentry = dscp_tg_check,
+ .target = dscp_tg6,
+ .targetsize = sizeof(struct xt_DSCP_info),
+ .table = "mangle",
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "TOS",
+ .revision = 1, /* revision 1: mask/XOR semantics via xt_tos_target_info */
+ .family = NFPROTO_IPV4,
+ .table = "mangle",
+ .target = tos_tg,
+ .targetsize = sizeof(struct xt_tos_target_info),
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "TOS",
+ .revision = 1,
+ .family = NFPROTO_IPV6,
+ .table = "mangle",
+ .target = tos_tg6,
+ .targetsize = sizeof(struct xt_tos_target_info),
+ .me = THIS_MODULE,
+ },
+};
+
+static int __init dscp_tg_init(void) /* module init: register all four targets as one array */
+{
+ return xt_register_targets(dscp_tg_reg, ARRAY_SIZE(dscp_tg_reg));
+}
+
+static void __exit dscp_tg_exit(void) /* module exit: unregister the target array */
+{
+ xt_unregister_targets(dscp_tg_reg, ARRAY_SIZE(dscp_tg_reg));
+}
+
+module_init(dscp_tg_init);
+module_exit(dscp_tg_exit);
diff --git a/upstream/linux-5.10/net/netfilter/xt_RATEEST.c b/upstream/linux-5.10/net/netfilter/xt_RATEEST.c
new file mode 100755
index 0000000..0d5c422
--- /dev/null
+++ b/upstream/linux-5.10/net/netfilter/xt_RATEEST.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * (C) 2007 Patrick McHardy <kaber@trash.net>
+ */
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/gen_stats.h>
+#include <linux/jhash.h>
+#include <linux/rtnetlink.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <net/gen_stats.h>
+#include <net/netlink.h>
+#include <net/netns/generic.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_RATEEST.h>
+#include <net/netfilter/xt_rateest.h>
+
+#define RATEEST_HSIZE 16 /* buckets in the per-netns estimator hash (power of two, masked in xt_rateest_hash) */
+
+struct xt_rateest_net { /* per-netns state: name -> struct xt_rateest hash table */
+ struct mutex hash_lock; /* serialises all hash insert/lookup/removal */
+ struct hlist_head hash[RATEEST_HSIZE];
+};
+
+static unsigned int xt_rateest_id; /* net_generic() id, filled in by register_pernet_subsys() */
+
+static unsigned int jhash_rnd __read_mostly; /* lazily seeded in checkentry via net_get_random_once() */
+
+static unsigned int xt_rateest_hash(const char *name) /* bucket index for an estimator name */
+{
+ return jhash(name, sizeof_field(struct xt_rateest, name), jhash_rnd) &
+ (RATEEST_HSIZE - 1); /* HSIZE is a power of two, so mask == modulo */
+}
+
+static void xt_rateest_hash_insert(struct xt_rateest_net *xn, /* add est to its bucket; caller holds xn->hash_lock */
+ struct xt_rateest *est)
+{
+ unsigned int h;
+
+ h = xt_rateest_hash(est->name);
+ hlist_add_head(&est->list, &xn->hash[h]);
+}
+
+static struct xt_rateest *__xt_rateest_lookup(struct xt_rateest_net *xn, /* find by name and take a ref; caller holds xn->hash_lock */
+ const char *name)
+{
+ struct xt_rateest *est;
+ unsigned int h;
+
+ h = xt_rateest_hash(name);
+ hlist_for_each_entry(est, &xn->hash[h], list) {
+ if (strcmp(est->name, name) == 0) {
+ est->refcnt++; /* reference is dropped via xt_rateest_put() */
+ return est;
+ }
+ }
+
+ return NULL; /* no estimator with that name in this netns */
+}
+
+struct xt_rateest *xt_rateest_lookup(struct net *net, const char *name) /* locked wrapper: returns a referenced estimator or NULL */
+{
+ struct xt_rateest_net *xn = net_generic(net, xt_rateest_id);
+ struct xt_rateest *est;
+
+ mutex_lock(&xn->hash_lock);
+ est = __xt_rateest_lookup(xn, name);
+ mutex_unlock(&xn->hash_lock);
+ return est;
+}
+EXPORT_SYMBOL_GPL(xt_rateest_lookup);
+
+void xt_rateest_put(struct net *net, struct xt_rateest *est) /* drop one reference; on last put unhash and RCU-free */
+{
+ struct xt_rateest_net *xn = net_generic(net, xt_rateest_id);
+
+ mutex_lock(&xn->hash_lock);
+ if (--est->refcnt == 0) {
+ hlist_del(&est->list);
+ gen_kill_estimator(&est->rate_est); /* stop the core rate estimator before freeing */
+ /*
+ * gen_estimator est_timer() might access est->lock or bstats,
+ * wait a RCU grace period before freeing 'est'
+ */
+ kfree_rcu(est, rcu);
+ }
+ mutex_unlock(&xn->hash_lock);
+}
+EXPORT_SYMBOL_GPL(xt_rateest_put);
+
+static unsigned int
+xt_rateest_tg(struct sk_buff *skb, const struct xt_action_param *par) /* target hook: account skb length/count into the estimator */
+{
+ const struct xt_rateest_target_info *info = par->targinfo;
+ struct gnet_stats_basic_packed *stats = &info->est->bstats;
+
+ spin_lock_bh(&info->est->lock); /* bstats are read by the estimator core under this lock */
+ stats->bytes += skb->len;
+ stats->packets++;
+ spin_unlock_bh(&info->est->lock);
+
+ return XT_CONTINUE; /* accounting only, never drops or steals the packet */
+}
+
+static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par) /* checkentry: reuse an existing estimator by name or create one */
+{
+ struct xt_rateest_net *xn = net_generic(par->net, xt_rateest_id);
+ struct xt_rateest_target_info *info = par->targinfo;
+ struct xt_rateest *est;
+ struct { /* netlink attribute + estimator config handed to gen_new_estimator() */
+ struct nlattr opt;
+ struct gnet_estimator est;
+ } cfg;
+ int ret;
+
+ if (strnlen(info->name, sizeof(est->name)) >= sizeof(est->name)) /* name must be NUL-terminated within the field */
+ return -ENAMETOOLONG;
+
+ net_get_random_once(&jhash_rnd, sizeof(jhash_rnd)); /* seed the hash once, lazily */
+
+ mutex_lock(&xn->hash_lock);
+ est = __xt_rateest_lookup(xn, info->name); /* takes a reference on success */
+ if (est) {
+ mutex_unlock(&xn->hash_lock);
+ /*
+ * Reuse requires parameters: both zero (unspecified) is
+ * rejected, and they must match the existing estimator.
+ */
+ if ((!info->interval && !info->ewma_log) ||
+ (info->interval != est->params.interval ||
+ info->ewma_log != est->params.ewma_log)) {
+ xt_rateest_put(par->net, est); /* drop the reference taken by the lookup */
+ return -EINVAL;
+ }
+ info->est = est;
+ return 0;
+ }
+
+ ret = -ENOMEM;
+ est = kzalloc(sizeof(*est), GFP_KERNEL);
+ if (!est)
+ goto err1;
+
+ strlcpy(est->name, info->name, sizeof(est->name));
+ spin_lock_init(&est->lock);
+ est->refcnt = 1; /* initial reference owned by this rule */
+ est->params.interval = info->interval;
+ est->params.ewma_log = info->ewma_log;
+
+ cfg.opt.nla_len = nla_attr_size(sizeof(cfg.est));
+ cfg.opt.nla_type = TCA_STATS_RATE_EST;
+ cfg.est.interval = info->interval;
+ cfg.est.ewma_log = info->ewma_log;
+
+ ret = gen_new_estimator(&est->bstats, NULL, &est->rate_est, /* start the core rate estimator over est->bstats */
+ &est->lock, NULL, &cfg.opt);
+ if (ret < 0)
+ goto err2;
+
+ info->est = est;
+ xt_rateest_hash_insert(xn, est); /* still under hash_lock, as required */
+ mutex_unlock(&xn->hash_lock);
+ return 0;
+
+err2:
+ kfree(est);
+err1:
+ mutex_unlock(&xn->hash_lock);
+ return ret;
+}
+
+static void xt_rateest_tg_destroy(const struct xt_tgdtor_param *par) /* rule teardown: drop the reference taken in checkentry */
+{
+ struct xt_rateest_target_info *info = par->targinfo;
+
+ xt_rateest_put(par->net, info->est);
+}
+
+static struct xt_target xt_rateest_tg_reg __read_mostly = { /* RATEEST target, any protocol family */
+ .name = "RATEEST",
+ .revision = 0,
+ .family = NFPROTO_UNSPEC,
+ .target = xt_rateest_tg,
+ .checkentry = xt_rateest_tg_checkentry,
+ .destroy = xt_rateest_tg_destroy,
+ .targetsize = sizeof(struct xt_rateest_target_info),
+ .usersize = offsetof(struct xt_rateest_target_info, est), /* kernel-only 'est' pointer is not copied to userspace */
+ .me = THIS_MODULE,
+};
+
+static __net_init int xt_rateest_net_init(struct net *net) /* per-netns setup: init mutex and empty hash buckets */
+{
+ struct xt_rateest_net *xn = net_generic(net, xt_rateest_id);
+ int i;
+
+ mutex_init(&xn->hash_lock);
+ for (i = 0; i < ARRAY_SIZE(xn->hash); i++)
+ INIT_HLIST_HEAD(&xn->hash[i]);
+ return 0; /* cannot fail */
+}
+
+static struct pernet_operations xt_rateest_net_ops = { /* per-netns storage allocated/initialised by the core (.size) */
+ .init = xt_rateest_net_init,
+ .id = &xt_rateest_id,
+ .size = sizeof(struct xt_rateest_net),
+};
+
+static int __init xt_rateest_tg_init(void) /* module init: pernet state first, then the target */
+{
+ int err = register_pernet_subsys(&xt_rateest_net_ops);
+
+ if (err)
+ return err;
+ return xt_register_target(&xt_rateest_tg_reg); /* NOTE(review): pernet subsys stays registered if this fails - matches upstream; confirm before changing */
+}
+
+static void __exit xt_rateest_tg_fini(void) /* module exit: unregister in reverse order of init */
+{
+ xt_unregister_target(&xt_rateest_tg_reg);
+ unregister_pernet_subsys(&xt_rateest_net_ops);
+}
+
+
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Xtables: packet rate estimator");
+MODULE_ALIAS("ipt_RATEEST");
+MODULE_ALIAS("ip6t_RATEEST");
+module_init(xt_rateest_tg_init);
+module_exit(xt_rateest_tg_fini);
diff --git a/upstream/linux-5.10/net/netfilter/xt_hl.c b/upstream/linux-5.10/net/netfilter/xt_hl.c
new file mode 100755
index 0000000..c1a70f8
--- /dev/null
+++ b/upstream/linux-5.10/net/netfilter/xt_hl.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * IP tables module for matching the value of the TTL
+ * (C) 2000,2001 by Harald Welte <laforge@netfilter.org>
+ *
+ * Hop Limit matching module
+ * (C) 2001-2002 Maciej Soltysiak <solt@dns.toxicfilms.tv>
+ */
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter_ipv4/ipt_ttl.h>
+#include <linux/netfilter_ipv6/ip6t_hl.h>
+
+MODULE_AUTHOR("Maciej Soltysiak <solt@dns.toxicfilms.tv>");
+MODULE_DESCRIPTION("Xtables: Hoplimit/TTL field match");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_ttl");
+MODULE_ALIAS("ip6t_hl");
+
+static bool ttl_mt(const struct sk_buff *skb, struct xt_action_param *par) /* "ttl" match, IPv4: compare header TTL per info->mode */
+{
+ const struct ipt_ttl_info *info = par->matchinfo;
+ const u8 ttl = ip_hdr(skb)->ttl;
+
+ switch (info->mode) {
+ case IPT_TTL_EQ:
+ return ttl == info->ttl;
+ case IPT_TTL_NE:
+ return ttl != info->ttl;
+ case IPT_TTL_LT:
+ return ttl < info->ttl;
+ case IPT_TTL_GT:
+ return ttl > info->ttl;
+ }
+
+ return false; /* unknown mode: never match */
+}
+
+static bool hl_mt6(const struct sk_buff *skb, struct xt_action_param *par) /* "hl" match, IPv6: compare hop limit per info->mode */
+{
+ const struct ip6t_hl_info *info = par->matchinfo;
+ const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+ switch (info->mode) {
+ case IP6T_HL_EQ:
+ return ip6h->hop_limit == info->hop_limit;
+ case IP6T_HL_NE:
+ return ip6h->hop_limit != info->hop_limit;
+ case IP6T_HL_LT:
+ return ip6h->hop_limit < info->hop_limit;
+ case IP6T_HL_GT:
+ return ip6h->hop_limit > info->hop_limit;
+ }
+
+ return false; /* unknown mode: never match */
+}
+
+static struct xt_match hl_mt_reg[] __read_mostly = { /* "ttl" for IPv4, "hl" for IPv6 */
+ {
+ .name = "ttl",
+ .revision = 0,
+ .family = NFPROTO_IPV4,
+ .match = ttl_mt,
+ .matchsize = sizeof(struct ipt_ttl_info),
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "hl",
+ .revision = 0,
+ .family = NFPROTO_IPV6,
+ .match = hl_mt6,
+ .matchsize = sizeof(struct ip6t_hl_info),
+ .me = THIS_MODULE,
+ },
+};
+
+static int __init hl_mt_init(void) /* module init: register both matches as one array */
+{
+ return xt_register_matches(hl_mt_reg, ARRAY_SIZE(hl_mt_reg));
+}
+
+static void __exit hl_mt_exit(void) /* module exit: unregister the match array */
+{
+ xt_unregister_matches(hl_mt_reg, ARRAY_SIZE(hl_mt_reg));
+}
+
+module_init(hl_mt_init);
+module_exit(hl_mt_exit);
diff --git a/upstream/linux-5.10/net/netfilter/xt_tcpmss.c b/upstream/linux-5.10/net/netfilter/xt_tcpmss.c
new file mode 100755
index 0000000..37704ab
--- /dev/null
+++ b/upstream/linux-5.10/net/netfilter/xt_tcpmss.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Kernel module to match TCP MSS values. */
+
+/* Copyright (C) 2000 Marc Boucher <marc@mbsi.ca>
+ * Portions (C) 2005 by Harald Welte <laforge@netfilter.org>
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter/xt_tcpmss.h>
+#include <linux/netfilter/x_tables.h>
+
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
+MODULE_DESCRIPTION("Xtables: TCP MSS match");
+MODULE_ALIAS("ipt_tcpmss");
+MODULE_ALIAS("ip6t_tcpmss");
+
+static bool
+tcpmss_mt(const struct sk_buff *skb, struct xt_action_param *par) /* match if the TCP MSS option is within [mss_min, mss_max], XOR invert */
+{
+ const struct xt_tcpmss_match_info *info = par->matchinfo;
+ const struct tcphdr *th;
+ struct tcphdr _tcph;
+ /* tcp.doff is only 4 bits, ie. max 15 * 4 bytes */
+ const u_int8_t *op;
+ u8 _opt[15 * 4 - sizeof(_tcph)];
+ unsigned int i, optlen;
+
+ /* If we don't have the whole header, drop packet. */
+ th = skb_header_pointer(skb, par->thoff, sizeof(_tcph), &_tcph);
+ if (th == NULL)
+ goto dropit;
+
+ /* Malformed. */
+ if (th->doff*4 < sizeof(*th)) /* data offset smaller than the fixed header is invalid */
+ goto dropit;
+
+ optlen = th->doff*4 - sizeof(*th); /* total length of the options area */
+ if (!optlen)
+ goto out; /* no options at all -> no MSS option */
+
+ /* Truncated options. */
+ op = skb_header_pointer(skb, par->thoff + sizeof(*th), optlen, _opt);
+ if (op == NULL)
+ goto dropit;
+
+ for (i = 0; i < optlen; ) { /* walk TLV-encoded TCP options */
+ if (op[i] == TCPOPT_MSS
+ && (optlen - i) >= TCPOLEN_MSS
+ && op[i+1] == TCPOLEN_MSS) {
+ u_int16_t mssval;
+
+ mssval = (op[i+2] << 8) | op[i+3]; /* MSS value is big-endian in the option */
+
+ return (mssval >= info->mss_min &&
+ mssval <= info->mss_max) ^ info->invert;
+ }
+ if (op[i] < 2) /* EOL(0)/NOP(1) are single-byte options */
+ i++;
+ else
+ i += op[i+1] ? : 1; /* advance by length byte; guard against zero to avoid an infinite loop */
+ }
+out:
+ return info->invert; /* no MSS option found */
+
+dropit:
+ par->hotdrop = true; /* malformed/truncated header: drop the packet */
+ return false;
+}
+
+static struct xt_match tcpmss_mt_reg[] __read_mostly = { /* same handler for IPv4 and IPv6, TCP only (.proto) */
+ {
+ .name = "tcpmss",
+ .family = NFPROTO_IPV4,
+ .match = tcpmss_mt,
+ .matchsize = sizeof(struct xt_tcpmss_match_info),
+ .proto = IPPROTO_TCP,
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "tcpmss",
+ .family = NFPROTO_IPV6,
+ .match = tcpmss_mt,
+ .matchsize = sizeof(struct xt_tcpmss_match_info),
+ .proto = IPPROTO_TCP,
+ .me = THIS_MODULE,
+ },
+};
+
+static int __init tcpmss_mt_init(void) /* module init: register both matches as one array */
+{
+ return xt_register_matches(tcpmss_mt_reg, ARRAY_SIZE(tcpmss_mt_reg));
+}
+
+static void __exit tcpmss_mt_exit(void) /* module exit: unregister the match array */
+{
+ xt_unregister_matches(tcpmss_mt_reg, ARRAY_SIZE(tcpmss_mt_reg));
+}
+
+module_init(tcpmss_mt_init);
+module_exit(tcpmss_mt_exit);
diff --git a/upstream/pub/include/infra/pub_debug_info.h b/upstream/pub/include/infra/pub_debug_info.h
new file mode 100755
index 0000000..10357aa
--- /dev/null
+++ b/upstream/pub/include/infra/pub_debug_info.h
@@ -0,0 +1,67 @@
+#ifndef _PUB_DEBUG_INFO_H_
+#define _PUB_DEBUG_INFO_H_
+
+#include <stdarg.h>
+
+#define DEBUG_INFO_DEV_PATH "/dev/debug_info" /* device node used for debug-info recording */
+
+/* AP-side and CAP-side PS/KERNEL/DRIVER/FS/APP module IDs use [START, END] ranges; 100 IDs are reserved per category */
+#define MODULE_ID_PS_START (1)
+#define MODULE_ID_PS_NAS (1)
+#define MODULE_ID_PS_RRC (2)
+#define MODULE_ID_PS_L2 (3)
+#define MODULE_ID_PS_UICC (99)
+#define MODULE_ID_PS_END (100)
+
+#define MODULE_ID_AP_KERNEL_START (101)
+#define MODULE_ID_AP_KERNEL_END (200)
+
+#define MODULE_ID_CAP_KERNEL_START (201)
+#define MODULE_ID_CAP_KERNEL_END (300)
+
+#define MODULE_ID_AP_DRIVES_START (301)
+#define MODULE_ID_AP_USB (301)
+#define MODULE_ID_AP_REBOOT (302)
+#define MODULE_ID_AP_TSC (303)
+#define MODULE_ID_AP_PSM (304)
+#define MODULE_ID_AP_NAND (305)
+#define MODULE_ID_AP_MMC (306)
+#define MODULE_ID_AP_WIFI (307)
+#define MODULE_ID_AP_DRIVES_END (400)
+
+#define MODULE_ID_CAP_DRIVES_START (401)
+#define MODULE_ID_CAP_USB (401)
+#define MODULE_ID_CAP_TSC (402)
+#define MODULE_ID_CAP_PSM (403)
+#define MODULE_ID_CAP_NAND (404)
+#define MODULE_ID_CAP_SPI (405)
+#define MODULE_ID_CAP_MMC (406)
+#define MODULE_ID_CAP_UART (407)
+#define MODULE_ID_CAP_DRIVES_END (500)
+
+#define MODULE_ID_AP_FS_START (501)
+#define MODULE_ID_AP_JFFS2 (501)
+#define MODULE_ID_AP_FS_END (600)
+
+#define MODULE_ID_CAP_FS_START (601)
+#define MODULE_ID_CAP_FS_END (700)
+
+#define MODULE_ID_AP_APP_START (701)
+#define MODULE_ID_AP_FOTA (701)
+#define MODULE_ID_AP_FS_CHECK (702)
+#define MODULE_ID_AP_APP_END (800)
+
+#define MODULE_ID_CAP_APP_START (801)
+#define MODULE_ID_CAP_FOTA (801)
+#define MODULE_ID_CAP_FS_CHECK (802)
+#define MODULE_ID_CAP_APP_END (900)
+
+#if defined(_USE_ZXIC_DEBUG_INFO) && !defined(CONFIG_SYSTEM_RECOVERY)
+int sc_debug_info_vrecord(unsigned int id, const char *format, va_list args);
+int sc_debug_info_record(unsigned int id, const char *format, ...);
+#else
+static inline int sc_debug_info_vrecord(unsigned int id, const char *format, va_list args) { return 0; } /* no-op stub when recording is compiled out */
+static inline int sc_debug_info_record(unsigned int id, const char *format, ...) { return 0; } /* no-op stub */
+#endif
+
+#endif
\ No newline at end of file
diff --git a/upstream/pub/include/ps_phy/atipsevent.h b/upstream/pub/include/ps_phy/atipsevent.h
new file mode 100755
index 0000000..2bd5386
--- /dev/null
+++ b/upstream/pub/include/ps_phy/atipsevent.h
@@ -0,0 +1,1677 @@
+/*****************************************************************
+* Copyright (C) 2016 ZTE Corporation. All rights reserved.
+* Module name:
+* File name: atipsevent.h
+* Purpose: ATI-related message (event) ID definitions
+* Version: V1.0
+*****************************************************************/
+#ifndef ZPS_ATI_PSECENT_DEF_H
+#define ZPS_ATI_PSECENT_DEF_H
+
+/* Protocol-stack subsystem: events are exchanged with the SDL process, whose event IDs are 16-bit, so only the low 16 bits of a PS event ID are significant; the high 16 bits are unused */
+#define EVENT_PS_BASE (DWORD)0x0000A000
+#define EVENT_PS_END (DWORD)(EVENT_PS_BASE + 0x00005f3f)
+
+/**************************************************PS msg range start (5530)********************************************************/
+/* Range of messages UICC exposes to other modules (200 IDs) */
+#define AP_UICC_EVENT_BASE (DWORD)EVENT_PS_BASE
+#define AP_UICC_RSP_EVENT (DWORD)(AP_UICC_EVENT_BASE + 100)
+#define AP_UICC_EVENT_END (DWORD)(AP_UICC_RSP_EVENT + 99)
+
+/* Range of messages MMIA exposes to other modules (1625 IDs) */
+#define AP_MMIA_EVENT_BASE (DWORD)(AP_UICC_EVENT_END + 1)
+
+#define AP_MMIA_EVENT_MM_BASE (DWORD)AP_MMIA_EVENT_BASE
+#define AP_MMIA_MM_RSP_EVENT (DWORD)(AP_MMIA_EVENT_MM_BASE + 100)
+#define AP_MMIA_EVENT_MM_END (DWORD)(AP_MMIA_MM_RSP_EVENT + 99)
+
+#define AP_MMIA_EVENT_CC_BASE (DWORD)(AP_MMIA_EVENT_MM_END + 1)
+#define AP_MMIA_CC_RSP_EVENT (DWORD)(AP_MMIA_EVENT_CC_BASE + 100)
+#define AP_MMIA_EVENT_CC_END (DWORD)(AP_MMIA_CC_RSP_EVENT + 99)
+
+#define AP_MMIA_EVENT_SMS_BASE (DWORD)(AP_MMIA_EVENT_CC_END + 1)
+#define AP_MMIA_SMS_RSP_EVENT (DWORD)(AP_MMIA_EVENT_SMS_BASE + 100)
+#define AP_MMIA_EVENT_SMS_END (DWORD)(AP_MMIA_SMS_RSP_EVENT + 99)
+
+#define AP_MMIA_EVENT_SS_BASE (DWORD)(AP_MMIA_EVENT_SMS_END + 1)
+#define AP_MMIA_SS_RSP_EVENT (DWORD)(AP_MMIA_EVENT_SS_BASE + 50)
+#define AP_MMIA_EVENT_SS_END (DWORD)(AP_MMIA_SS_RSP_EVENT + 49)
+
+#define AP_MMIA_EVENT_SM_BASE (DWORD)(AP_MMIA_EVENT_SS_END + 1)
+#define AP_MMIA_SM_RSP_EVENT (DWORD)(AP_MMIA_EVENT_SM_BASE + 100)
+#define AP_MMIA_EVENT_SM_END (DWORD)(AP_MMIA_SM_RSP_EVENT + 99)
+
+#define AP_MMIA_EVENT_ESM_BASE (DWORD)(AP_MMIA_EVENT_SM_END + 1)
+#define AP_MMIA_ESM_RSP_EVENT (DWORD)(AP_MMIA_EVENT_ESM_BASE + 50)
+#define AP_MMIA_EVENT_ESM_END (DWORD)(AP_MMIA_ESM_RSP_EVENT + 49)
+
+#define AP_MMIA_EVENT_UICC_BASE (DWORD)(AP_MMIA_EVENT_ESM_END + 1)
+#define AP_MMIA_UICC_RSP_EVENT (DWORD)(AP_MMIA_EVENT_UICC_BASE + 100)
+#define AP_MMIA_EVENT_UICC_END (DWORD)(AP_MMIA_UICC_RSP_EVENT + 99)
+
+#define AP_MMIA_EVENT_USAT_BASE (DWORD)(AP_MMIA_EVENT_UICC_END + 1)
+#define AP_MMIA_USAT_RSP_EVENT (DWORD)(AP_MMIA_EVENT_USAT_BASE + 5)
+#define AP_MMIA_EVENT_USAT_END (DWORD)(AP_MMIA_USAT_RSP_EVENT + 4)
+
+#define AP_MMIA_EVENT_CBS_BASE (DWORD)(AP_MMIA_EVENT_USAT_END + 1)
+#define AP_MMIA_CBS_RSP_EVENT (DWORD)(AP_MMIA_EVENT_CBS_BASE + 5)
+#define AP_MMIA_EVENT_CBS_END (DWORD)(AP_MMIA_CBS_RSP_EVENT + 9)
+
+#define AP_MMIA_EVENT_PB_BASE (DWORD)(AP_MMIA_EVENT_CBS_END + 1)
+#define AP_MMIA_PB_RSP_EVENT (DWORD)(AP_MMIA_EVENT_PB_BASE + 100)
+#define AP_MMIA_EVENT_PB_END (DWORD)(AP_MMIA_PB_RSP_EVENT + 99)
+
+#define AP_MMIA_EVENT_EM_BASE (DWORD)(AP_MMIA_EVENT_PB_END + 1)
+#define AP_MMIA_EM_RSP_EVENT (DWORD)(AP_MMIA_EVENT_EM_BASE + 50)
+#define AP_MMIA_EVENT_EM_END (DWORD)(AP_MMIA_EM_RSP_EVENT + 49)
+
+#define AP_MMIA_EVENT_OTHER_BASE (DWORD)(AP_MMIA_EVENT_EM_END + 1)
+#define AP_MMIA_OTHER_RSP_EVENT (DWORD)(AP_MMIA_EVENT_OTHER_BASE + 50)
+#define AP_MMIA_EVENT_OTHER_END (DWORD)(AP_MMIA_OTHER_RSP_EVENT + 49)
+
+#define AP_MMIA_EVENT_END (DWORD)AP_MMIA_EVENT_OTHER_END
+
+
+/* ATI <-> PDI message range (20 IDs) */
+#define ATI_PDI_EVENT_BASE (DWORD)(EVENT_PS_BASE + 1850)
+#define ATI_PDI_RSP_EVENT (DWORD)(ATI_PDI_EVENT_BASE + 10)
+#define ATI_PDI_EVENT_END (DWORD)(ATI_PDI_RSP_EVENT + 9)
+
+/* ATI <-> CSD message range (10 IDs) */
+#define ATI_CSD_EVENT_BASE (DWORD)(ATI_PDI_EVENT_END + 1)
+#define ATI_CSD_RSP_EVENT (DWORD)(ATI_CSD_EVENT_BASE + 5)
+#define ATI_CSD_EVENT_END (DWORD)(ATI_CSD_RSP_EVENT + 4)
+
+/* MMIA <-> UMM/CC/SS/SMS/SM/UICC/ESM/AS message range (690 IDs) */
+#define MMIA_NAS_EVENT_BASE (DWORD)(EVENT_PS_BASE + 1880)
+
+#define MMIA_UMM_EVENT_BASE (DWORD)MMIA_NAS_EVENT_BASE
+#define MMIA_UMM_RSP_EVENT (DWORD)(MMIA_UMM_EVENT_BASE + 50)
+#define MMIA_UMM_EVENT_END (DWORD)(MMIA_UMM_RSP_EVENT + 49)
+
+#define MMIA_CC_EVENT_BASE (DWORD)(MMIA_UMM_EVENT_END + 1)
+#define MMIA_CC_RSP_EVENT (DWORD)(MMIA_CC_EVENT_BASE + 50)
+#define MMIA_CC_EVENT_END (DWORD)(MMIA_CC_RSP_EVENT + 49)
+
+#define MMIA_SMS_EVENT_BASE (DWORD)(MMIA_CC_EVENT_END + 1)
+#define MMIA_SMS_RSP_EVENT (DWORD)(MMIA_SMS_EVENT_BASE + 50)
+#define MMIA_SMS_EVENT_END (DWORD)(MMIA_SMS_RSP_EVENT + 49)
+
+#define MMIA_SS_EVENT_BASE (DWORD)(MMIA_SMS_EVENT_END + 1)
+#define MMIA_SS_RSP_EVENT (DWORD)(MMIA_SS_EVENT_BASE + 50)
+#define MMIA_SS_EVENT_END (DWORD)(MMIA_SS_RSP_EVENT + 49)
+
+#define MMIA_SM_EVENT_BASE (DWORD)(MMIA_SS_EVENT_END + 1)
+#define MMIA_SM_RSP_EVENT (DWORD)(MMIA_SM_EVENT_BASE + 50)
+#define MMIA_SM_EVENT_END (DWORD)(MMIA_SM_RSP_EVENT + 49)
+
+#define MMIA_ESM_EVENT_BASE (DWORD)(MMIA_SM_EVENT_END + 1)
+#define MMIA_ESM_RSP_EVENT (DWORD)(MMIA_ESM_EVENT_BASE + 15)
+#define MMIA_ESM_EVENT_END (DWORD)(MMIA_ESM_RSP_EVENT + 14)
+
+#define MMIA_CBS_EVENT_BASE (DWORD)(MMIA_ESM_EVENT_END + 1)
+#define MMIA_CBS_RSP_EVENT (DWORD)(MMIA_CBS_EVENT_BASE + 15)
+#define MMIA_CBS_EVENT_END (DWORD)(MMIA_CBS_RSP_EVENT + 14)
+
+#define MMIA_SNDCP_EVENT_BASE (DWORD)(MMIA_CBS_EVENT_END + 1)
+#define MMIA_SNDCP_RSP_EVENT (DWORD)(MMIA_SNDCP_EVENT_BASE + 15)
+#define MMIA_SNDCP_EVENT_END (DWORD)(MMIA_SNDCP_RSP_EVENT + 14)
+
+#define MMIA_NAS_EVENT_END (DWORD)MMIA_SNDCP_EVENT_END
+
+#define MMIA_AS_EVENT_BASE (DWORD)(MMIA_NAS_EVENT_END + 1)
+#define MMIA_AS_RSP_EVENT (DWORD)(MMIA_AS_EVENT_BASE + 50)
+#define MMIA_AS_EVENT_END (DWORD)(MMIA_AS_RSP_EVENT + 49)
+
+/*** Messages ported over from SIG_CODE.H (660 IDs) ***/
+#define EVENT_PS_GSM_NORMAL_BASE (DWORD)(EVENT_PS_BASE + 4300)
+
+#define LAPDM_EVENT_BASE (DWORD)EVENT_PS_GSM_NORMAL_BASE
+#define LAPDM_EVENT_END (DWORD)(LAPDM_EVENT_BASE + 19)
+
+#define GRR_EVENT_BASE (DWORD)(LAPDM_EVENT_END + 1)
+#define GRR_EVENT_END (DWORD)(GRR_EVENT_BASE + 199)
+
+#define GMAC_EVENT_BASE (DWORD)(GRR_EVENT_END + 1)
+#define GMAC_EVENT_END (DWORD)(GMAC_EVENT_BASE + 69)
+
+#define GRLC_EVENT_BASE (DWORD)(GMAC_EVENT_END + 1)
+#define GRLC_EVENT_END (DWORD)(GRLC_EVENT_BASE + 69)
+
+#define GLLC_EVENT_BASE (DWORD)(GRLC_EVENT_END + 1)
+#define GLLC_EVENT_END (DWORD)(GLLC_EVENT_BASE + 49)
+
+#define SNDCP_EVENT_BASE (DWORD)(GLLC_EVENT_END + 1)
+#define SNDCP_EVENT_END (DWORD)(SNDCP_EVENT_BASE + 49)
+
+#define GRRC_EVENT_BASE (DWORD)(SNDCP_EVENT_END + 1)
+#define GRRC_EVENT_END (DWORD)(GRRC_EVENT_BASE + 49)
+
+#define GSMA_EVENT_BASE (DWORD)(GRRC_EVENT_END + 1)
+#define GSMA_EVENT_END (DWORD)(GSMA_EVENT_BASE + 149)
+
+#define EVENT_PS_GSM_NORMAL_END (DWORD)GSMA_EVENT_END
+
+/* ATI timer message range */
+#define TIMER_EVENT_BASE (DWORD)(EVENT_PS_BASE + 5000)
+
+#define MMIA_TIMER_EVENT_BASE (DWORD)TIMER_EVENT_BASE
+#define MMIA_TIMER_EVENT_END (DWORD)(MMIA_TIMER_EVENT_BASE + 19)
+
+/**************************************************TOOLS & ROADTEST msg range start********************************************************/
+/* Standard signalling event ID range (100 IDs) */
+#define STANDARD_SIG_EVENT_BASE (DWORD)(EVENT_PS_BASE + 7000)
+#define STANDARD_SIG_EVENT_END (DWORD)(STANDARD_SIG_EVENT_BASE + 99)
+
+/* Drive-test (road test) software event ID range (800 IDs) */
+#define PS_ROADTEST_EVENT_BASE (DWORD)(EVENT_PS_BASE + 7100)
+#define PS_ROADTEST_RSP_EVENT (DWORD)(PS_ROADTEST_EVENT_BASE + 200)
+#define PS_ROADTEST_EVENT_END (DWORD)(PS_ROADTEST_RSP_EVENT + 599)
+
+/* LTE BTrunk event ID range */
+#define EVENT_PS_LTE_BTRUNK_BASE (DWORD)(EVENT_PS_BASE + 15000)
+#define EVENT_PS_LTE_BTRUNK_END (DWORD)(EVENT_PS_BASE + 16383)
+/**************************************************TOOLS & ROADTEST msg range end***********************************************************/
+
+/**************************************************PS test msg range start********************************************************/
+/* Protocol-stack internal test message range (130 IDs) */
+#define PRI_TEST_EVENT_BASE (DWORD)(PS_ROADTEST_EVENT_END + 1)
+#define PRI_TEST_EVENT_END (DWORD)(PRI_TEST_EVENT_BASE + 19)
+
+#define TAF_TEST_EVENT_BASE (DWORD)(PRI_TEST_EVENT_END + 1)
+#define TAF_TEST_EVENT_END (DWORD)(TAF_TEST_EVENT_BASE + 9)
+
+#define TC_EVENT_BASE (DWORD)(TAF_TEST_EVENT_END + 1)
+#define TC_EVENT_END (DWORD)(TC_EVENT_BASE + 29)
+
+#define NCBS_EVENT_BASE (DWORD)(TC_EVENT_END + 1)
+#define NCBS_EVENT_END (DWORD)(NCBS_EVENT_BASE + 19)
+
+#define USIR_TEST_EVENT_BASE (DWORD)(NCBS_EVENT_END + 1)
+#define USIR_TEST_EVENT_END (DWORD)(USIR_TEST_EVENT_BASE + 9)
+
+#define NURLC_EVENT_BASE (DWORD)(USIR_TEST_EVENT_END + 1)
+#define NURLC_EVENT_END (DWORD)(NURLC_EVENT_BASE + 19)
+
+#define NUMAC_EVENT_BASE (DWORD)(NURLC_EVENT_END + 1)
+#define NUMAC_EVENT_END (DWORD)(NUMAC_EVENT_BASE + 9)
+
+#define NPDCP_EVENT_BASE (DWORD)(NUMAC_EVENT_END + 1)
+#define NPDCP_EVENT_END (DWORD)(NPDCP_EVENT_BASE + 9)
+
+/* GSM test-process message range (300 IDs) */
+#define EVENT_PS_GSM_SIMU_BASE (DWORD)(NPDCP_EVENT_END + 1)
+#define L1SIMU_EVENT_BASE (DWORD)EVENT_PS_GSM_SIMU_BASE
+#define L1SIMU_EVENT_END (DWORD)(L1SIMU_EVENT_BASE + 49)
+
+#define NLAPDM_EVENT_BASE (DWORD)(L1SIMU_EVENT_END + 1)
+#define NLAPDM_EVENT_END (DWORD)(NLAPDM_EVENT_BASE + 49)
+
+#define NGMAC_EVENT_BASE (DWORD)(NLAPDM_EVENT_END + 1)
+#define NGMAC_EVENT_END (DWORD)(NGMAC_EVENT_BASE + 99)
+
+#define NLLC_EVENT_BASE (DWORD)(NGMAC_EVENT_END + 1)
+#define NLLC_EVENT_END (DWORD)(NLLC_EVENT_BASE + 49)
+
+#define NRLC_EVENT_BASE (DWORD)(NLLC_EVENT_END + 1)
+#define NRLC_EVENT_END (DWORD)(NRLC_EVENT_BASE + 49)
+
+#define EVENT_PS_GSM_SIMU_END (DWORD)NRLC_EVENT_END
+
+/* GSM AS peer-layer signalling-trace event ID range (100 IDs) */
+#define SIGTRACE_EVENT_BASE (DWORD)(EVENT_PS_GSM_SIMU_END + 1)
+
+/* L1G signalling-trace event ID range (50 IDs) */
+#define L1G_ST_EVENT_BASE (DWORD)(SIGTRACE_EVENT_BASE + 100)
+#define L1G_ST_EVENT_END (DWORD)(L1G_ST_EVENT_BASE + 49)
+
+#define SIGTRACE_EVENT_END (DWORD)L1G_ST_EVENT_END
+
+/* GRR function-trace message range (100 IDs) */
+#define GSM_FUNC_EVENT_BASE (DWORD)(SIGTRACE_EVENT_END + 1)
+#define GRR_FUNC_EVENT_BASE (DWORD)GSM_FUNC_EVENT_BASE
+#define GRR_FUNC_EVENT_END (DWORD)(GRR_FUNC_EVENT_BASE + 99)
+
+/* Function signalling-trace message range (60 IDs) */
+#define FUNC_EVENT_BASE (DWORD)(GRR_FUNC_EVENT_END + 1)
+#define URRC_FUNC_EVENT_BASE (DWORD)FUNC_EVENT_BASE
+#define URRC_FUNC_EVENT_END (DWORD)(URRC_FUNC_EVENT_BASE + 49)
+
+#define TAF_FUNC_EVENT_BASE (DWORD)(URRC_FUNC_EVENT_END + 1)
+#define TAF_FUNC_EVENT_END (DWORD)(TAF_FUNC_EVENT_BASE + 9)
+#define FUNC_EVENT_END (DWORD)TAF_FUNC_EVENT_END
+
+/* Global-variable fetch event ID range (150 IDs) */
+#define GVAR_EVENT_BASE (DWORD)(FUNC_EVENT_END + 1)
+#define GVAR_EVENT_END (DWORD)(GVAR_EVENT_BASE + 149)
+/* ========================================================================
+ Message IDs provided by UICC to external modules
+======================================================================== */
+#define AP_UICC_INIT_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 0)
+#define AP_UICC_VERIFY_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 1)
+#define AP_UICC_READ_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 2)
+#define AP_UICC_UPDATE_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 3)
+#define AP_UICC_AUTH_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 4)
+#define AP_UICC_PWROFF_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 5)
+#define AP_UICC_PIN_REMAIN_NUM_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 6)
+#define AP_UICC_USAT_ENVELOP_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 7)
+#define AP_UICC_USAT_TERMNL_RSP_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 8)
+#define AP_UICC_USAT_TERMNL_PROF_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 9)
+#define AP_UICC_PIN_ENABLE_QUERY_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 10)
+#define AP_UICC_PIN_STAT_QUERY_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 11)
+#define AP_UICC_PIN_APPL_SET_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 12)
+#define AP_UICC_PIN_APPL_READ_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 13)
+#define AP_UICC_CARD_MODE_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 14)
+#define AP_UICC_WRITE_ITEM_IND_EV (DWORD)(AP_UICC_EVENT_BASE + 15)
+#define AP_UICC_UPDATE_ITEM_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 16)
+#define AP_UICC_VERIFY_PIN2_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 17)
+#define AP_UICC_ZPUK_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 18)
+#define AP_UICC_INCREASE_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 19)
+#define AP_UICC_RESET_ACM_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 20)
+#define AP_UICC_UNBLOCK_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 21)
+#define AP_UICC_CHANGE_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 22)
+#define AP_UICC_FACILITY_PIN_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 23)
+#define AP_UICC_REFRESH_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 24)
+#define AP_UICC_DEACTEND_IND_EV (DWORD)(AP_UICC_EVENT_BASE + 25)
+#define AP_UICC_FILECHANGEEND_IND_EV (DWORD)(AP_UICC_EVENT_BASE + 26)
+#define AP_UICC_TO_READ_CARD_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 27)
+#define AP_UICC_CSIM_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 28)
+#define AP_UICC_AP_PWROFF_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 29)
+#define AP_UICC_CCHO_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 30)
+#define AP_UICC_CCHC_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 31)
+#define AP_UICC_CGLA_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 32)
+#define AP_UICC_CRSM_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 33)
+#define AP_UICC_MOVECARD_IND_EV (DWORD)(AP_UICC_EVENT_BASE + 34)
+#define AP_UICC_INSERTCARD_IND_EV (DWORD)(AP_UICC_EVENT_BASE + 35)
+#define AP_UICC_GET_INFO_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 36)
+#define AP_UICC_EFSTATUS_QUERY_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 37)
+#define AP_UICC_EFSTATUS_MODIFY_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 38)
+#define AP_UICC_PREPERSONREC_SEARCH_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 39)
+#define AP_UICC_PB_SEARCH_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 40)
+#define AP_UICC_READ_TO_PSDEV_IND_EV (DWORD)(AP_UICC_EVENT_BASE + 45)
+#define AP_UICC_GET_REC_NUM_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 46)
+#define AP_UICC_AIR_AUTH_RSP_IND_EV (DWORD)(AP_UICC_EVENT_BASE + 47)
+#define AP_UICC_READ_EID_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 48)
+#define AP_UICC_READ_REC_DIRECT_REQ_EV (DWORD)(AP_UICC_EVENT_BASE + 50)
+
+#define AP_UICC_INIT_CNF_EV (DWORD)(AP_UICC_RSP_EVENT + 0)
+#define AP_UICC_UICCOK_IND_EV (DWORD)(AP_UICC_RSP_EVENT + 1)
+#define AP_UICC_INIT_IND_EV (DWORD)(AP_UICC_RSP_EVENT + 2)
+#define AP_UICC_SLOT_IND_EV (DWORD)(AP_UICC_RSP_EVENT + 3)
+#define AP_UICC_READ_CNF_EV (DWORD)(AP_UICC_RSP_EVENT + 4)
+#define AP_UICC_UPDATE_CNF_EV (DWORD)(AP_UICC_RSP_EVENT + 5)
+#define AP_UICC_AUTH_RSP_EV (DWORD)(AP_UICC_RSP_EVENT + 6)
+#define AP_UICC_AUTH_FAIL_IND_EV (DWORD)(AP_UICC_RSP_EVENT + 7)
+#define AP_UICC_NOCARD_IND_EV (DWORD)(AP_UICC_RSP_EVENT + 8)
+#define AP_UICC_PIN_REMAIN_NUM_CNF_EV (DWORD)(AP_UICC_RSP_EVENT + 9)
+#define AP_UICC_USAT_ENVELOP_CNF_EV (DWORD)(AP_UICC_RSP_EVENT + 10)
+#define AP_UICC_USAT_COMMON_CNF_EV (DWORD)(AP_UICC_RSP_EVENT + 11)
+#define AP_UICC_USAT_PROV_CMD_IND_EV (DWORD)(AP_UICC_RSP_EVENT + 12)
+#define AP_UICC_PIN_ENABLE_QUERY_CNF_EV (DWORD)(AP_UICC_RSP_EVENT + 13)
+#define AP_UICC_PIN_STAT_QUERY_CNF_EV (DWORD)(AP_UICC_RSP_EVENT + 14)
+#define AP_UICC_PIN_APPL_SET_CNF_EV (DWORD)(AP_UICC_RSP_EVENT + 15)
+#define AP_UICC_PIN_APPL_READ_CNF_EV (DWORD)(AP_UICC_RSP_EVENT + 16)
+#define AP_UICC_CARD_MODE_CNF_EV (DWORD)(AP_UICC_RSP_EVENT + 17)
+#define AP_UICC_PWROFF_CNF_EV (DWORD)(AP_UICC_RSP_EVENT + 18)
+#define AP_UICC_COMMON_CNF_EV (DWORD)(AP_UICC_RSP_EVENT + 19)
+#define AP_UICC_UICC_UNSYNC_IND_EV (DWORD)(AP_UICC_RSP_EVENT + 20)
+#define AP_UICC_NO_PROC_NOTIFY_IND_EV (DWORD)(AP_UICC_RSP_EVENT + 21)
+#define AP_UICC_CARD_LOCK_STATUS_IND_EV (DWORD)(AP_UICC_RSP_EVENT + 22)
+#define AP_UICC_PWROFF_IND_EV (DWORD)(AP_UICC_RSP_EVENT + 23)
+#define AP_UICC_UPDATE_ITEM_CNF_EV (DWORD)(AP_UICC_RSP_EVENT + 24)
+#define AP_UICC_VERIFY_PIN2_CNF_EV (DWORD)(AP_UICC_RSP_EVENT + 25)
+#define AP_UICC_INCREASE_ACM_FAIL_IND_EV (DWORD)(AP_UICC_RSP_EVENT + 26)
+#define AP_UICC_FILECHANGE_IND_EV (DWORD)(AP_UICC_RSP_EVENT + 27)
+#define AP_UICC_CSIM_CNF_EV (DWORD)(AP_UICC_RSP_EVENT + 28)
+#define AP_UICC_ATR_IND_EV (DWORD)(AP_UICC_RSP_EVENT + 29)
+#define AP_UICC_CCHO_CNF_EV (DWORD)(AP_UICC_RSP_EVENT + 30)
+#define AP_UICC_CGLA_CNF_EV (DWORD)(AP_UICC_RSP_EVENT + 31)
+#define AP_UICC_CRSM_CNF_EV (DWORD)(AP_UICC_RSP_EVENT + 32)
+#define AP_UICC_USAT_FETCH_IND_EV (DWORD)(AP_UICC_RSP_EVENT + 33)
+#define AP_UICC_GET_INFO_CNF_EV (DWORD)(AP_UICC_RSP_EVENT + 34)
+#define AP_UICC_EFSTATUS_QUERY_CNF_EV (DWORD)(AP_UICC_RSP_EVENT + 35)
+#define AP_UICC_EFSTATUS_MODIFY_CNF_EV (DWORD)(AP_UICC_RSP_EVENT + 36)
+#define AP_UICC_PREPERSNREC_SRCH_CNF_EV (DWORD)(AP_UICC_RSP_EVENT + 37)
+#define AP_UICC_PB_SEARCH_CNF_EV (DWORD)(AP_UICC_RSP_EVENT + 38) /* 50 message IDs allocated for this range; 39 used so far */
+#define AP_UICC_REFRESH_HAPPEN_IND_EV (DWORD)(AP_UICC_RSP_EVENT + 39)
+#define AP_UICC_AIR_AUTH_REQ_IND_EV (DWORD)(AP_UICC_RSP_EVENT + 40)
+#define AP_UICC_READ_EID_CNF_EV (DWORD)(AP_UICC_RSP_EVENT + 41)
+#define AP_UICC_CCHC_CNF_EV (DWORD)(AP_UICC_RSP_EVENT + 42)
+#define AP_UICC_LOC_STAT_IND (DWORD)(AP_UICC_RSP_EVENT + 43)
+
+/* ========================================================================
+ Message number definitions exposed by MMIA
+======================================================================== */
+/* ========================================================================
+ AP-MMIA MM-related message numbers
+======================================================================== */
+#define AP_MMIA_CREG_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 0)
+#define AP_MMIA_CREG_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 1)
+#define AP_MMIA_COPS_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 2)
+#define AP_MMIA_COPS_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 3)
+#define AP_MMIA_COPS_TEST_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 4)
+#define AP_MMIA_CGATT_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 5)
+#define AP_MMIA_CGATT_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 6)
+#define AP_MMIA_CGREG_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 7)
+#define AP_MMIA_CGREG_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 8)
+#define AP_MMIA_CFUN_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 9)
+#define AP_MMIA_CFUN_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 10)
+#define AP_MMIA_CPLS_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 11)
+#define AP_MMIA_CPLS_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 12)
+#define AP_MMIA_CPOL_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 13)
+#define AP_MMIA_CPOL_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 14)
+#define AP_MMIA_CPOL_TEST_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 15)
+#define AP_MMIA_ZMMI_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 16)
+#define AP_MMIA_ZMMI_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 17)
+#define AP_MMIA_MODE_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 18)
+#define AP_MMIA_ZATT_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 19)
+#define AP_MMIA_ZATT_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 20)
+#define AP_MMIA_ZGAAT_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 21)
+#define AP_MMIA_ZGAAT_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 22)
+#define AP_MMIA_SYSINFO_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 23)
+#define AP_MMIA_ZACT_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 24)
+#define AP_MMIA_ZACT_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 25)
+#define AP_MMIA_MODE_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 26)
+#define AP_MMIA_SYSCONFIG_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 27)
+#define AP_MMIA_SYSCONFIG_READ_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 28)
+#define AP_MMIA_CEREG_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 29)
+#define AP_MMIA_CEREG_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 30)
+#define AP_MMIA_ZCSG_SEL_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 31)
+#define AP_MMIA_ZCSG_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 32)
+#define AP_MMIA_ZCSG_LIST_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 33)
+#define AP_MMIA_CEMODE_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 34)
+#define AP_MMIA_CEMODE_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 35)
+#define AP_MMIA_ZEACT_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 36)
+#define AP_MMIA_ZEACT_READ_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 37)
+#define AP_MMIA_CVMOD_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 38)
+#define AP_MMIA_CVMOD_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 39)
+#define AP_MMIA_CS_SRV_RSP_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 40)
+#define AP_MMIA_LTEBGPLMN_TESTREQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 41)
+#define AP_MMIA_SMSOVERIPNET_SETREQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 42)
+#define AP_MMIA_SMSOVERIPNET_QUERYREQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 43)
+#define AP_MMIA_FPLMN_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 44)
+#define AP_MMIA_FPLMN_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 45)
+#define AP_MMIA_FPLMN_TEST_REQ_EV (DWORD)(AP_MMIA_EVENT_MM_BASE + 46)
+
+#define AP_MMIA_CREG_IND_EV (DWORD)(AP_MMIA_MM_RSP_EVENT + 0)
+#define AP_MMIA_CGREG_IND_EV (DWORD)(AP_MMIA_MM_RSP_EVENT + 1)
+#define AP_MMIA_ZMMI_IND_EV (DWORD)(AP_MMIA_MM_RSP_EVENT + 2)
+#define AP_MMIA_COPS_TEST_CNF_EV (DWORD)(AP_MMIA_MM_RSP_EVENT + 3)
+#define AP_MMIA_CGATT_QUERY_CNF_EV (DWORD)(AP_MMIA_MM_RSP_EVENT + 4)
+#define AP_MMIA_CPOL_QUERY_CNF_EV (DWORD)(AP_MMIA_MM_RSP_EVENT + 5)
+#define AP_MMIA_ZATT_QUERY_CNF_EV (DWORD)(AP_MMIA_MM_RSP_EVENT + 6)
+#define AP_MMIA_MODE_IND_EV (DWORD)(AP_MMIA_MM_RSP_EVENT + 7)
+#define AP_MMIA_ZACT_QUERY_CNF_EV (DWORD)(AP_MMIA_MM_RSP_EVENT + 8)
+#define AP_MMIA_SYSCONFIG_READ_CNF_EV (DWORD)(AP_MMIA_MM_RSP_EVENT + 9)
+#define AP_MMIA_CEREG_IND_EV (DWORD)(AP_MMIA_MM_RSP_EVENT + 10)
+#define AP_MMIA_ZCSG_QUERY_CNF_EV (DWORD)(AP_MMIA_MM_RSP_EVENT + 11)
+#define AP_MMIA_ZCSG_LIST_CNF_EV (DWORD)(AP_MMIA_MM_RSP_EVENT + 12)
+#define AP_MMIA_CEMODE_QUERY_CNF_EV (DWORD)(AP_MMIA_MM_RSP_EVENT + 13)
+#define AP_MMIA_CVMOD_QUERY_CNF_EV (DWORD)(AP_MMIA_MM_RSP_EVENT + 14)
+#define AP_MMIA_CS_SRV_IND_EV (DWORD)(AP_MMIA_MM_RSP_EVENT + 15)
+#define AP_MMIA_LTEBGPLMN_TESTCNF_EV (DWORD)(AP_MMIA_MM_RSP_EVENT + 16)
+#define AP_MMIA_IMSVOPS_IND_EV (DWORD)(AP_MMIA_MM_RSP_EVENT + 17)
+#define AP_MMIA_FPLMN_QUERY_CNF_EV (DWORD)(AP_MMIA_MM_RSP_EVENT + 18)
+#define AP_MMIA_FPLMN_TEST_CNF_EV (DWORD)(AP_MMIA_MM_RSP_EVENT + 19)
+#define AP_MMIA_EMERBER_IND_EV (DWORD)(AP_MMIA_MM_RSP_EVENT + 20)
+#define AP_MMIA_EMERNUM_IND_EV (DWORD)(AP_MMIA_MM_RSP_EVENT + 21)
+#define AP_MMIA_PSCFGSTART_IND_EV (DWORD)(AP_MMIA_MM_RSP_EVENT + 22)
+#define AP_MMIA_PSCFGEND_IND_EV (DWORD)(AP_MMIA_MM_RSP_EVENT + 23)
+#define AP_MMIA_USER_CARD_SEL_IND_EV (DWORD)(AP_MMIA_MM_RSP_EVENT + 24)
+#define AP_MMIA_ZCOPS_TEST_CNF_EV (DWORD)(AP_MMIA_MM_RSP_EVENT + 25)
+#define AP_MMIA_TIMEZONE_IND_EV (DWORD)(AP_MMIA_MM_RSP_EVENT + 26)
+#define AP_MMIA_ZULRTIND_IND_EV (DWORD)(AP_MMIA_MM_RSP_EVENT + 27)
+/* ========================================================================
+ AP-MMIA CC-related message numbers
+======================================================================== */
+#define AP_MMIA_CC_SETUP_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 0)
+#define AP_MMIA_CC_ANSWER_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 1)
+#define AP_MMIA_CC_MODIFY_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 2)
+#define AP_MMIA_CC_STATE_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 3)
+#define AP_MMIA_CC_DISC_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 4)
+#define AP_MMIA_CC_DTMF_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 5)
+#define AP_MMIA_CC_CHLD_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 6)
+#define AP_MMIA_CC_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 7)
+#define AP_MMIA_CC_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 8)
+#define AP_MMIA_DS_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 9)
+#define AP_MMIA_DS_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 10)
+#define AP_MMIA_MOD_TO_MULTMEDIA_RSP_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 11)
+#define AP_MMIA_CC_MTC_RSP_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 12)
+#define AP_MMIA_DSCI_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 13)
+#define AP_MMIA_DSCI_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 14)
+#define AP_MMIA_CAOC_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 15)
+#define AP_MMIA_CAOC_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 16)
+#define AP_MMIA_CACM_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 17)
+#define AP_MMIA_CAMM_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 18)
+#define AP_MMIA_CPUC_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 19)
+#define AP_MMIA_CCWE_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 20)
+#define AP_MMIA_CACM_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 21)
+#define AP_MMIA_CAMM_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 22)
+#define AP_MMIA_CPUC_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 23)
+#define AP_MMIA_CCWE_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 24)
+#define AP_MMIA_CALL_LINE_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 25)
+#define AP_MMIA_CALL_LINE_QRY_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 26)
+
+#define AP_MMIA_CC_CBST_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 27)
+#define AP_MMIA_CC_CBST_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 28)
+#define AP_MMIA_CC_CCUG_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 29)
+#define AP_MMIA_CC_CCUG_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 30)
+#define AP_MMIA_CC_CMOD_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 31)
+#define AP_MMIA_CC_CMOD_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 32)
+#define AP_MMIA_CC_CR_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 33)
+#define AP_MMIA_CC_CR_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 34)
+#define AP_MMIA_CC_CRC_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 35)
+#define AP_MMIA_CC_CRC_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 36)
+#define AP_MMIA_CC_CSNS_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 37)
+#define AP_MMIA_CC_CSNS_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 38)
+#define AP_MMIA_CC_CSSN_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 39)
+#define AP_MMIA_CC_CSSN_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 40)
+#define AP_MMIA_CC_FCLASS_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 41)
+#define AP_MMIA_CC_FCLASS_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 42)
+#define AP_MMIA_SS_CHSN_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 43)
+#define AP_MMIA_SS_CHSN_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 44)
+#define AP_MMIA_SS_CRLP_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 45)
+#define AP_MMIA_SS_CRLP_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 46)
+#define AP_MMIA_SS_ETBM_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 47)
+#define AP_MMIA_SS_ETBM_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_CC_BASE + 48)
+
+#define AP_MMIA_CC_SETUP_CNF_EV (DWORD)(AP_MMIA_CC_RSP_EVENT + 0)
+#define AP_MMIA_CC_QUERY_CNF_EV (DWORD)(AP_MMIA_CC_RSP_EVENT + 1)
+#define AP_MMIA_CC_ANSWER_CNF_EV (DWORD)(AP_MMIA_CC_RSP_EVENT + 2)
+#define AP_MMIA_CC_MODIFY_CNF_EV (DWORD)(AP_MMIA_CC_RSP_EVENT + 3)
+#define AP_MMIA_CC_STATE_QUERY_CNF_EV (DWORD)(AP_MMIA_CC_RSP_EVENT + 4)
+#define AP_MMIA_CC_DISC_IND_EV (DWORD)(AP_MMIA_CC_RSP_EVENT + 5)
+#define AP_MMIA_DS_QUERY_CNF_EV (DWORD)(AP_MMIA_CC_RSP_EVENT + 6)
+#define AP_MMIA_COLP_IND_EV (DWORD)(AP_MMIA_CC_RSP_EVENT + 7)
+#define AP_MMIA_CR_IND_EV (DWORD)(AP_MMIA_CC_RSP_EVENT + 8)
+#define AP_MMIA_MT_CALL_SS_NOTIFY_IND_EV (DWORD)(AP_MMIA_CC_RSP_EVENT + 9)
+#define AP_MMIA_CLIP_IND_EV (DWORD)(AP_MMIA_CC_RSP_EVENT + 10)
+#define AP_MMIA_CC_PROC_INFO_IND_EV (DWORD)(AP_MMIA_CC_RSP_EVENT + 11)
+#define AP_MMIA_RING_IND_EV (DWORD)(AP_MMIA_CC_RSP_EVENT + 12)
+#define AP_MMIA_CRING_IND_EV (DWORD)(AP_MMIA_CC_RSP_EVENT + 13)
+#define AP_MMIA_CCWA_IND_EV (DWORD)(AP_MMIA_CC_RSP_EVENT + 14)
+#define AP_MMIA_MO_CALL_SS_NOTIFY_IND_EV (DWORD)(AP_MMIA_CC_RSP_EVENT + 15)
+#define AP_MMIA_MOD_TO_MULTMEDIA_IND_EV (DWORD)(AP_MMIA_CC_RSP_EVENT + 16)
+#define AP_MMIA_CONN_IND_EV (DWORD)(AP_MMIA_CC_RSP_EVENT + 17)
+#define AP_MMIA_ORIG_IND_EV (DWORD)(AP_MMIA_CC_RSP_EVENT + 18)
+#define AP_MMIA_CONF_IND_EV (DWORD)(AP_MMIA_CC_RSP_EVENT + 19)
+#define AP_MMIA_CEND_IND_EV (DWORD)(AP_MMIA_CC_RSP_EVENT + 20)
+#define AP_MMIA_CALL_STATE_IND_EV (DWORD)(AP_MMIA_CC_RSP_EVENT + 21)
+#define AP_MMIA_DSCI_QUERY_CNF_EV (DWORD)(AP_MMIA_CC_RSP_EVENT + 22)
+#define AP_MMIA_CAOC_SET_CNF_EV (DWORD)(AP_MMIA_CC_RSP_EVENT + 23)
+#define AP_MMIA_CPUC_QUERY_CNF_EV (DWORD)(AP_MMIA_CC_RSP_EVENT + 24)
+#define AP_MMIA_CCCM_IND_EV (DWORD)(AP_MMIA_CC_RSP_EVENT + 25)
+#define AP_MMIA_CCWV_IND_EV (DWORD)(AP_MMIA_CC_RSP_EVENT + 26)
+#define AP_MMIA_REDIALEND_IND_EV (DWORD)(AP_MMIA_CC_RSP_EVENT + 27)
+
+/* ========================================================================
+ AP-MMIA SMS-related message numbers
+======================================================================== */
+#define AP_MMIA_SMS_TCMGS_REQ_EV (DWORD)(AP_MMIA_EVENT_SMS_BASE + 0)
+#define AP_MMIA_SMS_CMSS_REQ_EV (DWORD)(AP_MMIA_EVENT_SMS_BASE + 1)
+#define AP_MMIA_SMS_TCMGW_REQ_EV (DWORD)(AP_MMIA_EVENT_SMS_BASE + 2)
+#define AP_MMIA_SMS_CMGD_REQ_EV (DWORD)(AP_MMIA_EVENT_SMS_BASE + 3)
+#define AP_MMIA_SMS_TCMGC_REQ_EV (DWORD)(AP_MMIA_EVENT_SMS_BASE + 4)
+#define AP_MMIA_SMS_CMMS_REQ_EV (DWORD)(AP_MMIA_EVENT_SMS_BASE + 5)
+#define AP_MMIA_SMS_CNMI_REQ_EV (DWORD)(AP_MMIA_EVENT_SMS_BASE + 6)
+#define AP_MMIA_SMS_CMGL_REQ_EV (DWORD)(AP_MMIA_EVENT_SMS_BASE + 7)
+#define AP_MMIA_SMS_CMGR_REQ_EV (DWORD)(AP_MMIA_EVENT_SMS_BASE + 8)
+#define AP_MMIA_SMS_TCNMA_REQ_EV (DWORD)(AP_MMIA_EVENT_SMS_BASE + 9)
+#define AP_MMIA_SMS_CGSMS_REQ_EV (DWORD)(AP_MMIA_EVENT_SMS_BASE + 10)
+#define AP_MMIA_SMS_CSMS_REQ_EV (DWORD)(AP_MMIA_EVENT_SMS_BASE + 11)
+#define AP_MMIA_SMS_CPMS_REQ_EV (DWORD)(AP_MMIA_EVENT_SMS_BASE + 12)
+#define AP_MMIA_SMS_CMGF_REQ_EV (DWORD)(AP_MMIA_EVENT_SMS_BASE + 13)
+#define AP_MMIA_SMS_CSCA_REQ_EV (DWORD)(AP_MMIA_EVENT_SMS_BASE + 14)
+#define AP_MMIA_SMS_TCSMP_REQ_EV (DWORD)(AP_MMIA_EVENT_SMS_BASE + 15)
+#define AP_MMIA_SMS_TCSDH_REQ_EV (DWORD)(AP_MMIA_EVENT_SMS_BASE + 16)
+#define AP_MMIA_SMS_PCMGS_REQ_EV (DWORD)(AP_MMIA_EVENT_SMS_BASE + 17)
+#define AP_MMIA_SMS_PCMGW_REQ_EV (DWORD)(AP_MMIA_EVENT_SMS_BASE + 18)
+#define AP_MMIA_SMS_PCMGC_REQ_EV (DWORD)(AP_MMIA_EVENT_SMS_BASE + 19)
+#define AP_MMIA_SMS_PCNMA_REQ_EV (DWORD)(AP_MMIA_EVENT_SMS_BASE + 20)
+#define AP_MMIA_SMS_CPMS_TEST_REQ_EV (DWORD)(AP_MMIA_EVENT_SMS_BASE + 21)
+#define AP_MMIA_SMS_ZMENA_REQ_EV (DWORD)(AP_MMIA_EVENT_SMS_BASE + 22)
+#define AP_MMIA_SMS_QUERY_MODE_REQ_EV (DWORD)(AP_MMIA_EVENT_SMS_BASE + 23)
+#define AP_MMIA_SMS_QUERY_MAX_INDEX_REQ_EV (DWORD)(AP_MMIA_EVENT_SMS_BASE + 24)
+#define AP_MMIA_SMS_CNMA_QUERY_MODE_REQ_EV (DWORD)(AP_MMIA_EVENT_SMS_BASE + 25)
+
+#define AP_MMIA_SMS_TCMGS_CNF_EV (DWORD)(AP_MMIA_SMS_RSP_EVENT + 0)
+#define AP_MMIA_SMS_TCMSS_CNF_EV (DWORD)(AP_MMIA_SMS_RSP_EVENT + 1)
+#define AP_MMIA_SMS_CMGW_CNF_EV (DWORD)(AP_MMIA_SMS_RSP_EVENT + 2)
+#define AP_MMIA_SMS_TCMGC_CNF_EV (DWORD)(AP_MMIA_SMS_RSP_EVENT + 3)
+#define AP_MMIA_SMS_STORE_REC_IND_EV (DWORD)(AP_MMIA_SMS_RSP_EVENT + 4)
+#define AP_MMIA_SMS_TCMT_IND_EV (DWORD)(AP_MMIA_SMS_RSP_EVENT + 5)
+#define AP_MMIA_SMS_TCDS_IND_EV (DWORD)(AP_MMIA_SMS_RSP_EVENT + 6)
+#define AP_MMIA_SMS_TDELI_LIST_CNF_EV (DWORD)(AP_MMIA_SMS_RSP_EVENT + 7)
+#define AP_MMIA_SMS_TSUB_LIST_CNF_EV (DWORD)(AP_MMIA_SMS_RSP_EVENT + 8)
+#define AP_MMIA_SMS_TSTAT_LIST_CNF_EV (DWORD)(AP_MMIA_SMS_RSP_EVENT + 9)
+#define AP_MMIA_SMS_TCOM_LIST_CNF_EV (DWORD)(AP_MMIA_SMS_RSP_EVENT + 10)
+#define AP_MMIA_SMS_TDELI_READ_CNF_EV (DWORD)(AP_MMIA_SMS_RSP_EVENT + 11)
+#define AP_MMIA_SMS_TSUB_READ_CNF_EV (DWORD)(AP_MMIA_SMS_RSP_EVENT + 12)
+#define AP_MMIA_SMS_TSTAT_READ_CNF_EV (DWORD)(AP_MMIA_SMS_RSP_EVENT + 13)
+#define AP_MMIA_SMS_TCOM_READ_CNF_EV (DWORD)(AP_MMIA_SMS_RSP_EVENT + 14)
+#define AP_MMIA_SMS_CPMS_CNF_EV (DWORD)(AP_MMIA_SMS_RSP_EVENT + 15)
+#define AP_MMIA_SMS_PCMGS_CNF_EV (DWORD)(AP_MMIA_SMS_RSP_EVENT + 16)
+#define AP_MMIA_SMS_PCMSS_CNF_EV (DWORD)(AP_MMIA_SMS_RSP_EVENT + 17)
+#define AP_MMIA_SMS_PCMGC_CNF_EV (DWORD)(AP_MMIA_SMS_RSP_EVENT + 18)
+#define AP_MMIA_SMS_PCMTIND_EV (DWORD)(AP_MMIA_SMS_RSP_EVENT + 19)
+#define AP_MMIA_SMS_PCDSIND_EV (DWORD)(AP_MMIA_SMS_RSP_EVENT + 20)
+#define AP_MMIA_SMS_PCMGL_CNF_EV (DWORD)(AP_MMIA_SMS_RSP_EVENT + 21)
+#define AP_MMIA_SMS_PCMGR_CNF_EV (DWORD)(AP_MMIA_SMS_RSP_EVENT + 22)
+#define AP_MMIA_SMS_CPMS_TEST_CNF_EV (DWORD)(AP_MMIA_SMS_RSP_EVENT + 23)
+#define AP_MMIA_SMS_QUERY_MAX_INDEX_CNF_EV (DWORD)(AP_MMIA_SMS_RSP_EVENT + 24)
+#define AP_MMIA_SMS_SAVE_FAILURE_IND_EV (DWORD)(AP_MMIA_SMS_RSP_EVENT + 25)
+#define AP_MMIA_SMS_ZCMTIND_EV (DWORD)(AP_MMIA_SMS_RSP_EVENT + 26)
+
+/* ========================================================================
+ AP-MMIA SS-related message numbers
+======================================================================== */
+#define AP_MMIA_CLCK_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_SS_BASE + 0)
+#define AP_MMIA_CPWD_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_SS_BASE + 1)
+#define AP_MMIA_CLIP_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_SS_BASE + 2)
+#define AP_MMIA_CLIP_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_SS_BASE + 3)
+#define AP_MMIA_CLIR_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_SS_BASE + 4)
+#define AP_MMIA_CLIR_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_SS_BASE + 5)
+#define AP_MMIA_COLP_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_SS_BASE + 6)
+#define AP_MMIA_COLP_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_SS_BASE + 7)
+#define AP_MMIA_CCFC_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_SS_BASE + 8)
+#define AP_MMIA_CCWA_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_SS_BASE + 9)
+#define AP_MMIA_CCWA_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_SS_BASE + 10)
+#define AP_MMIA_CUSD_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_SS_BASE + 11)
+#define AP_MMIA_CUSD_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_SS_BASE + 12)
+#define AP_MMIA_COLR_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_SS_BASE + 13)
+#define AP_MMIA_CNAP_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_SS_BASE + 14)
+#define AP_MMIA_CNAP_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_SS_BASE + 15)
+
+#define AP_MMIA_CLCK_STATUS_CNF_EV (DWORD)(AP_MMIA_SS_RSP_EVENT + 0)
+#define AP_MMIA_CLIP_QUERY_CNF_EV (DWORD)(AP_MMIA_SS_RSP_EVENT + 1)
+#define AP_MMIA_CLIR_QUERY_CNF_EV (DWORD)(AP_MMIA_SS_RSP_EVENT + 2)
+#define AP_MMIA_COLP_QUERY_CNF_EV (DWORD)(AP_MMIA_SS_RSP_EVENT + 3)
+#define AP_MMIA_CCFC_STATUS_CNF_EV (DWORD)(AP_MMIA_SS_RSP_EVENT + 4)
+#define AP_MMIA_CCWA_STATUS_CNF_EV (DWORD)(AP_MMIA_SS_RSP_EVENT + 5)
+#define AP_MMIA_CCWA_QUERY_CNF_EV (DWORD)(AP_MMIA_SS_RSP_EVENT + 6)
+#define AP_MMIA_CUSD_IND_EV (DWORD)(AP_MMIA_SS_RSP_EVENT + 7)
+#define AP_MMIA_COLR_QUERY_CNF_EV (DWORD)(AP_MMIA_SS_RSP_EVENT + 8)
+#define AP_MMIA_CNAP_QUERY_CNF_EV (DWORD)(AP_MMIA_SS_RSP_EVENT + 9)
+#define AP_MMIA_CNAP_IND_EV (DWORD)(AP_MMIA_SS_RSP_EVENT + 10)
+
+/* ========================================================================
+ AP-MMIA SM-related message numbers
+======================================================================== */
+#define AP_MMIA_SM_PARAM_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_SM_BASE + 0)
+#define AP_MMIA_SM_PARAM_READ_REQ_EV (DWORD)(AP_MMIA_EVENT_SM_BASE + 1)
+#define AP_MMIA_SM_PDP_STATUS_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_SM_BASE + 2)
+#define AP_MMIA_SM_ACTIVED_CID_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_SM_BASE + 3)
+#define AP_MMIA_SM_DEF_CID_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_SM_BASE + 4)
+#define AP_MMIA_SM_PDP_ADDR_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_SM_BASE + 5)
+#define AP_MMIA_SM_NEG_QOS_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_SM_BASE + 6)
+#define AP_MMIA_SM_NEG_EQOS_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_SM_BASE + 7)
+#define AP_MMIA_SM_ACT_DEACT_REQ_EV (DWORD)(AP_MMIA_EVENT_SM_BASE + 8)
+#define AP_MMIA_SM_MOD_REQ_EV (DWORD)(AP_MMIA_EVENT_SM_BASE + 9)
+#define AP_MMIA_SM_DATA_STATE_REQ_EV (DWORD)(AP_MMIA_EVENT_SM_BASE + 10)
+#define AP_MMIA_SM_MT_ACT_ANS_REQ_EV (DWORD)(AP_MMIA_EVENT_SM_BASE + 11)
+#define AP_MMIA_SM_CPSB_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_SM_BASE + 12)
+#define AP_MMIA_SM_CGCONTRDP_REQ_EV (DWORD)(AP_MMIA_EVENT_SM_BASE + 13)
+#define AP_MMIA_SM_CGSCONTRDP_REQ_EV (DWORD)(AP_MMIA_EVENT_SM_BASE + 14)
+#define AP_MMIA_SM_CGTFTRDP_REQ_EV (DWORD)(AP_MMIA_EVENT_SM_BASE + 15)
+#define AP_MMIA_SM_CGDEL_REQ_EV (DWORD)(AP_MMIA_EVENT_SM_BASE + 16)
+#define AP_MMIA_SM_ZGACT_REQ_EV (DWORD)(AP_MMIA_EVENT_SM_BASE + 17)
+/* The messages above correspond to AT commands */
+#define AP_MMIA_SM_GET_PCO_RSP_EV (DWORD)(AP_MMIA_EVENT_SM_BASE + 18)
+#define AP_MMIA_SM_IP_PDP_ACT_REQ_EV (DWORD)(AP_MMIA_EVENT_SM_BASE + 19)
+#define AP_MMIA_SM_OPEN_CHNL_RSP_EV (DWORD)(AP_MMIA_EVENT_SM_BASE + 20)
+#define AP_MMIA_SM_IDLE_CHNL_QUERY_RSP_EV (DWORD)(AP_MMIA_EVENT_SM_BASE + 21)
+#define AP_MMIA_DISCONNECT_REQ_EV (DWORD)(AP_MMIA_EVENT_SM_BASE + 22)
+
+
+#define AP_MMIA_SM_PDP_STATUS_QUERY_CNF_EV (DWORD)(AP_MMIA_SM_RSP_EVENT + 0)
+#define AP_MMIA_SM_ACTIVED_CID_QUERY_CNF_EV (DWORD)(AP_MMIA_SM_RSP_EVENT + 1)
+#define AP_MMIA_SM_PDP_ADDR_QUERY_CNF_EV (DWORD)(AP_MMIA_SM_RSP_EVENT + 2)
+#define AP_MMIA_SM_NEG_QOS_QUERY_CNF_EV (DWORD)(AP_MMIA_SM_RSP_EVENT + 3)
+#define AP_MMIA_SM_NEG_EQOS_QUERY_CNF_EV (DWORD)(AP_MMIA_SM_RSP_EVENT + 4)
+#define AP_MMIA_SM_NO_CARRIER_CNF_EV (DWORD)(AP_MMIA_SM_RSP_EVENT + 5)
+#define AP_MMIA_SM_ACT_DEACT_CNF_EV (DWORD)(AP_MMIA_SM_RSP_EVENT + 6)
+#define AP_MMIA_SM_MOD_CNF_EV (DWORD)(AP_MMIA_SM_RSP_EVENT + 7)
+#define AP_MMIA_SM_MT_ACTIVATE_IND_EV (DWORD)(AP_MMIA_SM_RSP_EVENT + 8)
+#define AP_MMIA_SM_CGEV_IND_EV (DWORD)(AP_MMIA_SM_RSP_EVENT + 9)
+#define AP_MMIA_SM_IP_PDP_ACT_CNF_EV (DWORD)(AP_MMIA_SM_RSP_EVENT + 10)
+#define AP_MMIA_SM_CLOSE_CHNL_IND_EV (DWORD)(AP_MMIA_SM_RSP_EVENT + 11)
+#define AP_MMIA_SM_QUERY_IDLE_CHNL_IND_EV (DWORD)(AP_MMIA_SM_RSP_EVENT + 12)
+#define AP_MMIA_SM_GET_PCO_IND_EV (DWORD)(AP_MMIA_SM_RSP_EVENT + 13)
+#define AP_MMIA_SM_CONNECT_IND_EV (DWORD)(AP_MMIA_SM_RSP_EVENT + 14)
+#define AP_MMIA_SM_CPSB_IND_EV (DWORD)(AP_MMIA_SM_RSP_EVENT + 15)
+#define AP_MMIA_SM_CGCONTRDP_CNF_EV (DWORD)(AP_MMIA_SM_RSP_EVENT + 16)
+#define AP_MMIA_SM_CGSCONTRDP_CNF_EV (DWORD)(AP_MMIA_SM_RSP_EVENT + 17)
+#define AP_MMIA_SM_CGTFTRDP_CNF_EV (DWORD)(AP_MMIA_SM_RSP_EVENT + 18)
+#define AP_MMIA_SM_CGDEL_CNF_EV (DWORD)(AP_MMIA_SM_RSP_EVENT + 19)
+#define AP_MMIA_SM_DEACT_IND_EV (DWORD)(AP_MMIA_SM_RSP_EVENT + 20)
+
+/* ========================================================================
+ AP-MMIA ESM-related message numbers
+======================================================================== */
+#define AP_MMIA_ESM_CGETFADS_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_ESM_BASE + 0)
+#define AP_MMIA_ESM_TFAD_READ_REQ_EV (DWORD)(AP_MMIA_EVENT_ESM_BASE + 1)
+#define AP_MMIA_ESM_CGATFT_REQ_EV (DWORD)(AP_MMIA_EVENT_ESM_BASE + 2)
+#define AP_MMIA_ESM_BEARER_MOD_REQ_EV (DWORD)(AP_MMIA_EVENT_ESM_BASE + 3)
+#define AP_MMIA_ESM_EBR_MOD_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_ESM_BASE + 4)
+#define AP_MMIA_CGEQOSRDP_REQ_EV (DWORD)(AP_MMIA_EVENT_ESM_BASE + 5) /*only for R7&R5*/
+#define AP_MMIA_ESM_TFAD_TEST_REQ_EV (DWORD)(AP_MMIA_EVENT_ESM_BASE + 6)
+#define AP_MMIA_CGEQOS_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_ESM_BASE + 7) /*only for R7&R5*/
+#define AP_MMIA_CGEQOS_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_ESM_BASE + 8) /*only for R7&R5*/
+
+#define AP_MMIA_ESM_BEARER_ACT_IND_EV (DWORD)(AP_MMIA_ESM_RSP_EVENT + 0)
+#define AP_MMIA_ESM_BEARER_DEACT_IND_EV (DWORD)(AP_MMIA_ESM_RSP_EVENT + 1)
+#define AP_MMIA_ESM_BEARER_MOD_IND_EV (DWORD)(AP_MMIA_ESM_RSP_EVENT + 2)
+#define AP_MMIA_ESM_TFAD_READ_CNF_EV (DWORD)(AP_MMIA_ESM_RSP_EVENT + 3)
+#define AP_MMIA_ESM_CGATFT_CNF_EV (DWORD)(AP_MMIA_ESM_RSP_EVENT + 4)
+#define AP_MMIA_ESM_BEARER_MOD_CNF_EV (DWORD)(AP_MMIA_ESM_RSP_EVENT + 5)
+#define AP_MMIA_ESM_BEARER_MOD_REJ_EV (DWORD)(AP_MMIA_ESM_RSP_EVENT + 6)
+#define AP_MMIA_ESM_EBRMOD_QUERY_CNF_EV (DWORD)(AP_MMIA_ESM_RSP_EVENT + 7)
+#define AP_MMIA_CGEQOSRDP_CNF_EV (DWORD)(AP_MMIA_ESM_RSP_EVENT + 8) /*only for R7&R5*/
+#define AP_MMIA_ESM_TFADTEST_CNF_EV (DWORD)(AP_MMIA_ESM_RSP_EVENT + 9)
+
+/* ========================================================================
+ AP-MMIA UICC-related message numbers
+======================================================================== */
+#define AP_MMIA_UICC_INIT_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 0)
+#define AP_MMIA_CPIN_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 1)
+#define AP_MMIA_PIN_REMAIN_NUM_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 2)
+#define AP_MMIA_CPBS_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 3)
+#define AP_MMIA_CPBS_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 4)
+#define AP_MMIA_CPBR_EXE_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 5)
+#define AP_MMIA_CPBR_TEST_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 6)
+#define AP_MMIA_CPBF_EXE_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 7)
+#define AP_MMIA_CPBF_TEST_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 8)
+#define AP_MMIA_CPBW_EXE_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 9)
+#define AP_MMIA_CPBW_TEST_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 10)
+#define AP_MMIA_UICC_COMMAND_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 11)
+#define AP_MMIA_PIN_APPL_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 12)
+#define AP_MMIA_PIN_APPL_READ_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 13)
+#define AP_MMIA_CARD_MODE_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 14)
+#define AP_MMIA_CPIN_READ_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 15)
+#define AP_MMIA_CPBS_TEST_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 16)
+#define AP_MMIA_SCPBR_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 17)
+#define AP_MMIA_SCPBR_TEST_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 18)
+#define AP_MMIA_SCPBW_TEST_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 19)
+#define AP_MMIA_SCPBW_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 20)
+#define AP_MMIA_CNUM_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 21)
+#define AP_MMIA_ZIMG_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 22)
+#define AP_MMIA_ZGIIDF_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 23)
+#define AP_MMIA_ZPUK_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 24)
+#define AP_MMIA_CPBW_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 25)
+#define AP_MMIA_ZCPBQ_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 26)
+#define AP_MMIA_ZCPBQ_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 27)
+#define AP_MMIA_ZEER_READ_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 28)
+#define AP_MMIA_MB_AUTH_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 29)
+#define AP_MMIA_MB_CELL_ID_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 30)
+#define AP_MMIA_PSEUDO_FR_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 31)
+#define AP_MMIA_PSEUDO_FR_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 32)
+#define AP_MMIA_REFRESH_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 33)
+#define AP_MMIA_CARD_SRV_LIST_QRY_REQ_EV (DWORD)(AP_MMIA_EVENT_UICC_BASE + 34)
+
+#define AP_MMIA_UICC_INIT_CNF_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 0)
+#define AP_MMIA_UICC_OKIND_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 1)
+#define AP_MMIA_UICC_INTI_IND_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 2)
+#define AP_MMIA_UICC_SLOT_IND_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 3)
+#define AP_MMIA_PIN_REMAI_NNUM_CNF_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 4)
+#define AP_MMIA_CPBS_QUERY_CNF_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 5)
+#define AP_MMIA_CPBR_EXE_CNF_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 6)
+#define AP_MMIA_CPBR_TEST_CNF_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 7)
+#define AP_MMIA_CPBF_TEST_CNF_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 8)
+#define AP_MMIA_CPBW_TEST_CNF_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 9)
+#define AP_MMIA_PIN_APPL_SET_CNF_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 10)
+#define AP_MMIA_PIN_APPL_READ_CNF_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 11)
+#define AP_MMIA_CARD_MODE_CNF_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 12)
+#define AP_MMIA_CPIN_READ_CNF_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 13)
+#define AP_MMIA_CPBR_SET_END_CNF_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 14)
+#define AP_MMIA_CPBS_TEST_CNF_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 15)
+#define AP_MMIA_SCPBR_SET_CNF_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 16)
+#define AP_MMIA_SCPBR_SET_END_CNF_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 17)
+#define AP_MMIA_SCPBR_TEST_CNF_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 18)
+#define AP_MMIA_SCPBW_TEST_CNF_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 19)
+#define AP_MMIA_CNUM_CNF_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 20)
+#define AP_MMIA_ZIMG_CNF_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 21)
+#define AP_MMIA_ZGIIDF_CNF_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 22)
+#define AP_MMIA_CPBW_EXE_CNF_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 23)
+#define AP_MMIA_ZCPBQ_SET_CNF_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 24)
+#define AP_MMIA_ZCPBQ_QUERY_CNF_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 25)
+#define AP_MMIA_ZEER_READ_CNF_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 26)
+#define AP_MMIA_MB_AUTH_CNF_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 27)
+#define AP_MMIA_PSEUDO_FR_QUERY_CNF_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 28)
+#define AP_MMIA_CARD_SRV_LIST_QRY_CNF_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 29)
+#define AP_MMIA_ICCID_IND_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 30)
+#define AP_MMIA_ZCFIS_SET_CNF_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 31)
+#define AP_MMIA_ZISIMINIT_IND_EV (DWORD)(AP_MMIA_UICC_RSP_EVENT + 32)
+#define AP_MMIA_COPN_EXE_CNF (DWORD)(AP_MMIA_UICC_RSP_EVENT + 33)
+#define AP_MMIA_COPN_END_CNF (DWORD)(AP_MMIA_UICC_RSP_EVENT + 34)
+
+/* ========================================================================
+ AP-MMIA USAT message number definitions
+======================================================================== */
+#define AP_MMIA_USAT_ENVELOP_REQ_EV (DWORD)(AP_MMIA_EVENT_USAT_BASE + 0)
+#define AP_MMIA_USAT_TERMNL_RSP_REQ_EV (DWORD)(AP_MMIA_EVENT_USAT_BASE + 1)
+#define AP_MMIA_USAT_TERMNL_PROF_REQ_EV (DWORD)(AP_MMIA_EVENT_USAT_BASE + 2)
+#define AP_MMIA_USAT_LOC_INFO_REQ_EV (DWORD)(AP_MMIA_EVENT_USAT_BASE + 3)
+#define AP_MMIA_USAT_TO_READ_CARD_REQ_EV (DWORD)(AP_MMIA_EVENT_USAT_BASE + 4)
+
+#define AP_MMIA_USAT_ENVELOP_CNF_EV (DWORD)(AP_MMIA_USAT_RSP_EVENT + 0)
+#define AP_MMIA_USAT_PROV_CMD_IND_EV (DWORD)(AP_MMIA_USAT_RSP_EVENT + 1)
+#define AP_MMIA_USAT_NOPROC_NOTIFY_IND_EV (DWORD)(AP_MMIA_USAT_RSP_EVENT + 2)
+
+/* ========================================================================
+ AP-MMIA CBS message number definitions
+======================================================================== */
+#define AP_MMIA_CBS_CSCB_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_CBS_BASE + 0)
+#define AP_MMIA_CBS_CSCB_READ_REQ_EV (DWORD)(AP_MMIA_EVENT_CBS_BASE + 1)
+#define AP_MMIA_CBS_SAVING_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_CBS_BASE + 2)
+#define AP_MMIA_CBS_RESTORE_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_CBS_BASE + 3)
+
+#define AP_MMIA_CBS_TCBM_IND_EV (DWORD)(AP_MMIA_CBS_RSP_EVENT + 0)
+#define AP_MMIA_CBS_PCBM_IND_EV (DWORD)(AP_MMIA_CBS_RSP_EVENT + 1)
+#define AP_MMIA_CBS_TCBM_LIST_CNF_EV (DWORD)(AP_MMIA_CBS_RSP_EVENT + 2)
+#define AP_MMIA_CBS_PCBM_LIST_CNF_EV (DWORD)(AP_MMIA_CBS_RSP_EVENT + 3)
+#define AP_MMIA_CBS_TCBM_READ_CNF_EV (DWORD)(AP_MMIA_CBS_RSP_EVENT + 4)
+#define AP_MMIA_CBS_PCBM_READ_CNF_EV (DWORD)(AP_MMIA_CBS_RSP_EVENT + 5)
+
+/* ========================================================================
+ AP-MMIA PB (storage management) message number definitions
+======================================================================== */
+#define AP_MMIA_SM_SAVE_REQ_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 0)
+#define AP_MMIA_SM_READ_REQ_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 1)
+#define AP_MMIA_SM_LIST_REQ_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 2)
+#define AP_MMIA_SM_DELETE_REQ_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 3)
+#define AP_MMIA_SM_MEM_AVAIL_RSP_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 4)
+#define AP_MMIA_PB_FIND_INDEX_REQ_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 5)
+#define AP_MMIA_PB_FIND_TEXT_REQ_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 6)
+#define AP_MMIA_PB_EDIT_REQ_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 7)
+#define AP_MMIA_PB_STORAGE_STATUS_REQ_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 8)
+#define AP_MMIA_PB_PREF_MSG_STO_REQ_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 9)
+#define AP_MMIA_PB_PREF_MSG_STO_TEST_REQ_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 10)
+#define AP_MMIA_PB_TPMR_UPDATE_REQ_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 11)
+#define AP_MMIA_PB_MEM_CAPA_REQ_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 12)
+#define AP_MMIA_PB_MT_PARA_IND_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 13)
+#define AP_MMIA_PB_EMER_NUM_LIST_IND_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 14)
+#define AP_MMIA_PB_STO_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 15)
+#define AP_MMIA_PB_STO_TEST_REQ_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 16)
+#define AP_MMIA_PB_QUERY_SMS_MAX_INDEX_REQ_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 17)
+#define AP_MMIA_PB_S_FIND_INDEX_REQ_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 18)
+#define AP_MMIA_PB_S_EDIT_REQ_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 19)
+#define AP_MMIA_PB_C_NUM_REQ (DWORD)(AP_MMIA_EVENT_PB_BASE + 20)
+#define AP_MMIA_PB_CLCK_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 21)
+#define AP_MMIA_PB_SCPBR_TEST_REQ_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 22)
+#define AP_MMIA_PB_SCPBW_TEST_REQ_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 23)
+#define AP_MMIA_PB_UICC_OK_IND_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 24)
+#define AP_MMIA_PB_CPBR_IND_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 25)
+#define AP_MMIA_PB_CPBF_IND_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 26)
+#define AP_MMIA_PB_SCPBR_IND_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 27)
+#define AP_MMIA_PB_CMGL_IND_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 28)
+#define AP_MMIA_PB_CPBW_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 29)
+#define AP_MMIA_PB_READ_CAPA_REQ_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 30)
+#define AP_MMIA_PB_READ_SET_NUM_REQ_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 31)
+#define AP_MMIA_PB_READ_LAST_EXT_ERR_REQ_EV (DWORD)(AP_MMIA_EVENT_PB_BASE + 32)
+
+#define AP_MMIA_SM_SAVE_CNF_EV (DWORD)(AP_MMIA_PB_RSP_EVENT + 0)
+#define AP_MMIA_SM_READ_CNF_EV (DWORD)(AP_MMIA_PB_RSP_EVENT + 1)
+#define AP_MMIA_SM_LIST_CNF_EV (DWORD)(AP_MMIA_PB_RSP_EVENT + 2)
+#define AP_MMIA_SM_DELETE_CNF_EV (DWORD)(AP_MMIA_PB_RSP_EVENT + 3)
+#define AP_MMIA_SM_MEM_AVAIL_IND_EV (DWORD)(AP_MMIA_PB_RSP_EVENT + 4)
+#define AP_MMIA_PB_FIND_INDEX_CNF_EV (DWORD)(AP_MMIA_PB_RSP_EVENT + 5)
+#define AP_MMIA_PB_FIND_TEXT_CNF_EV (DWORD)(AP_MMIA_PB_RSP_EVENT + 6)
+#define AP_MMIA_PB_EDIT_CNF_EV (DWORD)(AP_MMIA_PB_RSP_EVENT + 7)
+#define AP_MMIA_PB_STORAGE_STATUS_CNF_EV (DWORD)(AP_MMIA_PB_RSP_EVENT + 8)
+#define AP_MMIA_PB_FIND_INDEX_END_CNF_EV (DWORD)(AP_MMIA_PB_RSP_EVENT + 9)
+#define AP_MMIA_PB_PREF_MSG_STO_CNF_EV (DWORD)(AP_MMIA_PB_RSP_EVENT + 10)
+#define AP_MMIA_PB_PREF_MSG_STO_TEST_CNF_EV (DWORD)(AP_MMIA_PB_RSP_EVENT + 11)
+#define AP_MMIA_PB_COMMON_CNF_EV (DWORD)(AP_MMIA_PB_RSP_EVENT + 12)
+#define AP_MMIA_PB_INIT_COMP_IND_EV (DWORD)(AP_MMIA_PB_RSP_EVENT + 13)
+#define AP_MMIA_PB_STO_TEST_CNF_EV (DWORD)(AP_MMIA_PB_RSP_EVENT + 14)
+#define AP_MMIA_PB_QUERY_SMS_MAX_INDEX_CNF_EV (DWORD)(AP_MMIA_PB_RSP_EVENT + 15)
+#define AP_MMIA_PB_S_FIND_INDEX_CNF_EV (DWORD)(AP_MMIA_PB_RSP_EVENT + 16)
+#define AP_MMIA_PB_S_FIND_INDEX_END_CNF_EV (DWORD)(AP_MMIA_PB_RSP_EVENT + 17)
+#define AP_MMIA_PB_S_EDIT_CNF_EV (DWORD)(AP_MMIA_PB_RSP_EVENT + 18)
+#define AP_MMIA_PB_SCPBR_TEST_CNF_EV (DWORD)(AP_MMIA_PB_RSP_EVENT + 19)
+#define AP_MMIA_PB_SCPBW_TEST_CNF_EV (DWORD)(AP_MMIA_PB_RSP_EVENT + 20)
+#define AP_MMIA_PB_C_NUM_CNF (DWORD)(AP_MMIA_PB_RSP_EVENT + 21)
+#define AP_MMIA_PB_CLCK_STATUS_CNF_EV (DWORD)(AP_MMIA_PB_RSP_EVENT + 22)
+#define AP_MMIA_PB_CHG_INDEX_IND_EV (DWORD)(AP_MMIA_PB_RSP_EVENT + 23)
+#define AP_MMIA_PB_CPBW_QUERY_CNF_EV (DWORD)(AP_MMIA_PB_RSP_EVENT + 24)
+#define AP_MMIA_PB_READ_CAPA_CNF_EV (DWORD)(AP_MMIA_PB_RSP_EVENT + 25)
+#define AP_MMIA_PB_READ_SET_NUM_CNF_EV (DWORD)(AP_MMIA_PB_RSP_EVENT + 26)
+#define AP_MMIA_PB_READ_LAST_EXT_ERR_CNF_EV (DWORD)(AP_MMIA_PB_RSP_EVENT + 27)
+
+/* ========================================================================
+ AP-MMIA engineering-mode and sales-statistics message ID definitions
+======================================================================== */
+/* Requests, offset from AP_MMIA_EVENT_EM_BASE */
+#define AP_MMIA_EM_CELL_INFO_REQ_EV (DWORD)(AP_MMIA_EVENT_EM_BASE + 0)
+#define AP_MMIA_EM_CELL_INFO_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_EM_BASE + 1)
+#define AP_MMIA_EM_LOCK_CELL_REQ_EV (DWORD)(AP_MMIA_EVENT_EM_BASE + 2)
+#define AP_MMIA_EM_HO_INFO_REQ_EV (DWORD)(AP_MMIA_EVENT_EM_BASE + 3)
+#define AP_MMIA_EM_HO_INFO_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_EM_BASE + 4)
+#define AP_MMIA_SELL_STAT_SWITCH_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_EM_BASE + 5)
+#define AP_MMIA_SELL_STAT_SWITCH_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_EM_BASE + 6)
+#define AP_MMIA_SELL_STAT_UDPINFO_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_EM_BASE + 7)
+#define AP_MMIA_SELL_STAT_TEST_SEND_REQ_EV (DWORD)(AP_MMIA_EVENT_EM_BASE + 8)
+#define AP_MMIA_SELL_STAT_DOMAIN_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_EM_BASE + 9)
+#define AP_MMIA_SELL_STAT_DOMAIN_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_EM_BASE + 10)
+#define AP_MMIA_SELL_STAT_CRC_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_EM_BASE + 11)
+#define AP_MMIA_SELL_STAT_CRC_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_EM_BASE + 12)
+#define AP_MMIA_SELL_STAT_DEBUG_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_EM_BASE + 13)
+#define AP_MMIA_SELL_STAT_DEBUG_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_EM_BASE + 14)
+#define AP_MMIA_SELL_STAT_PORT_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_EM_BASE + 15)
+#define AP_MMIA_SELL_STAT_PORT_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_EM_BASE + 16)
+#define AP_MMIA_SELL_STAT_TRI_TYPE_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_EM_BASE + 17)
+#define AP_MMIA_SELL_STAT_DNS_CNT_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_EM_BASE + 18)
+
+
+/* Confirms/indications, offset from AP_MMIA_EM_RSP_EVENT */
+#define AP_MMIA_EM_CELL_INFO_QUERY_CNF_EV (DWORD)(AP_MMIA_EM_RSP_EVENT + 0)
+#define AP_MMIA_EM_HO_INFO_IND_EV (DWORD)(AP_MMIA_EM_RSP_EVENT + 1)
+#define AP_MMIA_EM_HO_INFO_QUERY_CNF_EV (DWORD)(AP_MMIA_EM_RSP_EVENT + 2)
+#define AP_MMIA_SELL_STAT_SWITCH_QUERY_CNF_EV (DWORD)(AP_MMIA_EM_RSP_EVENT + 3)
+#define AP_MMIA_SELL_STAT_UDPINFO_QUERY_CNF_EV (DWORD)(AP_MMIA_EM_RSP_EVENT + 4)
+#define AP_MMIA_SELL_STAT_DOMAIN_QUERY_CNF_EV (DWORD)(AP_MMIA_EM_RSP_EVENT + 5)
+#define AP_MMIA_SELL_STAT_CRC_QUERY_CNF_EV (DWORD)(AP_MMIA_EM_RSP_EVENT + 6)
+#define AP_MMIA_SELL_STAT_DEBUG_QUERY_CNF_EV (DWORD)(AP_MMIA_EM_RSP_EVENT + 7)
+#define AP_MMIA_SELL_STAT_PORT_QUERY_CNF_EV (DWORD)(AP_MMIA_EM_RSP_EVENT + 8)
+#define AP_MMIA_SELL_STAT_TRI_TYPE_QUERY_CNF_EV (DWORD)(AP_MMIA_EM_RSP_EVENT + 9)
+#define AP_MMIA_SELL_STAT_DNS_CNT_QUERY_CNF_EV (DWORD)(AP_MMIA_EM_RSP_EVENT + 10)
+
+/* ========================================================================
+ AP-MMIA unsolicited signal-strength reporting message ID definitions
+======================================================================== */
+/* Requests, offset from AP_MMIA_EVENT_OTHER_BASE */
+#define AP_MMIA_RXLEV_REQ_EV (DWORD)(AP_MMIA_EVENT_OTHER_BASE + 0)
+#define AP_MMIA_ZRPT_RXLEV_REQ_EV (DWORD)(AP_MMIA_EVENT_OTHER_BASE + 1)
+#define AP_MMIA_ZRPT_RXLEV_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_OTHER_BASE + 2)
+#define AP_MMIA_QUERY_IMSI_REQ_EV (DWORD)(AP_MMIA_EVENT_OTHER_BASE + 3)
+#define AP_MMIA_QUERY_IMEI_REQ_EV (DWORD)(AP_MMIA_EVENT_OTHER_BASE + 4)
+#define AP_MMIA_ABORT_REQ_EV (DWORD)(AP_MMIA_EVENT_OTHER_BASE + 5)
+#define AP_MMIA_CAUSE_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_OTHER_BASE + 6)
+#define AP_MMIA_SPN_READ_REQ_EV (DWORD)(AP_MMIA_EVENT_OTHER_BASE + 7)
+#define AP_MMIA_ZETWS_PRIMARY_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_OTHER_BASE + 8)
+#define AP_MMIA_ZETWS_PRIMARY_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_OTHER_BASE + 9)
+#define AP_MMIA_ZETWS_SECONDARY_SET_REQ_EV (DWORD)(AP_MMIA_EVENT_OTHER_BASE + 10)
+#define AP_MMIA_ZETWS_SECONDARY_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_OTHER_BASE + 11)
+#define AP_MMIA_SET_IMSI_REQ_EV (DWORD)(AP_MMIA_EVENT_OTHER_BASE + 12)
+#define AP_MMIA_AUTO_START_REQ_EV (DWORD)(AP_MMIA_EVENT_OTHER_BASE + 13)
+#define AP_MMIA_CHNEL_STATE_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_OTHER_BASE + 14)
+#define AP_MMIA_ZOPERLTEBAND_QUERY_REQ_EV (DWORD)(AP_MMIA_EVENT_OTHER_BASE + 15)
+
+
+/* Confirms/indications, offset from AP_MMIA_OTHER_RSP_EVENT */
+#define AP_MMIA_RXLEV_CNF_EV (DWORD)(AP_MMIA_OTHER_RSP_EVENT + 0)
+/* NOTE(review): missing underscore before IND (siblings use _IND_EV); kept for compatibility */
+#define AP_MMIA_ZRPT_RXLEVIND_EV (DWORD)(AP_MMIA_OTHER_RSP_EVENT + 1)
+#define AP_MMIA_ZRPT_RXLEV_QUERY_CNF_EV (DWORD)(AP_MMIA_OTHER_RSP_EVENT + 2)
+#define AP_MMIA_QUERY_IMSI_CNF_EV (DWORD)(AP_MMIA_OTHER_RSP_EVENT + 3)
+#define AP_MMIA_QUERY_IMEI_CNF_EV (DWORD)(AP_MMIA_OTHER_RSP_EVENT + 4)
+#define AP_MMIA_COMMON_CNF_EV (DWORD)(AP_MMIA_OTHER_RSP_EVENT + 5)
+#define AP_MMIA_CAUSE_QUERY_CNF_EV (DWORD)(AP_MMIA_OTHER_RSP_EVENT + 6)
+#define AP_MMIA_ZPBIC_IND_EV (DWORD)(AP_MMIA_OTHER_RSP_EVENT + 7)
+#define AP_MMIA_SPN_READ_CNF_EV (DWORD)(AP_MMIA_OTHER_RSP_EVENT + 8)
+#define AP_MMIA_ZETWS_PRIMARY_QUERY_CNF_EV (DWORD)(AP_MMIA_OTHER_RSP_EVENT + 9)
+#define AP_MMIA_ZETWS_SECONDARY_QUERY_CNF_EV (DWORD)(AP_MMIA_OTHER_RSP_EVENT + 10)
+#define AP_MMIA_ZETWS_PRIMARY_IND_EV (DWORD)(AP_MMIA_OTHER_RSP_EVENT + 11)
+#define AP_MMIA_ZETWS_SECONDARY_IND_EV (DWORD)(AP_MMIA_OTHER_RSP_EVENT + 12)
+#define AP_MMIA_CHG_INDEX_IND_EV (DWORD)(AP_MMIA_OTHER_RSP_EVENT + 13)
+
+/* ========================================================================
+ MMIA-UMM message ID definitions
+======================================================================== */
+/* Requests, offset from MMIA_UMM_EVENT_BASE */
+#define MMIA_UMM_SEARCH_PLMN_REQ_EV (DWORD)(MMIA_UMM_EVENT_BASE + 0)
+#define MMIA_UMM_PLMN_LIST_REQ_EV (DWORD)(MMIA_UMM_EVENT_BASE + 1)
+#define MMIA_UMM_ACTIVE_REQ_EV (DWORD)(MMIA_UMM_EVENT_BASE + 2)
+#define MMIA_UMM_RF_CTRL_REQ_EV (DWORD)(MMIA_UMM_EVENT_BASE + 3)
+#define MMIA_UMM_ABORT_REQ_EV (DWORD)(MMIA_UMM_EVENT_BASE + 4)
+#define MMIA_UMM_EM_LOCK_CELL_REQ_EV (DWORD)(MMIA_UMM_EVENT_BASE + 5)
+#define MMIA_UMM_CSG_SEL_REQ_EV (DWORD)(MMIA_UMM_EVENT_BASE + 6)
+#define MMIA_UMM_CURRENT_CSG_QUERY_REQ_EV (DWORD)(MMIA_UMM_EVENT_BASE + 7)
+#define MMIA_UMM_CSG_LIST_REQ_EV (DWORD)(MMIA_UMM_EVENT_BASE + 8)
+#define MMIA_UMM_SYSCONFIG_REQ_EV (DWORD)(MMIA_UMM_EVENT_BASE + 9)
+#define MMIA_UMM_CGATT_QUERY_REQ_EV (DWORD)(MMIA_UMM_EVENT_BASE + 10)
+#define MMIA_UMM_ZATT_QUERY_REQ_EV (DWORD)(MMIA_UMM_EVENT_BASE + 11)
+#define MMIA_UMM_EPS_MODE_QUERY_REQ_EV (DWORD)(MMIA_UMM_EVENT_BASE + 12)
+#define MMIA_UMM_EPS_MODE_SET_REQ_EV (DWORD)(MMIA_UMM_EVENT_BASE + 13)
+#define MMIA_UMM_SET_LTE_ACT_REQ_EV (DWORD)(MMIA_UMM_EVENT_BASE + 14)
+#define MMIA_UMM_CS_SRV_CNF_EV (DWORD)(MMIA_UMM_EVENT_BASE + 15)
+#define MMIA_UMM_IMS_REGISTER_STATES_EV (DWORD)(MMIA_UMM_EVENT_BASE + 16)
+#define MMIA_UMM_VOICE_MODE_SET_REQ_EV (DWORD)(MMIA_UMM_EVENT_BASE + 17)
+#define MMIA_UMM_VOICE_MODE_QUERY_REQ_EV (DWORD)(MMIA_UMM_EVENT_BASE + 18)
+#define MMIA_UMM_SYSCONFIG_QUERY_REQ_EV (DWORD)(MMIA_UMM_EVENT_BASE + 19)
+#define MMIA_UMM_SMSOVERIPNET_SETREQ_EV (DWORD)(MMIA_UMM_EVENT_BASE + 20)
+#define Z_TD_LTE_CELL_IND_EV (DWORD)(MMIA_UMM_EVENT_BASE + 21)
+#define MMIA_UMM_PS_CONTEXT_REQ_EV (DWORD)(MMIA_UMM_EVENT_BASE + 22)
+#define MMIA_UMM_PS_CONTEXT_IND_EV (DWORD)(MMIA_UMM_EVENT_BASE + 23)
+#define MMIA_UMM_CAUSE_QUERY_REQ_EV (DWORD)(MMIA_UMM_EVENT_BASE + 24)
+#define MMIA_UMM_UPDATE_OPERPLMN_IND_EV (DWORD)(MMIA_UMM_EVENT_BASE + 25)
+#define MMIA_UMM_CS_CALL_START_IND_EV (DWORD)(MMIA_UMM_EVENT_BASE + 26)
+#define MMIA_UMM_CS_CALL_END_IND_EV (DWORD)(MMIA_UMM_EVENT_BASE + 27)
+#define MMIA_UMM_XCELLINFO_REQ_EV (DWORD)(MMIA_UMM_EVENT_BASE + 28)
+#define MMIA_UMM_LASTCID_APNMODIFY_IND_EV (DWORD)(MMIA_UMM_EVENT_BASE + 29)
+#define MMIA_UMM_CSVOICE_QUERY_REQ_EV (DWORD)(MMIA_UMM_EVENT_BASE + 30)
+#define MMIA_UMM_CARDSWITCH_CMP_IND_EV (DWORD)(MMIA_UMM_EVENT_BASE + 31)
+#define MMIA_UMM_ECALLSPT_QUERY_REQ_EV (DWORD)(MMIA_UMM_EVENT_BASE + 32)
+#define MMIA_UMM_ECALLONLY_QUERY_REQ_EV (DWORD)(MMIA_UMM_EVENT_BASE + 33)
+#define MMIA_UMM_FREQ_SCAN_REQ_EV (DWORD)(MMIA_UMM_EVENT_BASE + 34)
+#define MMIA_UMM_FAST_FREQ_SCAN_REQ_EV (DWORD)(MMIA_UMM_EVENT_BASE + 35)
+#define MMIA_UMM_IMSAIRREL_REQ_EV (DWORD)(MMIA_UMM_EVENT_BASE + 36)
+#define MMIA_UMM_SOFTPOWER_STATUS_IND_EV (DWORD)(MMIA_UMM_EVENT_BASE + 37)
+
+
+/* Confirms/indications, offset from MMIA_UMM_RSP_EVENT */
+#define MMIA_UMM_PLMN_INFO_IND_EV (DWORD)(MMIA_UMM_RSP_EVENT + 0)
+#define MMIA_UMM_PLMN_LIST_CNF_EV (DWORD)(MMIA_UMM_RSP_EVENT + 1)
+#define MMIA_UMM_ACTIVE_CNF_EV (DWORD)(MMIA_UMM_RSP_EVENT + 2)
+#define MMIA_UMM_MM_INFO_IND_EV (DWORD)(MMIA_UMM_RSP_EVENT + 3)
+#define MMIA_UMM_RF_CTRL_CNF_EV (DWORD)(MMIA_UMM_RSP_EVENT + 4)
+#define MMIA_UMM_EM_LOCK_CELL_CNF_EV (DWORD)(MMIA_UMM_RSP_EVENT + 5)
+#define MMIA_UMM_CSG_SEL_CNF_EV (DWORD)(MMIA_UMM_RSP_EVENT + 6)
+#define MMIA_UMM_CURRENT_CSG_QUERY_CNF_EV (DWORD)(MMIA_UMM_RSP_EVENT + 7)
+#define MMIA_UMM_CSG_LIST_CNF_EV (DWORD)(MMIA_UMM_RSP_EVENT + 8)
+#define MMIA_UMM_COMMON_CNF_EV (DWORD)(MMIA_UMM_RSP_EVENT + 9)
+#define MMIA_UMM_CGATT_QUERY_CNF_EV (DWORD)(MMIA_UMM_RSP_EVENT + 10)
+#define MMIA_UMM_ZATT_QUERY_CNF_EV (DWORD)(MMIA_UMM_RSP_EVENT + 11)
+#define MMIA_UMM_EPS_MODE_SET_CNF_EV (DWORD)(MMIA_UMM_RSP_EVENT + 12)
+#define MMIA_UMM_EPS_MODE_QUERY_CNF_EV (DWORD)(MMIA_UMM_RSP_EVENT + 13)
+#define MMIA_UMM_SEARCH_PLMN_CNF_EV (DWORD)(MMIA_UMM_RSP_EVENT + 14)
+/* NOTE(review): lowercase "Ev" breaks the _EV naming convention; renaming would break callers */
+#define MMIA_UMM_CS_SRV_IND_Ev (DWORD)(MMIA_UMM_RSP_EVENT + 15)
+#define MMIA_UMM_VOICE_MODE_QUERY_CNF_EV (DWORD)(MMIA_UMM_RSP_EVENT + 16)
+#define MMIA_UMM_SYSCONFIG_QUERY_CNF_EV (DWORD)(MMIA_UMM_RSP_EVENT + 17)
+#define MMIA_UMM_NOTIFY_PS_STATE_EV (DWORD)(MMIA_UMM_RSP_EVENT + 18)
+#define MMIA_UMM_SUBMODE_IND_EV (DWORD)(MMIA_UMM_RSP_EVENT + 19)
+#define MMIA_UMM_SRVCC_IND_EV (DWORD)(MMIA_UMM_RSP_EVENT + 20)
+#define MMIA_UMM_PS_CONTEXT_CNF_EV (DWORD)(MMIA_UMM_RSP_EVENT + 21)
+#define MMIA_UMM_CAUSE_QUERY_CNF_EV (DWORD)(MMIA_UMM_RSP_EVENT + 22)
+#define MMIA_UMM_UPDATE_DUALPSSYSCONFIG_IND_EV (DWORD)(MMIA_UMM_RSP_EVENT + 23)
+#define MMIA_UMM_IMSNOTSUPPORT_IND_EV (DWORD)(MMIA_UMM_RSP_EVENT + 24)
+#define MMIA_UMM_PLMNLIST_BANDINFO_CNF_EV (DWORD)(MMIA_UMM_RSP_EVENT + 25)
+#define MMIA_UMM_XCELLINFO_CNF_EV (DWORD)(MMIA_UMM_RSP_EVENT + 26)
+#define MMIA_UMM_CSVOICE_QUERY_CNF_EV (DWORD)(MMIA_UMM_RSP_EVENT + 27)
+#define MMIA_UMM_SCAN_CNF_EV (DWORD)(MMIA_UMM_RSP_EVENT + 28)
+#define MMIA_UMM_CARDSWITCH_REQ_IND_EV (DWORD)(MMIA_UMM_RSP_EVENT + 29)
+#define MMIA_UMM_ECALLSPT_QUERY_CNF_EV (DWORD)(MMIA_UMM_RSP_EVENT + 30)
+#define MMIA_UMM_ECALLONLY_QUERY_CNF_EV (DWORD)(MMIA_UMM_RSP_EVENT + 31)
+#define MMIA_UMM_CAUSE_IND_EV (DWORD)(MMIA_UMM_RSP_EVENT + 32)
+#define MMIA_UMM_T10DEREG_IND_EV (DWORD)(MMIA_UMM_RSP_EVENT + 33)
+/* ========================================================================
+ MMIA-CC message ID definitions
+======================================================================== */
+/* Requests, offset from MMIA_CC_EVENT_BASE */
+#define MMIA_CC_MOC_REQ_EV (DWORD)(MMIA_CC_EVENT_BASE + 0)
+#define MMIA_CC_MTC_RSP_EV (DWORD)(MMIA_CC_EVENT_BASE + 1)
+#define MMIA_CC_ANS_MODE_REQ_EV (DWORD)(MMIA_CC_EVENT_BASE + 2)
+#define MMIA_CC_MODIFY_REQ_EV (DWORD)(MMIA_CC_EVENT_BASE + 3)
+#define MMIA_CC_DIS_MODE_REQ_EV (DWORD)(MMIA_CC_EVENT_BASE + 4)
+#define MMIA_CC_DTMF_REQ_EV (DWORD)(MMIA_CC_EVENT_BASE + 5)
+#define MMIA_CC_CHLD_REQ_EV (DWORD)(MMIA_CC_EVENT_BASE + 6)
+#define MMIA_CC_STATE_QUERY_REQ_EV (DWORD)(MMIA_CC_EVENT_BASE + 7)
+#define MMIA_CC_CAUSE_QUERY_REQ_EV (DWORD)(MMIA_CC_EVENT_BASE + 8)
+#define MMIA_CC_CSTA_QUERY_REQ_EV (DWORD)(MMIA_CC_EVENT_BASE + 9)
+#define MMIA_CC_CSTA_SET_REQ_EV (DWORD)(MMIA_CC_EVENT_BASE + 10)
+#define MMIA_CC_MODE_TO_MULTMEDIA_RSP_EV (DWORD)(MMIA_CC_EVENT_BASE + 11)
+#define MMIA_CC_CCM_QUERY_REQ_EV (DWORD)(MMIA_CC_EVENT_BASE + 12)
+#define MMIA_CC_ABORT_REQ_EV (DWORD)(MMIA_CC_EVENT_BASE + 13)
+#define MMIA_CC_STATE_REQ_EV (DWORD)(MMIA_CC_EVENT_BASE + 14)
+#define MMIA_CC_OPEN_VOICECHNL_REQ_EV (DWORD)(MMIA_CC_EVENT_BASE + 15)
+#define MMIA_CC_SRVCC_NOTOPEN_VOICECHNL_REQ_EV (DWORD)(MMIA_CC_EVENT_BASE + 16)
+#define MMIA_CC_T9TIMER_SET_REQ_EV (DWORD)(MMIA_CC_EVENT_BASE + 17)
+#define MMIA_CC_T9TIMER_QRY_REQ_EV (DWORD)(MMIA_CC_EVENT_BASE + 18)
+#define MMIA_CC_VOICEMODE_QRY_REQ_EV (DWORD)(MMIA_CC_EVENT_BASE + 19)
+#define MMIA_CC_RESETIVS_REQ_EV (DWORD)(MMIA_CC_EVENT_BASE + 20)
+
+/* Confirms/indications, offset from MMIA_CC_RSP_EVENT */
+#define MMIA_CC_MOC_CNF_EV (DWORD)(MMIA_CC_RSP_EVENT + 0)
+#define MMIA_CC_MTC_IND_EV (DWORD)(MMIA_CC_RSP_EVENT + 1)
+#define MMIA_CC_ANS_MODE_CNF_EV (DWORD)(MMIA_CC_RSP_EVENT + 2)
+#define MMIA_CC_MODIFY_CNF_EV (DWORD)(MMIA_CC_RSP_EVENT + 3)
+#define MMIA_CC_DISC_IND_EV (DWORD)(MMIA_CC_RSP_EVENT + 4)
+#define MMIA_CC_NOTIFY_IND_EV (DWORD)(MMIA_CC_RSP_EVENT + 5)
+#define MMIA_CC_AOC_IND_EV (DWORD)(MMIA_CC_RSP_EVENT + 6)
+#define MMIA_CC_SS_NOTIFY_IND_EV (DWORD)(MMIA_CC_RSP_EVENT + 7)
+#define MMIA_CC_STATE_QUERY_CNF_EV (DWORD)(MMIA_CC_RSP_EVENT + 8)
+#define MMIA_CC_COMMON_CNF_EV (DWORD)(MMIA_CC_RSP_EVENT + 9)
+#define MMIA_CC_CAUSE_QUERY_CNF_EV (DWORD)(MMIA_CC_RSP_EVENT + 10)
+#define MMIA_CC_PROC_INFO_IND_EV (DWORD)(MMIA_CC_RSP_EVENT + 11)
+#define MMIA_CC_CSTA_QUERY_CNF_EV (DWORD)(MMIA_CC_RSP_EVENT + 12)
+#define MMIA_CC_MODE_TO_MULTMEDIAIND_EV (DWORD)(MMIA_CC_RSP_EVENT + 13)
+#define MMIA_CC_DISC_CNF_EV (DWORD)(MMIA_CC_RSP_EVENT + 14)
+#define MMIA_CC_CALL_STATE_IND_EV (DWORD)(MMIA_CC_RSP_EVENT + 15)
+#define MMIA_CC_OPEN_VOICE_CHNL_IND_EV (DWORD)(MMIA_CC_RSP_EVENT + 16)
+#define MMIA_CC_CLOSE_VOICE_CHNL_IND_EV (DWORD)(MMIA_CC_RSP_EVENT + 17)
+#define MMIA_CC_CCM_QUERY_CNF_EV (DWORD)(MMIA_CC_RSP_EVENT + 18)
+#define MMIA_CC_CCWV_IND_EV (DWORD)(MMIA_CC_RSP_EVENT + 19)
+#define MMIA_CC_NOTIFY_AOC_TIMER_IND_EV (DWORD)(MMIA_CC_RSP_EVENT + 20)
+#define MMIA_CC_CNAP_IND_EV (DWORD)(MMIA_CC_RSP_EVENT + 21)
+#define MMIA_CC_DUALPSCFG_IND_EV (DWORD)(MMIA_CC_RSP_EVENT + 22)
+#define MMIA_CC_STOP_LOCALVOICE_IND_EV (DWORD)(MMIA_CC_RSP_EVENT + 23)
+#define MMIA_CC_CHLD_CNF_EV (DWORD)(MMIA_CC_RSP_EVENT + 24)
+#define MMIA_CC_DTMF_CNF_EV (DWORD)(MMIA_CC_RSP_EVENT + 25)
+#define MMIA_CC_CSTA_SET_CNF_EV (DWORD)(MMIA_CC_RSP_EVENT + 26)
+#define MMIA_CC_START_LOCALVOICE_IND_EV (DWORD)(MMIA_CC_RSP_EVENT + 27)
+#define MMIA_CC_ZECALL_IND_EV (DWORD)(MMIA_CC_RSP_EVENT + 28)
+#define MMIA_CC_CECN_IND_EV (DWORD)(MMIA_CC_RSP_EVENT + 29)
+#define MMIA_CC_ECALL_WORKSTATE_IND_EV (DWORD)(MMIA_CC_RSP_EVENT + 30)
+#define MMIA_CC_T9TIMER_QRY_CNF_EV (DWORD)(MMIA_CC_RSP_EVENT + 31)
+#define MMIA_CC_CALLBACK_EVENT_EV (DWORD)(MMIA_CC_RSP_EVENT + 32)
+#define MMIA_CC_VOICEMODE_QRY_CNF_EV (DWORD)(MMIA_CC_RSP_EVENT + 33)
+#define MMIA_CC_RESETIVS_CNF_EV (DWORD)(MMIA_CC_RSP_EVENT + 34)
+
+/* ========================================================================
+ MMIA-SMS message ID definitions
+======================================================================== */
+/* Requests, offset from MMIA_SMS_EVENT_BASE */
+#define MMIA_SMS_SUBMIT_SEND_REQ_EV (DWORD)(MMIA_SMS_EVENT_BASE + 0)
+#define MMIA_SMS_COMMAND_SEND_REQ_EV (DWORD)(MMIA_SMS_EVENT_BASE + 1)
+#define MMIA_SMS_REC_RSP_REQ_EV (DWORD)(MMIA_SMS_EVENT_BASE + 2)
+#define MMIA_SMS_MEM_AVAIL_REQ_EV (DWORD)(MMIA_SMS_EVENT_BASE + 3)
+#define MMIA_SMS_STORE_REPORT_REQ_EV (DWORD)(MMIA_SMS_EVENT_BASE + 4)
+#define MMIA_SMS_ABORT_MO_REQ_EV (DWORD)(MMIA_SMS_EVENT_BASE + 5)
+
+/* Confirms/indications, offset from MMIA_SMS_RSP_EVENT */
+#define MMIA_SMS_MSG_SEND_CNF_EV (DWORD)(MMIA_SMS_RSP_EVENT + 0)
+#define MMIA_SMS_DELIVER_REC_IND_EV (DWORD)(MMIA_SMS_RSP_EVENT + 1)
+#define MMIA_SMS_STATUS_REC_IND_EV (DWORD)(MMIA_SMS_RSP_EVENT + 2)
+#define MMIA_SMS_REC_RSP_CNF_EV (DWORD)(MMIA_SMS_RSP_EVENT + 3)
+#define MMIA_SMS_MMS_DISABLE_IND_EV (DWORD)(MMIA_SMS_RSP_EVENT + 4)
+#define MMIA_SMS_MEM_AVAIL_CNF_EV (DWORD)(MMIA_SMS_RSP_EVENT + 5)
+#define MMIA_SMS_COMMON_CNF_EV (DWORD)(MMIA_SMS_RSP_EVENT + 6)
+
+/* ========================================================================
+ MMIA-SS message ID definitions
+======================================================================== */
+/* Requests, offset from MMIA_SS_EVENT_BASE */
+#define MMIA_SS_CLCK_SET_REQ_EV (DWORD)(MMIA_SS_EVENT_BASE + 0)
+#define MMIA_SS_CPWD_SET_REQ_EV (DWORD)(MMIA_SS_EVENT_BASE + 1)
+#define MMIA_SS_CLIP_READ_REQ_EV (DWORD)(MMIA_SS_EVENT_BASE + 2)
+#define MMIA_SS_CLIR_READ_REQ_EV (DWORD)(MMIA_SS_EVENT_BASE + 3)
+#define MMIA_SS_COLP_READ_REQ_EV (DWORD)(MMIA_SS_EVENT_BASE + 4)
+#define MMIA_SS_COLR_READ_REQ_EV (DWORD)(MMIA_SS_EVENT_BASE + 5)
+#define MMIA_SS_CCFC_SET_REQ_EV (DWORD)(MMIA_SS_EVENT_BASE + 6)
+#define MMIA_SS_CCWA_SET_REQ_EV (DWORD)(MMIA_SS_EVENT_BASE + 7)
+#define MMIA_SS_CUSD_SET_REQ_EV (DWORD)(MMIA_SS_EVENT_BASE + 8)
+#define MMIA_SS_ABORT_REQ_EV (DWORD)(MMIA_SS_EVENT_BASE + 9)
+#define MMIA_SS_USSD_CANCEL_REQ_EV (DWORD)(MMIA_SS_EVENT_BASE + 10)
+#define MMIA_SS_CNAP_READ_REQ_EV (DWORD)(MMIA_SS_EVENT_BASE + 11)
+#define MMIA_SS_MOLR_ENABLE_REQ_EV (DWORD)(MMIA_SS_EVENT_BASE + 12)
+#define MMIA_SS_MOLR_DISABLE_REQ_EV (DWORD)(MMIA_SS_EVENT_BASE + 13)
+#define MMIA_SS_MTLR_ANS_REQ_EV (DWORD)(MMIA_SS_EVENT_BASE + 14)
+
+/* Confirms/indications, offset from MMIA_SS_RSP_EVENT */
+#define MMIA_SS_COMMON_CNF_EV (DWORD)(MMIA_SS_RSP_EVENT + 0)
+#define MMIA_SS_CLCK_QUERY_CNF_EV (DWORD)(MMIA_SS_RSP_EVENT + 1)
+#define MMIA_SS_CLIP_READ_CNF_EV (DWORD)(MMIA_SS_RSP_EVENT + 2)
+#define MMIA_SS_CLIR_READ_CNF_EV (DWORD)(MMIA_SS_RSP_EVENT + 3)
+#define MMIA_SS_COLP_READ_CNF_EV (DWORD)(MMIA_SS_RSP_EVENT + 4)
+#define MMIA_SS_COLR_READ_CNF_EV (DWORD)(MMIA_SS_RSP_EVENT + 5)
+#define MMIA_SS_CCFC_QUERY_CNF_EV (DWORD)(MMIA_SS_RSP_EVENT + 6)
+#define MMIA_SS_CCWA_QUERY_CNF_EV (DWORD)(MMIA_SS_RSP_EVENT + 7)
+#define MMIA_SS_CUSD_MT_IND_EV (DWORD)(MMIA_SS_RSP_EVENT + 8)
+#define MMIA_SS_CNAP_READ_CNF_EV (DWORD)(MMIA_SS_RSP_EVENT + 9)
+#define MMIA_SS_MOLR_RES_IND_EV (DWORD)(MMIA_SS_RSP_EVENT + 10)
+#define MMIA_SS_MTLOCIREQ_NOTIFY_IND_EV (DWORD)(MMIA_SS_RSP_EVENT + 11)
+/* ========================================================================
+ MMIA-SM message ID definitions
+======================================================================== */
+/* Requests, offset from MMIA_SM_EVENT_BASE */
+#define MMIA_SM_NEG_QOS_QUERY_REQ_EV (DWORD)(MMIA_SM_EVENT_BASE + 0)
+#define MMIA_SM_NEG_EQOS_QUERY_REQ_EV (DWORD)(MMIA_SM_EVENT_BASE + 1)
+#define MMIA_SM_ACTIVATED_CID_QUERY_REQ_EV (DWORD)(MMIA_SM_EVENT_BASE + 2)
+#define MMIA_SM_PDP_STATUS_QUERY_REQ_EV (DWORD)(MMIA_SM_EVENT_BASE + 3)
+#define MMIA_SM_PDP_ADDR_QUERY_REQ_EV (DWORD)(MMIA_SM_EVENT_BASE + 4)
+#define MMIA_SM_PDP_CAUSE_QUERY_REQ_EV (DWORD)(MMIA_SM_EVENT_BASE + 5)
+#define MMIA_SM_PDP_ACTIVATE_REQ_EV (DWORD)(MMIA_SM_EVENT_BASE + 6)
+#define MMIA_SM_PDP_DEACTIVATE_REQ_EV (DWORD)(MMIA_SM_EVENT_BASE + 7)
+#define MMIA_SM_PDP_MODIFY_REQ_EV (DWORD)(MMIA_SM_EVENT_BASE + 8)
+#define MMIA_SM_DATA_STATE_REQ_EV (DWORD)(MMIA_SM_EVENT_BASE + 9)
+#define MMIA_SM_MT_ACT_ANS_REQ_EV (DWORD)(MMIA_SM_EVENT_BASE + 10)
+#define MMIA_SM_CPSB_QUERY_REQ_EV (DWORD)(MMIA_SM_EVENT_BASE + 11)
+#define MMIA_SM_CGCONTRDP_REQ_EV (DWORD)(MMIA_SM_EVENT_BASE + 12)
+#define MMIA_SM_CGSCONTRDP_REQ_EV (DWORD)(MMIA_SM_EVENT_BASE + 13)
+#define MMIA_SM_CGTFTRDP_REQ_EV (DWORD)(MMIA_SM_EVENT_BASE + 14)
+
+/* The messages above have corresponding AT commands */
+#define MMIA_SM_ABORT_REQ_EV (DWORD)(MMIA_SM_EVENT_BASE + 15)
+#define MMIA_SM_IP_PDP_ACT_REQ_EV (DWORD)(MMIA_SM_EVENT_BASE + 16)
+#define MMIA_SM_IDLE_CHNL_QUERY_RSP_EV (DWORD)(MMIA_SM_EVENT_BASE + 17)
+#define MMIA_SM_GET_PCO_RSP_EV (DWORD)(MMIA_SM_EVENT_BASE + 18)
+#define MMIA_SM_DISCONNECT_REQ_EV (DWORD)(MMIA_SM_EVENT_BASE + 19)
+#define MMIA_SM_CONTEXT_REQ_EV (DWORD)(MMIA_SM_EVENT_BASE + 20)
+#define MMIA_SM_CONTEXT_IND_EV (DWORD)(MMIA_SM_EVENT_BASE + 21)
+
+/* Confirms/indications, offset from MMIA_SM_RSP_EVENT */
+#define MMIA_SM_NEG_QOS_QUERY_CNF_EV (DWORD)(MMIA_SM_RSP_EVENT + 0)
+#define MMIA_SM_NEG_EQOS_QUERY_CNF_EV (DWORD)(MMIA_SM_RSP_EVENT + 1)
+#define MMIA_SM_ACTIVATED_CID_QUERY_CNF_EV (DWORD)(MMIA_SM_RSP_EVENT + 2)
+#define MMIA_SM_PDP_STATUS_QUERY_CNF_EV (DWORD)(MMIA_SM_RSP_EVENT + 3)
+#define MMIA_SM_PDP_ADDR_QUERY_CNF_EV (DWORD)(MMIA_SM_RSP_EVENT + 4)
+#define MMIA_SM_PDP_CAUSE_QUERY_CNF_EV (DWORD)(MMIA_SM_RSP_EVENT + 5)
+#define MMIA_SM_PDP_ACTIVATE_CNF_EV (DWORD)(MMIA_SM_RSP_EVENT + 6)
+#define MMIA_SM_PDP_DEACTIVATE_CNF_EV (DWORD)(MMIA_SM_RSP_EVENT + 7)
+#define MMIA_SM_PDP_MODIFY_CNF_EV (DWORD)(MMIA_SM_RSP_EVENT + 8)
+#define MMIA_SM_PDP_ACTIVATE_IND_EV (DWORD)(MMIA_SM_RSP_EVENT + 9)
+#define MMIA_SM_CGEV_IND_EV (DWORD)(MMIA_SM_RSP_EVENT + 10)
+#define MMIA_SM_IP_PDP_ACT_CNF_EV (DWORD)(MMIA_SM_RSP_EVENT + 11)
+#define MMIA_SM_CLOSE_CHNL_IND_EV (DWORD)(MMIA_SM_RSP_EVENT + 12)
+#define MMIA_SM_IDLE_CHNL_QUERY_IND_EV (DWORD)(MMIA_SM_RSP_EVENT + 13)
+#define MMIA_SM_GET_PCO_IND_EV (DWORD)(MMIA_SM_RSP_EVENT + 14)
+#define MMIA_SM_COMMON_CNF_EV (DWORD)(MMIA_SM_RSP_EVENT + 15)
+#define MMIA_SM_CONNECT_IND_EV (DWORD)(MMIA_SM_RSP_EVENT + 16)
+#define MMIA_SM_NO_CARRIER_CNF_EV (DWORD)(MMIA_SM_RSP_EVENT + 17)
+#define MMIA_SM_CID_DEACT_IND_EV (DWORD)(MMIA_SM_RSP_EVENT + 18)
+#define MMIA_SM_CPSB_QUERY_CNF_EV (DWORD)(MMIA_SM_RSP_EVENT + 19)
+#define MMIA_SM_CPSB_IND_EV (DWORD)(MMIA_SM_RSP_EVENT + 20)
+#define MMIA_SM_CGCONTRDP_CNF_EV (DWORD)(MMIA_SM_RSP_EVENT + 21)
+#define MMIA_SM_CGSCONTRDP_CNF_EV (DWORD)(MMIA_SM_RSP_EVENT + 22)
+#define MMIA_SM_CGTFTRDP_CNF_EV (DWORD)(MMIA_SM_RSP_EVENT + 23)
+#define MMIA_SM_NOTIFICATION_IND_EV (DWORD)(MMIA_SM_RSP_EVENT + 24)
+#define MMIA_SM_MT_ACT_ANS_CNF_EV (DWORD)(MMIA_SM_RSP_EVENT + 25)
+#define MMIA_SM_CONTEXT_CNF_EV (DWORD)(MMIA_SM_RSP_EVENT + 26)
+#define MMIA_SM_MSISDN_IND_EV (DWORD)(MMIA_SM_RSP_EVENT + 27)
+
+/* ========================================================================
+ Message ID definitions between the ESM and MMIA modules
+======================================================================== */
+/* Requests, offset from MMIA_ESM_EVENT_BASE.
+   Offsets +6 and +10 are only defined under BTRUNK_SUPPORT; the gaps are intentional. */
+#define MMIA_ESM_MT_EPS_BEARER_ACT_REQ_EV (DWORD)(MMIA_ESM_EVENT_BASE + 0) /*only for R7&R5*/
+#define MMIA_ESM_EPS_BEARER_MOD_REQ_EV (DWORD)(MMIA_ESM_EVENT_BASE + 1)
+#define MMIA_ESM_EBR_MOD_QUERY_REQ_EV (DWORD)(MMIA_ESM_EVENT_BASE + 2)
+#define MMIA_ESM_EPS_QOS_QUERY_REQ_EV (DWORD)(MMIA_ESM_EVENT_BASE + 3) /*only for R7&R5*/
+#define MMIA_ESM_CGATFT_REQ_EV (DWORD)(MMIA_ESM_EVENT_BASE + 4)
+#define MMIA_ESM_ABORT_REQ_EV (DWORD)(MMIA_ESM_EVENT_BASE + 5) /*only for R7&R5*/
+#ifdef BTRUNK_SUPPORT
+#define PTT_MMIA_ESM_TAUTYPE_REQ_EV (DWORD)(MMIA_ESM_EVENT_BASE + 6) /* added for trunking (BTRUNK) */
+#endif
+#define MMIA_ESM_PDP_CAUSE_QUERY_REQ_EV (DWORD)(MMIA_ESM_EVENT_BASE + 7)
+
+/* Confirms/indications, offset from MMIA_ESM_RSP_EVENT */
+#define MMIA_ESM_EPS_BEARER_ACT_IND_EV (DWORD)(MMIA_ESM_RSP_EVENT + 0)
+#define MMIA_ESM_EPS_BEARER_DEACT_IND_EV (DWORD)(MMIA_ESM_RSP_EVENT + 1)
+#define MMIA_ESM_EPS_BEARER_MOD_IND_EV (DWORD)(MMIA_ESM_RSP_EVENT + 2)
+#define MMIA_ESM_MT_EPS_BEARER_ACT_IND_EV (DWORD)(MMIA_ESM_RSP_EVENT + 3) /*only for R7&R5*/
+#define MMIA_ESM_EPS_BEARER_MOD_CNF_EV (DWORD)(MMIA_ESM_RSP_EVENT + 4)
+#define MMIA_ESM_EPS_BEARER_MOD_REJ_EV (DWORD)(MMIA_ESM_RSP_EVENT + 5)
+#define MMIA_ESM_EBR_MOD_QUERY_CNF_EV (DWORD)(MMIA_ESM_RSP_EVENT + 6)
+#define MMIA_ESM_EPS_QOS_QUERY_CNF_EV (DWORD)(MMIA_ESM_RSP_EVENT + 7) /*only for R7&R5*/
+#define MMIA_ESM_CGATFT_CNF_EV (DWORD)(MMIA_ESM_RSP_EVENT + 8)
+#define MMIA_ESM_PDP_ADDR_QUERY_CNF_EV (DWORD)(MMIA_ESM_RSP_EVENT + 9) /*only for R7&R5*/
+#ifdef BTRUNK_SUPPORT
+#define PTT_MMIA_ESM_TAUTYPE_CNF_EV (DWORD)(MMIA_ESM_RSP_EVENT + 10) /* added for trunking (BTRUNK) */
+#endif
+#define MMIA_ESM_PDP_CAUSE_QUERY_CNF_EV (DWORD)(MMIA_ESM_RSP_EVENT + 11)
+
+/* ========================================================================
+ MMIA-CBS message ID definitions
+======================================================================== */
+#define MMIA_CBS_ACTIVATE_REQ_EV (DWORD)(MMIA_CBS_EVENT_BASE + 0)
+
+#define MMIA_CBS_ACTIVATE_CNF_EV (DWORD)(MMIA_CBS_RSP_EVENT + 0)
+#define MMIA_CBS_DATA_IND_EV (DWORD)(MMIA_CBS_RSP_EVENT + 1)
+
+/* ========================================================================
+ MMIA-AS message ID definitions
+======================================================================== */
+/* Requests, offset from MMIA_AS_EVENT_BASE */
+#define MMIA_AS_EM_CELL_INFO_REQ_EV (DWORD)(MMIA_AS_EVENT_BASE + 0)
+/* NOTE(review): lacks the _EV suffix used by sibling macros; kept for compatibility */
+#define MMIA_AS_EM_HO_INFO_REQ (DWORD)(MMIA_AS_EVENT_BASE + 1)
+#define MMIA_AS_EM_CELLRESORCCOCOUNT_REQ_EV (DWORD)(MMIA_AS_EVENT_BASE + 2)
+#define MMIA_AS_RPT_RXLEV_REQ_EV (DWORD)(MMIA_AS_EVENT_BASE + 3)
+#define MMIA_AS_QUERY_RXLEV_REQ_EV (DWORD)(MMIA_AS_EVENT_BASE + 4)
+#define MMIA_PDI_SELL_STAT_START_SEND_PACKET_IND_EV (DWORD)(MMIA_AS_EVENT_BASE + 5)
+#define MMIA_PDI_SELL_STAT_ABORT_IND_EV (DWORD)(MMIA_AS_EVENT_BASE + 6)
+#define MMIA_EUCSR_LTEINFO_REQ_EV (DWORD)(MMIA_AS_EVENT_BASE + 7)
+#define MMIA_L1E_ZEPCG_REQ (DWORD)(MMIA_AS_EVENT_BASE + 8)
+#define MMIA_AS_B39_INFO_REQ_EV (DWORD)(MMIA_AS_EVENT_BASE + 9)
+#define MMIA_AS_RSSI_QUERY_REQ_EV (DWORD)(MMIA_AS_EVENT_BASE + 10)
+#define MMIA_AS_SINR_QUERY_REQ_EV (DWORD)(MMIA_AS_EVENT_BASE + 11)
+#define MMIA_AS_QUERY_EM_CELL_INFO_REQ_EV (DWORD)(MMIA_AS_EVENT_BASE + 12)
+#define MMIA_AS_TMGI_ACTIVATE_REQ_EV (DWORD)(MMIA_AS_EVENT_BASE + 13)
+#define MMIA_AS_TMGI_DEACTIVATE_REQ_EV (DWORD)(MMIA_AS_EVENT_BASE + 14)
+#define MMIA_AS_SAI_LIST_QUERY_REQ_EV (DWORD)(MMIA_AS_EVENT_BASE + 15)
+#define MMIA_AS_TMGI_LIST_QUERY_REQ_EV (DWORD)(MMIA_AS_EVENT_BASE + 16)
+#define MMIA_AS_TMGI_LIST_REQ_EV (DWORD)(MMIA_AS_EVENT_BASE + 17)
+#define MMIA_AS_MBMS_PREFERENCE_REQ_EV (DWORD)(MMIA_AS_EVENT_BASE + 18)
+#define MMIA_AS_TMGI_LIST_REPORT_REQ_EV (DWORD)(MMIA_AS_EVENT_BASE + 19)
+#define MMIA_AS_SAI_LIST_REPORT_REQ_EV (DWORD)(MMIA_AS_EVENT_BASE + 20)
+#define MMIA_AS_NW_TIME_QUERY_REQ_EV (DWORD)(MMIA_AS_EVENT_BASE + 21)
+#define MMIA_AS_QUERY_CESQ_REQ_EV (DWORD)(MMIA_AS_EVENT_BASE + 22)
+#define MMIA_AS_EM_LTE_HO_SET_REQ_EV (DWORD)(MMIA_AS_EVENT_BASE + 23)
+#define MMIA_AS_EM_LTE_HO_SET_QUERY_REQ_EV (DWORD)(MMIA_AS_EVENT_BASE + 24)
+#define MMIA_L1W_ZWPCG_REQ_EV (DWORD)(MMIA_AS_EVENT_BASE + 25)
+#define MMIA_L1T_ZTPCG_REQ_EV (DWORD)(MMIA_AS_EVENT_BASE + 26)
+#define MMIA_GRR_ZGPCG_REQ_EV (DWORD)(MMIA_AS_EVENT_BASE + 27)
+#define MMIA_AS_QUERY_ZCSQ_REQ_EV (DWORD)(MMIA_AS_EVENT_BASE + 28)
+#define MMIA_AS_LBS_REQ_EV (DWORD)(MMIA_AS_EVENT_BASE + 29)
+#define MMIA_AS_IMS_DATA_DELETE_REQ_EV (DWORD)(MMIA_AS_EVENT_BASE + 30)
+#define MMIA_AS_CARD_SWITCH_REQ_EV (DWORD)(MMIA_AS_EVENT_BASE + 31)
+#define MMIA_AS_CARD_SWITCH_IND_EV (DWORD)(MMIA_AS_EVENT_BASE + 32)
+
+/* Confirms/indications, offset from MMIA_AS_RSP_EVENT */
+#define MMIA_AS_EM_UCELL_INFO_IND_EV (DWORD)(MMIA_AS_RSP_EVENT + 0)
+#define MMIA_AS_EM_UHO_INFO_IND_EV (DWORD)(MMIA_AS_RSP_EVENT + 1)
+#define MMIA_AS_RPT_RXLEV_IND_EV (DWORD)(MMIA_AS_RSP_EVENT + 2)
+#define MMIA_AS_QUERY_RXLEV_IND_EV (DWORD)(MMIA_AS_RSP_EVENT + 3)
+#define MMIA_EUSIR_ETWS_PRIMARY_IND_EV (DWORD)(MMIA_AS_RSP_EVENT + 4)
+#define MMIA_EUSIR_ETWS_SECONDARY_IND_EV (DWORD)(MMIA_AS_RSP_EVENT + 5)
+#define MMIA_AS_EM_EUCELL_INFO_IND_EV (DWORD)(MMIA_AS_RSP_EVENT + 6)
+#define MMIA_EUCSR_LTEINFO_IND_EV (DWORD)(MMIA_AS_RSP_EVENT + 7)
+#define MMIA_L1E_ZEPCG_CNF (DWORD)(MMIA_AS_RSP_EVENT + 8)
+#define MMIA_AS_B39_INFO_IND_EV (DWORD)(MMIA_AS_RSP_EVENT + 9)
+#define AS_EM_CELL_INFO_IND_EV (DWORD)(MMIA_AS_RSP_EVENT + 10)
+#define MMIA_AS_RSSI_QUERY_CNF_EV (DWORD)(MMIA_AS_RSP_EVENT + 11)
+#define MMIA_AS_SINR_QUERY_CNF_EV (DWORD)(MMIA_AS_RSP_EVENT + 12)
+#define MMIA_AS_QUERY_EM_UCELL_INFO_CNF_EV (DWORD)(MMIA_AS_RSP_EVENT + 13)
+#define MMIA_AS_QUERY_EM_EUCELL_INFO_CNF_EV (DWORD)(MMIA_AS_RSP_EVENT + 14)
+#define RR_QUERY_EM_CELL_INFO_CNF_EV (DWORD)(MMIA_AS_RSP_EVENT + 15)
+#define AS_QUERY_EM_CELL_INFO_CNF_EV (DWORD)(MMIA_AS_RSP_EVENT + 16)
+#define MMIA_AS_TMGI_ACTIVATE_CNF_EV (DWORD)(MMIA_AS_RSP_EVENT + 17)
+#define MMIA_AS_TMGI_DEACTIVATE_CNF_EV (DWORD)(MMIA_AS_RSP_EVENT + 18)
+#define MMIA_AS_SAI_LIST_QUERY_RESP_EV (DWORD)(MMIA_AS_RSP_EVENT + 19)
+#define MMIA_AS_SAI_LIST_IND_EV (DWORD)(MMIA_AS_RSP_EVENT + 20)
+#define MMIA_AS_TMGI_LIST_QUERY_RESP_EV (DWORD)(MMIA_AS_RSP_EVENT + 21)
+#define MMIA_AS_TMGI_LIST_IND_EV (DWORD)(MMIA_AS_RSP_EVENT + 22)
+#define MMIA_AS_MBMS_SERVICE_SUSPEND_IND_EV (DWORD)(MMIA_AS_RSP_EVENT + 23)
+#define MMIA_AS_MBMS_SERVICE_RESUME_IND_EV (DWORD)(MMIA_AS_RSP_EVENT + 24)
+#define MMIA_AS_COMMON_CFG_CNF_EV (DWORD)(MMIA_AS_RSP_EVENT + 25)
+#define MMIA_AS_NW_TIME_QUERY_RESP_EV (DWORD)(MMIA_AS_RSP_EVENT + 26)
+#define ATI_EUCSR_HIGHT_CALL_IND_EV (DWORD)(MMIA_AS_RSP_EVENT + 27)
+#define MMIA_AS_QUERY_CESQ_CNF_EV (DWORD)(MMIA_AS_RSP_EVENT + 28)
+#define ATI_EUCSR_BUSY_ALERTING_IND_EV (DWORD)(MMIA_AS_RSP_EVENT + 29)
+#define MMIA_ASC_LTE_LOSTCOVERAGE_IND_EV (DWORD)(MMIA_AS_RSP_EVENT + 30)
+#define MMIA_AS_EM_LTE_HO_INFO_IND_EV (DWORD)(MMIA_AS_RSP_EVENT + 31)
+#define MMIA_AS_EM_LTE_HO_SET_QUERY_CNF_EV (DWORD)(MMIA_AS_RSP_EVENT + 32)
+#define MMIA_L1W_ZWPCG_CNF_EV (DWORD)(MMIA_AS_RSP_EVENT + 33)
+#define MMIA_L1T_ZTPCG_CNF_EV (DWORD)(MMIA_AS_RSP_EVENT + 34)
+#define MMIA_GRR_ZGPCG_CNF_EV (DWORD)(MMIA_AS_RSP_EVENT + 35)
+#define MMIA_AS_QUERY_ZCSQ_CNF_EV (DWORD)(MMIA_AS_RSP_EVENT + 36)
+#define MMIA_AS_LBS_CNF_EV (DWORD)(MMIA_AS_RSP_EVENT + 37)
+#define MMIA_AS_UL_PARAM_IND_EV (DWORD)(MMIA_AS_RSP_EVENT + 38)
+#define MMIA_AS_CARD_SWITCH_CNF_EV (DWORD)(MMIA_AS_RSP_EVENT + 39)
+/* ========================================================================
+ PDI - ATI message ID definitions
+======================================================================== */
+#define ATI_PDI_DATA_REQ_EV (DWORD)(ATI_PDI_EVENT_BASE + 0)
+#define ATI_PDI_DATA_IND_EV (DWORD)(ATI_PDI_EVENT_BASE + 1)
+#define PSI_PDI_DATA_IND_EV (DWORD)(ATI_PDI_EVENT_BASE + 2)
+
+/* ========================================================================
+ CSD - ATI message ID definitions
+======================================================================== */
+#define ATI_CSD_DATA_IND_EV (DWORD)(ATI_CSD_EVENT_BASE + 0)
+
+#define ATI_CSD_FLOW_CTRL_ON_EV (DWORD)(ATI_CSD_RSP_EVENT + 0)
+#define ATI_CSD_FLOW_CTRL_OFF_EV (DWORD)(ATI_CSD_RSP_EVENT + 1)
+
+/* Global-variable set/get message definitions */
+#define GVAR_MMIA_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 0)
+#define GVAR_MMIA_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 1)
+
+#define GVAR_UMM_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 2)
+#define GVAR_UMM_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 3)
+
+#define GVAR_MM_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 4)
+#define GVAR_MM_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 5)
+
+#define GVAR_GMM_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 6)
+#define GVAR_GMM_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 7)
+
+#define GVAR_UICCMNG_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 8)
+#define GVAR_UICCMNG_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 9)
+
+#define GVAR_CC_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 10)
+#define GVAR_CC_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 11)
+
+#define GVAR_SM_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 12)
+#define GVAR_SM_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 13)
+
+#define GVAR_SMS_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 14)
+#define GVAR_SMS_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 15)
+
+#define GVAR_SS_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 16)
+#define GVAR_SS_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 17)
+
+#define GVAR_DS_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 18)
+#define GVAR_DS_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 19)
+#define GVAR_RA_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 20)
+#define GVAR_RA_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 21)
+#define GVAR_RLP_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 22)
+#define GVAR_RLP_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 23)
+
+#define GVAR_CBS_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 24)
+#define GVAR_CBS_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 25)
+
+#define GVAR_URRC_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 26)
+#define GVAR_URRC_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 27)
+
+#define GVAR_UMTC_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 28)
+#define GVAR_UMTC_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 29)
+
+#define GVAR_UCER_CONTEXT_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 30)
+#define GVAR_UCER_CONTEXT_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 31)
+#define GVAR_UCER_SECURITY_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 32)
+#define GVAR_UCER_SECURITY_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 33)
+
+#define GVAR_UCSR_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 34)
+#define GVAR_UCSR_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 35)
+
+#define GVAR_USIR_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 36)
+#define GVAR_USIR_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 37)
+
+#define GVAR_UMCR_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 38)
+#define GVAR_UMCR_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 39)
+
+#define GVAR_URBC_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 40)
+#define GVAR_URBC_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 41)
+
+#define GVAR_UCMR_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 42)
+#define GVAR_UCMR_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 43)
+
+#define GVAR_URLC_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 44)
+#define GVAR_URLC_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 45)
+
+#define GVAR_UMAC_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 46)
+#define GVAR_UMAC_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 47)
+#define GVAR_PDCP_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 48)
+#define GVAR_PDCP_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 49)
+#define GVAR_RABM_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 50)
+#define GVAR_RABM_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 51)
+
+#define GVAR_PDI_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 52)
+#define GVAR_PDI_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 53)
+
+#define GVAR_SCI_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 54)
+#define GVAR_SCI_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 55)
+
+#define GVAR_GSMA_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 56)
+#define GVAR_GSMA_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 57)
+
+#define GVAR_UICC_DEV_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 58)
+#define GVAR_UICC_DEV_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 59)
+
+#define GVAR_ATMEM_DEV_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 60)
+#define GVAR_ATMEM_DEV_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 61)
+
+#define GVAR_NV_DEV_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 62)
+#define GVAR_NV_DEV_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 63)
+
+#define GVAR_ASC_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 64)
+#define GVAR_ASC_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 65)
+
+#define GVAR_EMM_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 66)
+#define GVAR_EMM_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 67)
+
+#define GVAR_ESM_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 68)
+#define GVAR_ESM_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 69)
+
+/*WCDMA GVAR_EVENT_BASE=150*/
+#define GVAR_WRRC_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 70)
+#define GVAR_WRRC_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 71)
+
+#define GVAR_WMTC_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 72)
+#define GVAR_WMTC_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 73)
+
+#define GVAR_WCER_CONTEXT_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 74)
+#define GVAR_WCER_CONTEXT_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 75)
+#define GVAR_WCER_SECURITY_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 76)
+#define GVAR_WCER_SECURITY_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 77)
+
+#define GVAR_WCSR_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 78)
+#define GVAR_WCSR_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 79)
+
+#define GVAR_WSIR_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 80)
+#define GVAR_WSIR_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 81)
+
+#define GVAR_WMCR_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 82)
+#define GVAR_WMCR_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 83)
+
+#define GVAR_WRBC_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 84)
+#define GVAR_WRBC_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 85)
+
+#define GVAR_WCMR_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 86)
+#define GVAR_WCMR_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 87)
+
+#define GVAR_WRLC_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 88)
+#define GVAR_WRLC_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 89)
+
+#define GVAR_WMAC_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 90)
+#define GVAR_WMAC_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 91)
+
+#define GVAR_ECSR_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 92)
+#define GVAR_ECSR_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 93)
+
+#define GVAR_ECER_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 94)
+#define GVAR_ECER_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 95)
+
+#define GVAR_STM_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 96)
+#define GVAR_STM_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 97)
+#define GVAR_TSM_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 98)
+#define GVAR_TSM_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 99)
+
+#define GVAR_LPP_GET_REQ_EV (DWORD)(GVAR_EVENT_BASE + 100)
+#define GVAR_LPP_GET_CNF_EV (DWORD)(GVAR_EVENT_BASE + 101)
+
+
+#define RRAT_RXSTAT_IND (DWORD)(GRR_EVENT_BASE + 140)
+#define RRMI_RXSTAT_IND (DWORD)(GRR_EVENT_BASE + 141)
+#define RR_EM_HO_INFO_IND (DWORD)(GRR_EVENT_BASE + 142)
+#define RR_EM_CELL_INFO_IND (DWORD)(GRR_EVENT_BASE + 143)
+#define ERRC_CELL_CHANGE_CNF_EV (DWORD)(GRR_EVENT_BASE + 144)
+#define ERRC_CELL_CHANGE_REJ_EV (DWORD)(GRR_EVENT_BASE + 145)
+#define ERRC_RESEL_CNF_EV (DWORD)(GRR_EVENT_BASE + 146)
+#define ERRC_RESEL_REJ_EV (DWORD)(GRR_EVENT_BASE + 147)
+#define ERRC_CELL_SEARCH_CNF_EV (DWORD)(GRR_EVENT_BASE + 148)
+#define ERRC_CELL_SEARCH_REJ_EV (DWORD)(GRR_EVENT_BASE + 149)
+#define URRC_PSHO_CNF_EV (DWORD)(GRR_EVENT_BASE + 150)
+#define URRC_PSHO_REJ_EV (DWORD)(GRR_EVENT_BASE + 151)
+#define ERRC_PSHO_CNF_EV (DWORD)(GRR_EVENT_BASE + 152)
+#define ERRC_PSHO_REJ_EV (DWORD)(GRR_EVENT_BASE + 153)
+#define RR_PSHO_REQ_EV (DWORD)(GRR_EVENT_BASE + 154)
+#define MAC_GRR_PSHO_CNF_EV (DWORD)(GRR_EVENT_BASE + 155)
+#define MAC_GRR_PSHO_REJ_EV (DWORD)(GRR_EVENT_BASE + 156)
+#define MAC_GRR_PSHO_RETURN_CNF_EV (DWORD)(GRR_EVENT_BASE + 157)
+#define MAC_GRR_PSHO_RETURN_FAIL_EV (DWORD)(GRR_EVENT_BASE + 158)
+#define MAC_GRR_PSHO_DEACT_CNF_EV (DWORD)(GRR_EVENT_BASE + 159)
+#define Z_RRMI_INTER_RAT_NCELL_IND_EV (DWORD)(GRR_EVENT_BASE + 160)
+#define RR_XCELLINFO_REQ (DWORD)(GRR_EVENT_BASE + 161)
+#define RR_XCELLINFO_ABORT_REQ (DWORD)(GRR_EVENT_BASE + 162)
+
+/* START OF RRC */
+#define LLC_RRC_DATA_REQ (DWORD)(GRRC_EVENT_BASE + 0)
+#define RR_EM_HO_INFO_REQ (DWORD)(GRRC_EVENT_BASE + 1)
+#define RR_ABORT_REQ (DWORD)(GRRC_EVENT_BASE + 2)
+#define RR_DATA_REQ (DWORD)(GRRC_EVENT_BASE + 3)
+#define RR_HO_START_INFO (DWORD)(GRRC_EVENT_BASE + 4)
+#define GRR_RRC_CLASSMARK_IND (DWORD)(GRRC_EVENT_BASE + 5)
+#define GRR_RRC_UPDATE_PARAM_REQ (DWORD)(GRRC_EVENT_BASE + 6)
+#define GRR_RRC_ASSIGN_REQ (DWORD)(GRRC_EVENT_BASE + 7)
+#define GRR_RRC_EST_REQ (DWORD)(GRRC_EVENT_BASE + 8)
+#define GRR_RRC_ERROR_IND (DWORD)(GRRC_EVENT_BASE + 9)
+#define GRR_RRC_DEACT_REQ (DWORD)(GRRC_EVENT_BASE + 10)
+#define GRR_RRC_RXSTAT_REQ (DWORD)(GRRC_EVENT_BASE + 11)
+#define GRR_RRC_TESTPARAM_REQ (DWORD)(GRRC_EVENT_BASE + 12)
+#define GRR_RRC_MN_MEAS_REQ (DWORD)(GRRC_EVENT_BASE + 13)
+#define GRR_RRC_RRAT_RXSTAT_REQ (DWORD)(GRRC_EVENT_BASE + 14)
+#define GRR_RRC_RRL_DATA_REQ (DWORD)(GRRC_EVENT_BASE + 15)
+#define GRR_RRC_DTM_REQ (DWORD)(GRRC_EVENT_BASE + 16)
+#define GRR_RRC_PDCH_COMPLETE_IND (DWORD)(GRRC_EVENT_BASE + 17)
+#define GRR_RRC_REL_PS_CNF (DWORD)(GRRC_EVENT_BASE + 18)
+#define GRR_RRC_REL_PS_IND (DWORD)(GRRC_EVENT_BASE + 19)
+#define DL_UNIT_DATA_IND (DWORD)(GRRC_EVENT_BASE + 20)
+#define DL_DATA_IND (DWORD)(GRRC_EVENT_BASE + 21)
+#define DL_DATA_REJ (DWORD)(GRRC_EVENT_BASE + 22)
+#define DL_ESTABLISH_IND (DWORD)(GRRC_EVENT_BASE + 23)
+#define DL_ESTABLISH_CON (DWORD)(GRRC_EVENT_BASE + 24)
+#define DL_IRAT_HO_CON (DWORD)(GRRC_EVENT_BASE + 25)
+#define DL_RELEASE_IND (DWORD)(GRRC_EVENT_BASE + 26)
+#define DL_RELEASE_CON (DWORD)(GRRC_EVENT_BASE + 27)
+#define DL_SUSPEND_CON (DWORD)(GRRC_EVENT_BASE + 28)
+#define MDL_ERROR_IND (DWORD)(GRRC_EVENT_BASE + 29)
+#define URRC_HO_INFO_RES (DWORD)(GRRC_EVENT_BASE + 30)
+#define URRC_HO_CNF (DWORD)(GRRC_EVENT_BASE + 31)
+#define URRC_HO_REJ (DWORD)(GRRC_EVENT_BASE + 32)
+#define RR_HO_REQ (DWORD)(GRRC_EVENT_BASE + 33)
+#define RR_VSD_INFO (DWORD)(GRRC_EVENT_BASE + 34)
+#define DL_SENDCMP_IND_EV (DWORD)(GRRC_EVENT_BASE + 35)
+#define GRR_RRC_POWEROFF_IND_EV (DWORD)(GRRC_EVENT_BASE + 36)
+#define T3110 (DWORD)(GRRC_EVENT_BASE + 37)
+#define T3124 (DWORD)(GRRC_EVENT_BASE + 38)
+#define GRRC_T3230_EV (DWORD)(GRRC_EVENT_BASE + 39) // R9 UPDATE
+#define T3148 (DWORD)(GRRC_EVENT_BASE + 40)
+
+ /* END OF RRC */
+
+/* START OF GRR */
+#define RRMN_MEAS_RESULTS_REQ (DWORD)(GRR_EVENT_BASE + 0)
+#define RR_TESTPARAM_REQ (DWORD)(GRR_EVENT_BASE + 1)
+#define RR_EM_CELL_INFO_REQ (DWORD)(GRR_EVENT_BASE + 2)
+#define RR_ACT_REQ (DWORD)(GRR_EVENT_BASE + 3)
+#define RR_CELL_PARAMETER_REQ (DWORD)(GRR_EVENT_BASE + 4)
+#define RR_CLASSMARK_IND (DWORD)(GRR_EVENT_BASE + 5)
+#define RR_DEACT_REQ (DWORD)(GRR_EVENT_BASE + 6)
+#define RR_HPLMN_ACT_REQ (DWORD)(GRR_EVENT_BASE + 7)
+#define RR_PCH_PREFERENCE_REQ (DWORD)(GRR_EVENT_BASE + 8)
+#define RR_PLMN_ABORT_REQ (DWORD)(GRR_EVENT_BASE + 9)
+#define RR_PLMN_REQ (DWORD)(GRR_EVENT_BASE + 10)
+#define RR_UPDATE_PLMN_REQ (DWORD)(GRR_EVENT_BASE + 11)
+#define RR_INACTIVE_REQ (DWORD)(GRR_EVENT_BASE + 12)
+#define RR_HPLMN_ABORT_REQ (DWORD)(GRR_EVENT_BASE + 13)
+#define RR_EST_REQ (DWORD)(GRR_EVENT_BASE + 14)
+#define RR_UPDATE_PARAM_REQ (DWORD)(GRR_EVENT_BASE + 15)
+#define GMMRR_ASSIGN_REQ (DWORD)(GRR_EVENT_BASE + 16)
+#define GMMRR_INFO_REQ (DWORD)(GRR_EVENT_BASE + 17)
+#define GMMRR_RELEASE_REQ (DWORD)(GRR_EVENT_BASE + 18)
+#define RR_TEST_COUNT_REQ (DWORD)(GRR_EVENT_BASE + 19)
+#define RRMI_START_RXSTAT_REQ (DWORD)(GRR_EVENT_BASE + 20)
+#define RRMI_END_RXSTAT_REQ (DWORD)(GRR_EVENT_BASE + 21)
+#define RRL_RR_DATA_REQ (DWORD)(GRR_EVENT_BASE + 22)
+#define RRC_GRR_EST_CNF (DWORD)(GRR_EVENT_BASE + 23)
+#define RRC_GRR_EST_FAIL (DWORD)(GRR_EVENT_BASE + 24)
+#define RRC_GRR_CHN_REL_IND (DWORD)(GRR_EVENT_BASE + 25)
+#define RRC_GRR_DEACT_CNF (DWORD)(GRR_EVENT_BASE + 26)
+#define RRC_GRR_TESTPARAM_CNF (DWORD)(GRR_EVENT_BASE + 27)
+#define RRC_GRR_MN_MEAS_CNF (DWORD)(GRR_EVENT_BASE + 28)
+#define RRC_GRR_RRAT_RXSTAT_IND (DWORD)(GRR_EVENT_BASE + 29)
+#define RRC_GRR_RRL_DATA_IND (DWORD)(GRR_EVENT_BASE + 30)
+#define RRC_GRR_RRL_ABORT_EVENT_IND (DWORD)(GRR_EVENT_BASE + 31)
+#define RRC_GRR_DTM_CNF (DWORD)(GRR_EVENT_BASE + 32)
+#define RRC_GRR_DTM_IND (DWORD)(GRR_EVENT_BASE + 33)
+#define RRC_GRR_DTM_ASS_IND (DWORD)(GRR_EVENT_BASE + 34)
+#define RRC_GRR_DTM_REJ (DWORD)(GRR_EVENT_BASE + 35)
+#define RRC_GRR_PKT_NOTI_IND (DWORD)(GRR_EVENT_BASE + 36)
+#define RRC_GRR_REL_PS_REQ (DWORD)(GRR_EVENT_BASE + 37)
+#define RRC_GRR_CONNECTED_IND (DWORD)(GRR_EVENT_BASE + 38)
+#define RRC_GRR_HO_TO_UTRAN_IND (DWORD)(GRR_EVENT_BASE + 39)
+#define RLC_GRR_ACCESS_REQ (DWORD)(GRR_EVENT_BASE + 40)
+#define RLC_GRR_UPLINK_PDCH_IND (DWORD)(GRR_EVENT_BASE + 41)
+#define RLC_GRR_REL_PDCH_CNF (DWORD)(GRR_EVENT_BASE + 42)
+#define RLC_GRR_UPLINK_PDCH_REL_IND (DWORD)(GRR_EVENT_BASE + 43)
+#define RLC_GRR_STATUS_IND (DWORD)(GRR_EVENT_BASE + 44)
+#define RLC_GRR_UPLINK_PDCH_EST_IND (DWORD)(GRR_EVENT_BASE + 45)
+#define RLC_GRR_TESTPARAM_IND (DWORD)(GRR_EVENT_BASE + 46)
+#define MAC_GRR_DOWNLINK_PDCH_IND (DWORD)(GRR_EVENT_BASE + 47)
+#define MAC_GRR_DOWNLINK_PDCH_REL_IND (DWORD)(GRR_EVENT_BASE + 48)
+#define MAC_GRR_POLLING_CNF (DWORD)(GRR_EVENT_BASE + 49)
+#define MAC_GRR_CIRCUIT_CNF (DWORD)(GRR_EVENT_BASE + 50)
+#define MAC_GRR_CIRCUIT_FAIL (DWORD)(GRR_EVENT_BASE + 51)
+#define MAC_GRR_CIRCUIT_ABORT_CNF (DWORD)(GRR_EVENT_BASE + 52)
+#define MAC_GRR_DATA_IND (DWORD)(GRR_EVENT_BASE + 53)
+#define MAC_GRR_FREQ_UPDATE_REQ (DWORD)(GRR_EVENT_BASE + 54)
+#define MAC_GRR_DEACT_CNF (DWORD)(GRR_EVENT_BASE + 55)
+#define MAC_GRR_IDLE_CHN_REQ (DWORD)(GRR_EVENT_BASE + 56)
+#define MAC_GRR_PERS_LEVEL_IND (DWORD)(GRR_EVENT_BASE + 57)
+#define MAC_GRR_START_TIMER (DWORD)(GRR_EVENT_BASE + 58)
+#define MAC_GRR_STOP_TIMER (DWORD)(GRR_EVENT_BASE + 59)
+#define MAC_GRR_TESTPARAM_IND (DWORD)(GRR_EVENT_BASE + 60)
+#define MAC_GRR_SUSPEND_CNF (DWORD)(GRR_EVENT_BASE + 61)
+#define MAC_GRR_PDCH_FAIL_IND (DWORD)(GRR_EVENT_BASE + 62)
+#define RRAT_RXSTAT_REQ (DWORD)(GRR_EVENT_BASE + 63)
+#define RRAT_CHANGE_REL_REQ (DWORD)(GRR_EVENT_BASE + 64)
+#define RR_CELL_CHANGE_REQ (DWORD)(GRR_EVENT_BASE + 65)
+#define URRC_CELL_CHANGE_CNF (DWORD)(GRR_EVENT_BASE + 66)
+#define URRC_CELL_CHANGE_REJ (DWORD)(GRR_EVENT_BASE + 67)
+#define URRC_RESEL_REJ (DWORD)(GRR_EVENT_BASE + 68)
+#define URRC_RESEL_CNF (DWORD)(GRR_EVENT_BASE + 69)
+#define URRC_SET_INACTIVE_CNF (DWORD)(GRR_EVENT_BASE + 70)
+#define RR_SET_INACTIVE_REQ (DWORD)(GRR_EVENT_BASE + 71)
+#define URRC_ABORT_READ_PREDEF_CNF (DWORD)(GRR_EVENT_BASE + 72)/* used in WCDMA mode */
+#define URRC_L1_RSRC_CNF (DWORD)(GRR_EVENT_BASE + 73)
+#define URRC_L1_RSRC_REJ (DWORD)(GRR_EVENT_BASE + 74)
+#define RR_L1_RSRC_REQ (DWORD)(GRR_EVENT_BASE + 75)
+#define RR_L1_RSRC_FREE_IND (DWORD)(GRR_EVENT_BASE + 76)
+#define RR_CELL_SEARCH_REQ (DWORD)(GRR_EVENT_BASE + 77)
+#define URRC_READ_PREDEF_CONF_CNF (DWORD)(GRR_EVENT_BASE + 78)/* used in WCDMA mode */
+#define URRC_CELL_RESEL_PARAM_IND (DWORD)(GRR_EVENT_BASE + 79)
+#define URRC_CELL_SEARCH_CNF (DWORD)(GRR_EVENT_BASE + 80)
+#define URRC_CELL_SEARCH_REJ (DWORD)(GRR_EVENT_BASE + 81)
+#define RR_RESEL_REQ (DWORD)(GRR_EVENT_BASE + 82)
+/* ========================================================================
+ MMIA timer message ID definitions
+======================================================================== */
+#define MMIA_EM_HO_INFO_EXPIRY_EV (DWORD)(MMIA_TIMER_EVENT_BASE + 0)
+#define MMIA_SELL_STAT_ONE_PDP_EXPIRY_EV (DWORD)(MMIA_TIMER_EVENT_BASE + 1)
+#define MMIA_SELL_STAT_SUM_PDP_EXPIRY_EV (DWORD)(MMIA_TIMER_EVENT_BASE + 2)
+#define MMIA_SELL_STAT_REG_EXPIRY_EV (DWORD)(MMIA_TIMER_EVENT_BASE + 3)
+#define MMIA_AOC_EXPIRY_EV (DWORD)(MMIA_TIMER_EVENT_BASE + 4)
+#define MMIA_ZGDT_EXPIRY_EV (DWORD)(MMIA_TIMER_EVENT_BASE + 5)
+#define MMIA_SOFTRESET_EXPIRY_EV (DWORD)(MMIA_TIMER_EVENT_BASE + 6)
+#define MMIA_ZULRTIND_EXPIRY_EV (DWORD)(MMIA_TIMER_EVENT_BASE + 7)
+
+/* Trigger messages */
+#define MSGTRACEPS_CELL_DISPLAY_REQ_EV (DWORD)(PS_ROADTEST_EVENT_BASE + 0)
+#define ROADTEST_UEINFO_REQ_EV (DWORD)(PS_ROADTEST_EVENT_BASE + 1)
+#define MSGTRACEPS_CELLRESORCCOCOUNT_REQ_EV (DWORD)(PS_ROADTEST_EVENT_BASE + 2)
+
+/*WCDMA PS_ROADTEST_RSP_EVENT =300*/
+#define AT_INFO_RECEIVED_EV (DWORD)(PS_ROADTEST_RSP_EVENT + 18)
+#define AT_INFO_SENT_EV (DWORD)(PS_ROADTEST_RSP_EVENT + 19)
+#define WRLC_UL_DATAPDU_TRACE_EV (DWORD)(PS_ROADTEST_RSP_EVENT + 20)
+#define WRLC_DL_DATAPDU_TRACE_EV (DWORD)(PS_ROADTEST_RSP_EVENT + 21)
+#define WRLC_UL_CTRLPDU_TRACE_EV (DWORD)(PS_ROADTEST_RSP_EVENT + 22)
+#define WRLC_DL_CTRLPDU_TRACE_EV (DWORD)(PS_ROADTEST_RSP_EVENT + 23)
+#define PDCP2WRLC_DATA_IND_TRACE_EV (DWORD)(PS_ROADTEST_RSP_EVENT + 24)
+#define PDCP2WRLC_DATA_REQ_TRACE_EV (DWORD)(PS_ROADTEST_RSP_EVENT + 25)
+#define WMAC_UL_UPA_TB_INFO_TRACE_EV (DWORD)(PS_ROADTEST_RSP_EVENT + 26)
+
+#ifdef BTRUNK_SUPPORT
+/**************************************************PS LTE BTrunk msg range start********************************************************/
+/* ATI --> TSM */
+#define ATI_TSM_REG_REQ_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 1)
+#define ATI_TSM_CALL_REQ_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 2)
+#define ATI_TSM_CALLCONFIRM_REQ_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 3)
+#define ATI_TSM_CALLCONNECT_REQ_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 4)
+#define ATI_TSM_CALLRLS_REQ_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 5)
+#define ATI_TSM_FLOOR_REQ_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 6)
+#define ATI_TSM_FLOORRLS_REQ_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 7)
+#define ATI_TSM_STUNINFO_QUERY_REQ_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 8)
+#define ATI_TSM_GROUPINFO_QUERY_REQ_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 9)
+#define ATI_TSM_SCANGROUPINFO_REQ_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 10)
+#define ATI_TSM_SCANSWITCH_REQ_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 11)
+#define ATI_TSM_SHAKEHAND_REQ_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 12)
+#define ATI_TSM_SHORT_DATA_REQ_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 13)
+#define ATI_TSM_LOCATINFO_REQ_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 14)
+#define ATI_TSM_SETABILITY_REQ_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 15)
+#define ATI_TSM_CALLFORWARD_REQ_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 16)
+#define ATI_TSM_CALLMODIFY_RSP_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 17)
+#define ATI_TSM_CALLMODIFY_REJ_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 18)
+
+/* TSM --> ATI */
+#define ATI_TSM_FLOORGRT_IND_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 25)
+#define ATI_TSM_FLOORRLS_CNF_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 26)
+#define ATI_TSM_FLOORRLS_IND_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 27)
+#define ATI_TSM_FLOORREJ_IND_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 28)
+#define ATI_TSM_FLOORWAIT_IND_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 29)
+#define ATI_TSM_FLOORINFORM_IND_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 30)
+#define ATI_TSM_STUNINFO_QUERY_CNF_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 31)
+#define ATI_TSM_STUNINFO_IND_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 32)
+#define ATI_TSM_GROUPINFO_QUERY_CNF_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 33)
+#define ATI_TSM_GROUPINFO_UPDATE_IND_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 34)
+#define ATI_TSM_GROUPCALL_IND_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 35)
+#define ATI_TSM_SCANGROUPINFO_IND_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 36)
+#define ATI_TSM_SHAKEHAND_CNF_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 37)
+#define ATI_TSM_SHORT_DATA_IND_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 38)
+#define ATI_TSM_LOCATINFO_TYPE_IND_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 39)
+#define ATI_TSM_FALLBACK_IND_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 40)
+#define ATI_TSM_REG_CNF_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 41)
+#define ATI_TSM_REGSTATE_IND_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 42)
+#define ATI_TSM_CALL_CNF_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 43)
+#define ATI_TSM_CALLRLS_CNF_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 44)
+#define ATI_TSM_CALL_IND_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 45)
+#define ATI_TSM_CALLPROCEED_IND_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 46)
+#define ATI_TSM_CALLALERTING_IND_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 47)
+#define ATI_TSM_CALLCONNECTACK_IND_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 48)
+#define ATI_TSM_CALLRLS_IND_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 49)
+#define ATI_TSM_SPEAKINGTIMEROUT_IND_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 50)
+#define ATI_TSM_VIDEOSOURCE_IND_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 51)
+#define ATI_TSM_SCANGROUPINFO_CNF_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 52)
+#define ATI_TSM_SRSTATUS_IND_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 53)
+#define ATI_TSM_CALLHOLD_IND_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 54)
+#define ATI_TSM_PTTBEAR_IND_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 55)
+#define ATI_TSM_BUSY_ALERTING_IND_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 56)
+#define ATI_TSM_CALLFORWARD_IND_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 57)
+#define ATI_TSM_CALLMODIFY_REQ_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 58)
+#define ATI_TSM_CALLMODIFY_ACK_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 59)
+#define ATI_TSM_REGEXTINFO_IND_EV (DWORD)(EVENT_PS_LTE_BTRUNK_BASE + 60)
+#endif
+#endif
+
diff --git a/upstream/pub/project/zx297520v3/include/drv/NvParam_drv.h b/upstream/pub/project/zx297520v3/include/drv/NvParam_drv.h
new file mode 100755
index 0000000..66c6f6d
--- /dev/null
+++ b/upstream/pub/project/zx297520v3/include/drv/NvParam_drv.h
@@ -0,0 +1,223 @@
+/***********************************************************************
+* Copyright (C) 2016, ZTE Corporation.
+*
+* File Name: nvparam_drv.h
+* File Mark:
+* Description:
+* Others:
+* Version: v1.0
+* Author: wangxia
+* Date: 2016-03-12
+*
+* History 1:
+* Date:
+* Version:
+* Author:
+* Modification:
+* History 2:
+**********************************************************************/
+#ifndef NVPARAM_DRV_H
+#define NVPARAM_DRV_H
+
+/**************************************************************************
+ * Include files *
+ **************************************************************************/
+#include "RWNvConfig.h"
+#include "NvParam_tsc.h"
+/**************************************************************************
+ * Macro *
+ **************************************************************************/
+/* Driver NV region: base address and total size come from the flash map. */
+#define DRV_NV_ADDR OS_FLASH_DRV_RW_NONFAC_BASE_ADDR
+#define DRV_NV_SIZE OS_FLASH_DRV_RW_NONFAC_SIZE /*16K*/
+
+/*=====================================================================================================================
+|----------------|----------------|---------------|--------------|----------------|-----------------|-----------------|
+| public(256B) | system(3K) | platfor(3K) | highspeed(4K)| peripheral(3K) | audio(1K) | reserved(1.75K) |
+|----------------|----------------|---------------|--------------|----------------|-----------------|-----------------|
+=======================================================================================================================*/
+
+/* Group base addresses are laid out back-to-back in the order of the table above. */
+#define DRV_PUB_NV_ADDR DRV_NV_ADDR
+#define DRV_PUB_NV_SIZE (256)
+#define DRV_SYS_NV_ADDR (DRV_PUB_NV_ADDR + DRV_PUB_NV_SIZE)
+#define DRV_SYS_NV_SIZE (3 * 1024)
+#define DRV_PLAT_NV_ADDR (DRV_SYS_NV_ADDR + DRV_SYS_NV_SIZE)
+#define DRV_PLAT_NV_SIZE (3 * 1024)
+#define DRV_HS_PERI_NV_ADDR (DRV_PLAT_NV_ADDR + DRV_PLAT_NV_SIZE)
+#define DRV_HS_PERI_NV_SIZE (4 * 1024)
+#define DRV_PERI_NV_ADDR (DRV_HS_PERI_NV_ADDR + DRV_HS_PERI_NV_SIZE)
+#define DRV_PERI_NV_SIZE (3 * 1024)
+#define DRV_AUDIO_NV_ADDR (DRV_PERI_NV_ADDR + DRV_PERI_NV_SIZE)
+#define DRV_AUDIO_NV_SIZE (1 * 1024)
+#define DRV_RSVD_NV_ADDR (DRV_AUDIO_NV_ADDR + DRV_AUDIO_NV_SIZE)
+#define DRV_RSVD_NV_SIZE (1 * 1024 + 768) /* 1.75 KB tail; brings the total to 16 KB */
+
+#define DRV_TOTAL_NV_SIZE (DRV_PUB_NV_SIZE+DRV_SYS_NV_SIZE+DRV_PLAT_NV_SIZE+DRV_HS_PERI_NV_SIZE+DRV_PERI_NV_SIZE+DRV_AUDIO_NV_SIZE+DRV_RSVD_NV_SIZE)
+
+/* user interface */
+/* ITEM_ADDR(x): flash address of field x via the offsetof-on-NULL idiom;
+ ITEM_SIZE(x): size in bytes of field x. */
+#define DRV_PUB_NV_ITEM_ADDR(x) (DRV_PUB_NV_ADDR + (UINT32)(&(((T_ZDrvNv_PubData*)(0x0))->x)))
+#define DRV_PUB_NV_ITEM_SIZE(x) (sizeof(((T_ZDrvNv_PubData*)(0x0))->x))
+
+#define DRV_SYS_NV_ITEM_ADDR(x) (DRV_SYS_NV_ADDR + (UINT32)(&(((T_ZDrvNv_SysData*)(0x0))->x)))
+#define DRV_SYS_NV_ITEM_SIZE(x) (sizeof(((T_ZDrvNv_SysData*)(0x0))->x))
+
+#define DRV_PLAT_NV_ITEM_ADDR(x) (DRV_PLAT_NV_ADDR + (UINT32)(&(((T_ZDrvNv_PlatData*)(0x0))->x)))
+#define DRV_PLAT_NV_ITEM_SIZE(x) (sizeof(((T_ZDrvNv_PlatData*)(0x0))->x))
+
+#define DRV_HS_PERI_NV_ITEM_ADDR(x) (DRV_HS_PERI_NV_ADDR + (UINT32)(&(((T_ZDrvNv_HSPeriData*)(0x0))->x)))
+/* NOTE(review): "DDRV_HS_PER_NV_ITEM_SIZE" looks like a typo for
+ DRV_HS_PERI_NV_ITEM_SIZE; name kept because callers may already use it. */
+#define DDRV_HS_PER_NV_ITEM_SIZE(x) (sizeof(((T_ZDrvNv_HSPeriData*)(0x0))->x))
+
+#define DRV_PER_NV_ITEM_ADDR(x) (DRV_PERI_NV_ADDR + (UINT32)(&(((T_ZDrvNv_PeriData*)(0x0))->x)))
+#define DRV_PER_NV_ITEM_SIZE(x) (sizeof(((T_ZDrvNv_PeriData*)(0x0))->x))
+
+#define DRV_AUDIO_NV_ITEM_ADDR(x) (DRV_AUDIO_NV_ADDR + (UINT32)(&(((T_ZDrvNv_AudioData*)(0x0))->x)))
+#define DRV_AUDIO_NV_ITEM_SIZE(x) (sizeof(((T_ZDrvNv_AudioData*)(0x0))->x))
+
+/* NOTE(review): offset 15360 (15 KB) falls inside the DRV_RSVD tail region
+ (which starts at 14592), not the 1 KB audio region -- confirm intended. */
+#define OS_FLASH_VOICE_DRV_RW_NONFAC_BASE_ADDR (OS_FLASH_DRV_RW_NONFAC_BASE_ADDR + 15360)
+#define OS_FLASH_VOICE_DRV_NONFAC_SIZE 1024
+
+/* Build-time guard: the summed group sizes must fit the 16 KB flash window. */
+#if DRV_TOTAL_NV_SIZE > (OS_FLASH_DRV_RW_NONFAC_SIZE)
+#error "error drv nv config!!!"
+#endif
+
+/****************************************************************************
+* Types
+****************************************************************************/
+
+
+/******************************************************
+* Drv NV Config
+******************************************************/
+/***********************************
+1. public nv_data
+************************************/
+/*
+ * Public NV group (DRV_PUB_NV_SIZE = 256 bytes): chip/project identity and
+ * version strings shared by all modules.  The hex comments give each field's
+ * byte offset; the struct is packed so those offsets are exact.
+ */
+typedef struct _T_ZDrvNv_PubData
+{
+ /* 0x00 */ CHAR chipName[16];
+ /* 0x10 */ CHAR prjName[16];
+ /* 0x20 */ CHAR externalVer[16];
+ /* 0x30 */ CHAR internalVer[16];
+ /* 0x40 */ CHAR releaseTime[16];
+ /* 0x50 */ UINT8 productType;
+ /* 0x51 */ UINT8 reserved[DRV_PUB_NV_SIZE - 0x51]; /* pad to exactly 256 B */
+} __attribute__ ((packed)) T_ZDrvNv_PubData;
+
+/***********************************
+2. system group nv_data
+************************************/
+/*
+ * System NV group (DRV_SYS_NV_SIZE = 3 KB): touchscreen config, watchdog and
+ * UICC switches.  Packed; hex comments are byte offsets within the group.
+ * NOTE(review): the 0x70 offset after reserved0[12] implies
+ * sizeof(T_SYS_NV_TSC_CONFIG) == 0x64 -- confirm against NvParam_tsc.h.
+ */
+typedef struct _T_ZDrvNv_SysData
+{
+ /* 0x000 */ T_SYS_NV_TSC_CONFIG tsc_config;
+ UINT8 reserved0[12];
+ /* 0x70 */ UINT32 buck1OnoffFlag;
+ /* 0x74 */ UINT32 wdtSwitch;
+ /* 0x78 */ UINT32 wdtPriority;
+ /* 0x7C */ UINT8 uiccmodeSwitch;
+ /* 0x7D */ UINT8 uiccPreSwitch;
+ /* 0x7E */ UINT8 uicc1modeSwitch;
+ /* 0x7F */ UINT8 uicc1PreSwitch;
+ /* 0x80 */ UINT8 ishpnotsupported;
+ UINT8 reserved[DRV_SYS_NV_SIZE - 129]; /* 129 = 0x81 bytes used; pad to 3 KB */
+} __attribute__ ((packed)) T_ZDrvNv_SysData;
+
+/***********************************
+3. platform group nv_data
+************************************/
+/* Platform NV group: no fields defined yet; the full 3 KB region is reserved. */
+typedef struct _T_ZDrvNv_PlatData
+{
+ UINT8 reserved[DRV_PLAT_NV_SIZE];
+} __attribute__ ((packed)) T_ZDrvNv_PlatData;
+
+/***********************************
+4. high-speed peripheral group nv_data
+************************************/
+/* High-speed peripheral NV group: no fields defined yet; full 4 KB reserved. */
+typedef struct _T_ZDrvNv_HSPeriData
+{
+ UINT8 reserved[DRV_HS_PERI_NV_SIZE];
+} __attribute__ ((packed)) T_ZDrvNv_HSPeriData;
+
+/***********************************
+5. common peripheral group nv_data
+************************************/
+/* Common peripheral NV group (3 KB): currently only the battery-detect flag. */
+typedef struct _T_ZDrvNv_PeriData
+{
+ UINT8 bat_det; /* battery-detect switch -- presumably 0/1; confirm with reader code */
+ UINT8 reserved[DRV_PERI_NV_SIZE-1];
+} __attribute__ ((packed)) T_ZDrvNv_PeriData;
+
+/***********************************
+6. audio group nv_data
+************************************/
+/* Audio NV group: no fields defined yet; the full 1 KB region is reserved. */
+typedef struct _T_ZDrvNv_AudioData
+{
+ UINT8 reserved[DRV_AUDIO_NV_SIZE];
+} __attribute__ ((packed)) T_ZDrvNv_AudioData;
+
+/***********************************
+7. all driver_used nv_data
+************************************/
+/*
+ * Full driver NV image: all groups concatenated in flash order.  The hex
+ * comments give each group's starting byte offset; the total must equal
+ * DRV_NV_SIZE (16 KB), which zDrvNv_CheckTypeSize() enforces at build time.
+ */
+typedef struct _T_ZDrv_NvData
+{
+ /* 0x0000 */ T_ZDrvNv_PubData pubData;
+ /* 0x0100 */ T_ZDrvNv_SysData sysData;
+ /* 0x0D00 */ T_ZDrvNv_PlatData platData;
+ /* 0x1900 */ T_ZDrvNv_HSPeriData HSPeriData;
+ /* 0x2900 */ T_ZDrvNv_PeriData periData;
+ /* 0x3500 */ T_ZDrvNv_AudioData audioData;
+ /* 0x3900 */ UINT8 reserved[DRV_RSVD_NV_SIZE];
+} T_ZDrv_NvData;
+
+
+/******************************************************
+* check struct size
+******************************************************/
+/*
+ * Compile-time layout check.  Each local array below gets size -1 -- a
+ * compile error -- if the corresponding struct does not exactly match its
+ * NV region size, so any packing/padding drift is caught at build time.
+ * (Pre-C11 idiom; with C11 these would be _Static_assert.)  The return
+ * value is meaningless; it only keeps the arrays referenced so the
+ * compiler does not warn about unused locals.
+ * Fix: removed the stray macro-style '\' line continuations left over
+ * from a macro-to-function conversion (the '{ \' spliced the brace line
+ * into the first declaration).
+ */
+static inline CHAR zDrvNv_CheckTypeSize(void)
+{
+ CHAR __dummy1[(sizeof(T_ZDrv_NvData)==DRV_NV_SIZE)?1:-1]={0};
+ CHAR __dummy2[(sizeof(T_ZDrvNv_PubData)==DRV_PUB_NV_SIZE)?1:-1]={0};
+ CHAR __dummy3[(sizeof(T_ZDrvNv_SysData)==DRV_SYS_NV_SIZE)?1:-1]={0};
+ CHAR __dummy4[(sizeof(T_ZDrvNv_PlatData)==DRV_PLAT_NV_SIZE)?1:-1]={0};
+ CHAR __dummy5[(sizeof(T_ZDrvNv_HSPeriData)==DRV_HS_PERI_NV_SIZE)?1:-1]={0};
+ CHAR __dummy6[(sizeof(T_ZDrvNv_PeriData)==DRV_PERI_NV_SIZE)?1:-1]={0};
+ CHAR __dummy7[(sizeof(T_ZDrvNv_AudioData)==DRV_AUDIO_NV_SIZE)?1:-1]={0};
+ return (__dummy1[0]+__dummy2[0]+__dummy3[0]+__dummy4[0]+__dummy5[0]+__dummy6[0]+__dummy7[0]);
+}
+
+/******************************************************
+* old struct
+******************************************************/
+#if 0
+/* Superseded by T_ZDrvNv_SysData above; kept for reference only. */
+typedef struct _T_Sys_Drv_Nv_Data
+{
+ T_SYS_NV_TSC_CONFIG tsc_config;
+ UINT8 reserved[6];
+ UINT32 wdtSwitch;
+}T_Sys_Drv_Nv_Data;
+#endif
+/* Legacy voice-parameter NV layout, kept for compatibility with older voice code. */
+typedef struct _T_Drv_Nv_Data
+{
+ UINT32 VpData[1024];//add by lvwenhua for voice 2013.12.6
+}T_Drv_Nv_Data;
+
+/* NOTE(review): VpData is 4 KB (1024 * 4 B) but DRV_NV_ITEM_ADDRESS resolves
+ relative to DRV_AUDIO_NV_ADDR, whose region is only DRV_AUDIO_NV_SIZE = 1 KB
+ -- confirm the intended region before relying on the upper 3 KB. */
+#define DRV_NV_ITEM_ADDRESS(x) (DRV_AUDIO_NV_ADDR + (UINT32)(&(((T_Drv_Nv_Data*)(0x0))->x)))
+/* Audio NV flag block: 13 one-byte on/off flags + 19 reserved = 32 bytes total. */
+typedef struct _T_Audio_NvFlag
+{
+ UINT8 isVpConfigInitOn;
+ UINT8 isVpParamInNv;
+ UINT8 isUseSlicCodec;
+ UINT8 isUseVoiceProc;//4 UINT8 isUseNXP;
+ UINT8 isUseCodecDsp;
+ UINT8 isUseNvWrite;
+ UINT8 isCloseVpBufferBak;
+ UINT8 isUseTdm;//8
+ UINT8 isUseRxDtmfDet;
+ UINT8 isUseTxDtmfDet;
+ UINT8 isUseRxMixData;
+ UINT8 isUseTxMixData;//12
+ UINT8 isUseEcall;
+ UINT8 reserved[19];//32-13
+
+} T_Audio_NvFlag;
+
+#endif
+
diff --git a/upstream/pub/project/zx297520v3/include/infra/ram_base_config_7520v3.h b/upstream/pub/project/zx297520v3/include/infra/ram_base_config_7520v3.h
new file mode 100755
index 0000000..6a1626d
--- /dev/null
+++ b/upstream/pub/project/zx297520v3/include/infra/ram_base_config_7520v3.h
@@ -0,0 +1,347 @@
+/*******************************************************************************
+* Copyright (C) 2015, ZTE Corporation.
+*
+* File name: ram_config_7520v3.h
+* File ID:   ram_config_7520v3.h
+* Summary:   memory (RAM) address layout configuration header for the
+*            zx297520v3 chip platform
+* Usage:     #include "ram_config.h"
+*
+* Date        Version  Tag     Author     Description
+* ------------------------------------------------------------------------------
+* 2015/06/08  V1.0     Create  Liu Yanan  created
+*
+*******************************************************************************/
+
+#ifndef _RAM_BASE_CONFIG_7520V3
+#define _RAM_BASE_CONFIG_7520V3
+
+/*******************************************************************************
+* Header files                                                                 *
+*******************************************************************************/
+
+/*******************************************************************************
+* Macro definitions                                                            *
+*******************************************************************************/
+
+/* IRAM0 base address: kernel virtual address when DDR_BASE_ADDR_LINUX_VA is
+ * defined, otherwise the physical address scaled by CPU_SHIFT */
+#ifdef DDR_BASE_ADDR_LINUX_VA
+#define IRAM_BASE_ADDR ((unsigned long)(ZX_IRAM0_BASE))
+#else
+#define IRAM_BASE_ADDR (0x82000000UL>>CPU_SHIFT)
+#endif
+#define IRAM_BASE_LEN (0x00010000UL>>CPU_SHIFT)
+
+/* 1K, exception vector table: 0x82000000/0x41000000 */
+#define IRAM_BASE_ADDR_VECTOR (IRAM_BASE_ADDR)
+#define IRAM_BASE_LEN_VECTOR ((1 * 1024UL)>>CPU_SHIFT)
+/* Parenthesized so the macro expands safely inside larger expressions
+ * (the original unparenthesized "base + 0x4" would mis-bind under '*', '>>',
+ * casts, etc. at expansion sites). */
+#define OTP_SECURE_PUK_BASE (IRAM_BASE_ADDR_VECTOR + 0x4)
+
+/* 12K, driver inter-core communication */
+#define IRAM_BASE_ADDR_DRV (IRAM_BASE_ADDR_VECTOR + IRAM_BASE_LEN_VECTOR)
+#define IRAM_BASE_LEN_DRV ((12 * 1024UL)>>CPU_SHIFT)
+
+/* 1K, power-saving command exchange */
+#define IRAM_BASE_ADDR_PSM (IRAM_BASE_ADDR_DRV + IRAM_BASE_LEN_DRV)
+#define IRAM_BASE_LEN_PSM ((1 * 1024UL)>>CPU_SHIFT)
+
+/* 4K, PS<->PHY information exchange, common services */
+#define IRAM_BASE_ADDR_PUB (IRAM_BASE_ADDR_PSM + IRAM_BASE_LEN_PSM)
+#define IRAM_BASE_LEN_PUB ((4 * 1024UL)>>CPU_SHIFT)
+
+/* 512B, PS<->PHY information exchange, RF common services (overlays the
+ * start of the PUB area above) */
+#define IRAM_BASE_ADDR_PUB_RF (IRAM_BASE_ADDR_PUB)
+#define IRAM_BASE_LEN_PUB_RF (512UL>>CPU_SHIFT)
+
+/* 32B, AFC cache data space for the secondary (slave) mode */
+#define IRAM_BASE_ADDR_SLAVE_AFC (IRAM_BASE_ADDR_PUB_RF + IRAM_BASE_LEN_PUB_RF)
+#define IRAM_BASE_LEN_SLAVE_AFC (32UL>>CPU_SHIFT)
+
+/* 1K, TPC (power-control) data storage */
+#define IRAM_BASE_ADDR_TPC (IRAM_BASE_ADDR_PUB + IRAM_BASE_LEN_PUB)
+#define IRAM_BASE_LEN_TPC ((1 * 1024UL)>>CPU_SHIFT)
+
+/* 2K, interrupt/thread trace storage */
+#define IRAM_BASE_ADDR_OS_STATISTIC (IRAM_BASE_ADDR_TPC + IRAM_BASE_LEN_TPC)
+#define IRAM_BASE_LEN_OS_STATISTIC ((2 * 1024UL)>>CPU_SHIFT)
+
+/* 1K, system trace record */
+#define IRAM_BASE_ADDR_SYS_TRACE (IRAM_BASE_ADDR_OS_STATISTIC + IRAM_BASE_LEN_OS_STATISTIC)
+#define IRAM_BASE_LEN_SYS_TRACE ((1 * 1024UL)>>CPU_SHIFT)
+
+/* IRAM ICP (inter-core communication) addresses */
+#define ICP_CMD_BASE_ADDR (IRAM_BASE_ADDR)
+#define ICP_DRV_BASE_ADDR (IRAM_BASE_ADDR_DRV)
+/* NOTE(review): IRAM_BASE_ADDR_GSM is not defined in this header; presumably
+ * it is provided elsewhere -- confirm before using this macro. */
+#define DUAL_STANDBY_INTERF_GSM_USE_INFO_BASE_ADDR (IRAM_BASE_ADDR_GSM)
+
+/* per-core IRAM statistics (trace) addresses */
+#define IRAM_BASE_ADDR_OS_STATISTIC_PSCPU (IRAM_BASE_ADDR_OS_STATISTIC)
+#define IRAM_BASE_LEN_OS_STATISTIC_PSCPU (0x200UL>>CPU_SHIFT)
+#define IRAM_BASE_ADDR_OS_STATISTIC_PHYCPU (IRAM_BASE_ADDR_OS_STATISTIC_PSCPU + IRAM_BASE_LEN_OS_STATISTIC_PSCPU)
+#define IRAM_BASE_LEN_OS_STATISTIC_PHYCPU (0x200UL>>CPU_SHIFT)
+#define IRAM_BASE_ADDR_OS_STATISTIC_APCPU (IRAM_BASE_ADDR_OS_STATISTIC_PHYCPU + IRAM_BASE_LEN_OS_STATISTIC_PHYCPU)
+#define IRAM_BASE_LEN_OS_STATISTIC_APCPU (0x400UL>>CPU_SHIFT)
+
+/* per-core IRAM system-trace addresses (0x10 bytes per core) */
+#define IRAM_BASE_ADDR_SYS_TRACE_RMCPU (IRAM_BASE_ADDR_SYS_TRACE)
+#define IRAM_BASE_ADDR_SYS_TRACE_APCPU (IRAM_BASE_ADDR_SYS_TRACE + (0x10>>CPU_SHIFT))
+#define IRAM_BASE_ADDR_SYS_TRACE_PSCPU (IRAM_BASE_ADDR_SYS_TRACE + (0x20>>CPU_SHIFT))
+#define IRAM_BASE_ADDR_SYS_TRACE_PHYCPU (IRAM_BASE_ADDR_SYS_TRACE + (0x30>>CPU_SHIFT))
+
+/* phy-log optimization reuses the 512 bytes after IRAM_BASE_ADDR_SYS_TRACE */
+#define IRAM_BASE_ADDR_ZCAT_PHY_LOG (IRAM_BASE_ADDR_SYS_TRACE + (0x200>>CPU_SHIFT))
+
+/* phy-log drop tracking reuses the last 64 bytes of that 512-byte area */
+#define IRAM_BASE_PHY_LOG_DROP_TRACE (IRAM_BASE_ADDR_ZCAT_PHY_LOG + (0x200>>CPU_SHIFT) - (0x40>>CPU_SHIFT))
+
+/* ramdump mode record: 4 bytes */
+#define IRAM_BASE_ADDR_RAMDUMP_MODE (IRAM_BASE_PHY_LOG_DROP_TRACE - (0x04>>CPU_SHIFT))
+
+/* SHM buffer address record: 16 bytes */
+#define IRAM_BASE_ADDR_SHM_REMOTE_REGION (IRAM_BASE_ADDR_RAMDUMP_MODE - (0x10>>CPU_SHIFT))
+
+/* zcat mode record: 4 bytes */
+#define IRAM_BASE_ADDR_ZCAT_MODE (IRAM_BASE_ADDR_SHM_REMOTE_REGION - (0x04>>CPU_SHIFT))
+
+/* the first 4 bytes of the trace area are reused to store the DDR size
+ * information passed in by the bootloader */
+#define IRAM_BASE_ADDR_BOOT_DDR (IRAM_BASE_ADDR_DRV - (0x04>>CPU_SHIFT))
+
+/* IRAM1 base address */
+#ifdef DDR_BASE_ADDR_LINUX_VA
+#define IRAM1_BASE_ADDR ((unsigned long)(ZX_IRAM1_BASE))
+#else
+#define IRAM1_BASE_ADDR (0x00100000>>CPU_SHIFT)
+#endif
+#define IRAM1_BASE_LEN (0x00003000>>CPU_SHIFT)
+
+
+#define DDR_BASE_ADDR (0x20000000UL>>CPU_SHIFT)
+
+/* 3M, physical-layer (PHY) image, loaded by PS */
+/* On 7520 the ZSP is configured as a non-cached region; only 4 segments can be
+ * configured and each segment address has specific requirements -- any change
+ * to this address must be confirmed with the PHY team */
+#ifdef DDR_BASE_ADDR_LINUX_VA
+#define DDR_BASE_ADDR_PHY ((unsigned long)(ZX_DDR_PHYCODE_BASE))
+#else
+#define DDR_BASE_ADDR_PHY (DDR_BASE_ADDR)
+#endif
+
+#ifdef _USE_LTE_ONLY
+#define DDR_BASE_LEN_PHY (0x00200000UL>>CPU_SHIFT)
+#else
+#define DDR_BASE_LEN_PHY (0x00300000UL>>CPU_SHIFT)
+#endif
+#define DDR_BASE_OFF_PHY (0)
+
+/* 1.5M, physical-layer DATA/HARQ/CRC */
+#define DDR_BASE_ADDR_PHY_DATA (DDR_BASE_ADDR_PHY + DDR_BASE_LEN_PHY)
+#define DDR_BASE_LEN_PHY_DATA (0x00180000UL>>CPU_SHIFT)
+#define DDR_BASE_OFF_PHY_DATA (DDR_BASE_OFF_PHY + DDR_BASE_LEN_PHY)
+
+/* 1.0M, protocol-stack <-> physical-layer interaction */
+/* NOTE(review): same base as DDR_BASE_ADDR_PHY_DATA above -- the regions
+ * appear to overlap deliberately (space reuse); confirm with the layout owner */
+#define DDR_BASE_ADDR_LTE_DATA (DDR_BASE_ADDR_PHY + DDR_BASE_LEN_PHY) //DDR_BASE_LEN_PHY_NV
+#define DDR_BASE_LEN_LTE_DATA (0x00100000UL>>CPU_SHIFT)
+#define DDR_BASE_OFF_LTE_DATA (DDR_BASE_OFF_PHY + DDR_BASE_LEN_PHY)
+
+/* 0.25M, supports ramdump export */
+#define DDR_BASE_ADDR_RAMDUMP (DDR_BASE_ADDR_LTE_DATA + DDR_BASE_LEN_LTE_DATA)
+#define DDR_BASE_LEN_RAMDUMP (0x00040000UL>>CPU_SHIFT)
+#define DDR_BASE_OFF_RAMDUMP (DDR_BASE_OFF_LTE_DATA + DDR_BASE_LEN_LTE_DATA)
+
+#ifdef _USE_VEHICLE_DC /* vehicle dual-core Linux */
+/* 37.75M, AP shared-core image (sized per the 64M memory configuration that
+ * includes the CAP core; this macro is not used elsewhere) */
+#define DDR_BASE_ADDR_AP (DDR_BASE_ADDR_RAMDUMP + DDR_BASE_LEN_RAMDUMP)
+#define DDR_BASE_LEN_AP (0x025C0000UL>>CPU_SHIFT)
+#define DDR_BASE_OFF_AP (DDR_BASE_OFF_RAMDUMP + DDR_BASE_LEN_RAMDUMP)
+
+/* 2M, share memory between ap and cap */
+#define DDR_BASE_ADDR_CAP_BUF (DDR_BASE_ADDR_AP + DDR_BASE_LEN_AP)
+#define DDR_BASE_LEN_CAP_BUF (0x00200000UL>>CPU_SHIFT)
+#define DDR_BASE_OFF_CAP_BUF (DDR_BASE_OFF_AP + DDR_BASE_LEN_AP)
+
+/* 84M/212M, CAP image */
+#define DDR_BASE_ADDR_CAP (DDR_BASE_ADDR_CAP_BUF + DDR_BASE_LEN_CAP_BUF)
+#define DDR_BASE_LEN_CAP (0x05400000UL>>CPU_SHIFT)
+#define DDR_BASE_OFF_CAP (DDR_BASE_OFF_CAP_BUF + DDR_BASE_LEN_CAP_BUF)
+
+/* CAP-core dtb address, used by uboot and the CAP kernel */
+#define DDR_BASE_CAP_DTB_ADDR (DDR_BASE_ADDR_CAP_BUF + (0x00100000UL>>CPU_SHIFT))
+#else
+/* 42.75M, AP shared-core image (sized per the 64M memory configuration that
+ * includes the CAP core; this macro is not used elsewhere) */
+#define DDR_BASE_ADDR_AP (DDR_BASE_ADDR_RAMDUMP + DDR_BASE_LEN_RAMDUMP)
+#ifdef _USE_LTE_ONLY
+#define DDR_BASE_LEN_AP (0x02BC0000UL>>CPU_SHIFT)
+#else
+#define DDR_BASE_LEN_AP (0x02AC0000UL>>CPU_SHIFT)
+#endif
+#define DDR_BASE_OFF_AP (DDR_BASE_OFF_RAMDUMP + DDR_BASE_LEN_RAMDUMP)
+
+/* 1M, share memory between ap and cap */
+#define DDR_BASE_ADDR_CAP_BUF (DDR_BASE_ADDR_AP + DDR_BASE_LEN_AP)
+#ifndef DDR_BASE_LEN_CAP_BUF
+#define DDR_BASE_LEN_CAP_BUF (0x00100000UL>>CPU_SHIFT)
+#endif
+#define DDR_BASE_OFF_CAP_BUF (DDR_BASE_OFF_AP + DDR_BASE_LEN_AP)
+
+/* 16M, CAP image */
+#define DDR_BASE_ADDR_CAP (DDR_BASE_ADDR_CAP_BUF + DDR_BASE_LEN_CAP_BUF)
+#ifndef DDR_BASE_LEN_CAP
+#define DDR_BASE_LEN_CAP (0x01000000UL>>CPU_SHIFT)
+#endif
+#define DDR_BASE_OFF_CAP (DDR_BASE_OFF_CAP_BUF + DDR_BASE_LEN_CAP_BUF)
+#endif
+
+/* physical-address (PA) views of the same layout */
+#define DDR_BASE_PHYCODE_ADDR_PA (DDR_BASE_ADDR)
+#define DDR_BASE_MODEM_ADDR_PA (DDR_BASE_PHYCODE_ADDR_PA + DDR_BASE_LEN_PHY)
+#define DDR_BASE_MODEM_SIZE (DDR_BASE_LEN_LTE_DATA + DDR_BASE_LEN_RAMDUMP)
+#define DDR_BASE_AP_ADDR_PA (DDR_BASE_MODEM_ADDR_PA + DDR_BASE_MODEM_SIZE)
+
+#define DDR_BASE_CAPBUF_ADDR_PA (DDR_BASE_AP_ADDR_PA + DDR_BASE_LEN_AP)
+#define DDR_BASE_CAP_ADDR_PA (DDR_BASE_CAPBUF_ADDR_PA + DDR_BASE_LEN_CAP_BUF)
+
+
+/* 1M, physical-layer NV (space reused) */
+#define DDR_BASE_ADDR_PHY_NV (DDR_BASE_ADDR_LTE_DATA)
+#define DDR_BASE_LEN_PHY_NV (0x00100000UL>>CPU_SHIFT)
+
+/* 0.375M, driver power-saving, PS<->PHY interaction space */
+#define DDR_BASE_ADDR_PSM (DDR_BASE_ADDR_LTE_DATA)
+#define DDR_BASE_LEN_PSM (0x00060000UL>>CPU_SHIFT)
+#define DDR_BASE_OFF_PSM (DDR_BASE_OFF_RAMDUMP)
+
+/* 1M, protocol-stack <-> physical-layer interaction (space reused) */
+#define DDR_BASE_ADDR_WCDMA_DATA (DDR_BASE_ADDR_LTE_DATA)
+#define DDR_BASE_LEN_WCDMA_DATA (DDR_BASE_LEN_LTE_DATA)
+
+#if 0
+/* PsBuffer base address */
+#define PS_BUF_BASE_ADDR (DDR_BASE_ADDR_PSBUF)
+#endif
+
+/* ICP base address */
+#define ICP_DATA_BASE_ADDR (DDR_BASE_ADDR_LTE_DATA)
+
+/* WCDMA base address */
+#define DDR_BASE_ADDR_FOR_W (DDR_BASE_ADDR_WCDMA_DATA)
+
+/* tool-agent base address */
+/* #define TOOL_AGENT_BASE_ADDR (DDR_BASE_ADDR_TOOL_AGENT) */
+
+#if 0
+/* PPP base address */
+#define PLAT_PPP_BASE_ADDR (PS_BUF_BASE_ADDR)
+#endif
+
+/* NOTE(review): DDR_BASE_ADDR_AP_CP_SHAREBUF is not defined in this header;
+ * presumably provided elsewhere -- confirm before use. */
+#define SHARE_BUF_AP_CP_BASE_ADDR (DDR_BASE_ADDR_AP_CP_SHAREBUF)
+
+/* AP<->CAP shared-buffer carve-up (virtual-address side, then PA mirrors) */
+#if defined(_USE_CAP_SYS) || defined(_USE_VEHICLE_DC)
+#define ICP_CAP_BUF_ADDR DDR_BASE_ADDR_CAP_BUF
+#define ICP_CAP_BUF_LEN ((924 * 1024UL)>>CPU_SHIFT)
+#define TOOL_CAP_BUF_ADDR (ICP_CAP_BUF_ADDR + ICP_CAP_BUF_LEN)
+#define TOOL_CAP_BUF_LEN ((60 * 1024UL)>>CPU_SHIFT)
+#define RINGBUF_CAP_TO_AP_ADDR (TOOL_CAP_BUF_ADDR + TOOL_CAP_BUF_LEN)
+#define RINGBUF_CAP_TO_AP_LEN ((32 * 1024UL)>>CPU_SHIFT)
+#define ADB_CAP_BUF_ADDR (RINGBUF_CAP_TO_AP_ADDR + RINGBUF_CAP_TO_AP_LEN)
+#define ADB_CAP_BUF_LEN ((4 * 1024UL)>>CPU_SHIFT)
+#define RAMDUMP_CAP_CMM_BUF_ADDR (ADB_CAP_BUF_ADDR + ADB_CAP_BUF_LEN)
+#define RAMDUMP_CAP_CMM_BUF_LEN ((4 * 1024UL)>>CPU_SHIFT)
+#define RINGBUF_AP_TO_CAP_ADDR (RAMDUMP_CAP_CMM_BUF_ADDR + RAMDUMP_CAP_CMM_BUF_LEN)
+#define RINGBUF_AP_TO_CAP_LEN ((128 * 1024UL)>>CPU_SHIFT)
+#define TOOL_ZSP_TO_CAP_LOG_ADDR (RINGBUF_AP_TO_CAP_ADDR + RINGBUF_AP_TO_CAP_LEN)
+#define TOOL_ZSP_TO_CAP_LOG_LEN ((384 * 1024UL)>>CPU_SHIFT)
+#define RAMDUMP_AP_TO_CAP_BUF_ADDR (TOOL_ZSP_TO_CAP_LOG_ADDR + TOOL_ZSP_TO_CAP_LOG_LEN)
+#define RAMDUMP_AP_TO_CAP_BUF_LEN ((128 * 1024UL)>>CPU_SHIFT)
+#define TEE_SHARE_BUF_ADDR (RAMDUMP_AP_TO_CAP_BUF_ADDR + RAMDUMP_AP_TO_CAP_BUF_LEN)
+#define TEE_SHARE_BUF_LEN ((384 * 1024UL)>>CPU_SHIFT)
+
+/* physical-address mirrors of the buffers above, in the same order */
+#define ICP_CAP_BUF_ADDR_PA DDR_BASE_CAPBUF_ADDR_PA
+#define TOOL_CAP_BUF_ADDR_PA (ICP_CAP_BUF_ADDR_PA + ICP_CAP_BUF_LEN)
+#define RINGBUF_CAP_TO_AP_ADDR_PA (TOOL_CAP_BUF_ADDR_PA + TOOL_CAP_BUF_LEN)
+#define ADB_CAP_BUF_ADDR_PA (RINGBUF_CAP_TO_AP_ADDR_PA + RINGBUF_CAP_TO_AP_LEN)
+#define RAMDUMP_CAP_CMM_BUF_ADDR_PA (ADB_CAP_BUF_ADDR_PA + ADB_CAP_BUF_LEN)
+#define RINGBUF_AP_TO_CAP_ADDR_PA (RAMDUMP_CAP_CMM_BUF_ADDR_PA + RAMDUMP_CAP_CMM_BUF_LEN)
+#define TOOL_ZSP_TO_CAP_LOG_ADDR_PA (RINGBUF_AP_TO_CAP_ADDR_PA + RINGBUF_AP_TO_CAP_LEN)
+#define RAMDUMP_AP_TO_CAP_BUF_ADDR_PA (TOOL_ZSP_TO_CAP_LOG_ADDR_PA + TOOL_ZSP_TO_CAP_LOG_LEN)
+#define TEE_SHARE_BUF_ADDR_PA (RAMDUMP_AP_TO_CAP_BUF_ADDR_PA + RAMDUMP_AP_TO_CAP_BUF_LEN)
+#endif
+
+/* On the 7520V3 chip IRAM0 was shrunk: the protocol-stack <-> PHY interaction
+ * space was moved to DDR, reusing the ramdump space */
+/* 34K, PS<->PHY information exchange, LTE services */
+/* #define IRAM_BASE_ADDR_LTE (DDR_BASE_ADDR_RAMDUMP) */
+/* 10K, PS<->PHY information exchange, LTE services; uses IRAM0 for power
+ * optimization; 7K + 3K, 3K for eMBMS */
+#define IRAM_BASE_ADDR_LTE (IRAM_BASE_ADDR_SYS_TRACE + IRAM_BASE_LEN_SYS_TRACE)
+#define IRAM_BASE_LEN_LTE ((10 * 1024UL)>>CPU_SHIFT)
+
+/* 24K, PS<->PHY ICP exchange, uses IRAM */
+#define IRAM_BASE_ADDR_PS_PHY_SHAREBUF (IRAM_BASE_ADDR_LTE + IRAM_BASE_LEN_LTE)
+#define IRAM_BASE_LEN_PS_PHY_SHAREBUF ((24 * 1024UL)>>CPU_SHIFT)
+
+/* 221K, PS<->PHY ICP exchange, uses DDR (reuses the RAMDUMP area) */
+#define DDR_BASE_ADDR_PS_PHY_SHAREBUF (DDR_BASE_ADDR_RAMDUMP)
+#define DDR_BASE_LEN_PS_PHY_SHAREBUF ((221 * 1024UL)>>CPU_SHIFT)
+
+/* 2K, ZSP ramdump */
+#define DDR_BASE_ADDR_PHY_RAMDUMP (DDR_BASE_ADDR_PS_PHY_SHAREBUF + DDR_BASE_LEN_PS_PHY_SHAREBUF)
+#define DDR_BASE_LEN_PHY_RAMDUMP ((2 * 1024UL)>>CPU_SHIFT)
+
+/* 1K, PS<->PHY information exchange, TD services; uses DDR */
+#define IRAM_BASE_ADDR_TD (DDR_BASE_ADDR_PHY_RAMDUMP + DDR_BASE_LEN_PHY_RAMDUMP)
+/* #define IRAM_BASE_LEN_TD ((25 * 1024UL)>>CPU_SHIFT) */
+#define IRAM_BASE_LEN_TD ((1 * 1024UL)>>CPU_SHIFT)
+
+/* 12K, PS<->PHY information exchange, WCDMA services; uses DDR */
+#define IRAM_BASE_ADDR_WCDMA (IRAM_BASE_ADDR_TD + IRAM_BASE_LEN_TD)
+/* #define IRAM_BASE_LEN_WCDMA ((48 * 1024UL)>>CPU_SHIFT) */
+#define IRAM_BASE_LEN_WCDMA ((12 * 1024UL)>>CPU_SHIFT)
+
+/* 20K, WCDMA UPA space */
+#define DDR_BASE_ADDR_WUPA_DATA (IRAM_BASE_ADDR_WCDMA + IRAM_BASE_LEN_WCDMA)
+#define DDR_BASE_LEN_WUPA_DATA ((20 * 1024UL)>>CPU_SHIFT)
+
+/* IRAM WCDMA base address */
+#define IRAM_BASE_ADDR_FOR_W (IRAM_BASE_ADDR_WCDMA)
+
+/* DPRAM base address */
+#define DPRAM_BASE_ADDR (IRAM_BASE_ADDR_TD)
+
+/* DPRAM DDR base address */
+#define DPRAM_MEM_BASE_ADDR (IRAM_BASE_ADDR_TD)
+
+/* PS tcm config for ramdump */
+#define RAMDUMP_PS_ITCM_BASE_EXTER (0x0)
+#define RAMDUMP_PS_ITCM_BASE_INTER (0x0)
+#define RAMDUMP_PS_ITCM_SIZE (0x0)
+#define RAMDUMP_PS_DTCM_BASE_EXTER (0x0)
+#define RAMDUMP_PS_DTCM_BASE_INTER (0x0)
+#define RAMDUMP_PS_DTCM_SIZE (0x0)
+
+/* ZSP Ramdump */
+/* #ifdef _USE_ZSP_RAMDUMP */
+# define RAMDUMP_ZSP_ITCM_BASE (0x81040000UL)
+# define RAMDUMP_ZSP_ITCM_SIZE (0x00010000UL)
+# define RAMDUMP_ZSP_DTCM_BASE (0x81000000UL)
+# define RAMDUMP_ZSP_DTCM_SIZE (0x00010000UL)
+
+# define RAMDUMP_ZSP_CODE_SIZE (0x1b0000>>CPU_SHIFT)
+# define RAMDUMP_ZSP_IDDR_BASE (DDR_BASE_ADDR_PHY)
+# define RAMDUMP_ZSP_IDDR_SIZE (RAMDUMP_ZSP_CODE_SIZE)
+# define RAMDUMP_ZSP_DDDR_BASE (RAMDUMP_ZSP_IDDR_BASE + RAMDUMP_ZSP_CODE_SIZE)
+# define RAMDUMP_ZSP_DDDR_SIZE (DDR_BASE_LEN_PHY - RAMDUMP_ZSP_CODE_SIZE)
+
+# define RAMDUMP_ZSP_ITCM_SELF_BASE (0x0)
+# define RAMDUMP_ZSP_DTCM_SELF_BASE (0x10000UL)
+/* #endif */
+
+/*******************************************************************************
+* Data type definitions                                                        *
+*******************************************************************************/
+
+/*******************************************************************************
+* Global variable declarations                                                 *
+*******************************************************************************/
+
+/*******************************************************************************
+* Global function declarations                                                 *
+*******************************************************************************/
+
+#endif // #ifndef _RAM_BASE_CONFIG_7520V3
+
diff --git a/upstream/tools/SignTool/SignImage b/upstream/tools/SignTool/SignImage
new file mode 100755
index 0000000..552a4de
--- /dev/null
+++ b/upstream/tools/SignTool/SignImage
Binary files differ
diff --git a/upstream/tools/SignTool/SignImage.exe b/upstream/tools/SignTool/SignImage.exe
new file mode 100755
index 0000000..6905638
--- /dev/null
+++ b/upstream/tools/SignTool/SignImage.exe
Binary files differ