[Feature][T106_eSDK]T106-V2.01.01.02P56U06.AP.15.11_CAP.15.11(SDK4.6)diff_16.08(SDK4.7)

Only Configure: No
Affected branch: master
Affected module: unknown
Is it affected on both ZXIC and MTK: only ZXIC
Self-test: Yes
Doc Update: No

Change-Id: I7a7c42775e2ffdd23aaec4fff782adcc99d7890b
diff --git a/upstream/linux-5.10/arch/arm/kernel/svc_s.S b/upstream/linux-5.10/arch/arm/kernel/svc_s.S
new file mode 100755
index 0000000..a52abb7
--- /dev/null
+++ b/upstream/linux-5.10/arch/arm/kernel/svc_s.S
@@ -0,0 +1,101 @@
+
+#define	GIC_DIST_BASE			(0xF2000000)
+#define	GIC_RDIST_BASE			(0xF2040000)
+
+#define	REAL_TXT_ADDR			(CONFIG_PHYS_OFFSET + 0x8000)
+#define	DTB_ADDR			(CONFIG_DTB_ADDR)	
+
+secure_init:
+
+#if 0
+	mov	r5, r0
+	mov	r6, r1
+	mov	r7, r2
+#endif	
+	mov	r4, lr
+
+#if 1
+	/* use r0--r4 only */
+	bl  	get_core_id
+	mov 	r1, r0
+	bl  	get_cluster_id
+	mov 	r2, r0	
+
+	ldr 	r3, =GIC_DIST_BASE
+	ldr	r0, =0x50
+	str	r0, [r3]
+
+	ldr 	r3, =GIC_RDIST_BASE
+	lsl  	r2, r2, #2
+	add 	r1, r1, r2
+	lsl  	r1, r1, #17
+
+	add 	r1, r1, r3
+	add 	r1, r1, #0x14
+
+	LDR  R0, [R1]
+	LDR  R2, =0xfffffffd
+	AND  R0, R0, R2
+	STR  R0, [R1]
+
+	LDR  R2, = 0xFFFFFFFB
+wait:  
+	LDR  R0, [R1]  
+	AND  R0, R0, R2
+	CMP  R0, #0
+	BNE  wait
+
+	SUB  R1, R1, #0x14
+	LDR  R2, =0x10080
+	ADD  R1, R1, R2
+	LDR  R2, =0xFFFFFFFF
+	STR  R2, [R1]
+#endif
+
+	MRS   R0, CPSR
+	BIC    R0,  #0x1F
+	ORR   R0,  #0xD6
+	MSR   CPSR_c,  R0
+
+	MOV  r3, #0xD
+	MCR  p15,#0x6,r3,c12,c12,#5
+	MCR      p15,0,r3,c12,c12,#5
+
+	MRC    p15,0,r1,c1,c1,0
+	MOV    r2, r1
+	ORR    r2, #0x1
+	MCR    p15,0,r2,c1,c1,0
+
+	MCR      p15,#0x4,r3,c12,c9,#5
+
+	MRS   R0, CPSR  
+	BIC    R0,  #0x1F
+	ORR   R0,  #0xD3
+	MSR   CPSR_c,  R0
+
+#if 0
+	mov	r0, r5
+	mov	r1, r6
+	mov	r2, r7
+#else
+	ldr	r0, =0
+	ldr	r1, =REAL_TXT_ADDR
+	ldr	r2, =DTB_ADDR
+#endif
+	mov	lr, r4
+
+	ret	lr
+ENDPROC(secure_init)
+
+get_core_id:
+	MRC   	p15, 0, R0, c0, c0, 5
+	AND   	R0, R0, #0xFF
+	BX R14
+ENDPROC(get_core_id)
+
+get_cluster_id:
+	MRC   	p15, 0, r0, c0, c0, 5
+	AND   	r0, r0, #0xFF00
+	LSR   	r0, r0, #0x8
+	BX    	lr
+ENDPROC(get_cluster_id)
diff --git a/upstream/linux-5.10/drivers/misc/zcat/debug_info.c b/upstream/linux-5.10/drivers/misc/zcat/debug_info.c
new file mode 100755
index 0000000..d23e340
--- /dev/null
+++ b/upstream/linux-5.10/drivers/misc/zcat/debug_info.c
@@ -0,0 +1,396 @@
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/cdev.h>
+#include <linux/semaphore.h>
+#include <linux/timer.h>
+
+// #include <linux/fs.h>
+#include <linux/ioport.h>
+// #include <linux/serial_reg.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+// #include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+// #include <linux/kthread.h>
+#include <asm/io.h>
+
+#include <linux/vmalloc.h>
+#include <linux/soc/zte/rpmsg.h>
+// #include <linux/syscalls.h>
+
+// #include "debuginfo.h"
+#include "pub_debug_info.h"
+#include "ringbuf.h"
+
+
+#if defined(_USE_ZXIC_DEBUG_INFO) && !defined(CONFIG_SYSTEM_RECOVERY)
+/*******************************************************************************
+ *                                   宏定义                                     *
+ *******************************************************************************/
+#define DEBUG_INFO_SHARE_MEM_LEN    (0x2000)
+#define DEBUG_INFO_READABLE_LEN     (0x1400)
+#define DEBUG_INFO_MAX_DATA_LEN     (128)
+#define DEBUG_INFO_MAX_TOTAL_LEN    (140) // 8 + 128 + 4
+#define DEBUG_INFO_READ_TIME_MSECS  (10000)
+
+#define DEBUG_INFO_CHANNEL          (9)
+#define DEBUG_INFO_MSG_CAP_SIZE     (2 * 1024)
+
+#define DEBUG_INFO_OK               (0)
+#define DEBUG_INFO_ERROR            (-1)
+
+#define DEBUG_INFO_IOCTL_SET_DISABLE  (0x1001)
+
+/*******************************************************************************
+ *                                结构体定义                                     *
+ *******************************************************************************/
+typedef unsigned int UINT32;
+typedef unsigned short UINT16;
+typedef unsigned char UINT8;
+
+typedef struct
+{
+    UINT16 module_id; // 模块id
+    UINT16 sub_len;   // 用户数据长度
+    UINT32 time;
+    char sub_data[]; // 用户数据
+} T_SHARED_MEM_DATA;
+
+typedef struct
+{
+    UINT32 head;                  // 0x010a0a0a
+    UINT32 total_len;             // 数据内容长度
+    long long time;               // time()函数获取
+} T_SAVE_FILE_DATA;
+
+/*******************************************************************************
+ *                                  全局变量                                     *
+ *******************************************************************************/
+volatile T_RINGBUFFER *g_debug_info_buf = NULL;
+static struct semaphore debug_sem;
+static DEFINE_RAW_SPINLOCK(debugWr_lock);
+static int g_init_flag = 0;
+
+/*******************************************************************************
+ *                              内部函数定义                                     *
+ *******************************************************************************/
+static int sc_debug_info_read_to_user(char *buf, unsigned short count);
+static int sc_debug_info_record_from_user(const char *info, unsigned short count);
+static int sc_debug_info_write(UINT32 flag, const UINT8 *buf, UINT32 len);
+static void sc_debug_info_from_ap(void *buf, unsigned int len);
+
+static void kernel_timer_timeout(struct timer_list *t);
+static ssize_t debug_info_read(struct file *fp, char __user *buf, size_t count, loff_t *pos);
+static ssize_t debug_info_write(struct file *fp, const char __user *buf, size_t count, loff_t *pos);
+static int debug_info_open(struct inode *ip, struct file *fp);
+static long debug_info_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
+static int debug_info_release(struct inode *ip, struct file *fp);
+
+//初始化timer
+static DEFINE_TIMER(timer, kernel_timer_timeout);
+
+static const struct file_operations debug_info_fops = {
+    .owner = THIS_MODULE,
+    .read = debug_info_read,
+    .write = debug_info_write,
+    .open = debug_info_open,
+    .unlocked_ioctl = debug_info_ioctl,
+    .release = debug_info_release,
+};
+
+static struct miscdevice debug_info_device = {
+    .minor = MISC_DYNAMIC_MINOR,
+    .name = "debug_info",
+    .fops = &debug_info_fops,
+};
+
+static void kernel_timer_timeout(struct timer_list *t)
+{
+    if (debug_sem.count == 0)
+    {
+        up(&debug_sem);
+    }
+    /* 因为内核定时器是一个单次的定时器,所以如果想要多次重复定时需要在定时器绑定的函数结尾重新装载时间,并启动定时 */
+    /* Kernel Timer restart */
+    mod_timer(&timer, jiffies + msecs_to_jiffies(DEBUG_INFO_READ_TIME_MSECS));    
+}
+
+static ssize_t debug_info_read(struct file *fp, char __user *buf, size_t count, loff_t *pos)
+{
+    int ret;
+    int rd_len;
+
+    ret = down_interruptible(&debug_sem);
+    if(ret < 0)
+    {
+        return ret;
+    }
+    else
+    {
+        rd_len = sc_debug_info_read_to_user(buf, count);
+    }
+
+    return rd_len;
+}
+
+static ssize_t debug_info_write(struct file *fp, const char __user *buf, size_t count, loff_t *pos)
+{
+    int wr_len = sc_debug_info_record_from_user(buf, count);
+
+    return wr_len;
+}
+
+static int debug_info_open(struct inode *ip, struct file *fp)
+{
+    return 0;
+}
+
+static long debug_info_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+    switch(cmd)
+    {
+        case DEBUG_INFO_IOCTL_SET_DISABLE:
+            *(volatile UINT32 *)ZCAT_DEBUG_INFO_DISABLE = arg;
+            break;
+
+        default:
+            break;
+    }
+    return 0;
+}
+
+static int debug_info_release(struct inode *ip, struct file *fp)
+{
+    return 0;
+}
+
+static void sc_debug_info_from_ap(void *buf, unsigned int len)
+{
+    T_SHARED_MEM_DATA *debug_msg = (T_SHARED_MEM_DATA *)buf;
+    debug_msg->time = jiffies;
+
+    sc_debug_info_write(ZCAT_MEM_TYPE_KERNEL, buf, len);
+}
+
+static int __init debug_info_init(void)
+{
+    int ret = misc_register(&debug_info_device);
+    if (ret)
+    {
+        printk("debug_info_device init.\n");
+        return DEBUG_INFO_ERROR;
+    }
+
+    void *mem;
+    mem = vmalloc(DEBUG_INFO_SHARE_MEM_LEN);
+    if (!mem)
+    {
+        printk("vmalloc failed.\n");
+        return DEBUG_INFO_ERROR;
+    }
+
+    g_debug_info_buf = CreateRingBuffer((UINT8 *)mem, DEBUG_INFO_SHARE_MEM_LEN);
+    if (g_debug_info_buf == NULL)
+    {
+        printk("CreateRingBuffer failed.\n");
+        return DEBUG_INFO_ERROR;
+    }
+#if 1
+    ret = rpmsgCreateChannel(
+        CORE_PS0, 
+        DEBUG_INFO_CHANNEL,
+        DEBUG_INFO_MSG_CAP_SIZE);
+    if (ret != DEBUG_INFO_OK) 
+    {
+        printk("rpmsgCreateChannel failed, ret = %d\n", ret);
+        return DEBUG_INFO_ERROR;
+    }  
+    
+    ret = rpmsgRegCallBack(
+            CORE_PS0,
+            DEBUG_INFO_CHANNEL, 
+            sc_debug_info_from_ap);
+    if (ret != DEBUG_INFO_OK) 
+    {
+        printk("rpmsgRegCallBack failed,ret = %d\n", ret);
+        return DEBUG_INFO_ERROR;
+    } 
+#endif
+    sema_init(&debug_sem, 0);
+    /* 添加并启动定时器, 10ms */
+    mod_timer(&timer, jiffies + 1);    
+
+    g_init_flag = 1;
+
+    return 0;
+}
+
+static void __exit debug_info_exit(void)
+{
+    misc_deregister(&debug_info_device);
+
+    del_timer(&timer);
+}
+
+static int sc_debug_info_write(UINT32 flag, const UINT8 *buf, UINT32 len)
+{
+    UINT32 writelen;
+    UINT32 used_space;
+    unsigned long flags;
+
+    if (len == 0 || g_debug_info_buf == NULL)
+    {
+        printk("sc_debug_info_write:: (len == 0 || g_debug_info_buf == NULL).\n");
+        return DEBUG_INFO_ERROR;
+    }
+
+    raw_spin_lock_irqsave(&debugWr_lock, flags);
+    writelen = WriteRingBuffer(g_debug_info_buf, buf, len, flag);
+    raw_spin_unlock_irqrestore(&debugWr_lock, flags);
+    used_space = GetRingBufferSize(g_debug_info_buf);
+    if (used_space > DEBUG_INFO_READABLE_LEN)
+    {
+        if (debug_sem.count == 0)
+        {
+            up(&debug_sem);
+        }
+    }
+
+    return writelen;
+}
+
+static int sc_debug_info_read_to_user(char *buf, unsigned short count)
+{
+    unsigned int bufSize_used = 0;
+    unsigned int readLen = 0;
+    unsigned int bufLen = 0;
+    T_SAVE_FILE_DATA fileDataHead;
+
+    if (g_init_flag == 0)
+    {
+        printk("debug_info not init.\n");
+        return DEBUG_INFO_ERROR;
+    }
+    if (count == 0 || buf == NULL || g_debug_info_buf == NULL)
+    {
+        printk("sc_debug_info_read_to_user:: (count == 0 || buf == NULL || g_debug_info_buf == NULL).\n");
+        return DEBUG_INFO_ERROR;
+    }
+
+    bufSize_used = GetRingBufferSize(g_debug_info_buf);
+    if (bufSize_used == 0)
+    {
+        // printk("sc_debug_info_read_to_user:: ringBuf is empty.\n");
+        return 0;
+    }
+
+    fileDataHead.head = 0x010a0a0a;
+    fileDataHead.time = 0;
+    fileDataHead.total_len = bufSize_used;
+
+    copy_to_user(buf, &fileDataHead, sizeof(T_SAVE_FILE_DATA));
+
+    readLen = ReadRingBuffer(g_debug_info_buf, (buf + sizeof(T_SAVE_FILE_DATA)), bufSize_used, ZCAT_MEM_TYPE_USER);
+    if (readLen == 0)
+    {
+        // printk("ReadRingBuffer failed.\n");
+        return 0;
+    }
+
+    return (readLen + sizeof(T_SAVE_FILE_DATA));
+}
+
+static int sc_debug_info_record_from_user(const char *info, unsigned short count)
+{
+    unsigned int cnt = 0;
+    unsigned int my_jiffies = jiffies;
+
+    if (g_init_flag == 0)
+    {
+        printk("debug_info not init.\n");
+        return DEBUG_INFO_ERROR;
+    }
+    if (info == NULL)
+    {
+        printk("sc_debug_info_record_from_user:: info is NULL.\n");
+        return DEBUG_INFO_ERROR;
+    }
+
+    copy_to_user(info + 4, &my_jiffies, sizeof(my_jiffies));
+    cnt = sc_debug_info_write(ZCAT_MEM_TYPE_USER, (UINT8 *)info, count);
+
+    return cnt;
+}
+
+module_init(debug_info_init);
+module_exit(debug_info_exit);
+
+MODULE_AUTHOR("jcw");
+MODULE_DESCRIPTION("debug_info driver");
+MODULE_LICENSE("GPL");
+
+
+int sc_debug_info_vrecord(unsigned int id, const char *format, va_list args)
+{
+    int len;
+    UINT32 writelen;
+    // va_list args;
+    char str_buf[DEBUG_INFO_MAX_TOTAL_LEN] __attribute__((aligned(4)));
+    T_SHARED_MEM_DATA *shareMemData = (T_SHARED_MEM_DATA *)str_buf;
+
+    if (g_init_flag == 0)
+    {
+        printk("debug_info not init.\n");
+        return DEBUG_INFO_ERROR;
+    }
+
+    /* args是一个char*类型指针,指向format之后的第一个参数*/
+    // va_start(args, format);
+    len = vsnprintf(shareMemData->sub_data, DEBUG_INFO_MAX_DATA_LEN, format, args);
+    // va_end(args);
+    if (len < 0)
+    {
+        printk("vsnprintf format error.\n");
+        return DEBUG_INFO_ERROR;
+    }
+
+    shareMemData->module_id = (UINT16)(id & 0xFFFF);
+    shareMemData->sub_len = len;
+    shareMemData->time = jiffies;
+
+    writelen = sc_debug_info_write(ZCAT_MEM_TYPE_KERNEL, (UINT8 *)shareMemData, len + sizeof(T_SHARED_MEM_DATA));
+    return writelen;
+}
+EXPORT_SYMBOL(sc_debug_info_vrecord);
+
+int sc_debug_info_record(unsigned int id, const char *format, ...)
+{
+    va_list args;
+	int r;
+
+	va_start(args, format);
+	r = sc_debug_info_vrecord(id, format, args);
+	va_end(args);
+
+
+    return r;
+}
+EXPORT_SYMBOL(sc_debug_info_record);
+#else
+int sc_debug_info_record(unsigned int id, const char *format, ...)
+{
+	return 0;
+}
+#endif   /*  _USE_ZXIC_DEBUG_INFO */
+
diff --git a/upstream/linux-5.10/drivers/net/zvnet/zvnet_dev.h b/upstream/linux-5.10/drivers/net/zvnet/zvnet_dev.h
new file mode 100755
index 0000000..d1feaa5
--- /dev/null
+++ b/upstream/linux-5.10/drivers/net/zvnet/zvnet_dev.h
@@ -0,0 +1,76 @@
+#ifndef ZV_NET_H

+#define ZV_NET_H

+

+#include <linux/interrupt.h>

+#include <linux/kthread.h>

+#include <linux/spinlock.h>

+#include <linux/semaphore.h>

+#include <linux/netdevice.h>

+#include <linux/skbuff.h>

+#include <linux/soc/zte/rpmsg.h>

+

+//#define ZVNET_DEBUG

+#ifdef ZVNET_DEBUG

+#define zv_dbg(format, arg...) printk(KERN_DEBUG "[zvnet]<%s>: " format "\n" , \

+	__func__ , ## arg)

+#define zv_info(format, arg...) printk(KERN_INFO "[zvnet]<%s>: " format "\n" , \

+	__func__ , ## arg)

+#else

+#define zv_dbg(format, arg...) do {} while (0)

+#define zv_info(format, arg...) do {} while (0)

+#endif

+

+#define zv_err(format, arg...) printk(KERN_ERR "[zvnet]<%s>: " format "\n" , \

+	__func__ , ## arg)

+

+#define zv_warn(format, arg...) printk(KERN_WARNING "[zvnet]<%s>: " format "\n" , \

+	__func__ , ## arg)

+

+//maximum number of zvnet devices

+#define DDR_ZVNET_DEV_MAX 10

+#define ZVNET_IFNAME_PREFIX "zvnet"

+

+#define  ICP_CHN_ZVNET1 20 //ICP_CHANNEL_WAN1

+#define  ICP_CHN_ZVNET2 21 //ICP_CHANNEL_WAN2 

+#define  ICP_CHN_ZVNET3 22 //ICP_CHANNEL_WAN3 

+#define  ICP_CHN_ZVNET4 23 //ICP_CHANNEL_WAN4 

+

+#define	 ICP_CHANNEL_SIZE 	(8 * 1024 *2)

+

+#define zvnetCreateChannel rpmsgCreateChannel

+#define zvnetWrite         rpmsgWrite

+#define zvnetRead          rpmsgRead

+

+struct zvnet_channel {

+    T_RpMsg_CoreID core_id;

+    T_RpMsg_ChID channel_id;

+    unsigned int channel_size;

+    struct task_struct *rcv_thread;

+};

+

+struct zvnet {

+    struct net_device  *net;

+    struct sk_buff_head rxq;

+    struct tasklet_struct bh;

+    void *dev_priv;

+};

+

+struct zvnet_device {

+    struct zvnet *dev;

+    struct net_device *net;

+    //struct zvnet_channel chn_info;

+    unsigned char retran_times;

+    //int (*write)(struct sk_buff *,struct v2x_hdr *, unsigned int, struct net_device *);

+};

+

+struct zvp_header {

+    unsigned int magic_word;

+    unsigned short chnid;

+    unsigned short tlen;

+};

+

+#define ZVP_MAGIC_WORD  0x5A5A5A5A

+#define ZVP_HEAD_LEN 	sizeof(struct zvp_header)

+

+#endif

+

diff --git a/upstream/linux-5.10/drivers/rtc/class.c b/upstream/linux-5.10/drivers/rtc/class.c
new file mode 100755
index 0000000..625effe
--- /dev/null
+++ b/upstream/linux-5.10/drivers/rtc/class.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RTC subsystem, base class
+ *
+ * Copyright (C) 2005 Tower Technologies
+ * Author: Alessandro Zummo <a.zummo@towertech.it>
+ *
+ * class skeleton from drivers/hwmon/hwmon.c
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/rtc.h>
+#include <linux/kdev_t.h>
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "rtc-core.h"
+
+static DEFINE_IDA(rtc_ida);
+struct class *rtc_class;
+
+static void rtc_device_release(struct device *dev)
+{
+	struct rtc_device *rtc = to_rtc_device(dev);
+	struct timerqueue_head *head = &rtc->timerqueue;
+	struct timerqueue_node *node;
+
+	mutex_lock(&rtc->ops_lock);
+	while ((node = timerqueue_getnext(head)))
+		timerqueue_del(head, node);
+	mutex_unlock(&rtc->ops_lock);
+
+	cancel_work_sync(&rtc->irqwork);
+
+	ida_simple_remove(&rtc_ida, rtc->id);
+	kfree(rtc);
+}
+
+#ifdef CONFIG_RTC_HCTOSYS_DEVICE
+/* Result of the last RTC to system clock attempt. */
+int rtc_hctosys_ret = -ENODEV;
+
+/* IMPORTANT: the RTC only stores whole seconds. It is arbitrary
+ * whether it stores the most close value or the value with partial
+ * seconds truncated. However, it is important that we use it to store
+ * the truncated value. This is because otherwise it is necessary,
+ * in an rtc sync function, to read both xtime.tv_sec and
+ * xtime.tv_nsec. On some processors (i.e. ARM), an atomic read
+ * of >32bits is not possible. So storing the most close value would
+ * slow down the sync API. So here we have the truncated value and
+ * the best guess is to add 0.5s.
+ */
+
+static void rtc_hctosys(struct rtc_device *rtc)
+{
+	int err;
+	struct rtc_time tm;
+	struct timespec64 tv64 = {
+		.tv_nsec = NSEC_PER_SEC >> 1,
+	};
+
+	err = rtc_read_time(rtc, &tm);
+	if (err) {
+		dev_err(rtc->dev.parent,
+			"hctosys: unable to read the hardware clock\n");
+		goto err_read;
+	}
+
+	tv64.tv_sec = rtc_tm_to_time64(&tm);
+
+#if BITS_PER_LONG == 32
+	if (tv64.tv_sec > INT_MAX) {
+		err = -ERANGE;
+		goto err_read;
+	}
+#endif
+
+	err = do_settimeofday64(&tv64);
+
+	dev_info(rtc->dev.parent, "setting system clock to %ptR UTC (%lld)\n",
+		 &tm, (long long)tv64.tv_sec);
+
+err_read:
+	rtc_hctosys_ret = err;
+}
+#endif
+
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
+/*
+ * On suspend(), measure the delta between one RTC and the
+ * system's wall clock; restore it on resume().
+ */
+
+static struct timespec64 old_rtc, old_system, old_delta;
+
+static int rtc_suspend(struct device *dev)
+{
+	struct rtc_device	*rtc = to_rtc_device(dev);
+	struct rtc_time		tm;
+	struct timespec64	delta, delta_delta;
+	int err;
+
+	if (timekeeping_rtc_skipsuspend())
+		return 0;
+
+	if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0)
+		return 0;
+
+	/* snapshot the current RTC and system time at suspend*/
+	err = rtc_read_time(rtc, &tm);
+	if (err < 0) {
+		pr_debug("%s:  fail to read rtc time\n", dev_name(&rtc->dev));
+		return 0;
+	}
+
+	ktime_get_real_ts64(&old_system);
+	old_rtc.tv_sec = rtc_tm_to_time64(&tm);
+
+	/*
+	 * To avoid drift caused by repeated suspend/resumes,
+	 * which each can add ~1 second drift error,
+	 * try to compensate so the difference in system time
+	 * and rtc time stays close to constant.
+	 */
+	delta = timespec64_sub(old_system, old_rtc);
+	delta_delta = timespec64_sub(delta, old_delta);
+	if (delta_delta.tv_sec < -2 || delta_delta.tv_sec >= 2) {
+		/*
+		 * if delta_delta is too large, assume time correction
+		 * has occurred and set old_delta to the current delta.
+		 */
+		old_delta = delta;
+	} else {
+		/* Otherwise try to adjust old_system to compensate */
+		old_system = timespec64_sub(old_system, delta_delta);
+	}
+
+	return 0;
+}
+
+static int rtc_resume(struct device *dev)
+{
+	struct rtc_device	*rtc = to_rtc_device(dev);
+	struct rtc_time		tm;
+	struct timespec64	new_system, new_rtc;
+	struct timespec64	sleep_time;
+	int err;
+
+	if (timekeeping_rtc_skipresume())
+		return 0;
+
+	rtc_hctosys_ret = -ENODEV;
+	if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0)
+		return 0;
+
+	/* snapshot the current rtc and system time at resume */
+	ktime_get_real_ts64(&new_system);
+	err = rtc_read_time(rtc, &tm);
+	if (err < 0) {
+		pr_debug("%s:  fail to read rtc time\n", dev_name(&rtc->dev));
+		return 0;
+	}
+
+	new_rtc.tv_sec = rtc_tm_to_time64(&tm);
+	new_rtc.tv_nsec = 0;
+
+	if (new_rtc.tv_sec < old_rtc.tv_sec) {
+		pr_debug("%s:  time travel!\n", dev_name(&rtc->dev));
+		return 0;
+	}
+
+	/* calculate the RTC time delta (sleep time)*/
+	sleep_time = timespec64_sub(new_rtc, old_rtc);
+
+	/*
+	 * Since these RTC suspend/resume handlers are not called
+	 * at the very end of suspend or the start of resume,
+	 * some run-time may pass on either sides of the sleep time
+	 * so subtract kernel run-time between rtc_suspend to rtc_resume
+	 * to keep things accurate.
+	 */
+	sleep_time = timespec64_sub(sleep_time,
+				    timespec64_sub(new_system, old_system));
+
+	if (sleep_time.tv_sec >= 0)
+		timekeeping_inject_sleeptime64(&sleep_time);
+	rtc_hctosys_ret = 0;
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(rtc_class_dev_pm_ops, rtc_suspend, rtc_resume);
+#define RTC_CLASS_DEV_PM_OPS	(&rtc_class_dev_pm_ops)
+#else
+#define RTC_CLASS_DEV_PM_OPS	NULL
+#endif
+
+/* Ensure the caller will set the id before releasing the device */
+static struct rtc_device *rtc_allocate_device(void)
+{
+	struct rtc_device *rtc;
+
+	rtc = kzalloc(sizeof(*rtc), GFP_KERNEL);
+	if (!rtc)
+		return NULL;
+
+	device_initialize(&rtc->dev);
+
+	/* Drivers can revise this default after allocating the device. */
+	rtc->set_offset_nsec =  NSEC_PER_SEC / 2;
+
+	rtc->irq_freq = 1;
+	rtc->max_user_freq = 64;
+	rtc->dev.class = rtc_class;
+	rtc->dev.groups = rtc_get_dev_attribute_groups();
+	rtc->dev.release = rtc_device_release;
+
+	mutex_init(&rtc->ops_lock);
+	spin_lock_init(&rtc->irq_lock);
+	init_waitqueue_head(&rtc->irq_queue);
+
+	/* Init timerqueue */
+	timerqueue_init_head(&rtc->timerqueue);
+	INIT_WORK(&rtc->irqwork, rtc_timer_do_work);
+	/* Init aie timer */
+	rtc_timer_init(&rtc->aie_timer, rtc_aie_update_irq, rtc);
+	/* Init uie timer */
+	rtc_timer_init(&rtc->uie_rtctimer, rtc_uie_update_irq, rtc);
+	/* Init pie timer */
+	hrtimer_init(&rtc->pie_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	rtc->pie_timer.function = rtc_pie_update_irq;
+	rtc->pie_enabled = 0;
+
+	return rtc;
+}
+
+static int rtc_device_get_id(struct device *dev)
+{
+	int of_id = -1, id = -1;
+
+	if (dev->of_node)
+		of_id = of_alias_get_id(dev->of_node, "rtc");
+	else if (dev->parent && dev->parent->of_node)
+		of_id = of_alias_get_id(dev->parent->of_node, "rtc");
+
+	if (of_id >= 0) {
+		id = ida_simple_get(&rtc_ida, of_id, of_id + 1, GFP_KERNEL);
+		if (id < 0)
+			dev_warn(dev, "/aliases ID %d not available\n", of_id);
+	}
+
+	if (id < 0)
+		id = ida_simple_get(&rtc_ida, 0, 0, GFP_KERNEL);
+
+	return id;
+}
+
+static void rtc_device_get_offset(struct rtc_device *rtc)
+{
+	time64_t range_secs;
+	u32 start_year;
+	int ret;
+
+	/*
+	 * If RTC driver did not implement the range of RTC hardware device,
+	 * then we can not expand the RTC range by adding or subtracting one
+	 * offset.
+	 */
+	if (rtc->range_min == rtc->range_max)
+		return;
+
+	ret = device_property_read_u32(rtc->dev.parent, "start-year",
+				       &start_year);
+	if (!ret) {
+		rtc->start_secs = mktime64(start_year, 1, 1, 0, 0, 0);
+		rtc->set_start_time = true;
+	}
+
+	/*
+	 * If user did not implement the start time for RTC driver, then no
+	 * need to expand the RTC range.
+	 */
+	if (!rtc->set_start_time)
+		return;
+
+	range_secs = rtc->range_max - rtc->range_min + 1;
+
+	/*
+	 * If the start_secs is larger than the maximum seconds (rtc->range_max)
+	 * supported by RTC hardware or the maximum seconds of new expanded
+	 * range (start_secs + rtc->range_max - rtc->range_min) is less than
+	 * rtc->range_min, which means the minimum seconds (rtc->range_min) of
+	 * RTC hardware will be mapped to start_secs by adding one offset, so
+	 * the offset seconds calculation formula should be:
+	 * rtc->offset_secs = rtc->start_secs - rtc->range_min;
+	 *
+	 * If the start_secs is larger than the minimum seconds (rtc->range_min)
+	 * supported by RTC hardware, then there is one region is overlapped
+	 * between the original RTC hardware range and the new expanded range,
+	 * and this overlapped region do not need to be mapped into the new
+	 * expanded range due to it is valid for RTC device. So the minimum
+	 * seconds of RTC hardware (rtc->range_min) should be mapped to
+	 * rtc->range_max + 1, then the offset seconds formula should be:
+	 * rtc->offset_secs = rtc->range_max - rtc->range_min + 1;
+	 *
+	 * If the start_secs is less than the minimum seconds (rtc->range_min),
+	 * which is similar to case 2. So the start_secs should be mapped to
+	 * start_secs + rtc->range_max - rtc->range_min + 1, then the
+	 * offset seconds formula should be:
+	 * rtc->offset_secs = -(rtc->range_max - rtc->range_min + 1);
+	 *
+	 * Otherwise the offset seconds should be 0.
+	 */
+	if (rtc->start_secs > rtc->range_max ||
+	    rtc->start_secs + range_secs - 1 < rtc->range_min)
+		rtc->offset_secs = rtc->start_secs - rtc->range_min;
+	else if (rtc->start_secs > rtc->range_min)
+		rtc->offset_secs = range_secs;
+	else if (rtc->start_secs < rtc->range_min)
+		rtc->offset_secs = -range_secs;
+	else
+		rtc->offset_secs = 0;
+}
+
+/**
+ * rtc_device_unregister - removes the previously registered RTC class device
+ *
+ * @rtc: the RTC class device to destroy
+ */
+static void rtc_device_unregister(struct rtc_device *rtc)
+{
+	mutex_lock(&rtc->ops_lock);
+	/*
+	 * Remove innards of this RTC, then disable it, before
+	 * letting any rtc_class_open() users access it again
+	 */
+	rtc_proc_del_device(rtc);
+	cdev_device_del(&rtc->char_dev, &rtc->dev);
+	rtc->ops = NULL;
+	mutex_unlock(&rtc->ops_lock);
+	put_device(&rtc->dev);
+}
+
+static void devm_rtc_release_device(struct device *dev, void *res)
+{
+	struct rtc_device *rtc = *(struct rtc_device **)res;
+
+	rtc_nvmem_unregister(rtc);
+
+	if (rtc->registered)
+		rtc_device_unregister(rtc);
+	else
+		put_device(&rtc->dev);
+}
+
+struct rtc_device *devm_rtc_allocate_device(struct device *dev)
+{
+	struct rtc_device **ptr, *rtc;
+	int id, err;
+
+	id = rtc_device_get_id(dev);
+	if (id < 0)
+		return ERR_PTR(id);
+
+	ptr = devres_alloc(devm_rtc_release_device, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr) {
+		err = -ENOMEM;
+		goto exit_ida;
+	}
+
+	rtc = rtc_allocate_device();
+	if (!rtc) {
+		err = -ENOMEM;
+		goto exit_devres;
+	}
+
+	*ptr = rtc;
+	devres_add(dev, ptr);
+
+	rtc->id = id;
+	rtc->dev.parent = dev;
+	dev_set_name(&rtc->dev, "rtc%d", id);
+
+	return rtc;
+
+exit_devres:
+	devres_free(ptr);
+exit_ida:
+	ida_simple_remove(&rtc_ida, id);
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(devm_rtc_allocate_device);
+
+int __rtc_register_device(struct module *owner, struct rtc_device *rtc)
+{
+	struct rtc_wkalrm alrm;
+	int err;
+
+	if (!rtc->ops) {
+		dev_dbg(&rtc->dev, "no ops set\n");
+		return -EINVAL;
+	}
+
+	rtc->owner = owner;
+	rtc_device_get_offset(rtc);
+
+	/* Check to see if there is an ALARM already set in hw */
+	err = __rtc_read_alarm(rtc, &alrm);
+	if (!err && !rtc_valid_tm(&alrm.time))
+		rtc_initialize_alarm(rtc, &alrm);
+
+	rtc_dev_prepare(rtc);
+
+	err = cdev_device_add(&rtc->char_dev, &rtc->dev);
+	if (err)
+		dev_warn(rtc->dev.parent, "failed to add char device %d:%d\n",
+			 MAJOR(rtc->dev.devt), rtc->id);
+	else
+		dev_dbg(rtc->dev.parent, "char device (%d:%d)\n",
+			MAJOR(rtc->dev.devt), rtc->id);
+
+	rtc_proc_add_device(rtc);
+
+	rtc->registered = true;
+	dev_info(rtc->dev.parent, "registered as %s\n",
+		 dev_name(&rtc->dev));
+
+#ifdef CONFIG_RTC_HCTOSYS_DEVICE
+	if (!strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE))
+		rtc_hctosys(rtc);
+#endif
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__rtc_register_device);
+
+/**
+ * devm_rtc_device_register - resource managed rtc_device_register()
+ * @dev: the device to register
+ * @name: the name of the device (unused)
+ * @ops: the rtc operations structure
+ * @owner: the module owner
+ *
+ * @return a struct rtc on success, or an ERR_PTR on error
+ *
+ * Managed rtc_device_register(). The rtc_device returned from this function
+ * are automatically freed on driver detach.
+ * This function is deprecated, use devm_rtc_allocate_device and
+ * rtc_register_device instead
+ */
+struct rtc_device *devm_rtc_device_register(struct device *dev,
+					    const char *name,
+					    const struct rtc_class_ops *ops,
+					    struct module *owner)
+{
+	struct rtc_device *rtc;
+	int err;
+
+	rtc = devm_rtc_allocate_device(dev);
+	if (IS_ERR(rtc))
+		return rtc;
+
+	rtc->ops = ops;
+
+	err = __rtc_register_device(owner, rtc);
+	if (err)
+		return ERR_PTR(err);
+
+	return rtc;
+}
+EXPORT_SYMBOL_GPL(devm_rtc_device_register);
+
+static int __init rtc_init(void)
+{
+	rtc_class = class_create(THIS_MODULE, "rtc");
+	if (IS_ERR(rtc_class)) {
+		pr_err("couldn't create class\n");
+		return PTR_ERR(rtc_class);
+	}
+	rtc_class->pm = RTC_CLASS_DEV_PM_OPS;
+	rtc_dev_init();
+	return 0;
+}
+subsys_initcall(rtc_init);
diff --git a/upstream/linux-5.10/drivers/soc/sc/pcu/pcu-zx297520v3.c b/upstream/linux-5.10/drivers/soc/sc/pcu/pcu-zx297520v3.c
new file mode 100755
index 0000000..66c8cf3
--- /dev/null
+++ b/upstream/linux-5.10/drivers/soc/sc/pcu/pcu-zx297520v3.c
@@ -0,0 +1,976 @@
+/*
+ *
+ * Copyright (C) 2015-2022 ZTE-TSP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/kernel.h> 
+#include <linux/errno.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/percpu.h>
+#include <linux/refcount.h>
+#include <linux/slab.h>
+#include <linux/irqchip.h>
+#include <linux/suspend.h>
+#include <linux/syscore_ops.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include <linux/soc/sc/common.h>
+#include <linux/soc/sc/spinlock.h>
+#include <linux/soc/sc/pcu.h>
+#include <linux/soc/sc/rpmsg.h>
+#include <dt-bindings/soc/zx297520v3-irq.h>
+#include <uapi/linux/sc_bsp/bsp_api.h>
+
+#include "pcu-common.h"
+
+#if 0
+
+#define pm_ram_log(fmt, args...)      	\
+{	\
+	pm_printk("[SLP] " fmt, ##args);	\
+}
+#else
+#define pm_ram_log(fmt, args...)      	\
+{	\
+	printk(KERN_INFO "[SLP] " fmt, ##args);	\
+}
+
+#endif
+
+#define ZX_IRQ_NUM			(IRQ_ZX297520V3_SPI_NUM + 32)
+
+#define PCU_LOCK		reg_spin_lock();
+#define PCU_UNLOCK		reg_spin_unlock();
+
+static struct zx_pcu_int_info zx297520v3_pcu_int_info[] = 
+{
+	{
+		.pcu_index		= PCU_AP_TIMER1_INT,
+	 	.gic_index		= AP_TIMER1_INT,
+		.status_index	= 51,
+	 	.wake_index		= 0,
+	 	.int_name		= "ap_timer1",
+	 	.irq_type		= IRQ_TYPE_EDGE_RISING,
+	 	.wl_type		= PM_WL_EVENT_AP_TIMER1,
+	},
+	{
+		.pcu_index		= PCU_AP_TIMER2_INT,
+	 	.gic_index		= AP_TIMER2_INT,
+	 	.status_index	= 52,	 	
+		.wake_index 	= 1,
+	 	.int_name		= "ap_timer2",
+	 	.irq_type		= IRQ_TYPE_EDGE_RISING,
+	 	.wl_type		= PM_WL_EVENT_AP_TIMER2,
+	},
+	{
+		.pcu_index		= PCU_ICP_PS2AP_INT,
+		.gic_index		= ICP_PS2AP_INT,
+	 	.status_index	= 53,		
+		.wake_index 	= 2,
+		.int_name		= "icp_ps_ap",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_ICP_PS2AP,
+	},
+	{
+		.pcu_index		= PCU_USB_POWERDWN_UP_INT,
+		.gic_index		= USB_POWERDWN_UP_INT,
+	 	.status_index	= 6,		
+		.wake_index 	= 3,
+		.int_name		= "usb_up",
+		.irq_type		= IRQ_TYPE_EDGE_RISING,
+		.wl_type		= PM_WL_EVENT_USB_POWERDWN_UP,
+	},
+	{
+		.pcu_index		= PCU_USB_POWERDWN_DOWN_INT,
+		.gic_index		= USB_POWERDWN_DOWN_INT,
+	 	.status_index	= 7,		
+		.wake_index 	= 4,
+		.int_name		= "usb_down",
+		.irq_type		= IRQ_TYPE_EDGE_FALLING,
+		.wl_type		= PM_WL_EVENT_USB_POWERDWN_DOWN,
+	},
+	{
+		.pcu_index		= PCU_HSIC_POWERDWN_UP_INT,
+		.gic_index		= HSIC_POWERDWN_UP_INT,
+	 	.status_index	= 8,		
+		.wake_index 	= 5,
+		.int_name		= "hsic_up",
+		.irq_type		= IRQ_TYPE_EDGE_RISING,
+		.wl_type		= PM_WL_EVENT_HSIC_POWERDWN_UP,
+	},
+	{
+		.pcu_index		= PCU_HSIC_POWERDWN_DOWN_INT,
+		.gic_index		= HSIC_POWERDWN_DOWN_INT,
+	 	.status_index	= 9,		
+		.wake_index 	= 6,
+		.int_name		= "hsic_down",
+		.irq_type		= IRQ_TYPE_EDGE_FALLING,
+		.wl_type		= PM_WL_EVENT_HSIC_POWERDWN_DOWN,
+	},
+	{
+		.pcu_index		= PCU_ICP_M02AP_INT,
+		.gic_index		= ICP_M02AP_INT,
+	 	.status_index	= 54,		
+		.wake_index 	= 7,
+		.int_name		= "icp_m0_ap",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_ICP_M02AP,
+	},
+	{
+		.pcu_index		= PCU_RTC_ALARM_INT,
+		.gic_index		= RTC_ALARM_INT,
+	 	.status_index	= 12,		
+		.wake_index 	= 8,
+		.int_name		= "rtc_alarm",
+		.irq_type		= IRQ_TYPE_LEVEL_LOW,
+		.wl_type		= PM_WL_EVENT_ALARM,
+	},
+	{
+		.pcu_index		= PCU_RTC_TIMER_INT,
+		.gic_index		= RTC_TIMER_INT,
+	 	.status_index	= 13,		
+		.wake_index 	= 9,
+		.int_name		= "rtc_timer",
+		.irq_type		= IRQ_TYPE_LEVEL_LOW,
+		.wl_type		= PM_WL_EVENT_RTC_TIMER,
+	},
+	{
+		.pcu_index		= PCU_KEYPAD_INT,
+		.gic_index		= KEYPAD_INT,
+	 	.status_index	= 14,		
+		.wake_index 	= 10,
+		.int_name		= "kpd",
+		.irq_type		= IRQ_TYPE_EDGE_RISING,
+		.wl_type		= PM_WL_EVENT_KEYPAD,
+	},
+	{
+		.pcu_index		= PCU_SD1_DATA1_INT,
+		.gic_index		= SD1_DATA1_INT,
+	 	.status_index	= 15,		
+		.wake_index 	= 11,
+		.int_name		= "sd1_d1",
+		.irq_type		= IRQ_TYPE_LEVEL_LOW,
+		.wl_type		= PM_WL_EVENT_SD1_DATA1,
+	},
+	{
+		.pcu_index		= PCU_EX0_INT,
+		.gic_index		= EX0_INT,
+	 	.status_index	= 30,		
+		.wake_index 	= 14,
+		.int_name		= "ext0",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT0,
+	},
+	{
+		.pcu_index		= PCU_EX1_INT,
+		.gic_index		= EX1_INT,
+	 	.status_index	= 31,		
+		.wake_index 	= 15,
+		.int_name		= "ext1",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT1,
+	},
+	{
+		.pcu_index		= PCU_EX2_INT,
+		.gic_index		= EX2_INT,
+	 	.status_index	= 32,		
+		.wake_index 	= 16,
+		.int_name		= "ext2",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT2,
+	},
+	{
+		.pcu_index		= PCU_EX3_INT,
+		.gic_index		= EX3_INT,
+	 	.status_index	= 33,		
+		.wake_index 	= 17,
+		.int_name		= "ext3",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT3,
+	},
+	{
+		.pcu_index		= PCU_EX4_INT,
+		.gic_index		= EX4_INT,
+	 	.status_index	= 34,		
+		.wake_index 	= 18,
+		.int_name		= "ext4",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT4,
+	},
+	{
+		.pcu_index		= PCU_EX5_INT,
+		.gic_index		= EX5_INT,
+	 	.status_index	= 35,		
+		.wake_index 	= 19,
+		.int_name		= "ext5",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT5,
+	},
+	{
+		.pcu_index		= PCU_EX6_INT,
+		.gic_index		= EX6_INT,
+	 	.status_index	= 36,		
+		.wake_index 	= 20,
+		.int_name		= "ext6",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT6,
+	},
+	{
+		.pcu_index		= PCU_EX7_INT,
+		.gic_index		= EX7_INT,
+	 	.status_index	= 37,		
+		.wake_index 	= 21,
+		.int_name		= "ext7",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT7,
+	},
+	{
+		.pcu_index		= PCU_EX8_INT,
+		.gic_index		= EX8IN1_INT,
+	 	.status_index	= 38,		
+		.wake_index 	= 22,
+		.int_name		= "ext8",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT8,
+	},
+	{
+		.pcu_index		= PCU_EX9_INT,
+		.gic_index		= EX8IN1_INT,
+	 	.status_index	= 39,
+		.wake_index 	= 23,
+		.int_name		= "ext9",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT9,
+	},
+	{
+		.pcu_index		= PCU_EX10_INT,
+		.gic_index		= EX8IN1_INT,
+	 	.status_index	= 40,		
+		.wake_index 	= 24,
+		.int_name		= "ext10",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT10,
+	},
+	{
+		.pcu_index		= PCU_EX11_INT,
+		.gic_index		= EX8IN1_INT,
+	 	.status_index	= 41,		
+		.wake_index 	= 25,
+		.int_name		= "ext11",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT11,
+	},
+	{
+		.pcu_index		= PCU_EX12_INT,
+		.gic_index		= EX8IN1_INT,
+	 	.status_index	= 42,		
+		.wake_index 	= 26,
+		.int_name		= "ext12",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT12,
+	},
+	{
+		.pcu_index		= PCU_EX13_INT,
+		.gic_index		= EX8IN1_INT,
+	 	.status_index	= 43,		
+		.wake_index 	= 27,
+		.int_name		= "ext13",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT13,
+	},
+	{
+		.pcu_index		= PCU_EX14_INT,
+		.gic_index		= EX8IN1_INT,
+	 	.status_index	= 44,		
+		.wake_index 	= 28,
+		.int_name		= "ext14",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT14,
+	},
+	{
+		.pcu_index		= PCU_EX15_INT,
+		.gic_index		= EX8IN1_INT,
+	 	.status_index	= 45,		
+		.wake_index 	= 29,
+		.int_name		= "ext15",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_EXT15,
+	},
+	{
+		.pcu_index		= PCU_SD0_DATA1_INT,
+		.gic_index		= SD0_DATA1_INT,
+	 	.status_index	= 2,		
+		.wake_index 	= 30,
+		.int_name		= "sd0_d1",
+		.irq_type		= IRQ_TYPE_LEVEL_LOW,
+		.wl_type		= PM_WL_EVENT_SD0_DATA1,
+	},
+	{
+		.pcu_index		= PCU_ICP_PHY2AP_INT,
+		.gic_index		= ICP_PHY2AP_INT,
+		.status_index	= 55,		
+		.wake_index 	= 31,
+		.int_name		= "icp_phy_ap",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= PM_WL_EVENT_ICP_PHY2AP,
+	},
+	{
+		.pcu_index		= PCU_GMACPHY_WAKE_INT,
+		.gic_index		= GMACPHY_WAKE_INT,
+		.status_index	= 60,		
+		.wake_index 	= 0xff,
+		.int_name		= "gmacphy_wake",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= 0xff,
+	},
+	{
+		.pcu_index		= PCU_UART0_RXD_INT,
+		.gic_index		= UART0_RXD_INT,
+		.status_index	= 59,		
+		.wake_index 	= 42,
+		.int_name		= "uart0_rxd",
+		.irq_type		= IRQ_TYPE_EDGE_FALLING,
+		.wl_type		= 0xff,
+
+	},
+	{
+		.pcu_index		= PCU_GMAC_INT,
+		.gic_index		= GMAC_INT,
+		.status_index	= 16,		
+		.wake_index 	= 0xff,
+		.int_name		= "gmac",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= 0xff,
+	},
+	{
+		.pcu_index		= PCU_GMACPHY_INT,
+		.gic_index		= GMACPHY_INT,
+		.status_index	= 61,		
+		.wake_index 	= 0xff,
+		.int_name		= "gmacphy",
+		.irq_type		= IRQ_TYPE_LEVEL_HIGH,
+		.wl_type		= 0xff,
+	},
+};
+
+static int zx_pcu_get_irqchip_state(struct irq_data *data,
+				     enum irqchip_irq_state which, bool *val)
+{
+	data = data->parent_data;
+
+	if (data->chip->irq_get_irqchip_state)
+		return data->chip->irq_get_irqchip_state(data, which, val);
+
+	return -ENOSYS;
+}
+
+static int zx_pcu_set_irqchip_state(struct irq_data *data,
+				     enum irqchip_irq_state which, bool val)
+{
+	data = data->parent_data;
+
+	if (data->chip->irq_set_irqchip_state)
+		return data->chip->irq_set_irqchip_state(data, which, val);
+
+	return -ENOSYS;
+}
+
+static int zx_pcu_nmi_setup(struct irq_data *data)
+{
+	data = data->parent_data;
+
+	if (data->chip->irq_nmi_setup)
+		return data->chip->irq_nmi_setup(data);
+
+	return -ENOSYS;
+}
+
+static void zx_pcu_nmi_teardown(struct irq_data *data)
+{
+	data = data->parent_data;
+
+	if (data->chip->irq_nmi_teardown)
+		data->chip->irq_nmi_teardown(data);
+}
+
+static int zx_pcu_set_wake(struct irq_data *data, unsigned int on)
+{
+	pcu_set_irq_wake(data->hwirq, on);
+
+/*	pr_info("irq:%d, onoff:%d", data->hwirq, on);*/
+	
+	return 0;
+}
+
+static void zx_pcu_eoi_irq(struct irq_data *data)
+{
+	pcu_clr_irq_pending(data->hwirq);
+
+	irq_chip_eoi_parent(data);
+}
+
+/*
+ * zx_pcu_set_type - program the trigger type of a PCU-routed interrupt.
+ *
+ * When the PCU latches the trigger itself, the parent GIC is programmed
+ * as plain level-high and the PCU performs the real edge/level detection.
+ */
+static int zx_pcu_set_type(struct irq_data *data, unsigned int type)
+{
+	unsigned int new_type = type;
+
+	/* NOTE(review): assumes pcu_set_irq_type() returns 0/false when the
+	 * PCU owns this hwirq's trigger — confirm against pcu-common.c. */
+	if(!pcu_set_irq_type(data->hwirq, type))
+		new_type = IRQ_TYPE_LEVEL_HIGH;
+
+	return irq_chip_set_type_parent(data, new_type);
+}
+
+static int zx_pcu_set_affinity(struct irq_data *data,
+				 const struct cpumask *dest, bool force)
+{
+/*
+	if (data->hwirq == IRQ_ZX298501_AP_TIMER1)
+		return irq_chip_set_affinity_parent(data, cpumask_of(0), force); // ???
+	else
+*/		return irq_chip_set_affinity_parent(data, dest, force);
+}
+
+static struct irq_chip zx_pcu_chip = {
+	.name			= "PCU",
+	.irq_eoi		= zx_pcu_eoi_irq,
+	.irq_mask		= irq_chip_mask_parent,
+	.irq_unmask		= irq_chip_unmask_parent,
+	.irq_set_wake	= zx_pcu_set_wake,
+	.irq_set_type	= zx_pcu_set_type,
+	
+	.irq_set_affinity		= zx_pcu_set_affinity,
+	.irq_get_irqchip_state	= zx_pcu_get_irqchip_state,
+	.irq_set_irqchip_state	= zx_pcu_set_irqchip_state,
+	.irq_set_vcpu_affinity	= irq_chip_set_vcpu_affinity_parent,
+	.irq_nmi_setup			= zx_pcu_nmi_setup,
+	.irq_nmi_teardown		= zx_pcu_nmi_teardown,	
+	.flags					= IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SET_TYPE_MASKED,
+};
+
+static int zx_pcu_domain_translate(struct irq_domain *d,
+					struct irq_fwspec *fwspec,
+					unsigned long *hwirq,
+					unsigned int *type)
+{
+	if (is_of_node(fwspec->fwnode)) {
+		if (fwspec->param_count != 3)
+			return -EINVAL;
+
+		/* No PPI should point to this domain */
+		if (fwspec->param[0] != 0)
+			return -EINVAL;
+
+		*hwirq = fwspec->param[1];
+		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * zx_pcu_domain_alloc - allocate irqs in the PCU hierarchy domain.
+ *
+ * Validates the 3-cell GIC binding (must be an SPI, hwirq below
+ * ZX_IRQ_NUM), installs zx_pcu_chip for each virq, then forwards the
+ * unmodified fwspec to the parent GIC domain.
+ */
+static int zx_pcu_domain_alloc(struct irq_domain *domain,
+				    unsigned int virq,
+				    unsigned int nr_irqs, void *data)
+{
+	struct irq_fwspec *fwspec = data;
+	struct irq_fwspec parent_fwspec;
+	struct zx_pcu_dev *pcu = domain->host_data;
+	irq_hw_number_t hwirq;
+	unsigned int i;
+
+	if (fwspec->param_count != 3)
+		return -EINVAL;	/* Not GIC compliant */
+	if (fwspec->param[0] != GIC_SPI)
+		return -EINVAL;	/* No PPI should point to this domain */
+
+	hwirq = fwspec->param[1];
+	if (hwirq >= ZX_IRQ_NUM)
+		return -EINVAL;
+
+	for (i = 0; i < nr_irqs; i++) {
+		/* chip_data carries the PCU register base for the irq_chip ops */
+		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+					      &zx_pcu_chip,
+					      (void __force *)pcu->top_reg_base);
+	}
+
+	parent_fwspec = *fwspec;
+	parent_fwspec.fwnode = domain->parent->fwnode;
+	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+					    &parent_fwspec);
+}
+
+static const struct irq_domain_ops zx_pcu_domain_ops = {
+	.translate	= zx_pcu_domain_translate,
+	.alloc		= zx_pcu_domain_alloc,
+	.free		= irq_domain_free_irqs_common,
+};
+
+/*
+ * zx_pcu_init - IRQCHIP_DECLARE entry point for the zx297520v3 PCU.
+ *
+ * Stacks a hierarchy domain on top of the parent GIC so every SPI below
+ * ZX_IRQ_NUM passes through the PCU chip (wake latching, pending clear),
+ * then initialises the low-power PCU state.
+ */
+static int __init zx_pcu_init(struct device_node *node,
+				   struct device_node *parent)
+{
+	struct irq_domain *parent_domain, *domain;
+	struct zx_pcu_dev *pcu;
+
+	if (!parent) {
+		pr_err("%pOF: no parent found\n", node);
+		return -ENODEV;
+	}
+
+	parent_domain = irq_find_host(parent);
+	if (!parent_domain) {
+		pr_err("%pOF: unable to obtain parent domain\n", node);
+		return -ENXIO;
+	}
+
+	/* Single static instance shared with the PM/debug helpers below. */
+	pcu = &pcu_dev;
+	pcu->np				= node;
+	pcu->top_reg_base	= of_iomap(node, 0);
+	WARN(!pcu->top_reg_base, "unable to map top pcu registers\n");
+
+	pcu->int_info = zx297520v3_pcu_int_info;
+	pcu->int_count = ARRAY_SIZE(zx297520v3_pcu_int_info);
+
+	pcu_init();
+
+	domain = irq_domain_add_hierarchy(parent_domain, 0, ZX_IRQ_NUM,
+					  node, &zx_pcu_domain_ops,
+					  pcu);
+	if (!domain) {
+		pr_err("%pOF: failed to allocated domain\n", node);
+		return -ENOMEM;
+	}
+
+//	set_smp_cross_call();
+	pm_pcu_init();
+
+	return 0;
+}
+
+IRQCHIP_DECLARE(zx297520v3_pcu, "zte,zx297520v3-pcu", zx_pcu_init);
+
+/* pcu debug */
+#ifdef CONFIG_PM
+#define PCU_TOP						(pcu_dev.top_reg_base)
+
+#define ARM_AP_CONFIG_REG           (PCU_TOP + 0x0)
+#define ARM_AP_SLEEP_TIME_REG       (PCU_TOP + 4*0x3C)
+#define AP_INT_WAKE_DIS_REG        	(PCU_TOP + 4*0xD)
+#define CORE_SWITCH_CONFIG_REG    	(PCU_TOP + 4*0x2b)
+
+#define M0_INT_WAKE_DIS_REG        	(PCU_TOP + 4*0xE)
+#define PCU_INT_READOUT_REG1		(PCU_TOP + 4*0x1EB)
+#define PCU_INT_READOUT_REG2		(PCU_TOP + 4*0x1EC)
+#define PCU_INT_READOUT_REG3		(PCU_TOP + 4*0x1ED)
+
+
+/*ARM_AP_CONFIG_REG*/
+#define	PCU_SLEEP_MODE				(1U << 0)
+#define	PCU_POWEROFF_MODE			(1U << 1)
+#define	PCU_L2_CLK_GATE				(1U << 2)		/*1-can turn off*/
+#define PCU_SLEEP_2M0               (1U << 3)
+#define	PCU_SLEEP_DONE_BYPASS		(1U << 4)	 	
+#define	PCU_SW_CONFIG_MASK			(1U << 5)	 	/* ?????  */
+
+#define	PCU_MODE_MASK				(0x3U << 0)
+
+/*ARM_AP_SLEEP_TIME_REG*/
+#define	PCU_AP_SLEEP_TIME_DIS       (1U << 31)
+
+
+
+/* low power function */
+extern unsigned int pm_get_wakesource(void);
+
+/**
+ * clear pcu sleep mode.
+ * 
+ */
+void pm_clear_pcu(void)
+{
+	zx_clr_reg(ARM_AP_CONFIG_REG, PCU_MODE_MASK);
+}
+
+void pm_pcu_init(void)
+{
+	zx_clr_reg(ARM_AP_CONFIG_REG, PCU_MODE_MASK);
+	zx_set_reg(ARM_AP_CONFIG_REG, PCU_L2_CLK_GATE);
+	zx_write_reg(AP_INT_WAKE_DIS_REG, ~(pm_get_wakesource()));
+}
+
+void zx_apmgclken_set(unsigned en)
+{
+	unsigned tmp;
+	if(en){
+		//set ps_clk_switch=1
+		tmp = zx_read_reg(CORE_SWITCH_CONFIG_REG);
+		tmp |= (0x1<<2);
+		zx_write_reg(CORE_SWITCH_CONFIG_REG, tmp);
+	} else{
+		//set ps_clk_switch=0
+		tmp = zx_read_reg(CORE_SWITCH_CONFIG_REG);
+		tmp &= ~(0x1<<2);
+		zx_write_reg(CORE_SWITCH_CONFIG_REG, tmp);
+	}
+}
+
+
+/**
+ * config pcu before poweroff
+ * 
+ */
+void pm_set_pcu_poweroff(u32 sleep_time)
+{
+	zx_set_reg(ARM_AP_CONFIG_REG, PCU_POWEROFF_MODE);
+	zx_write_reg(ARM_AP_SLEEP_TIME_REG, sleep_time);
+}
+EXPORT_SYMBOL(pm_set_pcu_poweroff);
+
+
+/**
+ * config pcu before sleep
+ * 
+ */
+void pm_set_pcu_sleep(u32 sleep_time)
+{
+	zx_set_reg(ARM_AP_CONFIG_REG, PCU_SLEEP_MODE);
+	zx_write_reg(ARM_AP_SLEEP_TIME_REG, sleep_time);
+}
+
+/**
+ * get wakeup setting.
+ * 
+ */
+unsigned int pcu_get_wakeup_setting(void)
+{
+	return zx_read_reg(AP_INT_WAKE_DIS_REG);
+}
+/**
+ * set wakeup enable by gic.
+ * 
+ * 
+ */
+unsigned int  gic_wake_enable[3]=
+{
+	(1<<ICP_PS2AP_INT) |(1<<ICP_M02AP_INT) | (1<<AP_TIMER1_INT) | (1<<EX8IN1_INT),
+	0, 
+	0
+};
+
+extern void show_icp_state(T_RpMsg_CoreID actorID);
+/*
+ * pm_get_wake_cause - identify and log which PCU source woke the AP.
+ *
+ * Scans the int_info table for the first source that is (a) wake-capable,
+ * (b) not disabled in AP_INT_WAKE_DIS_REG, and (c) pending in the PCU
+ * readout registers (pending reads as 0), then records a wakelock event
+ * for it.  Called from the resume path.
+ */
+void pm_get_wake_cause(void)
+{
+	unsigned int	int_status[2];
+	int 			i = 0;
+	int 			index_found = 0xff;
+	unsigned int	pcu_wake_setting[2];
+		
+	/* After wakeup a pending source reads as 0 in the readout regs. */
+	int_status[0] = zx_read_reg(PCU_INT_READOUT_REG1);
+	int_status[1] = zx_read_reg(PCU_INT_READOUT_REG2);
+
+	pcu_wake_setting[0] = zx_read_reg(AP_INT_WAKE_DIS_REG);
+	pcu_wake_setting[1] = zx_read_reg(M0_INT_WAKE_DIS_REG);
+
+	for(i=0; i<ARRAY_SIZE(zx297520v3_pcu_int_info); i++)
+	{
+		/* 0xff marks sources that cannot wake the system */
+		if (zx297520v3_pcu_int_info[i].wake_index == 0xff)
+			continue;
+
+		/* NOTE(review): wake_index reaches 42 (uart0_rxd) but this
+		 * masks a single 32-bit word — BIT(42) overflows it; confirm
+		 * whether indices >31 should consult a second register. */
+		if(pcu_wake_setting[0]&BIT(zx297520v3_pcu_int_info[i].wake_index))	
+			continue;
+
+		if(int_status[zx297520v3_pcu_int_info[i].status_index/32]&(1<<(zx297520v3_pcu_int_info[i].status_index%32)))
+			continue;
+
+		index_found = i;
+		break;
+	}
+	
+	if(index_found != 0xff)
+	{
+		pm_ram_log(" wake: %d  [%s]\n", zx297520v3_pcu_int_info[index_found].gic_index, zx297520v3_pcu_int_info[index_found].int_name);
+
+		/* For an inter-core wake, dump which ICP channel fired. */
+		if(zx297520v3_pcu_int_info[index_found].gic_index ==ICP_PS2AP_INT) {
+			show_icp_state(CORE_PS0);
+		}
+		pm_ram_log(" pcu int status:0x%x 0x%x\n",int_status[0], int_status[1]);
+
+		pm_wl_set_event(pcu_get_wl_index_by_gic(zx297520v3_pcu_int_info[index_found].gic_index));
+	}
+	else
+	{
+		pm_ram_log(" wake abnormal\n");
+		pm_ram_log(" pcu int status:0x%x 0x%x\n",int_status[0], int_status[1]);
+	}
+}
+
+static struct wakeup_source *zx_main_ws;
+static int zx_pcu_pm_callback(struct notifier_block *nb,
+			unsigned long action, void *ptr)
+{
+	switch (action) {
+
+	case PM_POST_SUSPEND:
+		__pm_wakeup_event(zx_main_ws, 1000);		
+		break;
+
+	default:
+		return NOTIFY_DONE;
+	}
+
+	return NOTIFY_OK;
+}
+
+static int pcu_pm_suspend(void)
+{
+	int ret = 0;
+
+	return ret;
+}
+
+static void pcu_pm_resume(void)
+{
+//	pcu_get_wake_cause();
+}
+
+static struct syscore_ops pcu_pm_syscore_ops = {
+	.suspend = pcu_pm_suspend,
+	.resume = pcu_pm_resume,
+};
+
+static int pcu_pm_init(void)
+{
+	zx_main_ws = wakeup_source_register(NULL, "zx_main");
+	if (!zx_main_ws)
+		return -ENOMEM;
+
+	pm_notifier(zx_pcu_pm_callback, 0);
+
+	register_syscore_ops(&pcu_pm_syscore_ops);
+	return 0;
+}
+core_initcall(pcu_pm_init);
+#endif
+
+/* --------------------------------------------------------------------
+ * extint_8in1
+ * -------------------------------------------------------------------- */
+
+struct ext8in1_info {
+	struct irq_domain   *domain;
+	struct regmap 		*regmap;
+	int                 parent_irq;
+	
+};
+
+struct ext8in1_info ext8in1_dev = {0};
+
+/*
+ * return external interrupt number from ex8-ex15,
+ * return value is 0-7
+ */
+static unsigned int pcu_get_8in1_int_source(void)
+{  
+	unsigned int vector_8in1 = 0;
+ 
+	vector_8in1 = zx_read_reg(pcu_dev.top_reg_base+0x12C);	 
+	
+	return 	(vector_8in1&0x7);
+}
+ 
+
+/*external int 8-15 need extra clear*/
+/*
+ * pcu_int_clear_8in1 - acknowledge one of the 8-in-1 external interrupts.
+ *
+ * EX8..EX15 share a single muxed line and need an extra clear of the mux
+ * latch (register 0x128) in addition to the normal PCU pending clear.
+ */
+static void pcu_int_clear_8in1(unsigned int pcu_index)
+{	
+	unsigned int vector=0;
+	
+	if ( (pcu_index >= PCU_EX8_INT)&&(pcu_index <= PCU_EX15_INT) )
+	{
+		/*
+		 * On the 7510 platform the 8-in-1 interrupt is shared between
+		 * cores.  While one core installs a new 8-in-1 handler, another
+		 * core may currently be servicing a different 8-in-1 source, so
+		 * the mux latch must not be cleared blindly: only clear it when
+		 * the currently latched vector matches pcu_index.  The trade-off
+		 * is that the installing core may still observe one spurious
+		 * interrupt.
+		 */
+		vector = pcu_get_8in1_int_source();
+		if (pcu_index != (vector + PCU_EX8_INT) )
+			return;
+
+		/* Mux clear is shared with other cores: take the HW spinlock. */
+		PCU_LOCK
+   		zx_write_reg(pcu_dev.top_reg_base+0x128, 0x1);
+		PCU_UNLOCK
+
+		pcu_int_clear(pcu_index);
+	}
+}
+
+static void ext8in1_irq_lock(struct irq_data *data){}
+static void ext8in1_irq_sync_unlock(struct irq_data *data){}
+static void ext8in1_irq_mask(struct irq_data *data){}
+static void ext8in1_irq_unmask(struct irq_data *data){}
+static int ext8in1_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+	if (!data)
+		return -EINVAL;
+
+	pcu_set_irq_wake_by_pcu(data->hwirq + PCU_EX8_INT, on);
+
+	return 0;
+}
+
+static int ext8in1_irq_set_type(struct irq_data *data, unsigned int type)
+{
+	if (!data)
+		return -EINVAL;
+
+	pcu_int_set_type(data->hwirq + PCU_EX8_INT, type);
+
+	pcu_int_clear_8in1(data->hwirq + PCU_EX8_INT);
+
+	return 0;
+}
+
+static struct irq_chip ext8in1_irq_chip =
+{
+    .name           		= "ext8in1",
+
+	.irq_set_wake			= ext8in1_irq_set_wake,
+	.irq_set_type			= ext8in1_irq_set_type,
+	.irq_mask				= ext8in1_irq_mask,
+	.irq_unmask				= ext8in1_irq_unmask,
+	.irq_bus_lock			= ext8in1_irq_lock,
+	.irq_bus_sync_unlock	= ext8in1_irq_sync_unlock,
+};
+
+/*
+ * ext8in1_handle_irq - chained handler demuxing the 8-in-1 parent line.
+ *
+ * Reads the latched vector, dispatches the mapped virq, then re-reads the
+ * vector and loops while it keeps changing, so back-to-back sources on
+ * the shared line are all serviced within one parent interrupt.
+ */
+static void ext8in1_handle_irq(struct irq_desc *desc)
+{
+	struct ext8in1_info *data = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	int virq;
+	int hwirq, new_hwirq;
+
+	chained_irq_enter(chip, desc);
+   
+    hwirq = pcu_get_8in1_int_source();
+
+	while(1) {
+		/* Ack the mux latch before dispatching the child handler. */
+		pcu_int_clear_8in1(hwirq + PCU_EX8_INT);
+
+		virq = irq_find_mapping(data->domain, hwirq);
+		if (virq > 0)
+			generic_handle_irq(virq);
+
+		/* A different vector after handling means another source fired. */
+		new_hwirq = pcu_get_8in1_int_source();
+        if (hwirq == new_hwirq)
+            break;
+        else
+            hwirq = new_hwirq;
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+extern void mask_irq(struct irq_desc *desc);
+extern void unmask_irq(struct irq_desc *desc);
+static int ext8in1_irq_resume(struct device *dev)
+{
+	unmask_irq(irq_to_desc(ext8in1_dev.parent_irq));
+
+	return 0;
+}
+
+static int ext8in1_irq_suspend(struct device *dev)
+{
+	mask_irq(irq_to_desc(ext8in1_dev.parent_irq));
+
+	return 0;
+}
+
+/*
+ * zx_ext8in1_probe - set up the 8-in-1 external interrupt demux.
+ *
+ * Resolves the parent syscon regmap, creates a linear 8-irq domain for
+ * EX8..EX15, and chains ext8in1_handle_irq onto the muxed parent line.
+ * Returns 0 on success or a negative errno.
+ */
+static int zx_ext8in1_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct device_node *parent_np;
+	struct regmap *regmap;
+	struct ext8in1_info *data = &ext8in1_dev;
+	int i;
+
+	parent_np = of_parse_phandle(pdev->dev.of_node, "parent-syscon", 0);
+	if (!parent_np) {
+		dev_err(&pdev->dev, "Can't get parent-syscon\n");
+		return -EINVAL;
+	}
+
+	regmap = syscon_node_to_regmap(parent_np);
+	/*
+	 * The phandle reference is no longer needed once the regmap lookup
+	 * has completed (successfully or not): drop it on every path, not
+	 * only on error, to avoid leaking the node refcount.
+	 */
+	of_node_put(parent_np);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+	data->regmap = regmap;
+
+	data->parent_irq = platform_get_irq(pdev, 0);
+	if (data->parent_irq <= 0)
+		return -EPROBE_DEFER;	/* NOTE(review): masks real errnos; confirm intended */
+
+	data->domain = irq_domain_add_linear(np, 8, &irq_domain_simple_ops, NULL);
+	if (!data->domain)
+		return -ENODEV;
+
+	/* Pre-create a mapping for each of the eight muxed sources. */
+	for (i = EX8_INT; i <= EX15_INT; i++) {
+		int virq = irq_create_mapping(data->domain, i);
+
+		irq_set_chip_and_handler(virq, &ext8in1_irq_chip,
+					 handle_simple_irq);
+		irq_set_chip_data(virq, data);
+	}
+
+	irq_set_chained_handler_and_data(data->parent_irq,
+					 ext8in1_handle_irq, data);
+	enable_irq_wake(data->parent_irq);
+
+	pr_info("zx_ext8in1 init OK. \n");
+
+	return 0;
+}
+
+static const struct of_device_id zx_ext8in1_match[] = {
+	{ .compatible = "zte,zx297520v3-ext8in1" },
+	{ }
+};
+
+static const struct dev_pm_ops ext8in1_irq_pm_ops = {
+	.resume = ext8in1_irq_resume,
+	.suspend = ext8in1_irq_suspend,
+};
+
+static struct platform_driver zx_ext8in1_driver = {
+	.probe = zx_ext8in1_probe,
+	.driver = {
+		.name = "zx_ext8in1_drv",
+		.of_match_table = zx_ext8in1_match,
+		.pm = &ext8in1_irq_pm_ops,
+	},
+};
+
+static int __init zx_ext8in1_driver_init(void)
+{
+	return platform_driver_register(&zx_ext8in1_driver);
+}
+core_initcall(zx_ext8in1_driver_init);
+
diff --git a/upstream/linux-5.10/drivers/soc/sc/rpmsg/zx29_icp.c b/upstream/linux-5.10/drivers/soc/sc/rpmsg/zx29_icp.c
new file mode 100755
index 0000000..aae42a2
--- /dev/null
+++ b/upstream/linux-5.10/drivers/soc/sc/rpmsg/zx29_icp.c
@@ -0,0 +1,490 @@
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/clockchips.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/syscore_ops.h>
+#include <linux/gpio.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/soc/sc/common.h>
+#include <linux/soc/sc/spinlock.h>
+
+#include "icp_dev.h"
+#include "zx29_icp.h"
+#include "icp_rpmsg.h"
+
+static icp_callback_fn	_icp_fn;
+static T_HalIcp_Reg *icp_ap2m0_reg;
+static T_HalIcp_Reg *icp_ap2ps_reg;
+
+static inline T_HalIcp_Reg *icp_get_reg(T_ZDrvRpMsg_ActorID actor_id)
+{
+	if (CORE_M0 == actor_id )
+		return icp_ap2m0_reg;
+	else if (CORE_PS0 == actor_id )
+		return icp_ap2ps_reg;
+	else
+		BUG();
+}
+
+/*******************************************************************************
+* Function: icp_set_int
+* Description: This function is used for generating icp interrupt to inform remote cpu;
+* Parameters:
+*   Input:
+           actorID: id of remote cpu
+           chID: id of channel
+*   Output:None
+*
+* Returns:None
+*
+*
+* Others:
+********************************************************************************/
+static int icp_set_int(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID)
+{
+	T_HalIcp_Reg *icp_reg;
+
+    if ((actorID >= CORE_MAXID) || (chID >= CHANNEL_MAXID(actorID)))
+        return -EINVAL;
+
+	icp_reg = icp_get_reg(actorID);
+
+    if(chID<32)
+		icp_reg->control.low_word	= (1<<chID);
+    else
+		icp_reg->control.high_word	= (1<<(chID-32));
+
+	return 0;
+}
+  
+/*******************************************************************************
+* Function: icp_clear_int
+* Description: This function is used for clear icp interrupt from remote cpu;
+* Parameters:
+*   Input:
+           actorID: id of remote cpu
+           chID: id of channel
+*   Output:None
+*
+* Returns:None
+*
+*
+* Others:
+********************************************************************************/
+static void icp_clear_int(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID)
+{
+	T_HalIcp_Reg *icp_reg = icp_get_reg(actorID);
+
+    if(chID<32)
+        icp_reg->clear.low_word  = (1<<chID);
+    else
+       	icp_reg->clear.high_word = (1<<(chID-32)) ;
+}
+
+/*******************************************************************************
+* Function: icp_get_int
+* Description: This function is used for get icp interrupt from remote cpu;
+* Parameters:
+*   Input:
+*           actorID: id of remote cpu
+*           chID: id of channel
+*   Output:None
+*
+* Returns:None
+*
+*
+* Others:
+********************************************************************************/
+T_HalIcp_Dword icp_get_int(T_ZDrvRpMsg_ActorID actorID)
+{
+	T_HalIcp_Dword IcpState;
+	T_HalIcp_Reg *icp_reg;
+
+	/* Invalid core id: report "no channel pending" instead of touching
+	 * icp_get_reg(), which would BUG() on an unknown id. */
+    if (actorID >= CORE_MAXID)
+    {
+		IcpState.high_word	= 0;
+		IcpState.low_word	= 0;
+    
+        return IcpState;
+    }
+
+	icp_reg = icp_get_reg(actorID);
+	
+	/* Snapshot the 64-bit pending state (two 32-bit register reads). */
+	IcpState.high_word 	= icp_reg->state.high_word;
+	IcpState.low_word 	= icp_reg->state.low_word;
+	
+	return IcpState;
+}
+
+/*******************************************************************************
+* Function: icp_get_int_state
+* Description: This function is used for get the state of icp interruptting  of remote cpu;
+* Parameters:
+*   Input:
+           actorID: id of remote cpu
+           chID: id of channel
+*   Output:None
+*
+* Returns:None
+*
+*
+* Others:
+********************************************************************************/
+/*
+ * icp_get_int_state - test whether a channel's doorbell is still raised.
+ *
+ * Reads the in_state register of the remote core's mailbox; returns true
+ * while the previously triggered interrupt on @chID has not yet been
+ * consumed.  Caller must pass a valid actorID (no range check here).
+ */
+static int icp_get_int_state(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID)
+{
+	T_HalIcp_Reg *icp_reg;
+
+	icp_reg = icp_get_reg(actorID);
+
+	/* Channels 0-31 live in low_word, 32-63 in high_word. */
+	if(chID<32)
+	{
+		if(icp_reg->in_state.low_word & (0x1<<chID))
+			return true;
+	}
+	else
+	{
+		if(icp_reg->in_state.high_word & (0x1<<(chID-32)))
+			return true;
+	}
+
+	return false;
+}
+	
+/*******************************************************************************
+* Function: icp_mask_int
+* Description: This function is used for Mask interrupt of channel;
+* Parameters:
+*   Input:
+*   Output:
+*
+* Returns:  NONE
+*
+*
+* Others:
+********************************************************************************/
+static int icp_mask_int(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID)
+{
+	T_HalIcp_Reg *icp_reg;
+
+    if ((actorID >= CORE_MAXID) || (chID >= CHANNEL_MAXID(actorID)))
+        return -EINVAL;
+
+	icp_reg = icp_get_reg(actorID);
+
+    if(chID<32)
+        icp_reg->mask.low_word  |= (0x1<<chID);
+    else
+        icp_reg->mask.high_word |= (0x1<<(chID-32));
+
+	return 0;
+}
+
+/*******************************************************************************
+* Function: icp_unmask_int
+* Description: This function is used for unmask interrupt of channel;
+* Parameters:
+*   Input:
+*   Output:
+*
+* Returns:
+*            NONE
+*
+*
+* Others:
+********************************************************************************/
+static int icp_unmask_int(T_ZDrvRpMsg_ActorID actorID, T_ZDrvRpMsg_ChID chID)
+{
+	T_HalIcp_Reg *icp_reg;
+
+    if ((actorID >= CORE_MAXID) || (chID >= CHANNEL_MAXID(actorID)))
+        return -EINVAL;
+
+	icp_reg = icp_get_reg(actorID);
+
+	if(chID < 32)
+		icp_reg->mask.low_word  &= ~(0x1<<chID);
+    else
+		icp_reg->mask.high_word &= ~(0x1<<(chID-32));
+
+	return 0;
+}
+
+int icp_int_count = 0;
+#ifdef CONFIG_ZX29_WATCHDOG
+extern void zx_wdt_icp_wake(void);
+#endif
+/*
+ * icp_isr - shared interrupt handler for the AP-side ICP mailboxes.
+ *
+ * The request_irq() dev_id cookie smuggles the remote core id (not a
+ * pointer).  For every pending channel bit: invoke the registered
+ * callback, then acknowledge the channel.
+ */
+irqreturn_t icp_isr(int irq, void *data)
+{
+	icp_msg	_icp_msg;	
+	T_HalIcp_Dword IcpState;	
+	unsigned int i;
+
+	/* dev_id carries the core id by value — fine on 32-bit ARM;
+	 * NOTE(review): would warn/truncate on 64-bit, confirm if ported. */
+	_icp_msg.src_id = (unsigned int)data;
+
+	IcpState = icp_get_int(_icp_msg.src_id);
+
+	for(i=0; i<CHANNEL_MAXID(_icp_msg.src_id); i++)
+	{
+		/* Channels 0-31 in low_word, 32+ in high_word. */
+		if((((i<32)&&((IcpState.low_word>>i) & 0x1))||((i>=32)&&((IcpState.high_word>>(i-32)) & 0x1)))) {
+			_icp_msg.event_id = i;			
+		#ifdef CONFIG_ZX29_WATCHDOG			
+			/* M0 channel 2 doubles as the watchdog kick wakeup. */
+			if((CORE_M0 == _icp_msg.src_id)&&(2 == i))
+	  			zx_wdt_icp_wake();
+		#endif
+			if(_icp_fn)
+				_icp_fn(&_icp_msg);
+			
+			icp_clear_int(_icp_msg.src_id, i);
+		}
+	}
+
+	/* Diagnostic counter only; not used for flow control. */
+	icp_int_count ++;
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * for loopback test
+ */
+void fake_icp_isr(T_RpMsg_CoreID src_core, T_RpMsg_CoreID dest_core, T_RpMsg_ChID ch)
+{
+	icp_msg	_icp_msg;
+	unsigned int i;
+
+	_icp_msg.src_id 	= src_core;
+	_icp_msg.dest_core 	= dest_core;	
+	_icp_msg.event_id 	= ch;
+
+	if(_icp_fn)
+		_icp_fn(&_icp_msg);
+}
+
+/*
+ * for get wake state
+ */
+void icp_get_int_info(T_ZDrvRpMsg_ActorID actorID, unsigned int *high_word, unsigned int *low_word)
+{
+	T_HalIcp_Dword IcpState;
+
+	IcpState = icp_get_int(actorID);
+
+	*high_word = IcpState.high_word;
+	*low_word = IcpState.low_word;
+}
+
+static const char * const ps_channel_info[64] = {
+	[0] = "drv test",
+	[2] = "Power Management",
+	[3] = "ADB agent",
+	[4] = "USB app config",
+	[5] = "USB kernel config",
+	[6] = "audio",
+	[7] = "console switch",
+	[8] = "NV",
+	[9] = "debug",
+	[10] = "ramdump",
+	[11] = "tee common",
+	[12] = "tee RPC",
+	[13] = "ap2cap message queue",
+	[14] = "cap2ap message queue",
+	[15] = "AMT framework",
+	[16] = "APP rsvd 16",
+	[17] = "APP rsvd 17",
+	[18] = "APP rsvd 18",
+	[19] = "APP rsvd 19",
+	[20] = "zvnet 20",
+	[21] = "zvnet 21",
+	[22] = "zvnet 22",
+	[23] = "zvnet 23",
+	[24] = "zvnet 24",
+	[25] = "zvnet 25",
+	[26] = "zvnet 26",
+	[27] = "zvnet 27",
+	[28] = "zvnet 28",
+	[29] = "free skb",
+	[30] = "ttygs0",
+	[31] = "ttygs1",
+	[32] = "socket ipc",
+	[33] = "binder ipc",
+	[34] = "at channel 34",
+	[35] = "at channel 35",
+	[36] = "at channel 36",
+	[37] = "at channel 37",
+	[38] = "at channel 38",
+	[39] = "at channel 39",
+	[40] = "at channel 40",
+	[41] = "voice buffer",
+};
+
+void show_icp_state(T_ZDrvRpMsg_ActorID actorID)
+{
+	unsigned int	hw, lw;
+	int i;
+
+	if (actorID != CORE_PS0)
+		return;
+
+	icp_get_int_info(actorID, &hw, &lw);
+	pr_info("[SLP] icpwake: 0x%x 0x%x\n", hw, lw);
+
+	for (i=0; i<32; i++)
+		if (lw&BIT(i))
+			pr_info("[SLP] icpwake: channel(%d) function(%s)\n", i, ps_channel_info[i] ? ps_channel_info[i] : "NA");
+
+	for (i=0; i<32; i++)
+		if (hw&BIT(i))
+			pr_info("[SLP] icpwake: channel(%d) function(%s)\n", i+32, ps_channel_info[i+32] ? ps_channel_info[i+32] : "NA");
+}
+
+static void icp_register_callback(icp_callback_fn cb)
+{
+	_icp_fn = cb;
+}
+
+/*
+ * icp_send_message - raise the ICP doorbell towards a remote core.
+ * @core_id:  unused; kept for t_icpdev_ops interface compatibility
+ * @icp_msg:  message descriptor; dest_core/event_id select the channel
+ *
+ * Only triggers the interrupt when the previous one on this channel has
+ * been consumed (in_state bit clear).  Returns 0 on success, -EINVAL on
+ * invalid arguments.
+ */
+static int icp_send_message(unsigned int core_id, icp_msg *icp_msg)
+{
+	/*
+	 * Use >=, not >: CORE_MAXID itself is not a valid core id.  This
+	 * matches icp_set_int()/icp_mask_int()/icp_unmask_int() and keeps
+	 * an out-of-range dest_core from reaching icp_get_reg()'s BUG().
+	 */
+	if (!icp_msg || icp_msg->dest_core >= CORE_MAXID)
+		return -EINVAL;
+
+	if (icp_get_int_state(icp_msg->dest_core, icp_msg->event_id) == false)
+		icp_set_int(icp_msg->dest_core, icp_msg->event_id);
+
+	return 0;
+}
+
+static t_icpdev_ops zx29_icp_ops = {
+	.register_callback	= icp_register_callback,
+	.send_message		= icp_send_message, 
+	.mask_int			= icp_mask_int,
+	.unmask_int			= icp_unmask_int,
+	.set_int			= icp_set_int,
+};
+
+/*
+ * icp_ap2ps_init - probe helper for the AP<->PS mailbox instance.
+ *
+ * Maps the mailbox registers, masks all channels until rpmsg unmasks the
+ * ones it uses, installs icp_isr with CORE_PS0 as dev_id cookie, and
+ * starts the rpmsg layer.  Returns 0 on success or a negative errno.
+ */
+static int icp_ap2ps_init(struct device *dev)
+{
+	void __iomem *reg_base;
+	unsigned int irq;
+	int ret;
+	struct device_node *np = dev->of_node;
+
+	reg_base = of_iomap(np, 0);
+	if (!reg_base) {
+		pr_err("%s: [ICP]Cannot get IORESOURCE_MEM\n", __func__);
+		return -ENOENT;
+	}
+
+	icp_ap2ps_reg = (T_HalIcp_Reg *)reg_base;
+
+	irq = irq_of_parse_and_map(np, 0);
+	if (!irq) {
+		pr_err("%s: [ICP]Cannot get IORESOURCE_IRQ\n", __func__);
+		ret = -ENOENT;
+		goto err_unmap;	/* don't leak the register mapping */
+	}
+
+	/* Mask every channel; the rpmsg layer unmasks what it uses. */
+	icp_ap2ps_reg->mask.high_word	= 0xffffffff;
+	icp_ap2ps_reg->mask.low_word	= 0xffffffff;
+
+	/* dev_id carries the remote core id by value (cast back in icp_isr) */
+	ret = request_irq(irq, icp_isr, 0, "zx_icp", (void *)CORE_PS0);
+	if (ret) {
+		pr_err("%s: [ICP]register irq failed\n", __func__);
+		goto err_unmap;
+	}
+
+	enable_irq_wake(irq);
+
+	icpdev_register_ops(&zx29_icp_ops);
+
+	rpmsgInit(CORE_PS0, np);
+
+	/* %p, not %llx: reg_base is a (32-bit here) __iomem pointer. */
+	pr_info("%s: ok! irq(%d) icp_address(%p)\n", __func__, irq, reg_base);
+
+	return 0;
+
+err_unmap:
+	iounmap(reg_base);
+	icp_ap2ps_reg = NULL;
+	return ret;
+}
+
+/*
+ * icp_ap2m0_init - probe helper for the AP<->M0 mailbox instance.
+ *
+ * Mirrors icp_ap2ps_init() for the Cortex-M0 companion core: map the
+ * registers, mask all channels, install icp_isr with CORE_M0 as the
+ * dev_id cookie, and start the rpmsg layer.
+ */
+static int icp_ap2m0_init(struct device *dev)
+{
+	void __iomem *reg_base;
+	unsigned int irq;
+	int ret;
+	struct device_node *np = dev->of_node;
+
+	pr_info("%s: enter \n", __func__);
+
+	reg_base = of_iomap(np, 0);
+	if (!reg_base) {
+		pr_err("%s: [ICP]Cannot get IORESOURCE_MEM\n", __func__);
+		return -ENOENT;
+	}
+
+	icp_ap2m0_reg = (T_HalIcp_Reg *)reg_base;
+
+	irq = irq_of_parse_and_map(np, 0);
+	if (!irq) {
+		pr_err("%s: [ICP]Cannot get IORESOURCE_IRQ\n", __func__);
+		ret = -ENOENT;
+		goto err_unmap;	/* don't leak the register mapping */
+	}
+
+	/* Mask every channel; the rpmsg layer unmasks what it uses. */
+	icp_ap2m0_reg->mask.high_word	= 0xffffffff;
+	icp_ap2m0_reg->mask.low_word	= 0xffffffff;
+
+	/* dev_id carries the remote core id by value (cast back in icp_isr) */
+	ret = request_irq(irq, icp_isr, 0, "zx_icp", (void *)CORE_M0);
+	if (ret) {
+		pr_err("%s: [ICP]register irq failed\n", __func__);
+		goto err_unmap;
+	}
+
+	enable_irq_wake(irq);
+
+	icpdev_register_ops(&zx29_icp_ops);
+
+	rpmsgInit(CORE_M0, np);
+
+	/* %p, not %llx: reg_base is a (32-bit here) __iomem pointer. */
+	pr_info("%s: ok! irq(%d) icp_address(%p)\n", __func__, irq, reg_base);
+
+	return 0;
+
+err_unmap:
+	iounmap(reg_base);
+	icp_ap2m0_reg = NULL;
+	return ret;
+}
+
+static const struct of_device_id zx29_icp_dt_ids[] = {
+	{	.compatible = "zte,zx29-icp-ap2m0", .data = &icp_ap2m0_init	}, 
+	{	.compatible = "zte,zx29-icp-ap2ps", .data = &icp_ap2ps_init	}, 		
+	{	/* sentinel */	}
+};
+
+static int zx29_icp_probe(struct platform_device *pdev)
+{
+	int (*init_fn)(struct device *dev);
+
+	init_fn = of_device_get_match_data(&pdev->dev);
+	if (!init_fn) {
+		dev_err(&pdev->dev, "Error: No device match found\n");
+		return -ENODEV;
+	}
+
+	return init_fn(&pdev->dev);
+}
+	
+static struct platform_driver zx29_icp_driver = {
+	.driver = {
+		.name = "zx29-icp",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(zx29_icp_dt_ids),
+	},
+	.probe	= zx29_icp_probe,
+};
+
+builtin_platform_driver(zx29_icp_driver)
diff --git a/upstream/linux-5.10/drivers/tty/tty_io.c b/upstream/linux-5.10/drivers/tty/tty_io.c
new file mode 100755
index 0000000..669aef7
--- /dev/null
+++ b/upstream/linux-5.10/drivers/tty/tty_io.c
@@ -0,0 +1,3602 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+/*
+ * 'tty_io.c' gives an orthogonal feeling to tty's, be they consoles
+ * or rs-channels. It also implements echoing, cooked mode etc.
+ *
+ * Kill-line thanks to John T Kohl, who also corrected VMIN = VTIME = 0.
+ *
+ * Modified by Theodore Ts'o, 9/14/92, to dynamically allocate the
+ * tty_struct and tty_queue structures.  Previously there was an array
+ * of 256 tty_struct's which was statically allocated, and the
+ * tty_queue structures were allocated at boot time.  Both are now
+ * dynamically allocated only when the tty is open.
+ *
+ * Also restructured routines so that there is more of a separation
+ * between the high-level tty routines (tty_io.c and tty_ioctl.c) and
+ * the low-level tty routines (serial.c, pty.c, console.c).  This
+ * makes for cleaner and more compact code.  -TYT, 9/17/92
+ *
+ * Modified by Fred N. van Kempen, 01/29/93, to add line disciplines
+ * which can be dynamically activated and de-activated by the line
+ * discipline handling modules (like SLIP).
+ *
+ * NOTE: pay no attention to the line discipline code (yet); its
+ * interface is still subject to change in this version...
+ * -- TYT, 1/31/92
+ *
+ * Added functionality to the OPOST tty handling.  No delays, but all
+ * other bits should be there.
+ *	-- Nick Holloway <alfie@dcs.warwick.ac.uk>, 27th May 1993.
+ *
+ * Rewrote canonical mode and added more termios flags.
+ * 	-- julian@uhunix.uhcc.hawaii.edu (J. Cowley), 13Jan94
+ *
+ * Reorganized FASYNC support so mouse code can share it.
+ *	-- ctm@ardi.com, 9Sep95
+ *
+ * New TIOCLINUX variants added.
+ *	-- mj@k332.feld.cvut.cz, 19-Nov-95
+ *
+ * Restrict vt switching via ioctl()
+ *      -- grif@cs.ucr.edu, 5-Dec-95
+ *
+ * Move console and virtual terminal code to more appropriate files,
+ * implement CONFIG_VT and generalize console device interface.
+ *	-- Marko Kohtala <Marko.Kohtala@hut.fi>, March 97
+ *
+ * Rewrote tty_init_dev and tty_release_dev to eliminate races.
+ *	-- Bill Hawes <whawes@star.net>, June 97
+ *
+ * Added devfs support.
+ *      -- C. Scott Ananian <cananian@alumni.princeton.edu>, 13-Jan-1998
+ *
+ * Added support for a Unix98-style ptmx device.
+ *      -- C. Scott Ananian <cananian@alumni.princeton.edu>, 14-Jan-1998
+ *
+ * Reduced memory usage for older ARM systems
+ *      -- Russell King <rmk@arm.linux.org.uk>
+ *
+ * Move do_SAK() into process context.  Less stack use in devfs functions.
+ * alloc_tty_struct() always uses kmalloc()
+ *			 -- Andrew Morton <andrewm@uow.edu.eu> 17Mar01
+ */
+
+#include <linux/types.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/fcntl.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/task.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/devpts_fs.h>
+#include <linux/file.h>
+#include <linux/fdtable.h>
+#include <linux/console.h>
+#include <linux/timer.h>
+#include <linux/ctype.h>
+#include <linux/kd.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/ppp-ioctl.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/serial.h>
+#include <linux/ratelimit.h>
+#include <linux/compat.h>
+
+#include <linux/uaccess.h>
+
+#include <linux/kbd_kern.h>
+#include <linux/vt_kern.h>
+#include <linux/selection.h>
+
+#include <linux/kmod.h>
+#include <linux/nsproxy.h>
+
+/* Hangup-path tracing: compiled out unless TTY_DEBUG_HANGUP is defined */
+#undef TTY_DEBUG_HANGUP
+#ifdef TTY_DEBUG_HANGUP
+# define tty_debug_hangup(tty, f, args...)	tty_debug(tty, f, ##args)
+#else
+# define tty_debug_hangup(tty, f, args...)	do { } while (0)
+#endif
+
+/* Enable the cheap sanity checks implemented further down in this file */
+#define TTY_PARANOIA_CHECK 1
+#define CHECK_TTY_COUNT 1
+
+/* Default termios a newly created tty starts from unless its driver
+ * supplies its own init_termios (38400 8N1, canonical mode with echo). */
+struct ktermios tty_std_termios = {	/* for the benefit of tty drivers  */
+	.c_iflag = ICRNL | IXON,
+	.c_oflag = OPOST | ONLCR,
+	.c_cflag = B38400 | CS8 | CREAD | HUPCL,
+	.c_lflag = ISIG | ICANON | ECHO | ECHOE | ECHOK |
+		   ECHOCTL | ECHOKE | IEXTEN,
+	.c_cc = INIT_C_CC,
+	.c_ispeed = 38400,
+	.c_ospeed = 38400,
+	/* .c_line = N_TTY, */
+};
+
+EXPORT_SYMBOL(tty_std_termios);
+
+/* This list gets poked at by procfs and various bits of boot up code. This
+   could do with some rationalisation such as pulling the tty proc function
+   into this file */
+
+LIST_HEAD(tty_drivers);			/* linked list of tty drivers */
+
+/* Mutex to protect creating and releasing a tty */
+DEFINE_MUTEX(tty_mutex);
+
+/* Forward declarations for the file_operations tables defined below */
+static ssize_t tty_read(struct kiocb *, struct iov_iter *);
+static ssize_t tty_write(struct kiocb *, struct iov_iter *);
+static __poll_t tty_poll(struct file *, poll_table *);
+static int tty_open(struct inode *, struct file *);
+#ifdef CONFIG_COMPAT
+static long tty_compat_ioctl(struct file *file, unsigned int cmd,
+				unsigned long arg);
+#else
+#define tty_compat_ioctl NULL
+#endif
+static int __tty_fasync(int fd, struct file *filp, int on);
+static int tty_fasync(int fd, struct file *filp, int on);
+static void release_tty(struct tty_struct *tty, int idx);
+
+/**
+ *	free_tty_struct		-	free a disused tty
+ *	@tty: tty struct to free
+ *
+ *	Free the write buffers, tty queue and tty memory itself.
+ *
+ *	Locking: none. Must be called after tty is definitely unused
+ */
+
+static void free_tty_struct(struct tty_struct *tty)
+{
+	/* ldisc must be torn down before the struct memory goes away */
+	tty_ldisc_deinit(tty);
+	put_device(tty->dev);
+	kfree(tty->write_buf);
+	/* poison the magic so a stale pointer fails tty_paranoia_check() */
+	tty->magic = 0xDEADDEAD;
+	kfree(tty);
+}
+
+/* Map an open tty file back to its tty_struct via the per-file private data */
+static inline struct tty_struct *file_tty(struct file *file)
+{
+	return ((struct tty_file_private *)file->private_data)->tty;
+}
+
+/* Allocate the per-file private data; paired with tty_add_file() once the
+ * tty is known, or tty_free_file() on the open failure path. */
+int tty_alloc_file(struct file *file)
+{
+	struct tty_file_private *priv;
+
+	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	file->private_data = priv;
+
+	return 0;
+}
+
+/* Associate a new file with the tty structure */
+void tty_add_file(struct tty_struct *tty, struct file *file)
+{
+	struct tty_file_private *priv = file->private_data;
+
+	priv->tty = tty;
+	priv->file = file;
+
+	/* files_lock guards the tty->tty_files list of open files */
+	spin_lock(&tty->files_lock);
+	list_add(&priv->list, &tty->tty_files);
+	spin_unlock(&tty->files_lock);
+}
+
+/**
+ * tty_free_file - free file->private_data
+ *
+ * This shall be used only for fail path handling when tty_add_file was not
+ * called yet.
+ */
+void tty_free_file(struct file *file)
+{
+	struct tty_file_private *priv = file->private_data;
+
+	file->private_data = NULL;
+	kfree(priv);
+}
+
+/* Delete file from its tty */
+static void tty_del_file(struct file *file)
+{
+	struct tty_file_private *priv = file->private_data;
+	struct tty_struct *tty = priv->tty;
+
+	spin_lock(&tty->files_lock);
+	list_del(&priv->list);
+	spin_unlock(&tty->files_lock);
+	tty_free_file(file);
+}
+
+/**
+ *	tty_name	-	return tty naming
+ *	@tty: tty structure
+ *
+ *	Convert a tty structure into a name. The name reflects the kernel
+ *	naming policy and if udev is in use may not reflect user space
+ *
+ *	Locking: none
+ */
+
+const char *tty_name(const struct tty_struct *tty)
+{
+	if (!tty) /* Hmm.  NULL tty.  That's fun. */
+		return "NULL tty";
+	return tty->name;
+}
+
+EXPORT_SYMBOL(tty_name);
+
+/* Name of the driver backing this tty; "" when tty or driver is missing,
+ * so callers can pass the result straight to printk. */
+const char *tty_driver_name(const struct tty_struct *tty)
+{
+	if (!tty || !tty->driver)
+		return "";
+	return tty->driver->name;
+}
+
+/* Sanity-check a tty pointer before use: non-NULL and carrying the magic
+ * cookie.  Returns 1 (and warns) on a bad tty, 0 when it looks valid.
+ * Compiled out when TTY_PARANOIA_CHECK is not set. */
+static int tty_paranoia_check(struct tty_struct *tty, struct inode *inode,
+			      const char *routine)
+{
+#ifdef TTY_PARANOIA_CHECK
+	if (!tty) {
+		pr_warn("(%d:%d): %s: NULL tty\n",
+			imajor(inode), iminor(inode), routine);
+		return 1;
+	}
+	if (tty->magic != TTY_MAGIC) {
+		pr_warn("(%d:%d): %s: bad magic number\n",
+			imajor(inode), iminor(inode), routine);
+		return 1;
+	}
+#endif
+	return 0;
+}
+
+/* Caller must hold tty_lock */
+static int check_tty_count(struct tty_struct *tty, const char *routine)
+{
+#ifdef CHECK_TTY_COUNT
+	struct list_head *p;
+	int count = 0, kopen_count = 0;
+
+	/* count open file handles attached to this tty */
+	spin_lock(&tty->files_lock);
+	list_for_each(p, &tty->tty_files) {
+		count++;
+	}
+	spin_unlock(&tty->files_lock);
+	/* an open pty master keeps its slave's count raised by one */
+	if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+	    tty->driver->subtype == PTY_TYPE_SLAVE &&
+	    tty->link && tty->link->count)
+		count++;
+	if (tty_port_kopened(tty->port))
+		kopen_count++;
+	if (tty->count != (count + kopen_count)) {
+		tty_warn(tty, "%s: tty->count(%d) != (#fd's(%d) + #kopen's(%d))\n",
+			 routine, tty->count, count, kopen_count);
+		return (count + kopen_count);
+	}
+#endif
+	return 0;
+}
+
+/**
+ *	get_tty_driver		-	find device of a tty
+ *	@device: device identifier
+ *	@index: returns the index of the tty
+ *
+ *	This routine returns a tty driver structure, given a device number
+ *	and also passes back the index number.
+ *
+ *	Locking: caller must hold tty_mutex
+ */
+
+static struct tty_driver *get_tty_driver(dev_t device, int *index)
+{
+	struct tty_driver *p;
+
+	list_for_each_entry(p, &tty_drivers, tty_drivers) {
+		dev_t base = MKDEV(p->major, p->minor_start);
+		/* does @device fall inside this driver's minor range? */
+		if (device < base || device >= base + p->num)
+			continue;
+		*index = device - base;
+		/* returned driver reference is counted; caller must drop it */
+		return tty_driver_kref_get(p);
+	}
+	return NULL;
+}
+
+/**
+ *	tty_dev_name_to_number	-	return dev_t for device name
+ *	@name: user space name of device under /dev
+ *	@number: pointer to dev_t that this function will populate
+ *
+ *	This function converts device names like ttyS0 or ttyUSB1 into dev_t
+ *	like (4, 64) or (188, 1). If no corresponding driver is registered then
+ *	the function returns -ENODEV.
+ *
+ *	Locking: this acquires tty_mutex to protect the tty_drivers list from
+ *		being modified while we are traversing it, and makes sure to
+ *		release it before exiting.
+ */
+int tty_dev_name_to_number(const char *name, dev_t *number)
+{
+	struct tty_driver *p;
+	int ret;
+	int index, prefix_length = 0;
+	const char *str;
+
+	/* split @name into an alphabetic prefix and a decimal index */
+	for (str = name; *str && !isdigit(*str); str++)
+		;
+
+	if (!*str)
+		return -EINVAL;
+
+	ret = kstrtoint(str, 10, &index);
+	if (ret)
+		return ret;
+
+	prefix_length = str - name;
+	mutex_lock(&tty_mutex);
+
+	/* match the prefix exactly against each registered driver's name */
+	list_for_each_entry(p, &tty_drivers, tty_drivers)
+		if (prefix_length == strlen(p->name) && strncmp(name,
+					p->name, prefix_length) == 0) {
+			if (index < p->num) {
+				*number = MKDEV(p->major, p->minor_start + index);
+				goto out;
+			}
+		}
+
+	/* if here then driver wasn't found */
+	ret = -ENODEV;
+out:
+	mutex_unlock(&tty_mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(tty_dev_name_to_number);
+
+#ifdef CONFIG_CONSOLE_POLL
+
+/**
+ *	tty_find_polling_driver	-	find device of a polled tty
+ *	@name: name string to match
+ *	@line: pointer to resulting tty line nr
+ *
+ *	This routine returns a tty driver structure, given a name
+ *	and the condition that the tty driver is capable of polled
+ *	operation.
+ */
+struct tty_driver *tty_find_polling_driver(char *name, int *line)
+{
+	struct tty_driver *p, *res = NULL;
+	int tty_line = 0;
+	int len;
+	char *str, *stp;
+
+	/* find where the name prefix ends: first digit or ',' separator */
+	for (str = name; *str; str++)
+		if ((*str >= '0' && *str <= '9') || *str == ',')
+			break;
+	if (!*str)
+		return NULL;
+
+	len = str - name;
+	tty_line = simple_strtoul(str, &str, 10);
+
+	mutex_lock(&tty_mutex);
+	/* Search through the tty devices to look for a match */
+	list_for_each_entry(p, &tty_drivers, tty_drivers) {
+		if (!len || strncmp(name, p->name, len) != 0)
+			continue;
+		/* anything after a ',' is passed as options to poll_init */
+		stp = str;
+		if (*stp == ',')
+			stp++;
+		if (*stp == '\0')
+			stp = NULL;
+
+		if (tty_line >= 0 && tty_line < p->num && p->ops &&
+		    p->ops->poll_init && !p->ops->poll_init(p, tty_line, stp)) {
+			res = tty_driver_kref_get(p);
+			*line = tty_line;
+			break;
+		}
+	}
+	mutex_unlock(&tty_mutex);
+
+	return res;
+}
+EXPORT_SYMBOL_GPL(tty_find_polling_driver);
+#endif
+
+/* The hung_up_tty_* stubs below replace a file's f_op after hangup: reads
+ * see EOF, writes and most ioctls fail with -EIO, poll reports all events. */
+static ssize_t hung_up_tty_read(struct kiocb *iocb, struct iov_iter *to)
+{
+	return 0;
+}
+
+static ssize_t hung_up_tty_write(struct kiocb *iocb, struct iov_iter *from)
+{
+	return -EIO;
+}
+
+/* No kernel lock held - none needed ;) */
+static __poll_t hung_up_tty_poll(struct file *filp, poll_table *wait)
+{
+	return EPOLLIN | EPOLLOUT | EPOLLERR | EPOLLHUP | EPOLLRDNORM | EPOLLWRNORM;
+}
+
+/* TIOCSPGRP gets -ENOTTY so callers can tell "not a ctty" from "hung up" */
+static long hung_up_tty_ioctl(struct file *file, unsigned int cmd,
+		unsigned long arg)
+{
+	return cmd == TIOCSPGRP ? -ENOTTY : -EIO;
+}
+
+static long hung_up_tty_compat_ioctl(struct file *file,
+				     unsigned int cmd, unsigned long arg)
+{
+	return cmd == TIOCSPGRP ? -ENOTTY : -EIO;
+}
+
+static int hung_up_tty_fasync(int fd, struct file *file, int on)
+{
+	return -ENOTTY;
+}
+
+/* /proc/<pid>/fdinfo hook: delegate to the driver if it implements one */
+static void tty_show_fdinfo(struct seq_file *m, struct file *file)
+{
+	struct tty_struct *tty = file_tty(file);
+
+	if (tty && tty->ops && tty->ops->show_fdinfo)
+		tty->ops->show_fdinfo(tty, m);
+}
+
+/* Normal tty file operations */
+static const struct file_operations tty_fops = {
+	.llseek		= no_llseek,
+	.read_iter	= tty_read,
+	.write_iter	= tty_write,
+	.splice_read	= generic_file_splice_read,
+	.splice_write	= iter_file_splice_write,
+	.poll		= tty_poll,
+	.unlocked_ioctl	= tty_ioctl,
+	.compat_ioctl	= tty_compat_ioctl,
+	.open		= tty_open,
+	.release	= tty_release,
+	.fasync		= tty_fasync,
+	.show_fdinfo	= tty_show_fdinfo,
+};
+
+/* /dev/console: identical to tty_fops except writes honour TIOCCONS
+ * redirection via redirected_tty_write() */
+static const struct file_operations console_fops = {
+	.llseek		= no_llseek,
+	.read_iter	= tty_read,
+	.write_iter	= redirected_tty_write,
+	.splice_read	= generic_file_splice_read,
+	.splice_write	= iter_file_splice_write,
+	.poll		= tty_poll,
+	.unlocked_ioctl	= tty_ioctl,
+	.compat_ioctl	= tty_compat_ioctl,
+	.open		= tty_open,
+	.release	= tty_release,
+	.fasync		= tty_fasync,
+};
+
+/* Installed on open files by __tty_hangup(); see the stubs above */
+static const struct file_operations hung_up_tty_fops = {
+	.llseek		= no_llseek,
+	.read_iter	= hung_up_tty_read,
+	.write_iter	= hung_up_tty_write,
+	.poll		= hung_up_tty_poll,
+	.unlocked_ioctl	= hung_up_tty_ioctl,
+	.compat_ioctl	= hung_up_tty_compat_ioctl,
+	.release	= tty_release,
+	.fasync		= hung_up_tty_fasync,
+};
+
+/* TIOCCONS redirection target, protected by redirect_lock */
+static DEFINE_SPINLOCK(redirect_lock);
+static struct file *redirect;
+
+extern void tty_sysctl_init(void);
+
+/**
+ *	tty_wakeup	-	request more data
+ *	@tty: terminal
+ *
+ *	Internal and external helper for wakeups of tty. This function
+ *	informs the line discipline if present that the driver is ready
+ *	to receive more output data.
+ */
+
+void tty_wakeup(struct tty_struct *tty)
+{
+	struct tty_ldisc *ld;
+
+	if (test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) {
+		/* take a counted ldisc reference; may be NULL mid-hangup */
+		ld = tty_ldisc_ref(tty);
+		if (ld) {
+			if (ld->ops->write_wakeup)
+				ld->ops->write_wakeup(tty);
+			tty_ldisc_deref(ld);
+		}
+	}
+	/* wake anyone blocked in write() or polling for writability */
+	wake_up_interruptible_poll(&tty->write_wait, EPOLLOUT);
+}
+
+EXPORT_SYMBOL_GPL(tty_wakeup);
+
+/**
+ *	__tty_hangup		-	actual handler for hangup events
+ *	@tty: tty device
+ *	@exit_session: nonzero when called for a session-leader exit
+ *
+ *	This can be called by a "kworker" kernel thread.  That is process
+ *	synchronous but doesn't hold any locks, so we need to make sure we
+ *	have the appropriate locks for what we're doing.
+ *
+ *	The hangup event clears any pending redirections onto the hung up
+ *	device. It ensures future writes will error and it does the needed
+ *	line discipline hangup and signal delivery. The tty object itself
+ *	remains intact.
+ *
+ *	Locking:
+ *		BTM
+ *		  redirect lock for undoing redirection
+ *		  file list lock for manipulating list of ttys
+ *		  tty_ldiscs_lock from called functions
+ *		  termios_rwsem resetting termios data
+ *		  tasklist_lock to walk task list for hangup event
+ *		    ->siglock to protect ->signal/->sighand
+ */
+static void __tty_hangup(struct tty_struct *tty, int exit_session)
+{
+	struct file *cons_filp = NULL;
+	struct file *filp, *f = NULL;
+	struct tty_file_private *priv;
+	int    closecount = 0, n;
+	int refs;
+
+	if (!tty)
+		return;
+
+
+	/* undo any TIOCCONS redirection aimed at this tty; the fput of the
+	   stashed file happens at the end, outside all locks */
+	spin_lock(&redirect_lock);
+	if (redirect && file_tty(redirect) == tty) {
+		f = redirect;
+		redirect = NULL;
+	}
+	spin_unlock(&redirect_lock);
+
+	tty_lock(tty);
+
+	/* a tty is only hung up once until it is reopened */
+	if (test_bit(TTY_HUPPED, &tty->flags)) {
+		tty_unlock(tty);
+		return;
+	}
+
+	/*
+	 * Some console devices aren't actually hung up for technical and
+	 * historical reasons, which can lead to indefinite interruptible
+	 * sleep in n_tty_read().  The following explicitly tells
+	 * n_tty_read() to abort readers.
+	 */
+	set_bit(TTY_HUPPING, &tty->flags);
+
+	/* inuse_filps is protected by the single tty lock,
+	   this really needs to change if we want to flush the
+	   workqueue with the lock held */
+	check_tty_count(tty, "tty_hangup");
+
+	spin_lock(&tty->files_lock);
+	/* This breaks for file handles being sent over AF_UNIX sockets ? */
+	list_for_each_entry(priv, &tty->tty_files, list) {
+		filp = priv->file;
+		if (filp->f_op->write_iter == redirected_tty_write)
+			cons_filp = filp;
+		if (filp->f_op->write_iter != tty_write)
+			continue;
+		closecount++;
+		__tty_fasync(-1, filp, 0);	/* can't block */
+		/* future I/O on this fd hits the hung_up_tty_* stubs */
+		filp->f_op = &hung_up_tty_fops;
+	}
+	spin_unlock(&tty->files_lock);
+
+	refs = tty_signal_session_leader(tty, exit_session);
+	/* Account for the p->signal references we killed */
+	while (refs--)
+		tty_kref_put(tty);
+
+	tty_ldisc_hangup(tty, cons_filp != NULL);
+
+	/* drop the controlling-terminal state under ctrl_lock */
+	spin_lock_irq(&tty->ctrl_lock);
+	clear_bit(TTY_THROTTLED, &tty->flags);
+	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+	put_pid(tty->session);
+	put_pid(tty->pgrp);
+	tty->session = NULL;
+	tty->pgrp = NULL;
+	tty->ctrl_status = 0;
+	spin_unlock_irq(&tty->ctrl_lock);
+
+	/*
+	 * If one of the devices matches a console pointer, we
+	 * cannot just call hangup() because that will cause
+	 * tty->count and state->count to go out of sync.
+	 * So we just call close() the right number of times.
+	 */
+	if (cons_filp) {
+		if (tty->ops->close)
+			for (n = 0; n < closecount; n++)
+				tty->ops->close(tty, cons_filp);
+	} else if (tty->ops->hangup)
+		tty->ops->hangup(tty);
+	/*
+	 * We don't want to have driver/ldisc interactions beyond the ones
+	 * we did here. The driver layer expects no calls after ->hangup()
+	 * from the ldisc side, which is now guaranteed.
+	 */
+	set_bit(TTY_HUPPED, &tty->flags);
+	clear_bit(TTY_HUPPING, &tty->flags);
+	tty_unlock(tty);
+
+	if (f)
+		fput(f);
+}
+
+/* Work-queue entry point behind tty_hangup() */
+static void do_tty_hangup(struct work_struct *work)
+{
+	struct tty_struct *tty =
+		container_of(work, struct tty_struct, hangup_work);
+
+	__tty_hangup(tty, 0);
+}
+
+/**
+ *	tty_hangup		-	trigger a hangup event
+ *	@tty: tty to hangup
+ *
+ *	A carrier loss (virtual or otherwise) has occurred on this like
+ *	schedule a hangup sequence to run after this event.
+ */
+
+void tty_hangup(struct tty_struct *tty)
+{
+	tty_debug_hangup(tty, "hangup\n");
+	/* asynchronous: the real work runs from do_tty_hangup() */
+	schedule_work(&tty->hangup_work);
+}
+
+EXPORT_SYMBOL(tty_hangup);
+
+/**
+ *	tty_vhangup		-	process vhangup
+ *	@tty: tty to hangup
+ *
+ *	The user has asked via system call for the terminal to be hung up.
+ *	We do this synchronously so that when the syscall returns the process
+ *	is complete. That guarantee is necessary for security reasons.
+ */
+
+void tty_vhangup(struct tty_struct *tty)
+{
+	tty_debug_hangup(tty, "vhangup\n");
+	__tty_hangup(tty, 0);
+}
+
+EXPORT_SYMBOL(tty_vhangup);
+
+
+/**
+ *	tty_vhangup_self	-	process vhangup for own ctty
+ *
+ *	Perform a vhangup on the current controlling tty
+ */
+
+void tty_vhangup_self(void)
+{
+	struct tty_struct *tty;
+
+	/* get_current_tty() returns a counted reference or NULL */
+	tty = get_current_tty();
+	if (tty) {
+		tty_vhangup(tty);
+		tty_kref_put(tty);
+	}
+}
+
+/**
+ *	tty_vhangup_session		-	hangup session leader exit
+ *	@tty: tty to hangup
+ *
+ *	The session leader is exiting and hanging up its controlling terminal.
+ *	Every process in the foreground process group is signalled SIGHUP.
+ *
+ *	We do this synchronously so that when the syscall returns the process
+ *	is complete. That guarantee is necessary for security reasons.
+ */
+
+void tty_vhangup_session(struct tty_struct *tty)
+{
+	tty_debug_hangup(tty, "session hangup\n");
+	__tty_hangup(tty, 1);
+}
+
+/**
+ *	tty_hung_up_p		-	was tty hung up
+ *	@filp: file pointer of tty
+ *
+ *	Return true if the tty has been subject to a vhangup or a carrier
+ *	loss
+ */
+
+int tty_hung_up_p(struct file *filp)
+{
+	/* hangup swaps in hung_up_tty_fops, so the f_op pointer is the test */
+	return (filp && filp->f_op == &hung_up_tty_fops);
+}
+
+EXPORT_SYMBOL(tty_hung_up_p);
+
+/**
+ *	stop_tty	-	propagate flow control
+ *	@tty: tty to stop
+ *
+ *	Perform flow control to the driver. May be called
+ *	on an already stopped device and will not re-call the driver
+ *	method.
+ *
+ *	This functionality is used by both the line disciplines for
+ *	halting incoming flow and by the driver. It may therefore be
+ *	called from any context, may be under the tty atomic_write_lock
+ *	but not always.
+ *
+ *	Locking:
+ *		flow_lock
+ */
+
+/* Lock-free variant: caller must already hold tty->flow_lock */
+void __stop_tty(struct tty_struct *tty)
+{
+	if (tty->stopped)
+		return;
+	tty->stopped = 1;
+	if (tty->ops->stop)
+		tty->ops->stop(tty);
+}
+
+void stop_tty(struct tty_struct *tty)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&tty->flow_lock, flags);
+	__stop_tty(tty);
+	spin_unlock_irqrestore(&tty->flow_lock, flags);
+}
+EXPORT_SYMBOL(stop_tty);
+
+/**
+ *	start_tty	-	propagate flow control
+ *	@tty: tty to start
+ *
+ *	Start a tty that has been stopped if at all possible. If this
+ *	tty was previous stopped and is now being started, the driver
+ *	start method is invoked and the line discipline woken.
+ *
+ *	Locking:
+ *		flow_lock
+ */
+
+/* Lock-free variant: caller must already hold tty->flow_lock.
+ * A tty halted by ^S (flow_stopped) stays stopped until unthrottled. */
+void __start_tty(struct tty_struct *tty)
+{
+	if (!tty->stopped || tty->flow_stopped)
+		return;
+	tty->stopped = 0;
+	if (tty->ops->start)
+		tty->ops->start(tty);
+	tty_wakeup(tty);
+}
+
+void start_tty(struct tty_struct *tty)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&tty->flow_lock, flags);
+	__start_tty(tty);
+	spin_unlock_irqrestore(&tty->flow_lock, flags);
+}
+EXPORT_SYMBOL(start_tty);
+
+/* Refresh an inode timestamp with at most ~8 second granularity */
+static void tty_update_time(struct timespec64 *time)
+{
+	time64_t sec = ktime_get_real_seconds();
+
+	/*
+	 * We only care if the two values differ in anything other than the
+	 * lower three bits (i.e every 8 seconds).  If so, then we can update
+	 * the time of the tty device, otherwise it could be construed as a
+	 * security leak to let userspace know the exact timing of the tty.
+	 */
+	if ((sec ^ time->tv_sec) & ~7)
+		time->tv_sec = sec;
+}
+
+/*
+ * Iterate on the ldisc ->read() function until we've gotten all
+ * the data the ldisc has for us.
+ *
+ * The "cookie" is something that the ldisc read function can fill
+ * in to let us know that there is more data to be had.
+ *
+ * We promise to continue to call the ldisc until it stops returning
+ * data or clears the cookie. The cookie may be something that the
+ * ldisc maintains state for and needs to free.
+ */
+static int iterate_tty_read(struct tty_ldisc *ld, struct tty_struct *tty,
+		struct file *file, struct iov_iter *to)
+{
+	int retval = 0;
+	void *cookie = NULL;
+	unsigned long offset = 0;	/* bytes copied to userspace so far */
+	char kernel_buf[64];		/* bounce buffer between ldisc and iter */
+	size_t count = iov_iter_count(to);
+
+	do {
+		int size, copied;
+
+		size = count > sizeof(kernel_buf) ? sizeof(kernel_buf) : count;
+		size = ld->ops->read(tty, file, kernel_buf, size, &cookie, offset);
+		if (!size)
+			break;
+
+		if (size < 0) {
+			/* Did we have an earlier error (ie -EFAULT)? */
+			if (retval)
+				break;
+			retval = size;
+
+			/*
+			 * -EOVERFLOW means we didn't have enough space
+			 * for a whole packet, and we shouldn't return
+			 * a partial result.
+			 */
+			if (retval == -EOVERFLOW)
+				offset = 0;
+			break;
+		}
+
+		copied = copy_to_iter(kernel_buf, size, to);
+		offset += copied;
+		count -= copied;
+
+		/*
+		 * If the user copy failed, we still need to do another ->read()
+		 * call if we had a cookie to let the ldisc clear up.
+		 *
+		 * But make sure size is zeroed.
+		 */
+		if (unlikely(copied != size)) {
+			count = 0;
+			retval = -EFAULT;
+		}
+	} while (cookie);
+
+	/* We always clear tty buffer in case they contained passwords */
+	memzero_explicit(kernel_buf, sizeof(kernel_buf));
+	/* partial success wins over a late error */
+	return offset ? offset : retval;
+}
+
+
+/**
+ *	tty_read	-	read method for tty device files
+ *	@iocb: kernel I/O control block for this read
+ *	@to: destination iov iterator (user buffer)
+ *
+ *	Perform the read system call function on this terminal device. Checks
+ *	for hung up devices before calling the line discipline method.
+ *
+ *	Locking:
+ *		Locks the line discipline internally while needed. Multiple
+ *	read calls may be outstanding in parallel.
+ */
+
+static ssize_t tty_read(struct kiocb *iocb, struct iov_iter *to)
+{
+	int i;
+	struct file *file = iocb->ki_filp;
+	struct inode *inode = file_inode(file);
+	struct tty_struct *tty = file_tty(file);
+	struct tty_ldisc *ld;
+
+	if (tty_paranoia_check(tty, inode, "tty_read"))
+		return -EIO;
+	if (!tty || tty_io_error(tty))
+		return -EIO;
+
+	/* We want to wait for the line discipline to sort out in this
+	   situation */
+	ld = tty_ldisc_ref_wait(tty);
+	if (!ld)
+		/* NULL ldisc means the tty was hung up while we waited */
+		return hung_up_tty_read(iocb, to);
+	i = -EIO;
+	if (ld->ops->read)
+		i = iterate_tty_read(ld, tty, file, to);
+	tty_ldisc_deref(ld);
+
+	/* only touch atime if data was actually transferred */
+	if (i > 0)
+		tty_update_time(&inode->i_atime);
+
+	return i;
+}
+
+/* Drop the per-tty write serialisation and wake queued writers/pollers */
+static void tty_write_unlock(struct tty_struct *tty)
+{
+	mutex_unlock(&tty->atomic_write_lock);
+	wake_up_interruptible_poll(&tty->write_wait, EPOLLOUT);
+}
+
+/* Take the per-tty write serialisation.  With @ndelay set (O_NDELAY) fail
+ * immediately with -EAGAIN instead of sleeping; otherwise sleep
+ * interruptibly and return -ERESTARTSYS on a signal. */
+static int tty_write_lock(struct tty_struct *tty, int ndelay)
+{
+	if (!mutex_trylock(&tty->atomic_write_lock)) {
+		if (ndelay)
+			return -EAGAIN;
+		if (mutex_lock_interruptible(&tty->atomic_write_lock))
+			return -ERESTARTSYS;
+	}
+	return 0;
+}
+
+/*
+ * Split writes up in sane blocksizes to avoid
+ * denial-of-service type attacks
+ */
+static inline ssize_t do_tty_write(
+	ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t),
+	struct tty_struct *tty,
+	struct file *file,
+	struct iov_iter *from)
+{
+	size_t count = iov_iter_count(from);
+	ssize_t ret, written = 0;
+	unsigned int chunk;
+
+	ret = tty_write_lock(tty, file->f_flags & O_NDELAY);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * We chunk up writes into a temporary buffer. This
+	 * simplifies low-level drivers immensely, since they
+	 * don't have locking issues and user mode accesses.
+	 *
+	 * But if TTY_NO_WRITE_SPLIT is set, we should use a
+	 * big chunk-size..
+	 *
+	 * The default chunk-size is 2kB, because the NTTY
+	 * layer has problems with bigger chunks. It will
+	 * claim to be able to handle more characters than
+	 * it actually does.
+	 *
+	 * FIXME: This can probably go away now except that 64K chunks
+	 * are too likely to fail unless switched to vmalloc...
+	 */
+	chunk = 2048;
+	if (test_bit(TTY_NO_WRITE_SPLIT, &tty->flags))
+		chunk = 65536;
+	if (count < chunk)
+		chunk = count;
+
+	/* write_buf/write_cnt is protected by the atomic_write_lock mutex */
+	if (tty->write_cnt < chunk) {
+		unsigned char *buf_chunk;
+
+		/* never shrink below 1kB so small writes reuse the buffer */
+		if (chunk < 1024)
+			chunk = 1024;
+
+		buf_chunk = kmalloc(chunk, GFP_KERNEL);
+		if (!buf_chunk) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		kfree(tty->write_buf);
+		tty->write_cnt = chunk;
+		tty->write_buf = buf_chunk;
+	}
+
+	/* Do the write .. */
+	for (;;) {
+		size_t size = count;
+		if (size > chunk)
+			size = chunk;
+
+		ret = -EFAULT;
+		if (copy_from_iter(tty->write_buf, size, from) != size)
+			break;
+
+		ret = write(tty, file, tty->write_buf, size);
+		if (ret <= 0)
+			break;
+
+		written += ret;
+		/* defensive: a driver must never claim more than it was given */
+		if (ret > size)
+			break;
+
+		/* FIXME! Have Al check this! */
+		if (ret != size)
+			iov_iter_revert(from, size-ret);
+
+		count -= ret;
+		if (!count)
+			break;
+		ret = -ERESTARTSYS;
+		if (signal_pending(current))
+			break;
+		cond_resched();
+	}
+	/* a partial write beats any trailing error */
+	if (written) {
+		tty_update_time(&file_inode(file)->i_mtime);
+		ret = written;
+	}
+out:
+	tty_write_unlock(tty);
+	return ret;
+}
+
+/**
+ * tty_write_message - write a message to a certain tty, not just the console.
+ * @tty: the destination tty_struct
+ * @msg: the message to write
+ *
+ * This is used for messages that need to be redirected to a specific tty.
+ * We don't put it into the syslog queue right now maybe in the future if
+ * really needed.
+ *
+ * We must still hold the BTM and test the CLOSING flag for the moment.
+ */
+
+void tty_write_message(struct tty_struct *tty, char *msg)
+{
+	if (tty) {
+		mutex_lock(&tty->atomic_write_lock);
+		tty_lock(tty);
+		/* only write while at least one opener keeps the tty alive */
+		if (tty->ops->write && tty->count > 0)
+			tty->ops->write(tty, msg, strlen(msg));
+		tty_unlock(tty);
+		tty_write_unlock(tty);
+	}
+	return;
+}
+
+
+/**
+ *	tty_write		-	write method for tty device file
+ *	@iocb: kernel I/O control block
+ *	@from: source iov iterator (user data)
+ *
+ *	Write data to a tty device via the line discipline.
+ *
+ *	Locking:
+ *		Locks the line discipline as required
+ *		Writes to the tty driver are serialized by the atomic_write_lock
+ *	and are then processed in chunks to the device. The line discipline
+ *	write method will not be invoked in parallel for each device.
+ */
+
+/* @file may differ from iocb->ki_filp when writes are redirected (TIOCCONS) */
+static ssize_t file_tty_write(struct file *file, struct kiocb *iocb, struct iov_iter *from)
+{
+	struct tty_struct *tty = file_tty(file);
+ 	struct tty_ldisc *ld;
+	ssize_t ret;
+
+	if (tty_paranoia_check(tty, file_inode(file), "tty_write"))
+		return -EIO;
+	if (!tty || !tty->ops->write ||	tty_io_error(tty))
+			return -EIO;
+	/* Short term debug to catch buggy drivers */
+	if (tty->ops->write_room == NULL)
+		tty_err(tty, "missing write_room method\n");
+	ld = tty_ldisc_ref_wait(tty);
+	if (!ld)
+		/* NULL ldisc means the tty was hung up while we waited */
+		return hung_up_tty_write(iocb, from);
+	if (!ld->ops->write)
+		ret = -EIO;
+	else
+		ret = do_tty_write(ld->ops->write, tty, file, from);
+	tty_ldisc_deref(ld);
+	return ret;
+}
+
+static ssize_t tty_write(struct kiocb *iocb, struct iov_iter *from)
+{
+	return file_tty_write(iocb->ki_filp, iocb, from);
+}
+
+/* Console write path: honour a TIOCCONS redirection if one is active,
+ * otherwise fall through to the normal tty write. */
+ssize_t redirected_tty_write(struct kiocb *iocb, struct iov_iter *iter)
+{
+	struct file *p = NULL;
+
+	/* take a counted reference under the lock so redirect can't vanish */
+	spin_lock(&redirect_lock);
+	if (redirect)
+		p = get_file(redirect);
+	spin_unlock(&redirect_lock);
+
+	/*
+	 * We know the redirected tty is just another tty, we can
+	 * call file_tty_write() directly with that file pointer.
+	 */
+	if (p) {
+		ssize_t res;
+		res = file_tty_write(p, iocb, iter);
+		fput(p);
+		return res;
+	}
+	return tty_write(iocb, iter);
+}
+
+/**
+ *	tty_send_xchar	-	send priority character
+ *	@tty: tty whose driver transmits the character
+ *	@ch: character to send (typically XON/XOFF)
+ *
+ *	Send a high priority character to the tty even if stopped
+ *
+ *	Locking: none for xchar method, write ordering for write method.
+ */
+
+int tty_send_xchar(struct tty_struct *tty, char ch)
+{
+	int	was_stopped = tty->stopped;
+
+	/* preferred path: driver knows how to jump the queue itself */
+	if (tty->ops->send_xchar) {
+		down_read(&tty->termios_rwsem);
+		tty->ops->send_xchar(tty, ch);
+		up_read(&tty->termios_rwsem);
+		return 0;
+	}
+
+	if (tty_write_lock(tty, 0) < 0)
+		return -ERESTARTSYS;
+
+	/* fallback: briefly un-stop the tty, write the char, re-stop */
+	down_read(&tty->termios_rwsem);
+	if (was_stopped)
+		start_tty(tty);
+	tty->ops->write(tty, &ch, 1);
+	if (was_stopped)
+		stop_tty(tty);
+	up_read(&tty->termios_rwsem);
+	tty_write_unlock(tty);
+	return 0;
+}
+
+/* BSD pty letter table: index bits 4-7 of the minor select the letter */
+static char ptychar[] = "pqrstuvwxyzabcde";
+
+/**
+ *	pty_line_name	-	generate name for a pty
+ *	@driver: the tty driver in use
+ *	@index: the minor number
+ *	@p: output buffer of at least 6 bytes
+ *
+ *	Generate a name from a driver reference and write it to the output
+ *	buffer.
+ *
+ *	Locking: None
+ */
+static void pty_line_name(struct tty_driver *driver, int index, char *p)
+{
+	int i = index + driver->name_base;
+	/* ->name is initialized to "ttyp", but "tty" is expected */
+	sprintf(p, "%s%c%x",
+		driver->subtype == PTY_TYPE_SLAVE ? "tty" : driver->name,
+		ptychar[i >> 4 & 0xf], i & 0xf);
+}
+
+/**
+ *	tty_line_name	-	generate name for a tty
+ *	@driver: the tty driver in use
+ *	@index: the minor number
+ *	@p: output buffer of at least 7 bytes
+ *
+ *	Generate a name from a driver reference and write it to the output
+ *	buffer.
+ *
+ *	Locking: None
+ */
+static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p)
+{
+	/* unnumbered nodes (e.g. single-device drivers) omit the index */
+	if (driver->flags & TTY_DRIVER_UNNUMBERED_NODE)
+		return sprintf(p, "%s", driver->name);
+	else
+		return sprintf(p, "%s%d", driver->name,
+			       index + driver->name_base);
+}
+
+/**
+ *	tty_driver_lookup_tty() - find an existing tty, if any
+ *	@driver: the driver for the tty
+ *	@file: file backing the lookup (may be NULL for kernel-internal opens)
+ *	@idx:	 the minor number
+ *
+ *	Return the tty, if found. If not found, return NULL or ERR_PTR() if the
+ *	driver lookup() method returns an error.
+ *
+ *	Locking: tty_mutex must be held. If the tty is found, bump the tty kref.
+ */
+static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver,
+		struct file *file, int idx)
+{
+	struct tty_struct *tty;
+
+	/* drivers with a lookup op (ptys) refuse file-less lookups */
+	if (driver->ops->lookup)
+		if (!file)
+			tty = ERR_PTR(-EIO);
+		else
+			tty = driver->ops->lookup(driver, file, idx);
+	else
+		tty = driver->ttys[idx];
+
+	/* tty_kref_get() tolerates NULL, so only filter out ERR_PTRs */
+	if (!IS_ERR(tty))
+		tty_kref_get(tty);
+	return tty;
+}
+
+/**
+ *	tty_init_termios	-  helper for termios setup
+ *	@tty: the tty to set up
+ *
+ *	Initialise the termios structure for this tty. This runs under
+ *	the tty_mutex currently so we can be relaxed about ordering.
+ */
+
+void tty_init_termios(struct tty_struct *tty)
+{
+	struct ktermios *tp;
+	int idx = tty->index;
+
+	if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS)
+		tty->termios = tty->driver->init_termios;
+	else {
+		/* Check for lazy saved data */
+		tp = tty->driver->termios[idx];
+		if (tp != NULL) {
+			tty->termios = *tp;
+			/* restored settings keep everything except the line
+			 * discipline, which always comes from the driver */
+			tty->termios.c_line  = tty->driver->init_termios.c_line;
+		} else
+			tty->termios = tty->driver->init_termios;
+	}
+	/* Compatibility until drivers always set this */
+	tty->termios.c_ispeed = tty_termios_input_baud_rate(&tty->termios);
+	tty->termios.c_ospeed = tty_termios_baud_rate(&tty->termios);
+}
+EXPORT_SYMBOL_GPL(tty_init_termios);
+
+/*
+ * Default install: set up termios, take a driver reference and publish
+ * the tty in the driver table. Caller holds tty_mutex.
+ */
+int tty_standard_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+	tty_init_termios(tty);
+
+	tty_driver_kref_get(driver);
+	tty->count++;
+	driver->ttys[tty->index] = tty;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tty_standard_install);
+
+/**
+ *	tty_driver_install_tty() - install a tty entry in the driver
+ *	@driver: the driver for the tty
+ *	@tty: the tty
+ *
+ *	Install a tty object into the driver tables. The tty->index field
+ *	will be set by the time this is called. This method is responsible
+ *	for ensuring any need additional structures are allocated and
+ *	configured.
+ *
+ *	Locking: tty_mutex for now
+ */
+static int tty_driver_install_tty(struct tty_driver *driver,
+						struct tty_struct *tty)
+{
+	/* Let the driver override installation, else use the default */
+	if (driver->ops->install)
+		return driver->ops->install(driver, tty);
+
+	return tty_standard_install(driver, tty);
+}
+
+/**
+ *	tty_driver_remove_tty() - remove a tty from the driver tables
+ *	@driver: the driver for the tty
+ *	@tty: tty to remove
+ *
+ *	Remove a tty object from the driver tables. The tty->index field
+ *	will be set by the time this is called.
+ *
+ *	Locking: tty_mutex for now
+ */
+static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *tty)
+{
+	/* Let the driver override removal, else clear the table slot */
+	if (!driver->ops->remove)
+		driver->ttys[tty->index] = NULL;
+	else
+		driver->ops->remove(driver, tty);
+}
+
+/**
+ *	tty_reopen()	- fast re-open of an open tty
+ *	@tty: the tty to open
+ *
+ *	Return 0 on success, -errno on error.
+ *	Re-opens on master ptys are not allowed and return -EIO.
+ *
+ *	Locking: Caller must hold tty_lock
+ */
+static int tty_reopen(struct tty_struct *tty)
+{
+	struct tty_driver *driver = tty->driver;
+	struct tty_ldisc *ld;
+	int retval = 0;
+
+	if (driver->type == TTY_DRIVER_TYPE_PTY &&
+	    driver->subtype == PTY_TYPE_MASTER)
+		return -EIO;
+
+	/* count == 0 means the tty is in final close; do not revive it */
+	if (!tty->count)
+		return -EAGAIN;
+
+	if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
+		return -EBUSY;
+
+	ld = tty_ldisc_ref_wait(tty);
+	if (ld) {
+		tty_ldisc_deref(ld);
+	} else {
+		/*
+		 * No ldisc reference could be taken (e.g. after a hangup):
+		 * take the ldisc lock and reinstall a discipline if missing.
+		 */
+		retval = tty_ldisc_lock(tty, 5 * HZ);
+		if (retval)
+			return retval;
+
+		if (!tty->ldisc)
+			retval = tty_ldisc_reinit(tty, tty->termios.c_line);
+		tty_ldisc_unlock(tty);
+	}
+
+	/* Only count the open once everything above has succeeded */
+	if (retval == 0)
+		tty->count++;
+
+	return retval;
+}
+
+/**
+ *	tty_init_dev		-	initialise a tty device
+ *	@driver: tty driver we are opening a device on
+ *	@idx: device index
+ *
+ *	Prepare a tty device. This may not be a "new" clean device but
+ *	could also be an active device. The pty drivers require special
+ *	handling because of this.
+ *
+ *	Locking:
+ *		The function is called under the tty_mutex, which
+ *	protects us from the tty struct or driver itself going away.
+ *
+ *	On exit the tty device has the line discipline attached and
+ *	a reference count of 1. If a pair was created for pty/tty use
+ *	and the other was a pty master then it too has a reference count of 1.
+ *
+ * WSH 06/09/97: Rewritten to remove races and properly clean up after a
+ * failed open.  The new code protects the open with a mutex, so it's
+ * really quite straightforward.  The mutex locking can probably be
+ * relaxed for the (most common) case of reopening a tty.
+ *
+ *	Return: returned tty structure (locked), or ERR_PTR() on failure
+ */
+
+struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
+{
+	struct tty_struct *tty;
+	int retval;
+
+	/*
+	 * First time open is complex, especially for PTY devices.
+	 * This code guarantees that either everything succeeds and the
+	 * TTY is ready for operation, or else the table slots are vacated
+	 * and the allocated memory released.  (Except that the termios
+	 * may be retained.)
+	 */
+
+	if (!try_module_get(driver->owner))
+		return ERR_PTR(-ENODEV);
+
+	tty = alloc_tty_struct(driver, idx);
+	if (!tty) {
+		retval = -ENOMEM;
+		goto err_module_put;
+	}
+
+	tty_lock(tty);
+	retval = tty_driver_install_tty(driver, tty);
+	if (retval < 0)
+		goto err_free_tty;
+
+	if (!tty->port)
+		tty->port = driver->ports[idx];
+
+	if (WARN_RATELIMIT(!tty->port,
+			"%s: %s driver does not set tty->port. This would crash the kernel. Fix the driver!\n",
+			__func__, tty->driver->name)) {
+		retval = -EINVAL;
+		goto err_release_lock;
+	}
+
+	retval = tty_ldisc_lock(tty, 5 * HZ);
+	if (retval)
+		goto err_release_lock;
+	tty->port->itty = tty;
+
+	/*
+	 * Structures all installed ... call the ldisc open routines.
+	 * If we fail here just call release_tty to clean up.  No need
+	 * to decrement the use counts, as release_tty doesn't care.
+	 */
+	retval = tty_ldisc_setup(tty, tty->link);
+	if (retval)
+		goto err_release_tty;
+	tty_ldisc_unlock(tty);
+	/* Return the tty locked so that it cannot vanish under the caller */
+	return tty;
+
+	/* Failure before the driver table slot was claimed: just free */
+err_free_tty:
+	tty_unlock(tty);
+	free_tty_struct(tty);
+err_module_put:
+	module_put(driver->owner);
+	return ERR_PTR(retval);
+
+	/* call the tty release_tty routine to clean out this slot */
+err_release_tty:
+	tty_ldisc_unlock(tty);
+	tty_info_ratelimited(tty, "ldisc open failed (%d), clearing slot %d\n",
+			     retval, idx);
+err_release_lock:
+	tty_unlock(tty);
+	release_tty(tty, idx);
+	return ERR_PTR(retval);
+}
+
+/**
+ * tty_save_termios() - save tty termios data in driver table
+ * @tty: tty whose termios data to save
+ *
+ * Stash a copy of the current termios in the driver table so that a
+ * future open of the same line can restore the settings. The per-line
+ * slot is allocated lazily on first save.
+ *
+ * Locking: Caller guarantees serialisation with tty_init_termios().
+ */
+void tty_save_termios(struct tty_struct *tty)
+{
+	struct ktermios *tp;
+	int idx = tty->index;
+
+	/* If the port is going to reset then it has no termios to save */
+	if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS)
+		return;
+
+	tp = tty->driver->termios[idx];
+	if (!tp) {
+		/* First save for this line: allocate the slot lazily */
+		tp = kmalloc(sizeof(*tp), GFP_KERNEL);
+		if (!tp)
+			return;
+		tty->driver->termios[idx] = tp;
+	}
+	*tp = tty->termios;
+}
+EXPORT_SYMBOL_GPL(tty_save_termios);
+
+/**
+ *	tty_flush_works		-	flush all works of a tty/pty pair
+ *	@tty: tty device to flush works for (or either end of a pty pair)
+ *
+ *	Sync flush all works belonging to @tty (and the 'other' tty).
+ */
+static void tty_flush_works(struct tty_struct *tty)
+{
+	struct tty_struct *other = tty->link;
+
+	flush_work(&tty->SAK_work);
+	flush_work(&tty->hangup_work);
+
+	/* For a pty pair, flush the partner's works as well */
+	if (other) {
+		flush_work(&other->SAK_work);
+		flush_work(&other->hangup_work);
+	}
+}
+
+/**
+ *	release_one_tty		-	release tty structure memory
+ *	@work: work of tty we are obliterating
+ *
+ *	Releases memory associated with a tty structure, and clears out the
+ *	driver table slots. This function is called when a device is no longer
+ *	in use. It also gets called when setup of a device fails.
+ *
+ *	Locking:
+ *		takes the file list lock internally when working on the list
+ *	of ttys that the driver keeps.
+ *
+ *	This method gets called from a work queue so that the driver private
+ *	cleanup ops can sleep (needed for USB at least)
+ */
+static void release_one_tty(struct work_struct *work)
+{
+	struct tty_struct *tty =
+		container_of(work, struct tty_struct, hangup_work);
+	struct tty_driver *driver = tty->driver;
+	struct module *owner = driver->owner;
+
+	/* Driver private teardown first, while tty is still intact */
+	if (tty->ops->cleanup)
+		tty->ops->cleanup(tty);
+
+	tty->magic = 0;
+	tty_driver_kref_put(driver);
+	module_put(owner);
+
+	/* Unhook from the per-tty file list under its lock */
+	spin_lock(&tty->files_lock);
+	list_del_init(&tty->tty_files);
+	spin_unlock(&tty->files_lock);
+
+	/* Drop pid references before freeing the structure itself */
+	put_pid(tty->pgrp);
+	put_pid(tty->session);
+	free_tty_struct(tty);
+}
+
+/*
+ * kref release callback: defer the actual teardown to process context
+ * via the work queue so driver cleanup ops may sleep.
+ */
+static void queue_release_one_tty(struct kref *kref)
+{
+	struct tty_struct *tty = container_of(kref, struct tty_struct, kref);
+
+	/* The hangup queue is now free so we can reuse it rather than
+	   waste a chunk of memory for each port */
+	INIT_WORK(&tty->hangup_work, release_one_tty);
+	schedule_work(&tty->hangup_work);
+}
+
+/**
+ *	tty_kref_put		-	release a tty kref
+ *	@tty: tty device (may be NULL, in which case this is a no-op)
+ *
+ *	Release a reference to a tty device and if need be let the kref
+ *	layer destruct the object for us
+ */
+
+void tty_kref_put(struct tty_struct *tty)
+{
+	if (!tty)
+		return;
+	kref_put(&tty->kref, queue_release_one_tty);
+}
+EXPORT_SYMBOL(tty_kref_put);
+
+/**
+ *	release_tty		-	release tty structure memory
+ *	@tty: tty to release
+ *	@idx: index of the tty (must match tty->index)
+ *
+ *	Release both @tty and a possible linked partner (think pty pair),
+ *	and decrement the refcount of the backing module.
+ *
+ *	Locking:
+ *		tty_mutex
+ *		takes the file list lock internally when working on the list
+ *	of ttys that the driver keeps.
+ *
+ */
+static void release_tty(struct tty_struct *tty, int idx)
+{
+	/* This should always be true but check for the moment */
+	WARN_ON(tty->index != idx);
+	WARN_ON(!mutex_is_locked(&tty_mutex));
+	if (tty->ops->shutdown)
+		tty->ops->shutdown(tty);
+	tty_save_termios(tty);
+	tty_driver_remove_tty(tty->driver, tty);
+	/* Detach from the ports before the buffer work is cancelled */
+	if (tty->port)
+		tty->port->itty = NULL;
+	if (tty->link)
+		tty->link->port->itty = NULL;
+	if (tty->port)
+		tty_buffer_cancel_work(tty->port);
+	if (tty->link)
+		tty_buffer_cancel_work(tty->link->port);
+
+	/* Drop the final references; actual free is deferred via kref */
+	tty_kref_put(tty->link);
+	tty_kref_put(tty);
+}
+
+/**
+ *	tty_release_checks - check a tty before real release
+ *	@tty: tty to check
+ *	@idx: index of the tty
+ *
+ *	Performs some paranoid checking before true release of the @tty.
+ *	This is a no-op unless TTY_PARANOIA_CHECK is defined.
+ *
+ *	Returns 0 if the tty looks sane, -1 on an inconsistency.
+ */
+static int tty_release_checks(struct tty_struct *tty, int idx)
+{
+#ifdef TTY_PARANOIA_CHECK
+	if (idx < 0 || idx >= tty->driver->num) {
+		tty_debug(tty, "bad idx %d\n", idx);
+		return -1;
+	}
+
+	/* not much to check for devpts */
+	if (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM)
+		return 0;
+
+	/* The driver table slot must point back at this tty */
+	if (tty != tty->driver->ttys[idx]) {
+		tty_debug(tty, "bad driver table[%d] = %p\n",
+			  idx, tty->driver->ttys[idx]);
+		return -1;
+	}
+	/* For paired drivers (pty), cross-check the partner linkage */
+	if (tty->driver->other) {
+		struct tty_struct *o_tty = tty->link;
+
+		if (o_tty != tty->driver->other->ttys[idx]) {
+			tty_debug(tty, "bad other table[%d] = %p\n",
+				  idx, tty->driver->other->ttys[idx]);
+			return -1;
+		}
+		if (o_tty->link != tty) {
+			tty_debug(tty, "bad link = %p\n", o_tty->link);
+			return -1;
+		}
+	}
+#endif
+	return 0;
+}
+
+/**
+ *      tty_kclose      -       closes tty opened by tty_kopen
+ *      @tty: tty device
+ *
+ *      Performs the final steps to release and free a tty device. It is the
+ *      same as tty_release_struct except that it also resets TTY_PORT_KOPENED
+ *      flag on tty->port.
+ */
+void tty_kclose(struct tty_struct *tty)
+{
+	/*
+	 * Ask the line discipline code to release its structures
+	 */
+	tty_ldisc_release(tty);
+
+	/* Wait for pending work before tty destruction commences */
+	tty_flush_works(tty);
+
+	tty_debug_hangup(tty, "freeing structure\n");
+	/*
+	 * The release_tty function takes care of the details of clearing
+	 * the slots and preserving the termios structure.
+	 */
+	mutex_lock(&tty_mutex);
+	tty_port_set_kopened(tty->port, 0);
+	release_tty(tty, tty->index);
+	mutex_unlock(&tty_mutex);
+}
+EXPORT_SYMBOL_GPL(tty_kclose);
+
+/**
+ *	tty_release_struct	-	release a tty struct
+ *	@tty: tty device
+ *	@idx: index of the tty
+ *
+ *	Performs the final steps to release and free a tty device. It is
+ *	roughly the reverse of tty_init_dev.
+ */
+void tty_release_struct(struct tty_struct *tty, int idx)
+{
+	/*
+	 * Ask the line discipline code to release its structures
+	 */
+	tty_ldisc_release(tty);
+
+	/* Wait for pending work before tty destruction commences */
+	tty_flush_works(tty);
+
+	tty_debug_hangup(tty, "freeing structure\n");
+	/*
+	 * The release_tty function takes care of the details of clearing
+	 * the slots and preserving the termios structure.
+	 */
+	mutex_lock(&tty_mutex);
+	release_tty(tty, idx);
+	mutex_unlock(&tty_mutex);
+}
+EXPORT_SYMBOL_GPL(tty_release_struct);
+
+/**
+ *	tty_release		-	vfs callback for close
+ *	@inode: inode of tty
+ *	@filp: file pointer for handle to tty
+ *
+ *	Called the last time each file handle is closed that references
+ *	this tty. There may however be several such references.
+ *
+ *	Locking:
+ *		Takes bkl. See tty_release_dev
+ *
+ * Even releasing the tty structures is a tricky business.. We have
+ * to be very careful that the structures are all released at the
+ * same time, as interrupts might otherwise get the wrong pointers.
+ *
+ * WSH 09/09/97: rewritten to avoid some nasty race conditions that could
+ * lead to double frees or releasing memory still in use.
+ */
+
+int tty_release(struct inode *inode, struct file *filp)
+{
+	struct tty_struct *tty = file_tty(filp);
+	struct tty_struct *o_tty = NULL;
+	int	do_sleep, final;
+	int	idx;
+	long	timeout = 0;
+	int	once = 1;
+
+	if (tty_paranoia_check(tty, inode, __func__))
+		return 0;
+
+	tty_lock(tty);
+	check_tty_count(tty, __func__);
+
+	/* Drop any async (SIGIO) registration tied to this file */
+	__tty_fasync(-1, filp, 0);
+
+	idx = tty->index;
+	if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+	    tty->driver->subtype == PTY_TYPE_MASTER)
+		o_tty = tty->link;
+
+	if (tty_release_checks(tty, idx)) {
+		tty_unlock(tty);
+		return 0;
+	}
+
+	tty_debug_hangup(tty, "releasing (count=%d)\n", tty->count);
+
+	if (tty->ops->close)
+		tty->ops->close(tty, filp);
+
+	/* If tty is pty master, lock the slave pty (stable lock order) */
+	tty_lock_slave(o_tty);
+
+	/*
+	 * Sanity check: if tty->count is going to zero, there shouldn't be
+	 * any waiters on tty->read_wait or tty->write_wait.  We test the
+	 * wait queues and kick everyone out _before_ actually starting to
+	 * close.  This ensures that we won't block while releasing the tty
+	 * structure.
+	 *
+	 * The test for the o_tty closing is necessary, since the master and
+	 * slave sides may close in any order.  If the slave side closes out
+	 * first, its count will be one, since the master side holds an open.
+	 * Thus this test wouldn't be triggered at the time the slave closed,
+	 * so we do it now.
+	 */
+	while (1) {
+		do_sleep = 0;
+
+		if (tty->count <= 1) {
+			if (waitqueue_active(&tty->read_wait)) {
+				wake_up_poll(&tty->read_wait, EPOLLIN);
+				do_sleep++;
+			}
+			if (waitqueue_active(&tty->write_wait)) {
+				wake_up_poll(&tty->write_wait, EPOLLOUT);
+				do_sleep++;
+			}
+		}
+		if (o_tty && o_tty->count <= 1) {
+			if (waitqueue_active(&o_tty->read_wait)) {
+				wake_up_poll(&o_tty->read_wait, EPOLLIN);
+				do_sleep++;
+			}
+			if (waitqueue_active(&o_tty->write_wait)) {
+				wake_up_poll(&o_tty->write_wait, EPOLLOUT);
+				do_sleep++;
+			}
+		}
+		if (!do_sleep)
+			break;
+
+		if (once) {
+			once = 0;
+			tty_warn(tty, "read/write wait queue active!\n");
+		}
+		/* Exponential backoff while waiters drain, capped at 120s */
+		schedule_timeout_killable(timeout);
+		if (timeout < 120 * HZ)
+			timeout = 2 * timeout + 1;
+		else
+			timeout = MAX_SCHEDULE_TIMEOUT;
+	}
+
+	if (o_tty) {
+		if (--o_tty->count < 0) {
+			tty_warn(tty, "bad slave count (%d)\n", o_tty->count);
+			o_tty->count = 0;
+		}
+	}
+	if (--tty->count < 0) {
+		tty_warn(tty, "bad tty->count (%d)\n", tty->count);
+		tty->count = 0;
+	}
+
+	/*
+	 * We've decremented tty->count, so we need to remove this file
+	 * descriptor off the tty->tty_files list; this serves two
+	 * purposes:
+	 *  - check_tty_count sees the correct number of file descriptors
+	 *    associated with this tty.
+	 *  - do_tty_hangup no longer sees this file descriptor as
+	 *    something that needs to be handled for hangups.
+	 */
+	tty_del_file(filp);
+
+	/*
+	 * Perform some housekeeping before deciding whether to return.
+	 *
+	 * If _either_ side is closing, make sure there aren't any
+	 * processes that still think tty or o_tty is their controlling
+	 * tty.
+	 */
+	if (!tty->count) {
+		read_lock(&tasklist_lock);
+		session_clear_tty(tty->session);
+		if (o_tty)
+			session_clear_tty(o_tty->session);
+		read_unlock(&tasklist_lock);
+	}
+
+	/* check whether both sides are closing ... */
+	final = !tty->count && !(o_tty && o_tty->count);
+
+	tty_unlock_slave(o_tty);
+	tty_unlock(tty);
+
+	/* At this point, the tty->count == 0 should ensure a dead tty
+	   cannot be re-opened by a racing opener */
+
+	if (!final)
+		return 0;
+
+	tty_debug_hangup(tty, "final close\n");
+
+	tty_release_struct(tty, idx);
+	return 0;
+}
+
+/**
+ *	tty_open_current_tty - get locked tty of current task
+ *	@device: device number
+ *	@filp: file pointer to tty
+ *	@return: locked tty of the current task iff @device is /dev/tty
+ *
+ *	Performs a re-open of the current task's controlling tty.
+ *
+ *	We cannot return driver and index like for the other nodes because
+ *	devpts will not work then. It expects inodes to be from devpts FS.
+ */
+static struct tty_struct *tty_open_current_tty(dev_t device, struct file *filp)
+{
+	struct tty_struct *tty;
+	int retval;
+
+	/* Only /dev/tty (TTYAUX_MAJOR, 0) is handled here */
+	if (device != MKDEV(TTYAUX_MAJOR, 0))
+		return NULL;
+
+	tty = get_current_tty();
+	if (!tty)
+		return ERR_PTR(-ENXIO);
+
+	filp->f_flags |= O_NONBLOCK; /* Don't let /dev/tty block */
+	/* noctty = 1; */
+	tty_lock(tty);
+	tty_kref_put(tty);	/* safe to drop the kref now */
+
+	retval = tty_reopen(tty);
+	if (retval < 0) {
+		tty_unlock(tty);
+		tty = ERR_PTR(retval);
+	}
+	return tty;
+}
+
+/**
+ *	tty_lookup_driver - lookup a tty driver for a given device file
+ *	@device: device number
+ *	@filp: file pointer to tty (NULL for kernel-internal opens)
+ *	@index: index for the device in the @return driver
+ *	@return: driver for this inode (with increased refcount)
+ *
+ * 	If @return is not erroneous, the caller is responsible to decrement the
+ * 	refcount by tty_driver_kref_put.
+ *
+ *	Locking: tty_mutex protects get_tty_driver
+ */
+static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
+		int *index)
+{
+	struct tty_driver *driver = NULL;
+
+	switch (device) {
+#ifdef CONFIG_VT
+	/* /dev/tty0: the currently active virtual console */
+	case MKDEV(TTY_MAJOR, 0): {
+		extern struct tty_driver *console_driver;
+		driver = tty_driver_kref_get(console_driver);
+		*index = fg_console;
+		break;
+	}
+#endif
+	/* /dev/console: whatever the console subsystem points at */
+	case MKDEV(TTYAUX_MAJOR, 1): {
+		struct tty_driver *console_driver = console_device(index);
+		if (console_driver) {
+			driver = tty_driver_kref_get(console_driver);
+			if (driver && filp) {
+				/* Don't let /dev/console block */
+				filp->f_flags |= O_NONBLOCK;
+				break;
+			}
+		}
+		/* No usable console (or kernel open): drop any ref taken */
+		if (driver)
+			tty_driver_kref_put(driver);
+		return ERR_PTR(-ENODEV);
+	}
+	default:
+		driver = get_tty_driver(device, index);
+		if (!driver)
+			return ERR_PTR(-ENODEV);
+		break;
+	}
+	return driver;
+}
+
+/**
+ *	tty_kopen	-	open a tty device for kernel
+ *	@device: dev_t of device to open
+ *
+ *	Opens tty exclusively for kernel. Performs the driver lookup,
+ *	makes sure it's not already opened and performs the first-time
+ *	tty initialization.
+ *
+ *	Returns the locked initialized &tty_struct, or an ERR_PTR()
+ *	(-EBUSY if the tty is already open).
+ *
+ *	Claims the global tty_mutex to serialize:
+ *	  - concurrent first-time tty initialization
+ *	  - concurrent tty driver removal w/ lookup
+ *	  - concurrent tty removal from driver table
+ */
+struct tty_struct *tty_kopen(dev_t device)
+{
+	struct tty_struct *tty;
+	struct tty_driver *driver;
+	int index = -1;
+
+	mutex_lock(&tty_mutex);
+	driver = tty_lookup_driver(device, NULL, &index);
+	if (IS_ERR(driver)) {
+		mutex_unlock(&tty_mutex);
+		return ERR_CAST(driver);
+	}
+
+	/* check whether we're reopening an existing tty */
+	tty = tty_driver_lookup_tty(driver, NULL, index);
+	if (IS_ERR(tty))
+		goto out;
+
+	if (tty) {
+		/* drop kref from tty_driver_lookup_tty() */
+		tty_kref_put(tty);
+		/* kernel opens are exclusive: an existing open is an error */
+		tty = ERR_PTR(-EBUSY);
+	} else { /* tty_init_dev returns tty with the tty_lock held */
+		tty = tty_init_dev(driver, index);
+		if (IS_ERR(tty))
+			goto out;
+		tty_port_set_kopened(tty->port, 1);
+	}
+out:
+	mutex_unlock(&tty_mutex);
+	tty_driver_kref_put(driver);
+	return tty;
+}
+EXPORT_SYMBOL_GPL(tty_kopen);
+
+/**
+ *	tty_open_by_driver	-	open a tty device
+ *	@device: dev_t of device to open
+ *	@filp: file pointer to tty
+ *
+ *	Performs the driver lookup, checks for a reopen, or otherwise
+ *	performs the first-time tty initialization.
+ *
+ *	Returns the locked initialized or re-opened &tty_struct
+ *
+ *	Claims the global tty_mutex to serialize:
+ *	  - concurrent first-time tty initialization
+ *	  - concurrent tty driver removal w/ lookup
+ *	  - concurrent tty removal from driver table
+ */
+static struct tty_struct *tty_open_by_driver(dev_t device,
+					     struct file *filp)
+{
+	struct tty_struct *tty;
+	struct tty_driver *driver = NULL;
+	int index = -1;
+	int retval;
+
+	mutex_lock(&tty_mutex);
+	driver = tty_lookup_driver(device, filp, &index);
+	if (IS_ERR(driver)) {
+		mutex_unlock(&tty_mutex);
+		return ERR_CAST(driver);
+	}
+
+	/* check whether we're reopening an existing tty */
+	tty = tty_driver_lookup_tty(driver, filp, index);
+	if (IS_ERR(tty)) {
+		mutex_unlock(&tty_mutex);
+		goto out;
+	}
+
+	if (tty) {
+		/* A tty held open by tty_kopen() must not be reopened */
+		if (tty_port_kopened(tty->port)) {
+			tty_kref_put(tty);
+			mutex_unlock(&tty_mutex);
+			tty = ERR_PTR(-EBUSY);
+			goto out;
+		}
+		/* Drop tty_mutex before taking the tty lock (lock order) */
+		mutex_unlock(&tty_mutex);
+		retval = tty_lock_interruptible(tty);
+		tty_kref_put(tty);  /* drop kref from tty_driver_lookup_tty() */
+		if (retval) {
+			if (retval == -EINTR)
+				retval = -ERESTARTSYS;
+			tty = ERR_PTR(retval);
+			goto out;
+		}
+		retval = tty_reopen(tty);
+		if (retval < 0) {
+			tty_unlock(tty);
+			tty = ERR_PTR(retval);
+		}
+	} else { /* Returns with the tty_lock held for now */
+		tty = tty_init_dev(driver, index);
+		mutex_unlock(&tty_mutex);
+	}
+out:
+	tty_driver_kref_put(driver);
+	return tty;
+}
+
+/**
+ *	tty_open		-	open a tty device
+ *	@inode: inode of device file
+ *	@filp: file pointer to tty
+ *
+ *	tty_open and tty_release keep up the tty count that contains the
+ *	number of opens done on a tty. We cannot use the inode-count, as
+ *	different inodes might point to the same tty.
+ *
+ *	Open-counting is needed for pty masters, as well as for keeping
+ *	track of serial lines: DTR is dropped when the last close happens.
+ *	(This is not done solely through tty->count, now.  - Ted 1/27/92)
+ *
+ *	The termios state of a pty is reset on first open so that
+ *	settings don't persist across reuse.
+ *
+ *	Locking: tty_mutex protects tty, tty_lookup_driver and tty_init_dev.
+ *		 tty->count should protect the rest.
+ *		 ->siglock protects ->signal/->sighand
+ *
+ *	Note: the tty_unlock/lock cases without a ref are only safe due to
+ *	tty_mutex
+ */
+
+static int tty_open(struct inode *inode, struct file *filp)
+{
+	struct tty_struct *tty;
+	int noctty, retval;
+	dev_t device = inode->i_rdev;
+	unsigned saved_flags = filp->f_flags;
+
+	nonseekable_open(inode, filp);
+
+retry_open:
+	retval = tty_alloc_file(filp);
+	if (retval)
+		return -ENOMEM;
+
+	/* /dev/tty is handled specially; everything else via the driver */
+	tty = tty_open_current_tty(device, filp);
+	if (!tty)
+		tty = tty_open_by_driver(device, filp);
+
+	if (IS_ERR(tty)) {
+		tty_free_file(filp);
+		retval = PTR_ERR(tty);
+		/* -EAGAIN means a transient race: back off and retry */
+		if (retval != -EAGAIN || signal_pending(current))
+			return retval;
+		schedule();
+		goto retry_open;
+	}
+
+	tty_add_file(tty, filp);
+
+	check_tty_count(tty, __func__);
+	tty_debug_hangup(tty, "opening (count=%d)\n", tty->count);
+
+	if (tty->ops->open)
+		retval = tty->ops->open(tty, filp);
+	else
+		retval = -ENODEV;
+	/* Undo any O_NONBLOCK forced during the lookup phase */
+	filp->f_flags = saved_flags;
+
+	if (retval) {
+		tty_debug_hangup(tty, "open error %d, releasing\n", retval);
+
+		tty_unlock(tty); /* need to call tty_release without BTM */
+		tty_release(inode, filp);
+		if (retval != -ERESTARTSYS)
+			return retval;
+
+		if (signal_pending(current))
+			return retval;
+
+		schedule();
+		/*
+		 * Need to reset f_op in case a hangup happened.
+		 */
+		if (tty_hung_up_p(filp))
+			filp->f_op = &tty_fops;
+		goto retry_open;
+	}
+	clear_bit(TTY_HUPPED, &tty->flags);
+
+	/* Decide whether this open may acquire a controlling terminal */
+	noctty = (filp->f_flags & O_NOCTTY) ||
+		 (IS_ENABLED(CONFIG_VT) && device == MKDEV(TTY_MAJOR, 0)) ||
+		 device == MKDEV(TTYAUX_MAJOR, 1) ||
+		 (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+		  tty->driver->subtype == PTY_TYPE_MASTER);
+	if (!noctty)
+		tty_open_proc_set_tty(filp, tty);
+	tty_unlock(tty);
+	return 0;
+}
+
+
+
+/**
+ *	tty_poll	-	check tty status
+ *	@filp: file being polled
+ *	@wait: poll wait structures to update
+ *
+ *	Call the line discipline polling method to obtain the poll
+ *	status of the device.
+ *
+ *	Locking: locks called line discipline but ldisc poll method
+ *	may be re-entered freely by other callers.
+ */
+
+static __poll_t tty_poll(struct file *filp, poll_table *wait)
+{
+	struct tty_struct *tty = file_tty(filp);
+	struct tty_ldisc *ld;
+	__poll_t mask = 0;
+
+	if (tty_paranoia_check(tty, file_inode(filp), "tty_poll"))
+		return 0;
+
+	ld = tty_ldisc_ref_wait(tty);
+	if (!ld)
+		return hung_up_tty_poll(filp, wait);
+
+	/* Delegate to the discipline if it implements poll */
+	if (ld->ops->poll)
+		mask = ld->ops->poll(tty, filp, wait);
+	tty_ldisc_deref(ld);
+	return mask;
+}
+
+/*
+ * Register or unregister async (SIGIO) notification for @filp.
+ * Caller holds the tty lock; tty_release calls this with fd == -1
+ * and on == 0 to tear the registration down.
+ */
+static int __tty_fasync(int fd, struct file *filp, int on)
+{
+	struct tty_struct *tty = file_tty(filp);
+	unsigned long flags;
+	int retval = 0;
+
+	if (tty_paranoia_check(tty, file_inode(filp), "tty_fasync"))
+		goto out;
+
+	retval = fasync_helper(fd, filp, on, &tty->fasync);
+	if (retval <= 0)
+		goto out;
+
+	if (on) {
+		enum pid_type type;
+		struct pid *pid;
+
+		/* Pick the SIGIO target under ctrl_lock: the foreground
+		 * process group if there is one, else the current task. */
+		spin_lock_irqsave(&tty->ctrl_lock, flags);
+		if (tty->pgrp) {
+			pid = tty->pgrp;
+			type = PIDTYPE_PGID;
+		} else {
+			pid = task_pid(current);
+			type = PIDTYPE_TGID;
+		}
+		/* Hold a pid ref across the unlock; __f_setown takes its own */
+		get_pid(pid);
+		spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+		__f_setown(filp, pid, type, 0);
+		put_pid(pid);
+		retval = 0;
+	}
+out:
+	return retval;
+}
+
+/* fasync file operation: wraps __tty_fasync() with the tty lock and
+ * refuses the request once the tty has been hung up. */
+static int tty_fasync(int fd, struct file *filp, int on)
+{
+	struct tty_struct *tty = file_tty(filp);
+	int ret = -ENOTTY;
+
+	tty_lock(tty);
+	if (!tty_hung_up_p(filp))
+		ret = __tty_fasync(fd, filp, on);
+	tty_unlock(tty);
+
+	return ret;
+}
+
+/**
+ *	tiocsti			-	fake input character
+ *	@tty: tty to fake input into
+ *	@p: pointer to character
+ *
+ *	Fake input to a tty device. Does the necessary locking and
+ *	input management.
+ *
+ *	FIXME: does not honour flow control ??
+ *
+ *	Locking:
+ *		Called functions take tty_ldiscs_lock
+ *		current->signal->tty check is safe without locks
+ */
+
+static int tiocsti(struct tty_struct *tty, char __user *p)
+{
+	char ch, mbz = 0;
+	struct tty_ldisc *ld;
+
+	/* Only the controlling process (or a privileged one) may inject */
+	if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	if (get_user(ch, p))
+		return -EFAULT;
+	tty_audit_tiocsti(tty, ch);
+	ld = tty_ldisc_ref_wait(tty);
+	if (!ld)
+		return -EIO;
+	/* Exclude concurrent buffer work while injecting the character */
+	tty_buffer_lock_exclusive(tty->port);
+	if (ld->ops->receive_buf)
+		ld->ops->receive_buf(tty, &ch, &mbz, 1);
+	tty_buffer_unlock_exclusive(tty->port);
+	tty_ldisc_deref(ld);
+	return 0;
+}
+
+/**
+ *	tiocgwinsz		-	implement window query ioctl
+ *	@tty: tty
+ *	@arg: user buffer for result
+ *
+ *	Copies the kernel idea of the window size into the user buffer.
+ *
+ *	Locking: tty->winsize_mutex is taken to ensure the winsize data
+ *		is consistent.
+ */
+
+static int tiocgwinsz(struct tty_struct *tty, struct winsize __user *arg)
+{
+	int ret = 0;
+
+	mutex_lock(&tty->winsize_mutex);
+	if (copy_to_user(arg, &tty->winsize, sizeof(*arg)))
+		ret = -EFAULT;
+	mutex_unlock(&tty->winsize_mutex);
+
+	return ret;
+}
+
+/**
+ *	tty_do_resize		-	resize event
+ *	@tty: tty being resized
+ *	@ws: new dimensions
+ *
+ *	Update the termios variables and send the necessary signals to
+ *	perform a terminal resize correctly
+ */
+
+int tty_do_resize(struct tty_struct *tty, struct winsize *ws)
+{
+	struct pid *pgrp;
+
+	/* Lock the tty */
+	mutex_lock(&tty->winsize_mutex);
+	/* No change: skip the signal and the store */
+	if (!memcmp(ws, &tty->winsize, sizeof(*ws)))
+		goto done;
+
+	/* Signal the foreground process group */
+	pgrp = tty_get_pgrp(tty);
+	if (pgrp)
+		kill_pgrp(pgrp, SIGWINCH, 1);
+	put_pid(pgrp);
+
+	tty->winsize = *ws;
+done:
+	mutex_unlock(&tty->winsize_mutex);
+	return 0;
+}
+EXPORT_SYMBOL(tty_do_resize);
+
+/**
+ *	tiocswinsz		-	implement window size set ioctl
+ *	@tty: tty side of tty
+ *	@arg: user buffer for result
+ *
+ *	Copies the user idea of the window size to the kernel. Traditionally
+ *	this is just advisory information but for the Linux console it
+ *	actually has driver level meaning and triggers a VC resize.
+ *
+ *	Locking:
+ *		Driver dependent. The default do_resize method takes the
+ *	tty termios mutex and ctrl_lock. The console takes its own lock
+ *	then calls into the default method.
+ */
+
+static int tiocswinsz(struct tty_struct *tty, struct winsize __user *arg)
+{
+	struct winsize ws;
+
+	if (copy_from_user(&ws, arg, sizeof(*arg)))
+		return -EFAULT;
+
+	/* Drivers with a resize method get first say (e.g. the console) */
+	if (tty->ops->resize)
+		return tty->ops->resize(tty, &ws);
+
+	return tty_do_resize(tty, &ws);
+}
+
+/**
+ *	tioccons	-	allow admin to move logical console
+ *	@file: the file to become console
+ *
+ *	Allow the administrator to move the redirected console device
+ *
+ *	Locking: uses redirect_lock to guard the redirect information
+ */
+
+static int tioccons(struct file *file)
+{
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	/* Issued on the redirected tty itself: undo the redirection */
+	if (file->f_op->write_iter == redirected_tty_write) {
+		struct file *f;
+		spin_lock(&redirect_lock);
+		f = redirect;
+		redirect = NULL;
+		spin_unlock(&redirect_lock);
+		/* fput outside the spinlock: it may sleep */
+		if (f)
+			fput(f);
+		return 0;
+	}
+	if (file->f_op->write_iter != tty_write)
+		return -ENOTTY;
+	if (!(file->f_mode & FMODE_WRITE))
+		return -EBADF;
+	if (!(file->f_mode & FMODE_CAN_WRITE))
+		return -EINVAL;
+	spin_lock(&redirect_lock);
+	/* Only one redirection target may be active at a time */
+	if (redirect) {
+		spin_unlock(&redirect_lock);
+		return -EBUSY;
+	}
+	redirect = get_file(file);
+	spin_unlock(&redirect_lock);
+	return 0;
+}
+
+/**
+ *	tiocsetd	-	set line discipline
+ *	@tty: tty device
+ *	@p: pointer to user data
+ *
+ *	Set the line discipline according to user request.
+ *
+ *	Locking: see tty_set_ldisc, this function is just a helper
+ */
+
+static int tiocsetd(struct tty_struct *tty, int __user *p)
+{
+	int disc;
+
+	if (get_user(disc, p))
+		return -EFAULT;
+
+	return tty_set_ldisc(tty, disc);
+}
+
+/**
+ *	tiocgetd	-	get line discipline
+ *	@tty: tty device
+ *	@p: pointer to user data
+ *
+ *	Retrieves the line discipline id directly from the ldisc.
+ *
+ *	Locking: waits for ldisc reference (in case the line discipline
+ *		is changing or the tty is being hungup)
+ */
+
+static int tiocgetd(struct tty_struct *tty, int __user *p)
+{
+	struct tty_ldisc *ld = tty_ldisc_ref_wait(tty);
+	int ret;
+
+	if (!ld)
+		return -EIO;
+
+	ret = put_user(ld->ops->num, p);
+	tty_ldisc_deref(ld);
+	return ret;
+}
+
+/**
+ *	send_break	-	performed time break
+ *	@tty: device to break on
+ *	@duration: timeout in mS
+ *
+ *	Perform a timed break on hardware that lacks its own driver level
+ *	timed break functionality.
+ *
+ *	Locking:
+ *		atomic_write_lock serializes
+ *
+ */
+
+static int send_break(struct tty_struct *tty, unsigned int duration)
+{
+	int retval;
+
+	if (tty->ops->break_ctl == NULL)
+		return 0;
+
+	/* Hardware-timed break: hand the duration straight to the driver */
+	if (tty->driver->flags & TTY_DRIVER_HARDWARE_BREAK)
+		retval = tty->ops->break_ctl(tty, duration);
+	else {
+		/* Do the work ourselves */
+		if (tty_write_lock(tty, 0) < 0)
+			return -EINTR;
+		/* Assert break (-1), sleep for the duration, deassert (0) */
+		retval = tty->ops->break_ctl(tty, -1);
+		if (retval)
+			goto out;
+		if (!signal_pending(current))
+			msleep_interruptible(duration);
+		retval = tty->ops->break_ctl(tty, 0);
+out:
+		tty_write_unlock(tty);
+		if (signal_pending(current))
+			retval = -EINTR;
+	}
+	return retval;
+}
+
+/**
+ *	tty_tiocmget		-	get modem status
+ *	@tty: tty device
+ *	@p: pointer to result
+ *
+ *	Obtain the modem status bits from the tty driver if the feature
+ *	is supported. Return -ENOTTY if it is not available.
+ *
+ *	Locking: none (up to the driver)
+ */
+
+static int tty_tiocmget(struct tty_struct *tty, int __user *p)
+{
+	int retval = -ENOTTY;
+
+	if (tty->ops->tiocmget) {
+		retval = tty->ops->tiocmget(tty);
+
+		/* Negative values are driver errors; pass them straight back. */
+		if (retval >= 0)
+			retval = put_user(retval, p);
+	}
+	return retval;
+}
+
+/**
+ *	tty_tiocmset		-	set modem status
+ *	@tty: tty device
+ *	@cmd: command - clear bits, set bits or set all
+ *	@p: pointer to desired bits
+ *
+ *	Set the modem status bits from the tty driver if the feature
+ *	is supported. Return -ENOTTY if it is not available.
+ *
+ *	Locking: none (up to the driver)
+ */
+
+static int tty_tiocmset(struct tty_struct *tty, unsigned int cmd,
+	     unsigned __user *p)
+{
+	int retval;
+	unsigned int set, clear, val;
+
+	if (tty->ops->tiocmset == NULL)
+		return -ENOTTY;
+
+	retval = get_user(val, p);
+	if (retval)
+		return retval;
+	set = clear = 0;
+	/* Translate the three modem-control ioctls into set/clear masks. */
+	switch (cmd) {
+	case TIOCMBIS:
+		set = val;
+		break;
+	case TIOCMBIC:
+		clear = val;
+		break;
+	case TIOCMSET:
+		set = val;
+		clear = ~val;
+		break;
+	}
+	/* Userspace may only drive the output lines, never the inputs. */
+	set &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP;
+	clear &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP;
+	return tty->ops->tiocmset(tty, set, clear);
+}
+
+/* TIOCGICOUNT: copy the driver's interrupt/event counters to userspace. */
+static int tty_tiocgicount(struct tty_struct *tty, void __user *arg)
+{
+	int retval = -EINVAL;
+	struct serial_icounter_struct icount;
+	/* Zero-fill so fields the driver leaves unset never leak stack data. */
+	memset(&icount, 0, sizeof(icount));
+	if (tty->ops->get_icount)
+		retval = tty->ops->get_icount(tty, &icount);
+	if (retval != 0)
+		return retval;
+	if (copy_to_user(arg, &icount, sizeof(icount)))
+		return -EFAULT;
+	return 0;
+}
+
+/* TIOCSSERIAL: set serial line parameters via the driver's set_serial(). */
+static int tty_tiocsserial(struct tty_struct *tty, struct serial_struct __user *ss)
+{
+	static DEFINE_RATELIMIT_STATE(depr_flags,
+			DEFAULT_RATELIMIT_INTERVAL,
+			DEFAULT_RATELIMIT_BURST);
+	char comm[TASK_COMM_LEN];
+	struct serial_struct v;
+	int flags;
+
+	if (copy_from_user(&v, ss, sizeof(*ss)))
+		return -EFAULT;
+
+	/* Warn (rate-limited) about flags that no longer have any effect. */
+	flags = v.flags & ASYNC_DEPRECATED;
+
+	if (flags && __ratelimit(&depr_flags))
+		pr_warn("%s: '%s' is using deprecated serial flags (with no effect): %.8x\n",
+			__func__, get_task_comm(comm, current), flags);
+	if (!tty->ops->set_serial)
+		return -ENOTTY;
+	return tty->ops->set_serial(tty, &v);
+}
+
+/* TIOCGSERIAL: report serial line parameters via the driver's get_serial(). */
+static int tty_tiocgserial(struct tty_struct *tty, struct serial_struct __user *ss)
+{
+	struct serial_struct v;
+	int err;
+
+	/* Zero-fill so unset fields never leak kernel stack to userspace. */
+	memset(&v, 0, sizeof(v));
+	if (!tty->ops->get_serial)
+		return -ENOTTY;
+	err = tty->ops->get_serial(tty, &v);
+	if (!err && copy_to_user(ss, &v, sizeof(v)))
+		err = -EFAULT;
+	return err;
+}
+
+/*
+ * if pty, return the slave side (real_tty)
+ * otherwise, return self
+ */
+static struct tty_struct *tty_pair_get_tty(struct tty_struct *tty)
+{
+	/* A pty master's "real" terminal is its slave side (tty->link). */
+	if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+	    tty->driver->subtype == PTY_TYPE_MASTER)
+		tty = tty->link;
+	return tty;
+}
+
+/*
+ * Split this up, as gcc can choke on it otherwise..
+ */
+long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct tty_struct *tty = file_tty(file);
+	struct tty_struct *real_tty;
+	void __user *p = (void __user *)arg;
+	int retval;
+	struct tty_ldisc *ld;
+
+	if (tty_paranoia_check(tty, file_inode(file), "tty_ioctl"))
+		return -EINVAL;
+
+	/* For a pty master, size/device ioctls act on the slave side. */
+	real_tty = tty_pair_get_tty(tty);
+
+	/*
+	 * Factor out some common prep work
+	 */
+	switch (cmd) {
+	case TIOCSETD:
+	case TIOCSBRK:
+	case TIOCCBRK:
+	case TCSBRK:
+	case TCSBRKP:
+		/* These change terminal state: honour job-control rules. */
+		retval = tty_check_change(tty);
+		if (retval)
+			return retval;
+		if (cmd != TIOCCBRK) {
+			/* Drain pending output first; a signal aborts the wait. */
+			tty_wait_until_sent(tty, 0);
+			if (signal_pending(current))
+				return -EINTR;
+		}
+		break;
+	}
+
+	/*
+	 *	Now do the stuff.
+	 */
+	switch (cmd) {
+	case TIOCSTI:
+		return tiocsti(tty, p);
+	case TIOCGWINSZ:
+		return tiocgwinsz(real_tty, p);
+	case TIOCSWINSZ:
+		return tiocswinsz(real_tty, p);
+	case TIOCCONS:
+		/* Redirecting a pty master to the console makes no sense. */
+		return real_tty != tty ? -EINVAL : tioccons(file);
+	case TIOCEXCL:
+		set_bit(TTY_EXCLUSIVE, &tty->flags);
+		return 0;
+	case TIOCNXCL:
+		clear_bit(TTY_EXCLUSIVE, &tty->flags);
+		return 0;
+	case TIOCGEXCL:
+	{
+		int excl = test_bit(TTY_EXCLUSIVE, &tty->flags);
+		return put_user(excl, (int __user *)p);
+	}
+	case TIOCGETD:
+		return tiocgetd(tty, p);
+	case TIOCSETD:
+		return tiocsetd(tty, p);
+	case TIOCVHANGUP:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		tty_vhangup(tty);
+		return 0;
+	case TIOCGDEV:
+	{
+		unsigned int ret = new_encode_dev(tty_devnum(real_tty));
+		return put_user(ret, (unsigned int __user *)p);
+	}
+	/*
+	 * Break handling
+	 */
+	case TIOCSBRK:	/* Turn break on, unconditionally */
+		if (tty->ops->break_ctl)
+			return tty->ops->break_ctl(tty, -1);
+		return 0;
+	case TIOCCBRK:	/* Turn break off, unconditionally */
+		if (tty->ops->break_ctl)
+			return tty->ops->break_ctl(tty, 0);
+		return 0;
+	case TCSBRK:   /* SVID version: non-zero arg --> no break */
+		/* non-zero arg means wait for all output data
+		 * to be sent (performed above) but don't send break.
+		 * This is used by the tcdrain() termios function.
+		 */
+		if (!arg)
+			return send_break(tty, 250);
+		return 0;
+	case TCSBRKP:	/* support for POSIX tcsendbreak() */
+		return send_break(tty, arg ? arg*100 : 250);
+
+	case TIOCMGET:
+		return tty_tiocmget(tty, p);
+	case TIOCMSET:
+	case TIOCMBIC:
+	case TIOCMBIS:
+		return tty_tiocmset(tty, cmd, p);
+	case TIOCGICOUNT:
+		return tty_tiocgicount(tty, p);
+	case TCFLSH:
+		/* Falls through to the driver/ldisc below after the flush. */
+		switch (arg) {
+		case TCIFLUSH:
+		case TCIOFLUSH:
+		/* flush tty buffer and allow ldisc to process ioctl */
+			tty_buffer_flush(tty, NULL);
+			break;
+		}
+		break;
+	case TIOCSSERIAL:
+		return tty_tiocsserial(tty, p);
+	case TIOCGSERIAL:
+		return tty_tiocgserial(tty, p);
+	case TIOCGPTPEER:
+		/* Special because the struct file is needed */
+		return ptm_open_peer(file, tty, (int)arg);
+	default:
+		retval = tty_jobctrl_ioctl(tty, real_tty, file, cmd, arg);
+		if (retval != -ENOIOCTLCMD)
+			return retval;
+	}
+	/* Not handled above: offer the command to the driver... */
+	if (tty->ops->ioctl) {
+		retval = tty->ops->ioctl(tty, cmd, arg);
+		if (retval != -ENOIOCTLCMD)
+			return retval;
+	}
+	/* ...and finally to the line discipline. */
+	ld = tty_ldisc_ref_wait(tty);
+	if (!ld)
+		return hung_up_tty_ioctl(file, cmd, arg);
+	retval = -EINVAL;
+	if (ld->ops->ioctl) {
+		retval = ld->ops->ioctl(tty, file, cmd, arg);
+		if (retval == -ENOIOCTLCMD)
+			retval = -ENOTTY;
+	}
+	tty_ldisc_deref(ld);
+	return retval;
+}
+
+#ifdef CONFIG_COMPAT
+
+/*
+ * 32-bit userspace layout of struct serial_struct, used to translate the
+ * TIOCSSERIAL/TIOCGSERIAL ioctls for compat (32-bit) callers.
+ */
+struct serial_struct32 {
+	compat_int_t    type;
+	compat_int_t    line;
+	compat_uint_t   port;
+	compat_int_t    irq;
+	compat_int_t    flags;
+	compat_int_t    xmit_fifo_size;
+	compat_int_t    custom_divisor;
+	compat_int_t    baud_base;
+	unsigned short  close_delay;
+	char    io_type;
+	char    reserved_char;
+	compat_int_t    hub6;
+	unsigned short  closing_wait; /* time to wait before closing */
+	unsigned short  closing_wait2; /* no longer used... */
+	compat_uint_t   iomem_base;	/* 32-bit representation of a pointer */
+	unsigned short  iomem_reg_shift;
+	unsigned int    port_high;
+	/* compat_ulong_t  iomap_base FIXME */
+	compat_int_t    reserved;
+};
+
+/* Compat TIOCSSERIAL: widen a 32-bit serial_struct and hand it to the driver. */
+static int compat_tty_tiocsserial(struct tty_struct *tty,
+		struct serial_struct32 __user *ss)
+{
+	static DEFINE_RATELIMIT_STATE(depr_flags,
+			DEFAULT_RATELIMIT_INTERVAL,
+			DEFAULT_RATELIMIT_BURST);
+	char comm[TASK_COMM_LEN];
+	struct serial_struct32 v32;
+	struct serial_struct v;
+	int flags;
+
+	if (copy_from_user(&v32, ss, sizeof(*ss)))
+		return -EFAULT;
+
+	/* Leading fields share a layout: copy wholesale up to iomem_base,
+	 * then widen the pointer-sized members individually. */
+	memcpy(&v, &v32, offsetof(struct serial_struct32, iomem_base));
+	v.iomem_base = compat_ptr(v32.iomem_base);
+	v.iomem_reg_shift = v32.iomem_reg_shift;
+	v.port_high = v32.port_high;
+	v.iomap_base = 0;
+
+	/* Warn (rate-limited) about flags that no longer have any effect. */
+	flags = v.flags & ASYNC_DEPRECATED;
+
+	if (flags && __ratelimit(&depr_flags))
+		pr_warn("%s: '%s' is using deprecated serial flags (with no effect): %.8x\n",
+			__func__, get_task_comm(comm, current), flags);
+	if (!tty->ops->set_serial)
+		return -ENOTTY;
+	return tty->ops->set_serial(tty, &v);
+}
+
+/* Compat TIOCGSERIAL: query the driver, then narrow the result to 32 bits. */
+static int compat_tty_tiocgserial(struct tty_struct *tty,
+			struct serial_struct32 __user *ss)
+{
+	struct serial_struct32 v32;
+	struct serial_struct v;
+	int err;
+
+	/* Zero-fill both so no kernel stack data leaks to userspace. */
+	memset(&v, 0, sizeof(v));
+	memset(&v32, 0, sizeof(v32));
+
+	if (!tty->ops->get_serial)
+		return -ENOTTY;
+	err = tty->ops->get_serial(tty, &v);
+	if (!err) {
+		memcpy(&v32, &v, offsetof(struct serial_struct32, iomem_base));
+		/* A pointer that does not fit in 32 bits is replaced with a
+		 * sentinel rather than silently truncated. */
+		v32.iomem_base = (unsigned long)v.iomem_base >> 32 ?
+			0xfffffff : ptr_to_compat(v.iomem_base);
+		v32.iomem_reg_shift = v.iomem_reg_shift;
+		v32.port_high = v.port_high;
+		if (copy_to_user(ss, &v32, sizeof(v32)))
+			err = -EFAULT;
+	}
+	return err;
+}
+static long tty_compat_ioctl(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	struct tty_struct *tty = file_tty(file);
+	struct tty_ldisc *ld;
+	int retval = -ENOIOCTLCMD;
+
+	switch (cmd) {
+	/* Commands whose argument is a pointer: translate via compat_ptr(). */
+	case TIOCOUTQ:
+	case TIOCSTI:
+	case TIOCGWINSZ:
+	case TIOCSWINSZ:
+	case TIOCGEXCL:
+	case TIOCGETD:
+	case TIOCSETD:
+	case TIOCGDEV:
+	case TIOCMGET:
+	case TIOCMSET:
+	case TIOCMBIC:
+	case TIOCMBIS:
+	case TIOCGICOUNT:
+	case TIOCGPGRP:
+	case TIOCSPGRP:
+	case TIOCGSID:
+	case TIOCSERGETLSR:
+	case TIOCGRS485:
+	case TIOCSRS485:
+#ifdef TIOCGETP
+	case TIOCGETP:
+	case TIOCSETP:
+	case TIOCSETN:
+#endif
+#ifdef TIOCGETC
+	case TIOCGETC:
+	case TIOCSETC:
+#endif
+#ifdef TIOCGLTC
+	case TIOCGLTC:
+	case TIOCSLTC:
+#endif
+	case TCSETSF:
+	case TCSETSW:
+	case TCSETS:
+	case TCGETS:
+#ifdef TCGETS2
+	case TCGETS2:
+	case TCSETSF2:
+	case TCSETSW2:
+	case TCSETS2:
+#endif
+	case TCGETA:
+	case TCSETAF:
+	case TCSETAW:
+	case TCSETA:
+	case TIOCGLCKTRMIOS:
+	case TIOCSLCKTRMIOS:
+#ifdef TCGETX
+	case TCGETX:
+	case TCSETX:
+	case TCSETXW:
+	case TCSETXF:
+#endif
+	case TIOCGSOFTCAR:
+	case TIOCSSOFTCAR:
+
+	case PPPIOCGCHAN:
+	case PPPIOCGUNIT:
+		return tty_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+	/* Commands whose argument is an integer (or unused): pass as-is. */
+	case TIOCCONS:
+	case TIOCEXCL:
+	case TIOCNXCL:
+	case TIOCVHANGUP:
+	case TIOCSBRK:
+	case TIOCCBRK:
+	case TCSBRK:
+	case TCSBRKP:
+	case TCFLSH:
+	case TIOCGPTPEER:
+	case TIOCNOTTY:
+	case TIOCSCTTY:
+	case TCXONC:
+	case TIOCMIWAIT:
+	case TIOCSERCONFIG:
+		return tty_ioctl(file, cmd, arg);
+	}
+
+	if (tty_paranoia_check(tty, file_inode(file), "tty_ioctl"))
+		return -EINVAL;
+
+	/* serial_struct needs a full 32->64-bit layout translation. */
+	switch (cmd) {
+	case TIOCSSERIAL:
+		return compat_tty_tiocsserial(tty, compat_ptr(arg));
+	case TIOCGSERIAL:
+		return compat_tty_tiocgserial(tty, compat_ptr(arg));
+	}
+	if (tty->ops->compat_ioctl) {
+		retval = tty->ops->compat_ioctl(tty, cmd, arg);
+		if (retval != -ENOIOCTLCMD)
+			return retval;
+	}
+
+	/* Finally try the line discipline's compat, then native, handler. */
+	ld = tty_ldisc_ref_wait(tty);
+	if (!ld)
+		return hung_up_tty_compat_ioctl(file, cmd, arg);
+	if (ld->ops->compat_ioctl)
+		retval = ld->ops->compat_ioctl(tty, file, cmd, arg);
+	if (retval == -ENOIOCTLCMD && ld->ops->ioctl)
+		retval = ld->ops->ioctl(tty, file,
+				(unsigned long)compat_ptr(cmd), arg);
+	tty_ldisc_deref(ld);
+
+	return retval;
+}
+#endif
+
+/* iterate_fd() callback: return fd+1 when @file refers to tty @t, else 0. */
+static int this_tty(const void *t, struct file *file, unsigned fd)
+{
+	if (likely(file->f_op->read_iter != tty_read))
+		return 0;
+	return file_tty(file) != t ? 0 : fd + 1;
+}
+	
+/*
+ * This implements the "Secure Attention Key" ---  the idea is to
+ * prevent trojan horses by killing all processes associated with this
+ * tty when the user hits the "Secure Attention Key".  Required for
+ * super-paranoid applications --- see the Orange Book for more details.
+ *
+ * This code could be nicer; ideally it should send a HUP, wait a few
+ * seconds, then send a INT, and then a KILL signal.  But you then
+ * have to coordinate with the init process, since all processes associated
+ * with the current tty must be dead before the new getty is allowed
+ * to spawn.
+ *
+ * Now, if it would be correct ;-/ The current code has a nasty hole -
+ * it doesn't catch files in flight. We may send the descriptor to ourselves
+ * via AF_UNIX socket, close it and later fetch from socket. FIXME.
+ *
+ * Nasty bug: do_SAK is being called in interrupt context.  This can
+ * deadlock.  We punt it up to process context.  AKPM - 16Mar2001
+ */
+void __do_SAK(struct tty_struct *tty)
+{
+#ifdef TTY_SOFT_SAK
+	/* "Soft" SAK: just hang the tty up instead of killing its users. */
+	tty_hangup(tty);
+#else
+	struct task_struct *g, *p;
+	struct pid *session;
+	int		i;
+	unsigned long flags;
+
+	if (!tty)
+		return;
+
+	/* Pin the session pid; ctrl_lock guards tty->session. */
+	spin_lock_irqsave(&tty->ctrl_lock, flags);
+	session = get_pid(tty->session);
+	spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+
+	/* Discard all buffered input and output before killing anyone. */
+	tty_ldisc_flush(tty);
+
+	tty_driver_flush_buffer(tty);
+
+	read_lock(&tasklist_lock);
+	/* Kill the entire session */
+	do_each_pid_task(session, PIDTYPE_SID, p) {
+		tty_notice(tty, "SAK: killed process %d (%s): by session\n",
+			   task_pid_nr(p), p->comm);
+		group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_SID);
+	} while_each_pid_task(session, PIDTYPE_SID, p);
+
+	/* Now kill any processes that happen to have the tty open */
+	do_each_thread(g, p) {
+		if (p->signal->tty == tty) {
+			tty_notice(tty, "SAK: killed process %d (%s): by controlling tty\n",
+				   task_pid_nr(p), p->comm);
+			group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_SID);
+			continue;
+		}
+		/* task_lock() stabilises p->files for iterate_fd(). */
+		task_lock(p);
+		i = iterate_fd(p->files, 0, this_tty, tty);
+		if (i != 0) {
+			tty_notice(tty, "SAK: killed process %d (%s): by fd#%d\n",
+				   task_pid_nr(p), p->comm, i - 1);
+			group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_SID);
+		}
+		task_unlock(p);
+	} while_each_thread(g, p);
+	read_unlock(&tasklist_lock);
+	put_pid(session);
+#endif
+}
+
+/* Workqueue callback: run the SAK in process context (see do_SAK below). */
+static void do_SAK_work(struct work_struct *work)
+{
+	struct tty_struct *tty =
+		container_of(work, struct tty_struct, SAK_work);
+	__do_SAK(tty);
+}
+
+/*
+ * The tq handling here is a little racy - tty->SAK_work may already be queued.
+ * Fortunately we don't need to worry, because if ->SAK_work is already queued,
+ * the values which we write to it will be identical to the values which it
+ * already has. --akpm
+ */
+void do_SAK(struct tty_struct *tty)
+{
+	if (!tty)
+		return;
+	/* Punt to process context; do_SAK may be called in interrupt context. */
+	schedule_work(&tty->SAK_work);
+}
+
+EXPORT_SYMBOL(do_SAK);
+
+/* Must put_device() after it's unused! */
+static struct device *tty_get_device(struct tty_struct *tty)
+{
+	/* Look the class device up by dev_t; takes a device reference. */
+	dev_t devt = tty_devnum(tty);
+	return class_find_device_by_devt(tty_class, devt);
+}
+
+
+/**
+ *	alloc_tty_struct
+ *
+ *	This subroutine allocates and initializes a tty structure.
+ *
+ *	Locking: none - tty in question is not exposed at this point
+ */
+
+struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
+{
+	struct tty_struct *tty;
+
+	tty = kzalloc(sizeof(*tty), GFP_KERNEL);
+	if (!tty)
+		return NULL;
+
+	kref_init(&tty->kref);
+	tty->magic = TTY_MAGIC;
+	/* Attach the initial line discipline; failure aborts the alloc. */
+	if (tty_ldisc_init(tty)) {
+		kfree(tty);
+		return NULL;
+	}
+	tty->session = NULL;
+	tty->pgrp = NULL;
+	/* Initialise every lock and waitqueue before the tty is exposed. */
+	mutex_init(&tty->legacy_mutex);
+	mutex_init(&tty->throttle_mutex);
+	init_rwsem(&tty->termios_rwsem);
+	mutex_init(&tty->winsize_mutex);
+	init_ldsem(&tty->ldisc_sem);
+	init_waitqueue_head(&tty->write_wait);
+	init_waitqueue_head(&tty->read_wait);
+	INIT_WORK(&tty->hangup_work, do_tty_hangup);
+	mutex_init(&tty->atomic_write_lock);
+	spin_lock_init(&tty->ctrl_lock);
+	spin_lock_init(&tty->flow_lock);
+	spin_lock_init(&tty->files_lock);
+	INIT_LIST_HEAD(&tty->tty_files);
+	INIT_WORK(&tty->SAK_work, do_SAK_work);
+
+	/* Bind the tty to its driver slot and cache handy values. */
+	tty->driver = driver;
+	tty->ops = driver->ops;
+	tty->index = idx;
+	tty_line_name(driver, idx, tty->name);
+	tty->dev = tty_get_device(tty);
+
+	return tty;
+}
+
+/**
+ *	tty_put_char	-	write one character to a tty
+ *	@tty: tty
+ *	@ch: character
+ *
+ *	Write one byte to the tty using the provided put_char method
+ *	if present. Returns the number of characters successfully output.
+ *
+ *	Note: the specific put_char operation in the driver layer may go
+ *	away soon. Don't call it directly, use this method
+ */
+
+int tty_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	if (tty->ops->put_char)
+		return tty->ops->put_char(tty, ch);
+	/* Fall back to the driver's write() method for a single byte. */
+	return tty->ops->write(tty, &ch, 1);
+}
+EXPORT_SYMBOL_GPL(tty_put_char);
+
+/* The "tty" device class; devices appear under /sys/class/tty. */
+struct class *tty_class;
+
+/* Allocate and register the character device backing @count minors. */
+static int tty_cdev_add(struct tty_driver *driver, dev_t dev,
+		unsigned int index, unsigned int count)
+{
+	int err;
+
+	/* init here, since reused cdevs cause crashes */
+	driver->cdevs[index] = cdev_alloc();
+	if (!driver->cdevs[index])
+		return -ENOMEM;
+	driver->cdevs[index]->ops = &tty_fops;
+	driver->cdevs[index]->owner = driver->owner;
+	err = cdev_add(driver->cdevs[index], dev, count);
+	/* On failure drop the kobject reference taken by cdev_alloc(). */
+	if (err)
+		kobject_put(&driver->cdevs[index]->kobj);
+	return err;
+}
+
+/**
+ *	tty_register_device - register a tty device
+ *	@driver: the tty driver that describes the tty device
+ *	@index: the index in the tty driver for this tty device
+ *	@device: a struct device that is associated with this tty device.
+ *		This field is optional, if there is no known struct device
+ *		for this tty device it can be set to NULL safely.
+ *
+ *	Returns a pointer to the struct device for this tty device
+ *	(or ERR_PTR(-EFOO) on error).
+ *
+ *	This call is required to be made to register an individual tty device
+ *	if the tty driver's flags have the TTY_DRIVER_DYNAMIC_DEV bit set.  If
+ *	that bit is not set, this function should not be called by a tty
+ *	driver.
+ *
+ *	Locking: ??
+ */
+
+struct device *tty_register_device(struct tty_driver *driver, unsigned index,
+				   struct device *device)
+{
+	/* Thin wrapper: no driver data and no extra sysfs attribute groups. */
+	return tty_register_device_attr(driver, index, device, NULL, NULL);
+}
+EXPORT_SYMBOL(tty_register_device);
+
+/* Release callback for devices allocated in tty_register_device_attr(). */
+static void tty_device_create_release(struct device *dev)
+{
+	dev_dbg(dev, "releasing...\n");
+	kfree(dev);
+}
+
+/**
+ *	tty_register_device_attr - register a tty device
+ *	@driver: the tty driver that describes the tty device
+ *	@index: the index in the tty driver for this tty device
+ *	@device: a struct device that is associated with this tty device.
+ *		This field is optional, if there is no known struct device
+ *		for this tty device it can be set to NULL safely.
+ *	@drvdata: Driver data to be set to device.
+ *	@attr_grp: Attribute group to be set on device.
+ *
+ *	Returns a pointer to the struct device for this tty device
+ *	(or ERR_PTR(-EFOO) on error).
+ *
+ *	This call is required to be made to register an individual tty device
+ *	if the tty driver's flags have the TTY_DRIVER_DYNAMIC_DEV bit set.  If
+ *	that bit is not set, this function should not be called by a tty
+ *	driver.
+ *
+ *	Locking: ??
+ */
+struct device *tty_register_device_attr(struct tty_driver *driver,
+				   unsigned index, struct device *device,
+				   void *drvdata,
+				   const struct attribute_group **attr_grp)
+{
+	char name[64];
+	dev_t devt = MKDEV(driver->major, driver->minor_start) + index;
+	struct ktermios *tp;
+	struct device *dev;
+	int retval;
+
+	if (index >= driver->num) {
+		pr_err("%s: Attempt to register invalid tty line number (%d)\n",
+		       driver->name, index);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (driver->type == TTY_DRIVER_TYPE_PTY)
+		pty_line_name(driver, index, name);
+	else
+		tty_line_name(driver, index, name);
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return ERR_PTR(-ENOMEM);
+
+	dev->devt = devt;
+	dev->class = tty_class;
+	dev->parent = device;
+	dev->release = tty_device_create_release;
+	dev_set_name(dev, "%s", name);
+	dev->groups = attr_grp;
+	dev_set_drvdata(dev, drvdata);
+
+	/* Hold the uevent back until the cdev exists, so userspace never
+	 * sees a device node it cannot open yet. */
+	dev_set_uevent_suppress(dev, 1);
+
+	retval = device_register(dev);
+	if (retval)
+		goto err_put;
+
+	if (!(driver->flags & TTY_DRIVER_DYNAMIC_ALLOC)) {
+		/*
+		 * Free any saved termios data so that the termios state is
+		 * reset when reusing a minor number.
+		 */
+		tp = driver->termios[index];
+		if (tp) {
+			driver->termios[index] = NULL;
+			kfree(tp);
+		}
+
+		retval = tty_cdev_add(driver, devt, index, 1);
+		if (retval)
+			goto err_del;
+	}
+
+	/* Device fully wired up: announce it now. */
+	dev_set_uevent_suppress(dev, 0);
+	kobject_uevent(&dev->kobj, KOBJ_ADD);
+
+	return dev;
+
+err_del:
+	device_del(dev);
+err_put:
+	put_device(dev);
+
+	return ERR_PTR(retval);
+}
+EXPORT_SYMBOL_GPL(tty_register_device_attr);
+
+/**
+ * 	tty_unregister_device - unregister a tty device
+ * 	@driver: the tty driver that describes the tty device
+ * 	@index: the index in the tty driver for this tty device
+ *
+ * 	If a tty device is registered with a call to tty_register_device() then
+ *	this function must be called when the tty device is gone.
+ *
+ *	Locking: ??
+ */
+
+void tty_unregister_device(struct tty_driver *driver, unsigned index)
+{
+	device_destroy(tty_class,
+		MKDEV(driver->major, driver->minor_start) + index);
+	/* Per-line cdevs only exist without TTY_DRIVER_DYNAMIC_ALLOC. */
+	if (!(driver->flags & TTY_DRIVER_DYNAMIC_ALLOC)) {
+		cdev_del(driver->cdevs[index]);
+		driver->cdevs[index] = NULL;
+	}
+}
+EXPORT_SYMBOL(tty_unregister_device);
+
+/**
+ * __tty_alloc_driver -- allocate tty driver
+ * @lines: count of lines this driver can handle at most
+ * @owner: module which is responsible for this driver
+ * @flags: some of TTY_DRIVER_* flags, will be set in driver->flags
+ *
+ * This should not be called directly, some of the provided macros should be
+ * used instead. Use IS_ERR and friends on @retval.
+ */
+struct tty_driver *__tty_alloc_driver(unsigned int lines, struct module *owner,
+		unsigned long flags)
+{
+	struct tty_driver *driver;
+	unsigned int cdevs = 1;
+	int err;
+
+	/* An unnumbered node only makes sense for a single-line driver. */
+	if (!lines || (flags & TTY_DRIVER_UNNUMBERED_NODE && lines > 1))
+		return ERR_PTR(-EINVAL);
+
+	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+	if (!driver)
+		return ERR_PTR(-ENOMEM);
+
+	kref_init(&driver->kref);
+	driver->magic = TTY_DRIVER_MAGIC;
+	driver->num = lines;
+	driver->owner = owner;
+	driver->flags = flags;
+
+	/* With TTY_DRIVER_DEVPTS_MEM the tty/termios arrays live elsewhere. */
+	if (!(flags & TTY_DRIVER_DEVPTS_MEM)) {
+		driver->ttys = kcalloc(lines, sizeof(*driver->ttys),
+				GFP_KERNEL);
+		driver->termios = kcalloc(lines, sizeof(*driver->termios),
+				GFP_KERNEL);
+		if (!driver->ttys || !driver->termios) {
+			err = -ENOMEM;
+			goto err_free_all;
+		}
+	}
+
+	if (!(flags & TTY_DRIVER_DYNAMIC_ALLOC)) {
+		driver->ports = kcalloc(lines, sizeof(*driver->ports),
+				GFP_KERNEL);
+		if (!driver->ports) {
+			err = -ENOMEM;
+			goto err_free_all;
+		}
+		/* One cdev per line instead of one for the whole driver. */
+		cdevs = lines;
+	}
+
+	driver->cdevs = kcalloc(cdevs, sizeof(*driver->cdevs), GFP_KERNEL);
+	if (!driver->cdevs) {
+		err = -ENOMEM;
+		goto err_free_all;
+	}
+
+	return driver;
+err_free_all:
+	/* kfree(NULL) is a no-op, so unconditional frees are safe here. */
+	kfree(driver->ports);
+	kfree(driver->ttys);
+	kfree(driver->termios);
+	kfree(driver->cdevs);
+	kfree(driver);
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL(__tty_alloc_driver);
+
+/* kref release callback: undo everything tty_register_driver() set up. */
+static void destruct_tty_driver(struct kref *kref)
+{
+	struct tty_driver *driver = container_of(kref, struct tty_driver, kref);
+	int i;
+	struct ktermios *tp;
+
+	if (driver->flags & TTY_DRIVER_INSTALLED) {
+		for (i = 0; i < driver->num; i++) {
+			/* Drop any termios state saved for this line. */
+			tp = driver->termios[i];
+			if (tp) {
+				driver->termios[i] = NULL;
+				kfree(tp);
+			}
+			if (!(driver->flags & TTY_DRIVER_DYNAMIC_DEV))
+				tty_unregister_device(driver, i);
+		}
+		proc_tty_unregister_driver(driver);
+		/* Dynamic-alloc drivers registered one cdev at index 0. */
+		if (driver->flags & TTY_DRIVER_DYNAMIC_ALLOC)
+			cdev_del(driver->cdevs[0]);
+	}
+	kfree(driver->cdevs);
+	kfree(driver->ports);
+	kfree(driver->termios);
+	kfree(driver->ttys);
+	kfree(driver);
+}
+
+/* Drop a driver reference; the last put frees it via destruct_tty_driver(). */
+void tty_driver_kref_put(struct tty_driver *driver)
+{
+	kref_put(&driver->kref, destruct_tty_driver);
+}
+EXPORT_SYMBOL(tty_driver_kref_put);
+
+/* Install the driver's method table; done before tty_register_driver(). */
+void tty_set_operations(struct tty_driver *driver,
+			const struct tty_operations *op)
+{
+	driver->ops = op;
+};
+EXPORT_SYMBOL(tty_set_operations);
+
+/* Legacy alias for tty_driver_kref_put(). */
+void put_tty_driver(struct tty_driver *d)
+{
+	tty_driver_kref_put(d);
+}
+EXPORT_SYMBOL(put_tty_driver);
+
+/*
+ * Called by a tty driver to register itself.
+ */
+int tty_register_driver(struct tty_driver *driver)
+{
+	int error;
+	int i;
+	dev_t dev;
+	struct device *d;
+
+	/* major == 0 requests a dynamically allocated chrdev region. */
+	if (!driver->major) {
+		error = alloc_chrdev_region(&dev, driver->minor_start,
+						driver->num, driver->name);
+		if (!error) {
+			driver->major = MAJOR(dev);
+			driver->minor_start = MINOR(dev);
+		}
+	} else {
+		dev = MKDEV(driver->major, driver->minor_start);
+		error = register_chrdev_region(dev, driver->num, driver->name);
+	}
+	if (error < 0)
+		goto err;
+
+	/* Dynamic-alloc drivers share a single cdev covering every line. */
+	if (driver->flags & TTY_DRIVER_DYNAMIC_ALLOC) {
+		error = tty_cdev_add(driver, dev, 0, driver->num);
+		if (error)
+			goto err_unreg_char;
+	}
+
+	mutex_lock(&tty_mutex);
+	list_add(&driver->tty_drivers, &tty_drivers);
+	mutex_unlock(&tty_mutex);
+
+	/* Unless devices are registered dynamically, create them all now. */
+	if (!(driver->flags & TTY_DRIVER_DYNAMIC_DEV)) {
+		for (i = 0; i < driver->num; i++) {
+			d = tty_register_device(driver, i, NULL);
+			if (IS_ERR(d)) {
+				error = PTR_ERR(d);
+				goto err_unreg_devs;
+			}
+		}
+	}
+	proc_tty_register_driver(driver);
+	driver->flags |= TTY_DRIVER_INSTALLED;
+	return 0;
+
+err_unreg_devs:
+	/* Unwind only the devices registered so far (i is the failed index). */
+	for (i--; i >= 0; i--)
+		tty_unregister_device(driver, i);
+
+	mutex_lock(&tty_mutex);
+	list_del(&driver->tty_drivers);
+	mutex_unlock(&tty_mutex);
+
+err_unreg_char:
+	unregister_chrdev_region(dev, driver->num);
+err:
+	return error;
+}
+EXPORT_SYMBOL(tty_register_driver);
+
+/*
+ * Called by a tty driver to unregister itself.
+ */
+int tty_unregister_driver(struct tty_driver *driver)
+{
+#if 0
+	/* FIXME */
+	if (driver->refcount)
+		return -EBUSY;
+#endif
+	unregister_chrdev_region(MKDEV(driver->major, driver->minor_start),
+				driver->num);
+	mutex_lock(&tty_mutex);
+	list_del(&driver->tty_drivers);
+	mutex_unlock(&tty_mutex);
+	/* Always succeeds; the memory is freed by the final kref put. */
+	return 0;
+}
+
+EXPORT_SYMBOL(tty_unregister_driver);
+
+/* Return the dev_t of @tty, derived from its driver's major/minor base. */
+dev_t tty_devnum(struct tty_struct *tty)
+{
+	return MKDEV(tty->driver->major, tty->driver->minor_start) + tty->index;
+}
+EXPORT_SYMBOL(tty_devnum);
+
+/* Copy the canonical tty file_operations into @fops. */
+void tty_default_fops(struct file_operations *fops)
+{
+	*fops = tty_fops;
+}
+
+/* devnode mode callback: (TTYAUX,0) and (TTYAUX,2) are world read/write. */
+static char *tty_devnode(struct device *dev, umode_t *mode)
+{
+	if (!mode)
+		return NULL;
+	if (dev->devt == MKDEV(TTYAUX_MAJOR, 0) ||
+	    dev->devt == MKDEV(TTYAUX_MAJOR, 2))
+		*mode = 0666;
+	/* Never overrides the node name, only the mode. */
+	return NULL;
+}
+
+static int __init tty_class_init(void)
+{
+	tty_class = class_create(THIS_MODULE, "tty");
+	if (IS_ERR(tty_class))
+		return PTR_ERR(tty_class);
+	/* Hook the mode callback so device nodes get correct permissions. */
+	tty_class->devnode = tty_devnode;
+	return 0;
+}
+
+postcore_initcall(tty_class_init);
+
+/* 3/2004 jmc: why do these devices exist? */
+/* Backing cdevs for the tty and console devices registered in tty_init(). */
+static struct cdev tty_cdev, console_cdev;
+
+/* sysfs "active" attribute: list the tty names of the enabled consoles. */
+static ssize_t show_cons_active(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct console *cs[16];
+	int i = 0;
+	struct console *c;
+	ssize_t count = 0;
+
+	console_lock();
+	/* Collect enabled consoles that can both write and map to a device. */
+	for_each_console(c) {
+		if (!c->device)
+			continue;
+		if (!c->write)
+			continue;
+		if ((c->flags & CON_ENABLED) == 0)
+			continue;
+		cs[i++] = c;
+		if (i >= ARRAY_SIZE(cs))
+			break;
+	}
+	/* Emit in reverse collection order, space separated, newline last. */
+	while (i--) {
+		int index = cs[i]->index;
+		struct tty_driver *drv = cs[i]->device(cs[i], &index);
+
+		/* don't resolve tty0 as some programs depend on it */
+		if (drv && (cs[i]->index > 0 || drv->major != TTY_MAJOR))
+			count += tty_line_name(drv, index, buf + count);
+		else
+			count += sprintf(buf + count, "%s%d",
+					 cs[i]->name, cs[i]->index);
+
+		count += sprintf(buf + count, "%c", i ? ' ':'\n');
+	}
+	console_unlock();
+
+	return count;
+}
+static DEVICE_ATTR(active, S_IRUGO, show_cons_active, NULL);
+
+/* sysfs attributes attached to the console class device. */
+static struct attribute *cons_dev_attrs[] = {
+	&dev_attr_active.attr,
+	NULL
+};
+
+ATTRIBUTE_GROUPS(cons_dev);
+
+/* The console class device; NULL if creation failed in tty_init(). */
+static struct device *consdev;
+
+/* Wake pollers of the console "active" attribute when the set changes. */
+void console_sysfs_notify(void)
+{
+	if (consdev)
+		sysfs_notify(&consdev->kobj, NULL, "active");
+}
+
+/*
+ * Ok, now we can initialize the rest of the tty devices and can count
+ * on memory allocations, interrupts etc..
+ */
+int __init tty_init(void)
+{
+	tty_sysctl_init();
+	/* Register /dev/tty at (TTYAUX_MAJOR, 0); failure is fatal. */
+	cdev_init(&tty_cdev, &tty_fops);
+	if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
+	    register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0)
+		panic("Couldn't register /dev/tty driver\n");
+	device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 0), NULL, "tty");
+
+	/* Register /dev/console at (TTYAUX_MAJOR, 1), with "active" attr. */
+	cdev_init(&console_cdev, &console_fops);
+	if (cdev_add(&console_cdev, MKDEV(TTYAUX_MAJOR, 1), 1) ||
+	    register_chrdev_region(MKDEV(TTYAUX_MAJOR, 1), 1, "/dev/console") < 0)
+		panic("Couldn't register /dev/console driver\n");
+	consdev = device_create_with_groups(tty_class, NULL,
+					    MKDEV(TTYAUX_MAJOR, 1), NULL,
+					    cons_dev_groups, "console");
+	/* The console device is optional; keep going without it. */
+	if (IS_ERR(consdev))
+		consdev = NULL;
+
+#ifdef CONFIG_VT
+	vty_init(&console_fops);
+#endif
+	return 0;
+}
+
diff --git a/upstream/linux-5.10/include/net/SI/netioc_proc.h b/upstream/linux-5.10/include/net/SI/netioc_proc.h
new file mode 100755
index 0000000..f0c8aa4
--- /dev/null
+++ b/upstream/linux-5.10/include/net/SI/netioc_proc.h
@@ -0,0 +1,492 @@
+/************************************************************************
+* Purpose: interfaces for the Linux network fastnat / fastbr debug and
+*          statistics operations (original header was GBK-encoded).
+* Owner:
+* Backup:
+* Modified:
+* Changes:
+* Version:
+************************************************************************/
+#ifndef _NETIO_PROC_H_
+#define _NETIO_PROC_H_
+
+
+#include <net/SI/net_track.h>
+#include <net/SI/ext_mem.h>
+
+
#define  PC_MAX_NUM   100	/* max tracked PCs (see struct pc_node) */
#define  DEV_NAME_LEN 50	/* generic device-name buffer length */

#define  DEV_NAME_LEN_20 20	/* short device-name buffer length */

/* NOTE(review): may clash with <linux/if_ether.h>'s ETH_ALEN — confirm include order */
#define ETH_ALEN            6

#define SLAB_NUM  21	/* rows in struct slab_info (matches enum slabinfo_file usage) */

#define HASH_ARRAY_COUNT 512	/* buckets reported in struct hash_info */
/* SKB statistics indices (into skbinfo_dbg[]), including anomaly-point counters. */
enum skbinfo_type{
    SKB_TYPE_ALL = 0,     //number of skb structs currently in use
    SKB_TYPE_DATA,        //slab-backed skb data buffers in use; includes SKB_TYPE_TOCP but not SKB_TYPE_FROMCP
    SKB_TYPE_TOCP,        //skbs handed to the CP that are still in use
    SKB_TYPE_FROMCP,      //PSBUFs received from the CP that are still in use
    SKB_DATA_BYTES,        //total data bytes currently allocated (accumulated via ksize)

    //performance-related global counters; the kernel only accumulates occurrences, analysis is done elsewhere
    SKB_QUEUE_STOP,      //packets dropped because of xmit stop, cumulative
    SKB_QUEUE_LOCK,      //packets dropped because of a held queue lock, cumulative
    SKB_COPY_CACHE,      //full copies done in the net_cache source (PPP and fragmentation only); hurts performance
    SKB_IRQ_FREE,           //skbs freed via softirq; performance-related
    SKB_COPY,                   //skbs whose data was copied; performance-related
    SKB_FLOOD,                 //flooded skbs; performance-related
    SKB_ERRFREE,             //erroneous frees in the dev source; includes SKB_QUEUE_STOP and SKB_QUEUE_LOCK counts
    SKB_FRAG,                   //received fragmented packets; performance-related
    SKB_OVER_MTU,           //fastpath failures where the payload exceeded the egress dev MTU
    SKB_LOOP,                   //packets handed to the driver and received back; >0 indicates a driver anomaly
    SKB_ALLOC_FIAL,         //cumulative skb allocation failures (sic: "FIAL")
    SKB_INFO_MAX,
};
+
+//ÍøÂçÄÚºËÔËÐÐʱµÄͳ¼ÆÖµ£¬ÖØµã¹Ø×¢½á¹¹ÌåµÄÉêÇëµã
+enum net_run_info{
+    BR_MAC_CHANGE = 0,        //ÍøÇÅmacµØÖ·±»¸Ä±ä´ÎÊý
+    NEIGH_ALLOC,                    //neighbourÉêÇë´ÎÊý
+    NEIGH_FREE,                        //neighbourÊͷŵãÀÛ¼ÓÖµ
+    BR_NEIGH_VARY,               //ÇŵãµÄ³ö¿ÚdevµÄMACµØÖ·±»¶à¸öPC¹²Ïí
+    CONN_ALLOC,                      //CONNÉêÇë´ÎÊýÀÛ¼ÓÖµ
+    CONN_FREE,                         //CONNÊͷŵãÀÛ¼ÓÖµ
+    BRFDB_ALLOC,                    //ÇŵãÉêÇë´ÎÊýÀÛ¼ÓÖµ
+    DST_ALLOC,                         //dst_entryÉêÇëÀÛ¼ÓÖµ
+    DST_FREE,                            //dst_entryÊͷŵãÀÛ¼ÓÖµ
+    HH_UPDATE,                         //HH¶þ²ãMACÍ·¸üÐÂÀÛ¼ÓÖµ
+    RT_CACHE_INVALID,               //Çå¿Õ·ÓÉcacheµÄÀÛ¼ÓÖµ
+    RT_HASH_ADD,                        //ÐÂÔört_hash_table½ÚµãµÄÀÛ¼ÓÖµ
+    RT_HASH_DEL,                        //ɾ³ýrt_hash_table½ÚµãµÄÀÛ¼ÓÖµ
+    SSMAC_CHANGE_INDEV,                //ͬһԴMACµØÖ·Èë¿Údev¸Ä±äµÄ´ÎÊý
+    NET_INFO_MAX,
+};
/* User-visible device operation state (reported via ioctl_dev_netstats.net_flag). */
enum dev_opt_state{
    DEV_UNOPT=0,    //initial value: the user has performed no operation yet
    DEV_NOPRESENT,  //matches a !netif_device_present() condition: driver lower layer not ready
    DEV_OPENED,     //dev is opened
    DEV_OPEN_FAIL,      //open  fail
    DEV_CLOSED,          //dev is closed
};
+
/* Row indices of the slab usage table (struct slab_info / slab_count). */
enum slabinfo_file{
    FAST_SLAB = 0,
    SKB_SLAB,
    BRFDB_SLAB,
    DST_SLAB,
    FIB_TRIE_SLAB,
    FLOW_SLAB,
    INETPEER_SLAB,
    INET_HASHTABLES_SLAB,
    INET_TIMEWAIT_SOCK_SLAB,
    MYSOCKET_SLAB,
    NF_CONNTRACK_CORE_SLAB,
    NF_CONNTRACK_EXCEPT_SLAB,
    REQUEST_SOCK_SLAB,
    SOCK_SLAB,
    SOCKET_SLAB,
    XFRM6_TUNNEL_SLAB,
    XT_HASHLIMIT_SLAB,
    SOCK_ALLOC_PAGES,           //exact free point unknown, so only ++; the -- should come from put_page
    IP6_OUTPUT_ALLOC_PAGES,     //exact free point unknown, so only ++; the -- should come from put_page
    IP_OUTPUT_ALLOC_PAGES,      //exact free point unknown, so only ++; the -- should come from put_page
    SKB_ALLOC_PAGES,            //exact free point unknown, so only ++; the -- should come from put_page
};
+
+
+/*±¾µØTCPͳ¼ÆÐÅÏ¢*/
+enum tcp_stat_info
+{
+    TCP_RECV_PKTS = 0,       /*½ÓÊÕµÄTCPÊýÄ¿*/
+    TCP_SEND_PKTS,           /*·¢Ë͵ÄTCPÊýÄ¿*/
+    TCP_RETRANS_PKTS,        /*·¢Ë͵ÄÖØ´«TCPÊýÄ¿*/
+    TCP_RECV_DROPS,          /*½ÓÊն˵ÄTCP¶ª°üÊý*/
+    TCP_SEND_DROPS,          /*·¢ËͶ˵ÄTCP¶ª°üÊý*/
+    TCP_RST_SEND_NUM,        /*·¢Ë͵ÄRSTÊý*/
+    TCP_RST_RECV_NUM,        /*½ÓÊÕµÄRSTÊý*/
+    TCP_STATS_MAX,
+};
+
+
/* Snapshot of the local TCP counters (mirrors enum tcp_stat_info, field for field). */
struct tcp_sock_stat
{
    unsigned long tcp_recv_num;      /* TCP packets received */
    unsigned long tcp_send_num;      /* TCP packets sent */
    unsigned long tcp_retrans_num;   /* retransmissions sent */
    unsigned long tcp_recv_drops;    /* receive-side drops */
    unsigned long tcp_send_drops;    /* send-side drops */
    unsigned long tcp_rst_send;      /* RSTs sent */
    unsigned long tcp_rst_recv;      /* RSTs received */
};
+
+/****×ÊԴʹÓü°ÉÏÏÞÐÅÏ¢£¬ËùÓбê×¼ÄÚºËÓÐÉÏÏÞÇé¿öµÄ£¬½ÔÐè¼ì²â£¬ÒÔ·ÀÖ¹Òì³£·¢Éú****/
+struct net_max_check_msg
+{
+    /*nf_conntrack*/
+    unsigned long nf_conntrack_max;  //nf_conntrack_max = 4832
+    unsigned long nf_conntrack_now;  //net->ct.count
+    /*enqueue*/
+    int             netdev_max_backlog;    //netdev_max_backlog=1000
+    int          input_queue_len;          //²Î¿´enqueue_to_backlog½Ó¿ÚʵÏÖ
+    int          rx_dropped;                 //ÒòΪÈë¶ÓÁÐÒÑÂúÔì³ÉµÄÈë¶ÓÁжª°üµÄÀÛ¼ÓÖµ
+    /*ÐÔÄÜÏà¹Ø*/
+    int          fastnat_link_max;                //nf_conntrack_max
+    int          fastnat_link_now;                //working_list.count
+    int          fast6_link_max;                  //nf_conntrack_max
+    int          fast6_link_now;                  //working_list6.count
+    
+    /*ÍøÂçÄÚºËÔËÐÐʱµÄͳ¼ÆÖµ£¬ÖØµã¹Ø×¢½á¹¹ÌåµÄÉêÇëµã*/
+    unsigned long br_mac_change;          //ÍøÇÅmacµØÖ·±»¸Ä±ä´ÎÊý
+    unsigned long neigh_alloc;                   //neighbourÉêÇë´ÎÊý
+    unsigned long neigh_free;                      //neighbourÊͷŵãÀÛ¼ÓÖµ
+    unsigned long br_neigh_vary;                //ÇŵãµÄ³ö¿ÚdevµÄmacµØÖ·±»¶à¸öpc¹²Ïí
+    unsigned long conn_alloc;                    //connÉêÇë´ÎÊýÀÛ¼ÓÖµ
+    unsigned long conn_free;                          //connÊͷŵãÀÛ¼ÓÖµ
+    unsigned long brfdb_alloc;                    //ÇŵãÉêÇë´ÎÊýÀÛ¼ÓÖµ
+    unsigned long dst_alloc;                          //dst_entryÉêÇëÀÛ¼ÓÖµ
+    unsigned long dst_free;                            //dst_entryÊͷŵãÀÛ¼ÓÖµ
+    unsigned long hh_update;                          //hh¶þ²ãmacÍ·¸üÐÂÀÛ¼ÓÖµ
+    unsigned long rt_cache_invalid;                           //Çå¿Õ·ÓÉcacheµÄÀÛ¼ÓÖµ
+    unsigned long rt_hash_add;                        //ÐÂÔört_hash_table½ÚµãµÄÀÛ¼ÓÖµ
+    unsigned long rt_hash_del;                        //ɾ³ýrt_hash_table½ÚµãµÄÀÛ¼ÓÖµ
+    unsigned long ssmac_change_indev;           //ͬһԴMACµØÖ·Èë¿Údev¸Ä±äµÄ´ÎÊý
+};
+
+/*************************  SKBÏà¹ØÐÅÏ¢£¬°üÀ¨Í³¼ÆÐÅÏ¢ºÍfastÐÅÏ¢ ***********************/
+struct skb_and_fast_msg
+{
+    int  skb_num4;              //½ÓÊÕµ½µÄV4Êý¾Ý°ü
+    int  skb_num6;              //½ÓÊÕµ½µÄV6Êý¾Ý°ü
+    int  skb_big_num;           //len³¤¶È³¬¹ý1000µÄÊý¾Ý°ü£¬º¬V4ºÍV6
+    int  skb_small_num;         //len³¤¶ÈСÓÚ100µÄÊý¾Ý°ü£¬º¬V4ºÍV6
+    int  skb_bytes4;            //½ÓÊÕµ½µÄV4Êý¾Ý°ü×Ö½ÚÊý
+    int  skb_bytes6;            //½ÓÊÕµ½µÄV6Êý¾Ý°ü×Ö½ÚÊý
+    int  skb_unknown;           //½ÓÊÕµ½µÄδ֪ЭÒéÊý¾Ý°ü£¬°üÀ¨ARPµÈ·ÇV4ºÍV6µÄ±¨ÎÄ
+    int  skb_tcpnum;            //½ÓÊÕµ½µÄtcpÊý¾Ý°ü£¬º¬V4ºÍV6£¬µ«²»º¬fastbrµÄ±¨ÎÄ
+    int  skb_udpnum;            //½ÓÊÕµ½µÄudpÊý¾Ý°ü£¬º¬V4ºÍV6£¬µ«²»º¬fastbrµÄ±¨ÎÄ
+    int  broadcast_num4;        //½ÓÊÕµ½µÄV4¹ã²¥°ü
+    int  broadcast_num6;        //½ÓÊÕµ½µÄV6¹ã²¥°ü
+    int  multicast_num4;        //½ÓÊÕµ½µÄV4×é²¥±¨
+    int  multicast_num6;        //½ÓÊÕµ½µÄV6×é²¥±¨
+    int  fastnat_num;           //fastnat³É¹¦µÄ±¨ÎÄ
+    int  fast6_num;             //fast6³É¹¦µÄ±¨ÎÄ
+    int  fastbr_num;            //fastbr³É¹¦µÄ±¨ÎÄ
+    int  fastnat_level;         //²Î¿´FAST_NET_DEVICE
+    int  fastbr_level;          //²¼¶ûÀàÐÍ
+    //ÏÂÃæ¼¸¸öֵΪÀÛ¼ÓÖµ£¬ÐÔÄÜÏà¹Ø
+    int  irqfree_num;           //ͨ¹ýÈíÖжÏÊͷŵÄskb£¬ÐÔÄÜÏà¹Ø
+    int  skbcopy_num;           //Êý¾Ý¿½±´µÄskb£¬ÐÔÄÜÏà¹Ø
+    int  cache_copy;             //net_cacheÔ´Îļþ½øÐп½±´µÄÀÛ»ý¼ÆÊý£¬ÐÔÄÜÏà¹Ø
+    int  skbflood_num;          //·ººéµÄskb£¬ÐÔÄÜÏà¹Ø
+    int  errfree_num;          //devÔ´ÎļþÖдíÎóÊͷŵÄskb¼ÆÊý£¬°üÀ¨SKB_QUEUE_STOPºÍSKB_QUEUE_LOCKÁ½¸ö¼ÆÊýÖµ
+    int  frag_num;              //½ÓÊÕµ½µÄ·ÖƬ±¨ÎÄ£¬ÐÔÄÜÏà¹Ø
+    int  mtu_num;               //fastÖÐÊý¾Ý³¤¶È´óÓÚ³ö¿Údev MTUʱ£¬fastʧ°Ü¸öÊýͳ¼Æ
+    int  fast_loop;             //ͶµÝ¸øÇý¶¯£¬ÓÖ´ÓÇý¶¯ÊÕµ½µÄÒì³£»Ø»·¸öÊýͳ¼Æ
+    int  skb_alloc_fail ;        //ÉêÇëskbʧ°ÜµÄ¸öÊýÀÛ¼ÓÖµ
+    int  xmit_lock_num;        //xmit_lock_owner±»Ëø×¡Ôì³ÉµÄ¶ª°üµÄÀÛ¼Ó¼ÆÊý£¬Çý¶¯Ôì³ÉµÄ
+    int  xmit_stop_num;      //!netif_xmit_stopped(txq)Ôì³ÉµÄ¶ª°üµÄÀÛ¼Ó¼ÆÊý£¬Çý¶¯Ôì³ÉµÄ
+    int  br_mac_change_num;      //ÍøÇÅmacµØÖ·±»¸Ä±ä´ÎÊý
+    int  fast_tcpdump_num;      //fast×¥°ü¸öÊý
+    int  fast_switch;
+    int  fast_local4_rcv_num;       //±¾µØfast_local4³É¹¦½ÓÊÕ±¨ÎÄ
+    int  fast_local6_rcv_num;       //±¾µØfast_local6³É¹¦½ÓÊÕ±¨ÎÄ
+    int  fast_local4_output_num;    //±¾µØfast_local4³É¹¦·¢Ëͱ¨ÎÄ
+    int  fast_local6_output_num;    //±¾µØfast_local6³É¹¦·¢Ëͱ¨ÎÄ
+};
+
/* Live SKB accounting snapshot (values mirror enum skbinfo_type counters). */
struct skb_using_msg
{
    unsigned long skb_all;        /* skbs currently in use */
    unsigned long skb_tocp;       /* skbs handed to the CP, still in use */
    unsigned long skb_fromcp;     /* PSBUFs received from the CP, still in use */
    unsigned long skb_data_num;   /* slab-backed data buffers in use */
    unsigned long skb_data_size;  /* total data bytes allocated */
    unsigned long skb_stop;       /* drops due to xmit stop, cumulative */
    unsigned long skb_lock;       /* drops due to a held queue lock, cumulative */
    unsigned long skb_panic;      /* NOTE(review): semantics not visible here — confirm */
    unsigned long skb_fail;       /* skb allocation failures, cumulative */
};
+
+/*************************  ½á¹¹Ì嶨Òå    DEV  ***********************/
+//ÍøÂçÌí¼Ó£¬Í³¼ÆÍøÂç²ãÃæÊÕ·¢°üÇé¿ö
+struct net_dev_skbinfo {
+    unsigned long    rx_packets;  //Çý¶¯·¢ËÍÀ´µÄÊý¾Ý°ü¸öÊý£¬ÔÚnetif_rxÍ·ÀÛ¼Ó
+    unsigned long    tx_packets;  //·¢Ë͸øÇý¶¯µÄÊý¾Ý°ü¸öÊý£¬ÔÚdev_queue_xmitÍ·ÀÛ¼Ó
+    unsigned long    rx_bytes;    //×Ö½ÚÊý
+    unsigned long    tx_bytes;    //×Ö½ÚÊý
+    unsigned long    rx_dropped;  //netif_rxÄÚ²¿Á÷³ÌÖÐËùÓÐÒì³£ÊÍ·ÅskbµÄÀÛ¼Ó£¬Èç´ïµ½netdev_max_backlogÈë¶ÓÁÐÉÏÏÞ¶ø¶ª°ü
+    unsigned long    tx_dropped;  //dev_queue_xmitÄÚ²¿Á÷³ÌÖÐËùÓÐÒì³£ÊÍ·ÅskbµÄÀÛ¼Ó£¬Èç(txq->xmit_lock_owner == cpu)Ôì³ÉµÄ¶ª°ü
+};
+
+//ÍøÂçÌí¼Ó£¬connÊÕ·¢°üÇé¿ö
+struct conn_skbinfo {
+    unsigned long    packets;  //Êý¾Ý°ü¸öÊý
+    unsigned long    bytes;    //×Ö½ÚÊý
+};
+
+/* ¸Ã½á¹¹ÌåÖеÄËùÓÐÐÅÏ¢½ÔÓÉÇý¶¯¸³Öµ£¬ÍøÂç×é²»»á¸³Öµ */
+//Õâ¸ö½á¹¹Ì屨ÐëºÍ <linux/netdevice.h>ÖеÄnet_device_stats±£³ÖÒ»Ö   
+struct net_dev_stats {
+            unsigned long    rx_packets;
+            unsigned long    tx_packets;
+            unsigned long    rx_bytes;
+            unsigned long    tx_bytes;
+            unsigned long    rx_errors;   //Ðè¼à¿Ø
+            unsigned long    tx_errors;   //Ðè¼à¿Ø
+            unsigned long    rx_dropped;   //Ðè¼à¿Ø
+            unsigned long    tx_dropped;   //Ðè¼à¿Ø
+            unsigned long    multicast;
+            unsigned long    collisions;
+            unsigned long    rx_length_errors;   //Ðè¼à¿Ø
+            unsigned long    rx_over_errors;   //Ðè¼à¿Ø
+            unsigned long    rx_crc_errors;   //Ðè¼à¿Ø
+            unsigned long    rx_frame_errors;   //Ðè¼à¿Ø
+            unsigned long    rx_fifo_errors;   //Ðè¼à¿Ø
+            unsigned long    rx_missed_errors;   //Ðè¼à¿Ø
+            unsigned long    tx_aborted_errors;   //Ðè¼à¿Ø
+            unsigned long    tx_carrier_errors;   //Ðè¼à¿Ø
+            unsigned long    tx_fifo_errors;   //Ðè¼à¿Ø
+            unsigned long    tx_heartbeat_errors;   //Ðè¼à¿Ø
+            unsigned long    tx_window_errors;   //Ðè¼à¿Ø
+            unsigned long    rx_compressed;
+            unsigned long    tx_compressed;         
+};
+
+struct ioctl_dev_netstats
+{
+    char dev_name[20];
+    struct net_dev_skbinfo  stats_dbg;  //ÍøÂçÌí¼ÓµÄ£¬Í³¼ÆÍøÂç²ãÃæÊÕ·¢°üÇé¿ö
+    struct net_dev_stats  stats;     //Êý¾ÝÊÕ·¢Í³¼ÆÖµ£¬¸Ãֵȫ²¿ÓÉÇý¶¯¸³ÖµµÄ£¬ÍøÂç²»¸³Öµ
+    unsigned int        flags;           //IFF_UPµÈ
+    unsigned char       operstate;    //ĿǰûɶÓÃ
+    unsigned long       state;           //µ×²ãÇý¶¯×´Ì¬£¬__LINK_STATE_NOCARRIER
+    int                 net_flag;        //Óû§ifconfigµÄ½á¹û״̬£¬ÈçDEV_OPEN_FAIL
+    unsigned long       que_state;       //¶ÓÁÐ״̬£¬¶ÔÓ¦ dev->_tx[0].state£¬Èç__QUEUE_STATE_DRV_XOFF
+    unsigned int        num_tx_queues;   //TX¶ÓÁÐÊýÁ¿£¬¶àÊýÇý¶¯Ê¹ÓÃΨһ¶ÓÁУ¬ÔòֵΪ1
+};
+
+
+/*************************  ½á¹¹Ì嶨Òå    OTHER  ***********************/
+struct pc_info
+{  
+    unsigned char mac_addr[6];
+    char dev_name[DEV_NAME_LEN];
+};
+
+struct pc_node
+{
+    unsigned int num;
+    struct pc_info info[PC_MAX_NUM];
+};
+
+struct leak_info
+{  
+    void *addr;
+    int user_num;  //µ±Ç°data»òskb±»Ê¹ÓõļÆÊýÖµ£¬°éËæskbÖеÄusersºÍdataref¶ø±ä»¯
+    int track_num; //¼Ç¼ÒѼǼµ½Êý×é¹ì¼£µÄϱêË÷Òý
+    char func_track[10][100];//º¯Êý»ØËݹ켣
+};
+
+struct slab_info
+{
+    int num[SLAB_NUM][2];
+};
+
+ 
+struct hash_info
+{
+    int max_hash_size;
+    int current_hash_num;
+    int hash[HASH_ARRAY_COUNT][2];
+    int current_array_size;
+};
+
+struct ptype_info
+{
+    unsigned long ptype_all[5];
+    unsigned long ptype_base[15];
+};
+
+struct pkt_lost_stats
+{
+    unsigned int send_drops;
+    unsigned int send_drop_bytes;
+    unsigned int recv_drops;
+    unsigned int recv_drop_bytes;
+    unsigned int total_packets;
+    unsigned int total_bytes;
+};
+
+struct pkt_lost_info
+{
+    struct pkt_lost_stats stats[2];
+};
+
+typedef struct
+{
+    char usb[DEV_NAME_LEN_20];
+    char ps[DEV_NAME_LEN_20];
+    char wifi_wan[DEV_NAME_LEN_20];
+    char wifi_lan[DEV_NAME_LEN_20];
+    char eth_wan[DEV_NAME_LEN_20];
+    char eth_lan[DEV_NAME_LEN_20];
+    char ps_ext1[DEV_NAME_LEN_20];
+    char ps_ext2[DEV_NAME_LEN_20];
+    char ps_ext3[DEV_NAME_LEN_20];
+    char ps_ext4[DEV_NAME_LEN_20];
+} net_dbg_dev_info_t;
+
+struct time_list{
+    struct timeval tv;
+    struct list_head packet_list;
+};
+
+struct net_debug_packet_list{
+    struct list_head list;
+    struct list_head time;
+    int pid;
+	int tgid;
+	char pname[DEV_NAME_LEN];
+    int count;
+};
+
+/******************************±äÁ¿ÉùÃ÷***********************/
+/******************************±äÁ¿ÉùÃ÷***********************/
+/******************************±äÁ¿ÉùÃ÷***********************/
+extern int leak_set;      // 1±íʾ¸ú×Ùskb¼°dataÉêÇëÊͷŵ㣻2±íʾ¸ú×Ùusers£¬ÒÔ¼ì²âΪºÎskbʼÖÕ²»ÄÜÊÍ·Å£¬ÔÝʱ¸Ã¹¦ÄÜÉв»¿ÉÓÃ
+extern int leak_list_max; // ÈÝÐí»º´æµÄ´ýÊͷŵÄskb¼°dataµÄ¸öÊý£¬¿Éµ÷Õû£»
+extern int track_max;     //¹ì¼£¸ú×ÙÊý×éµÄÉÏÏÞ£¬½öµ±user++--ʱ²ÅÐèÒªÀ©´ó£¬·ñÔò2¼´¿É£»
+extern int stack_lenmax;  //Õ»º¯Êý¹ì¼£µÄ×Ö·û¸öÊýÉÏÏÞ£»
+extern int leak_full_panic;
+extern unsigned long  now_time; //µ±Ç°Ê±¿Ìµã
+extern spinlock_t  leak_lock;   //·ÀÖ¹ÔÚbhÖб»µ÷Óã¬Ê¹ÓÃbhËø
+
+
+extern struct leak_list data_leak[TRACK_END];
+extern struct leak_list data_free[TRACK_END];//·Ö±ð¶ÔÓ¦ÕýÔÚʹÓõÄÊý¾ÝºÍÒѾ­ÊͷŵÄÊý¾ÝÁ´±í
+extern void *data_head[TRACK_END];//ÿ¸öÄÚ´æ¼à¿ØÀàÐ͵ijõʼ»¯Ê×µØÖ·£¬È·±£Á´±íµÄÊý¾ÝÇøÁ¬Ðø£¬ÒÔ±ãramdumpʱֱ½ÓËÑË÷
+extern int init_finish ;//Á´±í³õʼ»¯±êÖ¾
+
+/*dump stkÓõ½µÄÏà¹Ø±äÁ¿*/
+extern unsigned int skb_dump_len;
+extern char skb_dump_str[];
+
+/*ºË¼äÖØ¸´Êͷżì²â¿ª¹Ø*/
+extern int set_psbufleak ;  
+extern int set_extskbleak ;
+
+extern unsigned long skbinfo_dbg[SKB_INFO_MAX];
+extern unsigned long netruninfo_dbg[NET_INFO_MAX]; 
+extern unsigned char br_ipchange_flag; //br0 ip´Û¸Ä¶ÏÑÔ
+extern int set_tcpdump; //¶¨µã×¥°ü¿ª¹Ø
+
+extern unsigned char ignoremac[ETH_ALEN];
+
+/*¶Ô±¾µØTCP½øÐÐÏà¹ØÍ³¼Æ*/
+extern unsigned long tcp_stats_dbg[TCP_STATS_MAX];
+
+extern char br_name[];
+extern char ps_name[];
+extern char usb_name[];
+extern char ppp_name[];
+
+
//used for packet modelling and performance statistics; the performance-related
//counters in the global net info must be reflected here as well
extern  int  skb_num4;                  //IPv4 packets received
extern  int  skb_num6;                  //IPv6 packets received
extern  int  skb_big_num;               //packets with len > 1000, IPv4 + IPv6
extern  int  skb_small_num;             //packets with len < 100, IPv4 + IPv6
extern  int  skb_bytes4;                //IPv4 bytes received
extern  int  skb_bytes6;                //IPv6 bytes received
extern  int  skb_unknown;               //unknown-protocol packets received, including ARP
extern  int  skb_tcpnum;                //TCP packets received, v4+v6
extern  int  skb_udpnum;                //UDP packets received, v4+v6
extern  int  broadcast_num4;            //IPv4 broadcasts received
extern  int  broadcast_num6;            //IPv6 broadcasts received
extern  int  multicast_num4;            //IPv4 multicasts received
extern  int  multicast_num6;            //IPv6 multicasts received
extern  int  fastnat_num;               //packets successfully handled by fastnat
extern  int  fast6_num;                 //packets successfully handled by fast6
extern  int  fastbr_num;                //packets successfully handled by fastbr
extern  int  fast_local4_rcv_num;       //packets successfully received via local fast_local4
extern  int  fast_local6_rcv_num;       //packets successfully received via local fast_local6
extern  int  fast_local4_output_num;    //packets successfully sent via local fast_local4
extern  int  fast_local6_output_num;    //packets successfully sent via local fast_local6
extern  int  fast_tcpdump_num;          //packets captured on the fastpath

extern int double_mac;

extern int net_debug_ping;               //in-house feature: record loss/latency of ping packets
extern int net_debug_perf;               //in-house feature: record loss/latency of tcp/udp packets; needs the in-house app on the server side

//slab memory usage statistics; generic slab pools such as kmalloc are not covered
extern struct slab_info slab_count;

//list of skbs sent to the CP and awaiting release; used for log tracking to prevent inter-core memory leaks
extern struct ext_list toCp_listlog[MAX_EXT_MEM_HASH];

//list of psbuf info sent from the CP to the AP; used for log tracking to prevent inter-core memory leaks
extern struct ext_list fromCp_list[MAX_EXT_MEM_HASH];

/* bump one local TCP statistic (index from enum tcp_stat_info) */
#define TCP_PKT_STATS_INC(_mod)    tcp_stats_dbg[_mod]++
+
+/******************************Íⲿ½Ó¿ÚÉùÃ÷***********************/
+/******************************Íⲿ½Ó¿ÚÉùÃ÷***********************/
+/******************************Íⲿ½Ó¿ÚÉùÃ÷***********************/
+extern void skbinfo_add(unsigned char *addr,unsigned int skb_type);
+extern void skbinfo_del(unsigned char *addr,unsigned int skb_type);
+extern void netruninfo_add(unsigned char *addr,unsigned int info_type);
+extern void netruninfo_del(unsigned char *addr,unsigned int info_type);
+
+extern int get_skbcnt(unsigned long arg);
+extern int get_dev_info(unsigned long arg);
+extern int get_skb_using(unsigned long arg);
+extern int network_get_pcmac(unsigned long arg);
+extern int get_kernelparam(unsigned long arg);
+extern int get_slab_info(unsigned long arg);
+extern int get_hash_info(unsigned long arg);
+
+extern int set_fastnat_level(void *arg);
+extern int set_fastbr_level(void *arg);
+extern int set_fast_debug_panic(void *arg);
+extern int set_fast_dev_xmit(void *arg);
+extern int set_ackdrop(void *arg);
+extern int set_dumpflag(void *arg);
+extern int set_skb_dump(unsigned long arg);
+extern int set_print_opt(void *arg);
+extern int set_sq_tcpdump(void *arg);
+extern int set_leak(void *arg);
+extern int set_max(unsigned long arg);
+extern int set_stacklenmax(unsigned long arg);
+extern int set_trackmax(unsigned long arg);
+extern int set_tcpdump_opt(unsigned long arg);
+extern int set_br_name(void *arg);
+extern int set_ps_name(void *arg);
+extern int set_usb_name(void *arg);
+extern int set_ppp_name(void *arg);
+extern int set_brip(unsigned long arg);
+extern int set_kernelparam(unsigned long arg);
+extern int set_errno_procname(void *arg);
+extern int get_neigh_ip(unsigned long arg);
+extern int get_skb_fast(unsigned long arg);
+extern int get_max_msg(unsigned long arg);
+extern int get_ptype(unsigned long arg);
+extern int get_process_info(void *arg);
+extern void netslab_inc(int i);
+extern void netslab_dec(int i);
+extern void track_netlink(struct sk_buff *skb,u32 group);
+//extern void record_app_atcive_net();
+
+int get_pkt_lost_info(unsigned long arg);
+
+int get_tcp_stat_info(unsigned long arg);
+
+#endif //end _NETIO_FASTINFO_H_
+
+
diff --git a/upstream/linux-5.10/include/net/netfilter/nf_conntrack.h b/upstream/linux-5.10/include/net/netfilter/nf_conntrack.h
new file mode 100755
index 0000000..04ab917
--- /dev/null
+++ b/upstream/linux-5.10/include/net/netfilter/nf_conntrack.h
@@ -0,0 +1,368 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Connection state tracking for netfilter.  This is separated from,
+ * but required by, the (future) NAT layer; it can also be used by an iptables
+ * extension.
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *	- generalize L3 protocol dependent part.
+ *
+ * Derived from include/linux/netfiter_ipv4/ip_conntrack.h
+ */
+
+#ifndef _NF_CONNTRACK_H
+#define _NF_CONNTRACK_H
+
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+
+#include <linux/netfilter/nf_conntrack_common.h>
+#include <linux/netfilter/nf_conntrack_tcp.h>
+#include <linux/netfilter/nf_conntrack_dccp.h>
+#include <linux/netfilter/nf_conntrack_sctp.h>
+#include <linux/netfilter/nf_conntrack_proto_gre.h>
+#include <net/SI/netioc_proc.h>
+
+#include <net/netfilter/nf_conntrack_tuple.h>
+#ifdef CONFIG_FASTNAT_MODULE
+#include <net/SI/pkt_lost_track.h>
+#endif
+
+
+
/* Per-conntrack private state for UDP. */
struct nf_ct_udp {
	/* jiffies timestamp used for UDP stream detection — presumably when the
	 * flow was last classified as a stream; confirm against nf_conntrack_proto_udp.c */
	unsigned long	stream_ts;
};
+
/* per conntrack: protocol private data (one member active per entry) */
union nf_conntrack_proto {
	/* insert conntrack proto private data here */
	struct nf_ct_dccp dccp;
	struct ip_ct_sctp sctp;
	struct ip_ct_tcp tcp;
	struct nf_ct_udp udp;
	struct nf_ct_gre gre;
	unsigned int tmpl_padto;	/* padding sizer for template conntracks */
};
+
union nf_conntrack_expect_proto {
	/* insert expect proto private data here */
};

/* Per-netns user counts for the conntrack hook families. */
struct nf_conntrack_net {
	unsigned int users4;		/* IPv4 hook users */
	unsigned int users6;		/* IPv6 hook users */
	unsigned int users_bridge;	/* bridge hook users */
};
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+
+#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
+#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
#ifdef CONFIG_FASTNAT_MODULE
/* Vendor fastpath extension embedded in struct nf_conn.
 * The union overlays the forwarding case (cached dst / bridge port per
 * direction) with the local-delivery case (cached socket) — presumably a
 * conntrack is only ever one of the two; confirm against the fastnat module. */
struct fast_ct_ext{
    union {
        struct {
            struct dst_entry  __rcu *fast_dst[IP_CT_DIR_MAX];
            struct net_device __rcu *fast_brport[IP_CT_DIR_MAX];
        };
        struct sock __rcu *sk;
    };
    unsigned char isFast;	/* non-zero once the flow is on the fastpath */
};
#endif
+
struct nf_conn {
	/* Usage count in here is 1 for hash table, 1 per skb,
	 * plus 1 for any connection(s) we are `master' for
	 *
	 * Hint, SKB address this struct and refcnt via skb->_nfct and
	 * helpers nf_conntrack_get() and nf_conntrack_put().
	 * Helper nf_ct_put() equals nf_conntrack_put() by dec refcnt,
	 * beware nf_ct_get() is different and don't inc refcnt.
	 */
	struct nf_conntrack ct_general;

	spinlock_t	lock;
	/* jiffies32 when this ct is considered dead */
	u32 timeout;

#ifdef CONFIG_NF_CONNTRACK_ZONES
	struct nf_conntrack_zone zone;
#endif
	/* XXX should I move this to the tail ? - Y.K */
	/* These are my tuples; original and reply */
	struct nf_conntrack_tuple_hash tuplehash[IP_CT_DIR_MAX];

	/* Have we seen traffic both ways yet? (bitset) */
	unsigned long status;

	u16		cpu;
	possible_net_t ct_net;

#if IS_ENABLED(CONFIG_NF_NAT)
	struct hlist_node	nat_bysource;
#endif
	/* all members below initialized via memset */
	struct { } __nfct_init_offset;

	/* If we were expected by an expectation, this will be it */
	struct nf_conn *master;

#if defined(CONFIG_NF_CONNTRACK_MARK)
	u_int32_t mark;
#endif

#ifdef CONFIG_NF_CONNTRACK_SECMARK
	u_int32_t secmark;
#endif

	/* Extensions */
	struct nf_ct_ext *ext;

	/* Storage reserved for other modules, must be the last member */
	union nf_conntrack_proto proto;
	/* NOTE(review): vendor additions below break the upstream "proto must be
	 * the last member" invariant stated above — any code relying on the
	 * position of 'proto' should be audited. */
#ifdef CONFIG_FASTNAT_MODULE
	struct fast_ct_ext fast_ct;			/* fastpath cache, see struct fast_ct_ext */
	struct conn_seq_track conn_pktloss[IP_CT_DIR_MAX];	/* per-direction loss tracking */
#endif

	struct conn_skbinfo packet_info[IP_CT_DIR_MAX];	/* per-direction packet/byte counters */
	struct net_device* indev[IP_CT_DIR_MAX];	/* ingress device per direction — no refcount visible here; confirm lifetime */
	struct net_device* outdev[IP_CT_DIR_MAX];	/* egress device per direction — no refcount visible here; confirm lifetime */
};
+
/* Map a tuple-hash node back to its owning conntrack entry.
 * Uses the node's own direction to pick the right tuplehash[] slot for
 * the container_of() offset computation. */
static inline struct nf_conn *
nf_ct_tuplehash_to_ctrack(const struct nf_conntrack_tuple_hash *hash)
{
	return container_of(hash, struct nf_conn,
			    tuplehash[hash->tuple.dst.dir]);
}
+
+static inline u_int16_t nf_ct_l3num(const struct nf_conn *ct)
+{
+	return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
+}
+
+static inline u_int8_t nf_ct_protonum(const struct nf_conn *ct)
+{
+	return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum;
+}
+
/* shorthand: tuple of a given direction */
#define nf_ct_tuple(ct, dir) (&(ct)->tuplehash[dir].tuple)

/* get master conntrack via master expectation */
#define master_ct(conntr) (conntr->master)

extern struct net init_net;

/* Network namespace this conntrack belongs to. */
static inline struct net *nf_ct_net(const struct nf_conn *ct)
{
	return read_pnet(&ct->ct_net);
}

/* Alter reply tuple (maybe alter helper). */
void nf_conntrack_alter_reply(struct nf_conn *ct,
			      const struct nf_conntrack_tuple *newreply);

/* Is this tuple taken? (ignoring any belonging to the given
   conntrack). */
int nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
			     const struct nf_conn *ignored_conntrack);
+
+/* Return conntrack_info and tuple hash for given skb. */
+static inline struct nf_conn *
+nf_ct_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
+{
+	unsigned long nfct = skb_get_nfct(skb);
+
+	*ctinfo = nfct & NFCT_INFOMASK;
+	return (struct nf_conn *)(nfct & NFCT_PTRMASK);
+}
+
/* decrement reference count on a conntrack; warns (but still derefs) on NULL */
static inline void nf_ct_put(struct nf_conn *ct)
{
	WARN_ON(!ct);
	nf_conntrack_put(&ct->ct_general);
}

/* Protocol module loading */
int nf_ct_l3proto_try_module_get(unsigned short l3proto);
void nf_ct_l3proto_module_put(unsigned short l3proto);

/* load module; enable/disable conntrack in this namespace */
int nf_ct_netns_get(struct net *net, u8 nfproto);
void nf_ct_netns_put(struct net *net, u8 nfproto);

/*
 * Allocate a hashtable of hlist_head (if nulls == 0),
 * or hlist_nulls_head (if nulls == 1)
 */
void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);

void nf_ct_free_hashtable(void *hash, unsigned int size);
int nf_conntrack_hash_check_insert(struct nf_conn *ct);
bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report);

/* Extract a conntrack tuple from a raw packet at network-header offset nhoff. */
bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
		       u_int16_t l3num, struct net *net,
		       struct nf_conntrack_tuple *tuple);

void __nf_ct_refresh_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
			  const struct sk_buff *skb,
			  u32 extra_jiffies, bool do_acct);
+
/* Refresh conntrack for this many jiffies and do accounting */
static inline void nf_ct_refresh_acct(struct nf_conn *ct,
				      enum ip_conntrack_info ctinfo,
				      const struct sk_buff *skb,
				      u32 extra_jiffies)
{
	__nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies, true);
}

/* Refresh conntrack for this many jiffies (no accounting) */
static inline void nf_ct_refresh(struct nf_conn *ct,
				 const struct sk_buff *skb,
				 u32 extra_jiffies)
{
	__nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, false);
}

/* kill conntrack and do accounting */
bool nf_ct_kill_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
		     const struct sk_buff *skb);

/* kill conntrack without accounting; returns whether deletion happened */
static inline bool nf_ct_kill(struct nf_conn *ct)
{
	return nf_ct_delete(ct, 0, 0);
}

/* Set all unconfirmed conntrack as dying */
void nf_ct_unconfirmed_destroy(struct net *);

/* Iterate over all conntracks: if iter returns true, it's deleted. */
void nf_ct_iterate_cleanup_net(struct net *net,
			       int (*iter)(struct nf_conn *i, void *data),
			       void *data, u32 portid, int report);

/* also set unconfirmed conntracks as dying. Only use in module exit path. */
void nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data),
			   void *data);

struct nf_conntrack_zone;

/* Allocate/free a conntrack entry (not yet hashed). */
void nf_conntrack_free(struct nf_conn *ct);
struct nf_conn *nf_conntrack_alloc(struct net *net,
				   const struct nf_conntrack_zone *zone,
				   const struct nf_conntrack_tuple *orig,
				   const struct nf_conntrack_tuple *repl,
				   gfp_t gfp);
+
/* True for template conntracks (used by CT targets, never hashed as flows). */
static inline int nf_ct_is_template(const struct nf_conn *ct)
{
	return test_bit(IPS_TEMPLATE_BIT, &ct->status);
}

/* It's confirmed if it is, or has been in the hash table. */
static inline int nf_ct_is_confirmed(const struct nf_conn *ct)
{
	return test_bit(IPS_CONFIRMED_BIT, &ct->status);
}

/* True once the conntrack is being torn down. */
static inline int nf_ct_is_dying(const struct nf_conn *ct)
{
	return test_bit(IPS_DYING_BIT, &ct->status);
}
+
/* Packet is received from loopback.
 * Note precedence: the last term is (skb->dev->flags & IFF_LOOPBACK),
 * evaluated only after skb->dev was checked non-NULL. */
static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
{
	return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
}

/* current time in the same 32-bit jiffies domain as ct->timeout */
#define nfct_time_stamp ((u32)(jiffies))
+
/* jiffies until ct expires, 0 if already expired.
 * The s32 subtraction handles jiffies wraparound correctly. */
static inline unsigned long nf_ct_expires(const struct nf_conn *ct)
{
	s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp;

	return timeout > 0 ? timeout : 0;
}

/* True once the (wraparound-safe) timeout has elapsed. */
static inline bool nf_ct_is_expired(const struct nf_conn *ct)
{
	return (__s32)(READ_ONCE(ct->timeout) - nfct_time_stamp) <= 0;
}
+
/* use after obtaining a reference count: expired, hashed, and not
 * already being torn down — i.e. safe for the GC to reap */
static inline bool nf_ct_should_gc(const struct nf_conn *ct)
{
	return nf_ct_is_expired(ct) && nf_ct_is_confirmed(ct) &&
	       !nf_ct_is_dying(ct);
}
+
#define	NF_CT_DAY	(86400 * HZ)	/* one day of jiffies */

/* Set an arbitrary timeout large enough not to ever expire, this save
 * us a check for the IPS_OFFLOAD_BIT from the packet path via
 * nf_ct_is_expired().
 */
static inline void nf_ct_offload_timeout(struct nf_conn *ct)
{
	/* only rearm when less than half a day remains, to limit writes */
	if (nf_ct_expires(ct) < NF_CT_DAY / 2)
		WRITE_ONCE(ct->timeout, nfct_time_stamp + NF_CT_DAY);
}

struct kernel_param;

/* sysctl/module-param handlers for the conntrack hash size */
int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp);
int nf_conntrack_hash_resize(unsigned int hashsize);

/* global conntrack hash table state; guarded by nf_conntrack_generation */
extern struct hlist_nulls_head *nf_conntrack_hash;
extern unsigned int nf_conntrack_htable_size;
extern seqcount_spinlock_t nf_conntrack_generation;
extern unsigned int nf_conntrack_max;
+
/* must be called with rcu read lock held.
 * Snapshots the hash pointer and its size together: the seqcount retry
 * loop guarantees both values come from the same generation, so a
 * concurrent resize cannot hand us a mismatched pair. */
static inline void
nf_conntrack_get_ht(struct hlist_nulls_head **hash, unsigned int *hsize)
{
	struct hlist_nulls_head *hptr;
	unsigned int sequence, hsz;

	do {
		sequence = read_seqcount_begin(&nf_conntrack_generation);
		hsz = nf_conntrack_htable_size;
		hptr = nf_conntrack_hash;
	} while (read_seqcount_retry(&nf_conntrack_generation, sequence));

	*hash = hptr;
	*hsize = hsz;
}
+
/* Allocate/free a template conntrack (see nf_ct_is_template()). */
struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
				 const struct nf_conntrack_zone *zone,
				 gfp_t flags);
void nf_ct_tmpl_free(struct nf_conn *tmpl);

/* Stable userspace-visible id for a conntrack entry. */
u32 nf_ct_get_id(const struct nf_conn *ct);

/* Pack a conntrack pointer and ctinfo into skb->_nfct (inverse of nf_ct_get()). */
static inline void
nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info)
{
	skb_set_nfct(skb, (unsigned long)ct | info);
}

/* per-cpu conntrack statistics helpers */
#define NF_CT_STAT_INC(net, count)	  __this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_ADD_ATOMIC(net, count, v) this_cpu_add((net)->ct.stat->count, (v))

#define MODULE_ALIAS_NFCT_HELPER(helper) \
        MODULE_ALIAS("nfct-helper-" helper)
+
+#endif /* _NF_CONNTRACK_H */
diff --git a/upstream/linux-5.10/init/main.c b/upstream/linux-5.10/init/main.c
new file mode 100755
index 0000000..83d93ea
--- /dev/null
+++ b/upstream/linux-5.10/init/main.c
@@ -0,0 +1,1571 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *  linux/init/main.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  GK 2/5/95  -  Changed to support mounting root fs via NFS
+ *  Added initrd & change_root: Werner Almesberger & Hans Lermen, Feb '96
+ *  Moan early if gcc is old, avoiding bogus kernels - Paul Gortmaker, May '96
+ *  Simplified starting of init:  Michael A. Griffith <grif@acm.org>
+ */
+
+#define DEBUG		/* Enable initcall_debug */
+
+#include <linux/types.h>
+#include <linux/extable.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/binfmts.h>
+#include <linux/kernel.h>
+#include <linux/syscalls.h>
+#include <linux/stackprotector.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/initrd.h>
+#include <linux/memblock.h>
+#include <linux/acpi.h>
+#include <linux/bootconfig.h>
+#include <linux/console.h>
+#include <linux/nmi.h>
+#include <linux/percpu.h>
+#include <linux/kmod.h>
+#include <linux/kprobes.h>
+#include <linux/vmalloc.h>
+#include <linux/kernel_stat.h>
+#include <linux/start_kernel.h>
+#include <linux/security.h>
+#include <linux/smp.h>
+#include <linux/profile.h>
+#include <linux/rcupdate.h>
+#include <linux/moduleparam.h>
+#include <linux/kallsyms.h>
+#include <linux/writeback.h>
+#include <linux/cpu.h>
+#include <linux/cpuset.h>
+#include <linux/cgroup.h>
+#include <linux/efi.h>
+#include <linux/tick.h>
+#include <linux/sched/isolation.h>
+#include <linux/interrupt.h>
+#include <linux/taskstats_kern.h>
+#include <linux/delayacct.h>
+#include <linux/unistd.h>
+#include <linux/utsname.h>
+#include <linux/rmap.h>
+#include <linux/mempolicy.h>
+#include <linux/key.h>
+#include <linux/buffer_head.h>
+#include <linux/page_ext.h>
+#include <linux/debug_locks.h>
+#include <linux/debugobjects.h>
+#include <linux/lockdep.h>
+#include <linux/kmemleak.h>
+#include <linux/padata.h>
+#include <linux/pid_namespace.h>
+#include <linux/device/driver.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/sched/init.h>
+#include <linux/signal.h>
+#include <linux/idr.h>
+#include <linux/kgdb.h>
+#include <linux/ftrace.h>
+#include <linux/async.h>
+#include <linux/sfi.h>
+#include <linux/shmem_fs.h>
+#include <linux/slab.h>
+#include <linux/perf_event.h>
+#include <linux/ptrace.h>
+#include <linux/pti.h>
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/sched/clock.h>
+#include <linux/sched/task.h>
+#include <linux/sched/task_stack.h>
+#include <linux/context_tracking.h>
+#include <linux/random.h>
+#include <linux/list.h>
+#include <linux/integrity.h>
+#include <linux/proc_ns.h>
+#include <linux/io.h>
+#include <linux/cache.h>
+#include <linux/rodata_test.h>
+#include <linux/jump_label.h>
+#include <linux/mem_encrypt.h>
+#include <linux/kcsan.h>
+#include <linux/init_syscalls.h>
+
+#include <asm/io.h>
+#include <asm/bugs.h>
+#include <asm/setup.h>
+#include <asm/sections.h>
+#include <asm/cacheflush.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/initcall.h>
+
+#include <kunit/test.h>
+
+#ifdef CONFIG_FLAGS_UTILS
+#include <linux/reboot.h>
+#include "pub_flags.h"
+#endif
+
+static int kernel_init(void *);
+
+extern void init_IRQ(void);
+extern void radix_tree_init(void);
+
+/*
+ * Debug helper: via this flag we know that we are in 'early bootup code'
+ * where only the boot processor is running with IRQ disabled.  This means
+ * two things - IRQ must not be enabled before the flag is cleared and some
+ * operations which are not allowed with IRQ disabled are allowed while the
+ * flag is set.
+ */
+bool early_boot_irqs_disabled __read_mostly;
+
+enum system_states system_state __read_mostly;
+EXPORT_SYMBOL(system_state);
+
+/*
+ * Boot command-line arguments
+ */
+#define MAX_INIT_ARGS CONFIG_INIT_ENV_ARG_LIMIT
+#define MAX_INIT_ENVS CONFIG_INIT_ENV_ARG_LIMIT
+
+extern void time_init(void);
+/* Default late time init is NULL. archs can override this later. */
+void (*__initdata late_time_init)(void);
+
+/* Untouched command line saved by arch-specific code. */
+char __initdata boot_command_line[COMMAND_LINE_SIZE];
+/* Untouched saved command line (eg. for /proc) */
+char *saved_command_line;
+/* Command line for parameter parsing */
+static char *static_command_line;
+/* Untouched extra command line */
+static char *extra_command_line;
+/* Extra init arguments */
+static char *extra_init_args;
+
+#ifdef CONFIG_BOOT_CONFIG
+/* Is bootconfig on command line? */
+static bool bootconfig_found;
+static bool initargs_found;
+#else
+# define bootconfig_found false
+# define initargs_found false
+#endif
+
+static char *execute_command;
+static char *ramdisk_execute_command = "/init";
+
+/*
+ * Used to generate warnings if static_key manipulation functions are used
+ * before jump_label_init is called.
+ */
+bool static_key_initialized __read_mostly;
+EXPORT_SYMBOL_GPL(static_key_initialized);
+
+/*
+ * If set, this is an indication to the drivers that reset the underlying
+ * device before going ahead with the initialization otherwise driver might
+ * rely on the BIOS and skip the reset operation.
+ *
+ * This is useful if kernel is booting in an unreliable environment.
+ * For ex. kdump situation where previous kernel has crashed, BIOS has been
+ * skipped and devices will be in unknown state.
+ */
+unsigned int reset_devices;
+EXPORT_SYMBOL(reset_devices);
+
+static int __init set_reset_devices(char *str)
+{
+	reset_devices = 1;
+	return 1;
+}
+
+__setup("reset_devices", set_reset_devices);
+
+static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
+const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
+static const char *panic_later, *panic_param;
+
+extern const struct obs_kernel_param __setup_start[], __setup_end[];
+
+static bool __init obsolete_checksetup(char *line)
+{
+	const struct obs_kernel_param *p;
+	bool had_early_param = false;
+
+	p = __setup_start;
+	do {
+		int n = strlen(p->str);
+		if (parameqn(line, p->str, n)) {
+			if (p->early) {
+				/* Already done in parse_early_param?
+				 * (Needs exact match on param part).
+				 * Keep iterating, as we can have early
+				 * params and __setups of same names 8( */
+				if (line[n] == '\0' || line[n] == '=')
+					had_early_param = true;
+			} else if (!p->setup_func) {
+				pr_warn("Parameter %s is obsolete, ignored\n",
+					p->str);
+				return true;
+			} else if (p->setup_func(line + n))
+				return true;
+		}
+		p++;
+	} while (p < __setup_end);
+
+	return had_early_param;
+}
+
+/*
+ * This should be approx 2 Bo*oMips to start (note initial shift), and will
+ * still work even if initially too large, it will just take slightly longer
+ */
+unsigned long loops_per_jiffy = (1<<12);
+EXPORT_SYMBOL(loops_per_jiffy);
+
+static int __init debug_kernel(char *str)
+{
+	console_loglevel = CONSOLE_LOGLEVEL_DEBUG;
+	return 0;
+}
+
+static int __init quiet_kernel(char *str)
+{
+	console_loglevel = CONSOLE_LOGLEVEL_QUIET;
+	return 0;
+}
+
+early_param("debug", debug_kernel);
+early_param("quiet", quiet_kernel);
+
+static int __init loglevel(char *str)
+{
+	int newlevel;
+
+	/*
+	 * Only update the loglevel value when a correct setting was passed,
+	 * to prevent blind crashes (when loglevel is set to 0) that
+	 * are quite hard to debug
+	 */
+	if (get_option(&str, &newlevel)) {
+		console_loglevel = newlevel;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+early_param("loglevel", loglevel);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+static void * __init get_boot_config_from_initrd(u32 *_size, u32 *_csum)
+{
+	u32 size, csum;
+	char *data;
+	u32 *hdr;
+	int i;
+
+	if (!initrd_end)
+		return NULL;
+
+	data = (char *)initrd_end - BOOTCONFIG_MAGIC_LEN;
+	/*
+	 * Since Grub may align the size of initrd to 4, we must
+	 * check the preceding 3 bytes as well.
+	 */
+	for (i = 0; i < 4; i++) {
+		if (!memcmp(data, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN))
+			goto found;
+		data--;
+	}
+	return NULL;
+
+found:
+	hdr = (u32 *)(data - 8);
+	size = le32_to_cpu(hdr[0]);
+	csum = le32_to_cpu(hdr[1]);
+
+	data = ((void *)hdr) - size;
+	if ((unsigned long)data < initrd_start) {
+		pr_err("bootconfig size %d is greater than initrd size %ld\n",
+			size, initrd_end - initrd_start);
+		return NULL;
+	}
+
+	/* Remove bootconfig from initramfs/initrd */
+	initrd_end = (unsigned long)data;
+	if (_size)
+		*_size = size;
+	if (_csum)
+		*_csum = csum;
+
+	return data;
+}
+#else
+static void * __init get_boot_config_from_initrd(u32 *_size, u32 *_csum)
+{
+	return NULL;
+}
+#endif
+
+#ifdef CONFIG_BOOT_CONFIG
+
+static char xbc_namebuf[XBC_KEYLEN_MAX] __initdata;
+
+#define rest(dst, end) ((end) > (dst) ? (end) - (dst) : 0)
+
+static int __init xbc_snprint_cmdline(char *buf, size_t size,
+				      struct xbc_node *root)
+{
+	struct xbc_node *knode, *vnode;
+	char *end = buf + size;
+	const char *val;
+	int ret;
+
+	xbc_node_for_each_key_value(root, knode, val) {
+		ret = xbc_node_compose_key_after(root, knode,
+					xbc_namebuf, XBC_KEYLEN_MAX);
+		if (ret < 0)
+			return ret;
+
+		vnode = xbc_node_get_child(knode);
+		if (!vnode) {
+			ret = snprintf(buf, rest(buf, end), "%s ", xbc_namebuf);
+			if (ret < 0)
+				return ret;
+			buf += ret;
+			continue;
+		}
+		xbc_array_for_each_value(vnode, val) {
+			ret = snprintf(buf, rest(buf, end), "%s=\"%s\" ",
+				       xbc_namebuf, val);
+			if (ret < 0)
+				return ret;
+			buf += ret;
+		}
+	}
+
+	return buf - (end - size);
+}
+#undef rest
+
+/* Make an extra command line under given key word */
+static char * __init xbc_make_cmdline(const char *key)
+{
+	struct xbc_node *root;
+	char *new_cmdline;
+	int ret, len = 0;
+
+	root = xbc_find_node(key);
+	if (!root)
+		return NULL;
+
+	/* Count required buffer size */
+	len = xbc_snprint_cmdline(NULL, 0, root);
+	if (len <= 0)
+		return NULL;
+
+	new_cmdline = memblock_alloc(len + 1, SMP_CACHE_BYTES);
+	if (!new_cmdline) {
+		pr_err("Failed to allocate memory for extra kernel cmdline.\n");
+		return NULL;
+	}
+
+	ret = xbc_snprint_cmdline(new_cmdline, len + 1, root);
+	if (ret < 0 || ret > len) {
+		pr_err("Failed to print extra kernel cmdline.\n");
+		memblock_free(__pa(new_cmdline), len + 1);
+		return NULL;
+	}
+
+	return new_cmdline;
+}
+
+static u32 boot_config_checksum(unsigned char *p, u32 size)
+{
+	u32 ret = 0;
+
+	while (size--)
+		ret += *p++;
+
+	return ret;
+}
+
+static int __init bootconfig_params(char *param, char *val,
+				    const char *unused, void *arg)
+{
+	if (strcmp(param, "bootconfig") == 0) {
+		bootconfig_found = true;
+	}
+	return 0;
+}
+
+static void __init setup_boot_config(const char *cmdline)
+{
+	static char tmp_cmdline[COMMAND_LINE_SIZE] __initdata;
+	const char *msg;
+	int pos;
+	u32 size, csum;
+	char *data, *copy, *err;
+	int ret;
+
+	/* Cut out the bootconfig data even if we have no bootconfig option */
+	data = get_boot_config_from_initrd(&size, &csum);
+
+	strlcpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+	err = parse_args("bootconfig", tmp_cmdline, NULL, 0, 0, 0, NULL,
+			 bootconfig_params);
+
+	if (IS_ERR(err) || !bootconfig_found)
+		return;
+
+	/* parse_args() stops at '--' and returns an address */
+	if (err)
+		initargs_found = true;
+
+	if (!data) {
+		pr_err("'bootconfig' found on command line, but no bootconfig found\n");
+		return;
+	}
+
+	if (size >= XBC_DATA_MAX) {
+		pr_err("bootconfig size %d greater than max size %d\n",
+			size, XBC_DATA_MAX);
+		return;
+	}
+
+	if (boot_config_checksum((unsigned char *)data, size) != csum) {
+		pr_err("bootconfig checksum failed\n");
+		return;
+	}
+
+	copy = memblock_alloc(size + 1, SMP_CACHE_BYTES);
+	if (!copy) {
+		pr_err("Failed to allocate memory for bootconfig\n");
+		return;
+	}
+
+	memcpy(copy, data, size);
+	copy[size] = '\0';
+
+	ret = xbc_init(copy, &msg, &pos);
+	if (ret < 0) {
+		if (pos < 0)
+			pr_err("Failed to init bootconfig: %s.\n", msg);
+		else
+			pr_err("Failed to parse bootconfig: %s at %d.\n",
+				msg, pos);
+	} else {
+		pr_info("Load bootconfig: %d bytes %d nodes\n", size, ret);
+		/* keys starting with "kernel." are passed via cmdline */
+		extra_command_line = xbc_make_cmdline("kernel");
+		/* Also, "init." keys are init arguments */
+		extra_init_args = xbc_make_cmdline("init");
+	}
+	return;
+}
+
+#else
+
+static void __init setup_boot_config(const char *cmdline)
+{
+	/* Remove bootconfig data from initrd */
+	get_boot_config_from_initrd(NULL, NULL);
+}
+
+static int __init warn_bootconfig(char *str)
+{
+	pr_warn("WARNING: 'bootconfig' found on the kernel command line but CONFIG_BOOT_CONFIG is not set.\n");
+	return 0;
+}
+early_param("bootconfig", warn_bootconfig);
+
+#endif
+
+/* Change NUL term back to "=", to make "param" the whole string. */
+static void __init repair_env_string(char *param, char *val)
+{
+	if (val) {
+		/* param=val or param="val"? */
+		if (val == param+strlen(param)+1)
+			val[-1] = '=';
+		else if (val == param+strlen(param)+2) {
+			val[-2] = '=';
+			memmove(val-1, val, strlen(val)+1);
+		} else
+			BUG();
+	}
+}
+
+/* Anything after -- gets handed straight to init. */
+static int __init set_init_arg(char *param, char *val,
+			       const char *unused, void *arg)
+{
+	unsigned int i;
+
+	if (panic_later)
+		return 0;
+
+	repair_env_string(param, val);
+
+	for (i = 0; argv_init[i]; i++) {
+		if (i == MAX_INIT_ARGS) {
+			panic_later = "init";
+			panic_param = param;
+			return 0;
+		}
+	}
+	argv_init[i] = param;
+	return 0;
+}
+
+/*
+ * Unknown boot options get handed to init, unless they look like
+ * unused parameters (modprobe will find them in /proc/cmdline).
+ */
+static int __init unknown_bootoption(char *param, char *val,
+				     const char *unused, void *arg)
+{
+	size_t len = strlen(param);
+
+	repair_env_string(param, val);
+
+	/* Handle obsolete-style parameters */
+	if (obsolete_checksetup(param))
+		return 0;
+
+	/* Unused module parameter. */
+	if (strnchr(param, len, '.'))
+		return 0;
+
+	if (panic_later)
+		return 0;
+
+	if (val) {
+		/* Environment option */
+		unsigned int i;
+		for (i = 0; envp_init[i]; i++) {
+			if (i == MAX_INIT_ENVS) {
+				panic_later = "env";
+				panic_param = param;
+			}
+			if (!strncmp(param, envp_init[i], len+1))
+				break;
+		}
+		envp_init[i] = param;
+	} else {
+		/* Command line option */
+		unsigned int i;
+		for (i = 0; argv_init[i]; i++) {
+			if (i == MAX_INIT_ARGS) {
+				panic_later = "init";
+				panic_param = param;
+			}
+		}
+		argv_init[i] = param;
+	}
+	return 0;
+}
+
+static int __init init_setup(char *str)
+{
+	unsigned int i;
+
+	execute_command = str;
+	/*
+	 * In case LILO is going to boot us with default command line,
+	 * it prepends "auto" before the whole cmdline which makes
+	 * the shell think it should execute a script with such name.
+	 * So we ignore all arguments entered _before_ init=... [MJ]
+	 */
+	for (i = 1; i < MAX_INIT_ARGS; i++)
+		argv_init[i] = NULL;
+	return 1;
+}
+__setup("init=", init_setup);
+
+static int __init rdinit_setup(char *str)
+{
+	unsigned int i;
+
+	ramdisk_execute_command = str;
+	/* See "auto" comment in init_setup */
+	for (i = 1; i < MAX_INIT_ARGS; i++)
+		argv_init[i] = NULL;
+	return 1;
+}
+__setup("rdinit=", rdinit_setup);
+
+#ifndef CONFIG_SMP
+static const unsigned int setup_max_cpus = NR_CPUS;
+static inline void setup_nr_cpu_ids(void) { }
+static inline void smp_prepare_cpus(unsigned int maxcpus) { }
+#endif
+
+/*
+ * We need to store the untouched command line for future reference.
+ * We also need to store the touched command line since the parameter
+ * parsing is performed in place, and we should allow a component to
+ * store a reference to the name/value pairs for future reference.
+ */
+static void __init setup_command_line(char *command_line)
+{
+	size_t len, xlen = 0, ilen = 0;
+
+	if (extra_command_line)
+		xlen = strlen(extra_command_line);
+	if (extra_init_args)
+		ilen = strlen(extra_init_args) + 4; /* for " -- " */
+
+	len = xlen + strlen(boot_command_line) + 1;
+
+	saved_command_line = memblock_alloc(len + ilen, SMP_CACHE_BYTES);
+	if (!saved_command_line)
+		panic("%s: Failed to allocate %zu bytes\n", __func__, len + ilen);
+
+	static_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
+	if (!static_command_line)
+		panic("%s: Failed to allocate %zu bytes\n", __func__, len);
+
+	if (xlen) {
+		/*
+		 * We have to put extra_command_line before boot command
+		 * lines because there could be dashes (separator of init
+		 * command line) in the command lines.
+		 */
+		strcpy(saved_command_line, extra_command_line);
+		strcpy(static_command_line, extra_command_line);
+	}
+	strcpy(saved_command_line + xlen, boot_command_line);
+	strcpy(static_command_line + xlen, command_line);
+
+	if (ilen) {
+		/*
+		 * Append supplemental init boot args to saved_command_line
+		 * so that user can check what command line options passed
+		 * to init.
+		 */
+		len = strlen(saved_command_line);
+		if (initargs_found) {
+			saved_command_line[len++] = ' ';
+		} else {
+			strcpy(saved_command_line + len, " -- ");
+			len += 4;
+		}
+
+		strcpy(saved_command_line + len, extra_init_args);
+	}
+}
+
+/*
+ * We need to finalize in a non-__init function or else race conditions
+ * between the root thread and the init thread may cause start_kernel to
+ * be reaped by free_initmem before the root thread has proceeded to
+ * cpu_idle.
+ *
+ * gcc-3.4 accidentally inlines this function, so use noinline.
+ */
+
+static __initdata DECLARE_COMPLETION(kthreadd_done);
+
+noinline void __ref rest_init(void)
+{
+	struct task_struct *tsk;
+	int pid;
+
+	rcu_scheduler_starting();
+	/*
+	 * We need to spawn init first so that it obtains pid 1, however
+	 * the init task will end up wanting to create kthreads, which, if
+	 * we schedule it before we create kthreadd, will OOPS.
+	 */
+	pid = kernel_thread(kernel_init, NULL, CLONE_FS);
+	/*
+	 * Pin init on the boot CPU. Task migration is not properly working
+	 * until sched_init_smp() has been run. It will set the allowed
+	 * CPUs for init to the non isolated CPUs.
+	 */
+	rcu_read_lock();
+	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
+	set_cpus_allowed_ptr(tsk, cpumask_of(smp_processor_id()));
+	rcu_read_unlock();
+
+	numa_default_policy();
+	pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
+	rcu_read_lock();
+	kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
+	rcu_read_unlock();
+
+	/*
+	 * Enable might_sleep() and smp_processor_id() checks.
+	 * They cannot be enabled earlier because with CONFIG_PREEMPTION=y
+	 * kernel_thread() would trigger might_sleep() splats. With
+	 * CONFIG_PREEMPT_VOLUNTARY=y the init task might have scheduled
+	 * already, but it's stuck on the kthreadd_done completion.
+	 */
+	system_state = SYSTEM_SCHEDULING;
+
+	complete(&kthreadd_done);
+
+	/*
+	 * The boot idle thread must execute schedule()
+	 * at least once to get things moving:
+	 */
+	schedule_preempt_disabled();
+	/* Call into cpu_idle with preempt disabled */
+	cpu_startup_entry(CPUHP_ONLINE);
+}
+
+/* Check for early params. */
+static int __init do_early_param(char *param, char *val,
+				 const char *unused, void *arg)
+{
+	const struct obs_kernel_param *p;
+
+	for (p = __setup_start; p < __setup_end; p++) {
+		if ((p->early && parameq(param, p->str)) ||
+		    (strcmp(param, "console") == 0 &&
+		     strcmp(p->str, "earlycon") == 0)
+		) {
+			if (p->setup_func(val) != 0)
+				pr_warn("Malformed early option '%s'\n", param);
+		}
+	}
+	/* We accept everything at this stage. */
+	return 0;
+}
+
+void __init parse_early_options(char *cmdline)
+{
+	parse_args("early options", cmdline, NULL, 0, 0, 0, NULL,
+		   do_early_param);
+}
+
+/* Arch code calls this early on, or if not, just before other parsing. */
+void __init parse_early_param(void)
+{
+	static int done __initdata;
+	static char tmp_cmdline[COMMAND_LINE_SIZE] __initdata;
+
+	if (done)
+		return;
+
+	/* All fall through to do_early_param. */
+	strlcpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+	parse_early_options(tmp_cmdline);
+	done = 1;
+}
+
+void __init __weak arch_post_acpi_subsys_init(void) { }
+
+void __init __weak smp_setup_processor_id(void)
+{
+}
+
+# if THREAD_SIZE >= PAGE_SIZE
+void __init __weak thread_stack_cache_init(void)
+{
+}
+#endif
+
+void __init __weak mem_encrypt_init(void) { }
+
+void __init __weak poking_init(void) { }
+
+void __init __weak pgtable_cache_init(void) { }
+
+bool initcall_debug;
+core_param(initcall_debug, initcall_debug, bool, 0644);
+
+#ifdef TRACEPOINTS_ENABLED
+static void __init initcall_debug_enable(void);
+#else
+static inline void initcall_debug_enable(void)
+{
+}
+#endif
+
+/* Report memory auto-initialization states for this boot. */
+static void __init report_meminit(void)
+{
+	const char *stack;
+
+	if (IS_ENABLED(CONFIG_INIT_STACK_ALL_PATTERN))
+		stack = "all(pattern)";
+	else if (IS_ENABLED(CONFIG_INIT_STACK_ALL_ZERO))
+		stack = "all(zero)";
+	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL))
+		stack = "byref_all(zero)";
+	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF))
+		stack = "byref(zero)";
+	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER))
+		stack = "__user(zero)";
+	else
+		stack = "off";
+
+	pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n",
+		stack, want_init_on_alloc(GFP_KERNEL) ? "on" : "off",
+		want_init_on_free() ? "on" : "off");
+	if (want_init_on_free())
+		pr_info("mem auto-init: clearing system memory may take some time...\n");
+}
+
+/*
+ * Set up kernel memory allocators
+ */
+static void __init mm_init(void)
+{
+	/*
+	 * page_ext requires contiguous pages,
+	 * bigger than MAX_ORDER unless SPARSEMEM.
+	 */
+	page_ext_init_flatmem();
+	init_debug_pagealloc();
+	report_meminit();
+	mem_init();
+	kmem_cache_init();
+	kmemleak_init();
+	pgtable_init();
+	debug_objects_mem_init();
+	vmalloc_init();
+	ioremap_huge_init();
+	/* Should be run before the first non-init thread is created */
+	init_espfix_bsp();
+	/* Should be run after espfix64 is set up. */
+	pti_init();
+}
+
+void __init __weak arch_call_rest_init(void)
+{
+	rest_init();
+}
+
+void __weak early_drv_init(void) {}
+
+asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
+{
+	char *command_line;
+	char *after_dashes;
+
+	set_task_stack_end_magic(&init_task);
+	smp_setup_processor_id();
+	debug_objects_early_init();
+
+	cgroup_init_early();
+
+	local_irq_disable();
+	early_boot_irqs_disabled = true;
+
+	/*
+	 * Interrupts are still disabled. Do necessary setups, then
+	 * enable them.
+	 */
+	boot_cpu_init();
+	page_address_init();
+	pr_notice("%s", linux_banner);
+	early_security_init();
+	setup_arch(&command_line);
+	setup_boot_config(command_line);
+	setup_command_line(command_line);
+	setup_nr_cpu_ids();
+	setup_per_cpu_areas();
+	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */
+	boot_cpu_hotplug_init();
+
+	build_all_zonelists(NULL);
+	page_alloc_init();
+
+	pr_notice("Kernel command line: %s\n", saved_command_line);
+	/* parameters may set static keys */
+	jump_label_init();
+	parse_early_param();
+	after_dashes = parse_args("Booting kernel",
+				  static_command_line, __start___param,
+				  __stop___param - __start___param,
+				  -1, -1, NULL, &unknown_bootoption);
+	if (!IS_ERR_OR_NULL(after_dashes))
+		parse_args("Setting init args", after_dashes, NULL, 0, -1, -1,
+			   NULL, set_init_arg);
+	if (extra_init_args)
+		parse_args("Setting extra init args", extra_init_args,
+			   NULL, 0, -1, -1, NULL, set_init_arg);
+
+	/*
+	 * These use large bootmem allocations and must precede
+	 * kmem_cache_init()
+	 */
+	setup_log_buf(0);
+	vfs_caches_init_early();
+	sort_main_extable();
+	trap_init();
+	mm_init();
+
+	ftrace_init();
+
+	/* trace_printk can be enabled here */
+	early_trace_init();
+
+	/*
+	 * Set up the scheduler prior to starting any interrupts (such as the
+	 * timer interrupt). Full topology setup happens at smp_init()
+	 * time - but meanwhile we still have a functioning scheduler.
+	 */
+	sched_init();
+
+	if (WARN(!irqs_disabled(),
+		 "Interrupts were enabled *very* early, fixing it\n"))
+		local_irq_disable();
+	radix_tree_init();
+
+	/*
+	 * Set up housekeeping before setting up workqueues to allow the unbound
+	 * workqueue to take non-housekeeping into account.
+	 */
+	housekeeping_init();
+
+	/*
+	 * Allow workqueue creation and work item queueing/cancelling
+	 * early.  Work item execution depends on kthreads and starts after
+	 * workqueue_init().
+	 */
+	workqueue_init_early();
+
+	rcu_init();
+
+	/* Trace events are available after this */
+	trace_init();
+
+	if (initcall_debug)
+		initcall_debug_enable();
+
+	context_tracking_init();
+
+	early_drv_init();
+	
+	/* init some links before init_ISA_irqs() */
+	early_irq_init();
+	init_IRQ();
+	tick_init();
+	rcu_init_nohz();
+	init_timers();
+	hrtimers_init();
+	softirq_init();
+	timekeeping_init();
+	time_init();
+
+	/*
+	 * For best initial stack canary entropy, prepare it after:
+	 * - setup_arch() for any UEFI RNG entropy and boot cmdline access
+	 * - timekeeping_init() for ktime entropy used in random_init()
+	 * - time_init() for making random_get_entropy() work on some platforms
+	 * - random_init() to initialize the RNG from early entropy sources
+	 */
+	random_init(command_line);
+	boot_init_stack_canary();
+
+	perf_event_init();
+	profile_init();
+	call_function_init();
+	WARN(!irqs_disabled(), "Interrupts were enabled early\n");
+
+	early_boot_irqs_disabled = false;
+	local_irq_enable();
+
+	kmem_cache_init_late();
+
+	/*
+	 * HACK ALERT! This is early. We're enabling the console before
+	 * we've done PCI setups etc, and console_init() must be aware of
+	 * this. But we do want output early, in case something goes wrong.
+	 */
+	console_init();
+	if (panic_later)
+		panic("Too many boot %s vars at `%s'", panic_later,
+		      panic_param);
+
+	lockdep_init();
+
+	/*
+	 * Need to run this when irqs are enabled, because it wants
+	 * to self-test [hard/soft]-irqs on/off lock inversion bugs
+	 * too:
+	 */
+	locking_selftest();
+
+	/*
+	 * This needs to be called before any devices perform DMA
+	 * operations that might use the SWIOTLB bounce buffers. It will
+	 * mark the bounce buffers as decrypted so that their usage will
+	 * not cause "plain-text" data to be decrypted when accessed.
+	 */
+	mem_encrypt_init();
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	if (initrd_start && !initrd_below_start_ok &&
+	    page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {
+		pr_crit("initrd overwritten (0x%08lx < 0x%08lx) - disabling it.\n",
+		    page_to_pfn(virt_to_page((void *)initrd_start)),
+		    min_low_pfn);
+		initrd_start = 0;
+	}
+#endif
+	setup_per_cpu_pageset();
+	numa_policy_init();
+	acpi_early_init();
+	if (late_time_init)
+		late_time_init();
+	sched_clock_init();
+	calibrate_delay();
+	pid_idr_init();
+	anon_vma_init();
+#ifdef CONFIG_X86
+	if (efi_enabled(EFI_RUNTIME_SERVICES))
+		efi_enter_virtual_mode();
+#endif
+	thread_stack_cache_init();
+	cred_init();
+	fork_init();
+	proc_caches_init();
+	uts_ns_init();
+	buffer_init();
+	key_init();
+	security_init();
+	dbg_late_init();
+	vfs_caches_init();
+	pagecache_init();
+	signals_init();
+	seq_file_init();
+	proc_root_init();
+	nsfs_init();
+	cpuset_init();
+	cgroup_init();
+	taskstats_init_early();
+	delayacct_init();
+
+	poking_init();
+	check_bugs();
+
+	acpi_subsystem_init();
+	arch_post_acpi_subsys_init();
+	sfi_init_late();
+	kcsan_init();
+
+	/* Do the rest non-__init'ed, we're now alive */
+	arch_call_rest_init();
+
+	prevent_tail_call_optimization();
+}
+
+/* Call all constructor functions linked into the kernel. */
+static void __init do_ctors(void)
+{
+#ifdef CONFIG_CONSTRUCTORS
+	ctor_fn_t *fn = (ctor_fn_t *) __ctors_start;
+
+	for (; fn < (ctor_fn_t *) __ctors_end; fn++)
+		(*fn)();
+#endif
+}
+
+#ifdef CONFIG_KALLSYMS
+struct blacklist_entry {
+	struct list_head next;
+	char *buf;
+};
+
+static __initdata_or_module LIST_HEAD(blacklisted_initcalls);
+
+static int __init initcall_blacklist(char *str)
+{
+	char *str_entry;
+	struct blacklist_entry *entry;
+
+	/* str argument is a comma-separated list of functions */
+	do {
+		str_entry = strsep(&str, ",");
+		if (str_entry) {
+			pr_debug("blacklisting initcall %s\n", str_entry);
+			entry = memblock_alloc(sizeof(*entry),
+					       SMP_CACHE_BYTES);
+			if (!entry)
+				panic("%s: Failed to allocate %zu bytes\n",
+				      __func__, sizeof(*entry));
+			entry->buf = memblock_alloc(strlen(str_entry) + 1,
+						    SMP_CACHE_BYTES);
+			if (!entry->buf)
+				panic("%s: Failed to allocate %zu bytes\n",
+				      __func__, strlen(str_entry) + 1);
+			strcpy(entry->buf, str_entry);
+			list_add(&entry->next, &blacklisted_initcalls);
+		}
+	} while (str_entry);
+
+	return 1;
+}
+
+static bool __init_or_module initcall_blacklisted(initcall_t fn)
+{
+	struct blacklist_entry *entry;
+	char fn_name[KSYM_SYMBOL_LEN];
+	unsigned long addr;
+
+	if (list_empty(&blacklisted_initcalls))
+		return false;
+
+	addr = (unsigned long) dereference_function_descriptor(fn);
+	sprint_symbol_no_offset(fn_name, addr);
+
+	/*
+	 * fn will be "function_name [module_name]" where [module_name] is not
+	 * displayed for built-in init functions.  Strip off the [module_name].
+	 */
+	strreplace(fn_name, ' ', '\0');
+
+	list_for_each_entry(entry, &blacklisted_initcalls, next) {
+		if (!strcmp(fn_name, entry->buf)) {
+			pr_debug("initcall %s blacklisted\n", fn_name);
+			return true;
+		}
+	}
+
+	return false;
+}
+#else
+static int __init initcall_blacklist(char *str)
+{
+	pr_warn("initcall_blacklist requires CONFIG_KALLSYMS\n");
+	return 0;
+}
+
+static bool __init_or_module initcall_blacklisted(initcall_t fn)
+{
+	return false;
+}
+#endif
+__setup("initcall_blacklist=", initcall_blacklist);
+
+static __init_or_module void
+trace_initcall_start_cb(void *data, initcall_t fn)
+{
+	ktime_t *calltime = (ktime_t *)data;
+
+	printk(KERN_DEBUG "calling  %pS @ %i\n", fn, task_pid_nr(current));
+	*calltime = ktime_get();
+}
+
+static __init_or_module void
+trace_initcall_finish_cb(void *data, initcall_t fn, int ret)
+{
+	ktime_t *calltime = (ktime_t *)data;
+	ktime_t delta, rettime;
+	unsigned long long duration;
+
+	rettime = ktime_get();
+	delta = ktime_sub(rettime, *calltime);
+	duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+	printk(KERN_DEBUG "initcall %pS returned %d after %lld usecs\n",
+		 fn, ret, duration);
+}
+
+static ktime_t initcall_calltime;
+
+#ifdef TRACEPOINTS_ENABLED
+static void __init initcall_debug_enable(void)
+{
+	int ret;
+
+	ret = register_trace_initcall_start(trace_initcall_start_cb,
+					    &initcall_calltime);
+	ret |= register_trace_initcall_finish(trace_initcall_finish_cb,
+					      &initcall_calltime);
+	WARN(ret, "Failed to register initcall tracepoints\n");
+}
+# define do_trace_initcall_start	trace_initcall_start
+# define do_trace_initcall_finish	trace_initcall_finish
+#else
+static inline void do_trace_initcall_start(initcall_t fn)
+{
+	if (!initcall_debug)
+		return;
+	trace_initcall_start_cb(&initcall_calltime, fn);
+}
+static inline void do_trace_initcall_finish(initcall_t fn, int ret)
+{
+	if (!initcall_debug)
+		return;
+	trace_initcall_finish_cb(&initcall_calltime, fn, ret);
+}
+#endif /* !TRACEPOINTS_ENABLED */
+
+int __init_or_module do_one_initcall(initcall_t fn)
+{
+	int count = preempt_count();
+	char msgbuf[64];
+	int ret;
+
+	if (initcall_blacklisted(fn))
+		return -EPERM;
+
+	do_trace_initcall_start(fn);
+	ret = fn();
+	do_trace_initcall_finish(fn, ret);
+
+	msgbuf[0] = 0;
+
+	if (preempt_count() != count) {
+		sprintf(msgbuf, "preemption imbalance ");
+		preempt_count_set(count);
+	}
+	if (irqs_disabled()) {
+		strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
+		local_irq_enable();
+	}
+	WARN(msgbuf[0], "initcall %pS returned with %s\n", fn, msgbuf);
+
+	add_latent_entropy();
+	return ret;
+}
+
+
+extern initcall_entry_t __initcall_start[];
+extern initcall_entry_t __initcall0_start[];
+extern initcall_entry_t __initcall1_start[];
+extern initcall_entry_t __initcall2_start[];
+extern initcall_entry_t __initcall3_start[];
+extern initcall_entry_t __initcall4_start[];
+extern initcall_entry_t __initcall5_start[];
+extern initcall_entry_t __initcall6_start[];
+extern initcall_entry_t __initcall7_start[];
+extern initcall_entry_t __initcall_end[];
+
+/*
+ * Start markers of each initcall level; terminated by __initcall_end so
+ * level N spans [initcall_levels[N], initcall_levels[N+1]).
+ */
+static initcall_entry_t *initcall_levels[] __initdata = {
+	__initcall0_start,
+	__initcall1_start,
+	__initcall2_start,
+	__initcall3_start,
+	__initcall4_start,
+	__initcall5_start,
+	__initcall6_start,
+	__initcall7_start,
+	__initcall_end,
+};
+
+/* Keep these in sync with initcalls in include/linux/init.h */
+/* Human-readable name for each level, used for tracing and parse_args(). */
+static const char *initcall_level_names[] __initdata = {
+	"pure",
+	"core",
+	"postcore",
+	"arch",
+	"subsys",
+	"fs",
+	"device",
+	"late",
+};
+
+/* parse_args() callback that silently accepts unknown boot options. */
+static int __init ignore_unknown_bootoption(char *param, char *val,
+			       const char *unused, void *arg)
+{
+	return 0;
+}
+
+/*
+ * Re-parse the command line for parameters belonging to this level, then run
+ * every initcall registered at that level in link order.
+ */
+static void __init do_initcall_level(int level, char *command_line)
+{
+	initcall_entry_t *fn;
+
+	parse_args(initcall_level_names[level],
+		   command_line, __start___param,
+		   __stop___param - __start___param,
+		   level, level,
+		   NULL, ignore_unknown_bootoption);
+
+	trace_initcall_level(initcall_level_names[level]);
+	for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
+		do_one_initcall(initcall_from_entry(fn));
+}
+
+/*
+ * Run all registered initcalls, one level at a time.  parse_args() modifies
+ * the command line it is handed, so work on a private copy that is refreshed
+ * from saved_command_line before each level.
+ */
+static void __init do_initcalls(void)
+{
+	size_t cmdline_len = strlen(saved_command_line) + 1;
+	char *cmdline_copy;
+	int lvl;
+
+	cmdline_copy = kzalloc(cmdline_len, GFP_KERNEL);
+	if (!cmdline_copy)
+		panic("%s: Failed to allocate %zu bytes\n", __func__, cmdline_len);
+
+	for (lvl = 0; lvl < ARRAY_SIZE(initcall_levels) - 1; lvl++) {
+		/* Parser modifies command_line, restore it each time */
+		strcpy(cmdline_copy, saved_command_line);
+		do_initcall_level(lvl, cmdline_copy);
+	}
+
+	kfree(cmdline_copy);
+}
+
+/*
+ * Ok, the machine is now initialized. None of the devices
+ * have been touched yet, but the CPU subsystem is up and
+ * running, and memory and process management works.
+ *
+ * Now we can finally start doing some real work..
+ */
+static void __init do_basic_setup(void)
+{
+	cpuset_init_smp();
+	driver_init();
+	init_irq_proc();
+	do_ctors();
+	/* Enable usermode helpers before initcalls that may request them. */
+	usermodehelper_enable();
+	do_initcalls();
+}
+
+/* Run the "early" initcalls (before __initcall0_start), prior to SMP bring-up. */
+static void __init do_pre_smp_initcalls(void)
+{
+	initcall_entry_t *fn;
+
+	trace_initcall_level("early");
+	for (fn = __initcall_start; fn < __initcall0_start; fn++)
+		do_one_initcall(initcall_from_entry(fn));
+}
+
+/*
+ * Exec @init_filename as the init process using the boot-time argv/envp.
+ * Logs the chosen binary and (at debug level) both argument lists.
+ * Returns the kernel_execve() result; 0 means we never return here.
+ */
+static int run_init_process(const char *init_filename)
+{
+	const char *const *entry;
+
+	argv_init[0] = init_filename;
+	pr_info("Run %s as init process\n", init_filename);
+	pr_debug("  with arguments:\n");
+	entry = argv_init;
+	while (*entry)
+		pr_debug("    %s\n", *entry++);
+	pr_debug("  with environment:\n");
+	entry = envp_init;
+	while (*entry)
+		pr_debug("    %s\n", *entry++);
+	return kernel_execve(init_filename, argv_init, envp_init);
+}
+
+/*
+ * Attempt one init candidate.  A missing file (-ENOENT) is expected and
+ * silent; any other failure on an existing binary is reported.
+ */
+static int try_to_run_init_process(const char *init_filename)
+{
+	int err = run_init_process(init_filename);
+
+	if (err && err != -ENOENT)
+		pr_err("Starting init: %s exists but couldn't execute it (error %d)\n",
+		       init_filename, err);
+
+	return err;
+}
+
+static noinline void __init kernel_init_freeable(void);
+
+#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX)
+/* Whether to mark kernel text/rodata read-only; toggled by "rodata=" on the command line. */
+bool rodata_enabled __ro_after_init = true;
+static int __init set_debug_rodata(char *str)
+{
+	if (strtobool(str, &rodata_enabled))
+		pr_warn("Invalid option string for rodata: '%s'\n", str);
+	return 1;
+}
+__setup("rodata=", set_debug_rodata);
+#endif
+
+#ifdef CONFIG_STRICT_KERNEL_RWX
+/* Apply (or report skipping of) read-only protection for kernel mappings. */
+static void mark_readonly(void)
+{
+	if (rodata_enabled) {
+		/*
+		 * load_module() results in W+X mappings, which are cleaned
+		 * up with call_rcu().  Let's make sure that queued work is
+		 * flushed so that we don't hit false positives looking for
+		 * insecure pages which are W+X.
+		 */
+		rcu_barrier();
+		mark_rodata_ro();
+		rodata_test();
+	} else
+		pr_info("Kernel memory protection disabled.\n");
+}
+#elif defined(CONFIG_ARCH_HAS_STRICT_KERNEL_RWX)
+/* Arch supports RWX protection but the config did not enable it. */
+static inline void mark_readonly(void)
+{
+	pr_warn("Kernel memory protection not selected by kernel config.\n");
+}
+#else
+/* Arch has no RWX protection support at all. */
+static inline void mark_readonly(void)
+{
+	pr_warn("This architecture does not have kernel memory protection.\n");
+}
+#endif
+
+/* Default (weak) implementation: free and poison the init sections. */
+void __weak free_initmem(void)
+{
+	free_initmem_default(POISON_FREE_INITMEM);
+}
+
+/*
+ * kernel_init - the thread that becomes PID 1.
+ *
+ * Finishes in-kernel boot (frees init memory, seals mappings), then tries
+ * the init candidates in order: ramdisk_execute_command, "init=" from the
+ * command line, CONFIG_DEFAULT_INIT, then the classic fallback paths.
+ * Panics if no init can be started (unless the vendor flags-switch path
+ * below takes over).
+ */
+static int __ref kernel_init(void *unused)
+{
+	int ret;
+
+	kernel_init_freeable();
+	/* need to finish all async __init code before freeing the memory */
+	async_synchronize_full();
+	kprobe_free_init_mem();
+	ftrace_free_init_mem();
+	kgdb_free_init_mem();
+	free_initmem();
+	mark_readonly();
+
+	/*
+	 * Kernel mappings are now finalized - update the userspace page-table
+	 * to finalize PTI.
+	 */
+	pti_finalize();
+
+	system_state = SYSTEM_RUNNING;
+	numa_default_policy();
+
+	rcu_end_inkernel_boot();
+
+	do_sysctl_args();
+
+	if (ramdisk_execute_command) {
+		ret = run_init_process(ramdisk_execute_command);
+		if (!ret)
+			return 0;
+		pr_err("Failed to execute %s (error %d)\n",
+		       ramdisk_execute_command, ret);
+	}
+
+	/*
+	 * We try each of these until one succeeds.
+	 *
+	 * The Bourne shell can be used instead of init if we are
+	 * trying to recover a really broken machine.
+	 */
+	if (execute_command) {
+		ret = run_init_process(execute_command);
+		if (!ret)
+			return 0;
+		panic("Requested init %s failed (error %d).",
+		      execute_command, ret);
+	}
+
+	if (CONFIG_DEFAULT_INIT[0] != '\0') {
+		ret = run_init_process(CONFIG_DEFAULT_INIT);
+		if (ret)
+			pr_err("Default init %s failed (error %d)\n",
+			       CONFIG_DEFAULT_INIT, ret);
+		else
+			return 0;
+	}
+
+	if (!try_to_run_init_process("/sbin/init") ||
+	    !try_to_run_init_process("/etc/init") ||
+	    !try_to_run_init_process("/bin/init") ||
+	    !try_to_run_init_process("/bin/sh"))
+		return 0;
+#ifdef CONFIG_FLAGS_UTILS
+/*
+ * Vendor addition: instead of panicking outright, try switching to the
+ * alternate system image and reboot into it.
+ * NOTE(review): the inner 'ret' shadows the outer one, and this printk has
+ * no trailing newline/KERN level continuation — intentional? confirm.
+ */
+{
+	extern int flags_sys_switch(void);
+	int ret;
+	printk(KERN_EMERG "No working init found.  Try passing init= option to kernel. "
+	      "See Linux Documentation/admin-guide/init.rst for guidance.");
+	ret = flags_sys_switch();
+	if (ret < 0)
+		panic("VFS: flags_sys_switch fail");
+	else
+		kernel_restart("VFS: Switch to another system, please reset machine");
+}
+#endif
+	panic("No working init found.  Try passing init= option to kernel. "
+	      "See Linux Documentation/admin-guide/init.rst for guidance.");
+}
+
+/* Open /dev/console, for stdin/stdout/stderr, this should never fail */
+void __init console_on_rootfs(void)
+{
+	struct file *file = filp_open("/dev/console", O_RDWR, 0);
+
+	if (IS_ERR(file)) {
+		pr_err("Warning: unable to open an initial console.\n");
+		return;
+	}
+	/* Install the console as fds 0, 1 and 2 of the init process. */
+	init_dup(file);
+	init_dup(file);
+	init_dup(file);
+	fput(file);
+}
+
+/*
+ * kernel_init_freeable - the __init portion of kernel_init().
+ *
+ * Brings up SMP, the scheduler, workqueues and the remaining initcalls,
+ * opens the initial console and mounts the root filesystem.  Call order
+ * here is significant; do not reorder without care.
+ */
+static noinline void __init kernel_init_freeable(void)
+{
+	/*
+	 * Wait until kthreadd is all set-up.
+	 */
+	wait_for_completion(&kthreadd_done);
+
+	/* Now the scheduler is fully set up and can do blocking allocations */
+	gfp_allowed_mask = __GFP_BITS_MASK;
+
+	/*
+	 * init can allocate pages on any node
+	 */
+	set_mems_allowed(node_states[N_MEMORY]);
+
+	cad_pid = get_pid(task_pid(current));
+
+	smp_prepare_cpus(setup_max_cpus);
+
+	workqueue_init();
+
+	init_mm_internals();
+
+	rcu_init_tasks_generic();
+	do_pre_smp_initcalls();
+	lockup_detector_init();
+
+	smp_init();
+	sched_init_smp();
+
+	padata_init();
+	page_alloc_init_late();
+	/* Initialize page ext after all struct pages are initialized. */
+	page_ext_init();
+
+	do_basic_setup();
+
+	kunit_run_all_tests();
+
+	console_on_rootfs();
+
+	/*
+	 * check if there is an early userspace init.  If yes, let it do all
+	 * the work
+	 */
+	if (init_eaccess(ramdisk_execute_command) != 0) {
+		ramdisk_execute_command = NULL;
+		prepare_namespace();
+	}
+
+	/*
+	 * Ok, we have completed the initial bootup, and
+	 * we're essentially up and running. Get rid of the
+	 * initmem segments and start the user-mode stuff..
+	 *
+	 * rootfs is available now, try loading the public keys
+	 * and default modules
+	 */
+
+	integrity_load_keys();
+}
diff --git a/upstream/linux-5.10/kernel/ramdump/ramdump_device_trans.c b/upstream/linux-5.10/kernel/ramdump/ramdump_device_trans.c
new file mode 100755
index 0000000..40c920d
--- /dev/null
+++ b/upstream/linux-5.10/kernel/ramdump/ramdump_device_trans.c
@@ -0,0 +1,753 @@
+/**
+ * @file oss_ramdump_osa.c
+ * @brief Implementation of Ramdump os adapt
+ *
+ * Copyright (C) 2017 Sanechips Technology Co., Ltd.
+ * @author Qing Wang <wang.qing@sanechips.com.cn>
+ * @ingroup si_ap_oss_ramdump_id
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0 
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/*******************************************************************************
+ *                           Include header files                              *
+ ******************************************************************************/
+#include "ramdump.h"
+#include <linux/lzo.h>
+#include "ramdump_compress.h"
+#ifdef CONFIG_RAMDUMP_EMMC
+#include "ramdump_emmc.h"
+#endif
+#ifdef CONFIG_MTD_SPI_NAND
+#include "ramdump_spinand.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*******************************************************************************
+*                          Extern function declarations                        *
+*******************************************************************************/
+extern unsigned char *ramdump_phy_to_vir(unsigned long phy, unsigned long size);
+extern int dump_printk_text(char *buffer, unsigned long len);
+
+/*******************************************************************************
+*                          Extern variable declarations                        *
+*******************************************************************************/
+extern unsigned int ramdump_compress_flag;
+extern unsigned char *ramdump_log_buf;
+extern unsigned int ramdump_export_mode;
+
+/*******************************************************************************
+ *                             Macro definitions                               *
+ ******************************************************************************/
+/* Length of one interactive command frame (bytes) */
+#define RAMDUMP_INTERACTIVE_DATA_LEN        40
+#define RAMDUMP_INTERACTIVE_ARRAY_LEN       10
+
+/* Command convention between ramdump and the shared-memory peer */
+/* Sync request from the PC side */
+#define RAMDUMP_PC_INTERACTIVE_REQ                  1
+/* Sync response: carries the number of ramdump files */
+#define RAMDUMP_TRANS_SERVER_INTERACTIVE_RSP        2
+/* Request the file information for a given file id */
+#define RAMDUMP_PC_FILE_INFO_READ_REQ               3
+/* Response to the file-info request: carries file name and size */
+#define RAMDUMP_TRANS_SERVER_FILE_INFO_READ_RSP     4
+/* Request the file contents for a given file id */
+#define RAMDUMP_PC_FILE_DATA_TRANS_REQ              5
+/* Response to the data request: carries the file contents */
+#define RAMDUMP_TRANS_SERVER_FILE_DATA_TRANS_RSP    6
+/* Transfer finished */
+#define RAMDUMP_PC_FILE_TRANS_DONE_REQ              7
+/* Transfer-finished acknowledgement */
+#define RAMDUMP_TRANS_SERVER_FILE_TRANS_DONE_RSP    8
+
+/* Error types */
+/* Unknown/invalid command */
+#define RAMDUMP_INTERACTIVE_CMD_ERROR               9
+/* Requested file id is invalid */
+#define RAMDUMP_FILE_NUMBER_ERROR                   10
+/* Requested file offset/size is invalid */
+#define RAMDUMP_FILE_SIZE_ERROR                     11
+
+#define RAMDUMP_DELAY_MS_COUNT (2500)
+
+/*******************************************************************************
+ *                             Type definitions                                *
+ ******************************************************************************/
+/*
+ * Message structures exchanged with the AP over shared memory.
+ */
+
+/* trans_server rsp pc, interactive msg struct */
+typedef struct
+{
+	unsigned int cmd;
+	unsigned int file_num;
+} ramdump_trans_server_interactive_req;
+
+/* trans_server rsp pc, file info msg struct */
+typedef struct
+{
+	unsigned int cmd;
+	char    file_name[RAMDUMP_RAMCONF_FILENAME_MAXLEN];
+	unsigned int file_size;
+} ramdump_trans_server_file_info_req;
+
+/* pc req trans_server, file info msg struct */
+typedef struct
+{
+	unsigned int cmd;
+	unsigned int file_id;
+} ramdump_pc_file_info_rsp;
+
+/* trans_server rsp pc, trans data msg struct */
+typedef struct
+{
+	unsigned int cmd;
+	unsigned int buf_addr;
+	unsigned int buf_left_size;
+} ramdump_trans_server_data_trans_req;
+
+/* pc req trans_server, trans data msg struct */
+typedef struct
+{
+	unsigned int   cmd;
+	unsigned int   file_id;      /* file id              */
+	unsigned int   offset;       /* data offset          */
+	unsigned int   length;       /* data length          */
+} ramdump_pc_trans_data_rsp;
+
+/*******************************************************************************
+ *                        Local function declarations                          *
+ ******************************************************************************/
+
+/*******************************************************************************
+ *                         Local variable definitions                          *
+ ******************************************************************************/
+/* Kernel printk log buffer address/length, captured in ramdump_do_write_logbuf(). */
+char *ramdump_log_buf_region = NULL;
+unsigned int ramdump_log_buf_len = 0;
+
+/*******************************************************************************
+ *                        Global variable definitions                          *
+ ******************************************************************************/
+unsigned char *ramdump_shared_mem_base = NULL;
+unsigned char *ramdump_export_flag_base = NULL;
+int ramdump_file_num = 0;
+ramdump_file_t ramdump_device_fp = {0};
+ramdump_file_t ramdump_spinand_fp = {0};
+ramdump_file_t *g_ramdump_dev_fp;
+unsigned int   ramdump_device_file_cnt = 0;
+unsigned char *ramdump_log_buf = NULL; /* reuses 128KB of the ramdump storage (offset 16KB) */
+
+/*******************************************************************************
+ *                      Inline function implementations                        *
+ ******************************************************************************/
+/*
+ * Crude busy-wait used while polling the shared-memory mailbox.
+ * NOTE(review): the 'ms' argument is ignored — the loop always spins a fixed
+ * 10000 iterations regardless of the requested delay; confirm intent before
+ * relying on the parameter.
+ */
+static inline void ramdump_wait_delay( unsigned long ms)
+{
+	volatile int j = 0;
+	for (j = 0; j < 10000; j++);
+}
+/*******************************************************************************
+ *                      extern function implementations                         *
+ ******************************************************************************/
+
+/*******************************************************************************
+* Description: ramdump_oss_data_trans_write
+* Parameters:
+*   (in)  buffer - data to hand to the peer
+*   (in)  size   - number of bytes in buffer
+* Return:  size on success, -1 if size exceeds the mailbox payload area
+* Notes:   Busy-waits until the peer has released the mailbox
+*          (core_flag==1 && rw_flag==1), then copies the data in and flips
+*          the flags so the peer can consume it.
+*******************************************************************************/
+int ramdump_oss_data_trans_write(unsigned char *buffer, unsigned int size)
+{
+	ramdump_shmem_t *msg = (ramdump_shmem_t *)ramdump_shared_mem_base;
+
+	/*
+	 * Fix: the original had a stray ';' after this if-condition, so an
+	 * oversized request was never rejected and the memcpy() below could
+	 * overflow the shared-memory region.  Reject it up front.
+	 */
+	if (size > (RAMDUMP_SHARED_MEM_LEN - roundup(sizeof(ramdump_shmem_t), RAMDUMP_SHMEM_ALIGN_SIZE)))
+		return -1;
+
+	while (1) {
+		if ((msg->core_flag == 1) && (msg->rw_flag == 1)) {
+			memcpy(msg->buf, buffer, size);
+			msg->size = size;
+			msg->core_flag = 0;
+			msg->rw_flag = 2;	/* hand the mailbox to the peer */
+			return size;
+		}
+		ramdump_wait_delay(0);
+	}
+}
+
+/*******************************************************************************
+* Description: ramdump_oss_data_trans_read
+* Parameters:
+*   (in)  buffer - destination for the peer's payload
+*   (in)  size   - capacity of buffer in bytes
+* Return:  size on success, -1 on oversized request or undersized buffer
+* Notes:   Busy-waits until the peer has posted a message
+*          (core_flag==1 && rw_flag==2), copies it out and releases the
+*          mailbox back to the peer.
+*******************************************************************************/
+int ramdump_oss_data_trans_read(unsigned char *buffer, unsigned int size)
+{
+	ramdump_shmem_t *msg = (ramdump_shmem_t *)ramdump_shared_mem_base;
+
+	/*
+	 * Fix: the original computed -1 here but fell through into the loop
+	 * and overwrote it, so the bound was never enforced.  Reject a
+	 * request larger than the mailbox payload outright.
+	 */
+	if (size > (RAMDUMP_SHARED_MEM_LEN - roundup(sizeof(ramdump_shmem_t), RAMDUMP_SHMEM_ALIGN_SIZE)))
+		return -1;
+
+	while (1) {
+		if ((msg->core_flag == 1) && (msg->rw_flag == 2)) {
+			if (size < msg->size)
+				return -1;	/* caller buffer too small */
+			memcpy(buffer, msg->buf, msg->size);
+			/* NOTE(review): mirrors original behavior — presumably
+			 * advertises our buffer size to the peer; confirm. */
+			msg->size = size;
+			msg->core_flag = 1;
+			msg->rw_flag = 1;	/* release mailbox for the next write */
+			return size;
+		}
+		ramdump_wait_delay(0);
+	}
+}
+
+/*******************************************************************************
+* Description: ramdump_oss_data_trans_init
+* Parameters:  none
+* Return:      void
+* Notes:       Clears the shared-memory mailbox and marks it owned by this
+*              core and writable (core_flag=1, rw_flag=1).
+*******************************************************************************/
+void ramdump_oss_data_trans_init(void)
+{
+	ramdump_shmem_t *msg = (ramdump_shmem_t *)ramdump_shared_mem_base;
+
+	memset(msg, 0, sizeof(ramdump_shmem_t));
+	msg->core_flag = 1;
+	msg->rw_flag = 1;
+}
+
+/*******************************************************************************
+* Description: ramdump_device_init
+* Parameters:  none
+* Return:      0 on success, negative on backend init failure
+* Notes:       Initializes LZO and the export backend (eMMC or SPI-NAND)
+*              selected by ramdump_export_mode, and points g_ramdump_dev_fp
+*              at the matching file table.
+*******************************************************************************/
+int ramdump_device_init(void)
+{
+	int ret = 0;
+
+	ramdump_lzo_init();
+	if(ramdump_export_mode == RAMDUMP_MODE_EMMC)
+	{
+#ifdef CONFIG_RAMDUMP_EMMC
+		ret = ramdump_emmc_init(&ramdump_device_fp);
+#endif
+		g_ramdump_dev_fp = &ramdump_device_fp;
+	}
+	else if(ramdump_export_mode == RAMDUMP_MODE_SPINAND)
+	{
+#ifdef CONFIG_MTD_SPI_NAND
+		ret = ramdump_spinand_init(&ramdump_spinand_fp);
+#endif
+		g_ramdump_dev_fp = &ramdump_spinand_fp;
+	}
+	return ret;
+}
+
+/*******************************************************************************
+* Description: ramdump_device_close
+* Parameters:  none
+* Return:      void
+* Notes:       Closes whichever export backend (eMMC or SPI-NAND) is active.
+*******************************************************************************/
+void ramdump_device_close(void)
+{
+	if(ramdump_export_mode == RAMDUMP_MODE_EMMC)
+	{
+#ifdef CONFIG_RAMDUMP_EMMC
+		ramdump_emmc_close(&ramdump_device_fp);
+#endif
+	}
+	else if (ramdump_export_mode == RAMDUMP_MODE_SPINAND)
+	{
+#ifdef CONFIG_MTD_SPI_NAND
+		ramdump_spinand_close(&ramdump_spinand_fp);
+#endif
+	}
+}
+
+/*******************************************************************************
+* Description: ramdump_fill_header
+* Parameters:
+*   (in)  file_name - name recorded in the header (truncated to fit)
+*   (in)  file_size - file size in bytes
+*   (in)  fp        - device file table to fill
+*   (in)  offset    - storage offset of the file data
+* Return:  0 on success, -1 if the file table is full
+*******************************************************************************/
+int ramdump_fill_header(char *file_name, unsigned int file_size, ramdump_file_t *fp, unsigned int offset)
+{
+	if (ramdump_device_file_cnt >= RAMDUMP_FILE_NUM_MAX)
+		return -1;
+
+	fp->file_fp[ramdump_device_file_cnt].magic = 0x3A3A3A3A;
+	strncpy(fp->file_fp[ramdump_device_file_cnt].file_name, file_name, RAMDUMP_RAMCONF_FILENAME_MAXLEN - 1);
+	/*
+	 * Fix: strncpy() does not NUL-terminate when the source is at least
+	 * as long as the count; terminate explicitly so later "%s" use of
+	 * file_name cannot run off the end of the array.
+	 */
+	fp->file_fp[ramdump_device_file_cnt].file_name[RAMDUMP_RAMCONF_FILENAME_MAXLEN - 1] = '\0';
+	fp->file_fp[ramdump_device_file_cnt].offset = offset;
+	fp->file_fp[ramdump_device_file_cnt].size = file_size;
+	return 0;
+}
+
+/*******************************************************************************
+* Description: ramdump_device_write_file
+* Parameters:
+*   (in)  server_to_cap - file-info message (name and size) from the peer
+* Return:  0 on success, -1 if the backend is full or the header cannot be
+*          recorded
+* Notes:   Records a file header at the current backend write offset for the
+*          active export mode (eMMC or SPI-NAND).
+*******************************************************************************/
+int ramdump_device_write_file(ramdump_trans_server_file_info_req *server_to_cap)
+{
+	int ret = -1;
+	
+	if(ramdump_export_mode == RAMDUMP_MODE_EMMC)
+	{
+#ifdef CONFIG_RAMDUMP_EMMC
+		if (ramdump_emmc_offset >= RAMDUMP_TRANS_EMMC_LEN)
+			return -1;
+
+		ret = ramdump_fill_header(server_to_cap->file_name, 
+								 server_to_cap->file_size, 
+								 &ramdump_device_fp, 
+								 ramdump_emmc_offset);
+#endif
+	}
+	else if(ramdump_export_mode == RAMDUMP_MODE_SPINAND)
+	{
+#ifdef CONFIG_MTD_SPI_NAND
+		if (ramdump_spinand_offset >= RAMDUMP_SPINAND_LEN)
+			return -1;
+
+		ret = ramdump_fill_header(server_to_cap->file_name, 
+								  server_to_cap->file_size,
+								  &ramdump_spinand_fp, 
+								  ramdump_spinand_offset);
+#endif
+	}
+	return ret;
+}
+
+/*******************************************************************************
+* Description: ramdump_device_modify_file_size
+* Parameters:
+*   (in)  file_size - actual number of bytes written for the current file
+* Return:  0 on success, -1 if no device file table is active
+* Notes:   Patches the size field of the current entry (index
+*          ramdump_device_file_cnt) in the active file table.
+*******************************************************************************/
+int ramdump_device_modify_file_size(ssize_t file_size)
+{
+	ramdump_file_t *dev_fp = g_ramdump_dev_fp;
+
+	if (!dev_fp)
+		return -1;
+
+	dev_fp->file_fp[ramdump_device_file_cnt].size = file_size;
+	return 0;
+}
+
+/*******************************************************************************
+* Description: ramdump_device_write_file_head
+* Parameters:  none
+* Return:      0 on success, -1 on failure or when no backend is compiled in
+* Notes:       Flushes the file-header table to the active backend.
+*******************************************************************************/
+int ramdump_device_write_file_head(void)
+{
+	int ret = -1;
+	
+	if(ramdump_export_mode == RAMDUMP_MODE_EMMC)
+	{
+#ifdef CONFIG_RAMDUMP_EMMC
+		ret = ramdump_emmc_write_file_head(&ramdump_device_fp);		
+#endif
+	}
+	else if(ramdump_export_mode == RAMDUMP_MODE_SPINAND)
+	{
+#ifdef CONFIG_MTD_SPI_NAND
+		ret = ramdump_spinand_write_file_head(&ramdump_spinand_fp);
+#endif
+	}
+	return ret;
+}
+
+/*
+ * Dump the formatted printk text ("cap_log_buf.txt") to the active backend,
+ * optionally LZO-compressed through the shared-memory scratch buffer.
+ * Returns the backend write result, or a negative value on failure.
+ */
+int ramdump_do_write_log_txt(ramdump_file_t *fp)
+{
+	int ret = -1;
+	size_t dst_len = 0;	
+	size_t send_len = 0;
+	ramdump_shmem_t *msg = (ramdump_shmem_t *)ramdump_shared_mem_base;
+	char *buf = NULL;
+
+	memset(ramdump_log_buf, 0, RAMDUMP_LOG_BUF);
+	ret = dump_printk_text(ramdump_log_buf, RAMDUMP_LOG_BUF);
+	if(ret < 0){
+		printk("ramdump printk log buf failed!!\n");
+		return ret;
+	}
+	/*
+	 * Fix: previously, with compression disabled, 'buf' could stay NULL
+	 * when dump_printk_text() happened to return 0 (== LZO_E_OK), and the
+	 * backend would then write from a NULL pointer.  Only use the
+	 * compressed output when compression actually ran and succeeded;
+	 * otherwise always fall back to the raw log buffer.
+	 */
+	if (ramdump_compress_flag == 1 &&
+	    ramdump_lzo_compress(ramdump_log_buf, RAMDUMP_LOG_BUF, msg->buf, &dst_len) == LZO_E_OK){
+		buf = msg->buf;
+	}
+	else {
+		dst_len = RAMDUMP_LOG_BUF;
+		buf = ramdump_log_buf;
+	}
+	fp->file_num += 1;
+	fp->file_fp[ramdump_device_file_cnt].magic = 0x3A3A3A3A;
+	strncpy(fp->file_fp[ramdump_device_file_cnt].file_name, "cap_log_buf.txt", RAMDUMP_RAMCONF_FILENAME_MAXLEN - 1);
+
+	if (fp == &ramdump_device_fp)
+	{
+#ifdef CONFIG_RAMDUMP_EMMC
+		fp->file_fp[ramdump_device_file_cnt].size = roundup(dst_len, RAMDUMP_EMMC_ALIGN_SIZE);
+		fp->file_fp[ramdump_device_file_cnt].offset = ramdump_emmc_offset;
+		ret = mmc_bwrite(RAMDUMP_EMMC_ADDR + ramdump_emmc_offset, dst_len, buf);
+		ramdump_emmc_write_file_head(fp);
+		ramdump_emmc_offset = ramdump_emmc_offset + roundup(dst_len, RAMDUMP_EMMC_ALIGN_SIZE);
+#endif
+	}
+	else if (fp == &ramdump_spinand_fp)
+	{
+#ifdef CONFIG_MTD_SPI_NAND
+		send_len = roundup(dst_len, RAMDUMP_FLASH_ALIGN_SIZE);
+		fp->file_fp[ramdump_device_file_cnt].size = send_len;
+		fp->file_fp[ramdump_device_file_cnt].offset = ramdump_spinand_offset;
+		ret = write_data(RAMDUMP_SPINAND_ADDR + ramdump_spinand_offset, send_len, buf);
+		ramdump_spinand_offset = ramdump_spinand_offset + send_len;
+#endif
+	}
+	else
+	{
+		printk("ramdump_do_write_logbuf error fp!\n");
+		return -1;
+	}
+	ramdump_device_file_cnt += 1;
+	return ret;
+}
+
+/*
+ * Dump the raw printk ring buffer ("cap_log_buf.bin") to the active backend,
+ * optionally LZO-compressed, then chain into ramdump_do_write_log_txt() for
+ * the formatted text version.  Returns the backend write result.
+ */
+int ramdump_do_write_logbuf(ramdump_file_t *fp)
+{
+	char   *buf = NULL;
+	int	ret  = -1;
+	size_t dst_len = 0;
+	size_t send_len = 0;
+	ramdump_shmem_t *msg = (ramdump_shmem_t *)ramdump_shared_mem_base;
+
+	if(!fp)
+	{
+		printk("ramdump_do_write_logbuf error: fp is Null\n");
+		return -1;
+	}
+
+	/* Locate the kernel log ring buffer. */
+	ramdump_log_buf_region = log_buf_addr_get();
+	ramdump_log_buf_len = log_buf_len_get();
+
+	if (ramdump_compress_flag == 1){
+		ret = ramdump_lzo_compress(ramdump_log_buf_region, ramdump_log_buf_len, msg->buf, &dst_len);
+		buf = msg->buf;
+	}
+	/* With compression off, ret is still -1 here, so this always falls
+	 * back to the raw buffer; it also covers compression failure. */
+	if (ret != LZO_E_OK){
+		dst_len = ramdump_log_buf_len;
+		buf = ramdump_log_buf_region;
+	}
+
+	fp->file_num += 1;
+	fp->file_fp[ramdump_device_file_cnt].magic = 0x3A3A3A3A;
+	strncpy(fp->file_fp[ramdump_device_file_cnt].file_name, "cap_log_buf.bin", RAMDUMP_RAMCONF_FILENAME_MAXLEN - 1);
+
+	if (fp == &ramdump_device_fp)
+	{
+#ifdef CONFIG_RAMDUMP_EMMC
+		fp->file_fp[ramdump_device_file_cnt].size = roundup(dst_len, RAMDUMP_EMMC_ALIGN_SIZE);
+		ret = mmc_bwrite(RAMDUMP_EMMC_ADDR + ramdump_emmc_offset, dst_len, buf);
+		fp->file_fp[ramdump_device_file_cnt].offset = ramdump_emmc_offset;
+		ramdump_emmc_write_file_head(fp);
+		ramdump_emmc_offset = ramdump_emmc_offset + roundup(dst_len, RAMDUMP_EMMC_ALIGN_SIZE);
+#endif
+	}
+	else if (fp == &ramdump_spinand_fp)
+	{
+#ifdef CONFIG_MTD_SPI_NAND
+		send_len = roundup(dst_len, RAMDUMP_FLASH_ALIGN_SIZE);
+		fp->file_fp[ramdump_device_file_cnt].size = send_len;
+		fp->file_fp[ramdump_device_file_cnt].offset = ramdump_spinand_offset;
+		ret = write_data(RAMDUMP_SPINAND_ADDR + ramdump_spinand_offset, send_len, buf);
+		ramdump_spinand_offset = ramdump_spinand_offset + send_len;
+#endif
+	}
+	else
+	{
+		printk("ramdump_do_write_logbuf error fp!\n");
+		return -1;
+	}
+	
+	ramdump_device_file_cnt += 1;
+	ramdump_do_write_log_txt(fp);
+
+	return ret;
+}
+
+/*******************************************************************************
+* Description: ramdump_device_write_logbuf
+* Parameters:  none
+* Return:      result of ramdump_do_write_logbuf() (negative on failure)
+* Notes:       Writes the CAP printk log buffer to the active export device
+*              and reports success or failure.
+*******************************************************************************/
+int ramdump_device_write_logbuf(void)
+{
+	int ret = ramdump_do_write_logbuf(g_ramdump_dev_fp);
+
+	ramdump_printf(ret < 0
+		? "device memory trans file:cap_log_buf error!!!\n"
+		: "device memory trans file:cap_log_buf success!!!\n");
+	return ret;
+}
+
+/*******************************************************************************
+* Description: ramdump_device_write_data
+* Parameters:
+*   (in)  msg    - shared-memory mailbox holding the payload
+*   (in)  size   - remaining bytes of the current file
+*   (out) dstlen - accumulated bytes written for the file (reset on error)
+* Return:  backend write result; 0 when no backend matches
+*******************************************************************************/
+int ramdump_device_write_data(ramdump_shmem_t *msg, unsigned int size, ssize_t *dstlen)
+{
+	int ret = 0;
+		
+	if(ramdump_export_mode == RAMDUMP_MODE_EMMC)
+	{		
+#ifdef CONFIG_RAMDUMP_EMMC
+		ret = ramdump_emmc_write_data(msg, &ramdump_device_fp, size);
+		if(ret < 0)
+			*dstlen = 0;
+		else
+			*dstlen += roundup(ret, RAMDUMP_EMMC_ALIGN_SIZE);
+#endif
+	}
+	else if(ramdump_export_mode == RAMDUMP_MODE_SPINAND)
+	{
+#ifdef CONFIG_MTD_SPI_NAND
+		ret = ramdump_spinand_write_data(msg, &ramdump_spinand_fp, size);
+		if(ret < 0)
+			*dstlen = 0;
+		else
+			*dstlen += ret;
+#endif
+	}
+	else
+		return 0;
+	return ret;
+}
+
+/*******************************************************************************
+ *                  Global function implementations                            *
+ ******************************************************************************/
+/*
+ * Map the shared-memory region and the export-mode flag into kernel virtual
+ * space, place the log staging buffer at offset 0x4000 inside the shared
+ * region, and allocate the flash transfer buffer.
+ */
+void ramdump_shared_mem_init(void)
+{
+	ramdump_shared_mem_base = ramdump_phy_to_vir((unsigned long)RAMDUMP_SHARED_MEM_BASE, (unsigned long)RAMDUMP_MEM_LEN);
+	ramdump_export_flag_base = ramdump_phy_to_vir((unsigned long)IRAM_BASE_ADDR_RAMDUMP_MODE, sizeof(unsigned long));
+	ramdump_log_buf = ramdump_shared_mem_base + 0x4000;
+	ramdump_flash_alloc_transbuf();
+}
+
+/*******************************************************************************
+* Description: ramdump_data_transfer_to_device
+* Parameters:  none
+* Return:      void
+* Notes:       Protocol state machine that pulls dump files from the peer via
+*              the shared-memory mailbox and writes them to the export device.
+*              Flow: sync -> per-file info request -> chunked data requests
+*              (at most data_trans_max bytes each) -> done; finally appends
+*              the CAP log buffer and closes the backend.
+*******************************************************************************/
+void ramdump_data_transfer_to_device(void)
+{
+	int data_trans_max;	/* largest payload per data request */
+	int file_cnt = 0;	/* index of the file currently transferred */
+	int file_size = 0;
+	int file_offset = 0;
+	int file_left_size = 0;
+	int file_trans_size = 0;
+	int error_cmd = 0;
+	int ret = 0;
+	ssize_t file_dstlen = 0;	/* bytes actually written for the current file */
+
+	unsigned int req_buf[RAMDUMP_INTERACTIVE_ARRAY_LEN] = {0};
+	ramdump_trans_server_interactive_req cap_to_server_msg = {0};
+
+	/* interactive begin */
+	if(ramdump_device_init() < 0)
+		return;
+	data_trans_max = RAMDUMP_SHARED_MEM_LEN - roundup(sizeof(ramdump_shmem_t), RAMDUMP_SHMEM_ALIGN_SIZE) - RAMDUMP_COMPRESS_OUT_LEN;
+	cap_to_server_msg.cmd = RAMDUMP_PC_INTERACTIVE_REQ;
+	ramdump_oss_data_trans_write((unsigned char*)(&cap_to_server_msg), sizeof(cap_to_server_msg));
+
+	for(;;)
+	{
+		ramdump_oss_data_trans_read((unsigned char *)req_buf, RAMDUMP_INTERACTIVE_DATA_LEN);
+		switch (*(unsigned int *)req_buf)	/* first word is the command */
+		{
+			case RAMDUMP_TRANS_SERVER_INTERACTIVE_RSP:
+			{
+				ramdump_pc_file_info_rsp cap_to_server_msg ={0};
+				ramdump_trans_server_interactive_req *server_to_cap_msg = (ramdump_trans_server_interactive_req *)req_buf;
+				/* data from server to cap */
+				ramdump_file_num = server_to_cap_msg->file_num;
+				ramdump_device_fp.file_num  = ramdump_file_num;
+				ramdump_spinand_fp.file_num = ramdump_file_num;
+
+				/* data from cap to server */
+				cap_to_server_msg.cmd = RAMDUMP_PC_FILE_INFO_READ_REQ;
+				cap_to_server_msg.file_id = file_cnt;
+
+				ramdump_oss_data_trans_write(
+					(unsigned char*)(&cap_to_server_msg),
+					sizeof(cap_to_server_msg));
+
+				break;
+			}
+			case RAMDUMP_TRANS_SERVER_FILE_INFO_READ_RSP:
+			{
+				ramdump_pc_trans_data_rsp cap_to_server_msg = {0};
+				ramdump_trans_server_file_info_req *server_to_cap_msg = (ramdump_trans_server_file_info_req *)req_buf;
+				/* data from server to cap */
+				/*device memory file create*/
+				if(ramdump_device_write_file(server_to_cap_msg) == -1){
+					cap_to_server_msg.cmd = RAMDUMP_PC_FILE_TRANS_DONE_REQ;
+					ramdump_device_write_file_head();/* ensure files written before the error stay valid */
+					ramdump_printf("ramdump write emmc file error!\n");
+				}
+				file_size = server_to_cap_msg->file_size;
+				file_offset = 0;
+				file_left_size = file_size;
+				/* data from cap to server: request the first chunk */
+				cap_to_server_msg.cmd = RAMDUMP_PC_FILE_DATA_TRANS_REQ;
+				cap_to_server_msg.file_id = file_cnt;
+				cap_to_server_msg.offset = file_offset;
+				if (file_size >= data_trans_max)
+					cap_to_server_msg.length = data_trans_max;
+				else
+					cap_to_server_msg.length = file_size;
+				file_trans_size = cap_to_server_msg.length;
+				file_left_size = file_left_size - cap_to_server_msg.length;
+				file_offset = file_offset + cap_to_server_msg.length;
+				printk("device memory trans file:%s !!!\n", server_to_cap_msg->file_name);
+				/* interactive data trans */
+				ramdump_oss_data_trans_write(
+					(unsigned char*)(&cap_to_server_msg), 
+					sizeof(cap_to_server_msg));
+
+				break;
+			}
+			case RAMDUMP_TRANS_SERVER_FILE_DATA_TRANS_RSP:
+			{
+				int write_len = 0;
+				ramdump_pc_trans_data_rsp cap_to_server_msg = {0};
+				/* data from server to cap */
+				ramdump_shmem_t *server_to_cap_msg = (ramdump_shmem_t *)ramdump_shared_mem_base;
+				server_to_cap_msg->core_flag = 0;
+				/*data from cap to emmc*/
+				
+				write_len = ramdump_device_write_data(server_to_cap_msg, file_left_size, &file_dstlen);
+				if(write_len < 0)
+				{
+					ramdump_printf("ramdump write emmc data error!\n");
+				}
+				
+				/* check remaining size */
+				if (file_left_size == 0)
+				{
+					/* file complete: move to the next file or finish */
+					file_cnt++;
+					if (file_cnt == ramdump_file_num)
+					{
+						cap_to_server_msg.cmd = RAMDUMP_PC_FILE_TRANS_DONE_REQ;
+					}
+					else
+					{
+						cap_to_server_msg.cmd = RAMDUMP_PC_FILE_INFO_READ_REQ;
+						cap_to_server_msg.file_id = file_cnt;
+					}
+					ramdump_device_modify_file_size(file_dstlen);
+					file_dstlen = 0;
+					ramdump_device_file_cnt++;
+				}
+				else
+				{
+					/* data from cap to server: request the next chunk */
+					if (file_left_size >= data_trans_max)
+						cap_to_server_msg.length = data_trans_max;
+					else
+						cap_to_server_msg.length = file_left_size;
+					cap_to_server_msg.cmd = RAMDUMP_PC_FILE_DATA_TRANS_REQ;
+					cap_to_server_msg.file_id = file_cnt;
+					cap_to_server_msg.offset = file_offset;
+					file_left_size = file_left_size - cap_to_server_msg.length;
+					file_offset= file_offset + cap_to_server_msg.length;
+				}
+
+				ramdump_oss_data_trans_write((unsigned char *)(&cap_to_server_msg), sizeof(cap_to_server_msg));
+				continue;
+			}
+			case RAMDUMP_TRANS_SERVER_FILE_TRANS_DONE_RSP:
+			{
+				/* all files transferred: append the log buffer and close */
+				ramdump_device_write_logbuf();
+				ramdump_device_close();
+				return;
+			}
+			default:
+			{
+				error_cmd = RAMDUMP_INTERACTIVE_CMD_ERROR;
+				ramdump_printf("ramdump trans emmc error:%d!\n", error_cmd);
+				/* interactive data trans */
+				break;
+			}
+		}
+	}
+}
+
+#ifdef __cplusplus
+}
+#endif
+
diff --git a/upstream/linux-5.10/net/8021q/vlan_dev.c b/upstream/linux-5.10/net/8021q/vlan_dev.c
new file mode 100755
index 0000000..86a1c99
--- /dev/null
+++ b/upstream/linux-5.10/net/8021q/vlan_dev.c
@@ -0,0 +1,842 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* -*- linux-c -*-
+ * INET		802.1Q VLAN
+ *		Ethernet-type device handling.
+ *
+ * Authors:	Ben Greear <greearb@candelatech.com>
+ *              Please send support related email to: netdev@vger.kernel.org
+ *              VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
+ *
+ * Fixes:       Mar 22 2001: Martin Bokaemper <mbokaemper@unispherenetworks.com>
+ *                - reset skb->pkt_type on incoming packets when MAC was changed
+ *                - see that changed MAC is saddr for outgoing packets
+ *              Oct 20, 2001:  Ard van Breeman:
+ *                - Fix MC-list, finally.
+ *                - Flush MC-list on VLAN destroy.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <net/arp.h>
+
+#include "vlan.h"
+#include "vlanproc.h"
+#include <linux/if_vlan.h>
+#include <linux/netpoll.h>
+
+/*
+ *	Create the VLAN header for an arbitrary protocol layer
+ *
+ *	saddr=NULL	means use device source address
+ *	daddr=NULL	means leave destination address (eg unresolved arp)
+ *
+ *  This is called when the SKB is moving down the stack towards the
+ *  physical devices.
+ */
+static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
+				unsigned short type,
+				const void *daddr, const void *saddr,
+				unsigned int len)
+{
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+	struct vlan_hdr *vhdr;
+	unsigned int vhdrlen = 0;
+	u16 vlan_tci = 0;
+	int rc;
+
+	/* If the underlying NIC cannot strip/insert the tag in hardware
+	 * (REORDER_HDR cleared), build the 802.1Q header in software here. */
+	if (!(vlan->flags & VLAN_FLAG_REORDER_HDR)) {
+		vhdr = skb_push(skb, VLAN_HLEN);
+
+		/* TCI = VLAN id | egress priority derived from skb->priority */
+		vlan_tci = vlan->vlan_id;
+		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
+		vhdr->h_vlan_TCI = htons(vlan_tci);
+
+		/*
+		 *  Set the protocol type. For a packet of type ETH_P_802_3/2 we
+		 *  put the length in here instead.
+		 */
+		if (type != ETH_P_802_3 && type != ETH_P_802_2)
+			vhdr->h_vlan_encapsulated_proto = htons(type);
+		else
+			vhdr->h_vlan_encapsulated_proto = htons(len);
+
+		/* Outer ethertype becomes the VLAN protocol (802.1Q/802.1ad) */
+		skb->protocol = vlan->vlan_proto;
+		type = ntohs(vlan->vlan_proto);
+		vhdrlen = VLAN_HLEN;
+	}
+
+	/* Before delegating work to the lower layer, enter our MAC-address */
+	if (saddr == NULL)
+		saddr = dev->dev_addr;
+
+	/* Now make the underlying real hard header */
+	dev = vlan->real_dev;
+	rc = dev_hard_header(skb, dev, type, daddr, saddr, len + vhdrlen);
+	if (rc > 0)
+		rc += vhdrlen;	/* report total header length incl. VLAN tag */
+	return rc;
+}
+
+/* Transmit via netpoll when a netconsole/netpoll client is active on this
+ * vlan device; must never be reached when netpoll support is compiled out. */
+static inline netdev_tx_t vlan_netpoll_send_skb(struct vlan_dev_priv *vlan, struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	return netpoll_send_skb(vlan->netpoll, skb);
+#else
+	BUG();	/* caller checks netpoll_tx_running(); this path is unreachable */
+	return NETDEV_TX_OK;
+#endif
+}
+
+/* ndo_start_xmit: tag the frame (in skb metadata, for HW accel) if needed,
+ * redirect it to the real device and account per-CPU TX statistics. */
+static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
+					    struct net_device *dev)
+{
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
+	unsigned int len;
+	int ret;
+
+	/* Handle non-VLAN frames if they are sent to us, for example by DHCP.
+	 *
+	 * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
+	 * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
+	 */
+	if (veth->h_vlan_proto != vlan->vlan_proto ||
+	    vlan->flags & VLAN_FLAG_REORDER_HDR) {
+		u16 vlan_tci;
+		vlan_tci = vlan->vlan_id;
+		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
+		/* store tag out-of-line; real_dev (or software fallback)
+		 * inserts it on transmit */
+		__vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci);
+	}
+
+	skb->dev = vlan->real_dev;
+	len = skb->len;		/* snapshot before xmit may free the skb */
+	if (unlikely(netpoll_tx_running(dev)))
+		return vlan_netpoll_send_skb(vlan, skb);
+
+	ret = dev_queue_xmit(skb);
+
+	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
+		struct vlan_pcpu_stats *stats;
+
+		/* u64 stats protected by the per-CPU syncp sequence counter */
+		stats = this_cpu_ptr(vlan->vlan_pcpu_stats);
+		u64_stats_update_begin(&stats->syncp);
+		stats->tx_packets++;
+		stats->tx_bytes += len;
+		u64_stats_update_end(&stats->syncp);
+	} else {
+		this_cpu_inc(vlan->vlan_pcpu_stats->tx_dropped);
+	}
+
+	return ret;
+}
+
+/* ndo_change_mtu: accept the new MTU only if it fits inside the real
+ * device's MTU (minus VLAN_HLEN when the lower device reserves tag room). */
+static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+	unsigned int max_mtu = real_dev->mtu;
+
+	if (netif_reduces_vlan_mtu(real_dev))
+		max_mtu -= VLAN_HLEN;
+	if (max_mtu < new_mtu)
+		return -ERANGE;
+
+	dev->mtu = new_mtu;
+
+	return 0;
+}
+
+/* Map an incoming 802.1p priority (0-7) to an skb->priority value and keep
+ * the count of non-zero mappings up to date for procfs reporting. */
+void vlan_dev_set_ingress_priority(const struct net_device *dev,
+				   u32 skb_prio, u16 vlan_prio)
+{
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+
+	/* track transitions between zero and non-zero mappings only */
+	if (vlan->ingress_priority_map[vlan_prio & 0x7] && !skb_prio)
+		vlan->nr_ingress_mappings--;
+	else if (!vlan->ingress_priority_map[vlan_prio & 0x7] && skb_prio)
+		vlan->nr_ingress_mappings++;
+
+	vlan->ingress_priority_map[vlan_prio & 0x7] = skb_prio;
+}
+
+/* Map an skb->priority value to an egress 802.1p priority.  Entries live in
+ * a 16-bucket hash of singly-linked lists keyed by (skb_prio & 0xF); lookup
+ * on the TX path is lockless, hence the smp_wmb() before publication.
+ * Returns 0 on success or -ENOBUFS if a new mapping cannot be allocated. */
+int vlan_dev_set_egress_priority(const struct net_device *dev,
+				 u32 skb_prio, u16 vlan_prio)
+{
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+	struct vlan_priority_tci_mapping *mp = NULL;
+	struct vlan_priority_tci_mapping *np;
+	u32 vlan_qos = (vlan_prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
+
+	/* See if a priority mapping exists.. */
+	mp = vlan->egress_priority_map[skb_prio & 0xF];
+	while (mp) {
+		if (mp->priority == skb_prio) {
+			/* update in place; keep non-zero mapping count */
+			if (mp->vlan_qos && !vlan_qos)
+				vlan->nr_egress_mappings--;
+			else if (!mp->vlan_qos && vlan_qos)
+				vlan->nr_egress_mappings++;
+			mp->vlan_qos = vlan_qos;
+			return 0;
+		}
+		mp = mp->next;
+	}
+
+	/* Create a new mapping then. */
+	mp = vlan->egress_priority_map[skb_prio & 0xF];
+	np = kmalloc(sizeof(struct vlan_priority_tci_mapping), GFP_KERNEL);
+	if (!np)
+		return -ENOBUFS;
+
+	np->next = mp;
+	np->priority = skb_prio;
+	np->vlan_qos = vlan_qos;
+	/* Before inserting this element in hash table, make sure all its fields
+	 * are committed to memory.
+	 * coupled with smp_rmb() in vlan_dev_get_egress_qos_mask()
+	 */
+	smp_wmb();
+	vlan->egress_priority_map[skb_prio & 0xF] = np;
+	if (vlan_qos)
+		vlan->nr_egress_mappings++;
+	return 0;
+}
+
+/* Flags are defined in the vlan_flags enum in
+ * include/uapi/linux/if_vlan.h file.
+ */
+/* Update the vlan device flags under @mask and trigger GVRP/MVRP
+ * join/leave when the respective flag toggles on a running device.
+ * Returns -EINVAL if @mask contains an unknown flag. */
+int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
+{
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+	u32 old_flags = vlan->flags;
+
+	/* reject any bit outside the supported flag set */
+	if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
+		     VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP |
+		     VLAN_FLAG_BRIDGE_BINDING))
+		return -EINVAL;
+
+	vlan->flags = (old_flags & ~mask) | (flags & mask);
+
+	/* react only to flags that actually changed (old ^ new) */
+	if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_GVRP) {
+		if (vlan->flags & VLAN_FLAG_GVRP)
+			vlan_gvrp_request_join(dev);
+		else
+			vlan_gvrp_request_leave(dev);
+	}
+
+	if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_MVRP) {
+		if (vlan->flags & VLAN_FLAG_MVRP)
+			vlan_mvrp_request_join(dev);
+		else
+			vlan_mvrp_request_leave(dev);
+	}
+	return 0;
+}
+
+/* Copy the lower device's name into @result (at most 23 bytes).
+ * NOTE(review): the 23-byte limit presumably matches the ioctl reply buffer
+ * of the caller; strncpy() will NOT NUL-terminate if the name is >= 23
+ * chars — confirm the caller's buffer contract. */
+void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)
+{
+	strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23);
+}
+
+/* If the vlan device still uses an address "stolen" from its lower device,
+ * re-copy the (possibly changed) lower MAC and notify listeners.
+ * Returns true when the address was inherited, false otherwise. */
+bool vlan_dev_inherit_address(struct net_device *dev,
+			      struct net_device *real_dev)
+{
+	if (dev->addr_assign_type != NET_ADDR_STOLEN)
+		return false;	/* user assigned an explicit MAC; keep it */
+
+	ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+	return true;
+}
+
+/* ndo_open: bring the vlan device up.  Registers our unicast address with
+ * the lower device (unless shared), propagates allmulti/promisc counts,
+ * requests GVRP/MVRP membership and mirrors carrier state.  On failure the
+ * partial setup is unwound in reverse order via the labels below. */
+static int vlan_dev_open(struct net_device *dev)
+{
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+	struct net_device *real_dev = vlan->real_dev;
+	int err;
+
+	/* without LOOSE_BINDING the vlan device may only come up while the
+	 * lower device is up */
+	if (!(real_dev->flags & IFF_UP) &&
+	    !(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
+		return -ENETDOWN;
+
+	/* own MAC differs from the lower device's: register it as a
+	 * secondary unicast address so frames are accepted */
+	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr) &&
+	    !vlan_dev_inherit_address(dev, real_dev)) {
+		err = dev_uc_add(real_dev, dev->dev_addr);
+		if (err < 0)
+			goto out;
+	}
+
+	if (dev->flags & IFF_ALLMULTI) {
+		err = dev_set_allmulti(real_dev, 1);
+		if (err < 0)
+			goto del_unicast;
+	}
+	if (dev->flags & IFF_PROMISC) {
+		err = dev_set_promiscuity(real_dev, 1);
+		if (err < 0)
+			goto clear_allmulti;
+	}
+
+	/* remember the lower MAC so later changes can be detected */
+	ether_addr_copy(vlan->real_dev_addr, real_dev->dev_addr);
+
+	if (vlan->flags & VLAN_FLAG_GVRP)
+		vlan_gvrp_request_join(dev);
+
+	if (vlan->flags & VLAN_FLAG_MVRP)
+		vlan_mvrp_request_join(dev);
+
+	/* with BRIDGE_BINDING carrier is managed by the bridge instead */
+	if (netif_carrier_ok(real_dev) &&
+	    !(vlan->flags & VLAN_FLAG_BRIDGE_BINDING))
+		netif_carrier_on(dev);
+	return 0;
+
+clear_allmulti:
+	if (dev->flags & IFF_ALLMULTI)
+		dev_set_allmulti(real_dev, -1);
+del_unicast:
+	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
+		dev_uc_del(real_dev, dev->dev_addr);
+out:
+	netif_carrier_off(dev);
+	return err;
+}
+
+/* ndo_stop: undo everything vlan_dev_open() did — unsync address lists,
+ * drop allmulti/promisc references and remove our unicast address from
+ * the lower device.  Always returns 0. */
+static int vlan_dev_stop(struct net_device *dev)
+{
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+	struct net_device *real_dev = vlan->real_dev;
+
+	dev_mc_unsync(real_dev, dev);
+	dev_uc_unsync(real_dev, dev);
+	if (dev->flags & IFF_ALLMULTI)
+		dev_set_allmulti(real_dev, -1);
+	if (dev->flags & IFF_PROMISC)
+		dev_set_promiscuity(real_dev, -1);
+
+	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
+		dev_uc_del(real_dev, dev->dev_addr);
+
+	/* with BRIDGE_BINDING the bridge owns the carrier state */
+	if (!(vlan->flags & VLAN_FLAG_BRIDGE_BINDING))
+		netif_carrier_off(dev);
+	return 0;
+}
+
+/* ndo_set_mac_address: change the vlan device's MAC.  While the device is
+ * up, keep the lower device's unicast filter consistent: add the new
+ * address (if it differs from the lower MAC) before removing the old one.
+ * Returns 0 or a negative errno from dev_uc_add(). */
+static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
+{
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+	struct sockaddr *addr = p;
+	int err;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	/* device down: no filter programmed yet, just record the address */
+	if (!(dev->flags & IFF_UP))
+		goto out;
+
+	if (!ether_addr_equal(addr->sa_data, real_dev->dev_addr)) {
+		err = dev_uc_add(real_dev, addr->sa_data);
+		if (err < 0)
+			return err;
+	}
+
+	/* old address no longer needed unless it equals the lower MAC */
+	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
+		dev_uc_del(real_dev, dev->dev_addr);
+
+out:
+	ether_addr_copy(dev->dev_addr, addr->sa_data);
+	return 0;
+}
+
+/* ndo_do_ioctl: forward MII and hardware-timestamping ioctls to the real
+ * device, rewriting ifr_name so the lower driver sees its own name.
+ * Returns the lower driver's result, or -EOPNOTSUPP for anything else. */
+static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+	const struct net_device_ops *ops = real_dev->netdev_ops;
+	struct ifreq ifrr;
+	int err = -EOPNOTSUPP;
+
+	/* build a request addressed to the lower device */
+	strncpy(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
+	ifrr.ifr_ifru = ifr->ifr_ifru;
+
+	switch (cmd) {
+	case SIOCSHWTSTAMP:
+		/* HW timestamp configuration only allowed in init netns */
+		if (!net_eq(dev_net(dev), &init_net))
+			break;
+		fallthrough;
+	case SIOCGMIIPHY:
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+	case SIOCGHWTSTAMP:
+		if (netif_device_present(real_dev) && ops->ndo_do_ioctl)
+			err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd);
+		break;
+	}
+
+	/* copy back any data the lower driver returned */
+	if (!err)
+		ifr->ifr_ifru = ifrr.ifr_ifru;
+
+	return err;
+}
+
+/* ndo_neigh_setup: delegate neighbour-parameter setup to the real device
+ * if its driver provides a hook; otherwise succeed silently. */
+static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
+{
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+	const struct net_device_ops *ops = real_dev->netdev_ops;
+	int err = 0;
+
+	if (netif_device_present(real_dev) && ops->ndo_neigh_setup)
+		err = ops->ndo_neigh_setup(real_dev, pa);
+
+	return err;
+}
+
+#if IS_ENABLED(CONFIG_FCOE)
+/* FCoE pass-through helpers: the vlan device itself has no FCoE state;
+ * each hook simply forwards to the lower device's corresponding ndo_*
+ * callback when the driver implements it. */
+
+/* Set up DDP context @xid on the lower device; 0 when unsupported. */
+static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid,
+				   struct scatterlist *sgl, unsigned int sgc)
+{
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+	const struct net_device_ops *ops = real_dev->netdev_ops;
+	int rc = 0;
+
+	if (ops->ndo_fcoe_ddp_setup)
+		rc = ops->ndo_fcoe_ddp_setup(real_dev, xid, sgl, sgc);
+
+	return rc;
+}
+
+/* Tear down DDP context @xid; returns the lower driver's length result. */
+static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid)
+{
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+	const struct net_device_ops *ops = real_dev->netdev_ops;
+	int len = 0;
+
+	if (ops->ndo_fcoe_ddp_done)
+		len = ops->ndo_fcoe_ddp_done(real_dev, xid);
+
+	return len;
+}
+
+/* Enable FCoE offload on the lower device; -EINVAL when unsupported. */
+static int vlan_dev_fcoe_enable(struct net_device *dev)
+{
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+	const struct net_device_ops *ops = real_dev->netdev_ops;
+	int rc = -EINVAL;
+
+	if (ops->ndo_fcoe_enable)
+		rc = ops->ndo_fcoe_enable(real_dev);
+	return rc;
+}
+
+/* Disable FCoE offload on the lower device; -EINVAL when unsupported. */
+static int vlan_dev_fcoe_disable(struct net_device *dev)
+{
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+	const struct net_device_ops *ops = real_dev->netdev_ops;
+	int rc = -EINVAL;
+
+	if (ops->ndo_fcoe_disable)
+		rc = ops->ndo_fcoe_disable(real_dev);
+	return rc;
+}
+
+/* Set up a DDP context for FCoE target mode; 0 when unsupported. */
+static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
+				    struct scatterlist *sgl, unsigned int sgc)
+{
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+	const struct net_device_ops *ops = real_dev->netdev_ops;
+	int rc = 0;
+
+	if (ops->ndo_fcoe_ddp_target)
+		rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc);
+
+	return rc;
+}
+#endif
+
+#ifdef NETDEV_FCOE_WWNN
+/* Fetch the FCoE world-wide node/port name from the lower device. */
+static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
+{
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+	const struct net_device_ops *ops = real_dev->netdev_ops;
+	int rc = -EINVAL;
+
+	if (ops->ndo_fcoe_get_wwn)
+		rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
+	return rc;
+}
+#endif
+
+/* ndo_change_rx_flags: propagate ALLMULTI/PROMISC reference-count changes
+ * to the real device, but only while the vlan device is up (open/stop
+ * handle the transitions around that). */
+static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
+{
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+
+	if (dev->flags & IFF_UP) {
+		/* +1 when the flag was just set, -1 when it was cleared */
+		if (change & IFF_ALLMULTI)
+			dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+		if (change & IFF_PROMISC)
+			dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1);
+	}
+}
+
+/* ndo_set_rx_mode: mirror this device's multicast and unicast address
+ * lists into the lower device's filters. */
+static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
+{
+	dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+	dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+}
+
+/*
+ * vlan network devices have devices nesting below it, and are a special
+ * "super class" of normal network devices; split their locks off into a
+ * separate class since they always nest.
+ */
+static struct lock_class_key vlan_netdev_xmit_lock_key;
+static struct lock_class_key vlan_netdev_addr_lock_key;
+
+/* Assign the vlan-specific lockdep class to one TX queue's xmit lock;
+ * callback for netdev_for_each_tx_queue(). */
+static void vlan_dev_set_lockdep_one(struct net_device *dev,
+				     struct netdev_queue *txq,
+				     void *unused)
+{
+	lockdep_set_class(&txq->_xmit_lock, &vlan_netdev_xmit_lock_key);
+}
+
+/* Put the addr_list_lock and every TX queue lock of @dev into the vlan
+ * lockdep classes so nested locking (vlan over real dev) doesn't warn. */
+static void vlan_dev_set_lockdep_class(struct net_device *dev)
+{
+	lockdep_set_class(&dev->addr_list_lock,
+			  &vlan_netdev_addr_lock_key);
+	netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, NULL);
+}
+
+/* header_ops used when the VLAN tag must be built in software. */
+static const struct header_ops vlan_header_ops = {
+	.create	 = vlan_dev_hard_header,
+	.parse	 = eth_header_parse,
+};
+
+/* Header creation when the lower device handles VLAN tags in hardware:
+ * no software tag is inserted, just build the lower device's header. */
+static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev,
+				     unsigned short type,
+				     const void *daddr, const void *saddr,
+				     unsigned int len)
+{
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+	struct net_device *real_dev = vlan->real_dev;
+
+	if (saddr == NULL)
+		saddr = dev->dev_addr;
+
+	return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
+}
+
+/* header_ops used when VLAN tag insertion is offloaded to hardware. */
+static const struct header_ops vlan_passthru_header_ops = {
+	.create	 = vlan_passthru_hard_header,
+	.parse	 = eth_header_parse,
+};
+
+/* sysfs device type ("vlan") for these interfaces. */
+static struct device_type vlan_type = {
+	.name	= "vlan",
+};
+
+static const struct net_device_ops vlan_netdev_ops;
+
+/* ndo_init: one-time setup when the vlan device is registered.  Inherits
+ * flags, state, features, MAC and broadcast addresses from the real
+ * device, selects hardware-accelerated or software header ops, installs
+ * the lockdep classes and allocates per-CPU statistics.
+ * Returns 0 or -ENOMEM. */
+static int vlan_dev_init(struct net_device *dev)
+{
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+	struct net_device *real_dev = vlan->real_dev;
+
+	netif_carrier_off(dev);
+
+	/* IFF_BROADCAST|IFF_MULTICAST; ??? */
+	dev->flags  = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
+					  IFF_MASTER | IFF_SLAVE);
+	/* mirror carrier/dormant state; mark device present */
+	dev->state  = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
+					  (1<<__LINK_STATE_DORMANT))) |
+		      (1<<__LINK_STATE_PRESENT);
+
+	/* bridge-managed carrier starts out off */
+	if (vlan->flags & VLAN_FLAG_BRIDGE_BINDING)
+		dev->state |= (1 << __LINK_STATE_NOCARRIER);
+
+	dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG |
+			   NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE |
+			   NETIF_F_GSO_ENCAP_ALL |
+			   NETIF_F_HIGHDMA | NETIF_F_SCTP_CRC |
+			   NETIF_F_ALL_FCOE;
+
+	dev->features |= dev->hw_features | NETIF_F_LLTX;
+	dev->gso_max_size = real_dev->gso_max_size;
+	dev->gso_max_segs = real_dev->gso_max_segs;
+	if (dev->features & NETIF_F_VLAN_FEATURES)
+		netdev_warn(real_dev, "VLAN features are set incorrectly.  Q-in-Q configurations may not work correctly.\n");
+
+	dev->vlan_features = real_dev->vlan_features & ~NETIF_F_ALL_FCOE;
+	dev->hw_enc_features = vlan_tnl_features(real_dev);
+	dev->mpls_features = real_dev->mpls_features;
+
+	/* ipv6 shared card related stuff */
+	dev->dev_id = real_dev->dev_id;
+
+	/* no MAC configured yet: borrow the lower device's address */
+	if (is_zero_ether_addr(dev->dev_addr)) {
+		ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+		dev->addr_assign_type = NET_ADDR_STOLEN;
+	}
+	if (is_zero_ether_addr(dev->broadcast))
+		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
+
+#if IS_ENABLED(CONFIG_FCOE)
+	dev->fcoe_ddp_xid = real_dev->fcoe_ddp_xid;
+#endif
+
+	dev->needed_headroom = real_dev->needed_headroom;
+	/* pick header ops depending on HW VLAN tag-insertion capability */
+	if (vlan_hw_offload_capable(real_dev->features, vlan->vlan_proto)) {
+		dev->header_ops      = &vlan_passthru_header_ops;
+		dev->hard_header_len = real_dev->hard_header_len;
+	} else {
+		dev->header_ops      = &vlan_header_ops;
+		dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
+	}
+
+	dev->netdev_ops = &vlan_netdev_ops;
+
+	SET_NETDEV_DEVTYPE(dev, &vlan_type);
+
+	vlan_dev_set_lockdep_class(dev);
+
+	vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
+	if (!vlan->vlan_pcpu_stats)
+		return -ENOMEM;
+
+	/* Get vlan's reference to real_dev */
+	dev_hold(real_dev);
+
+	return 0;
+}
+
+/* Note: this function might be called multiple times for the same device. */
+/* ndo_uninit: free all egress priority mappings; must be idempotent. */
+void vlan_dev_uninit(struct net_device *dev)
+{
+	struct vlan_priority_tci_mapping *pm;
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+	int i;
+
+	/* pop each bucket's list head until the bucket is empty */
+	for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
+		while ((pm = vlan->egress_priority_map[i]) != NULL) {
+			vlan->egress_priority_map[i] = pm->next;
+			kfree(pm);
+		}
+	}
+}
+
+/* ndo_fix_features: constrain the requested feature set to what the lower
+ * device can actually provide, while preserving software features and
+ * the user's checksum-offload choice. */
+static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
+	netdev_features_t features)
+{
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+	netdev_features_t old_features = features;
+	netdev_features_t lower_features;
+
+	lower_features = netdev_intersect_features((real_dev->vlan_features |
+						    NETIF_F_RXCSUM),
+						   real_dev->features);
+
+	/* Add HW_CSUM setting to preserve user ability to control
+	 * checksum offload on the vlan device.
+	 */
+	if (lower_features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
+		lower_features |= NETIF_F_HW_CSUM;
+	features = netdev_intersect_features(features, lower_features);
+	/* software GSO etc. never depend on the lower device */
+	features |= old_features & (NETIF_F_SOFT_FEATURES | NETIF_F_GSO_SOFTWARE);
+	features |= NETIF_F_LLTX;
+
+	return features;
+}
+
+/* ethtool get_link_ksettings: report the real device's link settings. */
+static int vlan_ethtool_get_link_ksettings(struct net_device *dev,
+					   struct ethtool_link_ksettings *cmd)
+{
+	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+
+	return __ethtool_get_link_ksettings(vlan->real_dev, cmd);
+}
+
+/* ethtool get_drvinfo: identify the 802.1Q module itself. */
+static void vlan_ethtool_get_drvinfo(struct net_device *dev,
+				     struct ethtool_drvinfo *info)
+{
+	strlcpy(info->driver, vlan_fullname, sizeof(info->driver));
+	strlcpy(info->version, vlan_version, sizeof(info->version));
+	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
+}
+
+/* ethtool get_ts_info: report timestamping capabilities.  Preference
+ * order: the lower device's PHY, then its ethtool hook, then a software-
+ * only fallback. */
+static int vlan_ethtool_get_ts_info(struct net_device *dev,
+				    struct ethtool_ts_info *info)
+{
+	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+	const struct ethtool_ops *ops = vlan->real_dev->ethtool_ops;
+	struct phy_device *phydev = vlan->real_dev->phydev;
+
+	if (phy_has_tsinfo(phydev)) {
+		return phy_ts_info(phydev, info);
+	} else if (ops->get_ts_info) {
+		return ops->get_ts_info(vlan->real_dev, info);
+	} else {
+		info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+			SOF_TIMESTAMPING_SOFTWARE;
+		info->phc_index = -1;	/* no PTP hardware clock */
+	}
+
+	return 0;
+}
+
+/* ndo_get_stats64: sum the per-CPU counters into @stats.  The u64 counters
+ * are read under the per-CPU syncp seqcount and retried on conflict;
+ * rx_errors/tx_dropped are plain u32 counters read without the seqcount. */
+static void vlan_dev_get_stats64(struct net_device *dev,
+				 struct rtnl_link_stats64 *stats)
+{
+	struct vlan_pcpu_stats *p;
+	u32 rx_errors = 0, tx_dropped = 0;
+	int i;
+
+	for_each_possible_cpu(i) {
+		u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
+		unsigned int start;
+
+		p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
+		do {
+			start = u64_stats_fetch_begin_irq(&p->syncp);
+			rxpackets	= p->rx_packets;
+			rxbytes		= p->rx_bytes;
+			rxmulticast	= p->rx_multicast;
+			txpackets	= p->tx_packets;
+			txbytes		= p->tx_bytes;
+		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
+
+		stats->rx_packets	+= rxpackets;
+		stats->rx_bytes		+= rxbytes;
+		stats->multicast	+= rxmulticast;
+		stats->tx_packets	+= txpackets;
+		stats->tx_bytes		+= txbytes;
+		/* rx_errors & tx_dropped are u32 */
+		rx_errors	+= p->rx_errors;
+		tx_dropped	+= p->tx_dropped;
+	}
+	stats->rx_errors  = rx_errors;
+	stats->tx_dropped = tx_dropped;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* ndo_poll_controller: nothing to poll — TX goes via netpoll_send_skb(). */
+static void vlan_dev_poll_controller(struct net_device *dev)
+{
+	return;
+}
+
+/* ndo_netpoll_setup: allocate a netpoll instance bound to the real
+ * device and attach it to the vlan private data.
+ * Returns 0 or a negative errno. */
+static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo)
+{
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+	struct net_device *real_dev = vlan->real_dev;
+	struct netpoll *netpoll;
+	int err = 0;
+
+	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
+	err = -ENOMEM;
+	if (!netpoll)
+		goto out;
+
+	err = __netpoll_setup(netpoll, real_dev);
+	if (err) {
+		kfree(netpoll);
+		goto out;
+	}
+
+	vlan->netpoll = netpoll;
+
+out:
+	return err;
+}
+
+/* ndo_netpoll_cleanup: detach and free the netpoll instance, if any. */
+static void vlan_dev_netpoll_cleanup(struct net_device *dev)
+{
+	struct vlan_dev_priv *vlan= vlan_dev_priv(dev);
+	struct netpoll *netpoll = vlan->netpoll;
+
+	if (!netpoll)
+		return;
+
+	vlan->netpoll = NULL;	/* clear before freeing to avoid reuse */
+	__netpoll_free(netpoll);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+/* ndo_get_iflink: expose the real device's ifindex as this device's link. */
+static int vlan_dev_get_iflink(const struct net_device *dev)
+{
+	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+
+	return real_dev->ifindex;
+}
+
+/* ethtool operations for vlan devices (mostly delegated to real_dev). */
+static const struct ethtool_ops vlan_ethtool_ops = {
+	.get_link_ksettings	= vlan_ethtool_get_link_ksettings,
+	.get_drvinfo	        = vlan_ethtool_get_drvinfo,
+	.get_link		= ethtool_op_get_link,
+	.get_ts_info		= vlan_ethtool_get_ts_info,
+};
+
+/* netdev operations table wiring up all the handlers defined above. */
+static const struct net_device_ops vlan_netdev_ops = {
+	.ndo_change_mtu		= vlan_dev_change_mtu,
+	.ndo_init		= vlan_dev_init,
+	.ndo_uninit		= vlan_dev_uninit,
+	.ndo_open		= vlan_dev_open,
+	.ndo_stop		= vlan_dev_stop,
+	.ndo_start_xmit =  vlan_dev_hard_start_xmit,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address	= vlan_dev_set_mac_address,
+	.ndo_set_rx_mode	= vlan_dev_set_rx_mode,
+	.ndo_change_rx_flags	= vlan_dev_change_rx_flags,
+	.ndo_do_ioctl		= vlan_dev_ioctl,
+	.ndo_neigh_setup	= vlan_dev_neigh_setup,
+	.ndo_get_stats64	= vlan_dev_get_stats64,
+#if IS_ENABLED(CONFIG_FCOE)
+	.ndo_fcoe_ddp_setup	= vlan_dev_fcoe_ddp_setup,
+	.ndo_fcoe_ddp_done	= vlan_dev_fcoe_ddp_done,
+	.ndo_fcoe_enable	= vlan_dev_fcoe_enable,
+	.ndo_fcoe_disable	= vlan_dev_fcoe_disable,
+	.ndo_fcoe_ddp_target	= vlan_dev_fcoe_ddp_target,
+#endif
+#ifdef NETDEV_FCOE_WWNN
+	.ndo_fcoe_get_wwn	= vlan_dev_fcoe_get_wwn,
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= vlan_dev_poll_controller,
+	.ndo_netpoll_setup	= vlan_dev_netpoll_setup,
+	.ndo_netpoll_cleanup	= vlan_dev_netpoll_cleanup,
+#endif
+	.ndo_fix_features	= vlan_dev_fix_features,
+	.ndo_get_iflink		= vlan_dev_get_iflink,
+};
+
+/* priv_destructor: release per-CPU stats and the reference held on the
+ * real device since vlan_dev_init(). */
+static void vlan_dev_free(struct net_device *dev)
+{
+	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+
+	free_percpu(vlan->vlan_pcpu_stats);
+	vlan->vlan_pcpu_stats = NULL;
+
+	/* Get rid of the vlan's reference to real_dev */
+	dev_put(vlan->real_dev);
+}
+
+/* rtnl_link setup callback: initialize a freshly allocated net_device as
+ * an 802.1Q vlan interface (ethernet defaults plus vlan-specific flags,
+ * ops tables and MTU bounds). */
+void vlan_setup(struct net_device *dev)
+{
+	ether_setup(dev);
+
+	/* queueless virtual device; supports unicast filtering */
+	dev->priv_flags		|= IFF_802_1Q_VLAN | IFF_NO_QUEUE;
+	dev->priv_flags		|= IFF_UNICAST_FLT;
+	dev->priv_flags		&= ~IFF_TX_SKB_SHARING;
+	netif_keep_dst(dev);
+
+	dev->netdev_ops		= &vlan_netdev_ops;
+	dev->needs_free_netdev	= true;
+	dev->priv_destructor	= vlan_dev_free;
+	dev->ethtool_ops	= &vlan_ethtool_ops;
+
+	dev->min_mtu		= 0;
+	dev->max_mtu		= ETH_MAX_MTU;
+
+	/* broadcast address is inherited from real_dev in vlan_dev_init() */
+	eth_zero_addr(dev->broadcast);
+}
diff --git a/upstream/linux-5.10/net/bridge/br_fdb.c b/upstream/linux-5.10/net/bridge/br_fdb.c
new file mode 100755
index 0000000..65a6054
--- /dev/null
+++ b/upstream/linux-5.10/net/bridge/br_fdb.c
@@ -0,0 +1,1611 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *	Forwarding database
+ *	Linux ethernet bridge
+ *
+ *	Authors:
+ *	Lennert Buytenhek		<buytenh@gnu.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/rculist.h>
+#include <linux/spinlock.h>
+#include <linux/times.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/jhash.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <asm/unaligned.h>
+#include <linux/if_vlan.h>
+#include <net/switchdev.h>
+#include <trace/events/bridge.h>
+#include "br_private.h"
+#ifdef CONFIG_FASTNAT_MODULE
+#include <net/SI/fast_common.h>
+#include <net/SI/net_track.h>
+#include <net/SI/netioc_proc.h>
+#endif
+
+/* rhashtable layout for FDB entries: keyed by (MAC, vlan_id). */
+static const struct rhashtable_params br_fdb_rht_params = {
+	.head_offset = offsetof(struct net_bridge_fdb_entry, rhnode),
+	.key_offset = offsetof(struct net_bridge_fdb_entry, key),
+	.key_len = sizeof(struct net_bridge_fdb_key),
+	.automatic_shrinking = true,
+};
+
+/* slab cache shared by all bridges' FDB entries */
+static struct kmem_cache *br_fdb_cache __read_mostly;
+static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
+		      const unsigned char *addr, u16 vid);
+static void fdb_notify(struct net_bridge *br,
+		       const struct net_bridge_fdb_entry *, int, bool);
+
+/* Module init: create the global FDB entry slab cache. */
+int __init br_fdb_init(void)
+{
+	br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
+					 sizeof(struct net_bridge_fdb_entry),
+					 0,
+					 SLAB_HWCACHE_ALIGN, NULL);
+	if (!br_fdb_cache)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/* Module exit: destroy the FDB slab cache. */
+void br_fdb_fini(void)
+{
+	kmem_cache_destroy(br_fdb_cache);
+}
+
+/* Per-bridge: initialize the FDB rhashtable. */
+int br_fdb_hash_init(struct net_bridge *br)
+{
+	return rhashtable_init(&br->fdb_hash_tbl, &br_fdb_rht_params);
+}
+
+/* Per-bridge: tear down the FDB rhashtable. */
+void br_fdb_hash_fini(struct net_bridge *br)
+{
+	rhashtable_destroy(&br->fdb_hash_tbl);
+}
+
+/* if topology_changing then use forward_delay (default 15 sec)
+ * otherwise keep longer (default 5 minutes)
+ */
+static inline unsigned long hold_time(const struct net_bridge *br)
+{
+	return br->topology_change ? br->forward_delay : br->ageing_time;
+}
+
+/* An entry has expired when it is neither static nor externally learned
+ * and its last update is older than the current hold time. */
+static inline int has_expired(const struct net_bridge *br,
+				  const struct net_bridge_fdb_entry *fdb)
+{
+	return !test_bit(BR_FDB_STATIC, &fdb->flags) &&
+	       !test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags) &&
+	       time_before_eq(fdb->updated + hold_time(br), jiffies);
+}
+
+/* RCU callback: return an FDB entry to the slab cache after grace period. */
+static void fdb_rcu_free(struct rcu_head *head)
+{
+	struct net_bridge_fdb_entry *ent
+		= container_of(head, struct net_bridge_fdb_entry, rcu);
+	kmem_cache_free(br_fdb_cache, ent);
+}
+
+/* Look up an FDB entry by (addr, vid) under RCU; caller must hold
+ * rcu_read_lock().  Returns NULL when not found. */
+static struct net_bridge_fdb_entry *fdb_find_rcu(struct rhashtable *tbl,
+						 const unsigned char *addr,
+						 __u16 vid)
+{
+	struct net_bridge_fdb_key key;
+
+	WARN_ON_ONCE(!rcu_read_lock_held());
+
+	key.vlan_id = vid;
+	memcpy(key.addr.addr, addr, sizeof(key.addr.addr));
+
+	return rhashtable_lookup(tbl, &key, br_fdb_rht_params);
+}
+
+/* requires bridge hash_lock */
+static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br,
+						const unsigned char *addr,
+						__u16 vid)
+{
+	struct net_bridge_fdb_entry *fdb;
+
+	lockdep_assert_held_once(&br->hash_lock);
+
+	/* hash_lock keeps the entry alive past the RCU read section */
+	rcu_read_lock();
+	fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
+	rcu_read_unlock();
+
+	return fdb;
+}
+
+/* Resolve the port device behind (addr, vid) on bridge @br_dev.
+ * Called under RTNL.  Returns NULL if @br_dev is not a bridge master or
+ * no matching entry with a destination port exists. */
+struct net_device *br_fdb_find_port(const struct net_device *br_dev,
+				    const unsigned char *addr,
+				    __u16 vid)
+{
+	struct net_bridge_fdb_entry *f;
+	struct net_device *dev = NULL;
+	struct net_bridge *br;
+
+	ASSERT_RTNL();
+
+	if (!netif_is_bridge_master(br_dev))
+		return NULL;
+
+	br = netdev_priv(br_dev);
+	rcu_read_lock();
+	f = br_fdb_find_rcu(br, addr, vid);
+	if (f && f->dst)
+		dev = f->dst->dev;
+	rcu_read_unlock();
+
+	return dev;
+}
+EXPORT_SYMBOL_GPL(br_fdb_find_port);
+
+/* RCU lookup wrapper used by the fast path; caller holds rcu_read_lock(). */
+struct net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br,
+					     const unsigned char *addr,
+					     __u16 vid)
+{
+	return fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
+}
+
+/* When a static FDB entry is added, the mac address from the entry is
+ * added to the bridge private HW address list and all required ports
+ * are then updated with the new information.
+ * Called under RTNL.
+ */
+static void fdb_add_hw_addr(struct net_bridge *br, const unsigned char *addr)
+{
+	int err;
+	struct net_bridge_port *p;
+
+	ASSERT_RTNL();
+
+	/* only non-promiscuous ports need explicit unicast filter entries */
+	list_for_each_entry(p, &br->port_list, list) {
+		if (!br_promisc_port(p)) {
+			err = dev_uc_add(p->dev, addr);
+			if (err)
+				goto undo;
+		}
+	}
+
+	return;
+undo:
+	/* roll back the ports updated so far, in reverse order */
+	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
+		if (!br_promisc_port(p))
+			dev_uc_del(p->dev, addr);
+	}
+}
+
+/* When a static FDB entry is deleted, the HW address from that entry is
+ * also removed from the bridge private HW address list and updates all
+ * the ports with needed information.
+ * Called under RTNL.
+ */
+static void fdb_del_hw_addr(struct net_bridge *br, const unsigned char *addr)
+{
+	struct net_bridge_port *p;
+
+	ASSERT_RTNL();
+
+	list_for_each_entry(p, &br->port_list, list) {
+		if (!br_promisc_port(p))
+			dev_uc_del(p->dev, addr);
+	}
+}
+
+/* Remove @f from the FDB: drop HW filter entries for static entries,
+ * unlink from the list and hashtable, send an RTM_DELNEIGH notification
+ * (optionally to switchdev) and free via RCU.  Caller holds hash_lock. */
+static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f,
+		       bool swdev_notify)
+{
+	trace_fdb_delete(br, f);
+
+	if (test_bit(BR_FDB_STATIC, &f->flags))
+		fdb_del_hw_addr(br, f->key.addr.addr);
+
+	hlist_del_init_rcu(&f->fdb_node);
+	rhashtable_remove_fast(&br->fdb_hash_tbl, &f->rhnode,
+			       br_fdb_rht_params);
+	fdb_notify(br, f, RTM_DELNEIGH, swdev_notify);
+	call_rcu(&f->rcu, fdb_rcu_free);	/* readers may still hold refs */
+}
+
+/* Delete a local entry if no other port had the same address. */
+/* If another port (or the bridge device itself) still owns this MAC on a
+ * compatible VLAN, repoint the entry instead of deleting it. */
+static void fdb_delete_local(struct net_bridge *br,
+			     const struct net_bridge_port *p,
+			     struct net_bridge_fdb_entry *f)
+{
+	const unsigned char *addr = f->key.addr.addr;
+	struct net_bridge_vlan_group *vg;
+	const struct net_bridge_vlan *v;
+	struct net_bridge_port *op;
+	u16 vid = f->key.vlan_id;
+
+	/* Maybe another port has same hw addr? */
+	list_for_each_entry(op, &br->port_list, list) {
+		vg = nbp_vlan_group(op);
+		if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
+		    (!vid || br_vlan_find(vg, vid))) {
+			f->dst = op;	/* transfer ownership to that port */
+			clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
+			return;
+		}
+	}
+
+	vg = br_vlan_group(br);
+	v = br_vlan_find(vg, vid);
+	/* Maybe bridge device has same hw addr? */
+	if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
+	    (!vid || (v && br_vlan_should_use(v)))) {
+		f->dst = NULL;	/* entry now belongs to the bridge itself */
+		clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
+		return;
+	}
+
+	/* nobody else uses this address: really delete */
+	fdb_delete(br, f, true);
+}
+
+/* Find the local, kernel-added entry for (addr, vid) on port @p and
+ * delete (or repoint) it.  Takes hash_lock itself. */
+void br_fdb_find_delete_local(struct net_bridge *br,
+			      const struct net_bridge_port *p,
+			      const unsigned char *addr, u16 vid)
+{
+	struct net_bridge_fdb_entry *f;
+
+	spin_lock_bh(&br->hash_lock);
+	f = br_fdb_find(br, addr, vid);
+	if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
+	    !test_bit(BR_FDB_ADDED_BY_USER, &f->flags) && f->dst == p)
+		fdb_delete_local(br, p, f);
+	spin_unlock_bh(&br->hash_lock);
+}
+
+/* Port @p changed its MAC address: replace its kernel-added local FDB
+ * entries with entries for @newaddr, on the default VLAN and on every
+ * VLAN configured on the port.  Runs under RTNL; takes hash_lock. */
+void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
+{
+	struct net_bridge_vlan_group *vg;
+	struct net_bridge_fdb_entry *f;
+	struct net_bridge *br = p->br;
+	struct net_bridge_vlan *v;
+
+	spin_lock_bh(&br->hash_lock);
+	vg = nbp_vlan_group(p);
+	hlist_for_each_entry(f, &br->fdb_list, fdb_node) {
+		if (f->dst == p && test_bit(BR_FDB_LOCAL, &f->flags) &&
+		    !test_bit(BR_FDB_ADDED_BY_USER, &f->flags)) {
+			/* delete old one */
+			fdb_delete_local(br, p, f);
+
+			/* if this port has no vlan information
+			 * configured, we can safely be done at
+			 * this point.
+			 */
+			if (!vg || !vg->num_vlans)
+				goto insert;
+		}
+	}
+
+insert:
+	/* insert new address,  may fail if invalid address or dup. */
+	fdb_insert(br, p, newaddr, 0);
+
+	if (!vg || !vg->num_vlans)
+		goto done;
+
+	/* Now add entries for every VLAN configured on the port.
+	 * This function runs under RTNL so the bitmap will not change
+	 * from under us.
+	 */
+	list_for_each_entry(v, &vg->vlan_list, vlist)
+		fdb_insert(br, p, newaddr, v->vid);
+
+done:
+	spin_unlock_bh(&br->hash_lock);
+}
+
+/* The bridge device itself changed MAC address: replace the kernel-created
+ * local entries (the ones with a NULL dst, i.e. unassociated with any port)
+ * for the old address with entries for @newaddr, on vlan 0 and on every
+ * vlan configured on the bridge.  Takes br->hash_lock itself.
+ */
+void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
+{
+	struct net_bridge_vlan_group *vg;
+	struct net_bridge_fdb_entry *f;
+	struct net_bridge_vlan *v;
+
+	spin_lock_bh(&br->hash_lock);
+
+	/* If old entry was unassociated with any port, then delete it. */
+	f = br_fdb_find(br, br->dev->dev_addr, 0);
+	if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
+	    !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
+		fdb_delete_local(br, NULL, f);
+
+	fdb_insert(br, NULL, newaddr, 0);
+	vg = br_vlan_group(br);
+	if (!vg || !vg->num_vlans)
+		goto out;
+	/* Now remove and add entries for every VLAN configured on the
+	 * bridge.  This function runs under RTNL so the bitmap will not
+	 * change from under us.
+	 */
+	list_for_each_entry(v, &vg->vlan_list, vlist) {
+		if (!br_vlan_should_use(v))
+			continue;
+		f = br_fdb_find(br, br->dev->dev_addr, v->vid);
+		if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
+		    !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
+			fdb_delete_local(br, NULL, f);
+		fdb_insert(br, NULL, newaddr, v->vid);
+	}
+out:
+	spin_unlock_bh(&br->hash_lock);
+}
+
+/* Periodic garbage collector (delayed work).  Walks the fdb list under RCU,
+ * deletes dynamic entries whose hold time expired, and for static or
+ * externally-learned entries with activity notification enabled emits an
+ * "inactive" netlink notification instead of deleting.  Reschedules itself
+ * for the next soonest expiry.
+ */
+void br_fdb_cleanup(struct work_struct *work)
+{
+	struct net_bridge *br = container_of(work, struct net_bridge,
+					     gc_work.work);
+	struct net_bridge_fdb_entry *f = NULL;
+	unsigned long delay = hold_time(br);
+	unsigned long work_delay = delay;
+	unsigned long now = jiffies;
+
+	/* this part is tricky, in order to avoid blocking learning and
+	 * consequently forwarding, we rely on rcu to delete objects with
+	 * delayed freeing allowing us to continue traversing
+	 */
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+		unsigned long this_timer = f->updated + delay;
+
+		if (test_bit(BR_FDB_STATIC, &f->flags) ||
+		    test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags)) {
+			/* never aged out; optionally notify inactivity once */
+			if (test_bit(BR_FDB_NOTIFY, &f->flags)) {
+				if (time_after(this_timer, now))
+					work_delay = min(work_delay,
+							 this_timer - now);
+				else if (!test_and_set_bit(BR_FDB_NOTIFY_INACTIVE,
+							   &f->flags))
+					fdb_notify(br, f, RTM_NEWNEIGH, false);
+			}
+			continue;
+		}
+
+		if (time_after(this_timer, now)) {
+			work_delay = min(work_delay, this_timer - now);
+		} else {
+			/* re-check under hash_lock: entry may have been
+			 * unlinked while we walked without the lock
+			 */
+			spin_lock_bh(&br->hash_lock);
+			if (!hlist_unhashed(&f->fdb_node))
+				fdb_delete(br, f, true);
+			spin_unlock_bh(&br->hash_lock);
+		}
+	}
+	rcu_read_unlock();
+
+	/* Cleanup minimum 10 milliseconds apart */
+	work_delay = max_t(unsigned long, work_delay, msecs_to_jiffies(10));
+	mod_delayed_work(system_long_wq, &br->gc_work, work_delay);
+}
+
+/* Completely flush all dynamic entries in forwarding database.
+ * Static entries (BR_FDB_STATIC) are preserved.  Takes br->hash_lock.
+ */
+void br_fdb_flush(struct net_bridge *br)
+{
+	struct net_bridge_fdb_entry *f;
+	struct hlist_node *tmp;
+
+	spin_lock_bh(&br->hash_lock);
+	hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
+		if (!test_bit(BR_FDB_STATIC, &f->flags))
+			fdb_delete(br, f, true);
+	}
+	spin_unlock_bh(&br->hash_lock);
+}
+
+/* Flush all entries referring to a specific port.
+ * if do_all is set also flush static entries
+ * if vid is set delete all entries that match the vlan_id
+ * Local entries go through fdb_delete_local() so they can be re-homed to
+ * another port or the bridge itself.  Takes br->hash_lock.
+ */
+void br_fdb_delete_by_port(struct net_bridge *br,
+			   const struct net_bridge_port *p,
+			   u16 vid,
+			   int do_all)
+{
+	struct net_bridge_fdb_entry *f;
+	struct hlist_node *tmp;
+
+	spin_lock_bh(&br->hash_lock);
+	hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
+		if (f->dst != p)
+			continue;
+
+		/* unless do_all: keep static entries, keep ext-learned
+		 * entries that are not offloaded, and honor the vid filter
+		 */
+		if (!do_all)
+			if (test_bit(BR_FDB_STATIC, &f->flags) ||
+			    (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags) &&
+			     !test_bit(BR_FDB_OFFLOADED, &f->flags)) ||
+			    (vid && f->key.vlan_id != vid))
+				continue;
+
+		if (test_bit(BR_FDB_LOCAL, &f->flags))
+			fdb_delete_local(br, p, f);
+		else
+			fdb_delete(br, f, true);
+	}
+	spin_unlock_bh(&br->hash_lock);
+}
+
+#if IS_ENABLED(CONFIG_ATM_LANE)
+/* Interface used by ATM LANE hook to test
+ * if an addr is on some other bridge port.
+ * Returns nonzero when @addr is known on a different, forwarding port
+ * of the same bridge as @dev.  Runs under RCU; no locks taken by caller.
+ */
+int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
+{
+	struct net_bridge_fdb_entry *fdb;
+	struct net_bridge_port *port;
+	int ret;
+
+	rcu_read_lock();
+	port = br_port_get_rcu(dev);
+	if (!port)
+		ret = 0;
+	else {
+		/* lookup is on vlan 0 only */
+		fdb = br_fdb_find_rcu(port->br, addr, 0);
+		ret = fdb && fdb->dst && fdb->dst->dev != dev &&
+			fdb->dst->state == BR_STATE_FORWARDING;
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+#endif /* CONFIG_ATM_LANE */
+
+/*
+ * Fill buffer with forwarding table records in
+ * the API format (struct __fdb_entry, the ioctl ABI).
+ * @maxnum: capacity of @buf in entries; @skip: entries to skip first.
+ * Returns the number of entries written.  Runs under RCU.
+ */
+int br_fdb_fillbuf(struct net_bridge *br, void *buf,
+		   unsigned long maxnum, unsigned long skip)
+{
+	struct net_bridge_fdb_entry *f;
+	struct __fdb_entry *fe = buf;
+	int num = 0;
+
+	memset(buf, 0, maxnum*sizeof(struct __fdb_entry));
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+		if (num >= maxnum)
+			break;
+
+		if (has_expired(br, f))
+			continue;
+
+		/* ignore pseudo entry for local MAC address */
+		if (!f->dst)
+			continue;
+
+		if (skip) {
+			--skip;
+			continue;
+		}
+
+		/* convert from internal format to API */
+		memcpy(fe->mac_addr, f->key.addr.addr, ETH_ALEN);
+
+		/* due to ABI compat need to split into hi/lo */
+		fe->port_no = f->dst->port_no;
+		fe->port_hi = f->dst->port_no >> 8;
+
+		fe->is_local = test_bit(BR_FDB_LOCAL, &f->flags);
+		if (!test_bit(BR_FDB_STATIC, &f->flags))
+			fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
+		++fe;
+		++num;
+	}
+	rcu_read_unlock();
+
+	return num;
+}
+
+/* Allocate and insert a new fdb entry keyed by (@addr, @vid) with the given
+ * initial @flags.  Returns NULL on allocation failure or if an entry with
+ * the same key already exists in the rhashtable (insert race lost).
+ * Caller holds br->hash_lock.
+ */
+static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br,
+					       struct net_bridge_port *source,
+					       const unsigned char *addr,
+					       __u16 vid,
+					       unsigned long flags)
+{
+	struct net_bridge_fdb_entry *fdb;
+
+	fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
+	if (fdb) {
+		memcpy(fdb->key.addr.addr, addr, ETH_ALEN);
+		fdb->dst = source;
+		fdb->key.vlan_id = vid;
+		fdb->flags = flags;
+		fdb->updated = fdb->used = jiffies;
+		/* nonzero return means duplicate key: back out */
+		if (rhashtable_lookup_insert_fast(&br->fdb_hash_tbl,
+						  &fdb->rhnode,
+						  br_fdb_rht_params)) {
+			kmem_cache_free(br_fdb_cache, fdb);
+			fdb = NULL;
+		} else {
+			hlist_add_head_rcu(&fdb->fdb_node, &br->fdb_list);
+		}
+	}
+	return fdb;
+}
+
+/* Insert a local+static entry for @addr/@vid, replacing a previously
+ * learned (non-local) entry if one exists.  Caller holds br->hash_lock.
+ * Returns 0 on success (including when a local entry already exists),
+ * -EINVAL for a bad address, -ENOMEM on allocation failure.
+ */
+static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
+		  const unsigned char *addr, u16 vid)
+{
+	struct net_bridge_fdb_entry *fdb;
+
+	if (!is_valid_ether_addr(addr))
+		return -EINVAL;
+
+	fdb = br_fdb_find(br, addr, vid);
+	if (fdb) {
+		/* it is okay to have multiple ports with same
+		 * address, just use the first one.
+		 */
+		if (test_bit(BR_FDB_LOCAL, &fdb->flags))
+			return 0;
+		br_warn(br, "adding interface %s with same address as a received packet (addr:%pM, vlan:%u)\n",
+		       source ? source->dev->name : br->dev->name, addr, vid);
+		fdb_delete(br, fdb, true);
+	}
+
+	fdb = fdb_create(br, source, addr, vid,
+			 BIT(BR_FDB_LOCAL) | BIT(BR_FDB_STATIC));
+	if (!fdb)
+		return -ENOMEM;
+
+	fdb_add_hw_addr(br, addr);
+	fdb_notify(br, fdb, RTM_NEWNEIGH, true);
+	return 0;
+}
+
+/* Add entry for local address of interface.
+ * Locked wrapper around fdb_insert(); safe to call without br->hash_lock.
+ */
+int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
+		  const unsigned char *addr, u16 vid)
+{
+	int ret;
+
+	spin_lock_bh(&br->hash_lock);
+	ret = fdb_insert(br, source, addr, vid);
+	spin_unlock_bh(&br->hash_lock);
+	return ret;
+}
+
+/* Clear the inactive-notification state on @fdb if it is set.
+ * Returns true only when the flag was actually cleared here, i.e. the
+ * fdb was modified (the entry transitioned back to "active").
+ */
+static bool __fdb_mark_active(struct net_bridge_fdb_entry *fdb)
+{
+	if (!test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
+		return false;
+
+	return test_and_clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags);
+}
+
+/* Learning fast path: refresh or create the fdb entry for a frame seen
+ * with source @addr/@vid on port @source.  Called in the receive path
+ * under RCU; takes br->hash_lock only when a new entry must be created.
+ * @flags may carry BIT(BR_FDB_ADDED_BY_USER) for NTF_USE refreshes.
+ */
+void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
+		   const unsigned char *addr, u16 vid, unsigned long flags)
+{
+	struct net_bridge_fdb_entry *fdb;
+
+	/* some users want to always flood. */
+	if (hold_time(br) == 0)
+		return;
+
+	fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
+	if (likely(fdb)) {
+		/* attempt to update an entry for a local interface */
+		if (unlikely(test_bit(BR_FDB_LOCAL, &fdb->flags))) {
+			if (net_ratelimit())
+				br_warn(br, "received packet on %s with own address as source address (addr:%pM, vlan:%u)\n",
+					source->dev->name, addr, vid);
+		} else {
+			unsigned long now = jiffies;
+			bool fdb_modified = false;
+
+			if (now != fdb->updated) {
+				fdb->updated = now;
+				fdb_modified = __fdb_mark_active(fdb);
+			}
+
+			/* fastpath: update of existing entry */
+			if (unlikely(source != fdb->dst &&
+				     !test_bit(BR_FDB_STICKY, &fdb->flags))) {
+				fdb->dst = source;
+				fdb_modified = true;
+				/* Take over HW learned entry */
+				if (unlikely(test_bit(BR_FDB_ADDED_BY_EXT_LEARN,
+						      &fdb->flags)))
+					clear_bit(BR_FDB_ADDED_BY_EXT_LEARN,
+						  &fdb->flags);
+			}
+
+			if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags)))
+				set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
+			if (unlikely(fdb_modified)) {
+				trace_br_fdb_update(br, source, addr, vid, flags);
+				fdb_notify(br, fdb, RTM_NEWNEIGH, true);
+			}
+		}
+	} else {
+		spin_lock(&br->hash_lock);
+		fdb = fdb_create(br, source, addr, vid, flags);
+		if (fdb) {
+			trace_br_fdb_update(br, source, addr, vid, flags);
+			fdb_notify(br, fdb, RTM_NEWNEIGH, true);
+		}
+		/* else  we lose race and someone else inserts
+		 * it first, don't bother updating
+		 */
+		spin_unlock(&br->hash_lock);
+	}
+}
+
+/* Translate internal fdb flags into the neighbour (NUD_*) state that is
+ * reported over netlink: local => permanent, static => noarp, otherwise
+ * stale or reachable depending on expiry.
+ */
+static int fdb_to_nud(const struct net_bridge *br,
+		      const struct net_bridge_fdb_entry *fdb)
+{
+	if (test_bit(BR_FDB_LOCAL, &fdb->flags))
+		return NUD_PERMANENT;
+	if (test_bit(BR_FDB_STATIC, &fdb->flags))
+		return NUD_NOARP;
+
+	return has_expired(br, fdb) ? NUD_STALE : NUD_REACHABLE;
+}
+
+/* Fill one RTM_NEWNEIGH/RTM_DELNEIGH message for @fdb into @skb.
+ * Returns 0 on success or -EMSGSIZE when @skb has no room (the partially
+ * written message is cancelled).  Attribute budget must match
+ * fdb_nlmsg_size() below.
+ */
+static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
+			 const struct net_bridge_fdb_entry *fdb,
+			 u32 portid, u32 seq, int type, unsigned int flags)
+{
+	unsigned long now = jiffies;
+	struct nda_cacheinfo ci;
+	struct nlmsghdr *nlh;
+	struct ndmsg *ndm;
+
+	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
+	if (nlh == NULL)
+		return -EMSGSIZE;
+
+	ndm = nlmsg_data(nlh);
+	ndm->ndm_family	 = AF_BRIDGE;
+	ndm->ndm_pad1    = 0;
+	ndm->ndm_pad2    = 0;
+	ndm->ndm_flags	 = 0;
+	ndm->ndm_type	 = 0;
+	/* bridge-local entries (no dst port) are reported on the bridge dev */
+	ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex;
+	ndm->ndm_state   = fdb_to_nud(br, fdb);
+
+	if (test_bit(BR_FDB_OFFLOADED, &fdb->flags))
+		ndm->ndm_flags |= NTF_OFFLOADED;
+	if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
+		ndm->ndm_flags |= NTF_EXT_LEARNED;
+	if (test_bit(BR_FDB_STICKY, &fdb->flags))
+		ndm->ndm_flags |= NTF_STICKY;
+
+	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr))
+		goto nla_put_failure;
+	if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex))
+		goto nla_put_failure;
+	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
+	ci.ndm_confirmed = 0;
+	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
+	ci.ndm_refcnt	 = 0;
+	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
+		goto nla_put_failure;
+
+	/* NDA_VLAN is only emitted for non-zero vlan ids */
+	if (fdb->key.vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16),
+					&fdb->key.vlan_id))
+		goto nla_put_failure;
+
+	if (test_bit(BR_FDB_NOTIFY, &fdb->flags)) {
+		struct nlattr *nest = nla_nest_start(skb, NDA_FDB_EXT_ATTRS);
+		u8 notify_bits = FDB_NOTIFY_BIT;
+
+		if (!nest)
+			goto nla_put_failure;
+		if (test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
+			notify_bits |= FDB_NOTIFY_INACTIVE_BIT;
+
+		if (nla_put_u8(skb, NFEA_ACTIVITY_NOTIFY, notify_bits)) {
+			nla_nest_cancel(skb, nest);
+			goto nla_put_failure;
+		}
+
+		nla_nest_end(skb, nest);
+	}
+
+	nlmsg_end(skb, nlh);
+	return 0;
+
+nla_put_failure:
+	nlmsg_cancel(skb, nlh);
+	return -EMSGSIZE;
+}
+
+/* Upper bound on the size of one fdb netlink message; must cover every
+ * attribute fdb_fill_info() can emit, else fdb_notify() WARNs on -EMSGSIZE.
+ */
+static inline size_t fdb_nlmsg_size(void)
+{
+	return NLMSG_ALIGN(sizeof(struct ndmsg))
+		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
+		+ nla_total_size(sizeof(u32)) /* NDA_MASTER */
+		+ nla_total_size(sizeof(u16)) /* NDA_VLAN */
+		+ nla_total_size(sizeof(struct nda_cacheinfo))
+		+ nla_total_size(0) /* NDA_FDB_EXT_ATTRS */
+		+ nla_total_size(sizeof(u8)); /* NFEA_ACTIVITY_NOTIFY */
+}
+
+/* Broadcast an fdb change: optionally notify switchdev drivers, then send
+ * an RTNLGRP_NEIGH netlink message of @type (RTM_NEWNEIGH/RTM_DELNEIGH).
+ * Allocation is GFP_ATOMIC since this runs from the data path.
+ */
+static void fdb_notify(struct net_bridge *br,
+		       const struct net_bridge_fdb_entry *fdb, int type,
+		       bool swdev_notify)
+{
+	struct net *net = dev_net(br->dev);
+	struct sk_buff *skb;
+	int err = -ENOBUFS;
+
+	if (swdev_notify)
+		br_switchdev_fdb_notify(fdb, type);
+
+	skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
+	if (skb == NULL)
+		goto errout;
+
+	err = fdb_fill_info(skb, br, fdb, 0, 0, type, 0);
+	if (err < 0) {
+		/* -EMSGSIZE implies BUG in fdb_nlmsg_size() */
+		WARN_ON(err == -EMSGSIZE);
+		kfree_skb(skb);
+		goto errout;
+	}
+	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
+	return;
+errout:
+	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
+}
+
+/* Dump information about entries, in response to GETNEIGH.
+ * @filter_dev restricts the dump to entries on that port (or to
+ * bridge-local entries when the filter is the bridge itself);
+ * *idx / cb->args[2] implement netlink dump continuation.
+ */
+int br_fdb_dump(struct sk_buff *skb,
+		struct netlink_callback *cb,
+		struct net_device *dev,
+		struct net_device *filter_dev,
+		int *idx)
+{
+	struct net_bridge *br = netdev_priv(dev);
+	struct net_bridge_fdb_entry *f;
+	int err = 0;
+
+	if (!(dev->priv_flags & IFF_EBRIDGE))
+		return err;
+
+	if (!filter_dev) {
+		err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
+		if (err < 0)
+			return err;
+	}
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+		if (*idx < cb->args[2])
+			goto skip;
+		if (filter_dev && (!f->dst || f->dst->dev != filter_dev)) {
+			if (filter_dev != dev)
+				goto skip;
+			/* !f->dst is a special case for bridge
+			 * It means the MAC belongs to the bridge
+			 * Therefore need a little more filtering
+			 * we only want to dump the !f->dst case
+			 */
+			if (f->dst)
+				goto skip;
+		}
+		if (!filter_dev && f->dst)
+			goto skip;
+
+		err = fdb_fill_info(skb, br, f,
+				    NETLINK_CB(cb->skb).portid,
+				    cb->nlh->nlmsg_seq,
+				    RTM_NEWNEIGH,
+				    NLM_F_MULTI);
+		if (err < 0)
+			break;
+skip:
+		*idx += 1;
+	}
+	rcu_read_unlock();
+
+	return err;
+}
+
+/* RTM_GETNEIGH handler for a single fdb entry lookup by (@addr, @vid).
+ * Fills @skb with one RTM_NEWNEIGH message; returns -ENOENT when the
+ * entry does not exist.  Runs under RCU.
+ */
+int br_fdb_get(struct sk_buff *skb,
+	       struct nlattr *tb[],
+	       struct net_device *dev,
+	       const unsigned char *addr,
+	       u16 vid, u32 portid, u32 seq,
+	       struct netlink_ext_ack *extack)
+{
+	struct net_bridge *br = netdev_priv(dev);
+	struct net_bridge_fdb_entry *f;
+	int err = 0;
+
+	rcu_read_lock();
+	f = br_fdb_find_rcu(br, addr, vid);
+	if (!f) {
+		NL_SET_ERR_MSG(extack, "Fdb entry not found");
+		err = -ENOENT;
+		goto errout;
+	}
+
+	err = fdb_fill_info(skb, br, f, portid, seq,
+			    RTM_NEWNEIGH, 0);
+errout:
+	rcu_read_unlock();
+	return err;
+}
+
+/* Apply user-requested activity-notification bits (@notify, a combination
+ * of FDB_NOTIFY_BIT / FDB_NOTIFY_INACTIVE_BIT) to @fdb's flags.
+ * returns true if the fdb is modified
+ */
+static bool fdb_handle_notify(struct net_bridge_fdb_entry *fdb, u8 notify)
+{
+	bool modified = false;
+
+	/* allow to mark an entry as inactive, usually done on creation */
+	if ((notify & FDB_NOTIFY_INACTIVE_BIT) &&
+	    !test_and_set_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
+		modified = true;
+
+	if ((notify & FDB_NOTIFY_BIT) &&
+	    !test_and_set_bit(BR_FDB_NOTIFY, &fdb->flags)) {
+		/* enabled activity tracking */
+		modified = true;
+	} else if (!(notify & FDB_NOTIFY_BIT) &&
+		   test_and_clear_bit(BR_FDB_NOTIFY, &fdb->flags)) {
+		/* disabled activity tracking, clear notify state */
+		clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags);
+		modified = true;
+	}
+
+	return modified;
+}
+
+/* Update (create or replace) forwarding database entry.
+ * Implements the RTM_NEWNEIGH semantics for a bridge fdb: validates the
+ * requested NUD state against the port state, honors NLM_F_CREATE /
+ * NLM_F_EXCL, maps NUD_PERMANENT/NUD_NOARP onto the local/static flag
+ * bits, and sends a notification when anything changed.
+ * Caller holds br->hash_lock.
+ */
+static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
+			 const u8 *addr, struct ndmsg *ndm, u16 flags, u16 vid,
+			 struct nlattr *nfea_tb[])
+{
+	bool is_sticky = !!(ndm->ndm_flags & NTF_STICKY);
+	bool refresh = !nfea_tb[NFEA_DONT_REFRESH];
+	struct net_bridge_fdb_entry *fdb;
+	u16 state = ndm->ndm_state;
+	bool modified = false;
+	u8 notify = 0;
+
+	/* If the port cannot learn allow only local and static entries */
+	if (source && !(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
+	    !(source->state == BR_STATE_LEARNING ||
+	      source->state == BR_STATE_FORWARDING))
+		return -EPERM;
+
+	/* entries on the bridge itself must be permanent */
+	if (!source && !(state & NUD_PERMANENT)) {
+		pr_info("bridge: RTM_NEWNEIGH %s without NUD_PERMANENT\n",
+			br->dev->name);
+		return -EINVAL;
+	}
+
+	/* sticky makes no sense for a permanent (local) entry */
+	if (is_sticky && (state & NUD_PERMANENT))
+		return -EINVAL;
+
+	if (nfea_tb[NFEA_ACTIVITY_NOTIFY]) {
+		notify = nla_get_u8(nfea_tb[NFEA_ACTIVITY_NOTIFY]);
+		/* reject unknown bits and "inactive without notify" */
+		if ((notify & ~BR_FDB_NOTIFY_SETTABLE_BITS) ||
+		    (notify & BR_FDB_NOTIFY_SETTABLE_BITS) == FDB_NOTIFY_INACTIVE_BIT)
+			return -EINVAL;
+	}
+
+	fdb = br_fdb_find(br, addr, vid);
+	if (fdb == NULL) {
+		if (!(flags & NLM_F_CREATE))
+			return -ENOENT;
+
+		fdb = fdb_create(br, source, addr, vid, 0);
+		if (!fdb)
+			return -ENOMEM;
+
+		modified = true;
+	} else {
+		if (flags & NLM_F_EXCL)
+			return -EEXIST;
+
+		if (fdb->dst != source) {
+			fdb->dst = source;
+			modified = true;
+		}
+	}
+
+	if (fdb_to_nud(br, fdb) != state) {
+		/* translate the requested NUD state into flag bits and keep
+		 * the hw address list in sync with the static bit
+		 */
+		if (state & NUD_PERMANENT) {
+			set_bit(BR_FDB_LOCAL, &fdb->flags);
+			if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
+				fdb_add_hw_addr(br, addr);
+		} else if (state & NUD_NOARP) {
+			clear_bit(BR_FDB_LOCAL, &fdb->flags);
+			if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
+				fdb_add_hw_addr(br, addr);
+		} else {
+			clear_bit(BR_FDB_LOCAL, &fdb->flags);
+			if (test_and_clear_bit(BR_FDB_STATIC, &fdb->flags))
+				fdb_del_hw_addr(br, addr);
+		}
+
+		modified = true;
+	}
+
+	if (is_sticky != test_bit(BR_FDB_STICKY, &fdb->flags)) {
+		change_bit(BR_FDB_STICKY, &fdb->flags);
+		modified = true;
+	}
+
+	if (fdb_handle_notify(fdb, notify))
+		modified = true;
+
+	set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
+
+	fdb->used = jiffies;
+	if (modified) {
+		if (refresh)
+			fdb->updated = jiffies;
+		fdb_notify(br, fdb, RTM_NEWNEIGH, true);
+	}
+
+	return 0;
+}
+
+/* Dispatch one RTM_NEWNEIGH request for a single vlan: NTF_USE refreshes
+ * via the learning path, NTF_EXT_LEARNED goes through the external-learn
+ * API, everything else becomes a regular fdb_add_entry() under hash_lock.
+ */
+static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
+			struct net_bridge_port *p, const unsigned char *addr,
+			u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[],
+			struct netlink_ext_ack *extack)
+{
+	int err = 0;
+
+	if (ndm->ndm_flags & NTF_USE) {
+		if (!p) {
+			pr_info("bridge: RTM_NEWNEIGH %s with NTF_USE is not supported\n",
+				br->dev->name);
+			return -EINVAL;
+		}
+		if (!nbp_state_should_learn(p))
+			return 0;
+
+		/* mimic the softirq context br_fdb_update() normally runs in */
+		local_bh_disable();
+		rcu_read_lock();
+		br_fdb_update(br, p, addr, vid, BIT(BR_FDB_ADDED_BY_USER));
+		rcu_read_unlock();
+		local_bh_enable();
+	} else if (ndm->ndm_flags & NTF_EXT_LEARNED) {
+		if (!p && !(ndm->ndm_state & NUD_PERMANENT)) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "FDB entry towards bridge must be permanent");
+			return -EINVAL;
+		}
+		err = br_fdb_external_learn_add(br, p, addr, vid, true);
+	} else {
+		spin_lock_bh(&br->hash_lock);
+		err = fdb_add_entry(br, p, addr, ndm, nlh_flags, vid, nfea_tb);
+		spin_unlock_bh(&br->hash_lock);
+	}
+
+	return err;
+}
+
+/* netlink policy for the NDA_FDB_EXT_ATTRS nest parsed in br_fdb_add() */
+static const struct nla_policy br_nda_fdb_pol[NFEA_MAX + 1] = {
+	[NFEA_ACTIVITY_NOTIFY]	= { .type = NLA_U8 },
+	[NFEA_DONT_REFRESH]	= { .type = NLA_FLAG },
+};
+
+/* Add new permanent fdb entry with RTM_NEWNEIGH.
+ * @dev may be the bridge itself or one of its ports.  When @vid is 0 and
+ * vlans are configured, the entry is added/updated for the default (vid 0)
+ * table and for every vlan on the port/bridge.  Runs under RTNL.
+ */
+int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+	       struct net_device *dev,
+	       const unsigned char *addr, u16 vid, u16 nlh_flags,
+	       struct netlink_ext_ack *extack)
+{
+	struct nlattr *nfea_tb[NFEA_MAX + 1], *attr;
+	struct net_bridge_vlan_group *vg;
+	struct net_bridge_port *p = NULL;
+	struct net_bridge_vlan *v;
+	struct net_bridge *br = NULL;
+	int err = 0;
+
+	trace_br_fdb_add(ndm, dev, addr, vid, nlh_flags);
+
+	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
+		pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
+		return -EINVAL;
+	}
+
+	if (is_zero_ether_addr(addr)) {
+		pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n");
+		return -EINVAL;
+	}
+
+	/* resolve bridge + vlan group from either the bridge or a port dev */
+	if (dev->priv_flags & IFF_EBRIDGE) {
+		br = netdev_priv(dev);
+		vg = br_vlan_group(br);
+	} else {
+		p = br_port_get_rtnl(dev);
+		if (!p) {
+			pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
+				dev->name);
+			return -EINVAL;
+		}
+		br = p->br;
+		vg = nbp_vlan_group(p);
+	}
+
+	if (tb[NDA_FDB_EXT_ATTRS]) {
+		attr = tb[NDA_FDB_EXT_ATTRS];
+		err = nla_parse_nested(nfea_tb, NFEA_MAX, attr,
+				       br_nda_fdb_pol, extack);
+		if (err)
+			return err;
+	} else {
+		memset(nfea_tb, 0, sizeof(struct nlattr *) * (NFEA_MAX + 1));
+	}
+
+	if (vid) {
+		v = br_vlan_find(vg, vid);
+		if (!v || !br_vlan_should_use(v)) {
+			pr_info("bridge: RTM_NEWNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
+			return -EINVAL;
+		}
+
+		/* VID was specified, so use it. */
+		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb,
+				   extack);
+	} else {
+		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb,
+				   extack);
+		if (err || !vg || !vg->num_vlans)
+			goto out;
+
+		/* We have vlans configured on this port and user didn't
+		 * specify a VLAN.  To be nice, add/update entry for every
+		 * vlan on this port.
+		 */
+		list_for_each_entry(v, &vg->vlan_list, vlist) {
+			if (!br_vlan_should_use(v))
+				continue;
+			err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid,
+					   nfea_tb, extack);
+			if (err)
+				goto out;
+		}
+	}
+
+out:
+	return err;
+}
+
+/* Delete the fdb entry for (@addr, @vlan) only if it belongs to port @p.
+ * Returns -ENOENT when no such entry exists or it points at another port.
+ * Caller holds br->hash_lock.
+ */
+static int fdb_delete_by_addr_and_port(struct net_bridge *br,
+				       const struct net_bridge_port *p,
+				       const u8 *addr, u16 vlan)
+{
+	struct net_bridge_fdb_entry *fdb;
+
+	fdb = br_fdb_find(br, addr, vlan);
+	if (!fdb || fdb->dst != p)
+		return -ENOENT;
+
+	fdb_delete(br, fdb, true);
+
+	return 0;
+}
+
+/* Locked wrapper around fdb_delete_by_addr_and_port(). */
+static int __br_fdb_delete(struct net_bridge *br,
+			   const struct net_bridge_port *p,
+			   const unsigned char *addr, u16 vid)
+{
+	int err;
+
+	spin_lock_bh(&br->hash_lock);
+	err = fdb_delete_by_addr_and_port(br, p, addr, vid);
+	spin_unlock_bh(&br->hash_lock);
+
+	return err;
+}
+
+/* Remove neighbor entry with RTM_DELNEIGH.
+ * When @vid is 0 the entry is removed from the default table and from
+ * every vlan configured on the port/bridge; the ANDed error means the
+ * call succeeds if at least one deletion succeeded.  Runs under RTNL.
+ */
+int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
+		  struct net_device *dev,
+		  const unsigned char *addr, u16 vid)
+{
+	struct net_bridge_vlan_group *vg;
+	struct net_bridge_port *p = NULL;
+	struct net_bridge_vlan *v;
+	struct net_bridge *br;
+	int err;
+
+	if (dev->priv_flags & IFF_EBRIDGE) {
+		br = netdev_priv(dev);
+		vg = br_vlan_group(br);
+	} else {
+		p = br_port_get_rtnl(dev);
+		if (!p) {
+			pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
+				dev->name);
+			return -EINVAL;
+		}
+		vg = nbp_vlan_group(p);
+		br = p->br;
+	}
+
+	if (vid) {
+		v = br_vlan_find(vg, vid);
+		if (!v) {
+			pr_info("bridge: RTM_DELNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
+			return -EINVAL;
+		}
+
+		err = __br_fdb_delete(br, p, addr, vid);
+	} else {
+		/* -ENOENT & -ENOENT == -ENOENT; any 0 result clears err */
+		err = -ENOENT;
+		err &= __br_fdb_delete(br, p, addr, 0);
+		if (!vg || !vg->num_vlans)
+			return err;
+
+		list_for_each_entry(v, &vg->vlan_list, vlist) {
+			if (!br_vlan_should_use(v))
+				continue;
+			err &= __br_fdb_delete(br, p, addr, v->vid);
+		}
+	}
+
+	return err;
+}
+
+/* Push every static fdb address into port @p's unicast filter
+ * (dev_uc_add).  On failure, roll back the addresses added so far.
+ * Runs under RTNL; list walked under RCU (static entries only change
+ * under RTNL, see comment below).
+ */
+int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p)
+{
+	struct net_bridge_fdb_entry *f, *tmp;
+	int err = 0;
+
+	ASSERT_RTNL();
+
+	/* the key here is that static entries change only under rtnl */
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+		/* We only care for static entries */
+		if (!test_bit(BR_FDB_STATIC, &f->flags))
+			continue;
+		err = dev_uc_add(p->dev, f->key.addr.addr);
+		if (err)
+			goto rollback;
+	}
+done:
+	rcu_read_unlock();
+
+	return err;
+
+rollback:
+	/* undo the dev_uc_add() calls made before the failing entry */
+	hlist_for_each_entry_rcu(tmp, &br->fdb_list, fdb_node) {
+		/* We only care for static entries */
+		if (!test_bit(BR_FDB_STATIC, &tmp->flags))
+			continue;
+		if (tmp == f)
+			break;
+		dev_uc_del(p->dev, tmp->key.addr.addr);
+	}
+
+	goto done;
+}
+
+/* Remove every static fdb address from port @p's unicast filter;
+ * inverse of br_fdb_sync_static().  Runs under RTNL.
+ */
+void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
+{
+	struct net_bridge_fdb_entry *f;
+
+	ASSERT_RTNL();
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+		/* We only care for static entries */
+		if (!test_bit(BR_FDB_STATIC, &f->flags))
+			continue;
+
+		dev_uc_del(p->dev, f->key.addr.addr);
+	}
+	rcu_read_unlock();
+}
+
+/* Add or refresh an fdb entry learned externally (e.g. by switchdev HW).
+ * Creates the entry with BR_FDB_ADDED_BY_EXT_LEARN set, or converts an
+ * existing SW-learned entry unless it was added by the user.
+ * @swdev_notify controls whether switchdev drivers are notified and
+ * whether the entry is also marked user-added.  Takes br->hash_lock.
+ */
+int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
+			      const unsigned char *addr, u16 vid,
+			      bool swdev_notify)
+{
+	struct net_bridge_fdb_entry *fdb;
+	bool modified = false;
+	int err = 0;
+
+	trace_br_fdb_external_learn_add(br, p, addr, vid);
+
+	spin_lock_bh(&br->hash_lock);
+
+	fdb = br_fdb_find(br, addr, vid);
+	if (!fdb) {
+		unsigned long flags = BIT(BR_FDB_ADDED_BY_EXT_LEARN);
+
+		if (swdev_notify)
+			flags |= BIT(BR_FDB_ADDED_BY_USER);
+
+		/* no port means the address belongs to the bridge itself */
+		if (!p)
+			flags |= BIT(BR_FDB_LOCAL);
+
+		fdb = fdb_create(br, p, addr, vid, flags);
+		if (!fdb) {
+			err = -ENOMEM;
+			goto err_unlock;
+		}
+		fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
+	} else {
+		fdb->updated = jiffies;
+
+		if (fdb->dst != p) {
+			fdb->dst = p;
+			modified = true;
+		}
+
+		if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
+			/* Refresh entry */
+			fdb->used = jiffies;
+		} else if (!test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags)) {
+			/* Take over SW learned entry */
+			set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags);
+			modified = true;
+		}
+
+		if (swdev_notify)
+			set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
+
+		if (!p)
+			set_bit(BR_FDB_LOCAL, &fdb->flags);
+
+		if (modified)
+			fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
+	}
+
+err_unlock:
+	spin_unlock_bh(&br->hash_lock);
+
+	return err;
+}
+
+/* Delete an externally-learned fdb entry for (@addr, @vid).
+ * Only entries with BR_FDB_ADDED_BY_EXT_LEARN are removed; anything
+ * else yields -ENOENT.  Takes br->hash_lock.
+ */
+int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
+			      const unsigned char *addr, u16 vid,
+			      bool swdev_notify)
+{
+	struct net_bridge_fdb_entry *fdb;
+	int err = 0;
+
+	spin_lock_bh(&br->hash_lock);
+
+	fdb = br_fdb_find(br, addr, vid);
+	if (fdb && test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
+		fdb_delete(br, fdb, swdev_notify);
+	else
+		err = -ENOENT;
+
+	spin_unlock_bh(&br->hash_lock);
+
+	return err;
+}
+
+/* Set or clear the BR_FDB_OFFLOADED flag on the entry for (@addr, @vid)
+ * to reflect whether the entry is programmed in hardware.
+ * Takes br->hash_lock.
+ */
+void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
+			  const unsigned char *addr, u16 vid, bool offloaded)
+{
+	struct net_bridge_fdb_entry *fdb;
+
+	spin_lock_bh(&br->hash_lock);
+
+	fdb = br_fdb_find(br, addr, vid);
+	if (fdb && offloaded != test_bit(BR_FDB_OFFLOADED, &fdb->flags))
+		change_bit(BR_FDB_OFFLOADED, &fdb->flags);
+
+	spin_unlock_bh(&br->hash_lock);
+}
+#ifdef CONFIG_FASTNAT_MODULE
+/* Vendor (FASTNAT) bridge fast path.  Looks up the destination MAC of an
+ * ingress frame in the bridge fdb and, on a hit to a forwarding port,
+ * transmits the skb directly via br_dev_queue_push_xmit(), bypassing the
+ * normal bridge input path.  Frames whose fdb entry points back at the
+ * ingress device are dropped as loops.
+ * Returns 1 when the skb was consumed (forwarded or dropped), 0 to let
+ * the normal path handle it.
+ * NOTE(review): called from the RX path but uses br_port_get_rtnl() and no
+ * explicit RCU read lock around fdb_find_rcu() — presumably the caller
+ * guarantees rcu_read_lock(); confirm against the call site.
+ */
+int fast_br(struct sk_buff *skb)
+{
+	//lium_fastnat_del
+	const unsigned char *dest = NULL;
+	struct hlist_head *head;
+	struct net_bridge_fdb_entry *fdb;
+	struct net_bridge_port *p;
+	struct net_bridge *br;
+	u16 vid = 0;
+	int ntl_port_id = 0xff;
+	
+	if(!skb->dev)
+	{
+		//print_sun(SUN_DBG, "fast_br  skb->dev err skb->dev = %x\n", skb->dev);
+		return 0;
+	}
+					  
+	/*if(skb->mac_header == 0 || skb->mac_header == ~0U)
+		panic("driver  not  set  macheader !!!\n");*/
+					  
+	dest = eth_hdr(skb)->h_dest;
+				  
+
+	p = br_port_get_rtnl(skb->dev);
+	if (p == NULL || p->br == NULL)		  
+	{
+		//print_sun(SUN_DBG, "fast_br  br_port_get_rtnl err p = %x\n", p);
+		return 0;
+	}
+				  
+	br = p->br;
+	/* used only to resolve the vlan id for the fdb lookup */
+	br_should_learn(p, skb, &vid);
+
+	//head = &br->hash[br_mac_hash(dest, vid)];
+					  
+	//if((fdb = fdb_find_rcu(head, dest,vid)) != NULL)
+	if((fdb = fdb_find_rcu(&(br->fdb_hash_tbl), dest,vid)) != NULL)
+	{
+		
+		/* non-local hit on another (or hairpin) forwarding port: transmit directly */
+		if((!(test_bit(BR_FDB_LOCAL, &fdb->flags))) && fdb->dst && fdb->dst->dev && 
+			(((fdb->dst->flags & BR_HAIRPIN_MODE) || skb->dev != fdb->dst->dev) &&fdb->dst->state == BR_STATE_FORWARDING)) 
+		{
+			fast_tcpdump(skb);
+			if(fastnat_level == FAST_NET_DEVICE){
+				skb->dev->stats.rx_packets++;
+				skb->dev->stats.rx_bytes += skb->len;
+			}
+			skb->dev = fdb->dst->dev;
+			skb->isFastbr = 1;
+			fdb->updated = jiffies;
+			skb->now_location |= FASTBR_SUCC;
+			skb_rest_data_byproto(skb);
+
+			br_dev_queue_push_xmit(NULL, NULL, skb);
+			return 1;
+		}
+			  
+		
+		/* destination learned on the ingress port itself: loop, drop it */
+		if((!(test_bit(BR_FDB_LOCAL, &fdb->flags))) && fdb->dst && fdb->dst->dev && 
+			(skb->dev == fdb->dst->dev) && fdb->dst->state == BR_STATE_FORWARDING)
+		{
+			skbinfo_add(NULL,SKB_LOOP);
+			skb->dev->stats.rx_dropped++;
+			//print_sun(SUN_ERR,"fast_br loop data discarded, dev:%s \n", skb->dev->name);
+			kfree_skb(skb);
+			return 1;
+		}
+	}
+	//print_sun(SUN_DBG, "fast_br  fdb_find_rcu err fdb = %x \n",fdb);
+	
+	return 0;
+}
+EXPORT_SYMBOL(fast_br);
+
+
+
+/* Resolve the bridge port device that the fdb maps @dest to on bridge
+ * device @dev (lookup done on the bridge's pvid).  Returns the member
+ * port's net_device on a non-local, forwarding hit; otherwise returns
+ * @dev unchanged (including when @dev is not a bridge).
+ */
+struct net_device *getbrport_bydst(struct net_device *dev,unsigned char *dest)
+{
+	//lium_fastnat_del
+	//struct hlist_head *head;
+	struct net_bridge_fdb_entry *fdb;
+	struct net_bridge_port *p;
+	struct net_bridge *br;
+	struct net_bridge_vlan_group *vg;
+	__u16 vid;
+#if FASTNAT_DEBUG
+	printk("getbrport_bydst() begine");
+#endif
+
+	if (dev == NULL || !(dev->priv_flags & IFF_EBRIDGE))
+		return dev;
+#if FASTNAT_DEBUG
+	printk("getbrport_bydst() 1");
+#endif
+	br = netdev_priv(dev);
+	vg = br_vlan_group_rcu(br);
+	vid = br_get_pvid(vg);
+#if FASTNAT_DEBUG
+	printk("getbrport_bydst() 2");
+#endif
+	
+	//head = &br->hash[br_mac_hash(dest,vid)];
+#if FASTNAT_DEBUG
+	printk("getbrport_bydst() 3");
+#endif
+	//if((fdb = fdb_find_rcu(head, dest,vid)) != NULL)
+	if((fdb = fdb_find_rcu(&(br->fdb_hash_tbl), dest,vid)) != NULL)
+	{
+	
+		if((!(test_bit(BR_FDB_LOCAL, &fdb->flags))) && fdb->dst && fdb->dst->dev && 
+			(fdb->dst->state == BR_STATE_FORWARDING)) //(fdb->dst->flags & BR_HAIRPIN_MODE)
+		{
+			return fdb->dst->dev;
+		}
+	}
+	return dev;
+}
+
+extern void fast_tcpdump(struct sk_buff *skb);
+extern struct neigh_table arp_tbl;
+extern char default_route_name[IFNAMSIZ];
+/* name of the bridge behind the default route; learned lazily below */
+char default_br_name[IFNAMSIZ] = {0};
+/* Vendor (FASTNAT) helper for LAN/WAN address-conflict forwarding.
+ * For IPv4 skbs it rewrites the ethernet header and short-circuits
+ * transmission between the default-route (WAN) device and the default
+ * bridge (LAN) when the destination/source matches the bridge, its
+ * broadcast, or the WAN address.  DHCP (ports 67/68) is excluded.
+ * Returns 1 when the skb was consumed via dev_queue_xmit(), 0 to let
+ * the normal stack handle it.
+ * NOTE(review): uses __in_dev_get_rtnl() and br_port_get_rtnl() —
+ * presumably only called where RTNL (or equivalent) protection holds;
+ * confirm against the call sites.
+ */
+int fast_fwd_ip4addr_conflict(struct sk_buff *skb)
+{
+	struct iphdr *iph = ip_hdr(skb);
+	__be32 saddr,daddr,wan_ip,br_ip=0,br_bcast=0;
+	struct net_device* in_dev = NULL;
+	struct net_device* out_dev = NULL;
+	struct ethhdr *eth;
+	struct net_bridge_port *p;
+	struct net_bridge *br = NULL;
+	struct net_device *default_route_dev;
+	struct net_device *default_br_dev;
+	struct in_device *ip_ptr;
+
+	if(iph->version != 4 || skb->indev == NULL)
+	{
+		return 0;
+	}
+	default_route_dev = dev_get_by_name(&init_net, default_route_name);
+	if(default_route_dev == NULL)
+	{
+		return 0;
+	}
+	ip_ptr = __in_dev_get_rtnl(default_route_dev);
+	if(ip_ptr && ip_ptr->ifa_list)
+	{
+		wan_ip = ip_ptr->ifa_list->ifa_local;
+	}
+	else
+	{
+		default_br_name[0] = 0;
+		dev_put(default_route_dev);
+		return 0;
+	}
+	in_dev = skb->indev;
+	saddr = iph->saddr;
+	daddr = iph->daddr;
+	/* remember the bridge the ingress port belongs to */
+	p = br_port_get_rtnl(in_dev);
+	if (p != NULL) 		
+	{
+		br = p->br;
+		if (br && br->dev && strncmp(br->dev->name, default_br_name, IFNAMSIZ-1))
+		{
+			strncpy(default_br_name, br->dev->name, IFNAMSIZ-1);
+		}
+	}
+	default_br_dev = dev_get_by_name(&init_net, default_br_name);
+	if(default_br_dev)
+	{
+		ip_ptr = __in_dev_get_rtnl(default_br_dev);
+		if(ip_ptr && ip_ptr->ifa_list)
+		{
+			br_ip = ip_ptr->ifa_list->ifa_local;
+			br_bcast = ip_ptr->ifa_list->ifa_broadcast;
+		}
+	}
+	else
+	{
+		dev_put(default_route_dev);
+		return 0;
+	}
+	/* LAN -> WAN direction: destination is ours, hand to the WAN device */
+	if(br && ((daddr == br_ip) || (daddr == br_bcast) || (daddr == wan_ip)))
+	{
+		//printk("@!@1saddr=%08x,daddr=%08x,br_ip=%08x,br_bcast=%08x,wan_ip=%08x\n",saddr, daddr,  br_ip, br_bcast, wan_ip);
+		if (IPPROTO_UDP == iph->protocol)
+		{
+			struct udphdr *udph = (struct udphdr *)(skb->data + iph->ihl * 4);
+			/* 0x4300/0x4400 look like ports 67/68 in network byte
+			 * order on a little-endian CPU — TODO confirm; not
+			 * endian-portable as written
+			 */
+			if(udph->source == 0x4300 || udph->source == 0x4400
+				|| udph->dest == 0x4300 || udph->dest == 0x4400)
+			{
+				//printk("@!@dhcp packet\n");
+				dev_put(default_route_dev);
+				dev_put(default_br_dev);
+				return 0;
+			}
+		}
+		out_dev = default_route_dev;
+		skb_push(skb, ETH_HLEN);
+		eth = (struct ethhdr*)(skb->data);
+		memcpy(eth->h_source, in_dev->dev_addr, ETH_ALEN);
+		memcpy(eth->h_dest, out_dev->dev_addr, ETH_ALEN);
+		fast_tcpdump(skb);
+		skb->dev = out_dev;
+	}
+	/* WAN -> LAN direction: resolve the LAN neighbour and its bridge port */
+	else if(in_dev == default_route_dev && ((saddr == br_ip) || (saddr == br_bcast) || (saddr == wan_ip)))
+	{
+		struct neighbour *neigh  = neigh_lookup(&arp_tbl, &daddr, default_br_dev);
+		//printk("@!@2saddr=%08x,daddr=%08x,neigh=%08x,wan_ip=%08x\n",saddr, daddr, neigh, wan_ip);
+		if(neigh)
+		{
+			//printk("@!@neigh=%s\n",neigh->dev->name);
+			out_dev = getbrport_bydst(default_br_dev,neigh->ha);
+			if(out_dev)
+			{
+				//printk("@!@out_dev=%s\n",out_dev->name);
+				skb_push(skb, ETH_HLEN);
+				eth = (struct ethhdr*)(skb->data);
+				memcpy(eth->h_source, out_dev->dev_addr, ETH_ALEN);
+				memcpy(eth->h_dest, neigh->ha, ETH_ALEN);
+				//printk("@!@mac=%02x %02x %02x %02x %02x %02x\n",eth->h_dest[0],eth->h_dest[1],eth->h_dest[2],eth->h_dest[3],eth->h_dest[4],eth->h_dest[5]);
+			}
+			neigh_release(neigh);
+		}
+		if(out_dev == NULL)
+		{
+			printk("@!@dev: br port not found\n");
+			dev_put(default_route_dev);
+			dev_put(default_br_dev);
+			return 0;
+		}
+		fast_tcpdump(skb);
+		skb->dev = out_dev;
+	}
+	else
+	{
+		dev_put(default_route_dev);
+		dev_put(default_br_dev);
+		return 0;
+	}
+		
+	eth->h_proto = htons(ETH_P_IP);
+	skb->now_location |= FASTNAT_SUCC;
+	dev_queue_xmit(skb);
+	
+	dev_put(default_route_dev);
+	dev_put(default_br_dev);
+	return 1;
+}
+
+/* Vendor (FASTNAT) multicast fast path: for a packet arriving on the
+ * default-route (WAN) device, rebuild an ethernet header with the IPv4
+ * multicast MAC mapping and transmit it out port 1 of the default bridge.
+ * Returns 1 when the skb was consumed, 0 otherwise.
+ */
+int fast_for_multicast(struct sk_buff *skb)
+{
+	if (skb->indev && !strncmp(skb->indev->name, default_route_name, IFNAMSIZ-1))
+	{
+		struct net_device* dev = NULL;
+		struct net_bridge *br;
+		struct net_bridge_port *p;
+		
+		dev = dev_get_by_name(&init_net, default_br_name);
+		if (dev == NULL || !(dev->priv_flags & IFF_EBRIDGE))
+		{
+			printk("@!@dev: br not found\n");
+			return 0;
+		}
+		br = (struct net_bridge *)netdev_priv(dev);
+		/* always egress via bridge port number 1 */
+		p = br_get_port(br, 1);
+		if(p && p->dev)
+		{
+			struct ethhdr *eth;
+			struct iphdr *iph = ip_hdr(skb);
+			
+			skb_push(skb, ETH_HLEN);
+			eth = (struct ethhdr *)skb->data;
+			memcpy(eth->h_source, p->dev->dev_addr, ETH_ALEN); 
+			/* derive 01:00:5e:... destination from the group address */
+			ip_eth_mc_map(iph->daddr, eth->h_dest);
+			eth->h_proto = htons(ETH_P_IP);
+			skb->dev = p->dev;
+			skb->now_location |= FASTNAT_SUCC;
+			dev_queue_xmit(skb);
+			dev_put(dev);
+			return 1;
+		}
+		dev_put(dev);
+	}
+	return 0;
+}
+#endif
+
+/* Clear the BR_FDB_OFFLOADED flag on all entries learned on bridge port
+ * @dev for vlan @vid (used when a switchdev driver stops offloading).
+ * Runs under RTNL; takes the bridge hash_lock.
+ */
+void br_fdb_clear_offload(const struct net_device *dev, u16 vid)
+{
+	struct net_bridge_fdb_entry *f;
+	struct net_bridge_port *p;
+
+	ASSERT_RTNL();
+
+	p = br_port_get_rtnl(dev);
+	if (!p)
+		return;
+
+	spin_lock_bh(&p->br->hash_lock);
+	hlist_for_each_entry(f, &p->br->fdb_list, fdb_node) {
+		if (f->dst == p && f->key.vlan_id == vid)
+			clear_bit(BR_FDB_OFFLOADED, &f->flags);
+	}
+	spin_unlock_bh(&p->br->hash_lock);
+}
+EXPORT_SYMBOL_GPL(br_fdb_clear_offload);
diff --git a/upstream/linux-5.10/net/core/SI/net_other.c b/upstream/linux-5.10/net/core/SI/net_other.c
new file mode 100755
index 0000000..a6748c7
--- /dev/null
+++ b/upstream/linux-5.10/net/core/SI/net_other.c
@@ -0,0 +1,1222 @@
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <asm/uaccess.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <linux/device.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <linux/jhash.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/icmp.h>
+#include <net/ip.h>
+#include "../../bridge/br_private.h"
+#include <net/arp.h>
+#include <net/SI/netioc_proc.h>
+#include <net/SI/netioctl.h>
+#include <net/SI/errno_track.h>
+#include <net/ipv6.h>
+#include <net/SI/net_other.h>
+#include <linux/if_arp.h>
+
+#ifdef NETLINK_UC 
+#include "../../../../drivers/net/fast6/fast6.h"
+#include "../../../../drivers/net/fastnat/fastnat.h"
+#else
+#include <net/SI/fastnat.h>
+#include <net/SI/fast6.h>
+#endif
+#ifdef CONFIG_SPEED_OPT
+extern size_t skb_sys_pool_size(const void *ptr);
+#endif
+/* Bitmask selecting which packet types to monitor (PKT_TYPE_*_BIT);
+ * 0 disables packet checking entirely. */
+unsigned long check_pkt = 0;
+EXPORT_SYMBOL(check_pkt);
+int set_print_pkt = 0;  /* switch: dump packet contents to the log */
+
+/* Records of monitored packets entering/leaving queues (insert/unlink). */
+struct check_pkt_info skb_insert_info = {0};
+EXPORT_SYMBOL(skb_insert_info);
+struct check_pkt_info skb_unlink_info = {0};
+EXPORT_SYMBOL(skb_unlink_info);
+
+/*
+ * skip_mac_header - step over the Ethernet header (plus any 802.1Q / PPPoE
+ * encapsulation) and return a pointer to the ARP/IPv4 header.  The skb is
+ * not modified.  *protocol receives the network-layer protocol in host byte
+ * order (ETH_P_IP or ETH_P_ARP); returns NULL for anything unrecognised.
+ */
+static unsigned char* skip_mac_header(struct sk_buff *skb, unsigned short *protocol)
+{
+    __be16 next_pro;
+    unsigned char *curr_ptr = NULL;
+
+    if(skb_mac_header_was_set(skb))
+    {
+        curr_ptr = skb_mac_header(skb);
+        curr_ptr += ETH_HLEN;
+        next_pro = *(__be16 *)(curr_ptr - 2);   /* EtherType sits just before the payload */
+    }
+    else
+    {
+        /* No MAC header recorded: assume skb->data is the frame start. */
+        curr_ptr = skb->data + ETH_HLEN;
+        next_pro = *(__be16 *)(curr_ptr - 2);
+    }
+
+again:
+    if (htons(ETH_P_IP) == next_pro || htons(ETH_P_ARP) == next_pro)
+    {
+        *protocol = ntohs(next_pro);
+        return curr_ptr;
+    }
+    //vlan: strip one 802.1Q tag and re-examine the inner EtherType
+    else if (next_pro == cpu_to_be16(ETH_P_8021Q))
+    {
+        curr_ptr += VLAN_HLEN;
+        next_pro = *((__be16 *)(curr_ptr - 2));
+        goto again;
+    }
+    //pppoe session: PPP protocol 0x0021 = IPv4, 0x0057 = IPv6
+    else if (next_pro == htons(ETH_P_PPP_SES))
+    {
+        /* NOTE(review): a 0x0057 (IPv6-over-PPP) payload is also reported
+         * as ETH_P_IP here — confirm callers expect that on this path. */
+        if (*(curr_ptr + 6) == 0x00 && (*(curr_ptr + 7) == 0x21 || *(curr_ptr + 7) == 0x57))
+        {
+            next_pro = htons(ETH_P_IP);
+            curr_ptr += PPPOE_HEADER_LEN;
+            goto again;
+        }
+    }    
+    return NULL;
+}
+
+
+/*½âÎöDHCPÑ¡Ïî×ֶΣ¬»ñÈ¡¶ÔÓ¦message type,ûÓнâoverload*/
+unsigned char *dhcp_option_get(unsigned char *data, int data_len, int code)
+{
+	unsigned char *opt_ptr;
+	int len;
+	int overload = 0;
+
+	opt_ptr = data;
+	while (1) {
+		if (data_len <= 0)
+			return NULL;
+
+		if (opt_ptr[0] == DHCP_PADDING) 
+        {
+			data_len--;
+			opt_ptr++;
+			continue;
+		}
+		if (opt_ptr[0] == DHCP_END)
+			return NULL;
+        
+		len = 2 + opt_ptr[1];
+		data_len -= len;
+		if (data_len < 0)
+			return NULL;
+
+		if (opt_ptr[0] == code)
+			return opt_ptr + 2;
+
+		opt_ptr += len;
+	}
+
+	return NULL;
+}
+
+
+/* Dump up to MAX_PKT_NUM recorded packet entries (protocol name, message
+ * type, timestamp in jiffies) from @pkt_info to the kernel log. */
+void print_check_pkt_info(struct check_pkt_info *pkt_info, int num)
+{
+    int idx;
+    int count = (num > MAX_PKT_NUM) ? MAX_PKT_NUM : num;
+
+    printk("\n%10s %10s %10s\n", "Protocol", "MsgType", "Time");
+    for (idx = 0; idx < count; idx++)
+    {
+        printk("%10s %10d %10lu\n",
+            proto_str[pkt_info->info[idx].proto_type],
+            pkt_info->info[idx].msg_type,
+            pkt_info->info[idx].time);
+    }
+}
+EXPORT_SYMBOL(print_check_pkt_info);
+
+/*
+ * check_packet_type - test whether @skb is one of the monitored packet types
+ * (ARP / DHCP / ICMP echo, selected via the check_pkt bitmask) and record
+ * its protocol, message type and timestamp into @pkt_info.
+ * Returns 1 when the packet matched, 0 otherwise.
+ */
+int check_packet_type(struct sk_buff *skb, struct pkt_info *pkt_info)
+{
+    struct iphdr *ip_hdr = NULL;
+    struct icmphdr *icmphdr = NULL;
+    struct udphdr *udp_hdr = NULL;
+    struct arphdr *arp_hdr = NULL;
+    unsigned char *data_ptr = NULL;
+    unsigned char *opt_ptr = NULL;
+    unsigned short data_len = 0;
+    unsigned short protocol = 0;
+
+    if(0 == check_pkt)
+        return 0;
+
+    memset(pkt_info, 0, sizeof(struct pkt_info));
+
+    /* Skip the MAC header to reach the ARP/IPv4 header. */
+    data_ptr = skip_mac_header(skb, &protocol);
+    if(NULL == data_ptr)
+        return 0;
+
+    if(ETH_P_ARP == protocol)
+    {
+        if(test_bit(PKT_TYPE_ARP_BIT, &check_pkt))
+        {
+            arp_hdr = (struct arphdr *)data_ptr;
+            pkt_info->proto_type = PROTO_TYPE_ARP;
+            pkt_info->msg_type   = ntohs(arp_hdr->ar_op);
+            pkt_info->time       = jiffies;
+            return 1;
+        }
+    }
+    else if(ETH_P_IP == protocol)
+    {   
+        ip_hdr = (struct iphdr *)data_ptr;
+
+        /* For fragmented packets only the first fragment is inspected. */
+        if(ntohs(ip_hdr->frag_off) & IP_OFFSET)
+        {
+            return 0;
+        }
+
+        data_len = ntohs(ip_hdr->tot_len);
+
+        switch(ip_hdr->protocol)
+        {
+            case IPPROTO_UDP:
+                udp_hdr = (struct udphdr *)((unsigned char *)ip_hdr + ip_hdr->ihl * 4);
+                if(test_bit(PKT_TYPE_DHCP_BIT, &check_pkt))
+                {
+                    /* DHCP is identified purely by the 67/68 port pair. */
+                    if((DHCP_CLIENT_PORT == ntohs(udp_hdr->source) && DHCP_SERVER_PORT == ntohs(udp_hdr->dest)) || 
+                        (DHCP_CLIENT_PORT == ntohs(udp_hdr->dest) && DHCP_SERVER_PORT == ntohs(udp_hdr->source)))
+                    {
+                        /* Skip the UDP header */
+                        data_ptr = (unsigned char *)udp_hdr + 8;
+                        /* Skip the fixed DHCP header */
+                        data_ptr += 236;
+                        /* Skip the magic cookie */
+                        data_ptr += 4;
+                        /* NOTE(review): wraps for truncated packets since
+                         * data_len is unsigned — confirm only complete DHCP
+                         * frames reach this point. */
+                        data_len = data_len - ip_hdr->ihl * 4 - 8 - 236 - 4;
+
+                        /* Extract the DHCP message-type option. */
+                        opt_ptr = dhcp_option_get(data_ptr, data_len, DHCP_MSG_TYPE);
+                        if(opt_ptr)
+                            pkt_info->msg_type = opt_ptr[0];
+
+                        pkt_info->proto_type = PROTO_TYPE_DHCP;
+                        pkt_info->time = jiffies;
+
+                        return 1;
+                    }
+                }
+                break;
+                
+            case IPPROTO_TCP:
+                break;
+            case IPPROTO_ICMP:
+                icmphdr = (struct icmphdr *)((unsigned char *)ip_hdr + ip_hdr->ihl * 4);
+                if(test_bit(PKT_TYPE_PING_BIT, &check_pkt))
+                {
+                    if(ICMP_ECHOREPLY == icmphdr->type || ICMP_ECHO == icmphdr->type)
+                    {
+                        pkt_info->proto_type = PROTO_TYPE_PING;
+                        pkt_info->msg_type   = icmphdr->type;
+                        pkt_info->time       = jiffies;
+                        return 1;
+                    }
+                }
+                break;
+            default:
+                break;
+        }
+    }
+
+    return 0;
+}
+EXPORT_SYMBOL(check_packet_type);
+
+
+#ifdef CONFIG_NETCTL
+
+/* Hex-dump a packet buffer to the kernel log, 16 bytes per row, when the
+ * set_print_pkt switch is on; rate-limited via net_ratelimit().
+ * flag: 0 = received packet, 1 = sent packet, anything else = generic. */
+void net_print_packet(unsigned char *data, unsigned int len, int flag)
+{
+    unsigned int pos;
+
+    if (!set_print_pkt || !net_ratelimit())
+        return;
+
+    if (0 == flag)
+        printk("\nrecv packet:\n");
+    else if (1 == flag)
+        printk("\nsend packet:\n");
+    else
+        printk("\nprint packet:\n");
+
+    for (pos = 0; pos < len; pos++)
+    {
+        if (pos % 16 == 0)
+            printk("\n");
+        printk("%2x ", data[pos]);
+    }
+}
+
+
+/* Log an rtnetlink send event (netlink message type plus destination
+ * multicast group) to the network run-track facility. */
+void track_netlink(struct sk_buff *skb,u32 group)
+{
+	struct nlmsghdr *hdr = (struct nlmsghdr *)skb->data;
+
+	net_run_track(PRT_RTNL_SEND,"rtnetlink_send,msg_type =%d;group = %d",hdr->nlmsg_type,group);
+}
+
+
+/*
+ * check_macaddr_only - compare @ha (length @ha_len) against the MAC address
+ * of every net device and panic if a remote host is using the same address
+ * as one of our own devices.  Known trigger scenarios:
+ *   1. looped-back traffic,
+ *   2. two CPEs configured with the same MAC while traffic is otherwise normal.
+ * No-op unless the addr_check switch is enabled.
+ */
+void check_macaddr_only(unsigned char *ha, unsigned char ha_len)
+{
+	struct net_device *dev;
+	unsigned char addr_len = 0;
+	unsigned char addr[ETH_ALEN] = {0};
+	
+	if(0 == addr_check)
+	{
+		return;
+	}
+	
+	read_lock(&dev_base_lock);
+
+	for_each_netdev(&init_net, dev)
+	{
+		if(dev->addr_len != ha_len)
+		{
+			/* Skip devices with a different address length: sit0 defaults
+			 * to addr_len==4 with an all-zero dev_addr, which used to
+			 * false-positive against MACs starting with four zero bytes. */
+			continue;
+		}
+		addr_len = ha_len;
+		if((addr_len > 0) && !memcmp(dev->dev_addr, ha, addr_len))
+		{
+			addr_len = addr_len > ETH_ALEN ? ETH_ALEN : addr_len;
+			memcpy(addr, ha, addr_len);
+			
+			/* Fix: unlock before panicking — the unlock/return after
+			 * panic() in the original were unreachable dead code. */
+			read_unlock(&dev_base_lock);
+			panic("check_macaddr_only: mac address of pc is same as the device, dev name: %s, mac %x:%x:%x:%x:%x:%x\n", 
+				dev->name, addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);		
+		}
+	}
+
+	read_unlock(&dev_base_lock);
+
+	return;
+}
+
+
+/* Bump the counter for @skbinfo_type in skbinfo_dbg[]; for SKB_TYPE_DATA,
+ * @addr is the skb whose payload bytes are added to SKB_DATA_BYTES.  When
+ * the type matches the skb_info_track trigger, dump the stack (and panic if
+ * bit 7 of skb_info_track is set). */
+void skbinfo_add(unsigned char *addr,unsigned int skbinfo_type)
+{
+	struct sk_buff *skb;
+	skbinfo_dbg[skbinfo_type] ++;
+/*
+	if(skb_max_panic && skbinfo_type == SKB_TYPE_ALL)
+	{
+		//not protected by a spinlock here; if this feature is enabled it needs lock protection
+		if(skbinfo_dbg[SKB_TYPE_ALL] > skb_max_panic)
+			panic("too much skb is alloced,pleasw check data_leak ");
+	}
+*/
+	if(skbinfo_type == SKB_TYPE_DATA)
+	{
+		skb = (struct sk_buff *)addr;
+        //if(skb->isExtern == 0)
+		//    skbinfo_dbg[SKB_DATA_BYTES] += ksize(skb->head);
+        //else
+            skbinfo_dbg[SKB_DATA_BYTES] += skb->data - skb->head + skb->len;
+	}
+	if(skbinfo_type==(skb_info_track&0X7F)) 
+	{ 
+		printk("net resource monitor!!!");   
+		dump_stack(); 
+		if(skb_info_track&0X80)
+			panic("net team dbg panic !!!");
+	} 
+	
+}
+
+
+/* Counterpart of skbinfo_add(): decrement the counter for @skbinfo_type and,
+ * for SKB_TYPE_DATA, subtract the skb's payload bytes from SKB_DATA_BYTES. */
+void skbinfo_del(unsigned char *addr,unsigned int skbinfo_type)
+{
+	struct sk_buff *skb;
+	skbinfo_dbg[skbinfo_type] --;
+	
+	if(skbinfo_type == SKB_TYPE_DATA)
+	{
+		skb = (struct sk_buff *)addr;
+        //if(skb->isExtern == 0)
+		//    skbinfo_dbg[SKB_DATA_BYTES] -= ksize(skb->head);
+        //else
+            skbinfo_dbg[SKB_DATA_BYTES] -= skb->data - skb->head + skb->len;
+	}
+
+}
+
+/* Increment the runtime-resource counter for @info_type; when the type
+ * matches the net_info_track trigger, dump the stack (and panic if bit 7 of
+ * net_info_track is set).  @addr is currently unused. */
+void netruninfo_add(unsigned char *addr,unsigned int info_type)
+{
+	netruninfo_dbg[info_type] ++;	
+	if(info_type==(net_info_track&0X7F)) 
+	{ 
+		printk("net resource monitor!!!");   
+		dump_stack(); 
+		if(net_info_track&0X80)
+			panic("net team dbg panic !!!");
+	} 
+}
+
+/* Decrement the runtime-resource counter for @info_type (see netruninfo_add). */
+void netruninfo_del(unsigned char *addr,unsigned int info_type)
+{
+	netruninfo_dbg[info_type] --;
+}
+/* Sliding comparison: test whether the @len-byte pattern at @start2 matches
+ * at any of the first 101 int-sized offsets from @start1.  Returns 0 on a
+ * match, -1 otherwise.
+ * NOTE(review): reads up to 100 ints past @start1 plus @len bytes — callers
+ * must guarantee the buffer is large enough. */
+static int filter(void *start1, void *start2, unsigned int len)
+{
+    int i = 100, ret = -1;
+    int *p = start1, *q = start2;
+
+    do {
+        if (!memcmp(p, q, len)) {
+            ret = 0;
+            break;
+        }
+        ++p;                    /* advance the window by sizeof(int) */
+    }while(i--);
+
+    return ret;
+}
+
+/* Validate skb->network_header and return a usable IP-header pointer.
+ * *mark encodes how the headers lined up (positive = consistent layout,
+ * negative = suspicious and compensated for); NULL means no sane IP header
+ * could be derived. */
+unsigned char * check_skb_for_dump(struct sk_buff *skb, int *mark)
+{
+    unsigned char * mac_head, *net_head, *tsp_head;
+    //dri->net 0; net->dri 1;
+    mac_head = skb_mac_head(skb);
+    net_head = skb_network_head(skb);
+    tsp_head = skb_transport_head(skb);
+    *mark = 0;
+    if (!skb->dev){
+   //     printk("skb->dev = NULL err in %s.\n", __func__);
+        return NULL;
+    }
+    if (skb->data == NULL){
+        printk("skb->data = NULL err in %s.\n", __func__);
+        return NULL;
+    }
+    /* data at the MAC header: IP header sits hard_header_len further in. */
+    if (skb->data == mac_head){
+        if(net_head && net_head < (mac_head + skb->dev->hard_header_len )){
+            *mark = -2;         /* network_header inside the MAC header: bogus */
+            return mac_head + skb->dev->hard_header_len;
+        }
+        *mark = 2;
+         return mac_head + skb->dev->hard_header_len;  
+    }
+    else if(skb->data == net_head){
+        if(mac_head && net_head < (mac_head + skb->dev->hard_header_len )){
+            *mark = -23;        /* headers overlap: trust the MAC-derived offset */
+            return mac_head + skb->dev->hard_header_len;
+        }
+        if(tsp_head && net_head < (tsp_head - 20)){
+            *mark = -3;         /* gap larger than a minimal 20-byte IP header */
+            return net_head;
+        }
+        *mark = 3;
+        return net_head;
+    }
+    else if(skb->data == tsp_head ){
+        /* Only the transport header is trustworthy at this point. */
+        if((!net_head) || (net_head && net_head < (tsp_head - 20))){
+            *mark = -4;
+            return NULL;
+        }
+        *mark = 4;
+        return net_head;
+    }
+    else{
+     //   printk("unexpected err in %s\n", __func__);
+        return NULL;
+    }
+        
+}
+
+extern struct nf_conntrack_tuple tuple_info;
+extern int getconn_type;
+
+/*
+ * dump_net_stack - at skb_release_data() time, match the skb being freed
+ * against the conntrack tuple filter (tuple_info) and dump the call stack
+ * when it matches.  Only active for getconn_type 8 or 10.
+ */
+void dump_net_stack(struct sk_buff *skb, unsigned int offset)
+{
+    /* Fix: declarations now precede the first statement; the original mixed
+     * declarations after code, which gnu89 kernel builds warn about. */
+    int mark = 0;
+    struct iphdr *iphv4;
+    struct ipv6hdr *iphv6;
+    unsigned char *tsp_start = NULL;
+    unsigned char *iph;
+
+    if(getconn_type != 8 && getconn_type != 10)
+        return;
+
+    iph = check_skb_for_dump(skb, &mark);
+    
+    if(getconn_type == 10 && iph){
+        if (skb->now_location & FASTNAT_SUCC){
+            struct tcphdr *th = (struct tcphdr *)(iph + 20);
+
+            printk("skb->len = %d now_location = %d\n", skb->len, skb->now_location);
+            /* Fix: specifiers now match the u32/u16 argument widths
+             * (were %lu/%ld for htonl/htons results). */
+            printk("th->seq=%u, th->sport=%d, th->dport=%d\n", htonl(th->seq), htons(th->source), htons(th->dest));
+            goto out;
+        }
+        return;
+    }
+    if(iph){
+        if((iph[0] & 0xf0) == 0x40){
+            /* IPv4: bail out unless protocol/daddr/saddr all match the filter. */
+            iphv4 = (struct iphdr*)iph;
+            if(tuple_info.dst.protonum && tuple_info.dst.protonum != iphv4->protocol){
+                return;
+            }
+            if(tuple_info.dst.u3.ip && memcmp(&tuple_info.dst.u3.ip, &iphv4->daddr, 4) != 0){
+                return;
+            }
+            if(tuple_info.src.u3.ip && memcmp(&tuple_info.src.u3.ip, &iphv4->saddr, 4) != 0){
+                return;
+            }
+            tsp_start = (unsigned char*)iphv4 + (iphv4->ihl << 2);
+        }
+        else if((iph[0] & 0xf0) == 0x60){
+            /* IPv6: same filtering against the v6 addresses. */
+            iphv6 = (struct ipv6hdr*)iph;
+            if(tuple_info.dst.protonum && tuple_info.dst.protonum != iphv6->nexthdr){
+                return;
+            }
+            if(tuple_info.dst.u3.ip && memcmp(&tuple_info.dst.u3.in6, &iphv6->daddr, 16) != 0){
+                return;
+            }
+            if(tuple_info.src.u3.ip && memcmp(&tuple_info.src.u3.in6, &iphv6->saddr, 16) != 0){
+                return;
+            }
+            tsp_start = (unsigned char*)iphv6 + 40;
+        }
+    }
+    /* If only a transport header exists at free time, compare ports only. */
+    if(mark == -4){
+        tsp_start = skb->data; 
+    }
+    if(tsp_start == NULL)
+        return;
+    
+    if(tuple_info.src.u.all && memcmp(&tuple_info.src.u.all, tsp_start, 2) != 0){
+        return;
+    }
+    if(tuple_info.dst.u.all && memcmp(&tuple_info.dst.u.all, tsp_start + 2, 2) != 0) {
+        return;
+    }
+    
+    printk("free skb match mark = %d:\n", mark);
+    if(tuple_info.dst.protonum && mark != -4)
+        printk("protonum = %d ",tuple_info.dst.protonum);
+    if(tuple_info.src.u3.ip && mark != -4){
+        if(iph && (iph[0] & 0xf0) == 0x40){
+            printk("sip: %08x ", ntohl(tuple_info.src.u3.ip));
+        }else if(iph && (iph[0] & 0xf0) == 0x60){
+            printk("sip: %x:%x:%x:%x:%x:%x:%x:%x ", ntohs(tuple_info.src.u3.in6.s6_addr16[0]), ntohs(tuple_info.src.u3.in6.s6_addr16[1]), ntohs(tuple_info.src.u3.in6.s6_addr16[2]), ntohs(tuple_info.src.u3.in6.s6_addr16[3]), 
+                    ntohs(tuple_info.src.u3.in6.s6_addr16[4]), ntohs(tuple_info.src.u3.in6.s6_addr16[5]), ntohs(tuple_info.src.u3.in6.s6_addr16[6]), ntohs(tuple_info.src.u3.in6.s6_addr16[7]));
+        }
+    }
+    if(tuple_info.src.u.all){
+        printk("sport : %d ", ntohs(tuple_info.src.u.all));
+    }
+    if(tuple_info.dst.u3.ip && mark != -4){
+        if(iph && (iph[0] & 0xf0) == 0x40){
+            /* Fix: the original format string was "%dip: %08x " with a single
+             * argument — a printk format/argument mismatch; label is "dip". */
+            printk("dip: %08x ", ntohl(tuple_info.dst.u3.ip));
+        }else if(iph && (iph[0] & 0xf0) == 0x60){
+            printk("dip: %x:%x:%x:%x:%x:%x:%x:%x ", ntohs(tuple_info.dst.u3.in6.s6_addr16[0]), ntohs(tuple_info.dst.u3.in6.s6_addr16[1]), ntohs(tuple_info.dst.u3.in6.s6_addr16[2]), ntohs(tuple_info.dst.u3.in6.s6_addr16[3]), 
+                    ntohs(tuple_info.dst.u3.in6.s6_addr16[4]), ntohs(tuple_info.dst.u3.in6.s6_addr16[5]), ntohs(tuple_info.dst.u3.in6.s6_addr16[6]), ntohs(tuple_info.dst.u3.in6.s6_addr16[7]));
+        }
+    }
+    if(tuple_info.dst.u.all) {
+        printk("dport : %d ", ntohs(tuple_info.dst.u.all));
+    }
+    printk("\n");
+   // if (skb_dump_len) 
+      //  if(!filter((skb->head + offset), skb_dump_str, skb_dump_len))
+out:
+    dump_stack();
+}
+
+
+
+/***********************************************************************************************************/
+/* The interfaces below were added by the network team; they are too tightly coupled to live in a separate */
+/* source file, so they must be clearly commented here instead.                                            */
+/***********************************************************************************************************/
+
+extern int set_tcpdump;
+extern char br_name[];
+
+/* Return the IP header length implied by the first payload byte after the
+ * Ethernet header: 20 for IPv4, 40 for IPv6, defaulting to 20.
+ * Requires skb->mac_header to be valid. */
+unsigned int get_network_head_len(struct sk_buff *skb)
+{
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+    unsigned char *buf = (unsigned char *)skb->head + skb->mac_header + ETH_HLEN;
+#else
+    unsigned char *buf = (unsigned char *)skb->mac_header + ETH_HLEN;
+#endif
+
+    if ((((unsigned)buf[0]) & 0xF0) == 0x40)
+        return 20; //IPv4 header length
+        
+    if ((((unsigned)buf[0]) & 0xF0) == 0x60)
+        return 40; //IPv6 header length
+        
+    return 20; //treat anything else as IPv4
+}
+
+/* Modeled on __netif_receive_skb: lets tcpdump capture receive-direction
+ * packets that bypass the normal stack (e.g. fastnat ingress).  Works with
+ * ordinary tcpdump usage but costs performance, hence gated by the
+ * set_tcpdump proc switch (bit 0 = ingress). */
+void tcpdumpin_sq(struct sk_buff *skb)
+{
+    unsigned char        *data_priv;
+    struct packet_type *ptype;
+    __be16 type;
+    unsigned int len_priv;
+    int dev_flag = 0;
+
+    if (!(set_tcpdump & 1))
+        return;
+    
+    if (list_empty(&ptype_all))
+    {
+        return;
+    }
+
+    /* Save the current frame pointers; restored after capture. */
+    data_priv = skb->data;
+    len_priv = skb->len;
+    
+    /* Point len/data at the MAC header, adjusting for whichever header
+     * offsets are currently unset on this skb. */
+    if (skb->mac_header == 0 || skb->mac_header == ~0U)
+    {
+        skb_reset_mac_header(skb);
+        skb->network_header = skb->mac_header + ETH_HLEN;
+        skb->transport_header = skb->network_header + get_network_head_len(skb);
+    }
+    else if (skb->network_header == 0 || skb->network_header == ~0U)
+    {
+        skb->network_header = skb->mac_header + ETH_HLEN;
+        skb->transport_header = skb->network_header + get_network_head_len(skb);
+        skb_reset_data_bymachd(skb);
+        skb_push(skb, ETH_HLEN);
+    }
+    else if (skb->transport_header == 0 || skb->transport_header == ~0U)
+    {
+        skb->network_header = skb->mac_header + ETH_HLEN;
+        skb->transport_header = skb->network_header + get_network_head_len(skb);
+        skb_reset_data_bymachd(skb);
+        skb_push(skb, ETH_HLEN);
+    }
+    
+    /* When dev is NULL, temporarily borrow a device so every tap point can
+     * still capture the packet; cleared again below. */
+    if (skb->dev == NULL)
+    {
+        dev_flag = 1;
+    }
+
+    rcu_read_lock();
+    list_for_each_entry_rcu(ptype, &ptype_all, list) {
+        /* Ingress skb is a complete MAC frame: no offset adjustment needed. */
+        if ((!ptype->dev || !skb->dev || ptype->dev == skb->dev))  // && (ptype->func == packet_rcv)
+        {
+            /* packet_rcv requires skb->dev != NULL, hence this trick; it may
+             * cause some extra packets to be captured. */
+            if(skb->dev == NULL && ptype->dev)
+                skb->dev = ptype->dev;
+            else if(skb->dev == NULL)
+                skb->dev = __dev_get_by_name(&init_net, br_name);
+
+            /* NOTE(review): extra reference compensates for the one the tap
+             * handler consumes — confirm against this kernel's packet_rcv. */
+            atomic_inc(&skb->users);
+            //track_add(skb, 0, USER_INFO, 0);
+            ptype->func(skb, skb->dev, ptype, skb->dev);
+            if(dev_flag == 1)
+                skb->dev = NULL;
+        }
+    }
+    rcu_read_unlock();
+
+    /* Restore the skb's original state. */
+    skb->data = data_priv;
+    skb->len = len_priv;
+    if(dev_flag == 1)
+        skb->dev = NULL;
+}
+
+/* Modeled on dev_queue_xmit_nit: lets tcpdump capture transmit-direction
+ * packets that bypass the normal stack (e.g. fastnat egress).  Works with
+ * ordinary tcpdump usage but costs performance, hence gated by the
+ * set_tcpdump proc switch (bit 1 = egress). */
+void tcpdumpout_sq(struct sk_buff *skb)
+{
+    struct packet_type *ptype;
+    struct sk_buff *skb2 = NULL;
+    int dev_flag = 0;
+    sk_buff_data_t        transport_header;
+    sk_buff_data_t        network_header;
+    sk_buff_data_t        mac_header;
+    unsigned char        *data_priv;
+    unsigned int len_priv;
+    
+    if (!(set_tcpdump & 2))
+        return;
+    
+    if (list_empty(&ptype_all))
+    {
+        return;
+    }
+
+    /* Save the current frame pointers; restored after capture. */
+    data_priv = skb->data;
+    len_priv = skb->len;
+    transport_header = skb->transport_header;
+    network_header = skb->network_header;
+    mac_header = skb->mac_header;
+
+    /* Point len/data at the MAC header, filling in whichever header offsets
+     * are still unset on this skb. */
+    if (skb->mac_header == 0 || skb->mac_header == ~0U)
+    {
+        skb_reset_mac_header(skb);
+        skb->network_header = skb->mac_header + ETH_HLEN;
+        skb->transport_header = skb->network_header + get_network_head_len(skb);
+    }
+    else if (skb->network_header == 0 || skb->network_header == ~0U)
+    {
+        skb->network_header = skb->mac_header + ETH_HLEN;
+        skb->transport_header = skb->network_header + get_network_head_len(skb);
+        skb_reset_data_bymachd(skb);
+        skb_push(skb, ETH_HLEN);
+    }
+    else if (skb->transport_header == 0 || skb->transport_header == ~0U)
+    {
+        skb->transport_header = skb->network_header + get_network_head_len(skb);
+        skb_reset_data_bymachd(skb);
+        skb_push(skb, ETH_HLEN);
+    }
+
+    /* At the TCP layer the egress dev may not be assigned yet; it is forced
+     * to br0 below, so capturing egress packets requires an unrestricted
+     * (any-device) capture, otherwise WAN-side packets are missed. */
+    if(skb->dev == NULL)
+    {
+        dev_flag = 1;
+    }
+    
+    rcu_read_lock();
+    list_for_each_entry_rcu(ptype, &ptype_all, list) {
+        /* At the TCP/IP layer data does not point at the MAC header, but we
+         * shifted it above, so the full frame is captured; PPPoE frames
+         * still cannot be captured completely. */
+        if ((ptype->dev == skb->dev || !ptype->dev || !skb->dev) && (ptype->af_packet_priv != NULL) &&
+          (struct sock *)ptype->af_packet_priv != skb->sk) // && (ptype->func == packet_rcv)
+        {
+            /* packet_rcv requires skb->dev != NULL, hence this trick; it may
+             * cause some extra packets to be captured. */
+            if(skb->dev == NULL && ptype->dev)
+                skb->dev = ptype->dev;
+            else if(skb->dev == NULL)
+                skb->dev = __dev_get_by_name(&init_net, br_name);
+            
+            skb2 = skb_clone(skb, GFP_ATOMIC);
+            if (!skb2)
+                break;
+            ptype->func(skb2, skb->dev, ptype, skb->dev);
+            
+            if (dev_flag == 1)
+                skb->dev = NULL;
+        }
+    }
+    rcu_read_unlock();
+
+    /* Restore the skb's original state. */
+    if(dev_flag == 1)
+        skb->dev = NULL;
+    skb->transport_header = transport_header;
+    skb->network_header = network_header;
+    skb->mac_header = mac_header;
+    skb->data = data_priv;
+    skb->len = len_priv;
+}
+
+
+
+
+/* Collect the neighbour entries of table @tbl (e.g. arp_tbl) whose output
+ * device is @dev into @neigh_info, capped at 20 entries.
+ * NOTE(review): assumes neigh_info->neigh_nod has at least 20 slots — confirm
+ * against struct dev_neigh_info. */
+void get_neigh_bydev(struct neigh_table *tbl, struct net_device *dev, struct dev_neigh_info *neigh_info)
+{
+    int i;
+    int len;
+    unsigned int neigh_num = 0;
+    struct neigh_hash_table *nht;
+
+    if(tbl->family != AF_INET && tbl->family != AF_INET6)
+        return;
+
+    rcu_read_lock_bh();
+    nht = rcu_dereference_bh(tbl->nht);
+
+    for(i = 0; i < (1 << nht->hash_shift); i++)
+    {
+        struct neighbour *neigh;
+
+        for(neigh = rcu_dereference_bh(nht->hash_buckets[i]); neigh != NULL; neigh = rcu_dereference_bh(neigh->next))
+        {
+            if(neigh->dev == dev)
+            {
+                /* Clamp the key length to the buffer size. */
+                len = tbl->key_len > MAX_IPADDR_LEN ? MAX_IPADDR_LEN : tbl->key_len;
+
+                memcpy(neigh_info->neigh_nod[neigh_num].ip_addr, neigh->primary_key, len);
+                neigh_info->neigh_nod[neigh_num].ip_len = len;
+                memcpy(neigh_info->neigh_nod[neigh_num].mac_addr, neigh->ha, MAX_MACADDR_LEN);
+                neigh_num++;
+                if(neigh_num >= 20)
+                    goto end;
+            }
+        }
+    }
+
+end:
+    neigh_info->num = neigh_num;
+
+    rcu_read_unlock_bh();
+
+    return;
+}
+
+/* Look up the neighbour entry in @tbl whose hardware address equals @addr
+ * and copy its IP/MAC into @info; first match wins.
+ * NOTE(review): walks nht with rcu_dereference_bh() but takes no
+ * rcu_read_lock_bh() itself — relies on the caller running with BH disabled
+ * (getneigh_ofdev holds spin_lock_bh); confirm all call sites. */
+void get_neigh_bymac(struct neigh_table *tbl,mac_addr *addr, struct neigh_info *info)
+{
+    int i;
+    int len;
+    struct neigh_hash_table *nht;
+
+    if(tbl->family != AF_INET && tbl->family != AF_INET6)
+        return;
+
+    nht = rcu_dereference_bh(tbl->nht);
+
+    for(i = 0; i < (1 << nht->hash_shift); i++)
+    {
+        struct neighbour *neigh;
+
+        for(neigh = rcu_dereference_bh(nht->hash_buckets[i]); neigh != NULL; neigh = rcu_dereference_bh(neigh->next))
+        {
+            if(!compare_ether_addr(neigh->ha,addr->addr)) 
+            {
+                /* Clamp the key length to the buffer size. */
+                len = tbl->key_len > MAX_IPADDR_LEN ? MAX_IPADDR_LEN : tbl->key_len;
+
+                memcpy(info->ip_addr, neigh->primary_key, len);
+                info->ip_len = len;
+                memcpy(info->mac_addr, neigh->ha, MAX_MACADDR_LEN);
+                return;
+            }
+        }
+	}
+}
+
+/* Get the neighbour list of a layer-2 bridge-port device: first find every
+ * remote neighbour MAC learned on @dst_dev in the bridge FDB, then resolve
+ * each MAC to its IP via arp_tbl (get_neigh_bymac). */
+void getneigh_ofdev(struct net_device *dst_dev,struct dev_neigh_info  *neigh_info)
+{
+	int i;
+	int neigh_num = 0;
+	struct net_device *br_dev;
+	struct net_bridge *br;
+	
+	br_dev = dev_get_by_name(&init_net, br_name);
+	if (!br_dev) {
+		/* Fix: the original dereferenced a NULL bridge device here. */
+		neigh_info->num = 0;
+		return;
+	}
+	br = netdev_priv(br_dev);
+	
+	spin_lock_bh(&br->hash_lock);
+	for (i = 0; i < BR_HASH_SIZE; i++) {
+		struct hlist_node *h;
+		hlist_for_each(h, &br->hash[i]) {
+			struct net_bridge_fdb_entry *f;
+
+			f = hlist_entry(h, struct net_bridge_fdb_entry, hlist);
+			if (f->dst && f->dst->dev == dst_dev && !(test_bit(0, &f->flags))) {//BR_FDB_LOCAL==0
+				get_neigh_bymac(&arp_tbl, &f->addr,neigh_info->neigh_nod+neigh_num);
+				neigh_num++;
+				/* Fix: bound the output like the sibling helpers do;
+				 * the original could overrun the 20-slot array. */
+				if (neigh_num >= 20)
+					goto out;
+			}
+		}
+	}
+out:
+	spin_unlock_bh(&br->hash_lock);
+	neigh_info->num = neigh_num;
+	/* Fix: release the reference taken by dev_get_by_name(); the original
+	 * leaked the bridge device on every call. */
+	dev_put(br_dev);
+}
+
+/* For every bridge entry in @dev_info, re-attribute each of the bridge's
+ * neighbours to the physical bridge port it was actually learned on (via the
+ * bridge FDB), appending it to that port's neighbour list. */
+void update_brport_info(struct devlist_info *dev_info){
+    int i = 0, j = 0, k = 0;
+    struct net_device *br_dev;
+    struct net_device *temp_dev;
+    struct dev_neigh_info *temp_neigh;
+    int temp_count ;
+    for(i = 0; i < dev_info->num; i++){
+        if(dev_info->info[i].dev_layer == BR_DEV){
+            br_dev = dev_get_by_name(&init_net, dev_info->info[i].name);
+            if(!br_dev){
+                /* Fix: the original dereferenced br_dev without a NULL
+                 * check; skip entries whose device has gone away. */
+                continue;
+            }
+            for(j = 0; j < dev_info->info[i].dev_neigh.num; j++){
+                /* NOTE(review): getbrport_bydst() appears to return a
+                 * borrowed pointer (no dev_put below) — confirm. */
+                temp_dev = getbrport_bydst(br_dev, dev_info->info[i].dev_neigh.neigh_nod[j].mac_addr);
+                if(!temp_dev || temp_dev->ifindex == br_dev->ifindex){
+                    printk("temp_dev error!!!\n");
+                    continue;
+                }        
+                /* Find the matching port entry and append the neighbour. */
+                for(k = 0 ; k < dev_info->num; k++){
+                    if(strcmp(dev_info->info[k].name, temp_dev->name) == 0){
+						temp_neigh = &(dev_info->info[k].dev_neigh);
+						if(temp_neigh->num >= 20){
+							printk("dev=%s , neigh info is full!\n", temp_dev->name);
+						    break;
+						}
+                        temp_count = temp_neigh->num;			
+                        memcpy(temp_neigh->neigh_nod[temp_count].ip_addr, dev_info->info[i].dev_neigh.neigh_nod[j].ip_addr,  dev_info->info[i].dev_neigh.neigh_nod[j].ip_len);
+                        temp_neigh->neigh_nod[temp_count].ip_len = dev_info->info[i].dev_neigh.neigh_nod[j].ip_len;
+                        memcpy(temp_neigh->neigh_nod[temp_count].mac_addr, dev_info->info[i].dev_neigh.neigh_nod[j].mac_addr, MAX_MACADDR_LEN);
+                        temp_count ++;
+                        temp_neigh->num = temp_count;
+                        break;
+                    }
+                }
+            }
+			dev_put(br_dev);
+        }
+    }
+}
+
+/* Gather info about every UP net device in init_net — name, IPv4 address,
+ * MAC (with sanity classification), layer type and neighbour list — into a
+ * struct devlist_info copied to userspace at @arg.  Returns 0 or -EFAULT. */
+int get_devlist_info(unsigned long arg)
+{
+    struct devlist_info *dev_info;
+    struct net_device *dev;
+    struct net_device *temp_dev;
+    struct dev_neigh_info *temp_neigh;
+    unsigned int temp_count = 0;
+    unsigned int dev_num = 0;
+	
+    dev_info=(struct devlist_info*)kzalloc(sizeof(struct devlist_info), GFP_KERNEL);
+    if(!dev_info)
+        return -EFAULT;
+
+    read_lock(&dev_base_lock);
+
+    for_each_netdev(&init_net, dev)
+    {
+        if(dev->flags & IFF_UP && strcmp(dev->name, "lo") != 0)
+        {
+        	/* Record the local interface's info.
+        	 * NOTE(review): strcpy assumes the name field holds IFNAMSIZ
+        	 * bytes, and ip_ptr is read without RTNL/RCU — confirm. */
+            strcpy(dev_info->info[dev_num].name, dev->name);
+            if(dev->ip_ptr && dev->ip_ptr->ifa_list)
+                dev_info->info[dev_num].ipv4_addr = dev->ip_ptr->ifa_list->ifa_address; 
+            if(dev->header_ops && dev->dev_addr){
+                memcpy(dev_info->info[dev_num].mac_addr, dev->dev_addr, dev->addr_len);
+                if(is_zero_ether_addr(dev->dev_addr))
+                    dev_info->info[dev_num].mac_errtype = ZERO_ADDRERR;
+                else if(is_broadcast_ether_addr(dev->dev_addr))
+                    dev_info->info[dev_num].mac_errtype = BROADCAST_ADDRERR;
+                else if(is_multicast_ether_addr(dev->dev_addr))
+                    dev_info->info[dev_num].mac_errtype = MULTICAST_ADDRERR;              
+            }
+
+		//Below: update the remote-neighbour list associated with this interface.
+		//For bridge-port devices: find remote neighbour MACs via the egress
+		//dev, then resolve their IPs from arp_tbl (done later via the FDB).
+            if(dev->priv_flags & IFF_BRIDGE_PORT){
+		    	dev_info->info[dev_num].dev_layer = L2_DEV;
+                dev_info->info[dev_num].dev_neigh.num = 0;
+		//	getneigh_ofdev(dev,&(dev_info->info[dev_num].dev_neigh));
+            }
+		//br0 is the special gateway device: fetch its neighbour list directly.
+            else if(dev->priv_flags & IFF_EBRIDGE){
+                dev_info->info[dev_num].dev_layer = BR_DEV;
+                get_neigh_bydev(&arp_tbl, dev,&(dev_info->info[dev_num].dev_neigh));
+            }
+		//Ordinary layer-3 devices: fetch the neighbour list from arp_tbl.
+            else{
+                //Exclude non-ARP interfaces.
+                if(!(dev->flags & IFF_NOARP))
+                    get_neigh_bydev(&arp_tbl, dev,&(dev_info->info[dev_num].dev_neigh));
+                dev_info->info[dev_num].dev_layer = L3_DEV;         
+            }
+		    dev_num++;
+            if(dev_num >= MAX_DEV_NUM)
+            {
+                break;
+            }
+        }
+    }
+    dev_info->num = dev_num;
+    update_brport_info(dev_info);
+    read_unlock(&dev_base_lock);
+    
+    if (copy_to_user((char *)arg, dev_info, sizeof(struct devlist_info)))
+    {
+        kfree(dev_info);
+        return -EFAULT;
+    }
+    kfree(dev_info);
+
+    return 0;
+}
+
+extern wait_queue_head_t skb_wait_queue;
+//extern atomic_t  skb_used;
+extern atomic_t  skb_tops;
+extern atomic_t  skb_fromps;
+/* Track allocation of an skb struct (including ones wrapping externally
+ * supplied data); accounting is compiled out under CONFIG_SPEED_OPT. */
+void skb_alloc_track(struct sk_buff *skb)
+{
+#ifndef CONFIG_SPEED_OPT
+	netslab_inc(SKB_SLAB);
+	//track_add(skb, 0, SKB_INFO, skb->truesize);
+	skbinfo_add(skb,SKB_TYPE_ALL);
+#endif
+//	atomic_inc(&skb_used);
+}
+/* Track release of an skb struct (including ones wrapping externally
+ * supplied data); wakes any waiter throttled on the skb limit. */
+extern wait_queue_head_t skb_wait_queue;
+void skb_free_track(struct sk_buff *skb)
+{
+#ifndef CONFIG_SPEED_OPT
+	track_del(skb, 0, SKB_INFO);
+	skbinfo_del(skb,SKB_TYPE_ALL);
+	netslab_dec(SKB_SLAB);	
+#endif
+	//2017.6.3  add by linxu  set a limit for skb
+//	atomic_dec(&skb_used);
+	if(waitqueue_active(&skb_wait_queue))
+	{
+		wake_up(&skb_wait_queue);		
+	}
+}
+/* Track allocation of slab-backed skb->data; external PSBUF excluded. */
+void skbdata_alloc_track(struct sk_buff *skb)
+{
+#ifndef CONFIG_SPEED_OPT
+	skbinfo_add((unsigned char *)skb,SKB_TYPE_DATA);
+	//track_add(skb->head, 0, DATA_INFO, skb->len);
+#endif
+}
+/* Track release of slab-backed skb->data; external PSBUF excluded. */
+void skbdata_free_track(struct sk_buff *skb)
+{
+#ifndef CONFIG_SPEED_OPT
+	track_del(skb->head, 0, DATA_INFO);
+	skbinfo_del(skb,SKB_TYPE_DATA);
+#endif
+}
+
+/* Track a data buffer handed in from the external (CP) side. */
+void fromext_alloc_track(struct sk_buff *skb)
+{
+#ifndef CONFIG_SPEED_OPT
+	//track_add(skb, 0, DATA_INFO,  skb->len);
+	skbinfo_add((unsigned char *)skb,SKB_TYPE_FROMCP);
+#endif
+	atomic_inc(&skb_fromps);
+}
+/* Track release of a data buffer handed in from the external (CP) side. */
+void fromext_free_track(struct sk_buff *skb)
+{
+#ifndef CONFIG_SPEED_OPT
+	track_del(skb->head, 0, DATA_INFO);
+	skbinfo_del(skb,SKB_TYPE_FROMCP);
+#endif	
+	atomic_dec(&skb_fromps);
+}
+/* Track buffers handed out to the external (CP) side. */
+void toext_alloc_track(struct sk_buff *skb)
+{
+#ifndef CONFIG_SPEED_OPT
+	skbinfo_add((unsigned char *)skb,SKB_TYPE_TOCP);
+#endif
+	atomic_inc(&skb_tops);	/* counted even when tracking is compiled out */
+}
+/* Track release of buffers that were handed out to the external (CP) side. */
+void toext_free_track(struct sk_buff *skb)
+{
+#ifndef CONFIG_SPEED_OPT
+	skbinfo_del(skb,SKB_TYPE_TOCP);
+#endif
+	atomic_dec(&skb_tops);	/* counted even when tracking is compiled out */
+}
+#else
+
+
+
+int set_print = 0;       // custom network debug-print switch
+EXPORT_SYMBOL(set_print);
+
+int set_tcpdump = 0;  // arbitrary-point packet-capture switch
+
+// statistics about the current skb state, including some anomaly counters
+unsigned long skbinfo_dbg[SKB_INFO_MAX]= {0}; 
+
+// counters for network runtime resources, esp. key structure allocations, for internal study and debugging
+unsigned long netruninfo_dbg[NET_INFO_MAX]= {0}; 
+
+// global config for the skb allocation ceiling; not used yet
+unsigned long skb_max_panic = 0; // skb total ceiling; exceeding it panics
+unsigned long skb_num_limit = 6000;  // skb total ceiling; exceeding it makes allocation return NULL
+
+// stack-trace selected skbs at their free point
+char skb_dump_str[NIOCTL_MAX_MSGLEN] = {0};
+unsigned int skb_dump_len = 0;
+
+/* statistics for local TCP */
+unsigned long tcp_stats_dbg[TCP_STATS_MAX] = {0};
+
+
+// the counters below model the mix of received packets
+int  skb_num4 = 0;                  // received IPv4 packets
+int  skb_num6 = 0;                  // received IPv6 packets
+int  skb_big_num;                   // packets with len over 1000 (v4 + v6)
+int  skb_small_num;                 // packets with len under 100 (v4 + v6)
+int  skb_bytes4 = 0;                // received IPv4 bytes
+int  skb_bytes6 = 0;                // received IPv6 bytes
+int  skb_unknown = 0;               // received packets of unknown protocol (ARP and other non-v4/v6)
+int  skb_tcpnum = 0;                // received TCP packets (v4 + v6, excluding fastbr traffic)
+int  skb_udpnum = 0;                // received UDP packets (v4 + v6, excluding fastbr traffic)
+int  broadcast_num4 = 0;            // received IPv4 broadcasts
+int  broadcast_num6 = 0;            // received IPv6 broadcasts
+int  multicast_num4 = 0;            // received IPv4 multicasts
+int  multicast_num6 = 0;            // received IPv6 multicasts
+int  fastnat_num = 0;               // packets forwarded successfully by fastnat
+int  fast6_num = 0;                 // packets forwarded successfully by fast6
+int  fastbr_num = 0;                // packets forwarded successfully by fastbr
+int  fast_local4_rcv_num = 0;       // local fast_local4 packets received OK
+int  fast_local6_rcv_num = 0;       // local fast_local6 packets received OK
+int  fast_local4_output_num = 0;    // local fast_local4 packets sent OK
+int  fast_local6_output_num = 0;    // local fast_local6 packets sent OK
+int  fast_tcpdump_num = 0;          // packets captured on the fast path
+
+
+int double_mac = 0; // MAC-check switch
+// slab memory usage statistics; generic slab pools (e.g. kmalloc) not covered
+struct slab_info slab_count = {0};
+
+/* bump one local-TCP statistic */
+#define TCP_PKT_STATS_INC(_mod)    tcp_stats_dbg[_mod]++
+
+/* Stub: presumably dumps a stack trace for an skb — empty in this build. */
+void dump_net_stack(struct sk_buff *skb, unsigned int offset)
+{
+   
+}
+
+
+/* Stub: presumably a MAC-address sanity check — empty in this build. */
+void check_macaddr_only(unsigned char *ha, unsigned char ha_len)
+{
+	
+}
+
+// Track allocation of an skb (includes external data) — tracking compiled out here.
+void skb_alloc_track(struct sk_buff *skb)
+{
+	
+}
+// Track release of an skb (includes external data) — tracking compiled out here.
+void skb_free_track(struct sk_buff *skb)
+{
+	
+}
+// Track allocation of slab-backed skb->data (external PSBUF excluded) — compiled out here.
+void skbdata_alloc_track(struct sk_buff *skb)
+{
+	
+}
+// Track release of slab-backed skb->data (external PSBUF excluded) — compiled out here.
+void skbdata_free_track(struct sk_buff *skb)
+{
+	
+}
+
+// Track allocation of data handed over from the external (CP) side — compiled out here.
+void fromext_alloc_track(struct sk_buff *skb)
+{
+	
+
+}
+// Track release of data handed over from the external (CP) side — compiled out here.
+void fromext_free_track(struct sk_buff *skb)
+{
+	
+}
+// Track buffers handed out to the external (CP) side — compiled out here.
+void toext_alloc_track(struct sk_buff *skb)
+{
+	
+}
+// Track release of buffers handed out to the external (CP) side — compiled out here.
+void toext_free_track(struct sk_buff *skb)
+{
+	
+}
+
+/* Stub: debug packet printing — no-op in this build. */
+void net_print_packet(unsigned char *data, unsigned int len, int flag)
+{
+   
+}
+
+
+/* Stub: arbitrary-point capture entry — no-op in this build. */
+void tcpdumpin_sq(struct sk_buff *skb)
+{
+    
+}
+
+/* Stub: presumably forwards an skb to userspace via netlink — no-op in this build. */
+void track_netlink(struct sk_buff *skb,u32 group)
+{
+
+}
+
+/* Stub: slab counter increment — no-op in this build. */
+void netslab_inc(int i)
+{
+
+}
+
+/* Stub: slab counter decrement — no-op in this build. */
+void netslab_dec(int i)
+{
+
+}
+
+/* Stub: record allocation of a network runtime resource — no-op in this build. */
+void netruninfo_add(unsigned char *addr,unsigned int info_type)
+{
+}
+
+/* Stub: record release of a network runtime resource — no-op in this build. */
+void netruninfo_del(unsigned char *addr,unsigned int info_type)
+{
+}
+
+/* Stub: record an skb statistic — no-op in this build. */
+void skbinfo_add(unsigned char *addr,unsigned int skbinfo_type)
+{
+}
+
+/* Stub: remove an skb statistic — no-op in this build. */
+void skbinfo_del(unsigned char *addr,unsigned int skbinfo_type)
+{
+}
+
+int net_debug_packet = 0;
+struct timeval net_debug_packet_tv = {0, 0};
+struct list_head net_debug_packet_list_head; 
+int net_debug_packet_sec = 0;
+
+#if 0
+// record application transmit activity
+void record_app_atcive_net()
+{
+
+}
+#endif
+/* Stub: report local-TCP statistics; returns 0 without copying anything out. */
+int get_tcp_stat_info(unsigned long arg)
+{
+    return 0;
+}
+#endif
+
+#ifdef _USE_TestHarness 
+/* Test-harness scaffolding: fake DDR-net mapping and a no-op PS buffer free. */
+int *vir_addr_ddrnet = 0;
+
+void psnet_freepsbuf(void *head)
+{
+}
+#endif
\ No newline at end of file
diff --git a/upstream/linux-5.10/net/core/fastproc/fast4_fw.c b/upstream/linux-5.10/net/core/fastproc/fast4_fw.c
new file mode 100755
index 0000000..0ef32e4
--- /dev/null
+++ b/upstream/linux-5.10/net/core/fastproc/fast4_fw.c
@@ -0,0 +1,829 @@
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/icmp.h>
+#include <net/ip.h>
+#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_arp.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter/xt_multiport.h>
+#include <linux/netfilter/xt_iprange.h>
+#include <linux/netfilter/nf_conntrack_tcp.h>
+#include <net/checksum.h>
+#include <net/dsfield.h>
+#include <net/route.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <net/SI/fast_common.h>
+#include <net/inet_hashtables.h>
+#include <linux/igmp.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
+#include <net/netfilter/nf_conntrack_timeout.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/SI/net_track.h>
+#include <net/SI/net_other.h>
+#include <net/SI/netioc_proc.h>
+#include <net/SI/net_cache.h>
+#include <net/SI/print_sun.h>
+
+#include <linux/netfilter/xt_tcpudp.h>
+
+MODULE_LICENSE("GPL");
+
+
+/* kthread + tasklet that periodically sync ct traffic counters into iptables */
+static struct task_struct *ct_iptables_syn;
+static struct tasklet_struct ct_iptables_bh;
+unsigned int ct_iptables_syn_sw;
+enum table_index {
+	IPTABLE_RAW,
+	IPTABLE_MANGLE,
+	IPTABLE_NAT,
+	IPTABLE_FILTER
+};
+
+
+/* ***************** ipv4 fast-forwarding related variables ********************************/
+#ifndef CONFIG_PREEMPT_RT_FULL
+extern int *vir_addr_ddrnet;
+#endif
+/* ******************************** function declarations ********************************/
+
+
+/* ******************************** function implementations ********************************/
+int dst_expire_count = 0;	/* routes dropped because their rt_genid expired */
+extern int no_neighbour;
+
+/*
+ * A cached route is stale once its generation id no longer matches the
+ * per-netns IPv4 generation counter.
+ */
+static inline int rt_is_expired(struct rtable *rth)
+{
+    struct net *route_net = dev_net(rth->dst.dev);
+
+    return atomic_read(&route_net->ipv4.rt_genid) != rth->rt_genid;
+}
+
+/* Placeholder dcache flush for transmitted packet data. */
+void __flush_dcache_area(void *addr, size_t len)
+{
+      /* stub function; how to use it is to be worked out later */
+}
+
+
+extern void ntl_ct_set_iw(struct sk_buff *skb, struct nf_conn *ct, int ct_dir);
+/*
+ * IPv4 fast-forward receive path.  Replays the conntrack/NAT decisions cached
+ * on @ct and transmits @skb directly on the cached egress device, bypassing
+ * the regular netfilter traversal.
+ *
+ * Returns 1 when the packet was consumed (transmitted or dropped), 0 when it
+ * must fall back to the slow path (skb conntrack state is restored/cleared).
+ *
+ * NOTE(review): parameters l4proto/protonum and local dst_rdir are unused
+ * here; the succ_out label is also unused (falls through to drop_packet).
+ */
+int fast4_fw_recv(struct nf_conn *tmpl,
+                  struct sk_buff *skb,
+                  struct nf_conn *ct,
+                  struct nf_conntrack_l4proto *l4proto,
+                  unsigned int dataoff,
+                  int dir,
+                  u_int8_t protonum)
+{
+    struct iphdr *iph = ip_hdr(skb);
+    struct udphdr *udph = NULL;
+    struct tcphdr *tcph = NULL;
+    __sum16 *cksum = NULL;
+    __be32 *oldip = NULL;
+    __be16 *oldport = 0;
+    struct net_device *dev = NULL;
+    u_int32_t skip_nat = 0;
+
+    enum ip_conntrack_info ctinfo;
+    int ret;
+    int rdir;
+    int type;
+    u_int32_t      nat_addr;
+    u_int16_t      nat_port;
+    struct ethhdr * eth;
+    struct dst_entry *dst_dir = NULL, *dst_rdir = NULL;
+    struct neighbour *_neighbour = NULL;
+	
+	/* preserve the caller's VLAN tagging across the temporary rewrite below */
+	__be16			vlan_proto_raw = skb->vlan_proto;
+	__u16			vlan_tci_raw = skb->vlan_tci;
+
+    /* modelled on tcf_ipt_act() */
+    struct nf_hook_state state = {
+        .hook = NF_INET_PRE_ROUTING,
+        .net = &init_net,
+        .in	= skb->dev,
+        .pf	= NFPROTO_IPV4,
+    };
+
+
+    rdir = (IP_CT_DIR_ORIGINAL == dir) ? IP_CT_DIR_REPLY: IP_CT_DIR_ORIGINAL;
+    dst_dir = dst_get_by_ct(ct, dir);
+
+    if (!dst_dir)
+    {
+        goto err_out;
+    }
+
+    /* stale cached route: tear down this fast connection and fall back */
+    if (rt_is_expired((struct rtable*)dst_dir))
+    {
+        dst_expire_count++;
+        fast_fw_conn_release(ct);
+        goto err_out;
+    }
+
+    // if a bridge port was recorded, send straight to that port
+    if (ct->fast_ct.fast_brport[dir])
+    {
+        rcu_read_lock();
+        dev = rcu_dereference_protected(ct->fast_ct.fast_brport[dir], 1);
+        rcu_read_unlock();
+    }
+    else {
+        dev = dst_dir->dev;
+    }
+
+    /* check the packet length against the egress device MTU */
+
+    if (!dev || (skb->len > dev->mtu))
+    {
+        skbinfo_add(NULL, SKB_OVER_MTU);
+        goto err_out;
+    }
+
+    // a packet that would loop back out of its ingress device is freed and counted
+    if (strcmp(skb->dev->name, dev->name) == 0)
+    {
+        skbinfo_add(NULL, SKB_LOOP);
+
+        kfree_skb(skb);
+        goto drop_packet;
+    }
+
+    // modelled on resolve_normal_ct
+    if (dir == 1) {
+        ctinfo = IP_CT_ESTABLISHED_REPLY;
+    } else {
+        if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
+            ctinfo = IP_CT_ESTABLISHED;
+        } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
+            ctinfo = IP_CT_RELATED;
+        } else {
+            ctinfo = IP_CT_NEW;
+        }
+    }
+
+
+    ret = nf_conntrack_handle_packet_fast(ct, skb, dataoff, ctinfo, &state);
+    if (ret <= 0) {
+        skb->_nfct = 0;
+        goto err_out; // before fast succeeds nothing in the skb may be modified, else a failure would need an undo
+    }
+    // ensure the egress device's headroom is sufficient; expand if not
+    if (!(skb = fast_expand_headroom(skb, dev))) {
+        goto drop_packet;
+    }
+
+    fast_tcpdump(skb);
+
+    // if capture hit, the data was cloned; the fast path edits data, so take a private copy
+    if (skb_cloned(skb))
+    {
+        if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+        {
+            print_sun(SUN_DBG, "fast4_fw_recv clone copy failed !!!\n");
+            kfree_skb(skb);
+            goto drop_packet;
+        }
+        clean_cache(skb->data,skb->len);
+    }
+
+    iph = ip_hdr(skb);
+
+    // only on fastnat success may the network header be reset, for the cache flush below
+    skb_reset_network_header(skb);
+    skb->isFastnat = 1;
+    nf_ct_set(skb, (struct nf_conn *)&ct->ct_general, ctinfo);
+
+    /* derive the NAT replacement address/port from the reverse tuple */
+    if (test_bit(IPS_SRC_NAT_BIT, &ct->status))
+    {
+        if(IP_CT_DIR_ORIGINAL == dir)
+        {
+            nat_addr = ct->tuplehash[rdir].tuple.dst.u3.ip;
+            nat_port = ct->tuplehash[rdir].tuple.dst.u.all;
+            type = FN_TYPE_SRC;
+        }
+        else
+        {
+            nat_addr = ct->tuplehash[rdir].tuple.src.u3.ip;
+            nat_port = ct->tuplehash[rdir].tuple.src.u.all;
+            type = FN_TYPE_DST;
+        }
+    }
+    else if (test_bit(IPS_DST_NAT_BIT, &ct->status))
+    {
+        if (IP_CT_DIR_ORIGINAL == dir)
+        {
+            nat_addr = ct->tuplehash[rdir].tuple.src.u3.ip;
+            nat_port = ct->tuplehash[rdir].tuple.src.u.all;
+            type = FN_TYPE_DST;
+        }
+        else
+        {
+            nat_addr = ct->tuplehash[rdir].tuple.dst.u3.ip;
+            nat_port = ct->tuplehash[rdir].tuple.dst.u.all;
+            type = FN_TYPE_SRC;
+        }
+    }
+    else
+    {
+        skip_nat = 1;
+    }
+
+    if (!skip_nat)
+    {
+        /* apply the NAT rewrite */
+        if (IPPROTO_TCP == iph->protocol)
+        {
+            tcph = (struct tcphdr *)(skb->data + iph->ihl * 4);
+            cksum = &tcph->check;
+            oldport = (FN_TYPE_SRC == type)? (&tcph->source): (&tcph->dest);
+        }
+        else if (IPPROTO_UDP == iph->protocol)
+        {
+            udph = (struct udphdr *)(skb->data + iph->ihl * 4);
+            cksum = &udph->check;
+            oldport = (FN_TYPE_SRC == type)? (&udph->source): (&udph->dest);
+        }
+
+        oldip = (FN_TYPE_SRC == type)? (&iph->saddr) : (&iph->daddr);
+
+        /* UDP checksum 0 means "no checksum": skip the incremental update then */
+        if (cksum != NULL && (0!=*cksum || IPPROTO_TCP == iph->protocol))
+        {
+            inet_proto_csum_replace4(cksum, skb, *oldip, nat_addr, 0);
+            inet_proto_csum_replace2(cksum, skb, *oldport, nat_port, 0);
+        }
+        csum_replace4(&iph->check, *oldip, nat_addr);
+        if(oldport)
+            *oldport = nat_port;
+        *oldip = nat_addr;
+    }
+    else
+    {
+        if (IPPROTO_TCP == iph->protocol)
+        {
+            tcph = (struct tcphdr *)(skb->data + iph->ihl * 4);
+        }
+    }
+
+	// per-connection (ct) traffic accounting
+    ct->packet_info[dir].packets++;
+    ct->packet_info[dir].bytes += skb->len;
+    // per-netdev traffic accounting --- like native Linux drivers, counts the IP packet length
+    if (fastnat_level == FAST_NET_DEVICE)
+    {
+        skb->dev->stats.rx_packets++;
+        skb->dev->stats.rx_bytes += skb->len;
+    }
+
+
+    if (dev->flags & IFF_UP)
+    {
+        /* rebuild the Ethernet header (not needed on point-to-point links) */
+        if (!(dev->flags & IFF_POINTOPOINT)) {
+            skb_push(skb, ETH_HLEN);
+			skb_reset_mac_header(skb);
+			if(skb->isvlan == 1)
+			{
+				struct vlan_ethhdr *vlan_eth = (struct vlan_ethhdr*)(skb->data - VLAN_HLEN);
+				skb->vlan_proto = vlan_eth->h_vlan_proto;
+				skb->vlan_tci = ntohs(vlan_eth->h_vlan_TCI);				
+			}
+            eth = (struct ethhdr *)skb->data;
+            _neighbour = dst_neigh_lookup_skb(dst_dir, skb);
+            // use the egress device MAC as the packet's source MAC
+            memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
+            if (_neighbour)
+            {
+                memcpy(eth->h_dest, _neighbour->ha, ETH_ALEN);
+                neigh_release(_neighbour);
+            }
+            else {
+                __flush_dcache_area(skb->data, skb->len);
+                kfree_skb(skb);
+                no_neighbour++;
+                goto drop_packet;
+            }
+            eth->h_proto = htons(ETH_P_IP);
+        }
+        skb->dev = dev;
+        skb->now_location |= FASTNAT_SUCC;
+
+	/* remember in/out devices for the iptables counter sync */
+	if(ct->indev[dir] == NULL && skb->indev != NULL)
+	{
+		ct->indev[dir] = skb->indev;
+	}
+
+	if(ct->outdev[dir] == NULL && skb->dev != NULL)
+	{
+		ct->outdev[dir] = skb->dev;
+	}
+
+	/* restore the original VLAN tagging before transmission */
+	skb->vlan_proto = vlan_proto_raw;
+	skb->vlan_tci = vlan_tci_raw;
+        __flush_dcache_area(skb->data, skb->len);
+        /* drop the lock across dev_queue_xmit to avoid holding it during tx */
+        spin_unlock_bh(&fast_fw_spinlock);
+        dev_queue_xmit(skb);
+		spin_lock_bh(&fast_fw_spinlock);
+    }
+    else
+    {
+        print_sun(SUN_DBG, "ERR &&&&&& %s DOWN, kfree_skb!!!!!!!! \n", skb->dev->name);
+        kfree_skb(skb);
+    }
+
+
+    /* NOTE(review): "%x" with a pointer truncates on 64-bit — consider %p */
+    print_sun(SUN_DBG, "skb : 0x%x, new fastnat succ--------", skb);
+
+succ_out:
+drop_packet:
+    if (tmpl)
+        nf_conntrack_put(&tmpl->ct_general);
+    dst_release(dst_dir);
+    return 1;
+
+err_out :
+    dst_release(dst_dir);
+    nf_conntrack_put(&ct->ct_general);
+    print_sun(SUN_DBG, "skb : 0x%x, new fastnat FAIL!!!!!!!!!!", skb);
+    if (tmpl) {
+        nf_ct_set(skb, (struct nf_conn *)&tmpl->ct_general, IP_CT_NEW);
+    }
+    else {
+        skb->_nfct = 0;
+    }
+    return 0; /* not fast nat */
+}
+
+/*
+ * Runs at the POST_ROUTING hook: decides whether this connection is eligible
+ * for the IPv4 fast-forward path and, if so, records the dst/bridge-port on
+ * the ct so fast4_fw_recv() can short-circuit later packets.
+ * Always returns NF_ACCEPT — it only annotates, never drops.
+ */
+unsigned int napt_handle4_fw(void *priv,
+                             struct sk_buff *skb,
+                             const struct nf_hook_state *state)
+{
+    struct nf_conn *ct;
+    enum ip_conntrack_info ctinfo;
+    u_int8_t protocol;
+    enum ip_conntrack_dir dir, rdir;
+    struct dst_entry *dst = skb_dst(skb);
+#ifdef CONFIG_ATHRS_HW_NAT
+    u_int32_t mask =0;
+#endif
+    struct neighbour *_neighbour = NULL;
+    struct net_device *out = state->out;
+
+
+    // fast-forwarding master switch
+    if (fastnat_level == FAST_CLOSE || fastnat_level == FAST_CLOSE_KEEP_LINK)
+    {
+        return NF_ACCEPT;
+    }
+
+    // only TCP/UDP support fast; an ICMP packet (e.g. port unreachable) can look up a TCP/UDP ct, so check the protocol explicitly
+    if (ip_hdr(skb)->protocol != IPPROTO_TCP && ip_hdr(skb)->protocol != IPPROTO_UDP)
+    {
+        return NF_ACCEPT;
+    }
+    // fast-forwarding per-feature bitmap switches
+
+    if (!test_bit(FAST_TYPE_VERSION_BIT, &fast_switch)
+            || !test_bit(FAST_TYPE_FW4_BIT, &fast_switch) )
+    {
+        return NF_ACCEPT;
+    }
+
+    if (!out)
+    {
+        return NF_ACCEPT;
+    }
+
+    // no fast link for broadcast/multicast
+    if (ipv4_is_multicast(ip_hdr(skb)->daddr) || ipv4_is_lbcast(ip_hdr(skb)->daddr))
+    {
+        return NF_ACCEPT;
+    }
+
+    if(!dst)
+    {
+        return NF_ACCEPT;
+    }
+
+    _neighbour = dst_neigh_lookup_skb(dst, skb);
+    if (!_neighbour)
+    {
+        return NF_ACCEPT;
+    }
+    /* an all-zero neighbour MAC means unresolved (unless point-to-point) */
+    if (memcmp(_neighbour->ha, zeromac, ETH_ALEN) == 0)
+    {
+        if (!(skb->dev->flags & IFF_POINTOPOINT))
+            goto accept;
+    }
+
+    /* check the packet length against the egress device MTU */
+    if (dst->dev && (skb->len > dst->dev->mtu))
+    {
+        goto accept;
+    }
+
+    ct = nf_ct_get(skb, &ctinfo);
+
+    if (!ct)
+    {
+        goto accept;
+    }
+
+    protocol = nf_ct_protonum(ct);
+
+    if (ct->master == NULL)
+    {
+        struct nf_conn_help *temp_help = nfct_help(ct);
+        // a connection with helper hooks must stay on the standard Linux path, otherwise the kernel misses its packet data
+        if(temp_help!=NULL)
+        {
+            goto accept;
+        }
+    }
+
+    /* only forward */
+    if (!skb->skb_iif)
+    {
+        goto accept;
+    }
+
+    // filter protocols that must not go through fastnat, keyed on destination port
+    if (check_skip_ports(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all))
+    {
+        goto accept;
+    }
+
+    dir = CTINFO2DIR(ctinfo);
+
+    rdir = (IP_CT_DIR_ORIGINAL == dir) ? IP_CT_DIR_REPLY: IP_CT_DIR_ORIGINAL;
+
+    /* only TCP/UDP get fastnat */
+    if (IPPROTO_TCP == protocol)
+    {
+        /* TCP three-way handshake completed */
+        if(!test_bit(IPS_ASSURED_BIT, &ct->status))
+        {
+            goto accept;
+        }
+    }
+    else if (IPPROTO_UDP != protocol)
+    {
+        goto accept;
+    }
+
+    /* record (or refresh) the cached dst and bridge port under the fast lock */
+    spin_lock_bh(&fast_fw_spinlock);
+    if (ct->fast_ct.fast_dst[dir] && (ct->fast_ct.fast_dst[dir] != dst))
+    {
+        fast_fw_conn_release(ct);
+    }
+
+    if (!ct->fast_ct.fast_dst[dir])
+    {
+        rcu_assign_pointer(ct->fast_ct.fast_dst[dir], dst);
+        ct->fast_ct.fast_brport[dir] = getBridgePort(_neighbour, out);
+        fast_dst_add_ct(dst, ct);
+    }
+
+    ct->fast_ct.isFast = FAST_CT_FW4;
+    spin_unlock_bh(&fast_fw_spinlock);
+
+accept:
+
+    neigh_release(_neighbour);
+    return NF_ACCEPT;
+}
+
+/* POST_ROUTING hook, lowest priority, that records fast-path state on the ct. */
+static struct nf_hook_ops fast4_fw_hook = {
+    .hook = napt_handle4_fw,
+    //.owner = THIS_MODULE,
+    .pf = PF_INET,
+    .hooknum = NF_INET_POST_ROUTING,
+    .priority = NF_IP_PRI_LAST,
+};
+
+/*
+ * True when @port lies in [@min, @max]; @invert flips the result so a rule
+ * can express "NOT in range".
+ */
+static inline bool
+port_match(u_int16_t min, u_int16_t max, u_int16_t port, bool invert)
+{
+	bool in_range = (min <= port) && (port <= max);
+
+	return invert ? !in_range : in_range;
+}
+
+/*
+ * Match a conntrack tuple (direction @dir) against the port ranges of a rule
+ * match extension.  Sets *match_filter to 1 when the rule actually constrains
+ * a port, so callers only credit counters to non-trivial rules.
+ * NOTE(review): matchinfo is interpreted as struct xt_tcp and ports are read
+ * via the .tcp union member regardless of the actual protocol — confirm this
+ * overlap is intended for UDP rules.
+ */
+static bool ctable_mt(struct nf_conn* ct, struct xt_action_param *par,int dir, int* match_filter)
+{
+	const struct xt_tcp *tcpinfo = par->matchinfo;
+
+	if (!port_match(tcpinfo->spts[0], tcpinfo->spts[1],
+			ntohs(ct->tuplehash[dir].tuple.src.u.tcp.port),
+			!!(tcpinfo->invflags & XT_TCP_INV_SRCPT)))
+		return false;
+	if((tcpinfo->spts[0] || tcpinfo->spts[1]) ^ (!!(tcpinfo->invflags & XT_TCP_INV_SRCPT)))
+		*match_filter = 1;
+	if (!port_match(tcpinfo->dpts[0], tcpinfo->dpts[1],
+			ntohs(ct->tuplehash[dir].tuple.dst.u.tcp.port),
+			!!(tcpinfo->invflags & XT_TCP_INV_DSTPT)))
+		return false;
+	if((tcpinfo->dpts[0] || tcpinfo->dpts[1]) ^ (!!(tcpinfo->invflags & XT_TCP_INV_DSTPT)))
+		*match_filter = 1;
+	return true;
+}
+
+/*
+ * Check whether a conntrack tuple (direction @dir) matches the IP part of an
+ * iptables rule: src/dst address, in/out interface and protocol.  Sets
+ * *match_filter to 1 when the rule actually constrains something.
+ */
+static inline bool
+ip_packet_match(const struct ipt_ip *ipinfo,struct nf_conn* ct,int dir, int* match_filter)
+{
+	/* 0 = interface matches; was previously read uninitialized whenever
+	 * ct->indev/outdev[dir] had not been recorded yet */
+	unsigned long ret = 0;
+	if (NF_INVF(ipinfo, IPT_INV_SRCIP,
+		    (ct->tuplehash[dir].tuple.src.u3.ip & ipinfo->smsk.s_addr) != ipinfo->src.s_addr) ||
+	    NF_INVF(ipinfo, IPT_INV_DSTIP,
+		    (ct->tuplehash[dir].tuple.dst.u3.ip & ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr))
+		return false;
+	if(ipinfo->src.s_addr || ipinfo->dst.s_addr)
+		*match_filter = 1;
+
+	if(ct->indev[dir] != NULL)
+	{
+		ret = ifname_compare_aligned(ct->indev[dir]->name, ipinfo->iniface, ipinfo->iniface_mask);
+	}
+	
+	if(ipinfo->iniface[0] != '\0')
+		*match_filter = 1;
+		
+	if (NF_INVF(ipinfo, IPT_INV_VIA_IN, ret != 0))
+		return false;
+	/* reset so a stale in-interface result is not reused when outdev is unset */
+	ret = 0;
+	if(ct->outdev[dir] != NULL)
+	{
+		ret = ifname_compare_aligned(ct->outdev[dir]->name, ipinfo->outiface, ipinfo->outiface_mask);
+	}
+
+	if(ipinfo->outiface[0] != '\0')
+		*match_filter = 1;
+
+	if (NF_INVF(ipinfo, IPT_INV_VIA_OUT, ret != 0))
+		return false;
+
+	if (ipinfo->proto &&
+	    NF_INVF(ipinfo, IPT_INV_PROTO, ct->tuplehash[dir].tuple.dst.protonum != ipinfo->proto))
+		return false;
+
+	return true;
+}
+
+/*
+ * Mirror of ip_packet_match() for the opposite traffic direction: swaps the
+ * src/dst tuple fields and the in/out device roles so a rule written for the
+ * forward direction can be checked against reply traffic.
+ */
+static inline bool
+ip_packet_match_neg(const struct ipt_ip *ipinfo,struct nf_conn* ct,int dir, int* match_filter)
+{
+	/* 0 = interface matches; was previously read uninitialized whenever
+	 * ct->indev/outdev[dir] had not been recorded yet */
+	unsigned long ret = 0;
+	if (NF_INVF(ipinfo, IPT_INV_SRCIP,
+		    (ct->tuplehash[dir].tuple.dst.u3.ip & ipinfo->smsk.s_addr) != ipinfo->src.s_addr) ||
+	    NF_INVF(ipinfo, IPT_INV_DSTIP,
+		    (ct->tuplehash[dir].tuple.src.u3.ip & ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr))
+		return false;
+	if(ipinfo->src.s_addr || ipinfo->dst.s_addr)
+		*match_filter = 1;
+
+	if(ct->outdev[dir] != NULL)
+	{
+		ret = ifname_compare_aligned(ct->outdev[dir]->name, ipinfo->iniface, ipinfo->iniface_mask);
+	}
+	
+	if(ipinfo->iniface[0] != '\0')
+		*match_filter = 1;
+		
+	if (NF_INVF(ipinfo, IPT_INV_VIA_IN, ret != 0))
+		return false;
+	/* reset so a stale result is not reused when indev is unset */
+	ret = 0;
+	if(ct->indev[dir] != NULL)
+	{
+		ret = ifname_compare_aligned(ct->indev[dir]->name, ipinfo->outiface, ipinfo->outiface_mask);
+	}
+
+	if(ipinfo->outiface[0] != '\0')
+		*match_filter = 1;
+
+	if (NF_INVF(ipinfo, IPT_INV_VIA_OUT, ret != 0))
+		return false;
+
+	if (ipinfo->proto &&
+	    NF_INVF(ipinfo, IPT_INV_PROTO, ct->tuplehash[dir].tuple.dst.protonum != ipinfo->proto))
+		return false;
+
+	return true;
+}
+
+/* Return the ipt_entry located @offset bytes into the flattened table blob. */
+static inline struct ipt_entry *
+get_entry(const void *base, unsigned int offset)
+{
+	const char *raw = base;
+
+	return (struct ipt_entry *)(raw + offset);
+}
+
+/* Step to the next rule: entries are packed back-to-back, each carrying its
+ * own total size in next_offset. */
+static inline
+struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
+{
+	const char *raw = (const char *)entry;
+
+	return (struct ipt_entry *)(raw + entry->next_offset);
+}
+
+
+
+//ͬ²½ctͳ¼ÆÐÅÏ¢µ½iptables,ͬ²½ÖÜÆÚÔݶ¨1s
+static int ct_iptables_syn_thread(void *param)
+{
+	while (1) 
+	{
+		if(ct_iptables_syn_sw)
+		{
+			rcu_read_lock();
+			tasklet_schedule(&ct_iptables_bh);
+			rcu_read_unlock();
+			//ÿ´Îͬ²½¼ä¸ôΪ1s.
+		}
+		msleep(1*1000);
+	}
+	return 0;
+}
+
+/*
+ * Credit one ct's accumulated byte/packet counters to every matching rule of
+ * the given iptables table (@table_id indexes enum table_index).  Each rule
+ * is tried in the ORIGINAL direction, then in the REPLY direction; counters
+ * are only added when the rule constrains something (match_filter set).
+ * NOTE(review): private->number counts the whole table, not just the chosen
+ * hook's chain — confirm the walk cannot run past valid entries.
+ */
+void ct_iptables_syn_handle(struct nf_conn *ct,struct xt_table_info *private,int table_id)
+{
+	void *table_base;
+	struct ipt_entry *e;
+	const struct xt_entry_match *ematch;
+	struct xt_action_param acpar;
+	struct xt_counters *counter;
+	int match_flag = 0;
+	int match_filter = 0;
+	int num = 0;
+
+
+	table_base = private->entries;
+	num = private->number;
+	switch(table_id)
+	{
+		case IPTABLE_RAW:
+		case IPTABLE_MANGLE:
+		case IPTABLE_NAT:
+			e = get_entry(table_base, private->hook_entry[NF_INET_PRE_ROUTING]);
+			break;
+		case IPTABLE_FILTER:
+			e = get_entry(table_base, private->hook_entry[NF_INET_LOCAL_IN]);
+			break;
+		default:
+			/* unknown table id: 'e' would otherwise be used uninitialized */
+			return;
+	}
+	while(num--)
+	{
+		match_flag = 0;
+		match_filter = 0;
+		if(!ip_packet_match(&e->ip, ct, IP_CT_DIR_ORIGINAL,&match_filter) &&
+			!ip_packet_match_neg(&e->ip, ct, IP_CT_DIR_REPLY,&match_filter))
+		{
+			/* IP part did not match in the ORIGINAL direction; fall
+			 * through and try the REPLY direction below */
+		}
+		else
+		{
+			xt_ematch_foreach(ematch, e) 
+			{
+				acpar.matchinfo = ematch->data;
+				if (!ctable_mt(ct, &acpar, IP_CT_DIR_ORIGINAL,&match_filter))
+				{
+					match_flag = 1;
+					break;
+				}
+			}
+			if(!match_flag)
+			{
+				if(match_filter)
+				{
+					counter = xt_get_this_cpu_counter(&e->counters);
+					ADD_COUNTER(*counter, ct->packet_info[IP_CT_DIR_ORIGINAL].bytes,ct->packet_info[IP_CT_DIR_ORIGINAL].packets);
+				}
+				e = ipt_next_entry(e);
+				continue;
+			}
+			match_flag = 0;
+			match_filter = 0;
+		}
+		
+		if (!ip_packet_match(&e->ip, ct, IP_CT_DIR_REPLY,&match_filter) &&
+			!ip_packet_match_neg(&e->ip, ct, IP_CT_DIR_ORIGINAL,&match_filter))
+		{
+			e = ipt_next_entry(e);
+			continue;
+		}
+		else
+		{
+			xt_ematch_foreach(ematch, e) 
+			{
+				acpar.matchinfo = ematch->data;
+				if (!ctable_mt(ct, &acpar, IP_CT_DIR_REPLY,&match_filter))
+				{
+					match_flag = 1;
+					break;
+				}
+			}
+			if(!match_flag)
+			{
+				if(match_filter)
+				{
+					counter = xt_get_this_cpu_counter(&e->counters);
+					ADD_COUNTER(*counter, ct->packet_info[IP_CT_DIR_REPLY].bytes,ct->packet_info[IP_CT_DIR_REPLY].packets);
+				}
+				e = ipt_next_entry(e);
+				continue;
+			}
+		}
+		/* rule matched in neither direction: advance to the next entry
+		 * (originally the same entry was re-examined until num ran out) */
+		e = ipt_next_entry(e);
+	}
+
+}
+
+/*
+ * Tasklet body: walk the conntrack hash and, for every fast-path connection,
+ * push its accumulated traffic counters into the raw/mangle/nat/filter
+ * tables, then zero the per-ct counters.
+ * NOTE(review): net->ipv4.iptable_* pointers are dereferenced without NULL
+ * checks — confirm all four tables are always registered on this build.
+ */
+static void ct_iptables_bhfunc(unsigned long param)
+{
+	int hash = 0;
+	struct nf_conntrack_tuple_hash *h;
+	struct hlist_nulls_node *n;
+	struct nf_conn *ct;
+	struct xt_table_info *private;
+	struct net * net;
+	unsigned int addend;
+	/* xt counters are seqcount-protected; mirror the iptables update protocol */
+	local_bh_disable();
+	addend = xt_write_recseq_begin();
+	for(hash = 0; hash < nf_conntrack_htable_size; hash++)
+    {
+	   	hlist_nulls_for_each_entry_rcu(h,n,&nf_conntrack_hash[hash],hnnode)
+		{
+	   		if(h)
+			{
+	   			ct = nf_ct_tuplehash_to_ctrack(h);
+				if(ct->fast_ct.isFast)
+				{
+					net = nf_ct_net(ct);
+					private = READ_ONCE(net->ipv4.iptable_raw->private);
+					ct_iptables_syn_handle(ct,private,IPTABLE_RAW);
+					private = READ_ONCE(net->ipv4.iptable_mangle->private);
+					ct_iptables_syn_handle(ct,private,IPTABLE_MANGLE);
+					private = READ_ONCE(net->ipv4.nat_table->private);
+					ct_iptables_syn_handle(ct,private,IPTABLE_NAT);
+					private = READ_ONCE(net->ipv4.iptable_filter->private);	
+					ct_iptables_syn_handle(ct,private,IPTABLE_FILTER);
+				}
+				else
+					continue;
+				/* counters were consumed above: reset them under the fast lock */
+				spin_lock_bh(&fast_fw_spinlock);
+				ct->packet_info[IP_CT_DIR_ORIGINAL].bytes = 0;
+				ct->packet_info[IP_CT_DIR_ORIGINAL].packets = 0;
+				ct->packet_info[IP_CT_DIR_REPLY].bytes = 0;
+				ct->packet_info[IP_CT_DIR_REPLY].packets = 0;
+				spin_unlock_bh(&fast_fw_spinlock);
+			}
+	   	}
+    }
+	xt_write_recseq_end(addend);
+	local_bh_enable();
+}
+
+
+/*
+ * Register the POST_ROUTING fast-path hook and start the 1 s counter-sync
+ * kthread.  Returns 0 on success, -EINVAL if the hook cannot be registered
+ * (the sync thread is optional: init still succeeds without it).
+ */
+int fast4_fw_init(void)
+{
+    int ret = 0;
+
+    ret = nf_register_net_hook(&init_net, &fast4_fw_hook);
+    if (ret != 0)
+    {
+        print_sun(SUN_ERR,"init fast4_fw_init failed\n");
+        return -EINVAL;
+    }
+    print_sun(SUN_DBG,"init fast4_fw_init done\n");
+	
+	ct_iptables_bh.func = ct_iptables_bhfunc;
+	ct_iptables_syn = kthread_create(ct_iptables_syn_thread, (void *)0, "ct_iptables_syn" );
+    if (!IS_ERR(ct_iptables_syn))
+    {
+        printk("ntl_syn_task thread's init is succ");
+        wake_up_process(ct_iptables_syn);
+    }
+    else
+    {
+        /* don't keep an ERR_PTR: cleanup would pass it to kthread_stop() */
+        ct_iptables_syn = NULL;
+    }
+
+    return 0;
+}
+
+/*
+ * Tear down the IPv4 fast-forward path: release all cached dst entries,
+ * unregister the hook, stop the counter-sync kthread and kill the tasklet.
+ * Always returns 0.
+ */
+int fast4_fw_cleanup(void)
+{
+    fast_release_all(RELEASE_ALL_DST);
+    nf_unregister_net_hook(&init_net, &fast4_fw_hook);
+	if (ct_iptables_syn) 
+	{
+    	kthread_stop(ct_iptables_syn);	/* requires the thread to poll kthread_should_stop() */
+    	ct_iptables_syn = NULL;
+	}
+	tasklet_kill(&ct_iptables_bh);
+    return 0;
+}
+
diff --git a/upstream/linux-5.10/net/core/fastproc/fast6.c b/upstream/linux-5.10/net/core/fastproc/fast6.c
new file mode 100755
index 0000000..d5fab9c
--- /dev/null
+++ b/upstream/linux-5.10/net/core/fastproc/fast6.c
@@ -0,0 +1,626 @@
+/* * Copyright (c) 2011 Qualcomm Atheros, Inc. * */
+
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/icmp.h>
+#include <net/ip.h>
+#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter_arp.h>
+#include <linux/netfilter/xt_multiport.h>
+#include <linux/netfilter/xt_iprange.h>
+#include <linux/netfilter/nf_conntrack_tcp.h>
+#include <net/checksum.h>
+#include <net/dsfield.h>
+#include <net/route.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <net/SI/fast6.h>
+#include <net/SI/netioc_proc.h>
+#include <net/SI/print_sun.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+#include <net/SI/net_track.h>
+
+
+MODULE_LICENSE("GPL");
+
+/* ***************** IPv6 fast-forwarding state ********************************/
+spinlock_t fast6_spinlock; // spinlock protecting operations on the fast6 connection list
+fast_list_t working_list6 = {0};            // list of all fast6 entries
+struct hlist_nulls_head *working_hash6;     // tuple hash used for lookup (read under RCU)
+
+/* ******************************** function declarations ********************************/
+
+
+/* ******************************** function implementations ********************************/
+/* IPv6 extension-header check: returns 1 when an extension header of the
+ * given type carries no L4 header behind it, 0 otherwise.  Only ESP is
+ * known for now; extend the table once more protocols are studied. */
+static int ip6nol4head(int type)
+{
+    /* extension-header types that do not expose an L4 header */
+    static const int no_l4_types[] = {IPPROTO_ESP};
+    int idx;
+
+    for (idx = 0; idx < (int)(sizeof(no_l4_types)/sizeof(no_l4_types[0])); idx++)
+    {
+        if (no_l4_types[idx] == type)
+            return (1);
+    }
+
+    return (0);
+}
+
+/*
+ * ip6option - test whether @type is an IPv6 extension-header protocol number.
+ *
+ * Returns nonzero when @type is an extension header, 0 otherwise.
+ *
+ * Bug fixed: the original returned the matched protocol number itself, but
+ * IPPROTO_HOPOPTS is 0, so a hop-by-hop options header was reported as
+ * "not an extension header" and getipv6uppkg() then mis-parsed the packet
+ * (protocol 0, L4 pointer aimed at the option header).  The only live
+ * caller uses the result as a truth value, so return a plain boolean.
+ * (The disabled #if 0 walker in getipv6uppkg() used the raw value; update
+ * it if that code is ever revived.)
+ */
+static int ip6option(int type)
+{
+    int i, optarray[8] = {IPPROTO_HOPOPTS, IPPROTO_IPV6, IPPROTO_ROUTING, IPPROTO_FRAGMENT,
+            IPPROTO_ESP, IPPROTO_AH, IPPROTO_DSTOPTS, IPPROTO_NONE};
+    
+    for (i = 0; i < 8; i++)
+    {
+        if (type == optarray[i])
+            return (1);
+    }
+    return (0);
+}
+
+// skb->data must point at the IPv6 header when this is called
+/* Skip the fixed IPv6 header (extension-header walking is disabled below),
+ * return a pointer to the L4 header, store the L4 protocol in *protocol and
+ * the L4 length in *uppkglen.  Returns NULL when an extension header is
+ * present, so such packets take the standard (non-fast) path. */
+unsigned char *getipv6uppkg(unsigned char *ippkg, unsigned char *protocol, int *uppkglen)
+{
+    unsigned char *ippkgpos = ippkg + 40;   /* 40 = fixed IPv6 header size */
+    struct ip6_hdr *hdr = (struct ip6_hdr *)ippkg;
+    struct ip6_opthdr *opthdr;
+    int ip6hdrlen;
+    int proto = 0;
+
+    proto = ip6option(hdr->ip6_nxt);
+    if (proto)
+    {
+        /* extension headers are not supported yet: bail out */
+        return NULL;
+#if 0
+        if (ip6nol4head(proto))
+            return NULL;
+
+        opthdr =(struct ip6_opthdr *)ippkgpos;
+        while (proto = ip6option(opthdr->nxt))
+        {
+            if (ip6nol4head(proto))
+                return NULL;
+            ippkgpos += (opthdr->len + 1) << 3;
+            opthdr = (struct ip6_opthdr *)ippkgpos;
+        }
+        if (protocol)
+            *protocol = opthdr->nxt;
+        ippkgpos += (opthdr->len + 1) << 3;
+#endif
+    }
+    else
+        if (protocol)
+            *protocol = hdr->ip6_nxt;
+        
+    ip6hdrlen = ippkgpos - ippkg;
+    
+    if (uppkglen)
+        *uppkglen = ntohs(hdr->ip6_plen) + 40 - ip6hdrlen;
+    
+    return (ippkgpos);
+}
+
+/*
+ * fast6_get_tuple - extract the IPv6 5-tuple from @skb into @tuple.
+ * Currently only TCP/UDP/ICMPv6 packets without extension headers are
+ * handled.  Returns 0 on success, -1 when the packet is unusable for the
+ * fast path.
+ */
+int fast6_get_tuple(struct sk_buff *skb, struct nf_conntrack_tuple *tuple)
+{
+    __u8 next_hdr;
+    unsigned char *l4head;
+    struct ipv6hdr *iph;
+    struct udphdr *udph = NULL;
+    struct tcphdr *tcph = NULL;
+    struct icmp6hdr *icmph = NULL;
+
+    if (!skb || !tuple)
+    {
+        return -1;
+    }
+    iph = (struct ipv6hdr *)skb->data;
+
+    /* only IPv6 packets */    
+    if (htons(ETH_P_IPV6) != skb->protocol)
+    {
+        return -1;
+    }
+    
+    /* Bug fix: the original tested skb->len - sizeof(struct ipv6hdr) >= 0,
+     * which is an unsigned expression and therefore always true, so runt
+     * packets were never rejected here. */
+    if (skb->len < sizeof(struct ipv6hdr))
+        return -1;
+
+    l4head = getipv6uppkg(skb->data, &next_hdr, NULL);
+    if (l4head == NULL)
+        return -1;
+    
+    memset(tuple, 0, sizeof(struct nf_conntrack_tuple));
+
+    /* only tcp/udp/icmpv6 */
+    if (NEXTHDR_UDP == next_hdr)
+    {
+        udph = (struct udphdr *)l4head;
+        tuple->src.u.udp.port = udph->source;
+        tuple->dst.u.udp.port = udph->dest;
+        skb_udpnum++;       /* global UDP counter for diagnostics */
+    }
+    else if (NEXTHDR_TCP == next_hdr)
+    {
+        tcph = (struct tcphdr *)l4head;
+        tuple->src.u.tcp.port = tcph->source;
+        tuple->dst.u.tcp.port = tcph->dest;
+        skb_tcpnum++;       /* global TCP counter for diagnostics */
+    }
+    else if (NEXTHDR_ICMP == next_hdr)
+    {
+        icmph = (struct icmp6hdr *)l4head; /* point to ICMPv6 header */
+        tuple->src.u.icmp.id = icmph->icmp6_identifier;
+        tuple->dst.u.icmp.type = icmph->icmp6_type;
+        tuple->dst.u.icmp.code = icmph->icmp6_code;
+    }
+    else
+    {
+        return -1;
+    }
+
+    tuple->src.l3num = AF_INET6;
+    tuple->src.u3.in6 = iph->saddr;
+    tuple->dst.u3.in6 = iph->daddr;
+    tuple->dst.protonum = next_hdr;
+    tuple->dst.dir = IP_CT_DIR_ORIGINAL;
+
+    return 0;
+}
+
+// Must stay consistent with the kernel's dev_xmit_complete().
+// Being inline it cannot be hoisted into a shared file; fastnat and fast6
+// each carry a private copy.
+static inline bool start_xmit_complete(int rc)
+{
+    /*
+     * Positive cases with an skb consumed by a driver:
+     * - successful transmission (rc == NETDEV_TX_OK)
+     * - error while transmitting (rc < 0)
+     * - error while queueing to a different device (rc & NET_XMIT_MASK)
+     */
+    if (likely(rc < NET_XMIT_MASK))
+        return true;
+
+    return false;
+}
+
+/* Fast-path receive for IPv6 packets.  The lookup hash is read under RCU;
+ * the global connection list is protected by fast6_spinlock.
+ * Returns 1 when the skb was consumed (forwarded or freed), 0 to let the
+ * packet continue through the standard stack. */
+int fast6_recv(struct sk_buff *skb)
+{
+    struct nf_conntrack_tuple tuple;
+    fast_entry_data_t *fast6_entry_data = NULL;
+    fast_entry_t *fast6_entry = NULL;
+    struct tcphdr *tcph = NULL;
+    struct net_device *dev = NULL;
+    __u8 next_hdr = 0;
+    unsigned char *l4head;
+    struct ipv6hdr *ip6;
+    
+    print_sun(SUN_DBG, "enter fast_6_recv \n");
+
+    if (fastnat_level == FAST_CLOSE)
+    {
+        return 0;
+    }
+    
+    if (fast6_get_tuple(skb, &tuple) < 0)
+    {
+        print_sun(SUN_DBG, "fast_6_recv get tuple err \n");
+        return 0;
+    }
+    
+    ip6 = ipv6_hdr(skb);
+    if (ip6->nexthdr != IPPROTO_TCP && ip6->nexthdr != IPPROTO_UDP)
+        return 0;
+    
+    rcu_read_lock();
+
+    fast6_entry_data = fast_find_entry_data(working_hash6, &tuple);
+    if (fast6_entry_data == NULL)
+    {
+        rcu_read_unlock();
+
+        print_sun(SUN_DBG, "fast_6_recv fast_6_find null \n");
+        return 0;
+    }
+    
+    /* Check whether the packet exceeds the egress device MTU */
+    dev = fast6_entry_data->outdev;
+    if (!dev || (skb->len > dev->mtu))
+    {
+        skbinfo_add(NULL, SKB_OVER_MTU);
+        rcu_read_unlock();
+
+        print_sun(SUN_DBG, "fast_6_recv outdev err \n");
+        return 0;
+    }
+    
+    // A packet that would loop back to its ingress device is freed and counted
+    if (strcmp(skb->dev->name, dev->name) == 0)
+    {
+        skbinfo_add(NULL, SKB_LOOP);
+        rcu_read_unlock();
+
+        kfree_skb(skb);
+        printk("loopback skb, free skb\n");
+        return 1;
+    }
+
+    fast6_entry = fast_data_to_entry(fast6_entry_data);
+    if (!fast6_entry)
+    {
+        rcu_read_unlock();
+
+        print_sun(SUN_DBG, "fast_6_recv fast6_entry is null \n");
+        return 0;
+    }
+
+    /* Take the fast path only once both directions of the link are set up;
+     * otherwise fall back to the standard flow. */
+	/* UDP does not require a bidirectional link. */
+	if ((fast6_entry->flags != FAST_ALL_DIR) && (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP))
+    {
+        rcu_read_unlock();
+
+        print_sun(SUN_DBG, "fast_6_recv flags is not FAST_ALL_DIR \n");
+        return 0;
+    }
+
+    /* Skip the IPv6 header and grab the L4 header pointer */
+    l4head = getipv6uppkg(skb->data, &next_hdr, NULL);
+    if (l4head == NULL)
+    {
+        rcu_read_unlock();
+
+        print_sun(SUN_DBG, "fast_6_recv l4head is null \n");
+        return 0;
+    }
+
+
+
+    if (!(skb = fast_expand_headroom_v6(skb, dev))){
+		rcu_read_unlock();
+        return 1;
+    }
+
+    fast_tcpdump(skb);
+    
+    /* If a capture is active the data may be cloned; the fast path rewrites
+     * the payload, so take a private copy first. */
+    if (skb_cloned(skb))
+    {
+        print_sun(SUN_DBG, "fast6_recv clone \n");
+        if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+        {
+            rcu_read_unlock();
+
+            print_sun(SUN_DBG, "fast6_recv clone copy failed !!!\n");
+            printk("pskb_expand_head skb failed, free skb\n");
+            kfree_skb(skb);
+            return 1;
+        }
+    }
+
+    /* Only once fast6 has succeeded may the IP-header start be recorded,
+     * for use when the cache is flushed. */
+    skb_reset_network_header(skb);
+    skb->isFastnat = 1;
+    skb->priority = fast6_entry_data->priority;
+    skb->mark = fast6_entry_data->mark;
+
+
+    // Kernel built-in per-connection traffic accounting
+    struct nf_conn_counter *acct = (struct nf_conn_counter*)nf_conn_acct_find(fast6_entry->ct);
+    if (acct) {
+        enum ip_conntrack_info ctinfo;
+        if (fast6_entry_data->tuplehash.tuple.dst.dir == IP_CT_DIR_ORIGINAL)
+            ctinfo = IP_CT_ESTABLISHED;
+        else 
+            ctinfo = IP_CT_ESTABLISHED_REPLY;
+
+        atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets);
+        atomic64_add(skb->len, &acct[CTINFO2DIR(ctinfo)].bytes);
+    }
+	
+    /* Custom feature: for a unidirectional UDP flood the ingress device is
+     * otherwise unknown; record it here so traffic accounting still works. */
+    if ((fast6_entry_data->indev == NULL) && skb->dev)
+    {
+        fast6_entry_data->indev = skb->dev;
+    }
+
+    /* Count packets received on the ingress device --- as in native Linux
+     * drivers, IP packet lengths are what get accounted. */
+    if (fast6_entry_data->indev && (fastnat_level == FAST_NET_DEVICE))
+    {
+        fast6_entry_data->indev->stats.rx_packets++;
+        fast6_entry_data->indev->stats.rx_bytes += skb->len;
+    }  
+    
+    skb->dev = dev;
+
+    /* Only when a MAC header was pre-recorded may it be written back;
+     * otherwise the frame starts at the IP header. */
+    skb_push(skb, ETH_HLEN);
+
+    memcpy(skb->data, fast6_entry_data->hh_data, ETH_HLEN);
+    /* Refresh the connection timeout */
+    if (IPPROTO_TCP == tuple.dst.protonum)
+    {
+        mod_timer(&fast6_entry->timeout, jiffies + tcp_timeouts[fast6_entry->ct->proto.tcp.state]);
+        tcph = (struct tcphdr *)l4head;
+        update_tcp_timeout(fast6_entry, fast6_entry_data, tcph);
+		fast6_entry->ct->timeout = jiffies + tcp_timeouts[fast6_entry->ct->proto.tcp.state];
+    }
+    else if (IPPROTO_UDP == tuple.dst.protonum)
+    {
+        /*udp*/
+        if (test_bit(IPS_SEEN_REPLY_BIT, &fast6_entry->ct->status))
+        {
+            mod_timer(&fast6_entry->timeout, jiffies + fast_udp_timeout_stream);
+			fast6_entry->ct->timeout = jiffies + fast_udp_timeout_stream;
+        }
+        else
+        {
+            mod_timer(&fast6_entry->timeout, jiffies + fast_udp_timeout);
+			fast6_entry->ct->timeout = jiffies + fast_udp_timeout;
+        }
+    }
+
+    if (skb->dev->flags & IFF_UP)
+    {
+        // PPP devices transmit bare IP packets (no Ethernet header)
+        if (strncmp(skb->dev->name, ppp_name, strlen(ppp_name)) == 0)
+        {
+           skb_pull(skb, ETH_HLEN);
+        }
+        
+        skb->now_location |= FAST6_SUCC;
+        if (fastnat_level == FAST_NET_DEVICE)
+        {
+            print_sun(SUN_DBG, "fastnat-2 dev_queue_xmit, send to:%s !!!!!!!! \n", skb->dev->name);
+            dev_queue_xmit(skb);
+        }
+        /* Special applications such as traffic control must take the standard
+         * fastnat flow, otherwise their plug-ins cannot run. */
+        else if (fastnat_level == FAST_NET_CORE)
+        {
+            dev_queue_xmit(skb);
+        }
+        /*add by jiangjing*/
+        fast6_entry_data->packet_num++;
+
+    }
+    else
+    {
+        print_sun(SUN_DBG, "fast6_recv ERR &&&&&& %s DOWN, kfree_skb!!!!!!!! \n", skb->dev->name);
+        kfree_skb(skb);
+    }
+
+    rcu_read_unlock();
+
+    print_sun(SUN_DBG, "fast_6_recv okokok \n");
+    return 1;
+}
+
+/* POST_ROUTING hook: napt6_handle() records fast6 link entries for IPv6 flows. */
+static struct nf_hook_ops fast6_hook = {
+    .hook = napt6_handle,
+    //.owner = THIS_MODULE,
+    .pf = PF_INET6,
+    .hooknum = NF_INET_POST_ROUTING,
+    .priority = NF_IP6_PRI_LAST,
+};
+
+/* Runs at the POST_ROUTING hook: populates a fast6 link entry for the
+ * connection carried by @skb and inserts it into the lookup hash.
+ * Always returns NF_ACCEPT — this hook only observes, it never drops. */
+unsigned int napt6_handle(void* priv,
+            struct sk_buff *skb,
+            const struct nf_hook_state *state)
+{
+    struct nf_conn *ct;
+    enum ip_conntrack_info ctinfo;
+    u_int8_t protocol;
+    fast_entry_t *fast6_entry;
+    fast_entry_data_t *fast6_entry_data;
+    enum ip_conntrack_dir dir, rdir;
+    struct dst_entry *dst = skb_dst(skb);
+#ifdef CONFIG_ATHRS_HW_NAT
+    u_int32_t mask =0;
+#endif
+	struct neighbour *_neighbour = NULL;
+	struct net_device *out = state->out;
+
+
+    if (fastnat_level == FAST_CLOSE || fastnat_level == FAST_CLOSE_KEEP_LINK)
+    {
+        return NF_ACCEPT;
+    }
+
+    if (test_bit(FAST_TYPE_VERSION_BIT, &fast_switch))
+    {
+        return NF_ACCEPT;
+    }
+    
+    if (!out)
+    {
+        return NF_ACCEPT;
+    }
+    
+    if (ipv6_hdr(skb)->nexthdr != IPPROTO_TCP && ipv6_hdr(skb)->nexthdr != IPPROTO_UDP)
+        return NF_ACCEPT;
+
+    // No fast link for multicast traffic
+    if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))
+    {
+        return NF_ACCEPT;
+    }
+
+    /* Watch whether the fast table fills up frequently; if it does,
+     * consider evicting the oldest entries instead of refusing. */
+    if (working_list6.count > nf_conntrack_max)
+    {
+        return NF_ACCEPT;
+    }
+    /* Check that a next hop exists */
+    if (!dst)
+    { 
+        return NF_ACCEPT;
+    }
+	_neighbour = dst_neigh_lookup_skb(dst, skb);
+	if(!_neighbour)
+	{
+		return NF_ACCEPT;
+	}
+    
+    if (memcmp(_neighbour->ha, zeromac, ETH_ALEN) == 0)
+    {
+        goto accept;
+    }
+
+    if (!(ct = nf_ct_get(skb, &ctinfo)))
+    {
+        goto accept;
+    }
+    protocol = nf_ct_protonum(ct);
+
+
+    /* only forward */
+    if (!skb->skb_iif)
+    {
+        goto accept;
+    }
+
+
+    // Skip destination ports that must not take the fastnat path
+    if (check_skip_ports(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all))
+    {
+        goto accept;
+    }
+
+    dir = CTINFO2DIR(ctinfo);
+    
+    rdir = (IP_CT_DIR_ORIGINAL == dir) ? IP_CT_DIR_REPLY: IP_CT_DIR_ORIGINAL;
+    
+    if (IPPROTO_TCP == protocol)
+    {
+        /* only established */
+        if(!test_bit(IPS_ASSURED_BIT, &ct->status))
+        {
+			goto accept;
+        }
+    }
+    else if (IPPROTO_UDP != protocol)
+    {
+        goto accept;
+    }
+
+    spin_lock_bh(&fast6_spinlock);
+    if (!(fast6_entry = fast_get_entry(&working_list6, ct, dir)))
+    {
+        spin_unlock_bh(&fast6_spinlock);
+        goto accept;
+    }
+    fast6_entry->fast_spinlock = &fast6_spinlock;
+
+    /* First link setup: take a conntrack reference and freeze its timeout.
+     * A duplicate packet of an already-linked direction must not repeat this. */
+    if (!(fast6_entry->flags & FAST_ALL_DIR))
+    {
+        nf_conntrack_get(&ct->ct_general);
+        //del_timer(&ct->timeout);
+        ct->timeout = fast6_entry->timeout.expires;
+    }
+
+    fast6_entry_data = &fast6_entry->data[dir];
+    fast6_entry_data->tuplehash.tuple = ct->tuplehash[dir].tuple;
+
+    memcpy(fast6_entry_data->dmac, _neighbour->ha, ETH_ALEN);
+    fast6_entry_data->priority = skb->priority;
+    fast6_entry_data->mark = skb->mark;
+    fast6_entry_data->outdev = out;
+
+    if (!record_MAC_header(working_hash6, ct, fast6_entry, fast6_entry_data, _neighbour, out, htons(ETH_P_IPV6)))
+    {
+        spin_unlock_bh(&fast6_spinlock);
+        goto accept;
+    }
+
+    // Guarantee the forward/reverse flag bits do not clash
+    fast6_entry->flags = fast6_entry->flags | (1 << dir);
+    
+    fast_add_entry(working_hash6, fast6_entry_data);
+    
+    if (fast6_entry->flags == FAST_ALL_DIR)
+    {
+        fast6_entry->data[0].indev = fast6_entry->data[1].outdev;
+        fast6_entry->data[1].indev = fast6_entry->data[0].outdev;
+    }
+
+    spin_unlock_bh(&fast6_spinlock);
+
+    ct->fast_ct.isFast = FAST_CT_WND6;
+
+accept:
+	
+	neigh_release(_neighbour);
+    return NF_ACCEPT;
+}
+
+/* Handle notifier-chain events by walking the fast6 list under its lock. */
+int fast6_event(traverse_command_t *cmd)
+{
+    spin_lock_bh(&fast6_spinlock);
+    traverse_process(&working_list6, cmd);
+    spin_unlock_bh(&fast6_spinlock);
+	return 0;
+}
+
+/* Called when fastnat_level is switched off: flush every IPv6 fast-forward
+ * entry under the fast6 lock. */
+void fast6_cleanup_links(void)
+{
+    spin_lock_bh(&fast6_spinlock);
+    fast_cleanup_links(&working_list6);
+    spin_unlock_bh(&fast6_spinlock);
+}
+
+/*
+ * tsp_fast6_init - allocate the fast6 lookup hash, initialize its lock and
+ * register the POST_ROUTING hook.
+ *
+ * Returns 0 on success, -EINVAL on any failure (hash freed and cleared so a
+ * later tsp_fast6_cleanup() cannot double-free it).
+ */
+int tsp_fast6_init(void)
+{
+    int ret;
+    
+    print_sun(SUN_DBG,"start init fast6\n");
+
+    working_hash6 = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, /*&fast6hash_vmalloc,*/ 1);
+    if (!working_hash6) 
+    {
+        /* log failures at error level (was SUN_DBG, which hid real faults) */
+        print_sun(SUN_ERR, "Unable to create working_hash6\n");
+        return -EINVAL;
+    }
+
+    spin_lock_init(&fast6_spinlock);
+
+    ret = nf_register_net_hook(&init_net, &fast6_hook);
+    if (ret != 0)
+    {
+        print_sun(SUN_ERR,"init fast6 failed\n");
+        goto err;
+    }
+    
+    print_sun(SUN_DBG,"init fast6 done\n");
+    return 0;
+    
+err:
+    nf_ct_free_hashtable(working_hash6, /*fast6_hash_vmalloc, */nf_conntrack_htable_size);
+    working_hash6 = NULL;   /* avoid dangling pointer / double free in cleanup */
+    return -EINVAL;
+}
+
+/* Unregister the fast6 POST_ROUTING hook and free the lookup hash. */
+int tsp_fast6_cleanup(void)
+{
+    nf_unregister_net_hook(&init_net, &fast6_hook);
+    nf_ct_free_hashtable(working_hash6, /*fast6_hash_vmalloc,*/ nf_conntrack_htable_size);
+    
+    print_sun(SUN_DBG,"fast6 cleanup done\n");
+    return 0;
+}
+
diff --git a/upstream/linux-5.10/net/core/fastproc/fast6_fw.c b/upstream/linux-5.10/net/core/fastproc/fast6_fw.c
new file mode 100755
index 0000000..322175b
--- /dev/null
+++ b/upstream/linux-5.10/net/core/fastproc/fast6_fw.c
@@ -0,0 +1,395 @@
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/icmp.h>
+#include <net/ip.h>
+#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter_arp.h>
+#include <linux/netfilter/xt_multiport.h>
+#include <linux/netfilter/xt_iprange.h>
+#include <linux/netfilter/nf_conntrack_tcp.h>
+#include <net/checksum.h>
+#include <net/dsfield.h>
+#include <net/route.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <net/SI/fast6.h>
+#include <net/SI/fast_common.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
+#include <net/netfilter/nf_conntrack_timeout.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/ip6_fib.h>
+#include <net/SI/net_track.h>
+#include <net/SI/net_other.h>
+#include <net/SI/netioc_proc.h>
+#include <net/SI/net_cache.h>
+#include <net/SI/print_sun.h>
+
+MODULE_LICENSE("GPL");
+
+/* ***************** IPv6 fast-forwarding state ************************* */
+
+
+/* **************************** function declarations ************************ */
+
+
+/* **************************** function implementations ************************ */
+extern u32 rt6_peer_genid(void);
+
+int dst_expire_count_v6 = 0;
+extern int no_neighbour;
+extern void ntl_ct_set_iw(struct sk_buff *skb, struct nf_conn *ct, int ct_dir);
+/* Fast-forward an IPv6 skb using the dst/bridge-port state cached on its
+ * conntrack by napt_handle6_fw().
+ * Returns 1 when the skb was consumed (transmitted or freed); returns 0 to
+ * fall back to the standard path, restoring @tmpl on the skb.
+ * NOTE(review): fast_fw_spinlock appears to be held on entry — it is dropped
+ * and re-taken around dev_queue_xmit() below; confirm against the caller. */
+int fast6_fw_recv(struct nf_conn *tmpl,
+                  struct sk_buff *skb,
+                  struct nf_conn *ct,
+                  struct nf_conntrack_l4proto *l4proto,
+                  unsigned int dataoff,
+                  int dir,
+                  u_int8_t protonum)
+{
+    struct net_device *dev = NULL;
+    enum ip_conntrack_info ctinfo;
+    int ret;
+    int  rdir;
+    struct ethhdr * eth;
+    __u8 next_hdr = 0;
+    unsigned char *l4head;
+    struct dst_entry *dst_dir = NULL;
+    struct neighbour *_neighbour = NULL;
+
+	__be16			vlan_proto_raw = skb->vlan_proto;  /* saved so VLAN fields can be restored before xmit */
+	__u16			vlan_tci_raw = skb->vlan_tci;
+	
+    rdir = (IP_CT_DIR_ORIGINAL == dir) ? IP_CT_DIR_REPLY: IP_CT_DIR_ORIGINAL;
+    dst_dir = dst_get_by_ct(ct, dir);
+
+    struct nf_hook_state state = {
+        .hook = NF_INET_PRE_ROUTING,
+        .net = &init_net,
+        .in	= skb->dev,
+        .pf	= NFPROTO_IPV6,
+    };
+
+
+    /* TCP needs a bidirectional link; one direction is enough for UDP */
+    if (!dst_dir)
+    {
+        goto err_out;
+    }
+
+    // If a bridge port was recorded, send straight to that port
+    if (ct->fast_ct.fast_brport[dir])
+    {
+        rcu_read_lock();
+        dev = rcu_dereference_protected(ct->fast_ct.fast_brport[dir], 1);
+        rcu_read_unlock();
+    }
+    else {
+        dev = dst_dir->dev;
+    }
+
+    /* Check whether the packet exceeds the egress device MTU */
+    if (!dev || (skb->len > dev->mtu))
+    {
+        skbinfo_add(NULL, SKB_OVER_MTU);
+        goto err_out;
+    }
+
+    // A packet that would loop back to its ingress device is freed and counted
+    if (strcmp(skb->dev->name, dev->name) == 0)
+    {
+        skbinfo_add(NULL, SKB_LOOP);
+        //nf_conntrack_put(&ct->ct_general);
+        kfree_skb(skb);
+        goto drop_packet;
+    }
+
+    if (dir == 1) {                    /* dir == IP_CT_DIR_REPLY */
+        ctinfo = IP_CT_ESTABLISHED_REPLY;
+    } else {
+        if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
+            ctinfo = IP_CT_ESTABLISHED;
+        } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
+            ctinfo = IP_CT_RELATED;
+        } else {
+            ctinfo = IP_CT_NEW;
+        }
+    }
+
+
+
+    ret = nf_conntrack_handle_packet_fast(ct, skb, dataoff, ctinfo, &state);
+    if (ret <= 0) {
+        skb->_nfct = 0;
+        goto err_out; /* the skb must stay unmodified until fast succeeds,
+                       * otherwise failure would require a rollback */
+    }
+    // Check the egress dev headroom; expand when it is insufficient
+    if (!(skb = fast_expand_headroom_v6(skb, dev))) {
+        //nf_conntrack_put(&ct->ct_general);
+        goto drop_packet;
+    }
+
+    fast_tcpdump(skb);
+
+    /* If a capture is active the data may be cloned; the fast path rewrites
+     * the payload, so take a private copy first. */
+    if (skb_cloned(skb))
+    {
+        if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+        {
+            print_sun(SUN_DBG, "fast6_fw_recv clone copy failed !!!\n");
+            kfree_skb(skb);
+            goto drop_packet;
+        }
+        clean_cache(skb->data,skb->len);
+    }
+
+    /* Only once fastnat has succeeded may the IP-header start be recorded,
+     * for use when the cache is flushed. */
+    skb_reset_network_header(skb);
+    skb->isFastnat = 1;
+    nf_ct_set(skb, (struct nf_conn *)&ct->ct_general, ctinfo);
+
+
+    /* Per-device traffic accounting --- as in native Linux drivers,
+     * IP packet lengths are what get accounted. */
+    if (fastnat_level == FAST_NET_DEVICE)
+    {
+        skb->dev->stats.rx_packets++;
+        skb->dev->stats.rx_bytes += skb->len;
+    }
+
+    if (dev->flags & IFF_UP)
+    {
+        if (!(dev->flags & IFF_POINTOPOINT)) {
+            // Only non-ppp ports get an Ethernet header; ppp sends bare IP
+            skb_push(skb, ETH_HLEN);
+			skb_reset_mac_header(skb);
+			if(skb->isvlan == 1)
+			{
+				struct vlan_ethhdr *vlan_eth = (struct vlan_ethhdr*)(skb->data - VLAN_HLEN);
+				skb->vlan_proto = vlan_eth->h_vlan_proto;
+				skb->vlan_tci = ntohs(vlan_eth->h_vlan_TCI);				
+			}
+            eth = (struct ethhdr *)skb->data;
+            // The egress dev MAC becomes the packet's source MAC
+            memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
+            _neighbour = dst_neigh_lookup_skb(dst_dir, skb);
+            if (_neighbour)
+            {
+                memcpy(eth->h_dest, _neighbour->ha, ETH_ALEN);
+                neigh_release(_neighbour);
+            }
+            else {
+                __flush_dcache_area(skb->data, skb->len);
+                kfree_skb(skb);
+                no_neighbour++;
+                goto drop_packet;
+            }
+
+            eth->h_proto = htons(ETH_P_IPV6);
+        }
+        skb->dev = dev;
+        skb->now_location |= FASTNAT_SUCC;
+		skb->vlan_proto = vlan_proto_raw;
+		skb->vlan_tci = vlan_tci_raw;
+        __flush_dcache_area(skb->data, skb->len);
+        /* drop the lock across transmission, then re-take it for the caller */
+        spin_unlock_bh(&fast_fw_spinlock);
+        dev_queue_xmit(skb);
+		spin_lock_bh(&fast_fw_spinlock);
+    }
+    else
+    {
+        print_sun(SUN_DBG, "ERR &&&&&& %s DOWN, kfree_skb!!!!!!!! \n", skb->dev->name);
+        kfree_skb(skb);
+    }
+
+
+    print_sun(SUN_DBG, "skb : 0x%x, fast6_fw succ--------", skb);
+
+succ_out:                              /* NOTE(review): label is unused */
+drop_packet:
+    if (tmpl)
+        nf_conntrack_put(&tmpl->ct_general);
+    dst_release(dst_dir);
+    return 1;
+
+err_out :
+    dst_release(dst_dir);
+
+    nf_conntrack_put(&ct->ct_general);
+    print_sun(SUN_DBG, "skb : 0x%x, fast6_fw fail!!!!!!!!!!", skb);
+    if (tmpl) {
+        nf_ct_set(skb, (struct nf_conn *)&tmpl->ct_general, IP_CT_NEW);
+    }
+    else {
+        skb->_nfct = 0;
+    }
+    return 0; /* not fast nat */
+}
+
+/* Runs at the POST_ROUTING hook: caches the dst entry and bridge port on the
+ * conntrack so fast6_fw_recv() can forward follow-up packets directly.
+ * Always returns NF_ACCEPT — this hook only observes, it never drops. */
+unsigned int napt_handle6_fw(void *priv,
+                             struct sk_buff *skb,
+                             const struct nf_hook_state *state)
+{
+    struct nf_conn *ct;
+    enum ip_conntrack_info ctinfo;
+    u_int8_t protocol;
+    enum ip_conntrack_dir dir;
+    struct dst_entry *dst = skb_dst(skb);
+#ifdef CONFIG_ATHRS_HW_NAT
+    u_int32_t mask =0;
+#endif
+    struct neighbour *_neighbour = NULL;
+    struct net_device *out = state->out;
+
+    // Master switch for fast forwarding
+    if (fastnat_level == FAST_CLOSE || fastnat_level == FAST_CLOSE_KEEP_LINK)
+    {
+        return NF_ACCEPT;
+    }
+
+    // Per-feature bitmap switches for the fast path
+    if (!test_bit(FAST_TYPE_VERSION_BIT, &fast_switch)
+            || !test_bit(FAST_TYPE_FW6_BIT, &fast_switch))
+    {
+        return NF_ACCEPT;
+    }
+
+    if (!out)
+    {
+        return NF_ACCEPT;
+    }
+
+    /* Only TCP/UDP support fast; an ICMP packet may resolve to a TCP/UDP ct
+     * (e.g. port unreachable), so the header must be checked explicitly. */
+    if (ipv6_hdr(skb)->nexthdr != IPPROTO_TCP && ipv6_hdr(skb)->nexthdr != IPPROTO_UDP)
+        return NF_ACCEPT;
+
+    // No fast link for multicast traffic
+    if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))
+    {
+        return NF_ACCEPT;
+    }
+
+    /* Check that a next hop exists */
+    if(!dst)
+    {
+        return NF_ACCEPT;
+    }
+
+    _neighbour = dst_neigh_lookup_skb(dst, skb);
+    if(!_neighbour)
+    {
+        return NF_ACCEPT;
+    }
+
+    if (memcmp(_neighbour->ha, zeromac, ETH_ALEN) == 0)
+    {
+        goto accept;
+    }
+
+    /* Check whether the packet exceeds the egress device MTU */
+    if (dst->dev && (skb->len > dst->dev->mtu))
+    {
+        goto accept;
+    }
+    if (!(ct = nf_ct_get(skb, &ctinfo)))
+    {
+        goto accept;
+    }
+
+    protocol = nf_ct_protonum(ct);
+
+    /* only forward */
+    if (!skb->skb_iif)
+    {
+        goto accept;
+    }
+
+    // Skip destination ports that must not take the fastnat path
+    if (check_skip_ports(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all))
+    {
+        goto accept;
+    }
+
+    dir = CTINFO2DIR(ctinfo);
+
+    /* Only TCP/UDP take the fastnat path */
+    if (IPPROTO_TCP == protocol)
+    {
+        /* TCP three-way handshake completed */
+        if(!test_bit(IPS_ASSURED_BIT, &ct->status))
+        {
+            goto accept;
+        }
+    }
+    else if (IPPROTO_UDP != protocol)
+    {
+        goto accept;
+    }
+
+    spin_lock_bh(&fast_fw_spinlock);
+    if (ct->fast_ct.fast_dst[dir] && (ct->fast_ct.fast_dst[dir] != dst))
+    {
+        /* the route changed: release the stale cached dst first */
+        fast_fw_conn_release(ct);
+    }
+
+    if (!ct->fast_ct.fast_dst[dir])
+    {
+        rcu_assign_pointer(ct->fast_ct.fast_dst[dir], dst);
+        ct->fast_ct.fast_brport[dir] = getBridgePort(_neighbour, out);
+        fast_dst_add_ct(dst, ct);
+    }
+
+    ct->fast_ct.isFast = FAST_CT_FW6;
+    spin_unlock_bh(&fast_fw_spinlock);
+accept:
+
+    neigh_release(_neighbour);
+    return NF_ACCEPT;
+}
+
+/* POST_ROUTING hook: napt_handle6_fw() caches dst/bridge-port state on the
+ * conntrack so fast6_fw_recv() can bypass the routing stack. */
+static struct nf_hook_ops fast6_fw_hook = {
+    .hook = napt_handle6_fw,
+    //.owner = THIS_MODULE,
+    .pf = PF_INET6,
+    .hooknum = NF_INET_POST_ROUTING,
+    .priority = NF_IP6_PRI_LAST,
+};
+
+
+/* Register the IPv6 fast-fw POST_ROUTING hook.
+ * Returns 0 on success, -EINVAL when registration fails. */
+int fast6_fw_init(void)
+{
+    int ret = nf_register_net_hook(&init_net, &fast6_fw_hook);
+
+    if (ret)
+    {
+        print_sun(SUN_ERR,"init fast6_fw_init failed\n");
+        return -EINVAL;
+    }
+
+    print_sun(SUN_DBG,"init fast6_fw_init done\n");
+    return 0;
+}
+
+/* Release all cached dst references and unregister the IPv6 fast-fw hook. */
+int fast6_fw_cleanup(void)
+{
+    fast_release_all(RELEASE_ALL_DST);
+    nf_unregister_net_hook(&init_net, &fast6_fw_hook);
+    return 0;
+}
+
diff --git a/upstream/linux-5.10/net/core/fastproc/fast_common.c b/upstream/linux-5.10/net/core/fastproc/fast_common.c
new file mode 100755
index 0000000..69f3761
--- /dev/null
+++ b/upstream/linux-5.10/net/core/fastproc/fast_common.c
@@ -0,0 +1,2113 @@
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/icmp.h>
+#include <net/ip.h>
+#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_arp.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter/xt_multiport.h>
+#include <linux/netfilter/xt_iprange.h>
+#include <linux/netfilter/nf_conntrack_tcp.h>
+#include <net/checksum.h>
+#include <net/dsfield.h>
+#include <net/route.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <net/SI/fast_common.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
+#include <net/netfilter/nf_conntrack_timeout.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/SI/net_track.h>
+#include <net/SI/net_other.h>
+#include <net/SI/netioc_proc.h>
+#include <net/SI/net_cache.h>
+#include <net/SI/print_sun.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+
+
+MODULE_LICENSE("GPL");
+
+/* ************************** variables shared by fast forwarding ************************** */
+struct kmem_cache *fast_head_cache;    /* dedicated slab for fast_entry_t allocations */
+
+spinlock_t fast_fw_spinlock;             //spinlock protecting fast-forward data against concurrent access
+spinlock_t fastlocal_spinlock;           //spinlock protecting fast-local data against concurrent access
+
+/*
+* 0: fastnat off, standard Linux path; links may be torn down and re-linked
+* 1: standard fastnat at the IP layer, driven from softirq scheduling;
+*    slightly lower performance, for scenarios such as traffic control
+* 2: net_device-level driver-to-driver forwarding, no softirq
+* 5: fastnat off, standard Linux path; existing links are kept, not re-linked
+* The value can be changed through proc.
+*/
+int fastnat_level = FAST_NET_DEVICE;/*FAST_NET_DEVICE; modify by zdd, close fastnat*/
+
+/* bitmap --- per-feature fast-forward switches; see definitions in fast_common.h */
+
+//unsigned long fast_switch = 0x67;
+unsigned long fast_switch = 0x0;
+
+/* ************************ inter-subnet fast-forward variables ************************ */
+int fastbr_level = 1;                 //whether the fastbr feature is enabled
+
+/* **************************** platform-adaptation variables **************************** */
+/*
+ *br_name and the following string arrays are configured through proc nodes;
+ *here they are only defined and given initial values.
+ */
+char br_name[MAX_NET_DEVICE_NAME_LEN + 1] = "br0";
+char ps_name[MAX_NET_DEVICE_NAME_LEN + 1] = "wan1";
+char usb_name[MAX_NET_DEVICE_NAME_LEN + 1] = "usblan0";
+char ppp_name[MAX_NET_DEVICE_NAME_LEN + 1] = "ppp";
+//cp: single core; the module form factor needs customized routing
+//ap: dual core; the module form factor can simply bridge
+char need_jilian[MAX_NET_DEVICE_NAME_LEN + 1] = "0";
+
+//The three priority settings below may only be tuned by the network backbone
+//team; careless changes severely hurt performance in every direction.
+int  fast_br_level = 1;
+int  fast_fwd_level = 2;
+int  fast_local_level = 0;
+unsigned char zeromac[ETH_ALEN] = "";
+
+/* Kernel link-state variables and structures, taken from the kernel;
+ * they must be refreshed when porting to a newer kernel. */
+#define sNO TCP_CONNTRACK_NONE
+#define sSS TCP_CONNTRACK_SYN_SENT
+#define sSR TCP_CONNTRACK_SYN_RECV
+#define sES TCP_CONNTRACK_ESTABLISHED
+#define sFW TCP_CONNTRACK_FIN_WAIT
+#define sCW TCP_CONNTRACK_CLOSE_WAIT
+#define sLA TCP_CONNTRACK_LAST_ACK
+#define sTW TCP_CONNTRACK_TIME_WAIT
+#define sCL TCP_CONNTRACK_CLOSE
+#define sS2 TCP_CONNTRACK_SYN_SENT2
+#define sIV TCP_CONNTRACK_MAX
+#define sIG TCP_CONNTRACK_IGNORE
+
+/* What TCP flags are set from RST/SYN/FIN/ACK. */
+enum tcp_bit_set {
+    TCP_SYN_SET,
+    TCP_SYNACK_SET,
+    TCP_FIN_SET,
+    TCP_ACK_SET,
+    TCP_RST_SET,
+    TCP_NONE_SET,
+};
+
+//TCP state-transition table, copied verbatim from nf_conntrack_proto_tcp.c:
+//tcp_conntracks[direction][flag-index][current-state] -> next state.
+static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
+    {
+        /* ORIGINAL */
+        /*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2    */
+        /*syn*/       { sSS, sSS, sIG, sIG, sIG, sIG, sIG, sSS, sSS, sS2 },
+        /*
+         *    sNO -> sSS    Initialize a new connection
+         *    sSS -> sSS    Retransmitted SYN
+         *    sS2 -> sS2    Late retransmitted SYN
+         *    sSR -> sIG
+         *    sES -> sIG    Error: SYNs in window outside the SYN_SENT state
+         *            are errors. Receiver will reply with RST
+         *            and close the connection.
+         *            Or we are not in sync and hold a dead connection.
+         *    sFW -> sIG
+         *    sCW -> sIG
+         *    sLA -> sIG
+         *    sTW -> sSS    Reopened connection (RFC 1122).
+         *    sCL -> sSS
+         */
+        /*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2    */
+        /*synack*/ { sIV, sIV, sSR, sIV, sIV, sIV, sIV, sIV, sIV, sSR },
+        /*
+         *    sNO -> sIV    Too late and no reason to do anything
+         *    sSS -> sIV    Client can't send SYN and then SYN/ACK
+         *    sS2 -> sSR    SYN/ACK sent to SYN2 in simultaneous open
+         *    sSR -> sSR    Late retransmitted SYN/ACK in simultaneous open
+         *    sES -> sIV    Invalid SYN/ACK packets sent by the client
+         *    sFW -> sIV
+         *    sCW -> sIV
+         *    sLA -> sIV
+         *    sTW -> sIV
+         *    sCL -> sIV
+         */
+        /*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2    */
+        /*fin*/    { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
+        /*
+         *    sNO -> sIV    Too late and no reason to do anything...
+         *    sSS -> sIV    Client migth not send FIN in this state:
+         *            we enforce waiting for a SYN/ACK reply first.
+         *    sS2 -> sIV
+         *    sSR -> sFW    Close started.
+         *    sES -> sFW
+         *    sFW -> sLA    FIN seen in both directions, waiting for
+         *            the last ACK.
+         *            Migth be a retransmitted FIN as well...
+         *    sCW -> sLA
+         *    sLA -> sLA    Retransmitted FIN. Remain in the same state.
+         *    sTW -> sTW
+         *    sCL -> sCL
+         */
+        /*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2    */
+        /*ack*/       { sES, sIV, sES, sES, sCW, sCW, sTW, sTW, sCL, sIV },
+        /*
+         *    sNO -> sES    Assumed.
+         *    sSS -> sIV    ACK is invalid: we haven't seen a SYN/ACK yet.
+         *    sS2 -> sIV
+         *    sSR -> sES    Established state is reached.
+         *    sES -> sES    :-)
+         *    sFW -> sCW    Normal close request answered by ACK.
+         *    sCW -> sCW
+         *    sLA -> sTW    Last ACK detected.
+         *    sTW -> sTW    Retransmitted last ACK. Remain in the same state.
+         *    sCL -> sCL
+         */
+        /*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2    */
+        /*rst*/    { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
+        /*none*/   { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
+    },
+    {
+        /* REPLY */
+        /*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2    */
+        /*syn*/       { sIV, sS2, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sS2 },
+        /*
+         *    sNO -> sIV    Never reached.
+         *    sSS -> sS2    Simultaneous open
+         *    sS2 -> sS2    Retransmitted simultaneous SYN
+         *    sSR -> sIV    Invalid SYN packets sent by the server
+         *    sES -> sIV
+         *    sFW -> sIV
+         *    sCW -> sIV
+         *    sLA -> sIV
+         *    sTW -> sIV    Reopened connection, but server may not do it.
+         *    sCL -> sIV
+         */
+        /*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2    */
+        /*synack*/ { sIV, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
+        /*
+         *    sSS -> sSR    Standard open.
+         *    sS2 -> sSR    Simultaneous open
+         *    sSR -> sIG    Retransmitted SYN/ACK, ignore it.
+         *    sES -> sIG    Late retransmitted SYN/ACK?
+         *    sFW -> sIG    Might be SYN/ACK answering ignored SYN
+         *    sCW -> sIG
+         *    sLA -> sIG
+         *    sTW -> sIG
+         *    sCL -> sIG
+         */
+        /*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2    */
+        /*fin*/    { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
+        /*
+         *    sSS -> sIV    Server might not send FIN in this state.
+         *    sS2 -> sIV
+         *    sSR -> sFW    Close started.
+         *    sES -> sFW
+         *    sFW -> sLA    FIN seen in both directions.
+         *    sCW -> sLA
+         *    sLA -> sLA    Retransmitted FIN.
+         *    sTW -> sTW
+         *    sCL -> sCL
+         */
+        /*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2    */
+        /*ack*/       { sIV, sIG, sSR, sES, sCW, sCW, sTW, sTW, sCL, sIG },
+        /*
+         *    sSS -> sIG    Might be a half-open connection.
+         *    sS2 -> sIG
+         *    sSR -> sSR    Might answer late resent SYN.
+         *    sES -> sES    :-)
+         *    sFW -> sCW    Normal close request answered by ACK.
+         *    sCW -> sCW
+         *    sLA -> sTW    Last ACK detected.
+         *    sTW -> sTW    Retransmitted last ACK.
+         *    sCL -> sCL
+         */
+        /*          sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2    */
+        /*rst*/    { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
+        /*none*/   { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
+    }
+};
+
+#define SECS * HZ
+#define MINS * 60 SECS
+#define HOURS * 60 MINS
+#define DAYS * 24 HOURS
+
+//Per-state TCP timeouts, copied from nf_conntrack_proto_tcp.c and tuned
+//locally (see inline notes where values differ from the kernel defaults).
+unsigned int tcp_timeouts[TCP_CONNTRACK_TIMEOUT_MAX] __read_mostly = {
+    [TCP_CONNTRACK_SYN_SENT]    = 2 MINS,
+    [TCP_CONNTRACK_SYN_RECV]    = 5 MINS, //60 SECS,
+    [TCP_CONNTRACK_ESTABLISHED]    = 2 HOURS, //5 DAYS
+    [TCP_CONNTRACK_FIN_WAIT]    = 2 MINS,
+    [TCP_CONNTRACK_CLOSE_WAIT]    = 60 SECS,
+    [TCP_CONNTRACK_LAST_ACK]    = 30 SECS,
+    [TCP_CONNTRACK_TIME_WAIT]    = 2 MINS,
+    [TCP_CONNTRACK_CLOSE]        = 120 SECS, /*normal is 10SEC*/
+    [TCP_CONNTRACK_SYN_SENT2]    = 2 MINS,
+    /* RFC1122 says the R2 limit should be at least 100 seconds.
+       Linux uses 15 packets as limit, which corresponds
+       to ~13-30min depending on RTO. */
+    //[TCP_CONNTRACK_MAX]    = 2 MINS,
+    //[TCP_CONNTRACK_IGNORE]    = 2 MINS,
+    [TCP_CONNTRACK_RETRANS]    = 5 MINS,
+    [TCP_CONNTRACK_UNACK]      = 5 MINS,
+};
+
+unsigned int fast_udp_timeout_stream = 180*HZ;    /* UDP flow with replies seen */
+unsigned int fast_udp_timeout = 120*HZ; /*normal is 30*HZ*/
+
+//Ports whose protocols are not supported by fastnat.
+//No longer used: superseded by nofast_proto passed in dynamically via proc;
+//kept for a while so developers can see the corresponding port numbers.
+unsigned int nofast_port[NOFAST_PROTO_MAX] = {
+    21,      // FTP; sometimes used by the File Service Protocol (FSP)
+    22,      // ssh Secure Shell (SSH) service
+    23,      // telnet Telnet service
+    25,      // smtp Simple Mail Transfer Protocol (SMTP)
+    53,      // domain Domain Name Service (e.g. BIND)
+    67,      // DHCP server-side service port
+    68,      // DHCP client-side service port
+    69,      // tftp Trivial File Transfer Protocol (TFTP)
+    110,     // Post Office Protocol version 3
+    115,     // sftp Secure File Transfer Protocol (SFTP) service
+    123,     // ntp Network Time Protocol (NTP)
+    443,     // https secure Hypertext Transfer Protocol (HTTP)
+    500,     // isakmp Internet Security Association and Key Management Protocol (ISAKMP)
+    1352,    // Lotus Notes
+    1723,    // PPTP TCP
+    1990,    // stun-p1 cisco STUN Priority 1 port
+    1991,    // stun-p2 cisco STUN Priority 2 port
+    1992,    // stun-p3 cisco STUN Priority 3 port,ipsendmsg IPsendmsg
+    1993,    // snmp-tcp-port cisco SNMP TCP port
+    1994,    // stun-port cisco serial tunnel portTCP
+    1995,    // perf-port cisco perf portTCP
+    1996,    // tr-rsrb-port cisco Remote SRB portTCP
+    1997,    // gdp-port Cisco Gateway Discovery Protocol (GDP)
+    1998,    // x25-svc-port cisco X.25 service
+    4500,    // NAT-T UDP
+    5060     // 5060/udp: Session Initiation Protocol (SIP)
+};
+
+/* ******************************* function declarations ******************************* */
+/* Hook-point function pointers filled in by the fastnat/fast6/fastbr
+ * sub-modules; NULL while the corresponding feature is not initialised. */
+int (*fast_nat4_proc)(struct sk_buff *skb);
+int (*fast_nat6_proc)(struct sk_buff *skb);
+int (*fast_fw4_proc)(struct nf_conn *tmpl,
+                     struct sk_buff *skb,
+                     struct nf_conn *ct,
+                     struct nf_conntrack_l4proto *l4proto,
+                     unsigned int dataoff,
+                     int dir,
+                     u_int8_t protonum);
+int (*fast_fw6_proc)(struct nf_conn *tmpl,
+                     struct sk_buff *skb,
+                     struct nf_conn *ct,
+                     struct nf_conntrack_l4proto *l4proto,
+                     unsigned int dataoff,
+                     int dir,
+                     u_int8_t protonum);
+
+int (*fast_local4_proc)(struct nf_conn *tmpl,
+                        struct sk_buff *skb,
+                        struct nf_conn *ct,
+                        struct nf_conntrack_l4proto *l4proto,
+                        unsigned int dataoff,
+                        int dir,
+                        u_int8_t protonum);
+int (*fast_local6_proc)(struct nf_conn *tmpl,
+                        struct sk_buff *skb,
+                        struct nf_conn *ct,
+                        struct nf_conntrack_l4proto *l4proto,
+                        unsigned int dataoff,
+                        int dir,
+                        u_int8_t protonum);
+int (*fast_local4_output_proc)(struct sk_buff *skb);
+int (*fast_local6_output_proc)(struct sk_buff *skb);
+
+int (*fast_br_proc)(struct sk_buff *skb);
+
+extern int fast_nat_recv(struct sk_buff *skb);
+extern int fast6_recv(struct sk_buff *skb);
+
+/* Counters of packets whose IPv4/IPv6/TCP headers failed validation.
+ * NOTE(review): not incremented anywhere in this chunk -- presumably bumped
+ * by the fastnat/fast6 receive paths; confirm against the other files. */
+unsigned long iphdr_err_num =0;
+unsigned long ip6hdr_err_num =0;
+unsigned long tcphdr_err_num =0;
+unsigned long tcp6hdr_err_num =0;
+
+extern int fast4_fw_recv(struct nf_conn *tmpl,
+                         struct sk_buff *skb,
+                         struct nf_conn *ct,
+                         struct nf_conntrack_l4proto *l4proto,
+                         unsigned int dataoff,
+                         int dir,
+                         u_int8_t protonum);
+extern int fast6_fw_recv(struct nf_conn *tmpl,
+                         struct sk_buff *skb,
+                         struct nf_conn *ct,
+                         struct nf_conntrack_l4proto *l4proto,
+                         unsigned int dataoff,
+                         int dir,
+                         u_int8_t protonum);
+
+//extern int fast_br(struct sk_buff *skb);
+//extern struct net_device *getbrport_bydst(struct net_device *dev,unsigned char *dest);
+extern struct nf_conntrack_tuple_hash *
+nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
+                      const struct nf_conntrack_tuple *tuple);
+
+
+extern int (*fast_from_softirq) (struct sk_buff *skb);
+extern int (*fast_from_driver)(struct sk_buff *skb, struct net_device* dev);
+
+extern void fastnat_cleanup_links(void);
+extern void fast6_cleanup_links(void);
+
+extern fast_entry_t *cur_timeout_entry;
+extern int tcpack_timeout(fast_entry_t *entry, unsigned long *next_schedule, int *set_next);
+extern int tcpack_rel(fast_entry_t *entry);
+
+extern int tsp_fastnat_init(void);
+extern int tsp_fastnat_cleanup(void);
+
+extern int fast4_fw_init(void);
+extern int fast6_fw_init(void);
+
+extern int fast4_fw_cleanup(void);
+extern int fast6_fw_cleanup(void);
+
+extern int tsp_fast6_init(void);
+extern int tsp_fast6_cleanup(void);
+
+extern int fastnat_event(traverse_command_t *cmd);
+extern int fast6_event(traverse_command_t *cmd);
+
+//initialization of the fast-forward proc files
+extern int fast_conntrack_init_proc(void );
+
+//initialization of the kernel platform-adaptation proc files
+extern int net_adapter_init_proc(void );
+
+/* Mirror hook; NOTE(review): never assigned in this chunk -- presumably
+ * installed by the TSP module; confirm before relying on it. */
+unsigned int (*tsp_mirror_handle)(struct sk_buff *skb);
+
+extern void net_dbg_perf_dev_recv(char * packet_addr,char* node_str);
+extern void net_dbg_perf_clear_last_item(struct sk_buff *skb);
+
+
+/* ******************************* function implementations ******************************* */
+/*
+ * Sanity-check the L3 header of an skb before fast forwarding.
+ * proto is the Ethernet protocol number (ETH_P_IP or ETH_P_IPV6).
+ * Returns 1 when the header looks valid (or proto is neither of the two),
+ * 0 when the packet is malformed or truncated.
+ */
+static int fast_iphdr_check(struct sk_buff *skb, int proto)
+{
+    const struct iphdr *iph;
+    const struct ipv6hdr *ip6h;
+    u32 len;
+
+    if (proto == ETH_P_IP)
+    {
+        iph = ip_hdr(skb);
+
+        /* mirrors ip_rcv(): minimal IHL and version check */
+        if (iph->ihl < 5 || iph->version != 4)
+            return 0;
+
+        len = ntohs(iph->tot_len);
+        if (skb->len < len) {
+            return 0;    /* truncated packet */
+        }
+        if (len < (iph->ihl*4))
+            return 0;    /* total length smaller than the header itself */
+    }
+    else if(proto == ETH_P_IPV6)
+    {
+        ip6h = ipv6_hdr(skb);
+        if (ip6h->version != 6)
+            return 0;
+
+        /* mirrors ipv6_rcv(): payload_len == 0 together with a hop-by-hop
+         * header means a jumbogram, which is exempt from this comparison */
+        len = ntohs(ip6h->payload_len);
+        if (len || ip6h->nexthdr != NEXTHDR_HOP) {
+            if (len + sizeof(struct ipv6hdr) > skb->len) {
+                return 0;
+            }
+        }
+
+    }
+
+    return 1;
+}
+
+/*
+ * Based on ipv6_skip_exthdr() in net/ipv6/exthdr.c
+ *
+ * This function parses (probably truncated) exthdr set "hdr"
+ * of length "len". "nexthdrp" initially points to some place,
+ * where type of the first header can be found.
+ *
+ * It skips all well-known exthdrs, and returns pointer to the start
+ * of unparsable area i.e. the first header with unknown type.
+ * if success, *nexthdr is updated by type/protocol of this header.
+ *
+ * NOTES: - it may return pointer pointing beyond end of packet,
+ *          if the last recognized header is truncated in the middle.
+ *        - if packet is truncated, so that all parsed headers are skipped,
+ *          it returns -1.
+ *        - if packet is fragmented, return pointer of the fragment header.
+ *        - ESP is unparsable for now and considered like
+ *          normal payload protocol.
+ *        - Note also special handling of AUTH header. Thanks to IPsec wizards.
+ */
+
+static int nf_ct_ipv6_skip_exthdr(const struct sk_buff *skb, int start,
+                                  u8 *nexthdrp, int len)
+{
+    u8 nexthdr = *nexthdrp;
+
+    while (ipv6_ext_hdr(nexthdr)) {
+        struct ipv6_opt_hdr hdr;
+        int hdrlen;
+
+        if (len < (int)sizeof(struct ipv6_opt_hdr))
+            return -1;    /* truncated: cannot even read the option header */
+        if (nexthdr == NEXTHDR_NONE)
+            break;
+        if (nexthdr == NEXTHDR_FRAGMENT)
+            break;    /* per contract above: return offset of the fragment header */
+        if (skb_copy_bits(skb, start, &hdr, sizeof(hdr)))
+            BUG();
+        if (nexthdr == NEXTHDR_AUTH)
+            hdrlen = (hdr.hdrlen+2)<<2;    /* AUTH header length is in 4-byte units */
+        else
+            hdrlen = ipv6_optlen(&hdr);
+
+        nexthdr = hdr.nexthdr;
+        len -= hdrlen;
+        start += hdrlen;
+    }
+
+    *nexthdrp = nexthdr;
+    return start;
+}
+
+
+/*
+ * Validate the TCP header of an skb (IPv4 or IPv6) before fast forwarding.
+ * Returns 1 when the header is consistent (or the packet carries no TCP),
+ * 0 when the packet is malformed or truncated.
+ *
+ * Fix: nf_ct_ipv6_skip_exthdr() returns -1 when the extension-header chain
+ * is truncated; the old code used that value unchecked as an offset, which
+ * could read outside the packet. Also drops the unused local `l4head`.
+ */
+static int fast_tcphdr_check(struct sk_buff *skb, int proto)
+{
+    const struct iphdr *iph = NULL;
+    const struct ipv6hdr *ip6h = NULL;
+    struct tcphdr *tcph = NULL;
+    unsigned int iphdr_len = 0;
+    unsigned int ip6hdr_len = 0;
+    unsigned int tcphdr_len = 0;
+    __u8 protonum;
+    int extoff = 0;
+
+
+
+    if (proto == ETH_P_IP)
+    {
+        iph = ip_hdr(skb);
+        iphdr_len = iph->ihl * 4;
+        tcph = (struct tcphdr *)((unsigned char*)iph + iphdr_len);
+        tcphdr_len = sizeof(struct tcphdr);
+
+        if (tcphdr_len > skb->len - iphdr_len)
+            return 0;
+
+        /* does the TCP header length agree with doff? */
+        if (tcph->doff < tcphdr_len/4)
+            return 0;
+
+        if (tcph->doff*4 > skb->len - iphdr_len)
+            return 0;
+    }
+    else if(proto == ETH_P_IPV6)
+    {
+        ip6h = ipv6_hdr(skb);
+        ip6hdr_len = sizeof(struct ipv6hdr);
+        tcphdr_len = sizeof(struct tcphdr);
+
+        /* based on ipv6_get_l4proto(): extract the L4 protocol number */
+        extoff = skb_network_offset(skb) + ip6hdr_len;
+        protonum = 0;
+        if (skb_copy_bits(skb, skb_network_offset(skb) + offsetof(struct ipv6hdr, nexthdr),
+                          &protonum, sizeof(protonum)) != 0) {
+            return 0;
+        }
+        extoff = nf_ct_ipv6_skip_exthdr(skb, extoff, &protonum, skb->len - extoff);
+        /* -1 means a truncated extension-header chain: reject instead of
+         * using the negative value as an offset */
+        if (extoff < 0)
+            return 0;
+
+        if(protonum != NEXTHDR_TCP)
+            return 1;
+
+        /* NOTE(review): extoff is relative to skb->data but is added to the
+         * network header pointer; this is only exact when the network header
+         * starts at skb->data -- confirm for all ingress paths */
+        tcph = (struct tcphdr *)((unsigned char*)ip6h + extoff);
+        if (tcphdr_len > skb->len - extoff)
+            return 0;
+
+        /* does the TCP header length agree with doff? */
+        if (tcph->doff < tcphdr_len/4)
+            return 0;
+
+        if (tcph->doff*4 > skb->len - extoff)
+            return 0;
+    }
+
+
+    return 1;
+}
+/* Hand the skb to one registered packet tap (ptype_all entry).
+ * Takes an extra skb reference because pt_prev->func() consumes one. */
+static inline int deliver_skb(struct sk_buff *skb,
+                              struct packet_type *pt_prev,
+                              struct net_device *orig_dev)
+{
+    atomic_inc(&skb->users.refs);
+    return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+}
+
+/*
+ * Fetch the conntrack entry attached to an skb and pin it with references
+ * so it can be handed to the AP side.
+ * NOTE(review): two nf_conntrack_get() calls here pair with the two
+ * nf_conntrack_put() calls in put_ct_for_ap() -- presumably deliberate;
+ * confirm against the AP-side usage.
+ * Returns &ct->ct_general, or NULL when skb is NULL or carries no ct.
+ */
+void *get_ct_for_ap(struct sk_buff *skb)
+{
+	if(skb){
+		enum ip_conntrack_info ctinfo;
+		struct nf_conn * ct = nf_ct_get(skb, &ctinfo);
+		if(ct){
+			nf_conntrack_get(&ct->ct_general);
+			nf_conntrack_get(&ct->ct_general);
+			return &ct->ct_general;
+		}
+	}
+	return NULL;
+}
+
+/* Drop the two conntrack references taken by get_ct_for_ap(). */
+void put_ct_for_ap(void *pct)
+{
+	struct nf_conn *ct = (struct nf_conn *)pct;
+	nf_conntrack_put((struct nf_conntrack *)ct);
+	nf_conntrack_put((struct nf_conntrack *)ct);
+}
+
+//Deliver the skb to RAW_PACKET (AF_PACKET) sockets here, to work around the
+//problem that a normal capture in __netif_receive_skb() would already see
+//packet contents modified by the fast path.
+void fast_tcpdump(struct sk_buff *skb)
+{
+    struct packet_type *ptype = NULL;
+
+    rcu_read_lock();
+    list_for_each_entry_rcu(ptype, &ptype_all, list)
+    {
+        if (!ptype->dev || ptype->dev == skb->dev)
+        {
+            skbinfo_add(NULL, SKB_IRQ_FREE);
+            deliver_skb(skb, ptype, skb->dev);
+        }
+    }
+    rcu_read_unlock();
+}
+
+//Based on ip_finish_output2(): grow the skb headroom when the egress device
+//needs more link-layer space. The original skb is freed when reallocating;
+//returns the usable skb, or NULL on allocation failure.
+struct sk_buff *fast_expand_headroom(struct sk_buff *skb, struct net_device *dev) {
+    unsigned int hh_len = LL_RESERVED_SPACE(dev);
+    struct sk_buff *skb2 = NULL;
+    if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
+        skb2 = skb_realloc_headroom(skb, max(hh_len, NET_SKB_PAD));
+        if(skb2)
+            clean_cache(skb2->data,skb2->len);
+        kfree_skb(skb);
+        return skb2;
+    }
+    return skb;
+}
+
+//Based on ip6_xmit(): grow the skb headroom (link layer + IPv6 header).
+//The original skb is freed when reallocating; returns the usable skb,
+//or NULL on allocation failure.
+struct sk_buff *fast_expand_headroom_v6(struct sk_buff *skb, struct net_device *dev) {
+    unsigned int hh_len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr);
+    struct sk_buff *skb2 = NULL;
+    if (unlikely(skb_headroom(skb) < hh_len)) {
+        skb2 = skb_realloc_headroom(skb, max(hh_len, NET_SKB_PAD));
+        if(skb2)
+            clean_cache(skb2->data,skb2->len);
+        kfree_skb(skb);
+        return skb2;
+    }
+    return skb;
+}
+
+
+/* Remove one entry from the singly linked fast list and free it.
+ * Note: the entry is freed to the slab even when it is not found on the
+ * list, so callers must pass an entry that belongs to list_head.
+ * Always returns NULL. */
+fast_entry_t *fn_list_del(fast_list_t *list_head, fast_entry_t *entry)
+{
+    fast_entry_t *ret_entry = NULL, **pprev = NULL;
+
+    if (!entry)
+    {
+        return NULL;
+    }
+
+    pprev = &list_head->next;
+    for (ret_entry = list_head->next; ret_entry; ret_entry = ret_entry->next)
+    {
+        if (ret_entry == entry)
+        {
+            *pprev = ret_entry->next;
+            list_head->count--;
+            break;
+        }
+        pprev = &ret_entry->next;
+    }
+    kmem_cache_free(fast_head_cache, entry);
+    netslab_dec(FAST_SLAB);
+    return NULL;
+}
+
+//Add a node at the head of the fast list.
+void fn_list_add(fast_list_t *list_head, fast_entry_t *entry)
+{
+    entry->next = list_head->next;
+    list_head->next = entry;
+    list_head->count++;
+}
+
+//Look up the working hash for the entry matching tuple; returns its
+//fast_entry_data_t, or NULL when no match exists.
+fast_entry_data_t *fast_find_entry_data(const struct hlist_nulls_head *working_hash, const struct nf_conntrack_tuple *tuple)
+{
+    struct nf_conntrack_tuple_hash *h;
+    struct hlist_nulls_node *n;
+    unsigned int hash;
+
+    hash = hash_conntrack_fast(tuple);
+    hlist_nulls_for_each_entry_rcu(h, n, &working_hash[hash], hnnode)
+    {
+        if (nf_ct_tuple_equal(tuple, &h->tuple))
+        {
+            return fast_hash_to_data(h);
+        }
+    }
+
+    return NULL;
+}
+
+//Insert an entry into the working hash (no-op when the tuple already exists)
+//and take a reference on its egress device. Always returns 0.
+int fast_add_entry(struct hlist_nulls_head *working_hash, fast_entry_data_t *entry_data)
+{
+    unsigned int hash;
+
+    hash = hash_conntrack_fast(&entry_data->tuplehash.tuple);
+    if (fast_find_entry_data(working_hash, &entry_data->tuplehash.tuple))
+    {
+        return 0;
+    }
+
+    hlist_nulls_add_head_rcu(&entry_data->tuplehash.hnnode, &working_hash[hash]);
+    //Because dev is already held in the current flow, rcu_read_lock()
+    //protection is not needed here; not a big problem.
+    dev_hold(entry_data->outdev);
+
+    return 0;
+}
+
+/* Unlink every linked direction of the entry from the working hash and drop
+ * the device references taken in fast_add_entry(). */
+static void workinghash_del_node(fast_entry_t *entry)
+{
+    int i = 0;
+
+    for (i = 0; i < IP_CT_DIR_MAX; i++)
+    {
+        //if (entry->flags & (1 << i))
+        if ((entry->flags & (1 << i))&&(0 != entry->data[i].tuplehash.hnnode.next))
+        {
+            hlist_nulls_del_rcu(&entry->data[i].tuplehash.hnnode);
+            //hlist_nulls_del(&entry->data[i].tuplehash.hnnode);
+            dev_put(entry->data[i].outdev);
+        }
+    }
+}
+
+/* Delete a link: release tcp-ack state, drop the ct reference taken at
+ * entry creation, then unlink and free the list entry. */
+static void fastlist_del_entry(fast_list_t *list_head, fast_entry_t *entry)
+{
+    tcpack_rel(entry);
+    //nf_ct_put(entry->ct);
+    atomic_dec(&(entry->ct->ct_general.use));
+    fn_list_del(list_head, entry);
+}
+
+/* fast timeout handling: delete the link when its timer fires */
+/* jiangjing: parameter type changed to unsigned long */
+extern spinlock_t fast6_spinlock;
+extern spinlock_t fastnat_spinlock;
+static void fast_timeout(struct timer_list *ptimer)
+{
+    fast_entry_t *entry = (fast_entry_t *)(ptimer->data);
+    fast_entry_t *ret_entry = NULL;
+    struct fast_list_s *list_head = entry->list_head;
+    spinlock_t *fast_spinlock = entry->fast_spinlock;
+    /* only the two known lists are handled; walk the list under the lock to
+     * make sure the entry is still present before deleting it */
+    if(fast_spinlock == &fast6_spinlock || fast_spinlock == &fastnat_spinlock)
+    {
+        spin_lock_bh(fast_spinlock);
+
+        for (ret_entry = list_head->next; ret_entry; ret_entry = ret_entry->next)
+        {
+            if (ret_entry == entry)
+            {
+                workinghash_del_node(entry);
+                fastlist_del_entry(entry->list_head, entry);
+                spin_unlock_bh(fast_spinlock);
+                return;
+            }
+        }
+        spin_unlock_bh(fast_spinlock);
+    }
+}
+
+//Look up the list entry for ct; when found, refresh its expiry timer and
+//return it. When not found, create a new entry (ORIGINAL direction only),
+//arm its timeout timer and link it at the list head.
+fast_entry_t *fast_get_entry(fast_list_t *list_head, struct nf_conn *ct, char dir)
+{
+    fast_entry_t *ret = NULL;
+    u_int8_t protocol;
+    unsigned long expires;
+
+    for (ret = list_head->next; ret; ret = ret->next)
+    {
+        if (ret->ct == ct)
+        {
+            protocol = nf_ct_protonum(ct);
+            if (IPPROTO_TCP == protocol)
+            {
+                /*tcp*/
+                expires = jiffies + tcp_timeouts[ct->proto.tcp.state];
+            }
+            else
+            {
+                /*udp: longer timeout once replies have been seen*/
+                if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status))
+                {
+                    expires = jiffies + fast_udp_timeout_stream;
+                }
+                else
+                {
+                    expires = jiffies + fast_udp_timeout;
+                }
+
+
+            }
+            mod_timer(&ret->timeout, expires);
+            return ret;
+        }
+    }
+
+    /*only create links for the ORIGINAL direction*/
+    if (IP_CT_DIR_ORIGINAL != dir)
+    {
+        return NULL;
+    }
+
+    //Both are slab mechanisms: kmalloc uses the generic slab, this one a
+    //dedicated slab; switched to the dedicated slab.
+    ret = kmem_cache_alloc(fast_head_cache, GFP_ATOMIC);
+    if (ret == NULL)
+    {
+        print_sun(SUN_ERR,"fast_get_entry: kmalloc fail!\n");
+        return NULL;
+    }
+    netslab_inc(FAST_SLAB);
+    memset(ret, 0, sizeof(fast_entry_t));
+    ret->ct = ct;
+    ret->list_head = list_head;
+
+    //set up the timeout timer
+    __init_timer(&ret->timeout, NULL, 0);
+
+    protocol = nf_ct_protonum(ct);
+    if (IPPROTO_TCP == protocol)
+    {
+        /*tcp*/
+        ret->timeout.expires = jiffies + tcp_timeouts[ct->proto.tcp.state];
+    }
+    else
+    {
+        /*udp*/
+        if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status))
+        {
+            ret->timeout.expires = jiffies + fast_udp_timeout_stream;
+        }
+        else
+        {
+            ret->timeout.expires = jiffies + fast_udp_timeout;
+        }
+    }
+
+    ret->timeout.data = (unsigned long)ret;
+    ret->timeout.function = fast_timeout;
+    add_timer(&ret->timeout);
+
+    fn_list_add(list_head, ret);
+
+    return ret;
+}
+
+/* Map the flag combination of a TCP segment to a tcp_bit_set index, in the
+ * same precedence order as get_conntrack_index() in nf_conntrack_proto_tcp.c. */
+unsigned int get_conntrack_index(const struct tcphdr *tcph)
+{
+    if (tcph->rst) return TCP_RST_SET;
+    else if (tcph->syn) return (tcph->ack ? TCP_SYNACK_SET : TCP_SYN_SET);
+    else if (tcph->fin) return TCP_FIN_SET;
+    else if (tcph->ack) return TCP_ACK_SET;
+    else return TCP_NONE_SET;
+}
+
+/* Update the cached conntrack TCP state and the entry's timeout timer based
+ * on the flags of the current segment. */
+void update_tcp_timeout(fast_entry_t *entry, fast_entry_data_t *entry_data, struct tcphdr *tcph)
+{
+    enum tcp_conntrack new_state, old_state;
+    unsigned int dir, index;
+
+    old_state = entry->ct->proto.tcp.state;
+    dir = entry_data->tuplehash.tuple.dst.dir;
+
+    /* guard against a missing header or an out-of-range cached state */
+    if (tcph == NULL || old_state >=TCP_CONNTRACK_MAX)
+    {
+        print_sun(SUN_ERR,"update_tcp_timeout tcph is null! \n");
+        return;
+    }
+    index = get_conntrack_index(tcph);
+
+    /*advance the TCP link state through the transition table*/
+    new_state = tcp_conntracks[dir][index][old_state];
+    if(old_state != new_state)
+    {
+        //see tcp_packet() in nf_conntrack_proto_tcp.c
+        if (new_state == TCP_CONNTRACK_IGNORE)
+            new_state = TCP_CONNTRACK_SYN_RECV;
+        else if (new_state == TCP_CONNTRACK_MAX)
+            return;
+
+        entry->ct->proto.tcp.state = new_state;
+        //Only run the timeout update when the live time actually changes,
+        //to reduce the number of list traversals.
+        mod_timer(&entry->timeout, jiffies + tcp_timeouts[new_state]);
+    }
+}
+
+/* Record the MAC header information for a fast entry.
+ * Returns 1 on success, 0 on failure (in which case the partially created
+ * entry and all its resources are torn down).
+ * NOTE(review): eth->h_proto is assigned `proto` as-is -- callers must pass
+ * it already in network byte order; confirm at the call sites. */
+int record_MAC_header(const struct hlist_nulls_head *working_hash, struct nf_conn *ct,
+                      fast_entry_t *entry, fast_entry_data_t *entry_data,
+                      struct neighbour *neigh, const struct net_device *out, int proto)
+{
+    struct ethhdr *eth;
+    struct net_device *dst_out = NULL;
+    int i;
+
+    if (out == NULL)
+        goto REL;
+
+    //Only pre-fill the MAC header for Ethernet-type interfaces; for others
+    //such as PPP/PPPoE the header is variable and cannot be pre-filled;
+    //see alloc_netdev and alloc_etherdev.
+    if (out->type != ARPHRD_ETHER)
+        return 1;
+
+    //When the egress is a bridge (br), fetch the L2 net device directly,
+    //i.e. usb0 or wifi0.
+    if (out->priv_flags & IFF_EBRIDGE)
+    {
+        if(out->dev_addr == NULL)
+            goto REL;
+
+        if (fastbr_level == 1)
+        {
+            dst_out = getbrport_bydst(out, neigh->ha);
+            if (dst_out == NULL)
+            {
+                print_sun(SUN_DBG,"!!!!! getbrport_bydst fail \n");
+                goto REL;
+            }
+            entry_data->outdev = dst_out;
+        }
+        else
+        {
+            entry_data->outdev = out;
+        }
+        entry_data->hh_flag = 1;
+        eth = (struct ethhdr *)entry_data->hh_data;
+        eth->h_proto = proto;
+        memcpy(eth->h_source, out->dev_addr, ETH_ALEN);
+        memcpy(eth->h_dest, neigh->ha, ETH_ALEN);
+    }
+    //ppp forwarding: only the bare IP packet needs to be passed on
+    else if (strncmp(out->name, ppp_name, strlen(ppp_name)) == 0)
+    {
+        if(out->dev_addr == NULL)
+            goto REL;
+
+        entry_data->outdev = out;
+        entry_data->hh_flag = 0;
+    }
+    //ordinary Ethernet forwarding
+    else
+    {
+        if(out->dev_addr == NULL)
+            goto REL;
+
+        //For wifi station/RJ45/USB etc. the MAC header must be filled in.
+        entry_data->outdev = out;
+        entry_data->hh_flag = 1;
+        eth = (struct ethhdr *)entry_data->hh_data;
+        eth->h_proto = proto;
+        memcpy(eth->h_source, out->dev_addr, ETH_ALEN);
+        memcpy(eth->h_dest, neigh->ha, ETH_ALEN);
+    }
+    return 1;
+
+REL:
+    //The link may already have been created earlier; all of its resources
+    //must be deleted here.
+    for (i = 0; i < IP_CT_DIR_MAX; i++)
+    {
+        if (entry->flags & (1 << i))
+        {
+            hlist_nulls_del(&entry->data[i].tuplehash.hnnode);
+            dev_put(entry->data[i].outdev);
+        }
+    }
+    //restore the ct timeout
+    //add_timer(&ct->timeout);
+    //nf_ct_put(ct);
+    atomic_dec(&(ct->ct_general.use));
+    del_timer(&entry->timeout);
+    fn_list_del(entry->list_head, entry);
+    return 0;
+}
+
+/* ¸ù¾ÝÄ¿µÄMACºÍÍøÇŲéÕÒÇŵ㣬ÕÒµ½·µ»ØÇŵ㣬·ñÔò·µ»ØNULL */
+struct net_device *getBridgePort(struct neighbour *neigh, const struct net_device *out)
+{
+    struct net_device *dst_out = NULL;
+
+    if (!test_bit(FAST_TYPE_BR_LOCAL_BIT, &fast_switch))
+        return NULL;
+
+    if (!out || !neigh)
+        return NULL;
+
+    //½ö¶ÔÒÔÌ«ÍøÀàÍø¿Ú½øÐÐMACÍ·Ô¤¸³Öµ
+    if (out->type != ARPHRD_ETHER)
+        return NULL;
+
+    //¶ÔÓÚ³ö¿ÚÎªÍøÇÅʱ£¬Ö±½Ó»ñÈ¡L2ÇŵãÉ豸
+    if (out->priv_flags & IFF_EBRIDGE)
+    {
+        if (out->dev_addr == NULL)
+            return NULL;
+
+        //»ñÈ¡Çŵã
+        dst_out = getbrport_bydst(out, neigh->ha);
+        if (dst_out && dst_out != out)
+            return dst_out;
+
+        print_sun(SUN_DBG, "!!!!! getbrport_bydst fail \n");
+    }
+    return NULL;
+}
+
+//Åжϲ»ÐèÒª¾­¹ýfastnatµÄЭÒé°ü
+int check_skip_ports(unsigned int net_dst_port)
+{
+    int i = 0;
+    unsigned int dst_port = htons(net_dst_port);
+
+    if (!dst_port)
+        return 0;
+
+    for (i = 0; i < sizeof(nofast_port)/sizeof(nofast_port[0]); i++)
+    {
+        if (dst_port == nofast_port[i])
+        {
+            return 1;
+        }
+    }
+    return 0;
+}
+
/*
 * Main list-walk worker: applies the command carried in @param (cast
 * from a traverse_command_t *) to every entry on @list_head.
 *
 * Currently only TRAVERSE_CMD_DEV_DOWN is handled: entries whose
 * output device index (or NAT masquerade index) matches cmd->arg are
 * stopped, unhashed and, for FAST_ALL_DIR, removed from the list.
 */
void traverse_process(fast_list_t *list_head, unsigned long param)
{
    fast_entry_t *entry, *next;
    traverse_command_t *cmd;
    int i, need_del;

    cmd = (traverse_command_t *)param;
    if (!cmd)
    {
        return;
    }

    /* Entries may be deleted while walking, so capture 'next' first. */
    for(entry = list_head->next; entry; entry = next)
    {
        next = entry->next;
        need_del = 0;

        if (cmd->cmd == TRAVERSE_CMD_DEV_DOWN)
        {
            /* Check both conntrack directions against the downed ifindex. */
            for (i = 0; i < IP_CT_DIR_MAX; i++)
            {
                if (entry->flags & (1 << i))
                {
                    const struct nf_conn_nat *nat = nfct_nat(entry->ct);
                    if ((entry->data[i].outdev && entry->data[i].outdev->ifindex == cmd->arg)
                            || (nat && nat->masq_index == cmd->arg))
                    {
                        need_del = FAST_ALL_DIR;
                        break;
                    }
                }
            }
        }

        if (need_del)
        {
            /* Stop the private expiry timer before unhashing. */
            del_timer(&entry->timeout);
            workinghash_del_node(entry);
            if (need_del == FAST_ALL_DIR)
            {
                fastlist_del_entry(entry->list_head, entry);
            }
        }
    }
}
+
+//fastnat_level¹Ø±Õ£¬Çå¿ÕËùÓÐipv4,ipv6¿ìËÙת·¢ÐÅÏ¢£¬±£ÁôÔ­ctÁ¬½Ó
+void fast_cleanup_links(fast_list_t *list_head)
+{
+    fast_entry_t *entry, *next;
+
+    for (entry = list_head->next; entry; entry = next)
+    {
+        next = entry->next;
+        //ɾ³ýentry×Ô¶¨ÒåµÄ¶¨Ê±Æ÷
+        del_timer(&entry->timeout);
+
+        workinghash_del_node(entry);
+
+        //»Ö¸´ctµÄ³¬Ê±
+        //add_timer(&entry->ct->timeout);
+        fn_list_del(list_head, entry);
+
+    }
+}
+
/* Debug-dump hook; intentionally a no-op in this build. */
void athr_fast_dump(int ctl)
{
    (void)ctl;                      /* parameter deliberately unused */
}
/* Built-in default conntrack zone used e.g. by modules; mirrors the
 * kernel's nf_ct_zone_dflt for the fast-path lookup helpers. */
const struct nf_conntrack_zone nf_ct_zone_dflt_fast = {
    .id	= NF_CT_DEFAULT_ZONE_ID,
    .dir	= NF_CT_DEFAULT_ZONE_DIR,
};
+
+/* ***************** ¿ìËÙת·¢Í³Ò»´¦Àíº¯Êý ********************************/
+/* ²éѯÂú×ã¿ìËÙת·¢µÄctÐÅÏ¢ --- ²Î¿¼nf_conntrack_inʵÏÖ */
+struct nf_conn *skb_get_ct(struct nf_conn **tmpl,
+                           struct sk_buff *skb,
+                           struct nf_conntrack_l4proto **l4proto,
+                           unsigned int *dataoff,
+                           u_int8_t pf,
+                           unsigned int hooknum,
+                           int *dir,
+                           u_int8_t *protonum)
+{
+    int ret;
+    struct nf_conntrack_tuple tuple;
+    struct nf_conntrack_tuple_hash *h;
+    struct nf_conn *ct;
+    struct nf_conntrack_zone * zone;
+    enum ip_conntrack_info ctinfo;
+    struct nf_conntrack_zone tmp;
+    u32 hash;
+
+
+    //ÒÔϲο¼nf_conntrack_inʵÏÖ²éѯÒÑÓÐct
+    *tmpl = nf_ct_get(skb, &ctinfo);
+    if (*tmpl || ctinfo == IP_CT_UNTRACKED) {
+        /* Previously seen (loopback or untracked)?  Ignore. */
+        if ((*tmpl && !nf_ct_is_template(*tmpl)) ||
+                ctinfo == IP_CT_UNTRACKED) {
+            goto err_out;
+        }
+        skb->_nfct = 0;
+    }
+
+    *dataoff = get_l4proto_fast(skb, skb_network_offset(skb), pf, protonum);
+
+    if (*dataoff <= 0) {
+        goto err_out;
+    }
+
+
+    *l4proto = nf_ct_l4proto_find(*protonum);
+
+
+    if (*protonum != IPPROTO_TCP && *protonum != IPPROTO_UDP)
+        goto err_out;
+
+    if (!nf_ct_get_tuple_fast(skb, skb_network_offset(skb),
+                              *dataoff, pf, *protonum, &init_net, &tuple)) {
+        goto err_out;
+    }
+
+
+    zone = nf_ct_zone_tmpl(*tmpl, skb, &tmp);
+    hash = hash_conntrack_raw_fast(&tuple, &init_net);
+
+    h = nf_conntrack_find_fast(&init_net, zone, &tuple, hash);
+
+    if (!h || IS_ERR(h))
+        goto err_out;
+
+    ct = nf_ct_tuplehash_to_ctrack(h);
+
+
+    if (!ct || IS_ERR(ct)) {
+        goto err_out;
+    }
+
+    if (test_bit(IPS_DYING_BIT, &ct->status) || test_bit(IPS_UNTRACKED_BIT, &ct->status))
+    {
+        nf_conntrack_put(&ct->ct_general);
+        goto err_out;
+    }
+
+    if (*tmpl && *tmpl == ct)
+    {
+        nf_conntrack_put(&ct->ct_general);
+        goto err_out;
+    }
+
+    //TCP±ØÐëË«Ïò½¨Á´ºó²Å×ß¿ìËÙת·¢
+    if (IPPROTO_TCP == *protonum && !test_bit(IPS_ASSURED_BIT, &ct->status))
+    {
+        nf_conntrack_put(&ct->ct_general);
+        goto err_out;
+    }
+
+    if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
+        *dir = 1;
+    } else {
+        *dir = 0;
+    }
+    return ct;
+
+err_out :
+    print_sun(SUN_DBG, "skb : 0x%x, skb_get_ct fail!!!!!!!!!!", skb);
+    if (*tmpl) {
+        nf_ct_set(skb, (struct nf_conn *)&((*tmpl)->ct_general), ctinfo);
+    }
+    else {
+        skb->_nfct = 0;
+    }
+    return NULL;
+}
+
/*
 * "New" fast-mode dispatch for one packet.
 *
 * Looks up the conntrack entry for @skb and, under fast_fw_spinlock,
 * routes the packet to the forwarding (FAST_CT_FW*) or local
 * (FAST_CT_LOCAL*) fast handler recorded on the ct; otherwise falls
 * back to the bridge fast path.  Returns 1 when a fast path consumed
 * the packet, 0 to hand it back to the normal stack.
 *
 * NOTE(review): on handler failure (ret = 0) the ct reference taken by
 * skb_get_ct() is not dropped here — presumably the handlers release
 * it themselves; confirm against fast_fw/fast_local_proc callees.
 */
int fast_for_ip_new(struct sk_buff *skb,
                    int(*fast_fw)(struct nf_conn *, struct sk_buff *, struct nf_conn *,
                                  struct nf_conntrack_l4proto *, unsigned int, int, u_int8_t),
                    int(*fast_local_proc)(struct nf_conn *, struct sk_buff *, struct nf_conn *,
                            struct nf_conntrack_l4proto *, unsigned int, int, u_int8_t),
                    int proto)
{
    struct nf_conn *ct = NULL, *tmpl = NULL;
    struct nf_conntrack_l4proto *l4proto;
    unsigned int dataoff;
    u_int8_t protonum;
    int dir = 0;
    int ret = 0;

    if (proto == ETH_P_IP)
        ct = skb_get_ct(&tmpl, skb, &l4proto, &dataoff, PF_INET, NF_INET_PRE_ROUTING, &dir, &protonum);
    else if (proto == ETH_P_IPV6)
        ct = skb_get_ct(&tmpl, skb, &l4proto, &dataoff, PF_INET6, NF_INET_PRE_ROUTING, &dir, &protonum);
    if (!ct)
    {
        /* No ct: the bridge fast path may still take the packet. */
        if (fast_br_proc && fast_br_proc(skb))
        {
            fastbr_num++;
            return 1;
        }
        return 0;
    }

    spin_lock_bh(&fast_fw_spinlock);
    /* TCP: require a completed handshake AND a fast link in both
     * directions before taking the fast path. */
    if (IPPROTO_TCP == protonum || NEXTHDR_TCP == protonum)
    {
        int rdir = (IP_CT_DIR_ORIGINAL == dir) ? IP_CT_DIR_REPLY: IP_CT_DIR_ORIGINAL;
        if (!test_bit(IPS_ASSURED_BIT, &ct->status)) {
            nf_conntrack_put(&ct->ct_general);
            spin_unlock_bh(&fast_fw_spinlock);
            return 0;
        }
        if (!(ct->fast_ct.fast_dst[dir] && ct->fast_ct.fast_dst[rdir])) {
            nf_conntrack_put(&ct->ct_general);
            spin_unlock_bh(&fast_fw_spinlock);
            return 0;
        }
    }
    switch(ct->fast_ct.isFast)
    {
    case FAST_CT_FW4:
    case FAST_CT_FW6:
        /* Forwarding fast path. */
        if (fast_fw && fast_fw(tmpl, skb, ct, l4proto, dataoff, dir, protonum)) {
            if (proto == ETH_P_IP)
            {
                fastnat_num++;
            }

            else if (proto == ETH_P_IPV6)
                fast6_num++;
            ret = 1;
            break;
        }
        ret = 0;
        break;
    case FAST_CT_LOCAL4:
    case FAST_CT_LOCAL6:
        /* Local-delivery fast path. */
        if (fast_local_proc && fast_local_proc(tmpl, skb, ct, l4proto, dataoff, dir, protonum)) {
            if (proto == ETH_P_IP)
                fast_local4_rcv_num++;
            else if (proto == ETH_P_IPV6)
                fast_local6_rcv_num++;
            ret = 1;
            break;
        }
        ret = 0;
        break;
    default:
        /* Not fast-enabled: drop our ct ref and try the bridge path. */
        nf_conntrack_put(&ct->ct_general);
        if (fast_br_proc && fast_br_proc(skb)) {
            fastbr_num++;
            ret = 1;
            break;
        }
        ret = 0;
        break;
    }

    spin_unlock_bh(&fast_fw_spinlock);
    return ret;
}
+
+//¾ÉµÄfastģʽ´¦Àí·½Ê½
+int fast_for_ip(struct sk_buff *skb, int(*fast_fw)(struct sk_buff *),
+                int(* fast_local_proc)(struct nf_conn *, struct sk_buff *, struct nf_conn *,
+                                       struct nf_conntrack_l4proto *, unsigned int, int, u_int8_t),
+                int proto)
+{
+    struct nf_conn *ct = NULL, *tmpl = NULL;
+    struct nf_conntrack_l4proto *l4proto;
+    unsigned int dataoff;
+    u_int8_t protonum;
+    int dir = 0;
+    //ת·¢¡¢ÇŽӡ¢±¾µØË³Ðò´¦Àí
+    if (fast_fwd_level > fast_br_level && fast_br_level > fast_local_level)
+    {
+        if (fast_fw && fast_fw(skb))
+        {
+            if (proto == ETH_P_IP)
+            {
+                fastnat_num++;
+            }
+            else if (proto == ETH_P_IPV6)
+                fast6_num++;
+            return 1;
+        }
+        else if (fast_br_proc && fast_br_proc(skb) == 1)
+        {
+            fastbr_num++;
+            return 1;
+        }
+    }
+    //ת·¢¡¢±¾µØ¡¢ÇŽÓ˳Ðò´¦Àí
+    else if (fast_fwd_level > fast_local_level && fast_local_level > fast_br_level)
+    {
+        if (fast_fw && fast_fw(skb))
+        {
+            if (proto == ETH_P_IP)
+                fastnat_num++;
+            else if (proto == ETH_P_IPV6)
+                fast6_num++;
+            return 1;
+        }
+        else if (fast_br_proc && fast_br_proc(skb) == 1)
+        {
+            fastbr_num++;
+            return 1;
+        }
+    }
+    //ÇŽӡ¢×ª·¢¡¢±¾µØË³Ðò´¦Àí
+    else if (fast_br_level > fast_fwd_level && fast_fwd_level > fast_local_level)
+    {
+        if (fast_br_proc && fast_br_proc(skb) == 1)
+        {
+            fastbr_num++;
+            return 1;
+        }
+        else if (fast_fw && fast_fw(skb))
+        {
+            if (proto == ETH_P_IP)
+                fastnat_num++;
+            else if (proto == ETH_P_IPV6)
+                fast6_num++;
+            return 1;
+        }
+    }
+    //ÇŽӡ¢±¾µØ¡¢×ª·¢Ë³Ðò´¦Àí
+    else if(fast_br_level > fast_local_level && fast_local_level > fast_fwd_level)
+    {
+        if (fast_br_proc && fast_br_proc(skb) == 1)
+        {
+            fastbr_num++;
+            return 1;
+        }
+        else if (fast_local_proc)
+        {
+            if (proto == ETH_P_IP)
+                ct = skb_get_ct(&tmpl, skb, &l4proto, &dataoff, PF_INET, NF_INET_PRE_ROUTING, &dir, &protonum);
+            else if (proto == ETH_P_IPV6)
+                ct = skb_get_ct(&tmpl, skb, &l4proto, &dataoff, PF_INET6, NF_INET_PRE_ROUTING, &dir, &protonum);
+
+            if(!ct)
+                return 0;
+
+            if (fast_local_proc(tmpl, skb, ct, l4proto, dataoff, dir, protonum)) {
+                if (proto == ETH_P_IP)
+                    fast_local4_rcv_num++;
+                else if (proto == ETH_P_IPV6)
+                    fast_local6_rcv_num++;
+                return 1;
+            }
+        }
+        else if (fast_fw && fast_fw(skb))
+        {
+            if (proto == ETH_P_IP)
+                fastnat_num ++;
+            else if (proto == ETH_P_IPV6)
+                fast6_num++;
+            return 1;
+        }
+    }
+    //±¾µØ¡¢×ª·¢¡¢ÇŽÓ˳Ðò´¦Àí
+    else if(fast_local_level > fast_fwd_level && fast_fwd_level > fast_br_level)
+    {
+        if (fast_fw && fast_fw(skb))
+        {
+            if (proto == ETH_P_IP)
+                fastnat_num++;
+            else if (proto == ETH_P_IPV6)
+                fast6_num++;
+            return 1;
+        }
+        else if (fast_br_proc == 1 && fast_br_proc(skb) == 1)
+        {
+            fastbr_num++;
+            return 1;
+        }
+    }
+    //±¾µØ¡¢ÇŽӡ¢×ª·¢Ë³Ðò´¦Àí
+    else if(fast_local_level > fast_br_level && fast_br_level > fast_fwd_level)
+    {
+        if (fast_br_proc && fast_br_proc(skb) == 1)
+        {
+            fastbr_num++;
+            return 1;
+        }
+        else if (fast_fw && fast_fw(skb))
+        {
+            if (proto == ETH_P_IP)
+                fastnat_num++;
+            else if (proto == ETH_P_IPV6)
+                fast6_num++;
+            return 1;
+        }
+    }
+    return 0;
+}
+
/* Module parameter: when non-zero, enables the bridge-trunk forwarding
 * extras (IPv4 address-conflict fast path and multicast fast path). */
int btrunk_fw = 0;
module_param(btrunk_fw, int, 0644);
//extern int fast_fwd_ip4addr_conflict(struct sk_buff *skb);
//extern int fast_for_multicast(struct sk_buff *skb);
/*
 * Fast-path entry for an skb already positioned at its IP header.
 * Some interfaces (e.g. PS/cellular) have no MAC header, so the caller
 * derives skb->protocol from the IP header itself (set_skbdata_toip).
 * Returns 1 when the packet was consumed (fast-forwarded or dropped
 * on a bad header), 0 to continue through the normal stack.
 */
int fast_for_ipdata(struct sk_buff *skb)
{

    struct iphdr *iph;

    /* Size-distribution statistics only. */
    if (skb->len > 1000)
        skb_big_num++;
    else if (skb->len < 100)
        skb_small_num++;

    if (skb->dev == NULL)
        return 0;
    if (skb->protocol == htons(ETH_P_IP)) //ipv4
    {
        skb_num4++;
        skb_bytes4 += skb->len;

        if(btrunk_fw && fast_fwd_ip4addr_conflict(skb) == 1)
        {
            return 1;
        }
        /* Multicast fast forward: optional btrunk path, otherwise
         * statistics only. */
        if (ipv4_is_multicast(ip_hdr(skb)->daddr))
        {
            multicast_num4++;
            if(btrunk_fw && fast_for_multicast(skb) == 1)
            {
                return 1;
            }
            return 0;
        }
        /* Broadcast is never fast-forwarded; count and bail. */
        else if (ipv4_is_lbcast(ip_hdr(skb)->daddr)) {
            broadcast_num4++;
            return 0;
        }

        /* Fragments go through the normal reassembly path. */
        if (ip_is_fragment(ip_hdr(skb)))
        {
            skbinfo_add(NULL, SKB_FRAG);
            return 0;
        }

        /* Only TCP and UDP are fast-forwarded. */
        if (ip_hdr(skb)->protocol != IPPROTO_TCP && ip_hdr(skb)->protocol != IPPROTO_UDP)
        {
            return 0;
        }

        /* Malformed IP header: drop here (we own the skb). */
        if(!fast_iphdr_check(skb, ETH_P_IP))
        {
            iphdr_err_num++;
            kfree_skb(skb);
            return 1;
        }

        /* Strip any trailing padding, as ip_rcv() does. */
        skb_trim(skb, ntohs(ip_hdr(skb)->tot_len));

        /* TCP header sanity check. */
        if (ip_hdr(skb)->protocol == IPPROTO_TCP) {
            if (!fast_tcphdr_check(skb, ETH_P_IP))	{
                tcphdr_err_num++;
                //kfree_skb(skb);
                return 0;
            }
        }
        if (!test_bit(FAST_TYPE_VERSION_BIT, &fast_switch))
            return fast_for_ip(skb, fast_nat4_proc, fast_local4_proc, ETH_P_IP);
        else
            return fast_for_ip_new(skb, fast_fw4_proc, fast_local4_proc, ETH_P_IP);

    }
    else if (skb->protocol == htons(ETH_P_IPV6)) //ipv6
    {
        skb_num6++;
        skb_bytes6 += skb->len;

        /* IPv6 multicast fast forward not implemented: count and bail. */
        if(ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))
        {
            multicast_num6++;
            return 0;
        }

        /* Malformed IPv6 header: drop here (we own the skb). */
        if(!fast_iphdr_check(skb, ETH_P_IPV6))
        {
            ip6hdr_err_num++;
            kfree_skb(skb);
            return 1;
        }
        /* Strip any trailing padding, as ipv6_rcv() does. */
        skb_trim(skb, ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr));
        /* TCP header sanity check. */
        if (!fast_tcphdr_check(skb, ETH_P_IPV6))	{
            tcp6hdr_err_num++;
            //kfree_skb(skb);
            return 0;
        }
        if (!test_bit(FAST_TYPE_VERSION_BIT, &fast_switch))
            return fast_for_ip(skb, fast_nat6_proc, fast_local6_proc, ETH_P_IPV6);
        else
            return fast_for_ip_new(skb, fast_fw6_proc, fast_local6_proc, ETH_P_IPV6);
    }
    else
        skb_unknown++;
    return 0;
}
+
/*
 * Advance skb->data to the IP header, peeling 802.1Q VLAN and PPPoE
 * session encapsulation on the way, and fix up the protocol and
 * network-header bookkeeping.  Returns 1 when an IPv4/IPv6 header was
 * reached, 0 when the frame is not IP.
 */
static int set_skbdata_toip(struct sk_buff *skb)
{
    __be16 next_pro = skb->protocol;
again:
    if (next_pro == htons(ETH_P_IP) || next_pro == htons(ETH_P_IPV6))
    {
        skb_set_network_header(skb, 0);
        skb_reset_mac_len(skb);
        skb->protocol = next_pro;
        return 1;
    }
    /* 802.1Q VLAN: strip one tag, read the inner ethertype (it sits
     * two bytes behind the new data pointer) and re-examine. */
    else if (next_pro == cpu_to_be16(ETH_P_8021Q))
    {
        skb->isvlan = 1;
        skb_pull(skb, VLAN_HLEN);
        next_pro = *((__be16 *)(skb->data - 2));
        goto again;
    }

    /* PPPoE session: bytes 6-7 of the header hold the PPP protocol id
     * (0x0021 = IPv4, 0x0057 = IPv6). */
    else if (next_pro == htons(ETH_P_PPP_SES))
    {
        if (*(skb->data + 6) == 0x00 && *(skb->data + 7) == 0x21)
        {
            next_pro = htons(ETH_P_IP);
            __skb_pull(skb, PPPOE_HEADER_LEN);
            goto again;
        }
        else if(*(skb->data+ 6) == 0x00 && *(skb->data + 7) == 0x57)
        {
            next_pro = htons(ETH_P_IPV6);
            __skb_pull(skb, PPPOE_HEADER_LEN);
            goto again;
        }
    }
    return 0;
}
+
/*
 * Fast-handle a packet taken from net/core (dev.c): IP, PPP, PPPoE,
 * VLAN framings.  If the fast path fails, every skb field touched here
 * is restored so the packet continues through the normal Linux stack
 * unchanged.  Returns 1 when the packet was consumed, 0 otherwise.
 */
static int try_fast_for_netcoredata(struct sk_buff *skb)
{
    __be16 old_pro = skb->protocol;
    unsigned int old_len = skb->len;
    unsigned char * old_data = skb->data;
    __be16 old_netheader = skb->network_header;

    /* (disabled) IP-header 4-byte alignment debug check */
    //if (((unsigned long)skb->data)%4 != 0)
    //panic("ERR: fast from dev skb->data%4 != 0");

    if (skb->indev == NULL)
        skb->indev = skb->dev;
    /* Locate the IP header precisely, skipping any ppp/mac/pppoe L2
     * headers along the way. */
    if (set_skbdata_toip(skb) == 1 && fast_for_ipdata(skb))
        return 1;

    /* Fast path failed: restore the original skb view and hand the
     * packet back to the standard kernel path. */
    skb->protocol = old_pro;
    skb->data = old_data;
    skb->len = old_len;
    skb->network_header = old_netheader;
    return 0;
}
+
/*
 * Fast-handle driver-delivered data whose skb points at the MAC frame
 * header; usable for any Ethernet-framed device's receive path.
 * Currently a thin wrapper over try_fast_for_netcoredata(); the MAC
 * header handling below is disabled.  Returns 1 when consumed.
 */
static int try_fast_for_macdata(struct sk_buff *skb, struct net_device *dev)
{
    /*
    struct ethhdr *eth;
    if (!(skb->network_header == 0 || skb->network_header == ~0U))
        panic("network_header    ERR!!!!!!!!!!\n");
    skb->dev = dev;
    if (skb->indev == NULL)
        skb->indev = dev;
    skb_reset_mac_header(skb);
    eth = eth_hdr(skb);
    skb->protocol = eth->h_proto;
    skb_pull(skb, ETH_HLEN);
    */

    //if (tsp_mirror_handle)
        //tsp_mirror_handle(skb);

    /* (disabled) proprietary tcp/udp loss/latency recording; needs the
     * companion app on the server side — clear the record when the
     * fast path fails. */
    //net_dbg_perf_dev_recv((char *)skb, skb->dev->name);
    if (try_fast_for_netcoredata(skb))
    {
        return 1;
    }
    /* (disabled) clear the proprietary perf record on failure. */
    //net_dbg_perf_clear_last_item(skb);

    //skb_push(skb, ETH_HLEN);
    return 0;
}
+
/* Export for other modules; tsp_mirror_handle is presumably defined
 * elsewhere in this file — TODO confirm. */
EXPORT_SYMBOL_GPL(tsp_mirror_handle);
+
+/*´¦Àí֪ͨÁ´Ê¼þ*/
+static int fast_event(struct notifier_block *this, unsigned long event, struct net_device *dev)
+{
+    traverse_command_t cmd;
+
+    switch (event) {
+    case NETDEV_DOWN:
+        if (dev)
+        {
+            cmd.cmd = TRAVERSE_CMD_DEV_DOWN;
+            cmd.arg = dev->ifindex;
+
+            fastnat_event(&cmd);
+            fast6_event(&cmd);
+        }
+        break;
+    }
+    return NOTIFY_DONE;
+}
+
/*
 * netdevice notifier callback.  Since kernel 3.11 the payload is a
 * struct netdev_notifier_info, not the net_device itself; the original
 * direct cast treated the wrapper struct's address as the device and
 * would read garbage.  Use the accessor to extract the real device.
 */
static int fast_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
    struct net_device *dev = netdev_notifier_info_to_dev(ptr);

    return fast_event(this, event, dev);
}
+
+static int fast_inet_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+    struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev;
+
+    return fast_event(this, event, dev);
+}
+
/* Netdevice notifier.  Priority must be higher than masquerade's,
 * otherwise the kernel will hang (see original author's note). */
static struct notifier_block fast_dev_notifier = {
    .notifier_call    = fast_device_event,
    .priority = 1,
};

/* Inet address notifier; same priority constraint as above. */
static struct notifier_block fast_inet_notifier = {
    .notifier_call    = fast_inet_event,
    .priority = 1,
};
+
+void fast_device_down_event_by_name(char *dev_name)
+{
+    struct net_device *dev = NULL;
+
+    if (!dev_name)
+    {
+        print_sun(SUN_ERR,"fast_device_down_event_by_name dev_name is null \n");
+        return;
+    }
+
+    dev = dev_get_by_name(&init_net, dev_name);
+    if (!dev)
+    {
+        print_sun(SUN_ERR,"fast_device_down_event_by_name dev not found \n");
+        return;
+    }
+
+    fast_event(NULL, NETDEV_DOWN, dev);
+
+    /*add by jiangjing*/
+    dev_put(dev);
+}
+
+/**** ÒÔϲ¿·ÖÊÇеÄfastģʽʹÓú¯Êý ****/
+extern void fast_local_conn_release(struct nf_conn *ct);
+extern void fast_local_sock_release(struct sock *sk);
+
+
/*
 * Record @ct on @dst's conn_head list (unless already present) so the
 * ct can be detached when the dst_entry is released.
 *
 * NOTE(review): list_for_each_entry_rcu() is used without a visible
 * rcu_read_lock here — presumably the caller provides the required
 * protection; confirm against call sites.
 */
void fast_dst_add_ct(struct dst_entry *dst, struct nf_conn *ct)
{
    struct conn_list *entry;
    int conn_flag = 0;

    list_for_each_entry_rcu(entry, &dst->conn_head, list)
    {
        if (entry->nfct == ct)
        {
            conn_flag = 1;
            break;
        }
    }

    if (conn_flag == 0)
    {
        /* GFP_KERNEL: assumes this path never runs in atomic context
         * — TODO confirm. */
        struct conn_list *conn_list_node =(struct conn_list*)kzalloc(sizeof(struct conn_list), GFP_KERNEL);
        if(conn_list_node) {
            rcu_assign_pointer(conn_list_node->nfct, ct);
            list_add_rcu(&conn_list_node->list, &dst->conn_head);
        }
    }
}
+
/*
 * On release of @dev, walk the whole conntrack hash and clear any
 * cached bridge-port reference to it from each entry's fast state
 * (matched by device name, under fast_fw_spinlock).
 */
void fast_fw_conn_release_by_dev(struct net_device* dev)
{
    int hash = 0;
    struct nf_conntrack_tuple_hash *h;
    struct hlist_nulls_node *n;
    struct nf_conn *ct;
    int dir;
    struct net_device *net;

    if(fastnat_level == FAST_CLOSE)
        return ;

    rcu_read_lock();
    for (hash = 0; hash < nf_conntrack_htable_size; hash++)
    {
        local_bh_disable();
        hlist_nulls_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnnode)
        {
            if (h)
            {
                ct = nf_ct_tuplehash_to_ctrack(h);

                /* Drop this device from the entry's fast-forward state. */
                spin_lock_bh(&fast_fw_spinlock);
                for (dir = IP_CT_DIR_ORIGINAL; dir < IP_CT_DIR_MAX; dir++)
                {
                    net = ct->fast_ct.fast_brport[dir];
                    if (net != NULL)
                    {
                        if(!strcmp(dev->name, net->name))
                        {
                            ct->fast_ct.fast_brport[dir] = NULL;
                        }
                    }
                }
                spin_unlock_bh(&fast_fw_spinlock);
            }
        }
        local_bh_enable();
    }
    rcu_read_unlock();
}
/*
 * On release of conntrack @ct, tear down its forwarding fast state in
 * both directions: unlink it from each cached dst's conn list, drop
 * the dst references, and clear the fast flags.
 *
 * NOTE(review): entries are kfree'd directly inside an _rcu list walk
 * — presumably safe because callers hold fast_fw_spinlock and readers
 * are excluded; confirm.
 */
void fast_fw_conn_release(struct nf_conn *ct)
{
    struct dst_entry *dst;
    struct conn_list *entry;
    int dir;

    for (dir = IP_CT_DIR_ORIGINAL; dir < IP_CT_DIR_MAX; dir++)
    {
        if (!(dst = dst_get_by_ct(ct, dir)))
            continue;

        /* Unlink this ct from the dst's back-reference list. */
        list_for_each_entry_rcu(entry, &dst->conn_head, list)
        {
            if (entry->nfct == ct)
            {
                entry->nfct = NULL;
                __list_del_entry(&entry->list);
                kfree(entry);
                break;
            }
        }
        /* dst_get_by_ct() took a hold; release it here. */
        dst_release(dst);
        rcu_assign_pointer(ct->fast_ct.fast_dst[dir], NULL);
        ct->fast_ct.fast_brport[dir] = NULL;
    }
    ct->fast_ct.isFast = 0;
}
+
+//¸ù¾Ýdst_entryµÄÊÍ·Å£¬ÊÍ·Åת·¢ÀàÏà¹ØÄÚÈÝ
+void fast_fw_dst_entry_release(struct dst_entry *dst)
+{
+    struct conn_list *entry = NULL;
+    struct conn_list *entry_tmp = NULL;
+    struct nf_conn *ct;
+    struct list_head *tmp;
+
+    list_for_each_entry_safe(entry, entry_tmp, &dst->conn_head, list) {
+
+        rcu_assign_pointer(ct, entry->nfct);
+        if (!ct)
+            continue;
+
+        if (ct->fast_ct.fast_dst[IP_CT_DIR_ORIGINAL] && ct->fast_ct.fast_dst[IP_CT_DIR_ORIGINAL] == dst) {
+            rcu_assign_pointer(ct->fast_ct.fast_dst[IP_CT_DIR_ORIGINAL], NULL);
+            ct->fast_ct.fast_brport[IP_CT_DIR_ORIGINAL]   = NULL;
+        }
+        else if (ct->fast_ct.fast_dst[IP_CT_DIR_REPLY] && ct->fast_ct.fast_dst[IP_CT_DIR_REPLY] == dst) {
+            rcu_assign_pointer(ct->fast_ct.fast_dst[IP_CT_DIR_REPLY], NULL);
+            ct->fast_ct.fast_brport[IP_CT_DIR_REPLY]      = NULL;
+        }
+        else
+            print_sun(SUN_ERR,"fast_fw_dst_entry_release \n");
+
+        if (!ct->fast_ct.fast_brport[IP_CT_DIR_ORIGINAL] && !ct->fast_ct.fast_brport[IP_CT_DIR_REPLY])
+            ct->fast_ct.isFast = 0;
+        entry->nfct = NULL;
+        __list_del_entry(&entry->list);
+        kfree(entry);
+    }
+}
+
+/* connÊÍ·Å֪ͨº¯Êý£¬Í¨ÖªfastÊÍ·ÅÏà¹ØÄÚÈÝ */
+void fast_conn_release(struct nf_conn *ct, int mark)
+{
+    spin_lock_bh(&fast_fw_spinlock);
+    if ((ct->fast_ct.isFast == FAST_CT_FW4 || ct->fast_ct.isFast == FAST_CT_FW6) && (mark & RELEASE_ALL_DST))
+    {
+        fast_fw_conn_release(ct);
+    }
+    spin_unlock_bh(&fast_fw_spinlock);
+}
+
+/* dst_entryÊÍ·Å֪ͨº¯Êý£¬Í¨ÖªfastÊÍ·ÅÏà¹ØÄÚÈÝ */
+void fast_dst_entry_release(struct dst_entry * dst)
+{
+    spin_lock_bh(&fast_fw_spinlock);
+    fast_fw_dst_entry_release(dst);
+    spin_unlock_bh(&fast_fw_spinlock);
+}
+
/*
 * Release all fast-forward state of the new fast mode.  Only the ct is
 * linked to both the dst and the sk, so the lookup goes through the
 * conntrack hash: every live ct gets fast_conn_release(@mark) applied.
 */
void fast_release_all(int mark)
{
    int hash = 0;
    struct nf_conntrack_tuple_hash *h;
    struct hlist_nulls_node *n;
    struct nf_conn *ct;

    rcu_read_lock();
    for (hash = 0; hash < nf_conntrack_htable_size; hash++) {
        local_bh_disable();
        hlist_nulls_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnnode) {
            if (h)
            {
                ct = nf_ct_tuplehash_to_ctrack(h);
                /* Skip entries already on their way out. */
                if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
                    continue;

                /* Drop this connection's fast-forward state. */
                fast_conn_release(ct, mark);

                nf_ct_put(ct);
            }
        }
        local_bh_enable();
    }
    rcu_read_unlock();
}
+
+/**** ÒÔϲ¿·ÖÊÇоÉfastģʽ¹²Óõĺ¯Êý ****/
+
+//¸ù¾ÝÓÅÏȼ¶ÅäÖò»Í¬²ã´ÎµÄ¹³×Óº¯Êý£¬Ä¿Ç°µÄ²ßÂÔÊǸߵȼ¶µÄ¹³×Óº¯Êý±»¸³Öµºó£¬µÍµÈ¼¶µÄ¹³×Óº¯ÊýÒ²Ò»¶¨´æÔÚ£¬
+//ÒÔ½â¾öijЩÉ豸ûÓиߵȼ¶µÄ¹³×Óº¯ÊýÇé¿ö£»¸Ã²ßÂÔ½ö»áÔì³É¹³×Óº¯Êý¶à´ÎÆ¥Åäʧ°ÜµÄ¿ÕÅÜ£¬²»»á´æÔÚÐÔÄÜÆ¿¾±
+void set_fast_level_cb(int param)
+{
+    //¸ù¾Ý¿ìËÙת·¢¼¶±ðÉèÖù©Íⲿµ÷Óú¯ÊýÖ¸Õë
+    if (param == FAST_CLOSE || param == FAST_CLOSE_KEEP_LINK)  //¹Ø±Õ¿ìËÙת·¢
+    {
+        fast_from_softirq = NULL;
+        fast_from_driver = NULL;
+    }
+    else if (param == FAST_NET_CORE) //Äں˲ã¿ìËÙת·¢£¬FAST_NEWÖ»Ö§³ÖÈíÖжÏÖе÷ÓÿìËÙת·¢
+    {
+        fast_from_softirq = try_fast_for_netcoredata;
+        fast_from_driver = NULL;
+    }
+    //net_deviceµ½net_device£¬²»½øÈëIPÈíÖжÏ
+    else if (param == FAST_NET_DEVICE)
+    {
+        fast_from_softirq = try_fast_for_netcoredata;
+        fast_from_driver = try_fast_for_macdata;
+    }
+    else
+        print_sun(SUN_ERR,"fastnat_level error, shoud be 0~2!\n");
+}
+
+/* ÉèÖø÷×Ó¹¦ÄܵĿìËÙת·¢µ÷Óú¯Êý */
+void set_fast_switch_cb(unsigned long param)
+{
+    //¸ù¾ÝоɿìËÙת·¢ÉèÖÃʹÓõIJ»Í¬ipv4¡¢ipv6½Ó¿Ú
+    if (test_bit(FAST_TYPE_VERSION_BIT, &param)) //еÄfastģʽ
+    {
+        //еÄfastģʽÏÂ: ÉèÖÃIPv4±¾µØ¿ìËÙת·¢»Øµ÷º¯Êý
+        if (test_bit(FAST_TYPE_FW4_BIT, &fast_switch))
+            fast_fw4_proc = fast4_fw_recv;
+        else
+            fast_fw4_proc = NULL;
+
+        //еÄfastģʽÏÂ: ÉèÖÃIPv6±¾µØ¿ìËÙת·¢»Øµ÷º¯Êý
+        if (test_bit(FAST_TYPE_FW6_BIT, &fast_switch))
+            fast_fw6_proc = fast6_fw_recv;
+        else
+            fast_fw6_proc = NULL;
+
+        fast_local4_proc        = NULL;
+        fast_local4_output_proc = NULL;
+
+        fast_local6_proc        = NULL;
+        fast_local6_output_proc = NULL;
+
+        //ÉèÖÃ×ÓÍø¼ä¿ìËÙת·¢»Øµ÷º¯Êý
+        if (test_bit(FAST_TYPE_BR_BIT, &fast_switch))
+            fast_br_proc = fast_br;
+        else
+            fast_br_proc = NULL;
+
+        fast_nat4_proc = NULL;
+        fast_nat6_proc = NULL;
+    }
+    else //ÀϵÄfastģʽ
+    {
+        //ÀϵÄfastģʽÏÂ: ÉèÖÃIPv4±¾µØ¿ìËÙת·¢»Øµ÷º¯Êý
+        fast_nat4_proc = fast_nat_recv;
+
+        //ÀϵÄfastģʽÏÂ: ÉèÖÃIPv6±¾µØ¿ìËÙת·¢»Øµ÷º¯Êý
+        fast_nat6_proc = fast6_recv;
+
+        fast_br_proc = fast_br;
+        fast_local4_proc         = NULL;
+        fast_local4_output_proc  = NULL;
+        fast_local6_proc         = NULL;
+        fast_local6_output_proc  = NULL;
+        fast_fw4_proc = NULL;
+        fast_fw6_proc = NULL;
+    }
+}
+
+void fast_level_change(int new_level)
+{
+    int old_level = 0;
+
+    old_level = fastnat_level;
+
+    if (old_level == new_level)
+        return;
+
+    fastnat_level = new_level;
+
+    //¸ù¾Ý×îеÄfastnat level£¬µ÷Õû»Øµ÷º¯Êý
+    set_fast_level_cb(fastnat_level);
+
+    //fastnat¹Ø±Õ£¬Çå¿ÕËùÓÐÐÅÏ¢
+    if (fastnat_level == FAST_CLOSE || fastnat_level == FAST_CLOSE_KEEP_LINK)
+    {
+        if (!test_bit(FAST_TYPE_VERSION_BIT, &fast_switch))
+        {
+            fastnat_cleanup_links();
+            fast6_cleanup_links();
+        }
+        else
+        {
+            fast_release_all(RELEASE_ALL_DST | RELEASE_ALL_SK);
+        }
+    }
+}
+
/*
 * Apply a new feature-switch bitmap: rewire the per-feature callbacks
 * and flush whatever cached state belongs to modes/features that were
 * just turned off.
 */
void fast_switch_change(unsigned long new_switch)
{
    unsigned long old_switch = fast_switch;

    if (old_switch == new_switch)
        return;

    fast_switch = new_switch;

    /* Install the per-feature callbacks for the new bitmap. */
    set_fast_switch_cb(fast_switch);

    /* Switching old -> new fast mode: flush the old mode's records. */
    if (!test_bit(FAST_TYPE_VERSION_BIT, &old_switch) && test_bit(FAST_TYPE_VERSION_BIT, &new_switch))
    {
        /* The old fast mode has no separate IPv4/IPv6 control. */
        fastnat_cleanup_links();

        fast6_cleanup_links();
    }
    /* Switching new -> old fast mode: flush the new mode's records. */
    else if (test_bit(FAST_TYPE_VERSION_BIT, &old_switch) && !test_bit(FAST_TYPE_VERSION_BIT, &new_switch))
    {
        fast_release_all(RELEASE_ALL_DST | RELEASE_ALL_SK);
    }

    /* New mode with a forwarding family disabled: drop dst state. */
    if (test_bit(FAST_TYPE_VERSION_BIT, &new_switch) && (!test_bit(FAST_TYPE_FW4_BIT, &new_switch) || !test_bit(FAST_TYPE_FW6_BIT, &new_switch))) {
        fast_release_all(RELEASE_ALL_DST);
    }
    /* New mode with a local family disabled: drop socket state. */
    if (test_bit(FAST_TYPE_VERSION_BIT, &new_switch) && (!test_bit(FAST_TYPE_LOCAL4_BIT, &new_switch) || !test_bit(FAST_TYPE_LOCAL6_BIT, &new_switch))) {
        fast_release_all(RELEASE_ALL_SK);
    }
}
+
+//¼Ç¼ÓÐDST_NOCACHE±êÖ¾µÄdst³öÏֵĴÎÊý
+int no_cache = 0;
+//¼Ç¼ÔÚʹÓÃdstʱ£¬dst->neighbourΪ¿ÕµÄ´ÎÊý
+int no_neighbour = 0;
+struct dst_entry * dst_get_by_ct(struct nf_conn * ct, int dir)
+{
+    struct dst_entry *dst;
+    struct neighbour *_neighbour = NULL;
+
+    rcu_read_lock();
+    dst = rcu_dereference_protected(ct->fast_ct.fast_dst[dir], 1);
+
+    if(dst)
+        dst_hold_and_use(dst, jiffies);
+    else {
+        dst = NULL;
+    }
+    rcu_read_unlock();
+    return dst;
+}
+
/*
 * Module init: set up the fast-entry slab cache, initialize the IPv4
 * and IPv6 sub-modules, register notifiers and create the proc files.
 * Fails only when BOTH family inits fail.
 *
 * NOTE(review): on the -EINVAL path the slab cache and sub-module
 * state are not unwound — confirm whether that is acceptable for a
 * built-in (late_initcall) module.
 */
static int __init
tsp_fast_init(void)
{
    int ret4 = 0, ret6 = 0;

    memset(zeromac, 0, sizeof(zeromac));

    spin_lock_init(&fast_fw_spinlock);
    /* Dedicated slab cache for fast entries. */
    fast_head_cache = kmem_cache_create("fast_head_cache",
                                        sizeof(struct fast_entry_s),
                                        0,
                                        SLAB_HWCACHE_ALIGN|SLAB_PANIC,
                                        NULL);

    /* Per-family initialization (IPv4, IPv6, and the fw variants). */
    ret4 = tsp_fastnat_init();
    ret6 = tsp_fast6_init();
    fast4_fw_init();
    fast6_fw_init();

    if ((ret4 != 0) && (ret6 != 0))
        return -EINVAL;

    /* Register the notifier chains. */
    register_netdevice_notifier(&fast_dev_notifier);
    register_inetaddr_notifier(&fast_inet_notifier);

    /* Fast-forward + platform proc file initialization. */
    set_fast_level_cb(fastnat_level);
    set_fast_switch_cb(fast_switch);
    fast_conntrack_init_proc();

    net_adapter_init_proc();
    return 0;
}
+
+static void __exit
+tsp_fast_cleanup(void)
+{
+    set_fast_level_cb(FAST_CLOSE);
+    set_fast_switch_cb(0);
+    unregister_netdevice_notifier(&fast_dev_notifier);
+    unregister_inetaddr_notifier(&fast_inet_notifier);
+
+    //·Ö±ðµ÷ÓÃipv4¡¢ipv6µÄ×¢Ïúº¯Êý
+    tsp_fastnat_cleanup();
+    tsp_fast6_cleanup();
+    fast4_fw_cleanup();
+    fast6_fw_cleanup();
+}
+
+late_initcall(tsp_fast_init);
+module_exit(tsp_fast_cleanup);
+
diff --git a/upstream/linux-5.10/net/core/fastproc/fast_track.c b/upstream/linux-5.10/net/core/fastproc/fast_track.c
new file mode 100755
index 0000000..37f3cfc
--- /dev/null
+++ b/upstream/linux-5.10/net/core/fastproc/fast_track.c
@@ -0,0 +1,1203 @@
+#include <linux/types.h>

+#include <linux/netfilter.h>

+#include <linux/slab.h>

+#include <linux/module.h>

+#include <linux/proc_fs.h>    /* Necessary because we use proc fs */

+#include <linux/skbuff.h>

+#include <linux/proc_fs.h>

+#include <linux/seq_file.h>

+#include <linux/percpu.h>

+#include <linux/netdevice.h>

+#include <linux/security.h>

+#include <net/net_namespace.h>

+#ifdef CONFIG_SYSCTL

+#include <linux/sysctl.h>

+#endif

+#include <linux/rculist_nulls.h>

+#include <net/netfilter/nf_conntrack.h>

+#include <net/netfilter/nf_conntrack_core.h>

+#include <net/netfilter/nf_conntrack_l4proto.h>

+#include <net/netfilter/nf_conntrack_expect.h>

+#include <net/netfilter/nf_conntrack_helper.h>

+#include <net/netfilter/nf_conntrack_acct.h>

+#include <net/netfilter/nf_conntrack_zones.h>

+#include <net/netfilter/nf_conntrack_timestamp.h>

+#include <net/SI/fastnat.h>

+#include <net/SI/fast6.h>

+#include <net/SI/fast_common.h>

+#include <net/SI/netioc_proc.h>

+

+

+#define PORT_LEN 10

+extern int fastnat_ack_param;

+extern int ackdrop_maxnum;

+extern unsigned int ct_iptables_syn_sw;

+

+

+/*
+ * seq_print_acct - emit "packets=... bytes=..." accounting counters for
+ * one conntrack direction into the seq_file.  Prints nothing when
+ * accounting is not enabled for this ct.  Always returns 0.
+ */
+static unsigned int seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir)

+{

+    struct nf_conn_acct *acct;

+    struct nf_conn_counter *counter;

+

+    acct = nf_conn_acct_find(ct);

+    if (!acct)

+        return 0;

+

+    counter = acct->counter;

+    seq_printf(s, "packets=%llu bytes=%llu ",

+               (unsigned long long)atomic64_read(&counter[dir].packets),

+               (unsigned long long)atomic64_read(&counter[dir].bytes));

+

+    return 0;

+}

+

+/* /proc fastnat_level: one-shot seq_file that reports the current level. */
+static void *fastnat_level_seq_start(struct seq_file *seq, loff_t *pos)

+__acquires(RCU)

+{

+    if (*pos >= 1)

+        return NULL;

+    /* the old code returned the integer 1 from a void* function; use the
+     * seq_file single-record token instead */
+    return SEQ_START_TOKEN;

+}

+

+static void *fastnat_level_seq_next(struct seq_file *s, void *v, loff_t *pos)

+{

+    (*pos)++;

+    return NULL;    /* only one record */

+}

+

+static void fastnat_level_seq_stop(struct seq_file *s, void *v)

+__releases(RCU)

+{

+    return;    /* nothing to release */

+}

+

+/* return 0 on success, 1 in case of error */

+static int fastnat_level_seq_show(struct seq_file *s, void *v)

+{

+    seq_printf(s, "fastnat_level: %d\n", fastnat_level);

+    return 0;

+}

+

+static const struct seq_operations fastnat_level_seq_ops = {

+    .start = fastnat_level_seq_start,

+    .next  = fastnat_level_seq_next,

+    .stop  = fastnat_level_seq_stop,

+    .show  = fastnat_level_seq_show

+};

+

+static int fastnat_level_open(struct inode *inode, struct file *file)

+{

+    return seq_open(file, &fastnat_level_seq_ops);

+}

+

+/*
+ * fastnat_level_set - proc write handler for the fastnat level.
+ * Accepts exactly one character ('0'..'2' or '5') plus the trailing
+ * terminator, i.e. count must be 2; installs the new level via
+ * fast_level_change().  (Removed an unused local "size".)
+ */
+static ssize_t fastnat_level_set(struct file *file,

+                                 const char __user *buffer, size_t count, loff_t *pos)

+{

+    char char_fastnat[5] = {0};

+    int level = 0;

+

+    /* count includes one trailing terminator; only single-char input */

+    if (count != 2)

+        return -EINVAL;

+

+    if (copy_from_user(char_fastnat, buffer, 1))

+        return -EFAULT;

+

+    if ((char_fastnat[0] < '0' || char_fastnat[0] > '2') && (char_fastnat[0] != '5'))

+        return -EINVAL;

+

+    level = (int)(char_fastnat[0] - '0');

+

+    /* re-install the fast-forward hook functions for the new level */

+    fast_level_change(level);

+    return count;

+}

+

+/* /proc fast_switch: one-shot seq_file that reports the switch bitmask. */
+static void *fast_switch_seq_start(struct seq_file *seq, loff_t *pos)

+__acquires(RCU)

+{

+    if (*pos >= 1)

+        return NULL;

+    /* was "return 1;" (int-to-pointer); use the seq_file token */
+    return SEQ_START_TOKEN;

+}

+

+static void *fast_switch_seq_next(struct seq_file *s, void *v, loff_t *pos)

+{

+    (*pos)++;

+    return NULL;    /* only one record */

+}

+

+static void fast_switch_seq_stop(struct seq_file *s, void *v)

+__releases(RCU)

+{

+    return;    /* nothing to release */

+}

+

+/* return 0 on success, 1 in case of error */

+static int fast_switch_seq_show(struct seq_file *s, void *v)

+{

+    seq_printf(s, "fast_switch: 0x%x\n", (unsigned int)fast_switch);

+    return 0;

+}

+

+static const struct seq_operations fast_switch_seq_ops = {

+    .start = fast_switch_seq_start,

+    .next  = fast_switch_seq_next,

+    .stop  = fast_switch_seq_stop,

+    .show  = fast_switch_seq_show

+};

+

+static int fast_switch_open(struct inode *inode, struct file *file)

+{

+    return seq_open(file, &fast_switch_seq_ops);

+}

+

+/*
+ * fast_switch_set - proc write handler for the fast_switch value.
+ * Parses up to 4 decimal digits (count includes one trailing
+ * terminator) and installs the new value via fast_switch_change().
+ * Fixes: the old code always copied 5 bytes from userspace regardless
+ * of count (overreading short writes / spurious -EFAULT), and
+ * (count - 1) could wrap for count == 0.  Removed an unused "size".
+ */
+static ssize_t fast_switch_set(struct file *file,

+                               const char __user *buffer, size_t count, loff_t *pos)

+{

+    char char_fastnat[5] = {0};

+    int level = 0, i = 0;

+

+    /* need at least one digit + terminator, at most 4 digits */

+    if (count < 2 || count > 5)

+        return -EINVAL;

+

+    /* copy only the digits the user actually wrote */

+    if (copy_from_user(char_fastnat, buffer, count - 1))

+        return -EFAULT;

+

+    for(i = 0; i < count - 1; i++) {

+        if(char_fastnat[i] < '0' || char_fastnat[i] > '9')

+            return -EINVAL;

+        level = (int)(char_fastnat[i] - '0') + level*10;

+    }

+

+    /* re-install the fast-forward hook functions */

+    fast_switch_change(level);

+    return count;

+}

+

+/* /proc fastbr_level: one-shot seq_file that reports the bridge level. */
+static void *fastbr_level_seq_start(struct seq_file *seq, loff_t *pos)

+__acquires(RCU)

+{

+    if (*pos >= 1)

+        return NULL;

+    /* was "return 1;" (int-to-pointer); use the seq_file token */
+    return SEQ_START_TOKEN;

+}

+

+static void *fastbr_level_seq_next(struct seq_file *s, void *v, loff_t *pos)

+{

+    (*pos)++;

+    return NULL;    /* only one record */

+}

+

+static void fastbr_level_seq_stop(struct seq_file *s, void *v)

+__releases(RCU)

+{

+    return;    /* nothing to release */

+}

+

+/* return 0 on success, 1 in case of error */

+static int fastbr_level_seq_show(struct seq_file *s, void *v)

+{

+    seq_printf(s, "fastbr_level: %d\n", fastbr_level);

+    return 0;

+}

+

+static const struct seq_operations fastbr_level_seq_ops = {

+    .start = fastbr_level_seq_start,

+    .next  = fastbr_level_seq_next,

+    .stop  = fastbr_level_seq_stop,

+    .show  = fastbr_level_seq_show

+};

+

+static int fastbr_level_open(struct inode *inode, struct file *file)

+{

+    return seq_open(file, &fastbr_level_seq_ops);

+}

+

+/*
+ * fastbr_level_set - proc write handler for the bridge fast level.
+ * Accepts exactly one character ('0' or '1') plus the trailing
+ * terminator, i.e. count must be 2.  (Removed an unused local "size".)
+ */
+static ssize_t fastbr_level_set(struct file *file,

+                                const char __user *buffer, size_t count, loff_t *pos)

+{

+    char char_fastbr[5] = {0};

+

+    /* count includes one trailing terminator; only '0'/'1' accepted */

+    if (count != 2)

+        return -EINVAL;

+

+    if (copy_from_user(char_fastbr, buffer, 1))

+        return -EFAULT;

+

+    if (char_fastbr[0] < '0' || char_fastbr[0] > '1')

+        return -EINVAL;

+

+    fastbr_level = (int)(char_fastbr[0] - '0');

+

+    return count;

+}

+

+/*
+ * fastnat_seq_start - begin a dump of the fastnat working hash.
+ * Holds fastnat_spinlock (BH-safe) for the WHOLE dump (released in
+ * fastnat_seq_stop).  At pos 0 a summary header is printed first:
+ * connection/skb counters and, when ack-delay is enabled, its stats.
+ * Returns the hash bucket for *pos, or NULL past the table end.
+ */
+static void *fastnat_seq_start(struct seq_file *seq, loff_t *pos)

+__acquires(RCU)

+{

+    spin_lock_bh(&fastnat_spinlock);

+    if (*pos >= nf_conntrack_htable_size)

+        return NULL;

+    else

+    {

+        if (*pos == 0)

+        {

+            seq_printf(seq, "fastnat have %d conn!!!\nskb_num4:%d,fastnat_num:%d\n",

+                       working_list.count, skb_num4, fastnat_num);

+            seq_printf(seq, "fastbr_sum:%d,fastbr_num:%d\n",

+                       skb_num4 + skb_num6 + skb_unknown - fastnat_num - fast6_num, fastbr_num);

+

+            if ((fastnat_ack_param == 1) && (ackdrop_maxnum  >= 1))

+            {

+                seq_printf(seq, "fastnat ack_delay_stats : total_count = %u, forword_count = %u, drop_count = %u, "

+                           "timeout_xmit_count = %u, timeout_drop_count = %u\n",

+                           (unsigned int)ack_delay_stats.total_count, (unsigned int)ack_delay_stats.forword_count,

+                           (unsigned int)ack_delay_stats.drop_count, (unsigned int)ack_delay_stats.timeout_xmit_count,

+                           (unsigned int)ack_delay_stats.timeout_drop_count);

+            }

+        }

+        return &working_hash[*pos];

+    }

+

+}

+

+/* Advance to the next hash bucket; NULL ends the iteration. */
+static void *fastnat_seq_next(struct seq_file *s, void *v, loff_t *pos)

+{

+    (*pos)++;

+    //return fastnat_get_next(s, v);

+    if (*pos >= nf_conntrack_htable_size)

+        return NULL;

+    else

+        return &working_hash[*pos];

+}

+

+

+/* Drop the lock taken in fastnat_seq_start. */
+static void fastnat_seq_stop(struct seq_file *s, void *v)

+__releases(RCU)

+{

+    spin_unlock_bh(&fastnat_spinlock);

+}

+

+/*
+ * fastnat_seq_show - print every fastnat entry in one working-hash
+ * bucket (v): proto numbers, timeout info, both tuples, accounting
+ * flags and the NAT address/port.  Only IP_CT_DIR_ORIGINAL rows are
+ * printed; the ct reference is pinned around each entry.
+ * Returns 0 (including when a ct refcount was already zero).
+ */
+static int fastnat_seq_show(struct seq_file *s, void *v)

+{

+    struct hlist_nulls_head    *head = (struct hlist_nulls_head *) v;

+    struct nf_conntrack_tuple_hash *h;

+    struct hlist_nulls_node *n;

+    fast_entry_data_t *nat_entry_data;

+    fast_entry_t *nat_entry = NULL;

+    /* NOTE(review): l3proto and ret are assigned but never used */
+    const struct nf_conntrack_l3proto *l3proto;

+    const struct nf_conntrack_l4proto *l4proto;

+    int ret = 0;

+

+    hlist_nulls_for_each_entry(h, n, head, hnnode)

+    {

+        nat_entry_data = fast_hash_to_data(h);

+        nat_entry = fast_data_to_entry(nat_entry_data);

+

+        /* pin the ct; bail out of the whole bucket if it is dying */
+        if (unlikely(!atomic_inc_not_zero(&nat_entry->ct->ct_general.use)))

+            return 0;

+

+        /* we only want to print DIR_ORIGINAL */

+        if (NF_CT_DIRECTION(h))

+        {

+            nf_ct_put(nat_entry->ct);

+            continue;

+        }

+

+        l4proto = nf_ct_l4proto_find(nf_ct_protonum(nat_entry->ct));

+

+

+        ret = -ENOSPC;

+        seq_printf(s, "l3proto: %u l4proto: %u %lu %lu %lu ",

+                   nf_ct_l3num(nat_entry->ct), nf_ct_protonum(nat_entry->ct),

+                   (unsigned long)(nat_entry->timeout.expires/HZ),

+                   (unsigned long)(tcp_timeouts[nat_entry->ct->proto.tcp.state]/HZ), (unsigned long)(jiffies/HZ));

+        /* tcp_conntrack_names only has names up to TCP_CONNTRACK_MAX,
+         * but state may hold values beyond that */
+        if (nat_entry->ct->proto.tcp.state < TCP_CONNTRACK_MAX)

+        {

+#ifdef CONFIG_NF_CONNTRACK_PROCFS

+            if (l4proto->print_conntrack)

+            {

+                l4proto->print_conntrack(s, nat_entry->ct);

+            }

+#endif

+        }

+

+        print_tuple(s, &nat_entry->ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,

+                    l4proto);

+

+        if (seq_print_acct(s, nat_entry->ct, IP_CT_DIR_ORIGINAL))

+        {

+            nf_ct_put(nat_entry->ct);

+            continue;

+        }

+

+        if (!(test_bit(IPS_SEEN_REPLY_BIT, &nat_entry->ct->status)))

+            seq_printf(s, "[UNREPLIED] ");

+

+

+        print_tuple(s, &nat_entry->ct->tuplehash[IP_CT_DIR_REPLY].tuple,

+                    l4proto);

+

+        if (seq_print_acct(s, nat_entry->ct, IP_CT_DIR_REPLY))

+        {

+            nf_ct_put(nat_entry->ct);

+            continue;

+        }

+

+        if (test_bit(IPS_ASSURED_BIT, &nat_entry->ct->status))

+            seq_printf(s, "[ASSURED] ");

+

+

+        /* NOTE(review): "NAT_port==" carries a doubled '=' in the output
+         * format; left untouched in case external tools parse it */
+        seq_printf(s, "NAT_ip=%pI4 NAT_port==%hu  \n",&nat_entry->data[IP_CT_DIR_ORIGINAL].nat_addr,ntohs(nat_entry->data[IP_CT_DIR_ORIGINAL].nat_port));

+        nf_ct_put(nat_entry->ct);

+    }

+

+    return 0;

+}

+

+

+static const struct seq_operations fastnat_seq_ops = {

+    .start = fastnat_seq_start,

+    .next  = fastnat_seq_next,

+    .stop  = fastnat_seq_stop,

+    .show  = fastnat_seq_show

+};

+

+/* proc open: plain (non-private) seq_file over the fastnat hash */
+static int fastnat_open(struct inode *inode, struct file *file)

+{

+    return seq_open(file, &fastnat_seq_ops);

+}

+

+/*
+ * fast6_seq_start - begin a dump of the fast6 working hash.
+ * Holds fast6_spinlock (BH-safe) for the whole dump (released in
+ * fast6_seq_stop); a summary header is printed once at pos 0.
+ */
+static void *fast6_seq_start(struct seq_file *seq, loff_t *pos)

+__acquires(RCU)

+{

+    spin_lock_bh(&fast6_spinlock);

+    if (*pos >= nf_conntrack_htable_size)

+        return NULL;

+    else

+    {

+        if (*pos == 0)

+        {

+#if 0

+            seq_printf(seq, "fastnat ack_delay_stats : total_count = %d, forword_count = %d, drop_count = %d, "

+                       "timeout_xmit_count = %d, timeout_drop_count = %d\n",

+                       ack_delay_stats.total_count, ack_delay_stats.forword_count, ack_delay_stats.drop_count,

+                       ack_delay_stats.timeout_xmit_count, ack_delay_stats.timeout_drop_count);

+            seq_printf(seq, "fastnat have %d conn!!!\nfastnat_recv_count:%d,fastnat_real_count:%d\n",

+                       working_list.count,fastnat_recv_count,fastnat_real_count);

+            seq_printf(seq, "send_2_ps_failed:%u, send_2_usb_failed:%u\n", send_2_ps_failed, send_2_usb_failed);

+#endif

+            seq_printf(seq, "fast6 have %d conn!!!\nskb_num6:%d,fast6_num:%d\n",

+                       working_list6.count, skb_num6, fast6_num);

+        }

+        return &working_hash6[*pos];

+    }

+

+}

+

+/* Advance to the next hash bucket; NULL ends the iteration. */
+static void *fast6_seq_next(struct seq_file *s, void *v, loff_t *pos)

+{

+    (*pos)++;

+    if (*pos >= nf_conntrack_htable_size)

+        return NULL;

+    else

+        return &working_hash6[*pos];

+}

+

+

+/* Drop the lock taken in fast6_seq_start. */
+static void fast6_seq_stop(struct seq_file *s, void *v)

+__releases(RCU)

+{

+    spin_unlock_bh(&fast6_spinlock);

+}

+

+/*
+ * fast6_seq_show - print every fast6 entry in one working-hash bucket
+ * (v): proto numbers, timeout info, both tuples and accounting flags.
+ * Only IP_CT_DIR_ORIGINAL rows are printed; the ct reference is pinned
+ * around each entry.  Returns 0.
+ */
+static int fast6_seq_show(struct seq_file *s, void *v)

+{

+    struct hlist_nulls_head    *head = (struct hlist_nulls_head *) v;

+    struct nf_conntrack_tuple_hash *h;

+    struct hlist_nulls_node *n;

+    fast_entry_data_t *fast6_entry_data;

+    fast_entry_t *fast6_entry = NULL;

+    /* NOTE(review): l3proto and ret are assigned but never used */
+    const struct nf_conntrack_l3proto *l3proto;

+    const struct nf_conntrack_l4proto *l4proto;

+    int ret = 0;

+

+    hlist_nulls_for_each_entry(h, n, head, hnnode)

+    {

+        fast6_entry_data = fast_hash_to_data(h);

+        fast6_entry = fast_data_to_entry(fast6_entry_data);

+

+        /* pin the ct; bail out of the whole bucket if it is dying */
+        if (unlikely(!atomic_inc_not_zero(&fast6_entry->ct->ct_general.use)))

+            return 0;

+

+        /* we only want to print DIR_ORIGINAL */

+        if (NF_CT_DIRECTION(h))

+        {

+            nf_ct_put(fast6_entry->ct);

+            continue;

+        }

+

+        l4proto = nf_ct_l4proto_find(nf_ct_protonum(fast6_entry->ct));

+

+        ret = -ENOSPC;

+        seq_printf(s, "l3proto: %u l4proto: %u %lu %lu %lu ",

+                   nf_ct_l3num(fast6_entry->ct), nf_ct_protonum(fast6_entry->ct),

+                   (unsigned long)(fast6_entry->timeout.expires/HZ),

+                   (unsigned long)(tcp_timeouts[fast6_entry->ct->proto.tcp.state]/HZ), (unsigned long)(jiffies/HZ));

+

+        /* tcp_conntrack_names only has names up to TCP_CONNTRACK_MAX,
+         * but state may hold values beyond that */
+        if (fast6_entry->ct->proto.tcp.state < TCP_CONNTRACK_MAX)

+        {

+#ifdef CONFIG_NF_CONNTRACK_PROCFS

+            if (l4proto->print_conntrack)

+            {

+                l4proto->print_conntrack(s, fast6_entry->ct);

+            }

+#endif

+        }

+

+        print_tuple(s, &fast6_entry->ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,

+                    l4proto);

+

+        if (seq_print_acct(s, fast6_entry->ct, IP_CT_DIR_ORIGINAL))

+        {

+            nf_ct_put(fast6_entry->ct);

+            continue;

+        }

+

+        if (!(test_bit(IPS_SEEN_REPLY_BIT, &fast6_entry->ct->status)))

+            seq_printf(s, "[UNREPLIED] ");

+

+        print_tuple(s, &fast6_entry->ct->tuplehash[IP_CT_DIR_REPLY].tuple,

+                    l4proto);

+

+        if (seq_print_acct(s, fast6_entry->ct, IP_CT_DIR_REPLY))

+        {

+            nf_ct_put(fast6_entry->ct);

+            continue;

+        }

+

+        if (test_bit(IPS_ASSURED_BIT, &fast6_entry->ct->status))

+            seq_printf(s, "[ASSURED] ");

+

+

+        seq_printf(s, "\n");

+        nf_ct_put(fast6_entry->ct);

+    }

+

+    return 0;

+}

+

+static const struct seq_operations fast6_seq_ops = {

+    .start = fast6_seq_start,

+    .next  = fast6_seq_next,

+    .stop  = fast6_seq_stop,

+    .show  = fast6_seq_show

+};

+

+/* proc open: plain (non-private) seq_file over the fast6 hash */
+static int fast6_open(struct inode *inode, struct file *file)

+{

+    return seq_open(file, &fast6_seq_ops);

+}

+

+/*
+ * dev_down_set - proc write handler: drop every fast-path (ipv4/ipv6)
+ * link that references the named network device.
+ * Fixes: (count - 1) wrapped around for count == 0, and kernel min()
+ * rejects mismatched types -> min_t.
+ */
+static ssize_t dev_down_set(struct file *file,

+                            const char __user *buffer, size_t count, loff_t *pos)

+{

+    size_t size;

+    char dev_name[MAX_NET_DEVICE_NAME_LEN + 1] = {0};

+

+    /* count includes one trailing terminator; reject empty writes so
+     * that (count - 1) cannot wrap below */

+    if (count < 2)

+        return -EINVAL;

+

+    size = min_t(size_t, count - 1, MAX_NET_DEVICE_NAME_LEN);

+    if (copy_from_user(dev_name, buffer, size))

+        return -EFAULT;

+

+    /* remove the ipv4/ipv6 links bound to this net device */

+    fast_device_down_event_by_name(dev_name);

+

+    return count;

+}

+

+/* /proc nofast_port: one-shot seq_file listing ports excluded from fast
+ * forwarding. */
+static void *nofast_port_seq_start(struct seq_file *seq, loff_t *pos)

+__acquires(RCU)

+{

+    if (*pos >= 1)

+        return NULL;

+    /* was "return 1;" (int-to-pointer); use the seq_file token */
+    return SEQ_START_TOKEN;

+}

+

+static void *nofast_port_seq_next(struct seq_file *s, void *v, loff_t *pos)

+{

+    (*pos)++;

+    return NULL;    /* only one record */

+}

+

+static void nofast_port_seq_stop(struct seq_file *s, void *v)

+__releases(RCU)

+{

+    return;    /* nothing to release */

+}

+

+/* Print the nofast port list as "p1+p2+..."; the list is 0-terminated. */
+static int nofast_port_seq_show(struct seq_file *s, void *v)

+{

+    int i = 0;

+

+    if (nofast_port[0] == 0)

+    {

+        seq_printf(s, "All ports support fast! \n");

+    }

+    else

+    {

+        seq_printf(s, "Not supported ports include:\n%d", nofast_port[0]);

+

+        for (i = 1; i < NOFAST_PROTO_MAX; i++)

+        {

+            /* a port value of 0 terminates the list */

+            if (nofast_port[i] == 0)

+                break;

+            seq_printf(s, "+%d", nofast_port[i]);

+        }

+        seq_printf(s, "\n\n");

+    }

+    return 0;

+}

+

+static const struct seq_operations nofast_port_seq_ops = {

+    .start = nofast_port_seq_start,

+    .next  = nofast_port_seq_next,

+    .stop  = nofast_port_seq_stop,

+    .show  = nofast_port_seq_show

+};

+

+static int nofast_port_open(struct inode *inode, struct file *file)

+{

+    return seq_open(file, &nofast_port_seq_ops);

+}

+

+/* Filter tuple for the conn_datainfo dump; zeroed fields act as wildcards. */
+struct nf_conntrack_tuple tuple_info;

+/*

+getconn_type filter values:

+1~6: query fast-link information (see enum conn_fast_type)

+8: query skb free-point information

+9: query socket trace information

+*/

+/* 0 = no type filter */
+int getconn_type = 0;

+

+/* /proc conn_datainfo: one-shot seq_file; pos 0 prints the global
+ * fast4/fast6 counters, then the show callback walks the ct table. */
+static void *conn_datainfo_seq_start(struct seq_file *seq, loff_t *pos)

+__acquires(RCU)

+{

+    if (*pos >= 1)

+        return NULL;

+    seq_printf(seq, "start fast4 count:%ld, fw:%ld, local4_recv:%ld, local4_output:%ld\n",

+               (long)skb_num4, (long)fastnat_num, (long)fast_local4_rcv_num, (long)fast_local4_output_num);

+    seq_printf(seq, "start fast6 count:%ld, fw:%ld, local6_recv:%ld, local6_output:%ld\n",

+               (long)skb_num6, (long)fast6_num, (long)fast_local6_rcv_num, (long)fast_local6_output_num);

+    /* was "return 1;" (int-to-pointer); use the seq_file token */
+    return SEQ_START_TOKEN;

+}

+

+static void *conn_datainfo_seq_next(struct seq_file *s, void *v, loff_t *pos)

+{

+    (*pos)++;

+    return NULL;    /* only one record */

+}

+

+static void conn_datainfo_seq_stop(struct seq_file *s, void *v)

+__releases(RCU)

+{

+    return;    /* nothing to release */

+}

+

+/*
+ * conn_datainfo_seq_show - walk the whole conntrack hash and print every
+ * entry matching the tuple_info / getconn_type filters: both tuples plus
+ * the fast-path cache state (sk or dst/brport pointers).
+ * fast_fw_spinlock guards the fast_ct fields per entry; the ct refcount
+ * is pinned around each print.
+ * NOTE(review): the hash walk itself runs without RCU/conntrack locking
+ * here — confirm this is only used for debugging.
+ */
+static int conn_datainfo_seq_show(struct seq_file *s, void *v)

+{

+    /* NOTE(review): j is unused */
+    int i = 0, j = 0;

+    struct nf_conntrack_tuple_hash *h;

+    struct nf_conntrack_tuple_hash *h_rdir;

+    struct nf_conn * ct;

+    struct hlist_nulls_node *n;

+

+    for(i = 0; i < nf_conntrack_htable_size; i++) {

+        hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[i], hnnode) {

+            if(h->tuple.dst.dir != IP_CT_DIR_ORIGINAL)

+                continue;

+            if(tuple_info.dst.protonum && tuple_info.dst.protonum != h->tuple.dst.protonum)

+                continue;

+            /* zeroed filter fields match anything; addresses compare all
+             * 16 bytes so the same test covers v4 and v6 */
+            if(tuple_info.dst.u3.ip && memcmp(&tuple_info.dst.u3.ip6, h->tuple.dst.u3.ip6, 16) != 0)

+                continue;

+            if(tuple_info.src.u3.ip && memcmp(&tuple_info.src.u3.ip6, h->tuple.src.u3.ip6, 16) != 0)

+                continue;

+            if(tuple_info.dst.u.all && tuple_info.dst.u.all != h->tuple.dst.u.all) {

+                continue;

+            }

+            if(tuple_info.src.u.all && tuple_info.src.u.all != h->tuple.src.u.all) {

+                continue;

+            }

+

+            ct = container_of(h, struct nf_conn, tuplehash[h->tuple.dst.dir]);

+

+            spin_lock_bh(&fast_fw_spinlock);

+            if(getconn_type && getconn_type != ct->fast_ct.isFast)

+            {

+                spin_unlock_bh(&fast_fw_spinlock);

+                continue;

+            }

+            if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))

+            {

+                spin_unlock_bh(&fast_fw_spinlock);

+                continue;

+            }

+

+            h_rdir = &ct->tuplehash[IP_CT_DIR_REPLY];

+            if(h->tuple.src.l3num == AF_INET) {

+                /*seq_printf(s, "ctinfo protonum: %d  Original sip: %08x, sport: %d, dip: %08x, dport: %d, packets: %lu , bytes: %lu;",

+                    h->tuple.dst.protonum, ntohl(h->tuple.src.u3.ip), ntohs(h->tuple.src.u.all), ntohl(h->tuple.dst.u3.ip), ntohs(h->tuple.dst.u.all),

+                    ct->packet_info[IP_CT_DIR_ORIGINAL].packets, ct->packet_info[IP_CT_DIR_ORIGINAL].bytes);

+                seq_printf(s, "    reply sip: %08x, sport: %d, dip: %08x, dport: %d, packets: %lu , bytes: %lu\n",

+                    ntohl(h_rdir->tuple.src.u3.ip), ntohs(h_rdir->tuple.src.u.all), ntohl(h_rdir->tuple.dst.u3.ip), ntohs(h_rdir->tuple.dst.u.all),

+                    ct->packet_info[IP_CT_DIR_REPLY].packets, ct->packet_info[IP_CT_DIR_REPLY].bytes);*/

+                seq_printf(s, "ctinfo protonum: %d  Original sip: %08x, sport: %d, dip: %08x, dport: %d;",

+                           h->tuple.dst.protonum, ntohl(h->tuple.src.u3.ip), ntohs(h->tuple.src.u.all), ntohl(h->tuple.dst.u3.ip), ntohs(h->tuple.dst.u.all));

+                seq_printf(s, "    reply sip: %08x, sport: %d, dip: %08x, dport: %d\n",

+                           ntohl(h_rdir->tuple.src.u3.ip), ntohs(h_rdir->tuple.src.u.all), ntohl(h_rdir->tuple.dst.u3.ip), ntohs(h_rdir->tuple.dst.u.all));

+            }

+            else if(h->tuple.src.l3num == AF_INET6) {

+                /*seq_printf(s, "ctinfo  protonum: %d  Original sip: %x:%x:%x:%x:%x:%x:%x:%x sport: %d, dip: %x:%x:%x:%x:%x:%x:%x:%x, dport: %d, packets: %lu , bytes: %lu;",

+                    h->tuple.dst.protonum, ntohs(h->tuple.src.u3.in6.s6_addr16[0]), ntohs(h->tuple.src.u3.in6.s6_addr16[1]), ntohs(h->tuple.src.u3.in6.s6_addr16[2]), ntohs(h->tuple.src.u3.in6.s6_addr16[3]),

+                    ntohs(h->tuple.src.u3.in6.s6_addr16[4]), ntohs(h->tuple.src.u3.in6.s6_addr16[5]), ntohs(h->tuple.src.u3.in6.s6_addr16[6]), ntohs(h->tuple.src.u3.in6.s6_addr16[7]), ntohs(h->tuple.src.u.all),

+                    ntohs(h->tuple.dst.u3.in6.s6_addr16[0]), ntohs(h->tuple.dst.u3.in6.s6_addr16[1]), ntohs(h->tuple.dst.u3.in6.s6_addr16[2]), ntohs(h->tuple.dst.u3.in6.s6_addr16[3]),

+                    ntohs(h->tuple.dst.u3.in6.s6_addr16[4]), ntohs(h->tuple.dst.u3.in6.s6_addr16[5]), ntohs(h->tuple.dst.u3.in6.s6_addr16[6]), ntohs(h->tuple.dst.u3.in6.s6_addr16[7]), ntohs(h->tuple.dst.u.all),

+                    ct->packet_info[IP_CT_DIR_ORIGINAL].packets, ct->packet_info[IP_CT_DIR_ORIGINAL].bytes);

+                seq_printf(s, "    Reply sip: %x:%x:%x:%x:%x:%x:%x:%x sport: %d, dip: %x:%x:%x:%x:%x:%x:%x:%x, dport: %d, packets: %lu , bytes: %lu\n",

+                    ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[0]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[1]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[2]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[3]),

+                    ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[4]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[5]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[6]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[7]), ntohs(h_rdir->tuple.src.u.all),

+                    ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[0]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[1]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[2]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[3]),

+                    ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[4]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[5]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[6]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[7]), ntohs(h_rdir->tuple.dst.u.all),

+                    ct->packet_info[IP_CT_DIR_REPLY].packets, ct->packet_info[IP_CT_DIR_REPLY].bytes);*/

+                seq_printf(s, "ctinfo  protonum: %d  Original sip: %x:%x:%x:%x:%x:%x:%x:%x sport: %d, dip: %x:%x:%x:%x:%x:%x:%x:%x, dport: %d, packets: xx , bytes: xx;",

+                           h->tuple.dst.protonum, ntohs(h->tuple.src.u3.in6.s6_addr16[0]), ntohs(h->tuple.src.u3.in6.s6_addr16[1]), ntohs(h->tuple.src.u3.in6.s6_addr16[2]), ntohs(h->tuple.src.u3.in6.s6_addr16[3]),

+                           ntohs(h->tuple.src.u3.in6.s6_addr16[4]), ntohs(h->tuple.src.u3.in6.s6_addr16[5]), ntohs(h->tuple.src.u3.in6.s6_addr16[6]), ntohs(h->tuple.src.u3.in6.s6_addr16[7]), ntohs(h->tuple.src.u.all),

+                           ntohs(h->tuple.dst.u3.in6.s6_addr16[0]), ntohs(h->tuple.dst.u3.in6.s6_addr16[1]), ntohs(h->tuple.dst.u3.in6.s6_addr16[2]), ntohs(h->tuple.dst.u3.in6.s6_addr16[3]),

+                           ntohs(h->tuple.dst.u3.in6.s6_addr16[4]), ntohs(h->tuple.dst.u3.in6.s6_addr16[5]), ntohs(h->tuple.dst.u3.in6.s6_addr16[6]), ntohs(h->tuple.dst.u3.in6.s6_addr16[7]), ntohs(h->tuple.dst.u.all));

+                seq_printf(s, "    Reply sip: %x:%x:%x:%x:%x:%x:%x:%x sport: %d, dip: %x:%x:%x:%x:%x:%x:%x:%x, dport: %d, packets: xx , bytes: xx\n",

+                           ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[0]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[1]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[2]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[3]),

+                           ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[4]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[5]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[6]), ntohs(h_rdir->tuple.src.u3.in6.s6_addr16[7]), ntohs(h_rdir->tuple.src.u.all),

+                           ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[0]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[1]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[2]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[3]),

+                           ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[4]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[5]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[6]), ntohs(h_rdir->tuple.dst.u3.in6.s6_addr16[7]), ntohs(h_rdir->tuple.dst.u.all));

+            }

+            if(ct->fast_ct.isFast == FAST_CT_LOCAL6 || ct->fast_ct.isFast == FAST_CT_LOCAL4) {

+                seq_printf(s, "ctinfo ->ISFAST: %d, sk: %#llx\n", ct->fast_ct.isFast, (UINT64)ct->fast_ct.sk);

+            } else if(ct->fast_ct.isFast == FAST_CT_FW6 || ct->fast_ct.isFast == FAST_CT_FW4) {

+                seq_printf(s, "ctinfo ->ISFAST: %d", ct->fast_ct.isFast);

+                if(ct->fast_ct.fast_dst[IP_CT_DIR_ORIGINAL])

+                    seq_printf(s, "    Original fast_dst: %#llx", (UINT64)ct->fast_ct.fast_dst[IP_CT_DIR_ORIGINAL]);

+                if(ct->fast_ct.fast_dst[IP_CT_DIR_REPLY])

+                    seq_printf(s, "    Reply fast_dst: %#llx", (UINT64)ct->fast_ct.fast_dst[IP_CT_DIR_REPLY]);

+                if(ct->fast_ct.fast_brport[IP_CT_DIR_ORIGINAL])

+                    seq_printf(s, "    Original fast_brport: %#llx", (UINT64)ct->fast_ct.fast_brport[IP_CT_DIR_ORIGINAL]);

+                if(ct->fast_ct.fast_brport[IP_CT_DIR_REPLY])

+                    seq_printf(s, "    Reply fast_brport: %#llx", (UINT64)ct->fast_ct.fast_brport[IP_CT_DIR_REPLY]);

+                seq_printf(s, "\n");

+            }

+

+            spin_unlock_bh(&fast_fw_spinlock);

+            nf_ct_put(ct);

+        }

+    }

+    return 0;

+}

+

+static const struct seq_operations conn_datainfo_seq_ops= {

+    .start = conn_datainfo_seq_start,

+    .next  = conn_datainfo_seq_next,

+    .stop  = conn_datainfo_seq_stop,

+    .show  = conn_datainfo_seq_show

+

+};

+

+static int conn_datainfo_open(struct inode *inode, struct file *file)

+{

+    return seq_open(file, &conn_datainfo_seq_ops);

+}

+

+/*
+ * str2int - parse a decimal integer from a string.
+ * Skips leading spaces, accepts one optional leading '-', then consumes
+ * digits until the first non-digit character.
+ * Fixes the old parser, which kept honouring spaces and '-' after digits
+ * for as long as the accumulated value was still 0 (so "0-5" parsed as
+ * -5 and "0 5" as 5).
+ */
+static int str2int(char *str)

+{

+    int i = 0, value = 0, negative = 1;

+    int len = strlen(str);

+

+    /* skip leading spaces only */

+    while (i < len && str[i] == ' ')

+        i++;

+

+    /* at most one sign, directly before the digits */

+    if (i < len && str[i] == '-')

+    {

+        negative = -1;

+        i++;

+    }

+

+    for (; i < len; i++)

+    {

+        /* stop at the first non-decimal character */

+        if (str[i] < '0' || str[i] > '9')

+            break;

+        value = value * 10 + (str[i] - '0');

+    }

+    return value * negative;

+}

+

+/*
+ * parse_nofast_port - split a 'split'-separated port list (e.g.
+ * "53+500+4500") into the global nofast_port[] array, dropping invalid
+ * entries (<=0 or >65535) and stopping at NOFAST_PROTO_MAX entries.
+ * Fixes: const-qualifier was silently dropped assigning str to char *,
+ * and kernel min() rejects mismatched operand types -> min_t.
+ */
+static void parse_nofast_port(const char *str, char split)

+{

+    const char *p = NULL;

+    const char *pre = str;

+    char portStr[PORT_LEN] = {0}; /* max port number is 65535 */

+    int count = 0, port = 0, len = 0;

+

+    memset(nofast_port, 0, NOFAST_PROTO_MAX * sizeof(nofast_port[0]));

+

+    for (; (p = strchr(pre, split)) != NULL; pre = p + 1)

+    {

+        /* token is empty (separator at the very start / doubled) */

+        if (p == pre)

+            continue;

+

+        memset(portStr, 0, PORT_LEN);

+        len = min_t(int, p - pre, PORT_LEN - 1);

+        snprintf(portStr,len+1,"%s",pre);

+        port = str2int(portStr);

+        if (port <= 0 || port > 65535) /* max port number is 65535 */

+        {

+            continue;

+        }

+        nofast_port[count++] = port;

+        if (count == NOFAST_PROTO_MAX)

+            return;

+    }

+

+    if (*pre != '\0') /* trailing token with no separator after it */

+    {

+        memset(portStr, 0, PORT_LEN);

+        len = min_t(int, str + strlen(str) - pre, PORT_LEN - 1);

+        snprintf(portStr,len+1,"%s",pre);

+        port = str2int(portStr);

+        if (port <= 0 || port > 65535) /* max port number is 65535 */

+        {

+            return;

+        }

+        nofast_port[count++] = port;

+    }

+}

+

+/*
+ * nofast_port_set - proc write handler: parse a '+'-separated list of
+ * ports that must bypass fast forwarding.
+ * Fixes: (count - 1) wrapped for count == 0, and the old cap of 1024
+ * could fill the whole buffer leaving no NUL terminator for the
+ * strchr/strlen based parser; kernel min() type mismatch -> min_t.
+ */
+static ssize_t nofast_port_set(struct file *file,

+                               const char __user *buffer, size_t count, loff_t *pos)

+{

+    size_t size;

+    char proto[1024] = {0};

+

+    /* count includes one trailing terminator; always keep a final NUL */

+    if (count < 2)

+        return -EINVAL;

+

+    size = min_t(size_t, count - 1, sizeof(proto) - 1);

+    if (copy_from_user(proto, buffer, size))

+        return -EFAULT;

+

+    /* parse the '+'-separated port list */

+    parse_nofast_port(proto, '+');

+

+    return count;

+}

+

+extern int in4_pton(const char *src, int srclen,

+                    u8 *dst,

+                    int delim, const char **end);

+extern int in6_pton(const char *src, int srclen,

+                    u8 *dst,

+                    int delim, const char **end);

+

+

+/*
+ * conn_datainfo_get_str - copy the token [start, end) into str and
+ * NUL-terminate it.  str must have room for (end - start + 1) bytes.
+ * Fixes the old strncat version, which appended at strlen(str) but
+ * wrote the terminator at a fixed offset — corrupting str whenever it
+ * was not empty on entry.
+ */
+static void conn_datainfo_get_str(char *str, char *start, char *end) {

+    size_t len = end - start;

+

+    memcpy(str, start, len);

+    str[len] = '\0';

+}

+

+/***************************************

+ÊäÈë¸ñʽ:    Ô´ip+Ô´port+Ä¿µÄip+Ä¿µÄport+l4ЭÒéÀàÐÍ+fastÁ´½ÓÀàÐÍ(²Î¿¼ enum conn_fast_type)

+ʵÀý:        192.168.0.100+1111+192.168.30.102+2222+6+4

+×¢Òâ:        ȱʡijһÏĬÈÏÄÇÒ»ÏîȫƥÅä

+            +++++:±íʾÊä³öÈ«²¿Á´½ÓÐÅÏ¢

+***************************************/

+static ssize_t conn_datainfo_set(struct file *file,

+                                 const char __user *buffer, size_t count, loff_t *pos)

+{

+    char tuple[1024] = "";

+    int i = 0;

+    char *split[5];

+    char sip[40] = "";

+    char sport[6] = "";

+    char dip[40] = "";

+    char dport[6] = "";

+    char protonum[6] = "";

+    char conn_type[6] = "";

+    const char *end;

+

+    tuple[1023] = '\0';

+    if (strncpy_from_user(tuple, (char *)buffer, count) <= 0) {

+        return -EFAULT;

+    }

+    memset(&tuple_info, 0,sizeof(struct nf_conntrack_tuple));

+    for(i = 0; i < 5; i++) {

+        if(i == 0)

+            split[i] = strchr(tuple, '+');

+        else

+            split[i] = strchr(split[i-1] + 1, '+');

+        if(!split[i])

+            goto err_out;

+        switch(i) {

+        case 0:

+            conn_datainfo_get_str(sip, tuple, split[i]);

+            break;

+        case 1:

+            conn_datainfo_get_str(sport, split[i-1] + 1, split[i]);

+            break;

+        case 2:

+            conn_datainfo_get_str(dip, split[i-1] + 1, split[i]);

+            break;

+        case 3:

+            conn_datainfo_get_str(dport, split[i-1] + 1, split[i]);

+            break;

+        case 4:

+            conn_datainfo_get_str(protonum, split[i-1] + 1, split[i]);

+            break;

+        default:

+            goto err_out;

+        }

+    }

+    strncat(conn_type, split[i-1] + 1, sizeof(conn_type)-strlen(conn_type)-1);

+    if(strlen(sip) > 0) {

+        if(strchr(sip,'.') != NULL && in4_pton(sip, strlen(sip), (u8 *)&tuple_info.src.u3.in, -1, &end) != 1)

+            goto err_out;

+        else if(strchr(sip,':') != NULL && in6_pton(sip, strlen(sip), (u8 *)&tuple_info.src.u3.in6, -1, &end) != 1)

+            goto err_out;

+    }

+    if(strlen(sport) > 0) {

+        for(i = 0; i < strlen(sport); i++) {

+            if(sport[i] < '0' || sport[i] > '9')

+                goto err_out;

+            tuple_info.src.u.all = sport[i] - '0' + tuple_info.src.u.all*10;

+        }

+        if(tuple_info.src.u.all > 65535)

+            goto err_out;

+        tuple_info.src.u.all = htons(tuple_info.src.u.all);

+    }

+    if(strlen(dip) > 0) {

+        if(strchr(dip,'.') != NULL && in4_pton(dip, strlen(dip), (u8 *)&tuple_info.dst.u3.in, -1, &end) != 1)

+            goto err_out;

+        else if(strchr(dip,':') != NULL && in6_pton(dip, strlen(dip), (u8 *)&tuple_info.dst.u3.in6, -1, &end) != 1)

+            goto err_out;

+    }

+    if(strlen(dport) > 0) {

+        for(i = 0; i < strlen(dport); i++) {

+            if(dport[i] < '0' || dport[i] > '9')

+                goto err_out;

+            tuple_info.dst.u.all = dport[i] - '0' + tuple_info.dst.u.all*10;

+        }

+        if(tuple_info.dst.u.all > 65535)

+            goto err_out;

+        tuple_info.dst.u.all = htons(tuple_info.dst.u.all);

+    }

+    if(strlen(protonum) > 0) {

+        for(i = 0; i < strlen(protonum); i++) {

+            if(protonum[i] < '0' || protonum[i] > '9')

+                goto err_out;

+            tuple_info.dst.protonum = protonum[i] - '0' + tuple_info.dst.protonum*10;

+        }

+    }

+    if(strlen(conn_type) > 0) {

+        getconn_type = 0;

+        for(i = 0; i < strlen(conn_type) - 1; i++) {

+            if(conn_type[i] < '0' || conn_type[i] > '9')

+                goto err_out;

+            getconn_type = conn_type[i] - '0' + getconn_type*10;

+        }

+    }

+    return count;

+err_out:

+    memset(&tuple_info, 0,sizeof(struct nf_conntrack_tuple));

+    getconn_type = 0;

+    return -EFAULT;

+}

+

+extern int pkt_lost_track;

+static void *pkt_lostinfo_seq_start(struct seq_file *seq, loff_t *pos) /* single-shot seq_file start: one iteration only */

+__acquires(RCU)

+{

+    if (*pos >= 1)

+        return NULL;

+    return 1; /* NOTE(review): returns an int as void*; the conventional value is SEQ_START_TOKEN */

+}

+

+static void *pkt_lostinfo_seq_next(struct seq_file *s, void *v, loff_t *pos) /* no further records: always terminates the iteration */

+{

+    (*pos)++;

+    return NULL;

+}

+

+static void pkt_lostinfo_seq_stop(struct seq_file *s, void *v) /* nothing to release */

+__releases(RCU)

+{

+    return;

+}

+

+static int pkt_lostinfo_seq_show(struct seq_file *s, void *v) /* placeholder: emits no output yet */

+{

+    return 0;

+

+}

+

+static const struct seq_operations pkt_lostinfo_seq_ops= { /* seq_file iterator for the (currently disabled) pkt_lostinfo proc entry */

+    .start = pkt_lostinfo_seq_start,

+    .next  = pkt_lostinfo_seq_next,

+    .stop  = pkt_lostinfo_seq_stop,

+    .show  = pkt_lostinfo_seq_show,

+};

+

+static int pkt_lostinfo_open(struct inode *inode, struct file *file) /* proc open: bind the seq_file iterator */

+{

+    return seq_open(file, &pkt_lostinfo_seq_ops);

+}

+

+static ssize_t pkt_lostinfo_set(struct file *file, /* proc write handler: validates a single '0'/'1' character but currently stores nothing */

+                                const char __user *buffer, size_t count, loff_t *pos)

+{

+    size_t size; /* NOTE(review): unused */

+    char temp[5] = {0};

+

+    // count includes one trailing terminator; only inputs 0-1 are supported

+    if (count != 2)

+        return -EINVAL;

+

+    if (copy_from_user(temp, buffer, 1))

+        return -EFAULT;

+

+    if (temp[0] < '0' || temp[0] > '1')

+        return -EINVAL;

+

+

+

+    return count;

+}

+

+

+static ssize_t dev_reset_set(struct file *file, /* proc write handler: zero the stats of the named net device */

+		const char __user *buffer, size_t count, loff_t *pos)

+{

+	struct net_device *dev = NULL;

+	size_t size;

+	char dev_name[MAX_NET_DEVICE_NAME_LEN + 1] = {0};

+

+	// count includes one trailing terminator

+	size = min(count - 1, MAX_NET_DEVICE_NAME_LEN); /* NOTE(review): count == 0 would wrap count - 1 to SIZE_MAX -- confirm a zero-length write cannot reach here */

+	if (copy_from_user(dev_name, buffer, size))

+		return -EFAULT;

+

+	// clear the stats related to this net device

+	dev = dev_get_by_name(&init_net, dev_name);

+	if (dev){

+		memset(&dev->stats, 0, sizeof(struct net_device_stats));

+		atomic_long_set(&dev->rx_dropped, 0);

+		dev_put(dev); /* drop the reference taken by dev_get_by_name */

+	}else

+		printk("dev_reset_set %s not find\n", dev_name);

+	return count;

+}

+

+

+static void *ct_iptables_syn_seq_start(struct seq_file *seq, loff_t *pos) /* single-shot seq_file start: one iteration only */

+__acquires(RCU)

+{

+	if (*pos >= 1)

+		return NULL;

+	return 1; /* NOTE(review): returns an int as void*; the conventional value is SEQ_START_TOKEN */

+}

+

+static void *ct_iptables_syn_seq_next(struct seq_file *s, void *v, loff_t *pos) /* no further records: always terminates the iteration */

+{

+	(*pos)++;

+	return NULL;

+}

+

+static void ct_iptables_syn_seq_stop(struct seq_file *s, void *v) /* nothing to release */

+__releases(RCU)

+{

+	return;

+}

+

+static int ct_iptables_syn_seq_show(struct seq_file *s, void *v) /* print the current value of the ct_iptables_syn_sw switch (defined elsewhere) */

+{

+	seq_printf(s, "ct_iptables_syn_sw: %u\n", ct_iptables_syn_sw);

+	return 0;

+

+}

+

+static const struct seq_operations ct_iptables_syn_seq_ops= { /* seq_file iterator for /proc/net/ct_iptables_syn */

+	.start = ct_iptables_syn_seq_start,

+	.next  = ct_iptables_syn_seq_next,

+	.stop  = ct_iptables_syn_seq_stop,

+	.show  = ct_iptables_syn_seq_show,

+};

+

+static int ct_iptables_syn_open(struct inode *inode, struct file *file) /* proc open: bind the seq_file iterator */

+{

+    return seq_open(file, &ct_iptables_syn_seq_ops);

+}

+

+static ssize_t ct_iptables_syn_set(struct file *file, /* proc write handler: set ct_iptables_syn_sw to 0 or 1 */

+                                const char __user *buffer, size_t count, loff_t *pos)

+{

+    size_t size; /* NOTE(review): unused */

+    char temp[5] = {0};

+

+    // count includes one trailing terminator; only inputs 0-1 are supported

+    if (count != 2)

+        return -EINVAL;

+

+    if (copy_from_user(temp, buffer, 1))

+        return -EFAULT;

+

+    if (temp[0] < '0' || temp[0] > '1')

+        return -EINVAL;

+

+	ct_iptables_syn_sw = (unsigned int)(temp[0] - '0');

+

+    return count;

+}

+

+static const struct proc_ops fastnat_level_file_ops = { /* /proc/net/fastnat_level file operations */

+    .proc_open    = fastnat_level_open,

+    .proc_read    = seq_read,

+    .proc_lseek  = seq_lseek,

+    .proc_release = seq_release, //seq_release_private also seems to work, but single_release and seq_release_net hit a NULL-pointer exception

+    .proc_write = fastnat_level_set,

+};

+

+static const struct proc_ops fast_switch_file_ops = { /* /proc/net/fast_switch file operations */

+    .proc_open    = fast_switch_open,

+    .proc_read    = seq_read,

+    .proc_lseek  = seq_lseek,

+    .proc_release = seq_release, //seq_release_private also seems to work, but single_release and seq_release_net hit a NULL-pointer exception

+    .proc_write = fast_switch_set,

+};

+

+static const struct proc_ops fastbr_level_file_ops = { /* /proc/net/fastbr_level file operations */

+    .proc_open    = fastbr_level_open,

+    .proc_read    = seq_read,

+    .proc_lseek  = seq_lseek,

+    .proc_release = seq_release,

+    .proc_write = fastbr_level_set,

+};

+

+static const struct proc_ops fastnat_file_ops = { /* /proc/net/fastnat file operations (read-only) */

+    .proc_open    = fastnat_open,

+    .proc_read    = seq_read,

+    .proc_lseek  = seq_lseek,

+    .proc_release = seq_release,

+};

+

+static const struct proc_ops fast6_file_ops = { /* /proc/net/fast6 file operations (read-only) */

+    .proc_open    = fast6_open,

+    .proc_read    = seq_read,

+    .proc_lseek  = seq_lseek,

+    .proc_release = seq_release,

+};

+

+static const struct proc_ops dev_down_file_ops = { /* /proc/net/dev_down: write-only entry */

+    .proc_write = dev_down_set,

+};

+

+static const struct proc_ops nofast_port_file_ops = { /* /proc/net/nofast_port file operations */

+    .proc_open    = nofast_port_open,

+    .proc_read    = seq_read,

+    .proc_lseek  = seq_lseek,

+    .proc_release = seq_release,

+    .proc_write = nofast_port_set,

+};

+

+static const struct proc_ops conn_datainfo_file_ops = { /* /proc/net/conn_datainfo file operations */

+    .proc_open = conn_datainfo_open,

+    .proc_read = seq_read,

+    .proc_lseek = seq_lseek,

+    .proc_release = seq_release,

+    .proc_write = conn_datainfo_set

+};

+

+static const struct proc_ops pkt_lostinfo_file_ops = { /* pkt_lostinfo file operations (the proc entry is currently commented out below) */

+    .proc_open = pkt_lostinfo_open,

+    .proc_read = seq_read,

+    .proc_lseek = seq_lseek,

+    .proc_release = seq_release,

+    .proc_write = pkt_lostinfo_set,

+};

+

+static const struct proc_ops dev_reset_file_ops = { /* /proc/net/dev_reset_stats: write-only entry */

+	.proc_write = dev_reset_set,

+};

+

+static const struct proc_ops ct_iptables_syn_file_ops = { /* /proc/net/ct_iptables_syn file operations */

+    .proc_open = ct_iptables_syn_open,

+    .proc_read = seq_read,

+    .proc_lseek = seq_lseek,

+    .proc_release = seq_release,

+    .proc_write = ct_iptables_syn_set,

+};

+

+

+// initialization of the fast-forwarding proc files

+int fast_conntrack_init_proc(void) /* registers all fast-forwarding proc entries under /proc/net; always returns 1. NOTE(review): proc_create return values are not checked, and mode 0440 is combined with write handlers -- confirm intended */

+{

+    // master switch for L3 fast forwarding

+    proc_create("fastnat_level", 0440, init_net.proc_net, &fastnat_level_file_ops);

+

+    // new/old L3 fast-forwarding switch plus the per-type switches

+    proc_create("fast_switch", 0440, init_net.proc_net, &fast_switch_file_ops);

+

+    // L2 fast-forwarding level 0-1

+    proc_create("fastbr_level", 0440, init_net.proc_net, &fastbr_level_file_ops);

+

+    // ipv4 fast-forwarding statistics

+    proc_create("fastnat", 0440, init_net.proc_net, &fastnat_file_ops);

+

+    // ipv6 fast-forwarding statistics

+    proc_create("fast6", 0440, init_net.proc_net, &fast6_file_ops);

+

+    // some devices cannot be downed, but their related links must still be removed after deactivation

+    proc_create("dev_down", 0440, init_net.proc_net, &dev_down_file_ops);

+

+    // protocol ports excluded from fastnat, dynamically configurable

+    proc_create("nofast_port", 0440, init_net.proc_net, &nofast_port_file_ops);

+

+    // read connection info

+    proc_create("conn_datainfo", 0440, init_net.proc_net, &conn_datainfo_file_ops);

+

+    // read connection packet-loss info (currently disabled)

+    //proc_create("pkt_lostinfo", 0440, init_net.proc_net, &pkt_lostinfo_file_ops);

+

+    //reset dev stats

+    proc_create("dev_reset_stats", 0440, init_net.proc_net, &dev_reset_file_ops);

+

+	proc_create("ct_iptables_syn", 0440, init_net.proc_net, &ct_iptables_syn_file_ops);

+    return 1;

+}

+

+EXPORT_SYMBOL(fast_conntrack_init_proc);

+

+

diff --git a/upstream/linux-5.10/net/core/fastproc/fastnat.c b/upstream/linux-5.10/net/core/fastproc/fastnat.c
new file mode 100755
index 0000000..717454c
--- /dev/null
+++ b/upstream/linux-5.10/net/core/fastproc/fastnat.c
@@ -0,0 +1,687 @@
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/icmp.h>
+#include <net/ip.h>
+#include <linux/if_arp.h>
+#include <linux/timer.h>
+#include <linux/inetdevice.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_arp.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter/xt_multiport.h>
+#include <linux/netfilter/xt_iprange.h>
+#include <linux/netfilter/nf_conntrack_tcp.h>
+#include <net/checksum.h>
+#include <net/dsfield.h>
+#include <net/route.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <net/SI/fastnat.h>
+#include <net/SI/net_other.h>
+#include <net/SI/netioc_proc.h>
+#include <net/SI/print_sun.h>
+#include <net/SI/net_track.h>
+#include <linux/netfilter.h>
+#include <net/SI/fast_common.h>
+
+MODULE_LICENSE("GPL");
+
+/* ***************** ipv4 fast-forwarding related variables ************************* */
+spinlock_t fastnat_spinlock;          // spinlock protecting operations on the connection list
+fast_list_t working_list = {0}; /* global list of fast-forwarding entries */
+struct hlist_nulls_head *working_hash; /* hash of fast entries; looked up under RCU in fast_nat_recv */
+
+/* **************************** function declarations ************************ */
+
+
+/* **************************** function implementations ************************ */
+
+/* extract the packet's 5-tuple */
+static inline int fast_nat_get_tuple(struct sk_buff *skb, struct nf_conntrack_tuple *tuple) /* fill *tuple from the IP + TCP/UDP headers at skb->data; returns 0 on success, -1 for any other protocol */
+{
+    struct iphdr  *iph;
+    struct udphdr *udph;
+    struct tcphdr *tcph;
+#if 0
+    if (!skb || !tuple)
+    {
+        return -1;
+    }
+
+    /* only IP packets */
+    if (htons(ETH_P_IP) != skb->protocol)
+    {
+        return -1;
+    }
+#endif
+    iph = (struct iphdr *)skb->data; /* assumes skb->data points at the IP header -- caller guarantees this */
+#if 0
+    /* not deal with fragment packets now */
+    if (ntohs(iph->frag_off) & (IP_MF | IP_OFFSET))
+    {
+        skbinfo_add(NULL,SKB_FRAG);
+        return -1;
+    }
+
+    if (iph->ttl <= 1)
+    {
+        return -1;
+    }
+#endif
+    memset(tuple, 0, sizeof(struct nf_conntrack_tuple));
+
+    /* only tcp/udp */
+    if (IPPROTO_UDP == iph->protocol)
+    {
+        udph = (struct udphdr *)(skb->data + iph->ihl * 4);
+        tuple->src.u.udp.port = udph->source;
+        tuple->dst.u.udp.port = udph->dest;
+        skb_udpnum++; /* global UDP counter, defined elsewhere */
+    }
+    else if (IPPROTO_TCP == iph->protocol)
+    {
+        tcph = (struct tcphdr *)(skb->data + iph->ihl * 4);
+        tuple->src.u.tcp.port = tcph->source;
+        tuple->dst.u.tcp.port = tcph->dest;
+        skb_tcpnum++; /* global TCP counter, defined elsewhere */
+    }
+    else
+    {
+        return -1;
+    }
+
+    tuple->src.l3num = AF_INET;
+    tuple->src.u3.ip = iph->saddr;
+    tuple->dst.u3.ip = iph->daddr;
+    tuple->dst.protonum = iph->protocol;
+    tuple->dst.dir = IP_CT_DIR_ORIGINAL;
+
+    return 0;
+}
+
+// this must stay consistent with dev_xmit_complete
+// being inline it cannot be moved to a common file; fastnat and fast6 each keep a copy
+static inline bool start_xmit_complete(int rc) /* true when the driver consumed the skb */
+{
+    /*
+     * Positive cases with an skb consumed by a driver:
+     * - successful transmission (rc == NETDEV_TX_OK)
+     * - error while transmitting (rc < 0)
+     * - error while queueing to a different device (rc & NET_XMIT_MASK)
+     */
+    if (likely(rc < NET_XMIT_MASK))
+        return true;
+
+    return false;
+}
+#ifndef CONFIG_PREEMPT_RT_FULL
+extern int *vir_addr_ddrnet;
+#endif
+
+// fast path for ipv4 packets; the hash is protected by RCU, the global connection list by a spinlock
+int fast_nat_recv(struct sk_buff *skb) /* returns 1 when the packet was consumed by the fast path, 0 to fall back to the standard stack */
+{
+    struct nf_conntrack_tuple tuple;
+    fast_entry_data_t *nat_entry_data = NULL;
+    fast_entry_t *nat_entry = NULL;
+    struct iphdr *iph = NULL;
+    struct udphdr *udph = NULL;
+    struct tcphdr *tcph = NULL;
+    __sum16 *cksum = NULL;
+    __be32 *oldip = NULL;
+    __be16 *oldport = NULL;
+    struct net_device *dev = NULL;
+    //u_int32_t skip_nat = 0;
+    struct sk_buff *skb2 = NULL;
+
+    iph = (struct iphdr *)skb->data; /* assumes skb->data is at the IP header here */
+    //if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)
+        //goto err_out;
+
+    if (fast_nat_get_tuple(skb, &tuple) < 0)
+    {
+        print_sun(SUN_DBG, "fast_nat_get_tuple  ERR  !!!\n");
+        goto err_out;
+    }
+
+    rcu_read_lock(); /* hash lookups below are RCU-protected */
+    nat_entry_data = fast_find_entry_data(working_hash, &tuple);
+    if (unlikely(nat_entry_data == NULL))
+    {
+        rcu_read_unlock();
+        print_sun(SUN_DBG, "fast_nat_find  ERR  !!!\n");
+        goto err_out;
+    }
+
+    dev = nat_entry_data->outdev;
+    if (unlikely(!dev))
+    {
+        rcu_read_unlock();
+        goto err_out;
+    }
+
+    /* check whether the packet length exceeds the egress device MTU */
+    if (unlikely(skb->len > dev->mtu))
+    {
+        skbinfo_add(NULL, SKB_OVER_MTU);
+        rcu_read_unlock();
+        print_sun(SUN_DBG, "fast_nat_recv outdev mtu ERR !!!\n");
+        goto err_out;
+    }
+	
+    // packets that would loop back out of their ingress device are freed and counted
+    if (unlikely(skb->dev == dev))
+    {
+        skbinfo_add(NULL, SKB_LOOP);
+        rcu_read_unlock();
+        kfree_skb(skb);
+        return 1;
+    }
+
+    nat_entry = fast_data_to_entry(nat_entry_data);
+    if (unlikely(!nat_entry))
+    {
+        rcu_read_unlock();
+        print_sun(SUN_DBG, "fast_nat_recv fast_nat_data_to_entry null !!!\n");
+        goto err_out;
+    }
+
+    /* take FASTNAT only when the fast link is established in both directions; otherwise use the standard path */
+    if ((nat_entry->flags != FAST_ALL_DIR) && (IPPROTO_UDP != iph->protocol))
+    {
+        rcu_read_unlock();
+        print_sun(SUN_DBG, "fast_nat_recv flags is not FAST_ALL_DIR !!!\n");
+        goto err_out;
+    }
+
+    if (unlikely(!(skb2 = fast_expand_headroom(skb, dev)))) {
+        rcu_read_unlock();
+        return 1;
+    }
+
+    if (unlikely(skb2 != skb))
+    {
+        iph = (struct iphdr *)skb2->data; /* headroom expansion may reallocate; re-derive the header pointer */
+        skb = skb2;
+    }
+
+    fast_tcpdump(skb);
+
+    // if packet capture matched, the skb was cloned; fastnat must modify the data, so take a private copy
+    if (skb_cloned(skb))
+    {
+        print_sun(SUN_DBG, "fast_nat_recv clone \n");
+        if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+        {
+            rcu_read_unlock();
+            print_sun(SUN_DBG, "fast_nat_recv clone copy failed !!!\n");
+            kfree_skb(skb);
+            return 1;
+        }
+    }
+
+
+    // only once fastnat has succeeded, record the IP header start for later cache refresh use
+    skb_reset_network_header(skb);
+    skb->isFastnat = 1;
+	if (likely(skb_get_nfct(skb) == 0)){
+		skb_set_nfct(skb, (unsigned long)nat_entry->ct);
+		nf_conntrack_get(&nat_entry->ct->ct_general); /* hold the conntrack while the skb references it */
+	}
+    // no NAT: source/destination address and ports need no change, pass straight through
+    //if (nat_entry_data->is_not_nat)
+        //skip_nat = 1;
+    if (!nat_entry_data->is_not_nat)//(!skip_nat)
+    {
+        /* perform the NAT translation */
+        if (IPPROTO_TCP == iph->protocol)
+        {
+            tcph = (struct tcphdr *)(skb->data + iph->ihl * 4);
+            cksum = &tcph->check;
+            oldport = (FN_TYPE_SRC == nat_entry_data->type)? (&tcph->source): (&tcph->dest);
+        }
+        else if (IPPROTO_UDP == iph->protocol)
+        {
+            udph = (struct udphdr *)(skb->data + iph->ihl * 4);
+            cksum = &udph->check;
+            oldport = (FN_TYPE_SRC == nat_entry_data->type)? (&udph->source): (&udph->dest);
+        }
+
+        oldip = (FN_TYPE_SRC == nat_entry_data->type)? (&iph->saddr) : (&iph->daddr);
+
+        if (cksum != NULL && (0!=*cksum || IPPROTO_TCP == iph->protocol)) /* a zero UDP checksum means "no checksum" and is left untouched */
+        {
+            inet_proto_csum_replace4(cksum, skb, *oldip, nat_entry_data->nat_addr, 0);
+            inet_proto_csum_replace2(cksum, skb, *oldport, nat_entry_data->nat_port, 0);
+        }
+        csum_replace4(&iph->check, *oldip, nat_entry_data->nat_addr);
+        if(oldport)
+            *oldport = nat_entry_data->nat_port;
+        *oldip = nat_entry_data->nat_addr;
+    }
+    else
+    {
+        if (IPPROTO_TCP == iph->protocol)
+        {
+            tcph = (struct tcphdr *)(skb->data + iph->ihl * 4); /* still needed below for TCP timeout refresh */
+        }
+    }
+
+    skb->priority = nat_entry_data->priority;
+    skb->mark = nat_entry_data->mark;
+
+    // per-ct traffic accounting --- counts IP packets, not MAC frames
+    if (nat_entry_data->tuplehash.tuple.dst.dir == IP_CT_DIR_ORIGINAL){
+        nat_entry->ct->packet_info[IP_CT_DIR_ORIGINAL].packets++;
+        nat_entry->ct->packet_info[IP_CT_DIR_ORIGINAL].bytes += skb->len;
+		if(unlikely(nat_entry->ct->indev[IP_CT_DIR_ORIGINAL] == NULL))
+			nat_entry->ct->indev[IP_CT_DIR_ORIGINAL] = skb->indev;
+		if(unlikely(nat_entry->ct->outdev[IP_CT_DIR_ORIGINAL] == NULL))
+			nat_entry->ct->outdev[IP_CT_DIR_ORIGINAL] = dev;
+    } else if (nat_entry_data->tuplehash.tuple.dst.dir == IP_CT_DIR_REPLY){
+        nat_entry->ct->packet_info[IP_CT_DIR_REPLY].packets++;
+        nat_entry->ct->packet_info[IP_CT_DIR_REPLY].bytes += skb->len;
+		if(unlikely(nat_entry->ct->indev[IP_CT_DIR_REPLY] == NULL))
+			nat_entry->ct->indev[IP_CT_DIR_REPLY] = skb->indev;
+		if(unlikely(nat_entry->ct->outdev[IP_CT_DIR_REPLY] == NULL))
+			nat_entry->ct->outdev[IP_CT_DIR_REPLY] = dev;
+    } else {
+        printk("fastnat packet error\n");
+    }
+
+    // the kernel's own per-connection traffic accounting
+    struct nf_conn_counter *acct = (struct nf_conn_counter *)nf_conn_acct_find((const struct nf_conn *)nat_entry->ct);
+    if (acct) {
+        enum ip_conntrack_info ctinfo;
+        if (nat_entry_data->tuplehash.tuple.dst.dir == IP_CT_DIR_ORIGINAL)
+            ctinfo = IP_CT_ESTABLISHED;
+        else
+            ctinfo = IP_CT_ESTABLISHED_REPLY;
+
+        atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets);
+        atomic64_add(skb->len, &acct[CTINFO2DIR(ctinfo)].bytes);
+    }
+
+    /* custom feature: with one-way UDP flooding indev is otherwise unknown for traffic accounting, so record it here */
+    if (unlikely(nat_entry_data->indev == NULL))
+    {
+        nat_entry_data->indev = skb->dev;
+    }
+
+    // count rx packets on the ingress device --- like the native linux drivers, lengths counted are IP packet lengths
+    if (likely(fastnat_level == FAST_NET_DEVICE))
+    {
+        nat_entry_data->indev->stats.rx_packets++;
+        nat_entry_data->indev->stats.rx_bytes += skb->len;
+    }
+
+    skb->dev = dev;
+
+    // only fill in the MAC header when one was pre-recorded; otherwise skb->data stays at the IP header
+    skb_push(skb, ETH_HLEN);
+    if (likely(nat_entry_data->hh_flag))
+    {
+        memcpy(skb->data, nat_entry_data->hh_data, ETH_HLEN);
+    }
+
+    /* refresh the connection timeout */
+    if (IPPROTO_TCP == iph->protocol)
+    {
+        mod_timer(&nat_entry->timeout, jiffies + tcp_timeouts[nat_entry->ct->proto.tcp.state]);
+        update_tcp_timeout(nat_entry, nat_entry_data, tcph);
+        nat_entry->ct->timeout = jiffies + tcp_timeouts[nat_entry->ct->proto.tcp.state];
+
+        if(ackfilter(skb, nat_entry, &working_list) == 1) /* skb consumed by the ACK filter */
+        {
+            rcu_read_unlock();
+            //spin_unlock_bh(&fastnat_spinlock);
+            return 1;
+        }
+    }
+    else if (IPPROTO_UDP == iph->protocol)
+    {
+        /*udp*/
+        if (test_bit(IPS_SEEN_REPLY_BIT, &nat_entry->ct->status))
+        {
+            mod_timer(&nat_entry->timeout, jiffies + fast_udp_timeout_stream);
+            nat_entry->ct->timeout = jiffies + fast_udp_timeout_stream;
+        }
+        else
+        {
+            mod_timer(&nat_entry->timeout, jiffies + fast_udp_timeout);
+            nat_entry->ct->timeout = jiffies + fast_udp_timeout;
+        }
+    }
+
+    if (likely(skb->dev->flags & IFF_UP))
+    {
+        // ppp only transmits the bare IP packet
+		if (unlikely(skb->dev->type == ARPHRD_PPP))//(strncmp(skb->dev->name, ppp_name, strlen(ppp_name)) == 0)
+        {
+            skb_pull(skb, ETH_HLEN);
+        }
+
+        skb->now_location |= FASTNAT_SUCC;
+        if (likely(fastnat_level == FAST_NET_DEVICE))
+        {
+            //print_sun(SUN_DBG, "fastnat-2 dev_queue_xmit, send to:%s iph->id=0x%02x!!!!!!!! \n", skb->dev->name, iph->id);
+			if (skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev) >= NET_XMIT_MASK) {
+				skb->dev->stats.tx_dropped++;
+				kfree_skb(skb);
+			}
+        }
+        // for traffic control and other special applications only the standard fastnat path works, otherwise plugins cannot run
+        else if (fastnat_level == FAST_NET_CORE)
+        {
+            //print_sun(SUN_DBG, "fastnat ok-1, send to:%s !!!!!!!! \n", skb->dev->name);
+
+            dev_queue_xmit(skb);
+        }
+        else
+            print_sun(SUN_DBG,"fastnat_level:%d is not supported !!!!!!!! \n", fastnat_level);
+
+        nat_entry_data->packet_num++;
+    }
+    else
+    {
+        print_sun(SUN_DBG, "ERR &&&&&& %s DOWN, kfree_skb!!!!!!!! \n", skb->dev->name);
+        kfree_skb(skb);
+    }
+
+    //print_sun(SUN_DBG, "skb : 0x%x, fastnat succ--------", skb);
+    //nf_ct_dump_tuple(&tuple);
+    rcu_read_unlock();
+
+    return 1;
+
+err_out :
+    print_sun(SUN_DBG, "skb : 0x%x, fastnat FAIL!!!!!!!!!!", skb);
+    return 0; /* not fast nat */
+}
+
+static struct nf_hook_ops nat_hook = { /* POST_ROUTING hook that builds fast links (see napt_handle below) */
+    .hook = napt_handle,
+//    .owner = THIS_MODULE,
+    .pf = PF_INET,
+    .hooknum = NF_INET_POST_ROUTING,
+    .priority = NF_IP_PRI_LAST,
+};
+
+// runs at the POST_ROUTING node; fills in the fast link and adds it to the hash table
+unsigned int napt_handle(void *priv, /* always returns NF_ACCEPT -- it only records state, never drops packets */
+                         struct sk_buff *skb,
+                         const struct nf_hook_state *state)
+{
+    struct nf_conn *ct;
+    enum ip_conntrack_info ctinfo;
+    u_int8_t protocol;
+    fast_entry_t *nat_entry;
+    fast_entry_data_t *entry_data;
+    enum ip_conntrack_dir dir, rdir;
+    struct dst_entry *dst = skb_dst(skb);
+    struct net_device *out = state->out;
+#ifdef CONFIG_ATHRS_HW_NAT
+    u_int32_t mask =0;
+#endif
+    struct neighbour *_neighbour = NULL;
+
+    if (fastnat_level == FAST_CLOSE || fastnat_level == FAST_CLOSE_KEEP_LINK)
+    {
+        return NF_ACCEPT;
+    }
+
+    if (test_bit(FAST_TYPE_VERSION_BIT, &fast_switch))
+    {
+        return NF_ACCEPT;
+    }
+
+    if (ip_hdr(skb)->protocol != IPPROTO_TCP && ip_hdr(skb)->protocol != IPPROTO_UDP)
+        return NF_ACCEPT;
+
+    if (!out)
+    {
+        return NF_ACCEPT;
+    }
+
+    // no link setup for broadcast or multicast
+    if (ipv4_is_multicast(ip_hdr(skb)->daddr) || ipv4_is_lbcast(ip_hdr(skb)->daddr))
+    {
+        return NF_ACCEPT;
+    }
+
+    // watch whether the fast link table fills up often; if so, consider evicting the oldest entry
+    if (working_list.count > nf_conntrack_max)
+    {
+        return NF_ACCEPT;
+    }
+
+    if (!dst)
+    {
+        return NF_ACCEPT;
+    }
+
+    _neighbour = dst_neigh_lookup_skb(dst, skb); /* takes a reference; released at accept: */
+    if (!_neighbour)
+    {
+        print_sun(SUN_DBG,"napt_handle() _neighbour = null\n");
+        return NF_ACCEPT;
+    }
+
+    if (memcmp(_neighbour->ha, zeromac, ETH_ALEN) == 0) /* neighbour MAC not resolved yet -- only ppp devices may proceed */
+    {
+        if (strncmp(out->name, ppp_name, strlen(ppp_name)) != 0)
+        {
+            goto accept;
+        }
+    }
+
+    if (!(ct = nf_ct_get(skb, &ctinfo)))
+    {
+        print_sun(SUN_DBG,"napt_handle() ct = null\n");
+        goto accept;
+    }
+
+    protocol = nf_ct_protonum(ct);
+    print_sun(SUN_DBG,"napt_handle() protocol = %d\n", protocol);
+
+    if (ct->master == NULL)
+    {
+        //const struct nf_conntrack_helper *helper;
+        struct nf_conn_help *temp_help = nfct_help(ct);
+        // connections with helper hooks must go through the standard kernel path, otherwise the kernel cannot see their packets
+        if(temp_help!=NULL)
+        {
+            //helper = rcu_dereference(temp_help->helper);
+            //if(!(helper->tuple.src.u.all == htons(21)&&helper->tuple.dst.protonum == IPPROTO_TCP)) {
+            goto accept;
+            //   }
+        }
+    }
+
+
+    /* only forward */
+    if (!skb->skb_iif)
+    {
+        goto accept;
+    }
+
+
+    // filter out protocol packets that must not take fastnat, keyed by port number
+    if (check_skip_ports(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all))
+    {
+        goto accept;
+    }
+
+    dir = CTINFO2DIR(ctinfo);
+
+    rdir = (IP_CT_DIR_ORIGINAL == dir) ? IP_CT_DIR_REPLY: IP_CT_DIR_ORIGINAL;
+    print_sun(SUN_DBG,"napt_handle() dir=%d, rdir=%d\n", dir, rdir);
+    /* fastnat only for TCP/UDP */
+    if (IPPROTO_TCP == protocol)
+    {
+        /* only established */
+        /* TCP three-way handshake completed */
+        if(!test_bit(IPS_ASSURED_BIT, &ct->status))
+        {
+            goto accept;
+        }
+    }
+    else if (IPPROTO_UDP != protocol)
+    {
+        goto accept;
+    }
+
+    spin_lock_bh(&fastnat_spinlock); /* list/entry mutation is serialized by this lock */
+    if (!(nat_entry = fast_get_entry(&working_list, ct, dir)))
+    {
+        print_sun(SUN_DBG,"napt_handle() nat_entry=%p\n", nat_entry);
+        spin_unlock_bh(&fastnat_spinlock);
+        goto accept;
+    }
+    nat_entry->fast_spinlock = &fastnat_spinlock;
+
+    // first link setup: take a ct refcount and retire the ct timer; duplicate first packets must not repeat this
+    if (!(nat_entry->flags & FAST_ALL_DIR))
+    {
+        nf_conntrack_get(&ct->ct_general);
+        //del_timer(&ct->timeout);
+        ct->timeout = nat_entry->timeout.expires;
+
+    }
+
+    entry_data = &nat_entry->data[dir];
+    entry_data->tuplehash.tuple = ct->tuplehash[dir].tuple;
+    memcpy(entry_data->dmac, _neighbour->ha, ETH_ALEN);
+    entry_data->priority = skb->priority;
+    entry_data->mark = skb->mark;
+    entry_data->outdev = out;
+
+    /* record the MAC header into entry_data->hh_data */
+    if (!record_MAC_header(working_hash, ct, nat_entry, entry_data, _neighbour, out, htons(ETH_P_IP)))
+    {
+        spin_unlock_bh(&fastnat_spinlock);
+        goto accept;
+    }
+    print_sun(SUN_DBG,"napt_handle() ct->status=0x%x\n", ct->status);
+    /* fetch the IP and port needed for the NAT translation */
+    if (test_bit(IPS_SRC_NAT_BIT, &ct->status))
+    {
+        if(IP_CT_DIR_ORIGINAL == dir)
+        {
+            entry_data->nat_addr = ct->tuplehash[rdir].tuple.dst.u3.ip;
+            entry_data->nat_port = ct->tuplehash[rdir].tuple.dst.u.all;
+            entry_data->type = FN_TYPE_SRC;
+        }
+        else
+        {
+            entry_data->nat_addr = ct->tuplehash[rdir].tuple.src.u3.ip;
+            entry_data->nat_port = ct->tuplehash[rdir].tuple.src.u.all;
+            entry_data->type = FN_TYPE_DST;
+        }
+    }
+    else if (test_bit(IPS_DST_NAT_BIT, &ct->status))
+    {
+        if (IP_CT_DIR_ORIGINAL == dir)
+        {
+            entry_data->nat_addr = ct->tuplehash[rdir].tuple.src.u3.ip;
+            entry_data->nat_port = ct->tuplehash[rdir].tuple.src.u.all;
+            entry_data->type = FN_TYPE_DST;
+        }
+        else
+        {
+            entry_data->nat_addr = ct->tuplehash[rdir].tuple.dst.u3.ip;
+            entry_data->nat_port = ct->tuplehash[rdir].tuple.dst.u.all;
+            entry_data->type = FN_TYPE_SRC;
+        }
+    }
+    else // non-NAT scenario
+    {
+        // here src/dst address and ports stay unchanged; the packet is passed through as-is
+        entry_data->is_not_nat = 1;
+    }
+
+    // ensure the two direction flag bits do not clash
+    nat_entry->flags = nat_entry->flags | (1 << dir);
+
+    // add the hash node
+    fast_add_entry(working_hash, entry_data);
+    if (nat_entry->flags == FAST_ALL_DIR)
+    {
+        nat_entry->data[0].indev = nat_entry->data[1].outdev;
+        nat_entry->data[1].indev = nat_entry->data[0].outdev;
+    }
+
+    spin_lock_bh(&fast_fw_spinlock);
+    ct->fast_ct.isFast = FAST_CT_WND4;
+    spin_unlock_bh(&fast_fw_spinlock);
+
+    spin_unlock_bh(&fastnat_spinlock);
+
+accept:
+    neigh_release(_neighbour); /* balance dst_neigh_lookup_skb */
+    return NF_ACCEPT;
+}
+
+/* handle notifier-chain events */
+int fastnat_event(traverse_command_t *cmd) /* walk the working list under the fastnat lock and apply cmd; always returns 0 */
+{
+    spin_lock_bh(&fastnat_spinlock);
+    traverse_process(&working_list, cmd);
+    spin_unlock_bh(&fastnat_spinlock);
+    return 0;
+}
+
+// when fastnat_level is turned off, clear all ipv4 fast-forwarding state
+void fastnat_cleanup_links(void) /* drop every fast link under the fastnat lock */
+{
+    spin_lock_bh(&fastnat_spinlock);
+    fast_cleanup_links(&working_list);
+    spin_unlock_bh(&fastnat_spinlock);
+}
+
+/* fastnat initialization */
+int tsp_fastnat_init(void) /* allocate the working hash, init the lock, register the POST_ROUTING hook; 0 on success, -EINVAL on failure */
+{
+    int ret;
+
+    print_sun(SUN_DBG,"start init fastnat\n");
+
+    working_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, /*&fastnat_hash_vmalloc,*/ 1);
+    if (!working_hash)
+    {
+        print_sun(SUN_ERR, "Unable to create working_hash\n");
+        return -EINVAL;
+    }
+
+    spin_lock_init(&fastnat_spinlock);
+
+    ret = nf_register_net_hook(&init_net, &nat_hook);
+    if (ret != 0)
+    {
+        print_sun(SUN_ERR,"init fastnat failed\n");
+        goto err;
+    }
+    print_sun(SUN_DBG,"init fastnat done\n");
+
+    return 0;
+
+err:
+    nf_ct_free_hashtable(working_hash, /*fastnat_hash_vmalloc, */nf_conntrack_htable_size); /* undo the allocation on hook-registration failure */
+    return -EINVAL;
+}
+
+int tsp_fastnat_cleanup(void) /* unregister the hook and free the working hash; always returns 0 */
+{
+    nf_unregister_net_hook(&init_net, &nat_hook);
+    nf_ct_free_hashtable(working_hash, /*fastnat_hash_vmalloc,*/ nf_conntrack_htable_size);
+
+    print_sun(SUN_DBG,"fastnat cleanup done\n");
+    return 0;
+}
+
diff --git a/upstream/linux-5.10/net/netfilter/nf_conntrack_core.c b/upstream/linux-5.10/net/netfilter/nf_conntrack_core.c
new file mode 100755
index 0000000..e92413e
--- /dev/null
+++ b/upstream/linux-5.10/net/netfilter/nf_conntrack_core.c
@@ -0,0 +1,2837 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Connection state tracking for netfilter.  This is separated from,
+   but required by, the NAT layer; it can also be used by an iptables
+   extension. */
+
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
+ * (C) 2005-2012 Patrick McHardy <kaber@trash.net>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/netfilter.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/proc_fs.h>
+#include <linux/vmalloc.h>
+#include <linux/stddef.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/jhash.h>
+#include <linux/siphash.h>
+#include <linux/err.h>
+#include <linux/percpu.h>
+#include <linux/moduleparam.h>
+#include <linux/notifier.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/socket.h>
+#include <linux/mm.h>
+#include <linux/nsproxy.h>
+#include <linux/rculist_nulls.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_acct.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
+#include <net/netfilter/nf_conntrack_timeout.h>
+#include <net/netfilter/nf_conntrack_labels.h>
+#include <net/netfilter/nf_conntrack_synproxy.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_helper.h>
+#include <net/netns/hash.h>
+#include <net/ip.h>
+
+#include "nf_internals.h"
+
+#ifdef CONFIG_FASTNAT_MODULE
+#include <net/SI/fast_common.h>
+#endif
+
+/* Bucket locks protecting slices of the conntrack hash table. */
+__cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
+EXPORT_SYMBOL_GPL(nf_conntrack_locks);
+
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
+EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);
+
+struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
+EXPORT_SYMBOL_GPL(nf_conntrack_hash);
+
+/* State for the periodic garbage-collection worker. */
+struct conntrack_gc_work {
+	struct delayed_work	dwork;
+	u32			next_bucket;	/* bucket at which the next scan resumes */
+	bool			exiting;	/* set on module teardown to stop rescheduling */
+	bool			early_drop;
+};
+
+static __read_mostly struct kmem_cache *nf_conntrack_cachep;
+static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
+/* true while someone holds the "all buckets" lock; see nf_conntrack_lock() */
+static __read_mostly bool nf_conntrack_locks_all;
+
+/* serialize hash resizes and nf_ct_iterate_cleanup */
+static DEFINE_MUTEX(nf_conntrack_mutex);
+
+#define GC_SCAN_INTERVAL	(120u * HZ)
+#define GC_SCAN_MAX_DURATION	msecs_to_jiffies(10)
+
+static struct conntrack_gc_work conntrack_gc_work;
+
+/* Take one conntrack bucket lock, cooperating with nf_conntrack_all_lock():
+ * if a global-lock holder is active, back off and re-acquire through the
+ * global lock so the two cannot interleave. */
+void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
+{
+	/* 1) Acquire the lock */
+	spin_lock(lock);
+
+	/* 2) read nf_conntrack_locks_all, with ACQUIRE semantics
+	 * It pairs with the smp_store_release() in nf_conntrack_all_unlock()
+	 */
+	if (likely(smp_load_acquire(&nf_conntrack_locks_all) == false))
+		return;
+
+	/* fast path failed, unlock */
+	spin_unlock(lock);
+
+	/* Slow path 1) get global lock */
+	spin_lock(&nf_conntrack_locks_all_lock);
+
+	/* Slow path 2) get the lock we want */
+	spin_lock(lock);
+
+	/* Slow path 3) release the global lock */
+	spin_unlock(&nf_conntrack_locks_all_lock);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_lock);
+EXPORT_SYMBOL_GPL(nf_conntrack_lock);
+
+/* Drop the pair of bucket locks taken by nf_conntrack_double_lock().
+ * When both hashes map to the same lock only one unlock is issued. */
+static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
+{
+	h1 %= CONNTRACK_LOCKS;
+	h2 %= CONNTRACK_LOCKS;
+	spin_unlock(&nf_conntrack_locks[h1]);
+	if (h1 != h2)
+		spin_unlock(&nf_conntrack_locks[h2]);
+}
+
+/* return true if we need to recompute hashes (in case hash table was resized) */
+static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
+				     unsigned int h2, unsigned int sequence)
+{
+	h1 %= CONNTRACK_LOCKS;
+	h2 %= CONNTRACK_LOCKS;
+	/* always take the lower-indexed lock first to avoid ABBA deadlock
+	 * between two CPUs locking the same pair in opposite order */
+	if (h1 <= h2) {
+		nf_conntrack_lock(&nf_conntrack_locks[h1]);
+		if (h1 != h2)
+			spin_lock_nested(&nf_conntrack_locks[h2],
+					 SINGLE_DEPTH_NESTING);
+	} else {
+		nf_conntrack_lock(&nf_conntrack_locks[h2]);
+		spin_lock_nested(&nf_conntrack_locks[h1],
+				 SINGLE_DEPTH_NESTING);
+	}
+	if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
+		/* table was resized while we computed the hashes: retry */
+		nf_conntrack_double_unlock(h1, h2);
+		return true;
+	}
+	return false;
+}
+
+/* Exclude all per-bucket lockers: set nf_conntrack_locks_all, then cycle
+ * through every bucket lock so existing holders drain out.  Released by
+ * nf_conntrack_all_unlock(). */
+static void nf_conntrack_all_lock(void)
+	__acquires(&nf_conntrack_locks_all_lock)
+{
+	int i;
+
+	spin_lock(&nf_conntrack_locks_all_lock);
+
+	nf_conntrack_locks_all = true;
+
+	for (i = 0; i < CONNTRACK_LOCKS; i++) {
+		spin_lock(&nf_conntrack_locks[i]);
+
+		/* This spin_unlock provides the "release" to ensure that
+		 * nf_conntrack_locks_all==true is visible to everyone that
+		 * acquired spin_lock(&nf_conntrack_locks[]).
+		 */
+		spin_unlock(&nf_conntrack_locks[i]);
+	}
+}
+
+/* Counterpart of nf_conntrack_all_lock(): re-enable per-bucket locking. */
+static void nf_conntrack_all_unlock(void)
+	__releases(&nf_conntrack_locks_all_lock)
+{
+	/* All prior stores must be complete before we clear
+	 * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
+	 * might observe the false value but not the entire
+	 * critical section.
+	 * It pairs with the smp_load_acquire() in nf_conntrack_lock()
+	 */
+	smp_store_release(&nf_conntrack_locks_all, false);
+	spin_unlock(&nf_conntrack_locks_all_lock);
+}
+
+/* Number of buckets in the conntrack hash table. */
+unsigned int nf_conntrack_htable_size __read_mostly;
+EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
+
+/* Upper bound on the number of tracked connections. */
+unsigned int nf_conntrack_max __read_mostly;
+EXPORT_SYMBOL_GPL(nf_conntrack_max);
+/* Bumped whenever the hash table is resized; readers retry on change. */
+seqcount_spinlock_t nf_conntrack_generation __read_mostly;
+/* Per-boot random seed mixed into every tuple hash. */
+static unsigned int nf_conntrack_hash_rnd __read_mostly;
+
+/* Compute the full 32-bit hash of a conntrack tuple (direction-agnostic),
+ * keyed by the per-boot random seed and the per-netns hash mix. */
+static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
+			      const struct net *net)
+{
+	unsigned int n;
+	u32 seed;
+
+	get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));
+
+	/* The direction must be ignored, so we hash everything up to the
+	 * destination ports (which is a multiple of 4) and treat the last
+	 * three bytes manually.
+	 */
+	seed = nf_conntrack_hash_rnd ^ net_hash_mix(net);
+	n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
+	return jhash2((u32 *)tuple, n, seed ^
+		      (((__force __u16)tuple->dst.u.all << 16) |
+		      tuple->dst.protonum));
+}
+
+#ifdef CONFIG_FASTNAT_MODULE
+/* Non-static export of hash_conntrack_raw() for the fastnat module. */
+u32 hash_conntrack_raw_fast(const struct nf_conntrack_tuple *tuple,
+			      const struct net *net)
+{
+	return hash_conntrack_raw(tuple, net);
+}
+#endif
+
+/* Map a full 32-bit tuple hash onto [0, nf_conntrack_htable_size). */
+static u32 scale_hash(u32 hash)
+{
+	u32 bucket;
+
+	bucket = reciprocal_scale(hash, nf_conntrack_htable_size);
+	return bucket;
+}
+
+/* Hash @tuple and scale the result onto a table of @size buckets. */
+static u32 __hash_conntrack(const struct net *net,
+			    const struct nf_conntrack_tuple *tuple,
+			    unsigned int size)
+{
+	return reciprocal_scale(hash_conntrack_raw(tuple, net), size);
+}
+
+/* Bucket index for @tuple in the main conntrack hash table. */
+static u32 hash_conntrack(const struct net *net,
+			  const struct nf_conntrack_tuple *tuple)
+{
+	return scale_hash(hash_conntrack_raw(tuple, net));
+}
+
+#ifdef CONFIG_FASTNAT_MODULE
+/* Bucket index for the fastnat module's own table.
+ * NOTE(review): table size is hard-coded to 32 buckets here, unlike
+ * hash_conntrack() which uses nf_conntrack_htable_size -- presumably the
+ * fastnat hash really has 32 buckets; confirm against the fastnat module. */
+u_int32_t hash_conntrack_fast(const struct nf_conntrack_tuple *tuple)
+{
+  return __hash_conntrack(&init_net, tuple, 32);
+}
+#endif
+
+/* Pull the L4 source/destination ports at @dataoff into @tuple.
+ * Returns false if the packet is too short to contain them. */
+static bool nf_ct_get_tuple_ports(const struct sk_buff *skb,
+				  unsigned int dataoff,
+				  struct nf_conntrack_tuple *tuple)
+{
+	/* Only the first 4 bytes (both port fields) are needed. */
+	struct {
+		__be16 sport;
+		__be16 dport;
+	} _ports, *ports;
+
+	ports = skb_header_pointer(skb, dataoff, sizeof(_ports), &_ports);
+	if (!ports)
+		return false;
+
+	tuple->src.u.udp.port = ports->sport;
+	tuple->dst.u.udp.port = ports->dport;
+	return true;
+}
+
+/* Build a conntrack tuple from the packet at hand.
+ * @nhoff:    offset of the network header in @skb
+ * @dataoff:  offset of the transport header in @skb
+ * Returns false only when required header bytes cannot be read;
+ * unknown L3/L4 protocols yield a partially-filled tuple and true. */
+static bool
+nf_ct_get_tuple(const struct sk_buff *skb,
+		unsigned int nhoff,
+		unsigned int dataoff,
+		u_int16_t l3num,
+		u_int8_t protonum,
+		struct net *net,
+		struct nf_conntrack_tuple *tuple)
+{
+	unsigned int size;
+	const __be32 *ap;
+	__be32 _addrs[8];
+
+	memset(tuple, 0, sizeof(*tuple));
+
+	tuple->src.l3num = l3num;
+	switch (l3num) {
+	case NFPROTO_IPV4:
+		nhoff += offsetof(struct iphdr, saddr);
+		size = 2 * sizeof(__be32);
+		break;
+	case NFPROTO_IPV6:
+		nhoff += offsetof(struct ipv6hdr, saddr);
+		size = sizeof(_addrs);
+		break;
+	default:
+		/* unknown family: nothing more we can extract */
+		return true;
+	}
+
+	ap = skb_header_pointer(skb, nhoff, size, _addrs);
+	if (!ap)
+		return false;
+
+	switch (l3num) {
+	case NFPROTO_IPV4:
+		tuple->src.u3.ip = ap[0];
+		tuple->dst.u3.ip = ap[1];
+		break;
+	case NFPROTO_IPV6:
+		memcpy(tuple->src.u3.ip6, ap, sizeof(tuple->src.u3.ip6));
+		memcpy(tuple->dst.u3.ip6, ap + 4, sizeof(tuple->dst.u3.ip6));
+		break;
+	}
+
+	tuple->dst.protonum = protonum;
+	tuple->dst.dir = IP_CT_DIR_ORIGINAL;
+
+	/* per-protocol extraction of the L4 part of the tuple */
+	switch (protonum) {
+#if IS_ENABLED(CONFIG_IPV6)
+	case IPPROTO_ICMPV6:
+		return icmpv6_pkt_to_tuple(skb, dataoff, net, tuple);
+#endif
+	case IPPROTO_ICMP:
+		return icmp_pkt_to_tuple(skb, dataoff, net, tuple);
+#ifdef CONFIG_NF_CT_PROTO_GRE
+	case IPPROTO_GRE:
+		return gre_pkt_to_tuple(skb, dataoff, net, tuple);
+#endif
+	case IPPROTO_TCP:
+	case IPPROTO_UDP: /* fallthrough */
+		return nf_ct_get_tuple_ports(skb, dataoff, tuple);
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE
+	case IPPROTO_UDPLITE:
+		return nf_ct_get_tuple_ports(skb, dataoff, tuple);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+	case IPPROTO_SCTP:
+		return nf_ct_get_tuple_ports(skb, dataoff, tuple);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+	case IPPROTO_DCCP:
+		return nf_ct_get_tuple_ports(skb, dataoff, tuple);
+#endif
+	default:
+		break;
+	}
+
+	return true;
+}
+
+#ifdef CONFIG_FASTNAT_MODULE
+/* Non-static export of nf_ct_get_tuple() for the fastnat module. */
+bool nf_ct_get_tuple_fast(const struct sk_buff *skb,
+		unsigned int nhoff,
+		unsigned int dataoff,
+		u_int16_t l3num,
+		u_int8_t protonum,
+		struct net *net,
+		struct nf_conntrack_tuple *tuple)
+{
+  return nf_ct_get_tuple(skb, nhoff, dataoff, l3num, protonum, net, tuple);
+}
+#endif
+
+/* Locate the L4 header of an IPv4 packet.
+ * Returns the transport-header offset, or -1 for unreadable headers,
+ * non-first fragments, or a bogus IHL that points past the packet.
+ * On success *protonum is set to the IP protocol number. */
+static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
+			    u_int8_t *protonum)
+{
+	int dataoff;	/* always assigned before use below */
+	const struct iphdr *iph;
+	struct iphdr _iph;
+
+	iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
+	if (!iph)
+		return -1;
+
+	/* Conntrack defragments packets, we might still see fragments
+	 * inside ICMP packets though.
+	 */
+	if (iph->frag_off & htons(IP_OFFSET))
+		return -1;
+
+	dataoff = nhoff + (iph->ihl << 2);
+	*protonum = iph->protocol;
+
+	/* Check bogus IP headers */
+	if (dataoff > skb->len) {
+		pr_debug("bogus IPv4 packet: nhoff %u, ihl %u, skblen %u\n",
+			 nhoff, iph->ihl << 2, skb->len);
+		return -1;
+	}
+	return dataoff;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+/* Locate the L4 header of an IPv6 packet, skipping extension headers.
+ * Returns the transport-header offset (which may equal skb->len for a
+ * header-only packet) or -1 when the chain cannot be walked or the
+ * packet is a non-first fragment.  Sets *protonum on success. */
+static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
+			    u8 *protonum)
+{
+	int protoff;	/* always assigned before use below */
+	unsigned int extoff = nhoff + sizeof(struct ipv6hdr);
+	__be16 frag_off;
+	u8 nexthdr;
+
+	if (skb_copy_bits(skb, nhoff + offsetof(struct ipv6hdr, nexthdr),
+			  &nexthdr, sizeof(nexthdr)) != 0) {
+		pr_debug("can't get nexthdr\n");
+		return -1;
+	}
+	protoff = ipv6_skip_exthdr(skb, extoff, &nexthdr, &frag_off);
+	/*
+	 * (protoff == skb->len) means the packet has not data, just
+	 * IPv6 and possibly extensions headers, but it is tracked anyway
+	 */
+	if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
+		pr_debug("can't find proto in pkt\n");
+		return -1;
+	}
+
+	*protonum = nexthdr;
+	return protoff;
+}
+#endif
+
+/* Dispatch L4-header lookup on the L3 family.  Unknown families zero
+ * *l4num and report failure (-1). */
+static int get_l4proto(const struct sk_buff *skb,
+		       unsigned int nhoff, u8 pf, u8 *l4num)
+{
+	if (pf == NFPROTO_IPV4)
+		return ipv4_get_l4proto(skb, nhoff, l4num);
+#if IS_ENABLED(CONFIG_IPV6)
+	if (pf == NFPROTO_IPV6)
+		return ipv6_get_l4proto(skb, nhoff, l4num);
+#endif
+	*l4num = 0;
+	return -1;
+}
+
+#ifdef CONFIG_FASTNAT_MODULE
+/* Non-static export of get_l4proto() for the fastnat module. */
+int get_l4proto_fast(const struct sk_buff *skb,
+		       unsigned int nhoff, u8 pf, u8 *l4num)
+{
+	return get_l4proto(skb, nhoff, pf, l4num);
+}
+#endif
+
+/* Convenience wrapper: find the L4 protocol, then build the tuple.
+ * Returns false if either step fails. */
+bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
+		       u_int16_t l3num,
+		       struct net *net, struct nf_conntrack_tuple *tuple)
+{
+	u8 protonum;
+	int protoff;
+
+	protoff = get_l4proto(skb, nhoff, l3num, &protonum);
+	if (protoff <= 0)
+		return false;
+
+	return nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple);
+}
+EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
+
+/* Fill @inverse with the reply-direction counterpart of @orig:
+ * addresses and ports swapped, direction flipped.  ICMP/ICMPv6 need
+ * protocol-specific inversion of their id/type fields. */
+bool
+nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
+		   const struct nf_conntrack_tuple *orig)
+{
+	memset(inverse, 0, sizeof(*inverse));
+
+	inverse->src.l3num = orig->src.l3num;
+
+	switch (orig->src.l3num) {
+	case NFPROTO_IPV4:
+		inverse->src.u3.ip = orig->dst.u3.ip;
+		inverse->dst.u3.ip = orig->src.u3.ip;
+		break;
+	case NFPROTO_IPV6:
+		inverse->src.u3.in6 = orig->dst.u3.in6;
+		inverse->dst.u3.in6 = orig->src.u3.in6;
+		break;
+	default:
+		break;
+	}
+
+	inverse->dst.dir = !orig->dst.dir;
+
+	inverse->dst.protonum = orig->dst.protonum;
+
+	switch (orig->dst.protonum) {
+	case IPPROTO_ICMP:
+		return nf_conntrack_invert_icmp_tuple(inverse, orig);
+#if IS_ENABLED(CONFIG_IPV6)
+	case IPPROTO_ICMPV6:
+		return nf_conntrack_invert_icmpv6_tuple(inverse, orig);
+#endif
+	}
+
+	/* generic case: swap the L4 port/id fields */
+	inverse->src.u.all = orig->dst.u.all;
+	inverse->dst.u.all = orig->src.u.all;
+	return true;
+}
+EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
+
+/* Generate an almost-unique pseudo-id for a given conntrack.
+ *
+ * intentionally doesn't re-use any of the seeds used for hash
+ * table location, we assume id gets exposed to userspace.
+ *
+ * Following nf_conn items do not change throughout lifetime
+ * of the nf_conn:
+ *
+ * 1. nf_conn address
+ * 2. nf_conn->master address (normally NULL)
+ * 3. the associated net namespace
+ * 4. the original direction tuple
+ */
+u32 nf_ct_get_id(const struct nf_conn *ct)
+{
+	static __read_mostly siphash_key_t ct_id_seed;
+	unsigned long a, b, c, d;
+
+	net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
+
+	a = (unsigned long)ct;
+	b = (unsigned long)ct->master;
+	c = (unsigned long)nf_ct_net(ct);
+	d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+				   sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
+				   &ct_id_seed);
+	/* mix the four inputs with a keyed hash; word width follows arch */
+#ifdef CONFIG_64BIT
+	return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
+#else
+	return siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &ct_id_seed);
+#endif
+}
+EXPORT_SYMBOL_GPL(nf_ct_get_id);
+
+/* Unlink @ct from both hash-table directions and drop its pending
+ * expectations.  Caller must hold the relevant bucket locks. */
+static void
+clean_from_lists(struct nf_conn *ct)
+{
+	pr_debug("clean_from_lists(%p)\n", ct);
+	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
+	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
+
+	/* Destroy all pending expectations */
+	nf_ct_remove_expectations(ct);
+}
+
+/* must be called with local_bh_disable */
+static void nf_ct_add_to_dying_list(struct nf_conn *ct)
+{
+	struct ct_pcpu *pcpu;
+
+	/* add this conntrack to the (per cpu) dying list;
+	 * the ORIGINAL-direction node is reused as the list link */
+	ct->cpu = smp_processor_id();
+	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
+
+	spin_lock(&pcpu->lock);
+	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
+			     &pcpu->dying);
+	spin_unlock(&pcpu->lock);
+}
+
+/* must be called with local_bh_disable */
+static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
+{
+	struct ct_pcpu *pcpu;
+
+	/* add this conntrack to the (per cpu) unconfirmed list;
+	 * the ORIGINAL-direction node is reused as the list link */
+	ct->cpu = smp_processor_id();
+	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
+
+	spin_lock(&pcpu->lock);
+	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
+			     &pcpu->unconfirmed);
+	spin_unlock(&pcpu->lock);
+}
+
+/* must be called with local_bh_disable */
+static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
+{
+	struct ct_pcpu *pcpu;
+
+	/* We overload first tuple to link into unconfirmed or dying list.*/
+	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
+
+	spin_lock(&pcpu->lock);
+	/* entry must currently be on one of the two pcpu lists */
+	BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
+	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
+	spin_unlock(&pcpu->lock);
+}
+
+#define NFCT_ALIGN(len)	(((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK)
+
+/* Allocate a conntrack template.  Released via destroy_conntrack().
+ * When kmalloc alignment is insufficient, over-allocate and align the
+ * pointer up so skb->_nfct can steal the low bits for ctinfo. */
+struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
+				 const struct nf_conntrack_zone *zone,
+				 gfp_t flags)
+{
+	struct nf_conn *tmpl, *p;
+
+	if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK) {
+		tmpl = kzalloc(sizeof(*tmpl) + NFCT_INFOMASK, flags);
+		if (!tmpl)
+			return NULL;
+
+		p = tmpl;
+		tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
+		if (tmpl != p)
+			/* record the pad so nf_ct_tmpl_free() can undo it */
+			tmpl->proto.tmpl_padto = (char *)tmpl - (char *)p;
+	} else {
+		tmpl = kzalloc(sizeof(*tmpl), flags);
+		if (!tmpl)
+			return NULL;
+	}
+
+	tmpl->status = IPS_TEMPLATE;
+	write_pnet(&tmpl->ct_net, net);
+	nf_ct_zone_add(tmpl, zone);
+	atomic_set(&tmpl->ct_general.use, 0);
+
+	return tmpl;
+}
+EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
+
+/* Free a template allocated by nf_ct_tmpl_alloc(), undoing the
+ * alignment pad recorded in proto.tmpl_padto when one was applied. */
+void nf_ct_tmpl_free(struct nf_conn *tmpl)
+{
+	nf_ct_ext_destroy(tmpl);
+
+	if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK)
+		kfree((char *)tmpl - tmpl->proto.tmpl_padto);
+	else
+		kfree(tmpl);
+}
+EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
+
+/* GRE-specific teardown: drop the keymap held by the master ct, if any.
+ * No-op unless CONFIG_NF_CT_PROTO_GRE is enabled. */
+static void destroy_gre_conntrack(struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CT_PROTO_GRE
+	struct nf_conn *master = ct->master;
+
+	if (master)
+		nf_ct_gre_keymap_destroy(master);
+#endif
+}
+
+/* Final destructor, invoked when the refcount hits zero: release
+ * expectations, unlink from the pcpu list, drop the master reference
+ * and return the entry to the slab. */
+static void
+destroy_conntrack(struct nf_conntrack *nfct)
+{
+	struct nf_conn *ct = (struct nf_conn *)nfct;
+
+	pr_debug("destroy_conntrack(%p)\n", ct);
+	WARN_ON(atomic_read(&nfct->use) != 0);
+
+	if (unlikely(nf_ct_is_template(ct))) {
+		/* templates take the dedicated free path */
+		nf_ct_tmpl_free(ct);
+		return;
+	}
+
+	if (unlikely(nf_ct_protonum(ct) == IPPROTO_GRE))
+		destroy_gre_conntrack(ct);
+
+	local_bh_disable();
+	/* Expectations will have been removed in clean_from_lists,
+	 * except TFTP can create an expectation on the first packet,
+	 * before connection is in the list, so we need to clean here,
+	 * too.
+	 */
+	nf_ct_remove_expectations(ct);
+
+	nf_ct_del_from_dying_or_unconfirmed_list(ct);
+
+	local_bh_enable();
+
+	if (ct->master)
+		nf_ct_put(ct->master);
+
+	pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
+	nf_conntrack_free(ct);
+}
+
+/* Remove @ct from the hash table (retrying across concurrent resizes)
+ * and move it onto the per-cpu dying list. */
+static void nf_ct_delete_from_lists(struct nf_conn *ct)
+{
+	struct net *net = nf_ct_net(ct);
+	unsigned int hash, reply_hash;
+	unsigned int sequence;
+
+	nf_ct_helper_destroy(ct);
+
+	local_bh_disable();
+	/* recompute both bucket hashes until no resize intervenes */
+	do {
+		sequence = read_seqcount_begin(&nf_conntrack_generation);
+		hash = hash_conntrack(net,
+				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+		reply_hash = hash_conntrack(net,
+					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
+
+	clean_from_lists(ct);
+	nf_conntrack_double_unlock(hash, reply_hash);
+
+	nf_ct_add_to_dying_list(ct);
+
+	local_bh_enable();
+}
+
+/* Kill a conntrack entry: mark it dying (exactly once), stamp the stop
+ * time, emit the DESTROY event and unlink it.  Returns false if the
+ * entry was already dying or the destroy event could not be delivered. */
+bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
+{
+	struct nf_conn_tstamp *tstamp;
+
+	/* only the first caller proceeds */
+	if (test_and_set_bit(IPS_DYING_BIT, &ct->status))
+		return false;
+
+	tstamp = nf_conn_tstamp_find(ct);
+	if (tstamp) {
+		s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp;
+
+		tstamp->stop = ktime_get_real_ns();
+		/* if the entry had already expired, backdate the stop time */
+		if (timeout < 0)
+			tstamp->stop -= jiffies_to_nsecs(-timeout);
+	}
+
+	if (nf_conntrack_event_report(IPCT_DESTROY, ct,
+				    portid, report) < 0) {
+		/* destroy event was not delivered. nf_ct_put will
+		 * be done by event cache worker on redelivery.
+		 */
+		nf_ct_delete_from_lists(ct);
+		nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
+		return false;
+	}
+
+	nf_conntrack_ecache_work(nf_ct_net(ct));
+	nf_ct_delete_from_lists(ct);
+	nf_ct_put(ct);
+	return true;
+}
+EXPORT_SYMBOL_GPL(nf_ct_delete);
+
+/* Hash-lookup match predicate: tuple, zone and netns must all agree,
+ * and the entry must be confirmed. */
+static inline bool
+nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
+		const struct nf_conntrack_tuple *tuple,
+		const struct nf_conntrack_zone *zone,
+		const struct net *net)
+{
+	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+
+	/* A conntrack can be recreated with the equal tuple,
+	 * so we need to check that the conntrack is confirmed
+	 */
+	return nf_ct_tuple_equal(tuple, &h->tuple) &&
+	       nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
+	       nf_ct_is_confirmed(ct) &&
+	       net_eq(net, nf_ct_net(ct));
+}
+
+/* Full-identity comparison of two conntrack entries: both direction
+ * tuples, both zones and the owning netns must match. */
+static inline bool
+nf_ct_match(const struct nf_conn *ct1, const struct nf_conn *ct2)
+{
+	return nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+				 &ct2->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
+	       nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_REPLY].tuple,
+				 &ct2->tuplehash[IP_CT_DIR_REPLY].tuple) &&
+	       nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_ORIGINAL) &&
+	       nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_REPLY) &&
+	       net_eq(nf_ct_net(ct1), nf_ct_net(ct2));
+}
+
+/* caller must hold rcu readlock and none of the nf_conntrack_locks */
+static void nf_ct_gc_expired(struct nf_conn *ct)
+{
+	/* take a reference unless the entry is already being freed */
+	if (!atomic_inc_not_zero(&ct->ct_general.use))
+		return;
+
+	/* re-check under the reference; another CPU may have refreshed it */
+	if (nf_ct_should_gc(ct))
+		nf_ct_kill(ct);
+
+	nf_ct_put(ct);
+}
+
+/*
+ * Lockless (RCU) hash lookup, reaping expired entries along the way.
+ * Warning :
+ * - Caller must take a reference on returned object
+ *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
+ */
+static struct nf_conntrack_tuple_hash *
+____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
+		      const struct nf_conntrack_tuple *tuple, u32 hash)
+{
+	struct nf_conntrack_tuple_hash *h;
+	struct hlist_nulls_head *ct_hash;
+	struct hlist_nulls_node *n;
+	unsigned int bucket, hsize;
+
+begin:
+	/* snapshot the (possibly resizing) table pointer and size */
+	nf_conntrack_get_ht(&ct_hash, &hsize);
+	bucket = reciprocal_scale(hash, hsize);
+
+	hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
+		struct nf_conn *ct;
+
+		ct = nf_ct_tuplehash_to_ctrack(h);
+		if (nf_ct_is_expired(ct)) {
+			nf_ct_gc_expired(ct);
+			continue;
+		}
+
+		if (nf_ct_key_equal(h, tuple, zone, net))
+			return h;
+	}
+	/*
+	 * if the nulls value we got at the end of this lookup is
+	 * not the expected one, we must restart lookup.
+	 * We probably met an item that was moved to another chain.
+	 */
+	if (get_nulls_value(n) != bucket) {
+		NF_CT_STAT_INC_ATOMIC(net, search_restart);
+		goto begin;
+	}
+
+	return NULL;
+}
+
+/* Find a connection corresponding to a tuple; on success the returned
+ * entry carries a reference taken for the caller. */
+static struct nf_conntrack_tuple_hash *
+__nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
+			const struct nf_conntrack_tuple *tuple, u32 hash)
+{
+	struct nf_conntrack_tuple_hash *h;
+	struct nf_conn *ct;
+
+	rcu_read_lock();
+
+	h = ____nf_conntrack_find(net, zone, tuple, hash);
+	if (h) {
+		/* We have a candidate that matches the tuple we're interested
+		 * in, try to obtain a reference and re-check tuple
+		 */
+		ct = nf_ct_tuplehash_to_ctrack(h);
+		if (likely(atomic_inc_not_zero(&ct->ct_general.use))) {
+			if (likely(nf_ct_key_equal(h, tuple, zone, net)))
+				goto found;
+
+			/* TYPESAFE_BY_RCU recycled the candidate */
+			nf_ct_put(ct);
+		}
+
+		h = NULL;
+	}
+found:
+	rcu_read_unlock();
+
+	return h;
+}
+
+#ifdef CONFIG_FASTNAT_MODULE
+/* Non-static export of __nf_conntrack_find_get() for the fastnat module;
+ * the returned entry (if any) carries a reference for the caller. */
+struct nf_conntrack_tuple_hash *nf_conntrack_find_fast(struct net *net, const struct nf_conntrack_zone *zone,
+			  const struct nf_conntrack_tuple *tuple, u32 hash)
+{
+	return __nf_conntrack_find_get(net, zone, tuple, hash);
+}
+#endif
+
+/* Public lookup entry point: hash the tuple, then do a referenced find. */
+struct nf_conntrack_tuple_hash *
+nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
+		      const struct nf_conntrack_tuple *tuple)
+{
+	return __nf_conntrack_find_get(net, zone, tuple,
+				       hash_conntrack_raw(tuple, net));
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
+
+/* Link both direction tuples of @ct into their hash buckets.
+ * Caller must hold the corresponding bucket locks. */
+static void __nf_conntrack_hash_insert(struct nf_conn *ct,
+				       unsigned int hash,
+				       unsigned int reply_hash)
+{
+	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
+			   &nf_conntrack_hash[hash]);
+	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
+			   &nf_conntrack_hash[reply_hash]);
+}
+
+/* Insert @ct into the hash table unless an entry with the same tuple
+ * (in either direction) already exists.  Returns 0 on success,
+ * -EEXIST on a duplicate. */
+int
+nf_conntrack_hash_check_insert(struct nf_conn *ct)
+{
+	const struct nf_conntrack_zone *zone;
+	struct net *net = nf_ct_net(ct);
+	unsigned int hash, reply_hash;
+	struct nf_conntrack_tuple_hash *h;
+	struct hlist_nulls_node *n;
+	unsigned int sequence;
+
+	zone = nf_ct_zone(ct);
+
+	local_bh_disable();
+	/* recompute both bucket hashes until no resize intervenes */
+	do {
+		sequence = read_seqcount_begin(&nf_conntrack_generation);
+		hash = hash_conntrack(net,
+				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+		reply_hash = hash_conntrack(net,
+					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
+
+	/* See if there's one in the list already, including reverse */
+	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
+		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+				    zone, net))
+			goto out;
+
+	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
+		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+				    zone, net))
+			goto out;
+
+	smp_wmb();
+	/* The caller holds a reference to this object */
+	atomic_set(&ct->ct_general.use, 2);
+	__nf_conntrack_hash_insert(ct, hash, reply_hash);
+	nf_conntrack_double_unlock(hash, reply_hash);
+	NF_CT_STAT_INC(net, insert);
+	local_bh_enable();
+	return 0;
+
+out:
+	nf_conntrack_double_unlock(hash, reply_hash);
+	local_bh_enable();
+	return -EEXIST;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
+
+/* Account @packets/@bytes against @ct in direction @dir, if the entry
+ * has the accounting extension attached. */
+void nf_ct_acct_add(struct nf_conn *ct, u32 dir, unsigned int packets,
+		    unsigned int bytes)
+{
+	struct nf_conn_acct *acct;
+
+	acct = nf_conn_acct_find(ct);
+	if (acct) {
+		struct nf_conn_counter *counter = acct->counter;
+
+		atomic64_add(packets, &counter[dir].packets);
+		atomic64_add(bytes, &counter[dir].bytes);
+	}
+}
+EXPORT_SYMBOL_GPL(nf_ct_acct_add);
+
+/* Fold the loser's byte counter (for the packet's direction) into the
+ * winning entry when a clash is resolved. */
+static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+			     const struct nf_conn *loser_ct)
+{
+	struct nf_conn_acct *acct;
+
+	acct = nf_conn_acct_find(loser_ct);
+	if (acct) {
+		struct nf_conn_counter *counter = acct->counter;
+		unsigned int bytes;
+
+		/* u32 should be fine since we must have seen one packet. */
+		bytes = atomic64_read(&counter[CTINFO2DIR(ctinfo)].bytes);
+		nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), bytes);
+	}
+}
+
+/* Common pre-insertion work: take the table's reference, mark the entry
+ * confirmed, and stamp the start time if timestamping is enabled. */
+static void __nf_conntrack_insert_prepare(struct nf_conn *ct)
+{
+	struct nf_conn_tstamp *tstamp;
+
+	atomic_inc(&ct->ct_general.use);
+	ct->status |= IPS_CONFIRMED;
+
+	/* set conntrack timestamp, if enabled. */
+	tstamp = nf_conn_tstamp_find(ct);
+	if (tstamp)
+		tstamp->start = ktime_get_real_ns();
+}
+
+/* caller must hold locks to prevent concurrent changes */
+static int __nf_ct_resolve_clash(struct sk_buff *skb,
+				 struct nf_conntrack_tuple_hash *h)
+{
+	/* This is the conntrack entry already in hashes that won race. */
+	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn *loser_ct;
+
+	loser_ct = nf_ct_get(skb, &ctinfo);
+
+	if (nf_ct_is_dying(ct))
+		return NF_DROP;
+
+	/* adopt the winner only if no NAT was applied yet or both entries
+	 * are fully identical */
+	if (((ct->status & IPS_NAT_DONE_MASK) == 0) ||
+	    nf_ct_match(ct, loser_ct)) {
+		struct net *net = nf_ct_net(ct);
+
+		nf_conntrack_get(&ct->ct_general);
+
+		/* transfer accounting, retire the loser, repoint the skb */
+		nf_ct_acct_merge(ct, ctinfo, loser_ct);
+		nf_ct_add_to_dying_list(loser_ct);
+		nf_conntrack_put(&loser_ct->ct_general);
+		nf_ct_set(skb, ct, ctinfo);
+
+		NF_CT_STAT_INC(net, clash_resolve);
+		return NF_ACCEPT;
+	}
+
+	return NF_DROP;
+}
+
+/**
+ * nf_ct_resolve_clash_harder - attempt to insert clashing conntrack entry
+ *
+ * @skb: skb that causes the collision
+ * @repl_idx: hash slot for reply direction
+ *
+ * Called when origin or reply direction had a clash.
+ * The skb can be handled without packet drop provided the reply direction
+ * is unique or there the existing entry has the identical tuple in both
+ * directions.
+ *
+ * Caller must hold conntrack table locks to prevent concurrent updates.
+ *
+ * Returns NF_DROP if the clash could not be handled.
+ */
+static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx)
+{
+	struct nf_conn *loser_ct = (struct nf_conn *)skb_nfct(skb);
+	const struct nf_conntrack_zone *zone;
+	struct nf_conntrack_tuple_hash *h;
+	struct hlist_nulls_node *n;
+	struct net *net;
+
+	zone = nf_ct_zone(loser_ct);
+	net = nf_ct_net(loser_ct);
+
+	/* Reply direction must never result in a clash, unless both origin
+	 * and reply tuples are identical.
+	 */
+	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[repl_idx], hnnode) {
+		if (nf_ct_key_equal(h,
+				    &loser_ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+				    zone, net))
+			return __nf_ct_resolve_clash(skb, h);
+	}
+
+	/* We want the clashing entry to go away real soon: 1 second timeout. */
+	WRITE_ONCE(loser_ct->timeout, nfct_time_stamp + HZ);
+
+	/* IPS_NAT_CLASH removes the entry automatically on the first
+	 * reply.  Also prevents UDP tracker from moving the entry to
+	 * ASSURED state, i.e. the entry can always be evicted under
+	 * pressure.
+	 */
+	loser_ct->status |= IPS_FIXED_TIMEOUT | IPS_NAT_CLASH;
+
+	__nf_conntrack_insert_prepare(loser_ct);
+
+	/* fake add for ORIGINAL dir: we want lookups to only find the entry
+	 * already in the table.  This also hides the clashing entry from
+	 * ctnetlink iteration, i.e. conntrack -L won't show them.
+	 */
+	hlist_nulls_add_fake(&loser_ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
+
+	/* the loser becomes reachable via the reply direction only */
+	hlist_nulls_add_head_rcu(&loser_ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
+				 &nf_conntrack_hash[repl_idx]);
+
+	NF_CT_STAT_INC(net, clash_resolve);
+	return NF_ACCEPT;
+}
+
+/**
+ * nf_ct_resolve_clash - attempt to handle clash without packet drop
+ *
+ * @skb: skb that causes the clash
+ * @h: tuplehash of the clashing entry already in table
+ * @reply_hash: hash slot for reply direction
+ *
+ * A conntrack entry can be inserted to the connection tracking table
+ * if there is no existing entry with an identical tuple.
+ *
+ * If there is one, @skb (and the assocated, unconfirmed conntrack) has
+ * to be dropped.  In case @skb is retransmitted, next conntrack lookup
+ * will find the already-existing entry.
+ *
+ * The major problem with such packet drop is the extra delay added by
+ * the packet loss -- it will take some time for a retransmit to occur
+ * (or the sender to time out when waiting for a reply).
+ *
+ * This function attempts to handle the situation without packet drop.
+ *
+ * If @skb has no NAT transformation or if the colliding entries are
+ * exactly the same, only the to-be-confirmed conntrack entry is discarded
+ * and @skb is associated with the conntrack entry already in the table.
+ *
+ * Failing that, the new, unconfirmed conntrack is still added to the table
+ * provided that the collision only occurs in the ORIGINAL direction.
+ * The new entry will be added only in the non-clashing REPLY direction,
+ * so packets in the ORIGINAL direction will continue to match the existing
+ * entry.  The new entry will also have a fixed timeout so it expires --
+ * due to the collision, it will only see reply traffic.
+ *
+ * Returns NF_DROP if the clash could not be resolved.
+ */
+static __cold noinline int
+nf_ct_resolve_clash(struct sk_buff *skb, struct nf_conntrack_tuple_hash *h,
+		    u32 reply_hash)
+{
+	/* This is the conntrack entry already in hashes that won race. */
+	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+	const struct nf_conntrack_l4proto *l4proto;
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn *loser_ct;
+	struct net *net;
+	int ret;
+
+	loser_ct = nf_ct_get(skb, &ctinfo);
+	net = nf_ct_net(loser_ct);
+
+	/* only protocols that opted in (e.g. connectionless ones) may
+	 * have their clashes resolved */
+	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
+	if (!l4proto->allow_clash)
+		goto drop;
+
+	ret = __nf_ct_resolve_clash(skb, h);
+	if (ret == NF_ACCEPT)
+		return ret;
+
+	ret = nf_ct_resolve_clash_harder(skb, reply_hash);
+	if (ret == NF_ACCEPT)
+		return ret;
+
+drop:
+	nf_ct_add_to_dying_list(loser_ct);
+	NF_CT_STAT_INC(net, drop);
+	NF_CT_STAT_INC(net, insert_failed);
+	return NF_DROP;
+}
+
+/* Confirm a connection given skb; places it in hash table */
+/* Returns NF_ACCEPT when the entry was inserted (or the clash was resolved),
+ * NF_DROP otherwise.
+ */
+int
+__nf_conntrack_confirm(struct sk_buff *skb)
+{
+	const struct nf_conntrack_zone *zone;
+	unsigned int hash, reply_hash;
+	struct nf_conntrack_tuple_hash *h;
+	struct nf_conn *ct;
+	struct nf_conn_help *help;
+	struct hlist_nulls_node *n;
+	enum ip_conntrack_info ctinfo;
+	struct net *net;
+	unsigned int sequence;
+	int ret = NF_DROP;
+
+	ct = nf_ct_get(skb, &ctinfo);
+	net = nf_ct_net(ct);
+
+	/* ipt_REJECT uses nf_conntrack_attach to attach related
+	   ICMP/TCP RST packets in other direction.  Actual packet
+	   which created connection will be IP_CT_NEW or for an
+	   expected connection, IP_CT_RELATED. */
+	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
+		return NF_ACCEPT;
+
+	zone = nf_ct_zone(ct);
+	local_bh_disable();
+
+	/* Take both chain locks against a stable table generation; retry if
+	 * a concurrent resize invalidated the computed hashes.
+	 */
+	do {
+		sequence = read_seqcount_begin(&nf_conntrack_generation);
+		/* reuse the hash saved before */
+		hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
+		hash = scale_hash(hash);
+		reply_hash = hash_conntrack(net,
+					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+
+	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
+
+	/* We're not in hash table, and we refuse to set up related
+	 * connections for unconfirmed conns.  But packet copies and
+	 * REJECT will give spurious warnings here.
+	 */
+
+	/* Another skb with the same unconfirmed conntrack may
+	 * win the race. This may happen for bridge(br_flood)
+	 * or broadcast/multicast packets do skb_clone with
+	 * unconfirmed conntrack.
+	 */
+	if (unlikely(nf_ct_is_confirmed(ct))) {
+		WARN_ON_ONCE(1);
+		nf_conntrack_double_unlock(hash, reply_hash);
+		local_bh_enable();
+		return NF_DROP;
+	}
+
+	pr_debug("Confirming conntrack %p\n", ct);
+	/* We have to check the DYING flag after unlink to prevent
+	 * a race against nf_ct_get_next_corpse() possibly called from
+	 * user context, else we insert an already 'dead' hash, blocking
+	 * further use of that particular connection -JM.
+	 */
+	nf_ct_del_from_dying_or_unconfirmed_list(ct);
+
+	if (unlikely(nf_ct_is_dying(ct))) {
+		nf_ct_add_to_dying_list(ct);
+		NF_CT_STAT_INC(net, insert_failed);
+		goto dying;
+	}
+
+	/* See if there's one in the list already, including reverse:
+	   NAT could have grabbed it without realizing, since we're
+	   not in the hash.  If there is, we lost race. */
+	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
+		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+				    zone, net))
+			goto out;
+
+	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
+		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+				    zone, net))
+			goto out;
+
+	/* Timer relative to confirmation time, not original
+	   setting time, otherwise we'd get timer wrap in
+	   weird delay cases. */
+	ct->timeout += nfct_time_stamp;
+
+	__nf_conntrack_insert_prepare(ct);
+
+	/* Since the lookup is lockless, hash insertion must be done after
+	 * starting the timer and setting the CONFIRMED bit. The RCU barriers
+	 * guarantee that no other CPU can find the conntrack before the above
+	 * stores are visible.
+	 */
+	__nf_conntrack_hash_insert(ct, hash, reply_hash);
+	nf_conntrack_double_unlock(hash, reply_hash);
+	local_bh_enable();
+
+	help = nfct_help(ct);
+	if (help && help->helper)
+		nf_conntrack_event_cache(IPCT_HELPER, ct);
+
+	nf_conntrack_event_cache(master_ct(ct) ?
+				 IPCT_RELATED : IPCT_NEW, ct);
+	return NF_ACCEPT;
+
+out:
+	/* Lost the race: try clash resolution before giving up. */
+	ret = nf_ct_resolve_clash(skb, h, reply_hash);
+dying:
+	nf_conntrack_double_unlock(hash, reply_hash);
+	local_bh_enable();
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
+
+/* Returns true if a connection correspondings to the tuple (required
+   for NAT). */
+int
+nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
+			 const struct nf_conn *ignored_conntrack)
+{
+	struct net *net = nf_ct_net(ignored_conntrack);
+	const struct nf_conntrack_zone *zone;
+	struct nf_conntrack_tuple_hash *h;
+	struct hlist_nulls_head *ct_hash;
+	unsigned int hash, hsize;
+	struct hlist_nulls_node *n;
+	struct nf_conn *ct;
+
+	zone = nf_ct_zone(ignored_conntrack);
+
+	rcu_read_lock();
+ begin:
+	nf_conntrack_get_ht(&ct_hash, &hsize);
+	hash = __hash_conntrack(net, tuple, hsize);
+
+	hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
+		ct = nf_ct_tuplehash_to_ctrack(h);
+
+		if (ct == ignored_conntrack)
+			continue;
+
+		/* Reap expired entries opportunistically during the walk. */
+		if (nf_ct_is_expired(ct)) {
+			nf_ct_gc_expired(ct);
+			continue;
+		}
+
+		if (nf_ct_key_equal(h, tuple, zone, net)) {
+			/* Tuple is taken already, so caller will need to find
+			 * a new source port to use.
+			 *
+			 * Only exception:
+			 * If the *original tuples* are identical, then both
+			 * conntracks refer to the same flow.
+			 * This is a rare situation, it can occur e.g. when
+			 * more than one UDP packet is sent from same socket
+			 * in different threads.
+			 *
+			 * Let nf_ct_resolve_clash() deal with this later.
+			 */
+			if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+					      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
+					      nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL))
+				continue;
+
+			NF_CT_STAT_INC_ATOMIC(net, found);
+			rcu_read_unlock();
+			return 1;
+		}
+	}
+
+	/* nulls value != hash means the entry moved to another chain
+	 * (SLAB_TYPESAFE_BY_RCU reuse) mid-walk: restart the lookup.
+	 */
+	if (get_nulls_value(n) != hash) {
+		NF_CT_STAT_INC_ATOMIC(net, search_restart);
+		goto begin;
+	}
+
+	rcu_read_unlock();
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
+
+#define NF_CT_EVICTION_RANGE	8
+
+/* There's a small race here where we may free a just-assured
+   connection.  Too bad: we're in trouble anyway. */
+/* Walk one hash chain and evict droppable (non-ASSURED, same-netns)
+ * entries.  Returns the number of connections deleted.
+ */
+static unsigned int early_drop_list(struct net *net,
+				    struct hlist_nulls_head *head)
+{
+	struct nf_conntrack_tuple_hash *h;
+	struct hlist_nulls_node *n;
+	unsigned int drops = 0;
+	struct nf_conn *tmp;
+
+	hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) {
+		tmp = nf_ct_tuplehash_to_ctrack(h);
+
+		/* Entries owned by flowtable offload are never evicted here. */
+		if (test_bit(IPS_OFFLOAD_BIT, &tmp->status))
+			continue;
+
+		if (nf_ct_is_expired(tmp)) {
+			nf_ct_gc_expired(tmp);
+			continue;
+		}
+
+		if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
+		    !net_eq(nf_ct_net(tmp), net) ||
+		    nf_ct_is_dying(tmp))
+			continue;
+
+		/* Object may be recycled concurrently; only proceed with a ref. */
+		if (!atomic_inc_not_zero(&tmp->ct_general.use))
+			continue;
+
+		/* kill only if still in same netns -- might have moved due to
+		 * SLAB_TYPESAFE_BY_RCU rules.
+		 *
+		 * We steal the timer reference.  If that fails timer has
+		 * already fired or someone else deleted it. Just drop ref
+		 * and move to next entry.
+		 */
+		if (net_eq(nf_ct_net(tmp), net) &&
+		    nf_ct_is_confirmed(tmp) &&
+		    nf_ct_delete(tmp, 0, 0))
+			drops++;
+
+		nf_ct_put(tmp);
+	}
+
+	return drops;
+}
+
+/* Table is full: try to evict an entry from up to NF_CT_EVICTION_RANGE
+ * buckets starting near @hash.  Returns true/false (as int) depending on
+ * whether at least one entry was dropped.
+ */
+static noinline int early_drop(struct net *net, unsigned int hash)
+{
+	unsigned int i, bucket;
+
+	for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
+		struct hlist_nulls_head *ct_hash;
+		unsigned int hsize, drops;
+
+		rcu_read_lock();
+		/* Table may be resized between iterations; re-read each time. */
+		nf_conntrack_get_ht(&ct_hash, &hsize);
+		if (!i)
+			bucket = reciprocal_scale(hash, hsize);
+		else
+			bucket = (bucket + 1) % hsize;
+
+		drops = early_drop_list(net, &ct_hash[bucket]);
+		rcu_read_unlock();
+
+		if (drops) {
+			NF_CT_STAT_ADD_ATOMIC(net, early_drop, drops);
+			return true;
+		}
+	}
+
+	return false;
+}
+
+/* The gc worker must leave an entry alone unless it is both confirmed
+ * and not already dying.
+ */
+static bool gc_worker_skip_ct(const struct nf_conn *ct)
+{
+	if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct))
+		return false;
+
+	return true;
+}
+
+/* Non-ASSURED entries may always be early-dropped; ASSURED ones only
+ * when their l4 tracker explicitly permits it via ->can_early_drop().
+ */
+static bool gc_worker_can_early_drop(const struct nf_conn *ct)
+{
+	const struct nf_conntrack_l4proto *proto;
+
+	if (!test_bit(IPS_ASSURED_BIT, &ct->status))
+		return true;
+
+	proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
+
+	return proto->can_early_drop && proto->can_early_drop(ct);
+}
+
+/* Deferred-work garbage collector: scan part of the hash table, reap
+ * expired entries, and when the table recently ran (nearly) full also
+ * early-drop droppable entries.  Reschedules itself.
+ */
+static void gc_worker(struct work_struct *work)
+{
+	unsigned long end_time = jiffies + GC_SCAN_MAX_DURATION;
+	unsigned int i, hashsz, nf_conntrack_max95 = 0;
+	unsigned long next_run = GC_SCAN_INTERVAL;
+	struct conntrack_gc_work *gc_work;
+	gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
+
+	/* Resume from where the previous (time-budgeted) run stopped. */
+	i = gc_work->next_bucket;
+	if (gc_work->early_drop)
+		nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;
+
+	do {
+		struct nf_conntrack_tuple_hash *h;
+		struct hlist_nulls_head *ct_hash;
+		struct hlist_nulls_node *n;
+		struct nf_conn *tmp;
+
+		rcu_read_lock();
+
+		nf_conntrack_get_ht(&ct_hash, &hashsz);
+		if (i >= hashsz) {
+			rcu_read_unlock();
+			break;
+		}
+
+		hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
+			struct net *net;
+
+			tmp = nf_ct_tuplehash_to_ctrack(h);
+
+			/* Offloaded flows: just refresh their timeout. */
+			if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
+				nf_ct_offload_timeout(tmp);
+				continue;
+			}
+
+			if (nf_ct_is_expired(tmp)) {
+				nf_ct_gc_expired(tmp);
+				continue;
+			}
+
+			/* Early drop only when the early_drop flag was set. */
+			if (nf_conntrack_max95 == 0 || gc_worker_skip_ct(tmp))
+				continue;
+
+			net = nf_ct_net(tmp);
+			if (atomic_read(&net->ct.count) < nf_conntrack_max95)
+				continue;
+
+			/* need to take reference to avoid possible races */
+			if (!atomic_inc_not_zero(&tmp->ct_general.use))
+				continue;
+
+			/* Re-check after taking the reference. */
+			if (gc_worker_skip_ct(tmp)) {
+				nf_ct_put(tmp);
+				continue;
+			}
+
+			if (gc_worker_can_early_drop(tmp))
+				nf_ct_kill(tmp);
+
+			nf_ct_put(tmp);
+		}
+
+		/* could check get_nulls_value() here and restart if ct
+		 * was moved to another chain.  But given gc is best-effort
+		 * we will just continue with next hash slot.
+		 */
+		rcu_read_unlock();
+		cond_resched();
+		i++;
+
+		/* Time budget exhausted: remember position, resume at once. */
+		if (time_after(jiffies, end_time) && i < hashsz) {
+			gc_work->next_bucket = i;
+			next_run = 0;
+			break;
+		}
+	} while (i < hashsz);
+
+	if (gc_work->exiting)
+		return;
+
+	/*
+	 * Eviction will normally happen from the packet path, and not
+	 * from this gc worker.
+	 *
+	 * This worker is only here to reap expired entries when system went
+	 * idle after a busy period.
+	 */
+	if (next_run) {
+		gc_work->early_drop = false;
+		gc_work->next_bucket = 0;
+	}
+	queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
+}
+
+/* Initialize the gc work item (deferrable); does not schedule it. */
+static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
+{
+	gc_work->exiting = false;
+	INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker);
+}
+
+/* Allocate and minimally initialize a conntrack object.
+ * @hash: precomputed tuple hash; stashed in the reply hnnode.pprev until
+ *        confirmation (see __nf_conntrack_confirm), which reuses it.
+ * Returns ERR_PTR(-ENOMEM) when the table is full or allocation fails.
+ */
+static struct nf_conn *
+__nf_conntrack_alloc(struct net *net,
+		     const struct nf_conntrack_zone *zone,
+		     const struct nf_conntrack_tuple *orig,
+		     const struct nf_conntrack_tuple *repl,
+		     gfp_t gfp, u32 hash)
+{
+	struct nf_conn *ct;
+
+	/* We don't want any race condition at early drop stage */
+	atomic_inc(&net->ct.count);
+
+	if (nf_conntrack_max &&
+	    unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
+		if (!early_drop(net, hash)) {
+			/* Arm the gc worker's early-drop mode for next run. */
+			if (!conntrack_gc_work.early_drop)
+				conntrack_gc_work.early_drop = true;
+			atomic_dec(&net->ct.count);
+			net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	/*
+	 * Do not use kmem_cache_zalloc(), as this cache uses
+	 * SLAB_TYPESAFE_BY_RCU.
+	 */
+	ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
+	if (ct == NULL)
+		goto out;
+
+	spin_lock_init(&ct->lock);
+	ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
+	ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
+	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
+	/* save hash for reusing when confirming */
+	*(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
+	ct->status = 0;
+	WRITE_ONCE(ct->timeout, 0);
+	write_pnet(&ct->ct_net, net);
+	/* Zero only the extension area between the two offset markers. */
+	memset(&ct->__nfct_init_offset, 0,
+	       offsetof(struct nf_conn, proto) -
+	       offsetof(struct nf_conn, __nfct_init_offset));
+
+	nf_ct_zone_add(ct, zone);
+
+	/* Because we use RCU lookups, we set ct_general.use to zero before
+	 * this is inserted in any list.
+	 */
+	atomic_set(&ct->ct_general.use, 0);
+	return ct;
+out:
+	atomic_dec(&net->ct.count);
+	return ERR_PTR(-ENOMEM);
+}
+
+/* Public allocation entry point; hash 0 means "no precomputed hash". */
+struct nf_conn *nf_conntrack_alloc(struct net *net,
+				   const struct nf_conntrack_zone *zone,
+				   const struct nf_conntrack_tuple *orig,
+				   const struct nf_conntrack_tuple *repl,
+				   gfp_t gfp)
+{
+	return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
+
+/* Release a conntrack object whose refcount already dropped to zero. */
+void nf_conntrack_free(struct nf_conn *ct)
+{
+	struct net *net = nf_ct_net(ct);
+
+	/* A freed object has refcnt == 0, that's
+	 * the golden rule for SLAB_TYPESAFE_BY_RCU
+	 */
+	WARN_ON(atomic_read(&ct->ct_general.use) != 0);
+
+	nf_ct_ext_destroy(ct);
+	kmem_cache_free(nf_conntrack_cachep, ct);
+	/* Order the free above against the per-netns counter decrement. */
+	smp_mb__before_atomic();
+	atomic_dec(&net->ct.count);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_free);
+
+
+/* Allocate a new conntrack: we return -ENOMEM if classification
+   failed due to stress.  Otherwise it really is unclassifiable. */
+static noinline struct nf_conntrack_tuple_hash *
+init_conntrack(struct net *net, struct nf_conn *tmpl,
+	       const struct nf_conntrack_tuple *tuple,
+	       struct sk_buff *skb,
+	       unsigned int dataoff, u32 hash)
+{
+	struct nf_conn *ct;
+	struct nf_conn_help *help;
+	struct nf_conntrack_tuple repl_tuple;
+	struct nf_conntrack_ecache *ecache;
+	struct nf_conntrack_expect *exp = NULL;
+	const struct nf_conntrack_zone *zone;
+	struct nf_conn_timeout *timeout_ext;
+	struct nf_conntrack_zone tmp;
+	int dir = 0;
+
+	if (!nf_ct_invert_tuple(&repl_tuple, tuple)) {
+		pr_debug("Can't invert tuple.\n");
+		return NULL;
+	}
+
+	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
+	ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
+				  hash);
+	if (IS_ERR(ct))
+		return (struct nf_conntrack_tuple_hash *)ct;
+		
+#ifdef CONFIG_FASTNAT_MODULE
+	/* NOTE(review): vendor fast-path state; reset all cached routes,
+	 * bridge ports and socket pointers for the fresh entry.
+	 */
+    RCU_INIT_POINTER(ct->fast_ct.fast_dst[IP_CT_DIR_ORIGINAL], NULL);
+    RCU_INIT_POINTER(ct->fast_ct.fast_dst[IP_CT_DIR_REPLY], NULL);
+    ct->fast_ct.fast_brport[IP_CT_DIR_ORIGINAL] = NULL;
+    ct->fast_ct.fast_brport[IP_CT_DIR_REPLY]    = NULL;
+    ct->fast_ct.isFast = 0; //CT_FAST_NOT
+    RCU_INIT_POINTER(ct->fast_ct.sk, NULL);
+#endif
+	/* Vendor per-direction packet/byte counters start at zero. */
+	ct->packet_info[IP_CT_DIR_ORIGINAL].packets = 0;
+    ct->packet_info[IP_CT_DIR_ORIGINAL].bytes   = 0;
+    ct->packet_info[IP_CT_DIR_REPLY].packets    = 0;
+    ct->packet_info[IP_CT_DIR_REPLY].bytes      = 0;
+	for(dir = IP_CT_DIR_ORIGINAL; dir < IP_CT_DIR_MAX; dir++)
+	{
+		ct->indev[dir] = NULL;
+		ct->outdev[dir] = NULL;
+	}
+	if (!nf_ct_add_synproxy(ct, tmpl)) {
+		nf_conntrack_free(ct);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
+
+	if (timeout_ext)
+		nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout),
+				      GFP_ATOMIC);
+
+	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
+	nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
+	nf_ct_labels_ext_add(ct);
+
+	/* Inherit event masks from the template, if any. */
+	ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
+	nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
+				 ecache ? ecache->expmask : 0,
+			     GFP_ATOMIC);
+
+	local_bh_disable();
+	if (net->ct.expect_count) {
+		spin_lock(&nf_conntrack_expect_lock);
+		exp = nf_ct_find_expectation(net, zone, tuple);
+		if (exp) {
+			pr_debug("expectation arrives ct=%p exp=%p\n",
+				 ct, exp);
+			/* Welcome, Mr. Bond.  We've been expecting you... */
+			__set_bit(IPS_EXPECTED_BIT, &ct->status);
+			/* exp->master safe, refcnt bumped in nf_ct_find_expectation */
+			ct->master = exp->master;
+			if (exp->helper) {
+				help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
+				if (help)
+					rcu_assign_pointer(help->helper, exp->helper);
+			}
+
+#ifdef CONFIG_NF_CONNTRACK_MARK
+			ct->mark = exp->master->mark;
+#endif
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+			ct->secmark = exp->master->secmark;
+#endif
+			NF_CT_STAT_INC(net, expect_new);
+		}
+		spin_unlock(&nf_conntrack_expect_lock);
+	}
+	if (!exp)
+		__nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
+
+	/* Now it is inserted into the unconfirmed list, bump refcount */
+	nf_conntrack_get(&ct->ct_general);
+	nf_ct_add_to_unconfirmed_list(ct);
+
+	local_bh_enable();
+
+	if (exp) {
+		if (exp->expectfn)
+			exp->expectfn(ct, exp);
+		nf_ct_expect_put(exp);
+	}
+
+	return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
+}
+
+/* On success, returns 0, sets skb->_nfct | ctinfo */
+/* Negative return means "too stressed" (allocation failure upstream). */
+static int
+resolve_normal_ct(struct nf_conn *tmpl,
+		  struct sk_buff *skb,
+		  unsigned int dataoff,
+		  u_int8_t protonum,
+		  const struct nf_hook_state *state)
+{
+	const struct nf_conntrack_zone *zone;
+	struct nf_conntrack_tuple tuple;
+	struct nf_conntrack_tuple_hash *h;
+	enum ip_conntrack_info ctinfo;
+	struct nf_conntrack_zone tmp;
+	struct nf_conn *ct;
+	u32 hash;
+
+	if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
+			     dataoff, state->pf, protonum, state->net,
+			     &tuple)) {
+		pr_debug("Can't get tuple\n");
+		return 0;
+	}
+
+	/* look for tuple match */
+	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
+	hash = hash_conntrack_raw(&tuple, state->net);
+	h = __nf_conntrack_find_get(state->net, zone, &tuple, hash);
+	if (!h) {
+		/* Miss: create a new (unconfirmed) entry, reusing @hash. */
+		h = init_conntrack(state->net, tmpl, &tuple,
+				   skb, dataoff, hash);
+		if (!h)
+			return 0;
+		if (IS_ERR(h))
+			return PTR_ERR(h);
+	}
+	ct = nf_ct_tuplehash_to_ctrack(h);
+
+	/* It exists; we have (non-exclusive) reference. */
+	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
+		ctinfo = IP_CT_ESTABLISHED_REPLY;
+	} else {
+		/* Once we've had two way comms, always ESTABLISHED. */
+		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
+			pr_debug("normal packet for %p\n", ct);
+			ctinfo = IP_CT_ESTABLISHED;
+		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
+			pr_debug("related packet for %p\n", ct);
+			ctinfo = IP_CT_RELATED;
+		} else {
+			pr_debug("new packet for %p\n", ct);
+			ctinfo = IP_CT_NEW;
+		}
+	}
+	nf_ct_set(skb, ct, ctinfo);
+	return 0;
+}
+
+/*
+ * icmp packets need special treatment to handle error messages that are
+ * related to a connection.
+ *
+ * Callers need to check if skb has a conntrack assigned when this
+ * helper returns; in such case skb belongs to an already known connection.
+ */
+static unsigned int __cold
+nf_conntrack_handle_icmp(struct nf_conn *tmpl,
+			 struct sk_buff *skb,
+			 unsigned int dataoff,
+			 u8 protonum,
+			 const struct nf_hook_state *state)
+{
+	int ret;
+
+	if (state->pf == NFPROTO_IPV4 && protonum == IPPROTO_ICMP)
+		ret = nf_conntrack_icmpv4_error(tmpl, skb, dataoff, state);
+#if IS_ENABLED(CONFIG_IPV6)
+	else if (state->pf == NFPROTO_IPV6 && protonum == IPPROTO_ICMPV6)
+		ret = nf_conntrack_icmpv6_error(tmpl, skb, dataoff, state);
+#endif
+	else
+		return NF_ACCEPT;
+
+	/* ret <= 0 means the packet was rejected as invalid/error. */
+	if (ret <= 0)
+		NF_CT_STAT_INC_ATOMIC(state->net, error);
+
+	return ret;
+}
+
+/* Fallback tracker for protocols without a dedicated l4 handler:
+ * refresh the generic timeout (per-template or per-netns) and accept.
+ */
+static int generic_packet(struct nf_conn *ct, struct sk_buff *skb,
+			  enum ip_conntrack_info ctinfo)
+{
+	const unsigned int *timeout;
+
+	timeout = nf_ct_timeout_lookup(ct);
+	if (!timeout)
+		timeout = &nf_generic_pernet(nf_ct_net(ct))->timeout;
+
+	nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
+	return NF_ACCEPT;
+}
+
+/* Returns verdict for packet, or -1 for invalid. */
+/* Dispatch to the protocol-specific tracker; unknown protocols fall
+ * through to generic_packet().
+ */
+static int nf_conntrack_handle_packet(struct nf_conn *ct,
+				      struct sk_buff *skb,
+				      unsigned int dataoff,
+				      enum ip_conntrack_info ctinfo,
+				      const struct nf_hook_state *state)
+{
+	switch (nf_ct_protonum(ct)) {
+	case IPPROTO_TCP:
+		return nf_conntrack_tcp_packet(ct, skb, dataoff,
+					       ctinfo, state);
+	case IPPROTO_UDP:
+		return nf_conntrack_udp_packet(ct, skb, dataoff,
+					       ctinfo, state);
+	case IPPROTO_ICMP:
+		return nf_conntrack_icmp_packet(ct, skb, ctinfo, state);
+#if IS_ENABLED(CONFIG_IPV6)
+	case IPPROTO_ICMPV6:
+		return nf_conntrack_icmpv6_packet(ct, skb, ctinfo, state);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE
+	case IPPROTO_UDPLITE:
+		return nf_conntrack_udplite_packet(ct, skb, dataoff,
+						   ctinfo, state);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+	case IPPROTO_SCTP:
+		return nf_conntrack_sctp_packet(ct, skb, dataoff,
+						ctinfo, state);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+	case IPPROTO_DCCP:
+		return nf_conntrack_dccp_packet(ct, skb, dataoff,
+						ctinfo, state);
+#endif
+#ifdef CONFIG_NF_CT_PROTO_GRE
+	case IPPROTO_GRE:
+		return nf_conntrack_gre_packet(ct, skb, dataoff,
+					       ctinfo, state);
+#endif
+	}
+
+	return generic_packet(ct, skb, ctinfo);
+}
+
+#ifdef CONFIG_FASTNAT_MODULE
+/* Exported shim so the vendor fastnat module can run the normal
+ * l4 state machine on a fast-path packet.
+ */
+int nf_conntrack_handle_packet_fast(struct nf_conn *ct,
+				      struct sk_buff *skb,
+				      unsigned int dataoff,
+				      enum ip_conntrack_info ctinfo,
+				      const struct nf_hook_state *state)
+{
+	return nf_conntrack_handle_packet(ct, skb, dataoff, ctinfo, state);
+}
+
+/* Drop a conntrack reference; on last put, release the vendor fast-path
+ * state (cached dsts/socket) before destroying the entry.
+ * NOTE(review): overrides the generic nf_conntrack_put() when
+ * CONFIG_FASTNAT_MODULE is set -- confirm the generic one is compiled out.
+ */
+void nf_conntrack_put(struct nf_conntrack *nfct)
+{ 
+    if (nfct && atomic_dec_and_test(&nfct->use)){
+        fast_conn_release((struct nf_conn *)nfct, RELEASE_ALL_DST | RELEASE_ALL_SK);
+        nf_conntrack_destroy(nfct);
+    }
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_put);
+#endif
+
+
+
+/* Main conntrack hook entry point: classify @skb, find or create its
+ * conntrack entry and run the l4 state machine.  Returns an NF_* verdict.
+ */
+unsigned int
+nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state)
+{
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn *ct, *tmpl;
+	u_int8_t protonum;
+	int dataoff, ret;
+
+	tmpl = nf_ct_get(skb, &ctinfo);
+	if (tmpl || ctinfo == IP_CT_UNTRACKED) {
+		/* Previously seen (loopback or untracked)?  Ignore. */
+		if ((tmpl && !nf_ct_is_template(tmpl)) ||
+		     ctinfo == IP_CT_UNTRACKED)
+			return NF_ACCEPT;
+		skb->_nfct = 0;
+	}
+
+	/* rcu_read_lock()ed by nf_hook_thresh */
+	dataoff = get_l4proto(skb, skb_network_offset(skb), state->pf, &protonum);
+	if (dataoff <= 0) {
+		pr_debug("not prepared to track yet or error occurred\n");
+		NF_CT_STAT_INC_ATOMIC(state->net, invalid);
+		ret = NF_ACCEPT;
+		goto out;
+	}
+
+	if (protonum == IPPROTO_ICMP || protonum == IPPROTO_ICMPV6) {
+		ret = nf_conntrack_handle_icmp(tmpl, skb, dataoff,
+					       protonum, state);
+		if (ret <= 0) {
+			ret = -ret;
+			goto out;
+		}
+		/* ICMP[v6] protocol trackers may assign one conntrack. */
+		if (skb->_nfct)
+			goto out;
+	}
+repeat:
+	ret = resolve_normal_ct(tmpl, skb, dataoff,
+				protonum, state);
+	if (ret < 0) {
+		/* Too stressed to deal. */
+		NF_CT_STAT_INC_ATOMIC(state->net, drop);
+		ret = NF_DROP;
+		goto out;
+	}
+
+	ct = nf_ct_get(skb, &ctinfo);
+	if (!ct) {
+		/* Not valid part of a connection */
+		NF_CT_STAT_INC_ATOMIC(state->net, invalid);
+		ret = NF_ACCEPT;
+		goto out;
+	}
+
+	ret = nf_conntrack_handle_packet(ct, skb, dataoff, ctinfo, state);
+	if (ret <= 0) {
+		/* Invalid: inverse of the return code tells
+		 * the netfilter core what to do */
+		pr_debug("nf_conntrack_in: Can't track with proto module\n");
+		nf_conntrack_put(&ct->ct_general);
+		skb->_nfct = 0;
+		/* Special case: TCP tracker reports an attempt to reopen a
+		 * closed/aborted connection. We have to go back and create a
+		 * fresh conntrack.
+		 */
+		if (ret == -NF_REPEAT)
+			goto repeat;
+
+		NF_CT_STAT_INC_ATOMIC(state->net, invalid);
+		if (ret == -NF_DROP)
+			NF_CT_STAT_INC_ATOMIC(state->net, drop);
+
+		ret = -ret;
+		goto out;
+	}
+
+	/* First reply ever seen: emit the IPCT_REPLY event exactly once. */
+	if (ctinfo == IP_CT_ESTABLISHED_REPLY &&
+	    !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
+		nf_conntrack_event_cache(IPCT_REPLY, ct);
+out:
+	if (tmpl)
+		nf_ct_put(tmpl);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_in);
+
+/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
+   implicitly racy: see __nf_conntrack_confirm */
+void nf_conntrack_alter_reply(struct nf_conn *ct,
+			      const struct nf_conntrack_tuple *newreply)
+{
+	struct nf_conn_help *help = nfct_help(ct);
+
+	/* Should be unconfirmed, so not in hash table yet */
+	WARN_ON(nf_ct_is_confirmed(ct));
+
+	pr_debug("Altering reply tuple of %p to ", ct);
+	nf_ct_dump_tuple(newreply);
+
+	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
+	/* Expected connections keep their existing helper assignment. */
+	if (ct->master || (help && !hlist_empty(&help->expectations)))
+		return;
+
+	rcu_read_lock();
+	__nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
+
+/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
+void __nf_ct_refresh_acct(struct nf_conn *ct,
+			  enum ip_conntrack_info ctinfo,
+			  const struct sk_buff *skb,
+			  u32 extra_jiffies,
+			  bool do_acct)
+{
+	/* Only update if this is not a fixed timeout */
+	if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
+		goto acct;
+
+	/* If not in hash table, timer will not be active yet */
+	if (nf_ct_is_confirmed(ct))
+		extra_jiffies += nfct_time_stamp;
+
+	/* Avoid a redundant store (and cacheline dirtying) when unchanged. */
+	if (READ_ONCE(ct->timeout) != extra_jiffies)
+		WRITE_ONCE(ct->timeout, extra_jiffies);
+acct:
+	if (do_acct)
+		nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), skb->len);
+}
+EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
+
+/* Account this packet, then delete the conntrack entry.
+ * Returns the result of nf_ct_delete().
+ */
+bool nf_ct_kill_acct(struct nf_conn *ct,
+		     enum ip_conntrack_info ctinfo,
+		     const struct sk_buff *skb)
+{
+	nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), skb->len);
+
+	return nf_ct_delete(ct, 0, 0);
+}
+EXPORT_SYMBOL_GPL(nf_ct_kill_acct);
+
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
+
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_conntrack.h>
+#include <linux/mutex.h>
+
+/* Generic function for tcp/udp/sctp/dccp and alike. */
+/* The u.tcp.port member aliases all port-based protocol unions, so this
+ * works for every protocol listed above.  Returns 0 or -1 on nla failure.
+ */
+int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
+			       const struct nf_conntrack_tuple *tuple)
+{
+	if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
+	    nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);
+
+/* Netlink attribute policy shared by all port-based l4 protocols. */
+const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
+	[CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
+	[CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
+};
+EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);
+
+/* Fill the port fields of @t from netlink attributes, honouring the
+ * CTA_FILTER flags: a flagged port must be present or -EINVAL results.
+ */
+int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
+			       struct nf_conntrack_tuple *t,
+			       u_int32_t flags)
+{
+	const struct nlattr *attr;
+
+	if (flags & CTA_FILTER_FLAG(CTA_PROTO_SRC_PORT)) {
+		attr = tb[CTA_PROTO_SRC_PORT];
+		if (!attr)
+			return -EINVAL;
+
+		t->src.u.tcp.port = nla_get_be16(attr);
+	}
+
+	if (flags & CTA_FILTER_FLAG(CTA_PROTO_DST_PORT)) {
+		attr = tb[CTA_PROTO_DST_PORT];
+		if (!attr)
+			return -EINVAL;
+
+		t->dst.u.tcp.port = nla_get_be16(attr);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
+
+/* Size of the port attribute policy payload. */
+unsigned int nf_ct_port_nlattr_tuple_size(void)
+{
+	/* Computed lazily; a concurrent first call recomputes the same
+	 * value, so the unsynchronized store is benign.
+	 */
+	static unsigned int size __read_mostly;
+
+	if (!size)
+		size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
+
+	return size;
+}
+EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
+#endif
+
+/* Used by ipt_REJECT and ip6t_REJECT. */
+/* Attach @skb's conntrack to @nskb with the direction inverted, since
+ * the generated packet travels the opposite way.
+ */
+static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
+{
+	struct nf_conn *ct;
+	enum ip_conntrack_info ctinfo;
+
+	/* This ICMP is in reverse direction to the packet which caused it */
+	ct = nf_ct_get(skb, &ctinfo);
+	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
+		ctinfo = IP_CT_RELATED_REPLY;
+	else
+		ctinfo = IP_CT_RELATED;
+
+	/* Attach to new skbuff, and increment count */
+	nf_ct_set(nskb, ct, ctinfo);
+	nf_conntrack_get(skb_nfct(nskb));
+}
+
+/* Re-run the conntrack lookup for a packet re-injected from userspace:
+ * undo any NAT in the tuple, look up the (possibly clashing) confirmed
+ * entry, attach it to @skb and re-apply NAT manglings.
+ * Returns 0 on success/no-op, -1 when the packet must be dropped.
+ */
+static int __nf_conntrack_update(struct net *net, struct sk_buff *skb,
+				 struct nf_conn *ct,
+				 enum ip_conntrack_info ctinfo)
+{
+	struct nf_conntrack_tuple_hash *h;
+	struct nf_conntrack_tuple tuple;
+	struct nf_nat_hook *nat_hook;
+	unsigned int status;
+	int dataoff;
+	u16 l3num;
+	u8 l4num;
+
+	l3num = nf_ct_l3num(ct);
+
+	dataoff = get_l4proto(skb, skb_network_offset(skb), l3num, &l4num);
+	if (dataoff <= 0)
+		return -1;
+
+	if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num,
+			     l4num, net, &tuple))
+		return -1;
+
+	/* Rebuild the pre-NAT tuple from the original direction. */
+	if (ct->status & IPS_SRC_NAT) {
+		memcpy(tuple.src.u3.all,
+		       ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.all,
+		       sizeof(tuple.src.u3.all));
+		tuple.src.u.all =
+			ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all;
+	}
+
+	if (ct->status & IPS_DST_NAT) {
+		memcpy(tuple.dst.u3.all,
+		       ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.all,
+		       sizeof(tuple.dst.u3.all));
+		tuple.dst.u.all =
+			ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all;
+	}
+
+	h = nf_conntrack_find_get(net, nf_ct_zone(ct), &tuple);
+	if (!h)
+		return 0;
+
+	/* Store status bits of the conntrack that is clashing to re-do NAT
+	 * mangling according to what it has been done already to this packet.
+	 */
+	status = ct->status;
+
+	/* Swap the skb's conntrack for the confirmed winner entry. */
+	nf_ct_put(ct);
+	ct = nf_ct_tuplehash_to_ctrack(h);
+	nf_ct_set(skb, ct, ctinfo);
+
+	nat_hook = rcu_dereference(nf_nat_hook);
+	if (!nat_hook)
+		return 0;
+
+	if (status & IPS_SRC_NAT &&
+	    nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_SRC,
+				IP_CT_DIR_ORIGINAL) == NF_DROP)
+		return -1;
+
+	if (status & IPS_DST_NAT &&
+	    nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_DST,
+				IP_CT_DIR_ORIGINAL) == NF_DROP)
+		return -1;
+
+	return 0;
+}
+
+/* This packet is coming from userspace via nf_queue, complete the packet
+ * processing after the helper invocation in nf_confirm().
+ *
+ * Returns 0 on success / not-applicable, -1 when the packet must be
+ * dropped.
+ */
+static int nf_confirm_cthelper(struct sk_buff *skb, struct nf_conn *ct,
+			       enum ip_conntrack_info ctinfo)
+{
+	const struct nf_conntrack_helper *helper;
+	const struct nf_conn_help *help;
+	int protoff;
+
+	help = nfct_help(ct);
+	if (!help)
+		return 0;
+
+	/* help->helper may be NULL (never assigned, or the helper was
+	 * unregistered concurrently); dereferencing it unconditionally
+	 * would crash.  Mainline carries this NULL check.
+	 */
+	helper = rcu_dereference(help->helper);
+	if (!helper)
+		return 0;
+
+	if (!(helper->flags & NF_CT_HELPER_F_USERSPACE))
+		return 0;
+
+	/* Locate the transport header for sequence adjustment. */
+	switch (nf_ct_l3num(ct)) {
+	case NFPROTO_IPV4:
+		protoff = skb_network_offset(skb) + ip_hdrlen(skb);
+		break;
+#if IS_ENABLED(CONFIG_IPV6)
+	case NFPROTO_IPV6: {
+		__be16 frag_off;
+		u8 pnum;
+
+		pnum = ipv6_hdr(skb)->nexthdr;
+		protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum,
+					   &frag_off);
+		/* Bail out on malformed headers or non-first fragments. */
+		if (protoff < 0 || (frag_off & htons(~0x7)) != 0)
+			return 0;
+		break;
+	}
+#endif
+	default:
+		return 0;
+	}
+
+	if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
+	    !nf_is_loopback_packet(skb)) {
+		if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) {
+			NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
+			return -1;
+		}
+	}
+
+	/* We've seen it coming out the other side: confirm it */
+	return nf_conntrack_confirm(skb) == NF_DROP ? -1 : 0;
+}
+
+/* Post-nf_queue fixup: refresh the (possibly stale) conntrack binding of
+ * a re-injected packet, then finish userspace-helper processing.
+ */
+static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
+{
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn *ct;
+	int err;
+
+	ct = nf_ct_get(skb, &ctinfo);
+	if (!ct)
+		return 0;
+
+	if (!nf_ct_is_confirmed(ct)) {
+		err = __nf_conntrack_update(net, skb, ct, ctinfo);
+		if (err < 0)
+			return err;
+
+		/* __nf_conntrack_update() may have re-bound skb's conntrack. */
+		ct = nf_ct_get(skb, &ctinfo);
+	}
+
+	return nf_confirm_cthelper(skb, ct, ctinfo);
+}
+
+/* Copy the tuple seen by this skb's flow into @dst_tuple, either from the
+ * attached conntrack or via an IPv4 table lookup.  Returns false when no
+ * matching connection can be determined.
+ */
+static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+				       const struct sk_buff *skb)
+{
+	const struct nf_conntrack_tuple *src_tuple;
+	const struct nf_conntrack_tuple_hash *hash;
+	struct nf_conntrack_tuple srctuple;
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn *ct;
+
+	ct = nf_ct_get(skb, &ctinfo);
+	if (ct) {
+		src_tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
+		memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
+		return true;
+	}
+
+	/* No attached entry: parse the packet and look it up (IPv4 only). */
+	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
+			       NFPROTO_IPV4, dev_net(skb->dev),
+			       &srctuple))
+		return false;
+
+	hash = nf_conntrack_find_get(dev_net(skb->dev),
+				     &nf_ct_zone_dflt,
+				     &srctuple);
+	if (!hash)
+		return false;
+
+	/* Return the tuple of the opposite direction to the one found. */
+	ct = nf_ct_tuplehash_to_ctrack(hash);
+	src_tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
+	memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
+	nf_ct_put(ct);
+
+	return true;
+}
+
+/* Bring out ya dead! */
+/* Scan the hash table from *bucket for the next conntrack matched by
+ * @iter.  Returns it with an extra reference held (caller must put it),
+ * or NULL when the table is exhausted; *bucket records the resume point.
+ */
+static struct nf_conn *
+get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
+		void *data, unsigned int *bucket)
+{
+	struct nf_conntrack_tuple_hash *h;
+	struct nf_conn *ct;
+	struct hlist_nulls_node *n;
+	spinlock_t *lockp;
+
+	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
+		struct hlist_nulls_head *hslot = &nf_conntrack_hash[*bucket];
+
+		if (hlist_nulls_empty(hslot))
+			continue;
+
+		lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
+		local_bh_disable();
+		nf_conntrack_lock(lockp);
+		hlist_nulls_for_each_entry(h, n, hslot, hnnode) {
+			if (NF_CT_DIRECTION(h) != IP_CT_DIR_REPLY)
+				continue;
+			/* All nf_conn objects are added to hash table twice, one
+			 * for original direction tuple, once for the reply tuple.
+			 *
+			 * Exception: In the IPS_NAT_CLASH case, only the reply
+			 * tuple is added (the original tuple already existed for
+			 * a different object).
+			 *
+			 * We only need to call the iterator once for each
+			 * conntrack, so we just use the 'reply' direction
+			 * tuple while iterating.
+			 */
+			ct = nf_ct_tuplehash_to_ctrack(h);
+			if (iter(ct, data))
+				goto found;
+		}
+		spin_unlock(lockp);
+		local_bh_enable();
+		cond_resched();
+	}
+
+	return NULL;
+found:
+	/* Bump the refcount under the chain lock before returning. */
+	atomic_inc(&ct->ct_general.use);
+	spin_unlock(lockp);
+	local_bh_enable();
+	return ct;
+}
+
+/* Delete every conntrack entry for which @iter returns nonzero.
+ * @portid/@report are forwarded to nf_ct_delete() for netlink event
+ * reporting.  May sleep; serialized against hash resizing via
+ * nf_conntrack_mutex.
+ */
+static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
+				  void *data, u32 portid, int report)
+{
+	unsigned int bucket = 0;
+	struct nf_conn *ct;
+
+	might_sleep();
+
+	mutex_lock(&nf_conntrack_mutex);
+	while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
+		/* Time to push up daises... */
+
+		nf_ct_delete(ct, portid, report);
+		nf_ct_put(ct);	/* drop the ref taken by get_next_corpse() */
+		cond_resched();
+	}
+	mutex_unlock(&nf_conntrack_mutex);
+}
+
+/* Closure used by iter_net_only() to restrict a table-wide iteration
+ * to entries belonging to a single network namespace.
+ */
+struct iter_data {
+	int (*iter)(struct nf_conn *i, void *data);	/* wrapped callback */
+	void *data;					/* callback argument */
+	struct net *net;				/* namespace filter */
+};
+
+/* Wrapper callback: only forward to the real iterator when the entry
+ * belongs to the namespace captured in struct iter_data.
+ */
+static int iter_net_only(struct nf_conn *i, void *data)
+{
+	const struct iter_data *d = data;
+
+	return net_eq(d->net, nf_ct_net(i)) ? d->iter(i, d->data) : 0;
+}
+
+/* Mark every entry on each CPU's unconfirmed list as dying so it will
+ * never be inserted into the main conntrack table.
+ */
+static void
+__nf_ct_unconfirmed_destroy(struct net *net)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct nf_conntrack_tuple_hash *h;
+		struct hlist_nulls_node *n;
+		struct ct_pcpu *pcpu;
+
+		pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
+
+		spin_lock_bh(&pcpu->lock);
+		hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
+			struct nf_conn *ct;
+
+			ct = nf_ct_tuplehash_to_ctrack(h);
+
+			/* we cannot call iter() on unconfirmed list, the
+			 * owning cpu can reallocate ct->ext at any time.
+			 */
+			set_bit(IPS_DYING_BIT, &ct->status);
+		}
+		spin_unlock_bh(&pcpu->lock);
+		cond_resched();
+	}
+}
+
+/* Public entry point: kill all unconfirmed conntracks of @net, flush
+ * queued packets holding conntrack references, then wait one net RCU
+ * grace period so in-flight packets observe the dying bit.
+ */
+void nf_ct_unconfirmed_destroy(struct net *net)
+{
+	might_sleep();
+
+	if (atomic_read(&net->ct.count) > 0) {
+		__nf_ct_unconfirmed_destroy(net);
+		nf_queue_nf_hook_drop(net);
+		synchronize_net();
+	}
+}
+EXPORT_SYMBOL_GPL(nf_ct_unconfirmed_destroy);
+
+/* Delete every conntrack entry of @net for which @iter returns nonzero.
+ * @portid/@report are forwarded for netlink event reporting.  No-op for
+ * namespaces without any conntrack entries.  May sleep.
+ */
+void nf_ct_iterate_cleanup_net(struct net *net,
+			       int (*iter)(struct nf_conn *i, void *data),
+			       void *data, u32 portid, int report)
+{
+	struct iter_data d = {
+		.iter = iter,
+		.data = data,
+		.net  = net,
+	};
+
+	might_sleep();
+
+	if (atomic_read(&net->ct.count) == 0)
+		return;
+
+	nf_ct_iterate_cleanup(iter_net_only, &d, portid, report);
+}
+EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup_net);
+
+/**
+ * nf_ct_iterate_destroy - destroy unconfirmed conntracks and iterate table
+ * @iter: callback to invoke for each conntrack
+ * @data: data to pass to @iter
+ *
+ * Like nf_ct_iterate_cleanup, but first marks conntracks on the
+ * unconfirmed list as dying (so they will not be inserted into
+ * main table).
+ *
+ * Can only be called in module exit path.
+ */
+void
+nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data)
+{
+	struct net *net;
+
+	/* Walk every live namespace under the netns list lock. */
+	down_read(&net_rwsem);
+	for_each_net(net) {
+		if (atomic_read(&net->ct.count) == 0)
+			continue;
+		__nf_ct_unconfirmed_destroy(net);
+		nf_queue_nf_hook_drop(net);
+	}
+	up_read(&net_rwsem);
+
+	/* Need to wait for netns cleanup worker to finish, if its
+	 * running -- it might have deleted a net namespace from
+	 * the global list, so our __nf_ct_unconfirmed_destroy() might
+	 * not have affected all namespaces.
+	 */
+	net_ns_barrier();
+
+	/* a conntrack could have been unlinked from unconfirmed list
+	 * before we grabbed pcpu lock in __nf_ct_unconfirmed_destroy().
+	 * This makes sure its inserted into conntrack table.
+	 */
+	synchronize_net();
+
+	/* Now every remaining entry is in the main table; sweep it. */
+	nf_ct_iterate_cleanup(iter, data, 0, 0);
+}
+EXPORT_SYMBOL_GPL(nf_ct_iterate_destroy);
+
+/* Iterator callback: match every conntrack that belongs to the network
+ * namespace passed via @data.
+ */
+static int kill_all(struct nf_conn *i, void *data)
+{
+	struct net *net = data;
+
+	return net_eq(nf_ct_net(i), net);
+}
+
+/* Free a hash table allocated by nf_ct_alloc_hashtable(): vfree() for
+ * vmalloc'd tables, free_pages() for page allocations.  @size is the
+ * slot count, used to recompute the page order.
+ */
+void nf_ct_free_hashtable(void *hash, unsigned int size)
+{
+	if (!is_vmalloc_addr(hash)) {
+		free_pages((unsigned long)hash,
+			   get_order(sizeof(struct hlist_head) * size));
+		return;
+	}
+
+	vfree(hash);
+}
+EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
+
+/* First stage of module teardown: stop the garbage-collection worker
+ * from rescheduling itself and detach the skb->conntrack attach hook.
+ */
+void nf_conntrack_cleanup_start(void)
+{
+	conntrack_gc_work.exiting = true;
+	RCU_INIT_POINTER(ip_ct_attach, NULL);
+}
+
+/* Final stage of module teardown.  Subsystems are torn down in the
+ * reverse order of their initialization in nf_conntrack_init_start().
+ */
+void nf_conntrack_cleanup_end(void)
+{
+	RCU_INIT_POINTER(nf_ct_hook, NULL);
+	/* gc worker was told to exit in cleanup_start(); wait for it. */
+	cancel_delayed_work_sync(&conntrack_gc_work.dwork);
+	kvfree(nf_conntrack_hash);
+
+	nf_conntrack_proto_fini();
+	nf_conntrack_seqadj_fini();
+	nf_conntrack_labels_fini();
+	nf_conntrack_helper_fini();
+	nf_conntrack_timeout_fini();
+	nf_conntrack_ecache_fini();
+	nf_conntrack_tstamp_fini();
+	nf_conntrack_acct_fini();
+	nf_conntrack_expect_fini();
+
+	kmem_cache_destroy(nf_conntrack_cachep);
+}
+
+/*
+ * Mishearing the voices in his head, our hero wonders how he's
+ * supposed to kill the mall.
+ */
+/* Single-namespace teardown: wrap @net in a one-element list and reuse
+ * the batched nf_conntrack_cleanup_net_list() path.
+ */
+void nf_conntrack_cleanup_net(struct net *net)
+{
+	LIST_HEAD(single);
+
+	list_add(&net->exit_list, &single);
+	nf_conntrack_cleanup_net_list(&single);
+}
+
+/* Tear down conntrack state for every namespace on @net_exit_list.
+ * Retries the sweep until each namespace's entry count drops to zero,
+ * since packets in flight can still hold references.
+ */
+void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
+{
+	int busy;
+	struct net *net;
+
+	/*
+	 * This makes sure all current packets have passed through
+	 *  netfilter framework.  Roll on, two-stage module
+	 *  delete...
+	 */
+	synchronize_net();
+i_see_dead_people:
+	busy = 0;
+	list_for_each_entry(net, net_exit_list, exit_list) {
+		nf_ct_iterate_cleanup(kill_all, net, 0, 0);
+		if (atomic_read(&net->ct.count) != 0)
+			busy = 1;
+	}
+	if (busy) {
+		/* Entries still referenced; yield and retry the sweep. */
+		schedule();
+		goto i_see_dead_people;
+	}
+
+	/* All entries gone -- release per-namespace resources. */
+	list_for_each_entry(net, net_exit_list, exit_list) {
+		nf_conntrack_proto_pernet_fini(net);
+		nf_conntrack_ecache_pernet_fini(net);
+		nf_conntrack_expect_pernet_fini(net);
+		free_percpu(net->ct.stat);
+		free_percpu(net->ct.pcpu_lists);
+	}
+}
+
+/* Allocate a zeroed hash table of hlist_nulls heads.
+ * @sizep: in: requested slot count; out: count rounded up to fill whole
+ *         pages.
+ * @nulls: when nonzero, initialize each slot with a nulls marker
+ *         encoding its bucket index.
+ * Returns the table or NULL on overflow/allocation failure.
+ */
+void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
+{
+	struct hlist_nulls_head *hash;
+	unsigned int nr_slots, i;
+
+	/* Guard the nr_slots * sizeof() multiplication against overflow. */
+	if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
+		return NULL;
+
+	BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
+	nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
+
+	hash = kvcalloc(nr_slots, sizeof(struct hlist_nulls_head), GFP_KERNEL);
+
+	if (hash && nulls)
+		for (i = 0; i < nr_slots; i++)
+			INIT_HLIST_NULLS_HEAD(&hash[i], i);
+
+	return hash;
+}
+EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
+
+/* Resize the global conntrack hash table to @hashsize slots, rehashing
+ * every existing entry.  All bucket locks plus the generation seqcount
+ * are taken so concurrent lookups retry instead of seeing a torn table.
+ * Returns 0 on success (or if the size is unchanged), -EINVAL/-ENOMEM
+ * on error.
+ */
+int nf_conntrack_hash_resize(unsigned int hashsize)
+{
+	int i, bucket;
+	unsigned int old_size;
+	struct hlist_nulls_head *hash, *old_hash;
+	struct nf_conntrack_tuple_hash *h;
+	struct nf_conn *ct;
+
+	if (!hashsize)
+		return -EINVAL;
+
+	/* Allocate before taking any locks; may round hashsize up. */
+	hash = nf_ct_alloc_hashtable(&hashsize, 1);
+	if (!hash)
+		return -ENOMEM;
+
+	mutex_lock(&nf_conntrack_mutex);
+	old_size = nf_conntrack_htable_size;
+	if (old_size == hashsize) {
+		mutex_unlock(&nf_conntrack_mutex);
+		kvfree(hash);
+		return 0;
+	}
+
+	local_bh_disable();
+	nf_conntrack_all_lock();
+	write_seqcount_begin(&nf_conntrack_generation);
+
+	/* Lookups in the old hash might happen in parallel, which means we
+	 * might get false negatives during connection lookup. New connections
+	 * created because of a false negative won't make it into the hash
+	 * though since that required taking the locks.
+	 */
+
+	/* Move every entry into its new bucket (hash depends on size). */
+	for (i = 0; i < nf_conntrack_htable_size; i++) {
+		while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {
+			h = hlist_nulls_entry(nf_conntrack_hash[i].first,
+					      struct nf_conntrack_tuple_hash, hnnode);
+			ct = nf_ct_tuplehash_to_ctrack(h);
+			hlist_nulls_del_rcu(&h->hnnode);
+			bucket = __hash_conntrack(nf_ct_net(ct),
+						  &h->tuple, hashsize);
+			hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
+		}
+	}
+	old_size = nf_conntrack_htable_size;
+	old_hash = nf_conntrack_hash;
+
+	nf_conntrack_hash = hash;
+	nf_conntrack_htable_size = hashsize;
+
+	write_seqcount_end(&nf_conntrack_generation);
+	nf_conntrack_all_unlock();
+	local_bh_enable();
+
+	mutex_unlock(&nf_conntrack_mutex);
+
+	/* Wait out RCU readers still traversing the old table. */
+	synchronize_net();
+	kvfree(old_hash);
+	return 0;
+}
+
+/* module_param set handler for the conntrack hashsize parameter.
+ * Only allowed from the initial network namespace; before the table
+ * exists it just stores the value, afterwards it triggers a resize.
+ */
+int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp)
+{
+	unsigned int hsize;
+	int err;
+
+	if (current->nsproxy->net_ns != &init_net)
+		return -EOPNOTSUPP;
+
+	/* On boot, we can set this without any fancy locking. */
+	if (!nf_conntrack_hash)
+		return param_set_uint(val, kp);
+
+	err = kstrtouint(val, 0, &hsize);
+
+	return err ? err : nf_conntrack_hash_resize(hsize);
+}
+
+/* Worst-case size of all conntrack extensions combined.  Used only in a
+ * BUILD_BUG_ON below: struct nf_ct_ext stores offsets/sizes in u8, so
+ * the total must fit in 255 bytes.
+ *
+ * Fix: dropped the stray ';' after the closing brace, which formed an
+ * empty file-scope declaration (rejected/warned by -Wpedantic).
+ */
+static __always_inline unsigned int total_extension_size(void)
+{
+	/* remember to add new extensions below */
+	BUILD_BUG_ON(NF_CT_EXT_NUM > 9);
+
+	return sizeof(struct nf_ct_ext) +
+	       sizeof(struct nf_conn_help)
+#if IS_ENABLED(CONFIG_NF_NAT)
+		+ sizeof(struct nf_conn_nat)
+#endif
+		+ sizeof(struct nf_conn_seqadj)
+		+ sizeof(struct nf_conn_acct)
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+		+ sizeof(struct nf_conntrack_ecache)
+#endif
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+		+ sizeof(struct nf_conn_tstamp)
+#endif
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+		+ sizeof(struct nf_conn_timeout)
+#endif
+#ifdef CONFIG_NF_CONNTRACK_LABELS
+		+ sizeof(struct nf_conn_labels)
+#endif
+#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
+		+ sizeof(struct nf_conn_synproxy)
+#endif
+	;
+}
+
+/* Global (non-pernet) conntrack initialization: size and allocate the
+ * hash table, create the nf_conn slab cache, bring up all extension
+ * subsystems, and start the garbage-collection worker.
+ * Returns 0 on success or a negative errno, unwinding in reverse order
+ * on failure.
+ */
+int nf_conntrack_init_start(void)
+{
+	unsigned long nr_pages = totalram_pages();
+	int max_factor = 8;
+	int ret = -ENOMEM;
+	int i;
+
+	/* struct nf_ct_ext uses u8 to store offsets/size */
+	BUILD_BUG_ON(total_extension_size() > 255u);
+
+	seqcount_spinlock_init(&nf_conntrack_generation,
+			       &nf_conntrack_locks_all_lock);
+
+	for (i = 0; i < CONNTRACK_LOCKS; i++)
+		spin_lock_init(&nf_conntrack_locks[i]);
+
+	if (!nf_conntrack_htable_size) {
+		/* Idea from tcp.c: use 1/16384 of memory.
+		 * On i386: 32MB machine has 512 buckets.
+		 * >= 1GB machines have 16384 buckets.
+		 * >= 4GB machines have 65536 buckets.
+		 */
+		nf_conntrack_htable_size
+			= (((nr_pages << PAGE_SHIFT) / 16384)
+			   / sizeof(struct hlist_head));
+		if (nr_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
+			nf_conntrack_htable_size = 65536;
+		else if (nr_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
+			nf_conntrack_htable_size = 16384;
+		if (nf_conntrack_htable_size < 32)
+			nf_conntrack_htable_size = 32;
+
+		/* Use a max. factor of four by default to get the same max as
+		 * with the old struct list_heads. When a table size is given
+		 * we use the old value of 8 to avoid reducing the max.
+		 * entries. */
+		max_factor = 4;
+	}
+
+	nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1);
+	if (!nf_conntrack_hash)
+		return -ENOMEM;
+
+	/* Default ceiling on the number of tracked connections. */
+	nf_conntrack_max = max_factor * nf_conntrack_htable_size;
+
+	/* SLAB_TYPESAFE_BY_RCU: freed nf_conn memory may be reused for a
+	 * new nf_conn within an RCU grace period; lookups revalidate.
+	 */
+	nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
+						sizeof(struct nf_conn),
+						NFCT_INFOMASK + 1,
+						SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
+	if (!nf_conntrack_cachep)
+		goto err_cachep;
+
+	ret = nf_conntrack_expect_init();
+	if (ret < 0)
+		goto err_expect;
+
+	ret = nf_conntrack_acct_init();
+	if (ret < 0)
+		goto err_acct;
+
+	ret = nf_conntrack_tstamp_init();
+	if (ret < 0)
+		goto err_tstamp;
+
+	ret = nf_conntrack_ecache_init();
+	if (ret < 0)
+		goto err_ecache;
+
+	ret = nf_conntrack_timeout_init();
+	if (ret < 0)
+		goto err_timeout;
+
+	ret = nf_conntrack_helper_init();
+	if (ret < 0)
+		goto err_helper;
+
+	ret = nf_conntrack_labels_init();
+	if (ret < 0)
+		goto err_labels;
+
+	ret = nf_conntrack_seqadj_init();
+	if (ret < 0)
+		goto err_seqadj;
+
+	ret = nf_conntrack_proto_init();
+	if (ret < 0)
+		goto err_proto;
+
+	/* Kick off periodic garbage collection of expired entries. */
+	conntrack_gc_work_init(&conntrack_gc_work);
+	queue_delayed_work(system_power_efficient_wq, &conntrack_gc_work.dwork, HZ);
+
+	return 0;
+
+err_proto:
+	nf_conntrack_seqadj_fini();
+err_seqadj:
+	nf_conntrack_labels_fini();
+err_labels:
+	nf_conntrack_helper_fini();
+err_helper:
+	nf_conntrack_timeout_fini();
+err_timeout:
+	nf_conntrack_ecache_fini();
+err_ecache:
+	nf_conntrack_tstamp_fini();
+err_tstamp:
+	nf_conntrack_acct_fini();
+err_acct:
+	nf_conntrack_expect_fini();
+err_expect:
+	kmem_cache_destroy(nf_conntrack_cachep);
+err_cachep:
+	kvfree(nf_conntrack_hash);
+	return ret;
+}
+
+/* Callbacks published to the core netfilter layer once init succeeds. */
+static struct nf_ct_hook nf_conntrack_hook = {
+	.update		= nf_conntrack_update,
+	.destroy	= destroy_conntrack,
+	.get_tuple_skb  = nf_conntrack_get_tuple_skb,
+};
+
+/* Publish the conntrack hooks; called after all init stages succeed. */
+void nf_conntrack_init_end(void)
+{
+	/* For use by REJECT target */
+	RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
+	RCU_INIT_POINTER(nf_ct_hook, &nf_conntrack_hook);
+}
+
+/*
+ * We need to use special "null" values, not used in hash table
+ */
+#define UNCONFIRMED_NULLS_VAL	((1<<30)+0)
+#define DYING_NULLS_VAL		((1<<30)+1)
+
+/* Per-network-namespace conntrack initialization: per-CPU unconfirmed/
+ * dying lists, statistics counters, and pernet extension state.
+ * Returns 0 on success or a negative errno, unwinding on failure.
+ */
+int nf_conntrack_init_net(struct net *net)
+{
+	int ret = -ENOMEM;
+	int cpu;
+
+	BUILD_BUG_ON(IP_CT_UNTRACKED == IP_CT_NUMBER);
+	BUILD_BUG_ON_NOT_POWER_OF_2(CONNTRACK_LOCKS);
+	atomic_set(&net->ct.count, 0);
+
+	net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
+	if (!net->ct.pcpu_lists)
+		goto err_stat;
+
+	for_each_possible_cpu(cpu) {
+		struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
+
+		spin_lock_init(&pcpu->lock);
+		INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
+		INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
+	}
+
+	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
+	if (!net->ct.stat)
+		goto err_pcpu_lists;
+
+	ret = nf_conntrack_expect_pernet_init(net);
+	if (ret < 0)
+		goto err_expect;
+
+	/* The remaining pernet init helpers cannot fail. */
+	nf_conntrack_acct_pernet_init(net);
+	nf_conntrack_tstamp_pernet_init(net);
+	nf_conntrack_ecache_pernet_init(net);
+	nf_conntrack_helper_pernet_init(net);
+	nf_conntrack_proto_pernet_init(net);
+
+	return 0;
+
+err_expect:
+	free_percpu(net->ct.stat);
+err_pcpu_lists:
+	free_percpu(net->ct.pcpu_lists);
+err_stat:
+	return ret;
+}
diff --git a/upstream/linux-5.10/net/netfilter/xt_DSCP.c b/upstream/linux-5.10/net/netfilter/xt_DSCP.c
new file mode 100755
index 0000000..eababc3
--- /dev/null
+++ b/upstream/linux-5.10/net/netfilter/xt_DSCP.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* x_tables module for setting the IPv4/IPv6 DSCP field, Version 1.8
+ *
+ * (C) 2002 by Harald Welte <laforge@netfilter.org>
+ * based on ipt_FTOS.c (C) 2000 by Matthew G. Marsh <mgm@paktronix.com>
+ *
+ * See RFC2474 for a description of the DSCP field within the IP Header.
+*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/dsfield.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_DSCP.h>
+
+MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
+MODULE_DESCRIPTION("Xtables: DSCP/TOS field modification");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_DSCP");
+MODULE_ALIAS("ip6t_DSCP");
+MODULE_ALIAS("ipt_TOS");
+MODULE_ALIAS("ip6t_TOS");
+
+/* Target hook: rewrite the IPv4 DSCP field to the configured value. */
+static unsigned int
+dscp_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+	const struct xt_DSCP_info *info = par->targinfo;
+	u_int8_t cur = ipv4_get_dsfield(ip_hdr(skb)) >> XT_DSCP_SHIFT;
+
+	/* Nothing to do if the packet already carries the wanted DSCP. */
+	if (cur == info->dscp)
+		return XT_CONTINUE;
+
+	if (skb_ensure_writable(skb, sizeof(struct iphdr)))
+		return NF_DROP;
+
+	ipv4_change_dsfield(ip_hdr(skb), (__force __u8)(~XT_DSCP_MASK),
+			    info->dscp << XT_DSCP_SHIFT);
+
+	return XT_CONTINUE;
+}
+
+/* Target hook: rewrite the IPv6 DSCP field to the configured value. */
+static unsigned int
+dscp_tg6(struct sk_buff *skb, const struct xt_action_param *par)
+{
+	const struct xt_DSCP_info *info = par->targinfo;
+	u_int8_t cur = ipv6_get_dsfield(ipv6_hdr(skb)) >> XT_DSCP_SHIFT;
+
+	/* Nothing to do if the packet already carries the wanted DSCP. */
+	if (cur == info->dscp)
+		return XT_CONTINUE;
+
+	if (skb_ensure_writable(skb, sizeof(struct ipv6hdr)))
+		return NF_DROP;
+
+	ipv6_change_dsfield(ipv6_hdr(skb), (__force __u8)(~XT_DSCP_MASK),
+			    info->dscp << XT_DSCP_SHIFT);
+
+	return XT_CONTINUE;
+}
+
+/* checkentry: reject DSCP values that do not fit the 6-bit field. */
+static int dscp_tg_check(const struct xt_tgchk_param *par)
+{
+	const struct xt_DSCP_info *info = par->targinfo;
+
+	return info->dscp > XT_DSCP_MAX ? -EDOM : 0;
+}
+
+/* Target hook: apply the TOS mask/value transform to an IPv4 packet.
+ * new = (old & ~mask) ^ value; the header is only made writable when
+ * the field actually changes.
+ */
+static unsigned int
+tos_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+	const struct xt_tos_target_info *info = par->targinfo;
+	u_int8_t old_tos, new_tos;
+
+	old_tos = ipv4_get_dsfield(ip_hdr(skb));
+	new_tos = (old_tos & ~info->tos_mask) ^ info->tos_value;
+
+	if (new_tos == old_tos)
+		return XT_CONTINUE;
+
+	if (skb_ensure_writable(skb, sizeof(struct iphdr)))
+		return NF_DROP;
+
+	/* Re-read the header: skb data may have been reallocated. */
+	ipv4_change_dsfield(ip_hdr(skb), 0, new_tos);
+
+	return XT_CONTINUE;
+}
+
+/* Target hook: apply the TOS mask/value transform to the IPv6
+ * traffic-class field.  new = (old & ~mask) ^ value.
+ *
+ * Fix: request writability of the full IPv6 header.  The original used
+ * sizeof(struct iphdr) (20 bytes) in this IPv6 path; that happened to
+ * work because the traffic-class bits live in the first two bytes, but
+ * the IPv6 header size is the correct bound here.
+ */
+static unsigned int
+tos_tg6(struct sk_buff *skb, const struct xt_action_param *par)
+{
+	const struct xt_tos_target_info *info = par->targinfo;
+	struct ipv6hdr *iph = ipv6_hdr(skb);
+	u_int8_t orig, nv;
+
+	orig = ipv6_get_dsfield(iph);
+	nv   = (orig & ~info->tos_mask) ^ info->tos_value;
+
+	if (orig != nv) {
+		if (skb_ensure_writable(skb, sizeof(struct ipv6hdr)))
+			return NF_DROP;
+		iph = ipv6_hdr(skb);	/* header may have moved */
+		ipv6_change_dsfield(iph, 0, nv);
+	}
+
+	return XT_CONTINUE;
+}
+
+/* Registered targets: "DSCP" (rev 0) and "TOS" (rev 1), each for IPv4
+ * and IPv6, all restricted to the mangle table.
+ */
+static struct xt_target dscp_tg_reg[] __read_mostly = {
+	{
+		.name		= "DSCP",
+		.family		= NFPROTO_IPV4,
+		.checkentry	= dscp_tg_check,
+		.target		= dscp_tg,
+		.targetsize	= sizeof(struct xt_DSCP_info),
+		.table		= "mangle",
+		.me		= THIS_MODULE,
+	},
+	{
+		.name		= "DSCP",
+		.family		= NFPROTO_IPV6,
+		.checkentry	= dscp_tg_check,
+		.target		= dscp_tg6,
+		.targetsize	= sizeof(struct xt_DSCP_info),
+		.table		= "mangle",
+		.me		= THIS_MODULE,
+	},
+	{
+		.name		= "TOS",
+		.revision	= 1,
+		.family		= NFPROTO_IPV4,
+		.table		= "mangle",
+		.target		= tos_tg,
+		.targetsize	= sizeof(struct xt_tos_target_info),
+		.me		= THIS_MODULE,
+	},
+	{
+		.name		= "TOS",
+		.revision	= 1,
+		.family		= NFPROTO_IPV6,
+		.table		= "mangle",
+		.target		= tos_tg6,
+		.targetsize	= sizeof(struct xt_tos_target_info),
+		.me		= THIS_MODULE,
+	},
+};
+
+/* Module init: register all DSCP/TOS targets with x_tables. */
+static int __init dscp_tg_init(void)
+{
+	return xt_register_targets(dscp_tg_reg, ARRAY_SIZE(dscp_tg_reg));
+}
+
+/* Module exit: unregister the targets registered above. */
+static void __exit dscp_tg_exit(void)
+{
+	xt_unregister_targets(dscp_tg_reg, ARRAY_SIZE(dscp_tg_reg));
+}
+
+module_init(dscp_tg_init);
+module_exit(dscp_tg_exit);
diff --git a/upstream/linux-5.10/net/netfilter/xt_RATEEST.c b/upstream/linux-5.10/net/netfilter/xt_RATEEST.c
new file mode 100755
index 0000000..0d5c422
--- /dev/null
+++ b/upstream/linux-5.10/net/netfilter/xt_RATEEST.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * (C) 2007 Patrick McHardy <kaber@trash.net>
+ */
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/gen_stats.h>
+#include <linux/jhash.h>
+#include <linux/rtnetlink.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <net/gen_stats.h>
+#include <net/netlink.h>
+#include <net/netns/generic.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_RATEEST.h>
+#include <net/netfilter/xt_rateest.h>
+
+/* Number of buckets in the per-netns estimator hash (power of two). */
+#define RATEEST_HSIZE	16
+
+/* Per-network-namespace state: estimators hashed by name. */
+struct xt_rateest_net {
+	struct mutex hash_lock;			/* protects hash[] and refcnts */
+	struct hlist_head hash[RATEEST_HSIZE];
+};
+
+/* net_generic() id for struct xt_rateest_net. */
+static unsigned int xt_rateest_id;
+
+/* Random jhash seed, initialized once on first checkentry. */
+static unsigned int jhash_rnd __read_mostly;
+
+/* Hash an estimator name (fixed-width field) into a bucket index. */
+static unsigned int xt_rateest_hash(const char *name)
+{
+	return jhash(name, sizeof_field(struct xt_rateest, name), jhash_rnd) &
+	       (RATEEST_HSIZE - 1);
+}
+
+/* Link @est into the per-netns name hash; caller holds xn->hash_lock. */
+static void xt_rateest_hash_insert(struct xt_rateest_net *xn,
+				   struct xt_rateest *est)
+{
+	unsigned int bucket = xt_rateest_hash(est->name);
+
+	hlist_add_head(&est->list, &xn->hash[bucket]);
+}
+
+/* Find an estimator by name; caller holds xn->hash_lock.
+ * On success the estimator's refcount is incremented -- the caller owns
+ * a reference and must release it with xt_rateest_put().
+ */
+static struct xt_rateest *__xt_rateest_lookup(struct xt_rateest_net *xn,
+					      const char *name)
+{
+	struct xt_rateest *est;
+	unsigned int h;
+
+	h = xt_rateest_hash(name);
+	hlist_for_each_entry(est, &xn->hash[h], list) {
+		if (strcmp(est->name, name) == 0) {
+			est->refcnt++;
+			return est;
+		}
+	}
+
+	return NULL;
+}
+
+/* Locked lookup wrapper; returns a referenced estimator or NULL.
+ * The caller must balance a non-NULL result with xt_rateest_put().
+ */
+struct xt_rateest *xt_rateest_lookup(struct net *net, const char *name)
+{
+	struct xt_rateest_net *xn = net_generic(net, xt_rateest_id);
+	struct xt_rateest *est;
+
+	mutex_lock(&xn->hash_lock);
+	est = __xt_rateest_lookup(xn, name);
+	mutex_unlock(&xn->hash_lock);
+	return est;
+}
+EXPORT_SYMBOL_GPL(xt_rateest_lookup);
+
+/* Drop one reference to @est; on the last reference, unhash it, stop
+ * the rate estimator and free it after an RCU grace period.
+ */
+void xt_rateest_put(struct net *net, struct xt_rateest *est)
+{
+	struct xt_rateest_net *xn = net_generic(net, xt_rateest_id);
+
+	mutex_lock(&xn->hash_lock);
+	if (--est->refcnt == 0) {
+		hlist_del(&est->list);
+		gen_kill_estimator(&est->rate_est);
+		/*
+		 * gen_estimator est_timer() might access est->lock or bstats,
+		 * wait a RCU grace period before freeing 'est'
+		 */
+		kfree_rcu(est, rcu);
+	}
+	mutex_unlock(&xn->hash_lock);
+}
+EXPORT_SYMBOL_GPL(xt_rateest_put);
+
+/* Target hook: account the packet into the estimator's basic stats,
+ * which the kernel rate estimator samples periodically.
+ */
+static unsigned int
+xt_rateest_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+	const struct xt_rateest_target_info *info = par->targinfo;
+	struct xt_rateest *est = info->est;
+
+	spin_lock_bh(&est->lock);
+	est->bstats.bytes += skb->len;
+	est->bstats.packets++;
+	spin_unlock_bh(&est->lock);
+
+	return XT_CONTINUE;
+}
+
+/* checkentry: attach (or create) the named estimator for this rule.
+ * If an estimator with the same name exists it is reused, provided any
+ * caller-supplied parameters match; otherwise a new one is created and
+ * registered with the generic estimator infrastructure.
+ * Returns 0 on success or a negative errno.
+ */
+static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
+{
+	struct xt_rateest_net *xn = net_generic(par->net, xt_rateest_id);
+	struct xt_rateest_target_info *info = par->targinfo;
+	struct xt_rateest *est;
+	struct {
+		struct nlattr		opt;
+		struct gnet_estimator	est;
+	} cfg;
+	int ret;
+
+	/* Name must be NUL-terminated within the estimator name field. */
+	if (strnlen(info->name, sizeof(est->name)) >= sizeof(est->name))
+		return -ENAMETOOLONG;
+
+	net_get_random_once(&jhash_rnd, sizeof(jhash_rnd));
+
+	mutex_lock(&xn->hash_lock);
+	est = __xt_rateest_lookup(xn, info->name);
+	if (est) {
+		/* __xt_rateest_lookup() already took a reference for us. */
+		mutex_unlock(&xn->hash_lock);
+		/*
+		 * If estimator parameters are specified, they must match the
+		 * existing estimator.
+		 */
+		if ((!info->interval && !info->ewma_log) ||
+		    (info->interval != est->params.interval ||
+		     info->ewma_log != est->params.ewma_log)) {
+			xt_rateest_put(par->net, est);
+			return -EINVAL;
+		}
+		info->est = est;
+		return 0;
+	}
+
+	ret = -ENOMEM;
+	est = kzalloc(sizeof(*est), GFP_KERNEL);
+	if (!est)
+		goto err1;
+
+	strlcpy(est->name, info->name, sizeof(est->name));
+	spin_lock_init(&est->lock);
+	est->refcnt		= 1;
+	est->params.interval	= info->interval;
+	est->params.ewma_log	= info->ewma_log;
+
+	/* Build a fake netlink attribute to configure the estimator. */
+	cfg.opt.nla_len		= nla_attr_size(sizeof(cfg.est));
+	cfg.opt.nla_type	= TCA_STATS_RATE_EST;
+	cfg.est.interval	= info->interval;
+	cfg.est.ewma_log	= info->ewma_log;
+
+	ret = gen_new_estimator(&est->bstats, NULL, &est->rate_est,
+				&est->lock, NULL, &cfg.opt);
+	if (ret < 0)
+		goto err2;
+
+	info->est = est;
+	xt_rateest_hash_insert(xn, est);
+	mutex_unlock(&xn->hash_lock);
+	return 0;
+
+err2:
+	kfree(est);
+err1:
+	mutex_unlock(&xn->hash_lock);
+	return ret;
+}
+
+/* Target destructor: release the reference taken in checkentry. */
+static void xt_rateest_tg_destroy(const struct xt_tgdtor_param *par)
+{
+	const struct xt_rateest_target_info *info = par->targinfo;
+
+	xt_rateest_put(par->net, info->est);
+}
+
+/* Single family-agnostic RATEEST target registration.
+ * .usersize stops the kernel-internal est pointer from leaking to
+ * userspace when rules are dumped.
+ */
+static struct xt_target xt_rateest_tg_reg __read_mostly = {
+	.name       = "RATEEST",
+	.revision   = 0,
+	.family     = NFPROTO_UNSPEC,
+	.target     = xt_rateest_tg,
+	.checkentry = xt_rateest_tg_checkentry,
+	.destroy    = xt_rateest_tg_destroy,
+	.targetsize = sizeof(struct xt_rateest_target_info),
+	.usersize   = offsetof(struct xt_rateest_target_info, est),
+	.me         = THIS_MODULE,
+};
+
+/* Per-netns setup: initialize the estimator hash and its mutex. */
+static __net_init int xt_rateest_net_init(struct net *net)
+{
+	struct xt_rateest_net *xn = net_generic(net, xt_rateest_id);
+	int i;
+
+	mutex_init(&xn->hash_lock);
+	for (i = 0; i < ARRAY_SIZE(xn->hash); i++)
+		INIT_HLIST_HEAD(&xn->hash[i]);
+	return 0;
+}
+
+/* No .exit needed: estimators are dropped via rule destruction. */
+static struct pernet_operations xt_rateest_net_ops = {
+	.init = xt_rateest_net_init,
+	.id   = &xt_rateest_id,
+	.size = sizeof(struct xt_rateest_net),
+};
+
+/* Module init: register per-netns state, then the xtables target.
+ * Fix: if target registration fails, unregister the pernet subsystem
+ * again so a failed modprobe does not leak the pernet registration.
+ */
+static int __init xt_rateest_tg_init(void)
+{
+	int err = register_pernet_subsys(&xt_rateest_net_ops);
+
+	if (err)
+		return err;
+
+	err = xt_register_target(&xt_rateest_tg_reg);
+	if (err)
+		unregister_pernet_subsys(&xt_rateest_net_ops);
+	return err;
+}
+
+/* Module exit: tear down in reverse order of registration. */
+static void __exit xt_rateest_tg_fini(void)
+{
+	xt_unregister_target(&xt_rateest_tg_reg);
+	unregister_pernet_subsys(&xt_rateest_net_ops);
+}
+
+
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Xtables: packet rate estimator");
+MODULE_ALIAS("ipt_RATEEST");
+MODULE_ALIAS("ip6t_RATEEST");
+module_init(xt_rateest_tg_init);
+module_exit(xt_rateest_tg_fini);
diff --git a/upstream/linux-5.10/net/netfilter/xt_TCPMSS.c b/upstream/linux-5.10/net/netfilter/xt_TCPMSS.c
new file mode 100755
index 0000000..122db9f
--- /dev/null
+++ b/upstream/linux-5.10/net/netfilter/xt_TCPMSS.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This is a module which is used for setting the MSS option in TCP packets.
+ *
+ * Copyright (C) 2000 Marc Boucher <marc@mbsi.ca>
+ * Copyright (C) 2007 Patrick McHardy <kaber@trash.net>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/gfp.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <net/dst.h>
+#include <net/flow.h>
+#include <net/ipv6.h>
+#include <net/route.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_tcpudp.h>
+#include <linux/netfilter/xt_TCPMSS.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
+MODULE_DESCRIPTION("Xtables: TCP Maximum Segment Size (MSS) adjustment");
+MODULE_ALIAS("ipt_TCPMSS");
+MODULE_ALIAS("ip6t_TCPMSS");
+
+/* Length of the TCP option starting at opt[offset].
+ * EOL/NOP are single-byte options, and a zero length byte would stall
+ * the caller's scan, so both cases report length 1.
+ */
+static inline unsigned int
+optlen(const u_int8_t *opt, unsigned int offset)
+{
+	u_int8_t kind = opt[offset];
+
+	return (kind <= TCPOPT_NOP || opt[offset + 1] == 0) ?
+		1 : opt[offset + 1];
+}
+
+/* Path MTU towards the packet's *source* (the reverse direction),
+ * obtained by routing a flow back to the sender's address.
+ * Returns ~0U when no route is found, so it never lowers the min()
+ * computed by the caller.
+ */
+static u_int32_t tcpmss_reverse_mtu(struct net *net,
+				    const struct sk_buff *skb,
+				    unsigned int family)
+{
+	struct flowi fl;
+	struct rtable *rt = NULL;
+	u_int32_t mtu     = ~0U;
+
+	if (family == PF_INET) {
+		struct flowi4 *fl4 = &fl.u.ip4;
+		memset(fl4, 0, sizeof(*fl4));
+		fl4->daddr = ip_hdr(skb)->saddr;
+	} else {
+		struct flowi6 *fl6 = &fl.u.ip6;
+
+		memset(fl6, 0, sizeof(*fl6));
+		fl6->daddr = ipv6_hdr(skb)->saddr;
+	}
+
+	nf_route(net, (struct dst_entry **)&rt, &fl, false, family);
+	if (rt != NULL) {
+		mtu = dst_mtu(&rt->dst);
+		dst_release(&rt->dst);
+	}
+	return mtu;
+}
+
+/* Clamp or set the MSS option of a TCP packet.
+ * @tcphoff: offset of the TCP header from the network header.
+ * @minlen:  minimal IP+TCP header size for this family, used to derive
+ *           the MSS from the path MTU when clamping.
+ *
+ * Rewrites an existing MSS option in place (never increasing it), or
+ * appends a new MSS option when the header has room, fixing up the TCP
+ * checksum incrementally at each step.
+ *
+ * Returns 0 when the packet was left alone or modified in place,
+ * TCPOLEN_MSS when the option was added (caller must grow the IP
+ * length fields), or -1 to drop the packet.
+ */
+static int
+tcpmss_mangle_packet(struct sk_buff *skb,
+		     const struct xt_action_param *par,
+		     unsigned int family,
+		     unsigned int tcphoff,
+		     unsigned int minlen)
+{
+	const struct xt_tcpmss_info *info = par->targinfo;
+	struct tcphdr *tcph;
+	int len, tcp_hdrlen;
+	unsigned int i;
+	__be16 oldval;
+	u16 newmss;
+	u8 *opt;
+
+	/* This is a fragment, no TCP header is available */
+	if (par->fragoff != 0)
+		return 0;
+
+	if (skb_ensure_writable(skb, skb->len))
+		return -1;
+
+	len = skb->len - tcphoff;
+	if (len < (int)sizeof(struct tcphdr))
+		return -1;
+
+	tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
+	tcp_hdrlen = tcph->doff * 4;
+
+	/* Reject truncated headers and bogus data offsets. */
+	if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr))
+		return -1;
+
+	if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
+		struct net *net = xt_net(par);
+		unsigned int in_mtu = tcpmss_reverse_mtu(net, skb, family);
+		unsigned int min_mtu = min(dst_mtu(skb_dst(skb)), in_mtu);
+
+		if (min_mtu <= minlen) {
+			net_err_ratelimited("unknown or invalid path-MTU (%u)\n",
+					    min_mtu);
+			return -1;
+		}
+		/* MSS = path MTU minus the IP+TCP header overhead. */
+		newmss = min_mtu - minlen;
+	} else
+		newmss = info->mss;
+
+	/* Scan the TCP options for an existing MSS option. */
+	opt = (u_int8_t *)tcph;
+	for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) {
+		if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) {
+			u_int16_t oldmss;
+
+			oldmss = (opt[i+2] << 8) | opt[i+3];
+
+			/* Never increase MSS, even when setting it, as
+			 * doing so results in problems for hosts that rely
+			 * on MSS being set correctly.
+			 */
+			if (oldmss <= newmss)
+				return 0;
+
+			opt[i+2] = (newmss & 0xff00) >> 8;
+			opt[i+3] = newmss & 0x00ff;
+
+			inet_proto_csum_replace2(&tcph->check, skb,
+						 htons(oldmss), htons(newmss),
+						 false);
+			return 0;
+		}
+	}
+
+	/* There is data after the header so the option can't be added
+	 * without moving it, and doing so may make the SYN packet
+	 * itself too large. Accept the packet unmodified instead.
+	 */
+	if (len > tcp_hdrlen)
+		return 0;
+
+	/* tcph->doff has 4 bits, do not wrap it to 0 */
+	if (tcp_hdrlen >= 15 * 4)
+		return 0;
+
+	/*
+	 * MSS Option not found ?! add it..
+	 */
+	if (skb_tailroom(skb) < TCPOLEN_MSS) {
+		if (pskb_expand_head(skb, 0,
+				     TCPOLEN_MSS - skb_tailroom(skb),
+				     GFP_ATOMIC))
+			return -1;
+		/* Head was reallocated; recompute the TCP header pointer. */
+		tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
+	}
+
+	skb_put(skb, TCPOLEN_MSS);
+
+	/*
+	 * IPv4: RFC 1122 states "If an MSS option is not received at
+	 * connection setup, TCP MUST assume a default send MSS of 536".
+	 * IPv6: RFC 2460 states IPv6 has a minimum MTU of 1280 and a minimum
+	 * length IPv6 header of 60, ergo the default MSS value is 1220
+	 * Since no MSS was provided, we must use the default values
+	 */
+	if (xt_family(par) == NFPROTO_IPV4)
+		newmss = min(newmss, (u16)536);
+	else
+		newmss = min(newmss, (u16)1220);
+
+	opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
+	memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));
+
+	/* Checksum fixups: packet length change, new option bytes, and
+	 * the header word containing the enlarged data offset (doff).
+	 */
+	inet_proto_csum_replace2(&tcph->check, skb,
+				 htons(len), htons(len + TCPOLEN_MSS), true);
+	opt[0] = TCPOPT_MSS;
+	opt[1] = TCPOLEN_MSS;
+	opt[2] = (newmss & 0xff00) >> 8;
+	opt[3] = newmss & 0x00ff;
+
+	inet_proto_csum_replace4(&tcph->check, skb, 0, *((__be32 *)opt), false);
+
+	oldval = ((__be16 *)tcph)[6];
+	tcph->doff += TCPOLEN_MSS/4;
+	inet_proto_csum_replace2(&tcph->check, skb,
+				 oldval, ((__be16 *)tcph)[6], false);
+	return TCPOLEN_MSS;
+}
+
+/* IPv4 target hook: mangle the MSS option and, when an option was
+ * appended, grow tot_len and patch the IP header checksum.
+ */
+static unsigned int
+tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par)
+{
+	struct iphdr *iph = ip_hdr(skb);
+	__be16 newlen;
+	int ret;
+
+	ret = tcpmss_mangle_packet(skb, par,
+				   PF_INET,
+				   iph->ihl * 4,
+				   sizeof(*iph) + sizeof(struct tcphdr));
+	if (ret < 0)
+		return NF_DROP;
+	if (ret > 0) {
+		/* ret bytes of MSS option were appended to the packet. */
+		iph = ip_hdr(skb);
+		newlen = htons(ntohs(iph->tot_len) + ret);
+		csum_replace2(&iph->check, iph->tot_len, newlen);
+		iph->tot_len = newlen;
+	}
+	return XT_CONTINUE;
+}
+
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+/* IPv6 target hook: skip extension headers to find TCP, mangle the MSS
+ * option, and adjust payload_len (and any complete checksum) when an
+ * option was appended.
+ */
+static unsigned int
+tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
+{
+	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+	u8 nexthdr;
+	__be16 frag_off, oldlen, newlen;
+	int tcphoff;
+	int ret;
+
+	nexthdr = ipv6h->nexthdr;
+	tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off);
+	if (tcphoff < 0)
+		return NF_DROP;
+	ret = tcpmss_mangle_packet(skb, par,
+				   PF_INET6,
+				   tcphoff,
+				   sizeof(*ipv6h) + sizeof(struct tcphdr));
+	if (ret < 0)
+		return NF_DROP;
+	if (ret > 0) {
+		/* ret bytes of MSS option were appended to the packet. */
+		ipv6h = ipv6_hdr(skb);
+		oldlen = ipv6h->payload_len;
+		newlen = htons(ntohs(oldlen) + ret);
+		if (skb->ip_summed == CHECKSUM_COMPLETE)
+			skb->csum = csum_add(csum_sub(skb->csum, oldlen),
+					     newlen);
+		ipv6h->payload_len = newlen;
+	}
+	return XT_CONTINUE;
+}
+#endif
+
+/* Must specify -p tcp --syn */
+/* True when @m is a tcp match that (non-inverted) requires SYN set. */
+static inline bool find_syn_match(const struct xt_entry_match *m)
+{
+	const struct xt_tcp *tcpinfo = (const struct xt_tcp *)m->data;
+
+	return strcmp(m->u.kernel.match->name, "tcp") == 0 &&
+	       (tcpinfo->flg_cmp & TCPHDR_SYN) != 0 &&
+	       !(tcpinfo->invflags & XT_TCP_INV_FLAGS);
+}
+
+/* IPv4 checkentry: PMTU clamping is only valid in hooks where a route
+ * (skb_dst) is available, and the rule must match TCP SYN packets.
+ */
+static int tcpmss_tg4_check(const struct xt_tgchk_param *par)
+{
+	const struct xt_tcpmss_info *info = par->targinfo;
+	const struct ipt_entry *e = par->entryinfo;
+	const struct xt_entry_match *ematch;
+
+	if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
+	    (par->hook_mask & ~((1 << NF_INET_FORWARD) |
+			   (1 << NF_INET_LOCAL_OUT) |
+			   (1 << NF_INET_POST_ROUTING))) != 0) {
+		pr_info_ratelimited("path-MTU clamping only supported in FORWARD, OUTPUT and POSTROUTING hooks\n");
+		return -EINVAL;
+	}
+	/* nft_compat rules carry no ipt_entry match list to inspect. */
+	if (par->nft_compat)
+		return 0;
+
+	xt_ematch_foreach(ematch, e)
+		if (find_syn_match(ematch))
+			return 0;
+	pr_info_ratelimited("Only works on TCP SYN packets\n");
+	return -EINVAL;
+}
+
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+/* IPv6 checkentry: same constraints as the IPv4 variant. */
+static int tcpmss_tg6_check(const struct xt_tgchk_param *par)
+{
+	const struct xt_tcpmss_info *info = par->targinfo;
+	const struct ip6t_entry *e = par->entryinfo;
+	const struct xt_entry_match *ematch;
+
+	if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
+	    (par->hook_mask & ~((1 << NF_INET_FORWARD) |
+			   (1 << NF_INET_LOCAL_OUT) |
+			   (1 << NF_INET_POST_ROUTING))) != 0) {
+		pr_info_ratelimited("path-MTU clamping only supported in FORWARD, OUTPUT and POSTROUTING hooks\n");
+		return -EINVAL;
+	}
+	/* nft_compat rules carry no ip6t_entry match list to inspect. */
+	if (par->nft_compat)
+		return 0;
+
+	xt_ematch_foreach(ematch, e)
+		if (find_syn_match(ematch))
+			return 0;
+	pr_info_ratelimited("Only works on TCP SYN packets\n");
+	return -EINVAL;
+}
+#endif
+
+/* TCPMSS target registrations for IPv4 and (optionally) IPv6, both
+ * restricted to TCP.
+ */
+static struct xt_target tcpmss_tg_reg[] __read_mostly = {
+	{
+		.family		= NFPROTO_IPV4,
+		.name		= "TCPMSS",
+		.checkentry	= tcpmss_tg4_check,
+		.target		= tcpmss_tg4,
+		.targetsize	= sizeof(struct xt_tcpmss_info),
+		.proto		= IPPROTO_TCP,
+		.me		= THIS_MODULE,
+	},
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+	{
+		.family		= NFPROTO_IPV6,
+		.name		= "TCPMSS",
+		.checkentry	= tcpmss_tg6_check,
+		.target		= tcpmss_tg6,
+		.targetsize	= sizeof(struct xt_tcpmss_info),
+		.proto		= IPPROTO_TCP,
+		.me		= THIS_MODULE,
+	},
+#endif
+};
+
+/* Module init: register the TCPMSS targets with x_tables. */
+static int __init tcpmss_tg_init(void)
+{
+	return xt_register_targets(tcpmss_tg_reg, ARRAY_SIZE(tcpmss_tg_reg));
+}
+
+/* Module exit: unregister the targets registered above. */
+static void __exit tcpmss_tg_exit(void)
+{
+	xt_unregister_targets(tcpmss_tg_reg, ARRAY_SIZE(tcpmss_tg_reg));
+}
+
+module_init(tcpmss_tg_init);
+module_exit(tcpmss_tg_exit);
diff --git a/upstream/linux-5.10/net/netfilter/xt_hl.c b/upstream/linux-5.10/net/netfilter/xt_hl.c
new file mode 100755
index 0000000..c1a70f8
--- /dev/null
+++ b/upstream/linux-5.10/net/netfilter/xt_hl.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * IP tables module for matching the value of the TTL
+ * (C) 2000,2001 by Harald Welte <laforge@netfilter.org>
+ *
+ * Hop Limit matching module
+ * (C) 2001-2002 Maciej Soltysiak <solt@dns.toxicfilms.tv>
+ */
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter_ipv4/ipt_ttl.h>
+#include <linux/netfilter_ipv6/ip6t_hl.h>
+
+MODULE_AUTHOR("Maciej Soltysiak <solt@dns.toxicfilms.tv>");
+MODULE_DESCRIPTION("Xtables: Hoplimit/TTL field match");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_ttl");
+MODULE_ALIAS("ip6t_hl");
+
+static bool ttl_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	const struct ipt_ttl_info *info = par->matchinfo;
+	const u8 ttl = ip_hdr(skb)->ttl;
+
+	switch (info->mode) {
+	case IPT_TTL_EQ:
+		return ttl == info->ttl;
+	case IPT_TTL_NE:
+		return ttl != info->ttl;
+	case IPT_TTL_LT:
+		return ttl < info->ttl;
+	case IPT_TTL_GT:
+		return ttl > info->ttl;
+	}
+
+	return false;
+}
+
+static bool hl_mt6(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	const struct ip6t_hl_info *info = par->matchinfo;
+	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+	switch (info->mode) {
+	case IP6T_HL_EQ:
+		return ip6h->hop_limit == info->hop_limit;
+	case IP6T_HL_NE:
+		return ip6h->hop_limit != info->hop_limit;
+	case IP6T_HL_LT:
+		return ip6h->hop_limit < info->hop_limit;
+	case IP6T_HL_GT:
+		return ip6h->hop_limit > info->hop_limit;
+	}
+
+	return false;
+}
+
+static struct xt_match hl_mt_reg[] __read_mostly = {
+	{
+		.name       = "ttl",
+		.revision   = 0,
+		.family     = NFPROTO_IPV4,
+		.match      = ttl_mt,
+		.matchsize  = sizeof(struct ipt_ttl_info),
+		.me         = THIS_MODULE,
+	},
+	{
+		.name       = "hl",
+		.revision   = 0,
+		.family     = NFPROTO_IPV6,
+		.match      = hl_mt6,
+		.matchsize  = sizeof(struct ip6t_hl_info),
+		.me         = THIS_MODULE,
+	},
+};
+
+static int __init hl_mt_init(void)
+{
+	return xt_register_matches(hl_mt_reg, ARRAY_SIZE(hl_mt_reg));
+}
+
+static void __exit hl_mt_exit(void)
+{
+	xt_unregister_matches(hl_mt_reg, ARRAY_SIZE(hl_mt_reg));
+}
+
+module_init(hl_mt_init);
+module_exit(hl_mt_exit);
diff --git a/upstream/pub/include/infra/pub_debug_info.h b/upstream/pub/include/infra/pub_debug_info.h
new file mode 100755
index 0000000..10357aa
--- /dev/null
+++ b/upstream/pub/include/infra/pub_debug_info.h
@@ -0,0 +1,67 @@
+#ifndef _PUB_DEBUG_INFO_H_
+#define _PUB_DEBUG_INFO_H_
+
+#include <stdarg.h>
+
+#define DEBUG_INFO_DEV_PATH "/dev/debug_info"
+
+/* PS/KERNEL/DRIVER/FS/APP on the AP side and the CAP side: each section uses a START~END range and reserves 100 IDs */
+#define MODULE_ID_PS_START			(1)
+#define MODULE_ID_PS_NAS			(1)
+#define MODULE_ID_PS_RRC			(2)
+#define MODULE_ID_PS_L2				(3)
+#define MODULE_ID_PS_UICC			(99)
+#define MODULE_ID_PS_END			(100)
+
+#define MODULE_ID_AP_KERNEL_START	(101)
+#define MODULE_ID_AP_KERNEL_END		(200)
+
+#define MODULE_ID_CAP_KERNEL_START	(201)
+#define MODULE_ID_CAP_KERNEL_END	(300)
+
+#define MODULE_ID_AP_DRIVES_START	(301)
+#define MODULE_ID_AP_USB			(301)
+#define MODULE_ID_AP_REBOOT			(302)
+#define MODULE_ID_AP_TSC			(303)
+#define MODULE_ID_AP_PSM			(304)
+#define MODULE_ID_AP_NAND			(305)
+#define MODULE_ID_AP_MMC			(306)
+#define MODULE_ID_AP_WIFI			(307)
+#define MODULE_ID_AP_DRIVES_END		(400)
+
+#define MODULE_ID_CAP_DRIVES_START	(401)
+#define MODULE_ID_CAP_USB			(401)
+#define MODULE_ID_CAP_TSC			(402)
+#define MODULE_ID_CAP_PSM			(403)
+#define MODULE_ID_CAP_NAND			(404)
+#define MODULE_ID_CAP_SPI			(405)
+#define MODULE_ID_CAP_MMC			(406)
+#define MODULE_ID_CAP_UART			(407)
+#define MODULE_ID_CAP_DRIVES_END	(500)
+
+#define MODULE_ID_AP_FS_START		(501)
+#define MODULE_ID_AP_JFFS2			(501)
+#define MODULE_ID_AP_FS_END			(600)
+
+#define MODULE_ID_CAP_FS_START		(601)
+#define MODULE_ID_CAP_FS_END		(700)
+
+#define MODULE_ID_AP_APP_START		(701)
+#define MODULE_ID_AP_FOTA			(701)
+#define MODULE_ID_AP_FS_CHECK		(702)
+#define MODULE_ID_AP_APP_END		(800)
+
+#define MODULE_ID_CAP_APP_START		(801)
+#define MODULE_ID_CAP_FOTA			(801)
+#define MODULE_ID_CAP_FS_CHECK		(802)
+#define MODULE_ID_CAP_APP_END		(900)
+
+#if defined(_USE_ZXIC_DEBUG_INFO) && !defined(CONFIG_SYSTEM_RECOVERY)
+int sc_debug_info_vrecord(unsigned int id, const char *format, va_list args);
+int sc_debug_info_record(unsigned int id, const char *format, ...);
+#else
+static inline int sc_debug_info_vrecord(unsigned int id, const char *format, va_list args) { return 0; }
+static inline int sc_debug_info_record(unsigned int id, const char *format, ...) { return 0; }
+#endif
+
+#endif
\ No newline at end of file
diff --git a/upstream/pub/project/zx297520v3/include/drv/NvParam_drv.h b/upstream/pub/project/zx297520v3/include/drv/NvParam_drv.h
new file mode 100755
index 0000000..66c6f6d
--- /dev/null
+++ b/upstream/pub/project/zx297520v3/include/drv/NvParam_drv.h
@@ -0,0 +1,223 @@
+/***********************************************************************

+* Copyright (C) 2016, ZTE Corporation.

+*

+* File Name:  nvparam_drv.h

+* File Mark:

+* Description:  

+* Others:

+* Version:   v1.0

+* Author:   wangxia

+* Date:         2016-03-12

+*

+* History 1:

+*     Date:

+*     Version:

+*     Author:

+*     Modification:

+* History 2:

+**********************************************************************/

+#ifndef NVPARAM_DRV_H

+#define NVPARAM_DRV_H

+

+/**************************************************************************

+ *                                  Include files                         *

+ **************************************************************************/

+#include "RWNvConfig.h"

+#include "NvParam_tsc.h"

+/**************************************************************************

+ *                                  Macro                                 *

+ **************************************************************************/

+#define	DRV_NV_ADDR					OS_FLASH_DRV_RW_NONFAC_BASE_ADDR

+#define	DRV_NV_SIZE					OS_FLASH_DRV_RW_NONFAC_SIZE	/*16K*/

+

+/*=====================================================================================================================

+|----------------|----------------|---------------|--------------|----------------|-----------------|-----------------|

+|  public(256B)  |    system(3K)  |  platfor(3K)  | highspeed(4K)| peripheral(3K) | 	audio(1K) 	| reserved(1.75K) |

+|----------------|----------------|---------------|--------------|----------------|-----------------|-----------------|

+=======================================================================================================================*/

+

+#define	DRV_PUB_NV_ADDR				DRV_NV_ADDR

+#define	DRV_PUB_NV_SIZE				(256)

+#define	DRV_SYS_NV_ADDR				(DRV_PUB_NV_ADDR + DRV_PUB_NV_SIZE)

+#define	DRV_SYS_NV_SIZE				(3 * 1024)

+#define	DRV_PLAT_NV_ADDR			(DRV_SYS_NV_ADDR + DRV_SYS_NV_SIZE)

+#define	DRV_PLAT_NV_SIZE			(3 * 1024)

+#define	DRV_HS_PERI_NV_ADDR			(DRV_PLAT_NV_ADDR + DRV_PLAT_NV_SIZE)

+#define	DRV_HS_PERI_NV_SIZE			(4 * 1024)

+#define	DRV_PERI_NV_ADDR			(DRV_HS_PERI_NV_ADDR + DRV_HS_PERI_NV_SIZE)

+#define	DRV_PERI_NV_SIZE			(3 * 1024)

+#define	DRV_AUDIO_NV_ADDR			(DRV_PERI_NV_ADDR + DRV_PERI_NV_SIZE)

+#define	DRV_AUDIO_NV_SIZE			(1 * 1024)

+#define	DRV_RSVD_NV_ADDR			(DRV_AUDIO_NV_ADDR + DRV_AUDIO_NV_SIZE)

+#define	DRV_RSVD_NV_SIZE			(1 * 1024 + 768)

+

+#define	DRV_TOTAL_NV_SIZE			(DRV_PUB_NV_SIZE+DRV_SYS_NV_SIZE+DRV_PLAT_NV_SIZE+DRV_HS_PERI_NV_SIZE+DRV_PERI_NV_SIZE+DRV_AUDIO_NV_SIZE+DRV_RSVD_NV_SIZE)

+

+/* user interface */

+#define DRV_PUB_NV_ITEM_ADDR(x) 	(DRV_PUB_NV_ADDR + (UINT32)(&(((T_ZDrvNv_PubData*)(0x0))->x)))

+#define DRV_PUB_NV_ITEM_SIZE(x) 	(sizeof(((T_ZDrvNv_PubData*)(0x0))->x))

+

+#define DRV_SYS_NV_ITEM_ADDR(x) 	(DRV_SYS_NV_ADDR + (UINT32)(&(((T_ZDrvNv_SysData*)(0x0))->x)))

+#define DRV_SYS_NV_ITEM_SIZE(x) 	(sizeof(((T_ZDrvNv_SysData*)(0x0))->x))

+

+#define DRV_PLAT_NV_ITEM_ADDR(x) 	(DRV_PLAT_NV_ADDR + (UINT32)(&(((T_ZDrvNv_PlatData*)(0x0))->x)))

+#define DRV_PLAT_NV_ITEM_SIZE(x) 	(sizeof(((T_ZDrvNv_PlatData*)(0x0))->x))

+

+#define DRV_HS_PERI_NV_ITEM_ADDR(x) (DRV_HS_PERI_NV_ADDR + (UINT32)(&(((T_ZDrvNv_HSPeriData*)(0x0))->x)))

+#define DDRV_HS_PER_NV_ITEM_SIZE(x) (sizeof(((T_ZDrvNv_HSPeriData*)(0x0))->x))

+

+#define DRV_PER_NV_ITEM_ADDR(x) 	(DRV_PERI_NV_ADDR + (UINT32)(&(((T_ZDrvNv_PeriData*)(0x0))->x)))

+#define DRV_PER_NV_ITEM_SIZE(x) 	(sizeof(((T_ZDrvNv_PeriData*)(0x0))->x))

+

+#define DRV_AUDIO_NV_ITEM_ADDR(x) 	(DRV_AUDIO_NV_ADDR + (UINT32)(&(((T_ZDrvNv_AudioData*)(0x0))->x)))

+#define DRV_AUDIO_NV_ITEM_SIZE(x) 	(sizeof(((T_ZDrvNv_AudioData*)(0x0))->x))

+

+#define OS_FLASH_VOICE_DRV_RW_NONFAC_BASE_ADDR         (OS_FLASH_DRV_RW_NONFAC_BASE_ADDR + 15360)

+#define OS_FLASH_VOICE_DRV_NONFAC_SIZE     1024

+

+#if DRV_TOTAL_NV_SIZE > (OS_FLASH_DRV_RW_NONFAC_SIZE)

+#error "error drv nv config!!!"

+#endif

+

+/****************************************************************************

+* 	                         Types

+****************************************************************************/

+

+

+/******************************************************

+* 	         Drv NV Config

+******************************************************/

+/***********************************

+1. public nv_data

+************************************/

+typedef struct _T_ZDrvNv_PubData

+{

+	/* 0x00 */ 	CHAR  	chipName[16];

+	/* 0x10 */ 	CHAR	prjName[16];

+	/* 0x20 */ 	CHAR 	externalVer[16];

+	/* 0x30 */ 	CHAR 	internalVer[16];

+	/* 0x40 */ 	CHAR 	releaseTime[16];

+	/* 0x50 */ 	UINT8 	productType;

+	/* 0x51 */ 	UINT8 	reserved[DRV_PUB_NV_SIZE - 0x51];

+} __attribute__ ((packed)) T_ZDrvNv_PubData;

+

+/***********************************

+2. system group nv_data

+************************************/

+typedef struct _T_ZDrvNv_SysData

+{

+	/* 0x000 */	T_SYS_NV_TSC_CONFIG tsc_config;

+				UINT8 reserved0[12];

+	/* 0x70 */	UINT32 buck1OnoffFlag;

+	/* 0x74  */	UINT32 wdtSwitch;

+	/* 0x78  */	UINT32 wdtPriority;

+	/* 0x7C */	UINT8 uiccmodeSwitch;

+	/* 0x7D */	UINT8 uiccPreSwitch; 

+	/* 0x7E */	UINT8 uicc1modeSwitch;

+	/* 0x7F */	UINT8 uicc1PreSwitch; 

+	/* 0x80 */	UINT8 ishpnotsupported;

+              UINT8 reserved[DRV_SYS_NV_SIZE - 129];

+} __attribute__ ((packed)) T_ZDrvNv_SysData;

+

+/***********************************

+3. platform group nv_data

+************************************/

+typedef struct _T_ZDrvNv_PlatData

+{

+	UINT8 reserved[DRV_PLAT_NV_SIZE];

+} __attribute__ ((packed)) T_ZDrvNv_PlatData;

+

+/***********************************

+4. high-speed peripheral group nv_data

+************************************/

+typedef struct _T_ZDrvNv_HSPeriData

+{

+	UINT8 reserved[DRV_HS_PERI_NV_SIZE];

+} __attribute__ ((packed)) T_ZDrvNv_HSPeriData;

+

+/***********************************

+5. common peripheral group nv_data

+************************************/

+typedef struct _T_ZDrvNv_PeriData

+{

+    UINT8 bat_det;

+    UINT8 reserved[DRV_PERI_NV_SIZE-1];

+} __attribute__ ((packed)) T_ZDrvNv_PeriData;

+

+/***********************************

+6. audio group nv_data

+************************************/

+typedef struct _T_ZDrvNv_AudioData

+{

+    UINT8 reserved[DRV_AUDIO_NV_SIZE];

+} __attribute__ ((packed)) T_ZDrvNv_AudioData;

+

+/***********************************

+7. all driver_used nv_data

+************************************/

+typedef struct _T_ZDrv_NvData

+{

+	/* 0x0000 */	T_ZDrvNv_PubData	pubData;

+	/* 0x0100 */	T_ZDrvNv_SysData	sysData;

+	/* 0x0D00 */	T_ZDrvNv_PlatData	platData;

+	/* 0x1900 */	T_ZDrvNv_HSPeriData	HSPeriData;

+	/* 0x2900 */	T_ZDrvNv_PeriData	periData;

+	/* 0x3500 */	T_ZDrvNv_AudioData	audioData;

+	/* 0x3900 */	UINT8				reserved[DRV_RSVD_NV_SIZE];

+} T_ZDrv_NvData;

+

+

+/******************************************************

+* 	check struct size

+******************************************************/

+static inline CHAR zDrvNv_CheckTypeSize(void)

+{ \

+	CHAR __dummy1[(sizeof(T_ZDrv_NvData)==DRV_NV_SIZE)?1:-1]={0}; \

+	CHAR __dummy2[(sizeof(T_ZDrvNv_PubData)==DRV_PUB_NV_SIZE)?1:-1]={0}; \

+	CHAR __dummy3[(sizeof(T_ZDrvNv_SysData)==DRV_SYS_NV_SIZE)?1:-1]={0}; \

+	CHAR __dummy4[(sizeof(T_ZDrvNv_PlatData)==DRV_PLAT_NV_SIZE)?1:-1]={0}; \

+	CHAR __dummy5[(sizeof(T_ZDrvNv_HSPeriData)==DRV_HS_PERI_NV_SIZE)?1:-1]={0}; \

+	CHAR __dummy6[(sizeof(T_ZDrvNv_PeriData)==DRV_PERI_NV_SIZE)?1:-1]={0}; \

+	CHAR __dummy7[(sizeof(T_ZDrvNv_AudioData)==DRV_AUDIO_NV_SIZE)?1:-1]={0}; \

+	return (__dummy1[0]+__dummy2[0]+__dummy3[0]+__dummy4[0]+__dummy5[0]+__dummy6[0]+__dummy7[0]); \

+}

+

+/******************************************************

+* 	old struct

+******************************************************/

+#if 0

+typedef struct _T_Sys_Drv_Nv_Data

+{

+	T_SYS_NV_TSC_CONFIG tsc_config;

+	UINT8 reserved[6];

+	UINT32 wdtSwitch;

+}T_Sys_Drv_Nv_Data;

+#endif

+typedef struct _T_Drv_Nv_Data

+{

+	UINT32 VpData[1024];//add by lvwenhua for voice 2013.12.6

+}T_Drv_Nv_Data;

+

+#define DRV_NV_ITEM_ADDRESS(x) (DRV_AUDIO_NV_ADDR + (UINT32)(&(((T_Drv_Nv_Data*)(0x0))->x)))

+//flag use 32byte

+typedef struct _T_Audio_NvFlag

+{

+    UINT8           isVpConfigInitOn;

+    UINT8           isVpParamInNv;

+    UINT8           isUseSlicCodec;

+    UINT8           isUseVoiceProc;//4 UINT8           isUseNXP;

+    UINT8           isUseCodecDsp;	

+	UINT8			isUseNvWrite;

+	UINT8			isCloseVpBufferBak;

+	UINT8			isUseTdm;//8

+	UINT8			isUseRxDtmfDet;

+	UINT8			isUseTxDtmfDet;     

+    UINT8			isUseRxMixData;

+	UINT8			isUseTxMixData;//12

+	UINT8			isUseEcall;

+    UINT8           reserved[19];//32-13

+	

+}  T_Audio_NvFlag;

+

+#endif

+

diff --git a/upstream/pub/project/zx297520v3/include/infra/ram_base_config_7520v3.h b/upstream/pub/project/zx297520v3/include/infra/ram_base_config_7520v3.h
new file mode 100755
index 0000000..6a1626d
--- /dev/null
+++ b/upstream/pub/project/zx297520v3/include/infra/ram_base_config_7520v3.h
@@ -0,0 +1,347 @@
+/*******************************************************************************

+* Copyright (C) 2015, ZTE Corporation.

+* 

+* File name:     ram_config_7520v3.h

+* File ID:       ram_config_7520v3.h

+* Summary:       memory address configuration header for the zx297520v3 chip platform

+* Usage:         #include "ram_config.h"

+* 

+* Date            Version     Tag             Author          Description

+* ------------------------------------------------------------------------------

+* 2015/06/08      V1.0        Create          Liu Yanan       Created

+* 

+*******************************************************************************/

+

+#ifndef _RAM_BASE_CONFIG_7520V3

+#define _RAM_BASE_CONFIG_7520V3

+

+/*******************************************************************************

+*                                 Header files                                 *

+*******************************************************************************/

+

+/*******************************************************************************

+*                              Macro definitions                               *

+*******************************************************************************/

+

+/* IRAM0 base address */

+#ifdef DDR_BASE_ADDR_LINUX_VA

+#define IRAM_BASE_ADDR                  ((unsigned long)(ZX_IRAM0_BASE))

+#else

+#define IRAM_BASE_ADDR                  (0x82000000UL>>CPU_SHIFT)

+#endif

+#define IRAM_BASE_LEN                   (0x00010000UL>>CPU_SHIFT)

+

+/* 1K, exception vector table: 0x82000000/0x41000000 */

+#define IRAM_BASE_ADDR_VECTOR           (IRAM_BASE_ADDR) 

+#define IRAM_BASE_LEN_VECTOR            ((1 * 1024UL)>>CPU_SHIFT)

+#define OTP_SECURE_PUK_BASE              IRAM_BASE_ADDR_VECTOR + 0x4

+

+/* 12K, driver inter-core communication */

+#define IRAM_BASE_ADDR_DRV              (IRAM_BASE_ADDR_VECTOR + IRAM_BASE_LEN_VECTOR)

+#define IRAM_BASE_LEN_DRV               ((12 * 1024UL)>>CPU_SHIFT)

+

+/* 1K, power-saving command exchange */

+#define IRAM_BASE_ADDR_PSM              (IRAM_BASE_ADDR_DRV + IRAM_BASE_LEN_DRV)

+#define IRAM_BASE_LEN_PSM               ((1 * 1024UL)>>CPU_SHIFT)

+

+/* 4K, PS<->PHY information exchange, common services */

+#define IRAM_BASE_ADDR_PUB              (IRAM_BASE_ADDR_PSM + IRAM_BASE_LEN_PSM)

+#define IRAM_BASE_LEN_PUB               ((4 * 1024UL)>>CPU_SHIFT)

+

+/* 512B, PS<->PHY information exchange, RF common services */

+#define IRAM_BASE_ADDR_PUB_RF           (IRAM_BASE_ADDR_PUB)

+#define IRAM_BASE_LEN_PUB_RF            (512UL>>CPU_SHIFT)

+

+/* 32B, secondary-mode AFC cache data space */

+#define IRAM_BASE_ADDR_SLAVE_AFC        (IRAM_BASE_ADDR_PUB_RF + IRAM_BASE_LEN_PUB_RF)

+#define IRAM_BASE_LEN_SLAVE_AFC         (32UL>>CPU_SHIFT)

+

+/* 1K, temperature-control data storage */

+#define IRAM_BASE_ADDR_TPC              (IRAM_BASE_ADDR_PUB + IRAM_BASE_LEN_PUB)

+#define IRAM_BASE_LEN_TPC               ((1 * 1024UL)>>CPU_SHIFT)

+

+/* 2K, interrupt/thread trace storage */

+#define IRAM_BASE_ADDR_OS_STATISTIC     (IRAM_BASE_ADDR_TPC + IRAM_BASE_LEN_TPC)

+#define IRAM_BASE_LEN_OS_STATISTIC      ((2 * 1024UL)>>CPU_SHIFT)

+

+/* 1K, system trace records */

+#define IRAM_BASE_ADDR_SYS_TRACE        (IRAM_BASE_ADDR_OS_STATISTIC + IRAM_BASE_LEN_OS_STATISTIC)

+#define IRAM_BASE_LEN_SYS_TRACE         ((1 * 1024UL)>>CPU_SHIFT)

+

+/* IRAM ICP addresses */

+#define ICP_CMD_BASE_ADDR               (IRAM_BASE_ADDR)

+#define ICP_DRV_BASE_ADDR               (IRAM_BASE_ADDR_DRV)

+#define DUAL_STANDBY_INTERF_GSM_USE_INFO_BASE_ADDR  (IRAM_BASE_ADDR_GSM)

+

+/* per-core IRAM OS-statistic addresses */

+#define IRAM_BASE_ADDR_OS_STATISTIC_PSCPU   (IRAM_BASE_ADDR_OS_STATISTIC)

+#define IRAM_BASE_LEN_OS_STATISTIC_PSCPU    (0x200UL>>CPU_SHIFT)

+#define IRAM_BASE_ADDR_OS_STATISTIC_PHYCPU  (IRAM_BASE_ADDR_OS_STATISTIC_PSCPU + IRAM_BASE_LEN_OS_STATISTIC_PSCPU)

+#define IRAM_BASE_LEN_OS_STATISTIC_PHYCPU   (0x200UL>>CPU_SHIFT)

+#define IRAM_BASE_ADDR_OS_STATISTIC_APCPU   (IRAM_BASE_ADDR_OS_STATISTIC_PHYCPU + IRAM_BASE_LEN_OS_STATISTIC_PHYCPU)

+#define IRAM_BASE_LEN_OS_STATISTIC_APCPU    (0x400UL>>CPU_SHIFT)

+

+/* per-core IRAM system-trace addresses */

+#define IRAM_BASE_ADDR_SYS_TRACE_RMCPU      (IRAM_BASE_ADDR_SYS_TRACE)

+#define IRAM_BASE_ADDR_SYS_TRACE_APCPU      (IRAM_BASE_ADDR_SYS_TRACE + (0x10>>CPU_SHIFT))

+#define IRAM_BASE_ADDR_SYS_TRACE_PSCPU      (IRAM_BASE_ADDR_SYS_TRACE + (0x20>>CPU_SHIFT))

+#define IRAM_BASE_ADDR_SYS_TRACE_PHYCPU     (IRAM_BASE_ADDR_SYS_TRACE + (0x30>>CPU_SHIFT))

+

+/* the phy-log optimization scheme reuses the 512 bytes after IRAM_BASE_ADDR_SYS_TRACE */

+#define IRAM_BASE_ADDR_ZCAT_PHY_LOG         (IRAM_BASE_ADDR_SYS_TRACE + (0x200>>CPU_SHIFT))

+

+/* the phy-log loss-tracking scheme reuses a 64-byte area after IRAM_BASE_ADDR_SYS_TRACE */

+#define IRAM_BASE_PHY_LOG_DROP_TRACE    	(IRAM_BASE_ADDR_ZCAT_PHY_LOG + (0x200>>CPU_SHIFT) - (0x40>>CPU_SHIFT))

+

+/* records the ramdump mode: 4 bytes */

+#define IRAM_BASE_ADDR_RAMDUMP_MODE         (IRAM_BASE_PHY_LOG_DROP_TRACE - (0x04>>CPU_SHIFT))

+

+/* records the SHM buffer address: 16 bytes */

+#define IRAM_BASE_ADDR_SHM_REMOTE_REGION    (IRAM_BASE_ADDR_RAMDUMP_MODE - (0x10>>CPU_SHIFT))

+

+/* records the zcat mode: 4 bytes */

+#define IRAM_BASE_ADDR_ZCAT_MODE            (IRAM_BASE_ADDR_SHM_REMOTE_REGION - (0x04>>CPU_SHIFT))

+

+/* the first 4 bytes of the trace area are reused to record the DDR size passed by boot */

+#define IRAM_BASE_ADDR_BOOT_DDR             (IRAM_BASE_ADDR_DRV - (0x04>>CPU_SHIFT))

+

+/* IRAM1 base address */

+#ifdef  DDR_BASE_ADDR_LINUX_VA

+#define IRAM1_BASE_ADDR                  ((unsigned long)(ZX_IRAM1_BASE))

+#else

+#define IRAM1_BASE_ADDR                  (0x00100000>>CPU_SHIFT)

+#endif

+#define IRAM1_BASE_LEN                   (0x00003000>>CPU_SHIFT)

+

+

+#define DDR_BASE_ADDR                   (0x20000000UL>>CPU_SHIFT)

+

+/* 3M, physical-layer image, loaded by PS */

+/* The ZSP on 7520 is configured as a non-cached region; only 4 segments can be configured, each segment address has specific requirements, and any change to these addresses must be confirmed with the PHY team */

+#ifdef  DDR_BASE_ADDR_LINUX_VA

+#define DDR_BASE_ADDR_PHY               ((unsigned long)(ZX_DDR_PHYCODE_BASE))

+#else

+#define DDR_BASE_ADDR_PHY               (DDR_BASE_ADDR)

+#endif

+

+#ifdef _USE_LTE_ONLY

+#define DDR_BASE_LEN_PHY                (0x00200000UL>>CPU_SHIFT)

+#else

+#define DDR_BASE_LEN_PHY                (0x00300000UL>>CPU_SHIFT)

+#endif

+#define DDR_BASE_OFF_PHY                (0)

+

+/* 1.5M, physical-layer DATA/HARQ/CRC */

+#define DDR_BASE_ADDR_PHY_DATA          (DDR_BASE_ADDR_PHY + DDR_BASE_LEN_PHY)

+#define DDR_BASE_LEN_PHY_DATA           (0x00180000UL>>CPU_SHIFT)

+#define DDR_BASE_OFF_PHY_DATA           (DDR_BASE_OFF_PHY + DDR_BASE_LEN_PHY)

+

+/* 1.0M, protocol stack <-> physical layer exchange */

+#define DDR_BASE_ADDR_LTE_DATA          (DDR_BASE_ADDR_PHY + DDR_BASE_LEN_PHY) //DDR_BASE_LEN_PHY_NV

+#define DDR_BASE_LEN_LTE_DATA           (0x00100000UL>>CPU_SHIFT)

+#define DDR_BASE_OFF_LTE_DATA           (DDR_BASE_OFF_PHY + DDR_BASE_LEN_PHY)

+

+/* 0.25M, supports Ramdump export */

+#define DDR_BASE_ADDR_RAMDUMP           (DDR_BASE_ADDR_LTE_DATA + DDR_BASE_LEN_LTE_DATA)

+#define DDR_BASE_LEN_RAMDUMP            (0x00040000UL>>CPU_SHIFT)

+#define DDR_BASE_OFF_RAMDUMP            (DDR_BASE_OFF_LTE_DATA + DDR_BASE_LEN_LTE_DATA)

+

+#ifdef _USE_VEHICLE_DC /* vehicle dual-core Linux */

+/* 37.75M, AP shared-core image (size defined per the 64M memory layout that includes the CAP core; this macro is not used elsewhere) */

+#define DDR_BASE_ADDR_AP                (DDR_BASE_ADDR_RAMDUMP + DDR_BASE_LEN_RAMDUMP)

+#define DDR_BASE_LEN_AP                 (0x025C0000UL>>CPU_SHIFT)

+#define DDR_BASE_OFF_AP                 (DDR_BASE_OFF_RAMDUMP + DDR_BASE_LEN_RAMDUMP)

+

+/* 2M, share memory between ap and cap */

+#define DDR_BASE_ADDR_CAP_BUF           (DDR_BASE_ADDR_AP + DDR_BASE_LEN_AP)

+#define DDR_BASE_LEN_CAP_BUF            (0x00200000UL>>CPU_SHIFT)

+#define DDR_BASE_OFF_CAP_BUF            (DDR_BASE_OFF_AP + DDR_BASE_LEN_AP)

+

+/* 84M/212M, cap image */

+#define DDR_BASE_ADDR_CAP               (DDR_BASE_ADDR_CAP_BUF + DDR_BASE_LEN_CAP_BUF)

+#define DDR_BASE_LEN_CAP                (0x05400000UL>>CPU_SHIFT)

+#define DDR_BASE_OFF_CAP                (DDR_BASE_OFF_CAP_BUF + DDR_BASE_LEN_CAP_BUF)

+

+/* dtb address of the cap core, used by uboot and the cap kernel */

+#define DDR_BASE_CAP_DTB_ADDR           (DDR_BASE_ADDR_CAP_BUF + (0x00100000UL>>CPU_SHIFT))

+#else

+/* 42.75M, AP shared-core image (size defined per the 64M memory layout that includes the CAP core; this macro is not used elsewhere) */

+#define DDR_BASE_ADDR_AP                (DDR_BASE_ADDR_RAMDUMP + DDR_BASE_LEN_RAMDUMP)

+#ifdef _USE_LTE_ONLY

+#define DDR_BASE_LEN_AP                 (0x02BC0000UL>>CPU_SHIFT)

+#else

+#define DDR_BASE_LEN_AP                 (0x02AC0000UL>>CPU_SHIFT)

+#endif

+#define DDR_BASE_OFF_AP                 (DDR_BASE_OFF_RAMDUMP + DDR_BASE_LEN_RAMDUMP)

+

+/* 1M, share memory between ap and cap */

+#define DDR_BASE_ADDR_CAP_BUF           (DDR_BASE_ADDR_AP + DDR_BASE_LEN_AP)

+#ifndef DDR_BASE_LEN_CAP_BUF

+#define DDR_BASE_LEN_CAP_BUF            (0x00100000UL>>CPU_SHIFT)

+#endif

+#define DDR_BASE_OFF_CAP_BUF            (DDR_BASE_OFF_AP + DDR_BASE_LEN_AP)

+

+/* 16M, cap image */

+#define DDR_BASE_ADDR_CAP               (DDR_BASE_ADDR_CAP_BUF + DDR_BASE_LEN_CAP_BUF)

+#ifndef DDR_BASE_LEN_CAP

+#define DDR_BASE_LEN_CAP                (0x01000000UL>>CPU_SHIFT)

+#endif

+#define DDR_BASE_OFF_CAP                (DDR_BASE_OFF_CAP_BUF + DDR_BASE_LEN_CAP_BUF)

+#endif

+

+#define DDR_BASE_PHYCODE_ADDR_PA     	(DDR_BASE_ADDR)

+#define DDR_BASE_MODEM_ADDR_PA       	(DDR_BASE_PHYCODE_ADDR_PA + DDR_BASE_LEN_PHY)

+#define DDR_BASE_MODEM_SIZE          	(DDR_BASE_LEN_LTE_DATA + DDR_BASE_LEN_RAMDUMP)

+#define DDR_BASE_AP_ADDR_PA             (DDR_BASE_MODEM_ADDR_PA + DDR_BASE_MODEM_SIZE)

+

+#define DDR_BASE_CAPBUF_ADDR_PA         (DDR_BASE_AP_ADDR_PA + DDR_BASE_LEN_AP)

+#define DDR_BASE_CAP_ADDR_PA            (DDR_BASE_CAPBUF_ADDR_PA + DDR_BASE_LEN_CAP_BUF)

+

+

+/* 1M, physical-layer NV (space reused) */

+#define DDR_BASE_ADDR_PHY_NV            (DDR_BASE_ADDR_LTE_DATA)

+#define DDR_BASE_LEN_PHY_NV             (0x00100000UL>>CPU_SHIFT)

+

+/* 0.375M, driver power-saving reuses the PS<->PHY exchange space */

+#define DDR_BASE_ADDR_PSM               (DDR_BASE_ADDR_LTE_DATA)

+#define DDR_BASE_LEN_PSM                (0x00060000UL>>CPU_SHIFT)

+#define DDR_BASE_OFF_PSM                (DDR_BASE_OFF_RAMDUMP)

+

+/* 1M, protocol stack <-> physical layer exchange (space reused) */

+#define DDR_BASE_ADDR_WCDMA_DATA        (DDR_BASE_ADDR_LTE_DATA)

+#define DDR_BASE_LEN_WCDMA_DATA         (DDR_BASE_LEN_LTE_DATA)

+

+#if 0

+/* PsBuffer base address */

+#define PS_BUF_BASE_ADDR                (DDR_BASE_ADDR_PSBUF)

+#endif

+

+/* ICP base address */

+#define ICP_DATA_BASE_ADDR              (DDR_BASE_ADDR_LTE_DATA)

+

+/* WCDMA base address */

+#define DDR_BASE_ADDR_FOR_W             (DDR_BASE_ADDR_WCDMA_DATA)

+

+/* tool-agent base address */

+/* #define TOOL_AGENT_BASE_ADDR            (DDR_BASE_ADDR_TOOL_AGENT) */

+

+#if 0

+/* PPP base address */

+#define PLAT_PPP_BASE_ADDR              (PS_BUF_BASE_ADDR)

+#endif

+

+/**/

+#define SHARE_BUF_AP_CP_BASE_ADDR       (DDR_BASE_ADDR_AP_CP_SHAREBUF)

+

+#if defined(_USE_CAP_SYS) || defined(_USE_VEHICLE_DC)

+#define ICP_CAP_BUF_ADDR                DDR_BASE_ADDR_CAP_BUF

+#define ICP_CAP_BUF_LEN                 ((924 * 1024UL)>>CPU_SHIFT)

+#define TOOL_CAP_BUF_ADDR               (ICP_CAP_BUF_ADDR + ICP_CAP_BUF_LEN)

+#define TOOL_CAP_BUF_LEN                ((60 * 1024UL)>>CPU_SHIFT)

+#define RINGBUF_CAP_TO_AP_ADDR          (TOOL_CAP_BUF_ADDR + TOOL_CAP_BUF_LEN)

+#define RINGBUF_CAP_TO_AP_LEN           ((32  * 1024UL)>>CPU_SHIFT)

+#define ADB_CAP_BUF_ADDR                (RINGBUF_CAP_TO_AP_ADDR + RINGBUF_CAP_TO_AP_LEN)

+#define ADB_CAP_BUF_LEN                 ((4 * 1024UL)>>CPU_SHIFT)

+#define RAMDUMP_CAP_CMM_BUF_ADDR        (ADB_CAP_BUF_ADDR + ADB_CAP_BUF_LEN)

+#define RAMDUMP_CAP_CMM_BUF_LEN         ((4 * 1024UL)>>CPU_SHIFT)

+#define RINGBUF_AP_TO_CAP_ADDR          (RAMDUMP_CAP_CMM_BUF_ADDR + RAMDUMP_CAP_CMM_BUF_LEN)

+#define RINGBUF_AP_TO_CAP_LEN           ((128 * 1024UL)>>CPU_SHIFT)

+#define TOOL_ZSP_TO_CAP_LOG_ADDR        (RINGBUF_AP_TO_CAP_ADDR + RINGBUF_AP_TO_CAP_LEN)

+#define TOOL_ZSP_TO_CAP_LOG_LEN         ((384 * 1024UL)>>CPU_SHIFT)

+#define RAMDUMP_AP_TO_CAP_BUF_ADDR      (TOOL_ZSP_TO_CAP_LOG_ADDR + TOOL_ZSP_TO_CAP_LOG_LEN)

+#define RAMDUMP_AP_TO_CAP_BUF_LEN       ((128 * 1024UL)>>CPU_SHIFT)

+#define TEE_SHARE_BUF_ADDR              (RAMDUMP_AP_TO_CAP_BUF_ADDR + RAMDUMP_AP_TO_CAP_BUF_LEN)

+#define TEE_SHARE_BUF_LEN               ((384 * 1024UL)>>CPU_SHIFT)

+

+#define ICP_CAP_BUF_ADDR_PA             DDR_BASE_CAPBUF_ADDR_PA

+#define TOOL_CAP_BUF_ADDR_PA            (ICP_CAP_BUF_ADDR_PA + ICP_CAP_BUF_LEN)

+#define RINGBUF_CAP_TO_AP_ADDR_PA       (TOOL_CAP_BUF_ADDR_PA + TOOL_CAP_BUF_LEN)

+#define ADB_CAP_BUF_ADDR_PA             (RINGBUF_CAP_TO_AP_ADDR_PA + RINGBUF_CAP_TO_AP_LEN)

+#define RAMDUMP_CAP_CMM_BUF_ADDR_PA     (ADB_CAP_BUF_ADDR_PA + ADB_CAP_BUF_LEN)

+#define RINGBUF_AP_TO_CAP_ADDR_PA       (RAMDUMP_CAP_CMM_BUF_ADDR_PA + RAMDUMP_CAP_CMM_BUF_LEN)

+#define TOOL_ZSP_TO_CAP_LOG_ADDR_PA     (RINGBUF_AP_TO_CAP_ADDR_PA + RINGBUF_AP_TO_CAP_LEN)

+#define RAMDUMP_AP_TO_CAP_BUF_ADDR_PA   (TOOL_ZSP_TO_CAP_LOG_ADDR_PA + TOOL_ZSP_TO_CAP_LOG_LEN)

+#define TEE_SHARE_BUF_ADDR_PA           (RAMDUMP_AP_TO_CAP_BUF_ADDR_PA + RAMDUMP_AP_TO_CAP_BUF_LEN)

+#endif

+

+/* On 7520V3 the IRAM0 layout is compressed; the protocol-stack/PHY exchange space moves to DDR, reusing the Ramdump space */

+/* 34K, PS<->PHY information exchange, LTE services */

+/* #define IRAM_BASE_ADDR_LTE              (DDR_BASE_ADDR_RAMDUMP) */

+/* 10K, PS<->PHY information exchange, LTE services; uses IRAM0, power optimization; 7K+3K, 3k for embms */

+#define IRAM_BASE_ADDR_LTE              (IRAM_BASE_ADDR_SYS_TRACE + IRAM_BASE_LEN_SYS_TRACE)

+#define IRAM_BASE_LEN_LTE               ((10 * 1024UL)>>CPU_SHIFT)

+

+/* 24K, PS<->PHY ICP exchange, uses IRAM */

+#define IRAM_BASE_ADDR_PS_PHY_SHAREBUF    (IRAM_BASE_ADDR_LTE + IRAM_BASE_LEN_LTE)

+#define IRAM_BASE_LEN_PS_PHY_SHAREBUF    ((24 * 1024UL)>>CPU_SHIFT)

+

+/* 221K, PS<->PHY ICP exchange, uses DDR, reuses RAMDUMP */

+#define DDR_BASE_ADDR_PS_PHY_SHAREBUF    (DDR_BASE_ADDR_RAMDUMP)

+#define DDR_BASE_LEN_PS_PHY_SHAREBUF    ((221 * 1024UL)>>CPU_SHIFT)

+

+/* 2k, zsp RAMDUMP */

+#define DDR_BASE_ADDR_PHY_RAMDUMP        (DDR_BASE_ADDR_PS_PHY_SHAREBUF + DDR_BASE_LEN_PS_PHY_SHAREBUF)

+#define DDR_BASE_LEN_PHY_RAMDUMP        ((2 * 1024UL)>>CPU_SHIFT)

+

+/* 1K, PS<->PHY information exchange, TD services, uses DDR */

+#define IRAM_BASE_ADDR_TD               (DDR_BASE_ADDR_PHY_RAMDUMP + DDR_BASE_LEN_PHY_RAMDUMP)

+/* #define IRAM_BASE_LEN_TD                ((25 * 1024UL)>>CPU_SHIFT) */

+#define IRAM_BASE_LEN_TD                ((1 * 1024UL)>>CPU_SHIFT)

+

+/* 12K, PS<->PHY information exchange, WCDMA services, uses DDR */

+#define IRAM_BASE_ADDR_WCDMA            (IRAM_BASE_ADDR_TD + IRAM_BASE_LEN_TD)

+/* #define IRAM_BASE_LEN_WCDMA             ((48 * 1024UL)>>CPU_SHIFT) */

+#define IRAM_BASE_LEN_WCDMA             ((12 * 1024UL)>>CPU_SHIFT)

+

+/* 20K, W UPA space */

+#define DDR_BASE_ADDR_WUPA_DATA         (IRAM_BASE_ADDR_WCDMA + IRAM_BASE_LEN_WCDMA)

+#define DDR_BASE_LEN_WUPA_DATA          ((20 * 1024UL)>>CPU_SHIFT)

+

+/* IRAM WCDMA base address */

+#define IRAM_BASE_ADDR_FOR_W            (IRAM_BASE_ADDR_WCDMA)

+

+/* DPRAM base address */

+#define DPRAM_BASE_ADDR                 (IRAM_BASE_ADDR_TD)

+

+/* DPRAM DDR base address */

+#define DPRAM_MEM_BASE_ADDR             (IRAM_BASE_ADDR_TD)

+

+/* PS tcm config for ramdump */

+#define RAMDUMP_PS_ITCM_BASE_EXTER      (0x0)

+#define RAMDUMP_PS_ITCM_BASE_INTER      (0x0)

+#define RAMDUMP_PS_ITCM_SIZE            (0x0)

+#define RAMDUMP_PS_DTCM_BASE_EXTER      (0x0)

+#define RAMDUMP_PS_DTCM_BASE_INTER      (0x0)

+#define RAMDUMP_PS_DTCM_SIZE            (0x0)

+

+/* ZSP Ramdump */

+/* #ifdef _USE_ZSP_RAMDUMP */

+# define RAMDUMP_ZSP_ITCM_BASE          (0x81040000UL)

+# define RAMDUMP_ZSP_ITCM_SIZE          (0x00010000UL)

+# define RAMDUMP_ZSP_DTCM_BASE          (0x81000000UL)

+# define RAMDUMP_ZSP_DTCM_SIZE          (0x00010000UL)

+

+# define RAMDUMP_ZSP_CODE_SIZE          (0x1b0000>>CPU_SHIFT)

+# define RAMDUMP_ZSP_IDDR_BASE          (DDR_BASE_ADDR_PHY)

+# define RAMDUMP_ZSP_IDDR_SIZE          (RAMDUMP_ZSP_CODE_SIZE)

+# define RAMDUMP_ZSP_DDDR_BASE          (RAMDUMP_ZSP_IDDR_BASE + RAMDUMP_ZSP_CODE_SIZE)

+# define RAMDUMP_ZSP_DDDR_SIZE          (DDR_BASE_LEN_PHY - RAMDUMP_ZSP_CODE_SIZE)

+

+# define RAMDUMP_ZSP_ITCM_SELF_BASE     (0x0)

+# define RAMDUMP_ZSP_DTCM_SELF_BASE     (0x10000UL)

+/* #endif */

+

+/*******************************************************************************

+*                            Data type definitions                             *

+*******************************************************************************/

+

+/*******************************************************************************

+*                         Global variable declarations                         *

+*******************************************************************************/

+

+/*******************************************************************************

+*                         Global function declarations                         *

+*******************************************************************************/

+

+#endif  // #ifndef _RAM_BASE_CONFIG_7520V3

+